We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.histogramdd().
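Before the project examples, here is a minimal, self-contained sketch of the basic call, with sample sizes and bin counts chosen purely for illustration: histogramdd takes an (N, D) array of samples and returns the D-dimensional count array plus one array of bin edges per dimension.

import numpy as np

# 1,000 random 3-D samples, binned on a 4 x 4 x 4 grid over the unit cube
samples = np.random.random((1000, 3))
hist, edges = np.histogramdd(samples, bins=(4, 4, 4),
                             range=[[0, 1], [0, 1], [0, 1]])

print(hist.shape)   # (4, 4, 4): one count per bin
print(len(edges))   # 3: one array of bin edges per dimension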
def calc_tvd(sess, Generator, Data, N=50000, nbins=10):
    Xd = sess.run(Data.X, {Data.N: N})
    step, Xg = sess.run([Generator.step, Generator.X], {Generator.N: N})

    p_gen, _ = np.histogramdd(Xg, bins=nbins, range=[[0, 1], [0, 1], [0, 1]], normed=True)
    p_dat, _ = np.histogramdd(Xd, bins=nbins, range=[[0, 1], [0, 1], [0, 1]], normed=True)
    p_gen /= nbins**3
    p_dat /= nbins**3

    tvd = 0.5 * np.sum(np.abs(p_gen - p_dat))
    mvd = np.max(np.abs(p_gen - p_dat))
    return step, tvd, mvd

    # Unreachable after the return above:
    s_tvd = make_summary(Data.name + '_tvd', tvd)
    s_mvd = make_summary(Data.name + '_mvd', mvd)
    return step, s_tvd, s_mvd
    #return make_summary('tvd/'+Generator.name,tvd)
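The histogram comparison above depends on a TensorFlow session and the project's Generator/Data objects; the histogramdd part can be restated on its own. A minimal sketch under the assumption of two (N, 3) sample arrays on the unit cube (the helper name tvd_between_samples is ours), normalizing raw counts directly instead of going through normed= densities:

import numpy as np

def tvd_between_samples(xg, xd, nbins=10):
    # Bin both sample sets on the same grid over the unit cube and
    # convert the raw counts to per-bin probabilities.
    rng = [[0, 1], [0, 1], [0, 1]]
    p_gen, _ = np.histogramdd(xg, bins=nbins, range=rng)
    p_dat, _ = np.histogramdd(xd, bins=nbins, range=rng)
    p_gen /= p_gen.sum()
    p_dat /= p_dat.sum()
    # Total variation distance and maximum per-bin deviation.
    tvd = 0.5 * np.sum(np.abs(p_gen - p_dat))
    mvd = np.max(np.abs(p_gen - p_dat))
    return tvd, mvd

xg = np.random.random((50000, 3))
xd = np.random.random((50000, 3))
print(tvd_between_samples(xg, xd))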
def test_rightmost_binedge(self):
    # Test event very close to rightmost binedge. See Github issue #4266
    x = [0.9999999995]
    bins = [[0., 0.5, 1.0]]
    hist, _ = histogramdd(x, bins=bins)
    assert_(hist[0] == 0.0)
    assert_(hist[1] == 1.)
    x = [1.0]
    bins = [[0., 0.5, 1.0]]
    hist, _ = histogramdd(x, bins=bins)
    assert_(hist[0] == 0.0)
    assert_(hist[1] == 1.)
    x = [1.0000000001]
    bins = [[0., 0.5, 1.0]]
    hist, _ = histogramdd(x, bins=bins)
    assert_(hist[0] == 0.0)
    assert_(hist[1] == 1.)
    x = [1.0001]
    bins = [[0., 0.5, 1.0]]
    hist, _ = histogramdd(x, bins=bins)
    assert_(hist[0] == 0.0)
    assert_(hist[1] == 0.0)
def extract_colour_histogram(image, labels, n_bins=8, use_lab=False):
    ih, iw, _ = image.shape
    n_labels = labels.max() + 1
    _range = np.array([[0, 256], [0, 256], [0, 256]], dtype='float')  # for rgb histograms
    if use_lab:
        image = rgb2lab(image)
        _range[:] = [[0, 100], [-500*25/29, 500*25/29], [-200*25/29, 200*25/29]]
    hist = np.zeros((n_labels, n_bins**3))
    mask = np.zeros((ih, iw), dtype='bool')
    for i in range(n_labels):
        mask[:] = labels == i
        yy, xx = mask.nonzero()
        pixels = image[yy, xx, :]
        hist[i, :] = np.histogramdd(sample=pixels, bins=n_bins, range=_range)[0].flat
    return hist
def posterior_histogram(self, n_bins=10):
    """
    Computes a weighted histogram of multivariate posterior samples
    and returns the histogram H and a list of p arrays describing the
    bin edges for each dimension.

    Returns
    -------
    python list containing two elements (H = np.ndarray, edges = list of p arrays)
    """
    endp = len(self.parameters) - 1
    endw = len(self.weights) - 1
    params = self.parameters[endp]
    weights = self.weights[endw]

    H, edges = np.histogramdd(params, bins=n_bins,
                              weights=weights.reshape(len(weights),))
    return [H, edges]
def calculate_confusion_matrix_from_arrays(prediction, ground_truth, nr_labels):
    """
    calculate the confusion matrix for one image pair.
    prediction and ground_truth have to have the same shape.
    """
    # an (n, 2) array with each row being a (ground truth, prediction) pixel pair
    replace_indices = np.vstack((
        ground_truth.flatten(),
        prediction.flatten())
    ).T
    # add up confusion matrix
    confusion_matrix, _ = np.histogramdd(
        replace_indices,
        bins=(nr_labels, nr_labels),
        range=[(0, nr_labels), (0, nr_labels)]
    )
    confusion_matrix = confusion_matrix.astype(np.uint32)
    return confusion_matrix
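A toy invocation of the function above (assuming it and its numpy import are in scope), showing how the (ground truth, prediction) pairs land in the confusion matrix; the 2 x 2 label images are made up for illustration:

import numpy as np

ground_truth = np.array([[0, 1], [2, 2]])
prediction   = np.array([[0, 2], [2, 1]])

cm = calculate_confusion_matrix_from_arrays(prediction, ground_truth, nr_labels=3)
# cm[i, j] counts pixels with true label i and predicted label j;
# the diagonal here holds the two correctly classified pixels.
print(cm)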
def kl_preds_v2(model, sess, s_test, a_test, n_rep_per_item=200):
    ## Compare sample distribution to ground truth
    Env = grid_env(False)
    n_test_items, state_size = s_test.shape
    distances = np.empty([n_test_items, 3])
    for i in range(n_test_items):
        state = s_test[i, :].astype('int32')
        action = np.round(a_test[i, :]).astype('int32')

        # ground truth
        state_truth = np.empty([n_rep_per_item, s_test.shape[1]])
        for o in range(n_rep_per_item):
            Env.set_state(state.flatten())
            s1, r, dead = Env.step(action.flatten())
            state_truth[o, :] = s1
        truth_count, bins = np.histogramdd(state_truth, bins=[np.arange(8) - 0.5] * state_size)
        truth_prob = truth_count / n_rep_per_item

        # predictions of model
        y_sample = sess.run(model.y_sample, {
            model.x: state[None, :].repeat(n_rep_per_item, axis=0),
            model.y: np.zeros(np.shape(state[None, :])).repeat(n_rep_per_item, axis=0),
            model.a: action[None, :].repeat(n_rep_per_item, axis=0),
            model.Qtarget: np.zeros(np.shape(action[None, :])).repeat(n_rep_per_item, axis=0),
            model.lr: 0,
            model.lamb: 1,
            model.temp: 0.00001,
            model.is_training: False,
            model.k: 1})
        sample_count, bins = np.histogramdd(y_sample, bins=[np.arange(8) - 0.5] * state_size)
        sample_prob = sample_count / n_rep_per_item

        distances[i, 0] = np.sum(truth_prob * (np.log(truth_prob + 1e-5) - np.log(sample_prob + 1e-5)))  # KL(p|p_tilde)
        distances[i, 1] = np.sum(sample_prob * (np.log(sample_prob + 1e-5) - np.log(truth_prob + 1e-5)))  # Inverse KL(p_tilde|p)
        distances[i, 2] = norm(np.sqrt(truth_prob) - np.sqrt(sample_prob)) / np.sqrt(2)  # Hellinger distance
    return np.mean(distances, axis=0)
def test_histogramdd_too_many_bins(self):
    # Ticket 928.
    assert_raises(ValueError, np.histogramdd, np.ones((1, 10)), bins=2**10)
def test_simple(self):
    x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
                  [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
    H, edges = histogramdd(x, (2, 3, 3),
                           range=[[-1, 1], [0, 3], [0, 3]])
    answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]],
                       [[0, 1, 0], [0, 0, 1], [0, 0, 1]]])
    assert_array_equal(H, answer)

    # Check normalization
    ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
    H, edges = histogramdd(x, bins=ed, normed=True)
    assert_(np.all(H == answer / 12.))

    # Check that H has the correct shape.
    H, edges = histogramdd(x, (2, 3, 4),
                           range=[[-1, 1], [0, 3], [0, 4]],
                           normed=True)
    answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],
                       [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])
    assert_array_almost_equal(H, answer / 6., 4)

    # Check that a sequence of arrays is accepted and H has the correct
    # shape.
    z = [np.squeeze(y) for y in split(x, 3, axis=1)]
    H, edges = histogramdd(
        z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])
    answer = np.array([[[0, 0], [0, 0], [0, 0]],
                       [[0, 1], [0, 0], [1, 0]],
                       [[0, 1], [0, 0], [0, 0]],
                       [[0, 0], [0, 0], [0, 0]]])
    assert_array_equal(H, answer)

    Z = np.zeros((5, 5, 5))
    Z[list(range(5)), list(range(5)), list(range(5))] = 1.
    H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)
    assert_array_equal(H, Z)
def test_shape_3d(self):
    # All possible permutations for bins of different lengths in 3D.
    bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
            (4, 5, 6))
    r = rand(10, 3)
    for b in bins:
        H, edges = histogramdd(r, b)
        assert_(H.shape == b)
def test_shape_4d(self):
    # All possible permutations for bins of different lengths in 4D.
    bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
            (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
            (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
            (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
            (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
            (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))
    r = rand(10, 4)
    for b in bins:
        H, edges = histogramdd(r, b)
        assert_(H.shape == b)
def test_weights(self):
    v = rand(100, 2)
    hist, edges = histogramdd(v)
    n_hist, edges = histogramdd(v, normed=True)
    w_hist, edges = histogramdd(v, weights=np.ones(100))
    assert_array_equal(w_hist, hist)
    w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, normed=True)
    assert_array_equal(w_hist, n_hist)
    w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)
    assert_array_equal(w_hist, 2 * hist)
def test_empty(self):
    a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))
    assert_array_max_ulp(a, np.array([[0.]]))
    a, b = np.histogramdd([[], [], []], bins=2)
    assert_array_max_ulp(a, np.zeros((2, 2, 2)))
def test_bins_errors(self):
    # There are two ways to specify bins. Check for the right errors
    # when mixing those.
    x = np.arange(8).reshape(2, 4)
    assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
    assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
    assert_raises(
        ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 2, 3]])
    assert_raises(
        ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
    assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
def test_inf_edges(self):
    # Test using +/-inf bin edges works. See #1788.
    with np.errstate(invalid='ignore'):
        x = np.arange(6).reshape(3, 2)
        expected = np.array([[1, 0], [0, 1], [0, 1]])
        h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
        assert_allclose(h, expected)
        h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
        assert_allclose(h, expected)
        h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
        assert_allclose(h, expected)
def test_finite_range(self):
    vals = np.random.random((100, 3))
    histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]])
    assert_raises(ValueError, histogramdd, vals,
                  range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
    assert_raises(ValueError, histogramdd, vals,
                  range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
def atoms_to_density_map(atoms, voxelSZ):
    (x, y, z) = atoms[:, 1:4].T.copy()
    (x_min, x_max) = (x.min(), x.max())
    (y_min, y_max) = (y.min(), y.max())
    (z_min, z_max) = (z.min(), z.max())
    grid_len = max([x_max - x_min, y_max - y_min, z_max - z_min])
    R = int(np.ceil(grid_len / voxelSZ))
    if R % 2 == 0:
        R += 1
    msg = "Length of particle (voxels), %d" % (R)
    logging.info(msg)
    elec_den = atoms[:, 0].copy()
    #x = (x-x_min)/voxelSZ
    #y = (y-y_min)/voxelSZ
    #z = (z-z_min)/voxelSZ
    x = (x - 0.5*(x_max + x_min - grid_len)) / voxelSZ
    y = (y - 0.5*(y_max + y_min - grid_len)) / voxelSZ
    z = (z - 0.5*(z_max + z_min - grid_len)) / voxelSZ
    bins = np.arange(R + 1)
    all_bins = np.vstack((bins, bins, bins))
    coords = np.asarray([x, y, z]).T
    #(h, h_edges) = np.histogramdd(coords, bins=all_bins, weights=elec_den)
    #return h
    #return griddata(coords, elec_den, np.mgrid[0:R,0:R,0:R].T, method='linear', fill_value=0.).T
    integ = np.floor(coords)
    frac = coords - integ
    ix = integ[:, 0]; iy = integ[:, 1]; iz = integ[:, 2]
    fx = frac[:, 0]; fy = frac[:, 1]; fz = frac[:, 2]
    cx = 1. - fx; cy = 1. - fy; cz = 1. - fz
    # Trilinear deposition: spread each atom's electron density onto the
    # eight surrounding voxels, accumulating one corner per histogramdd call.
    h_total = np.histogramdd(np.asarray([ix, iy, iz]).T, weights=elec_den*cx*cy*cz, bins=all_bins)[0]
    h_total += np.histogramdd(np.asarray([ix, iy, iz+1]).T, weights=elec_den*cx*cy*fz, bins=all_bins)[0]
    h_total += np.histogramdd(np.asarray([ix, iy+1, iz]).T, weights=elec_den*cx*fy*cz, bins=all_bins)[0]
    h_total += np.histogramdd(np.asarray([ix, iy+1, iz+1]).T, weights=elec_den*cx*fy*fz, bins=all_bins)[0]
    h_total += np.histogramdd(np.asarray([ix+1, iy, iz]).T, weights=elec_den*fx*cy*cz, bins=all_bins)[0]
    h_total += np.histogramdd(np.asarray([ix+1, iy, iz+1]).T, weights=elec_den*fx*cy*fz, bins=all_bins)[0]
    h_total += np.histogramdd(np.asarray([ix+1, iy+1, iz]).T, weights=elec_den*fx*fy*cz, bins=all_bins)[0]
    h_total += np.histogramdd(np.asarray([ix+1, iy+1, iz+1]).T, weights=elec_den*fx*fy*fz, bins=all_bins)[0]
    return h_total
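A hypothetical toy call of the function above, assuming it and its numpy/logging imports are in scope; the atoms array and voxel size below are made up purely for illustration:

import numpy as np

# four "atoms": electron density in column 0, x/y/z coordinates in columns 1-3
atoms = np.array([[1.0, 0.0, 0.0, 0.0],
                  [2.0, 1.2, 0.4, 0.8],
                  [1.5, 2.5, 1.1, 0.3],
                  [0.5, 0.7, 2.2, 1.9]])
rho = atoms_to_density_map(atoms, voxelSZ=0.5)
print(rho.shape)   # (5, 5, 5) for this input: a cubic voxel grid
print(rho.sum())   # equals atoms[:, 0].sum() up to floating-point error,
                   # since each atom's eight trilinear weights sum to one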
def img_hist(img):
    arr = np.array(img.getdata(), np.uint8)
    return np.histogramdd(arr[:, :-1], bins=6, range=[[0, 256]] * 3, weights=arr[:, 3])[0]
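A quick check of the function above, assuming img_hist and its numpy import are in scope and Pillow is installed; the solid-colour RGBA image is only illustrative:

from PIL import Image

# a solid red 8x8 RGBA image; the alpha channel is used as the per-pixel weight
img = Image.new('RGBA', (8, 8), (255, 0, 0, 255))
print(img_hist(img).shape)   # (6, 6, 6): joint RGB histogram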
def make_histogram(
    image_array,
    num_bins,
    multidim,
    threshold_palette=None,
    ranges=((0, 255), (0, 255), (0, 255)),
):
    # type: (any, any, bool, any, any) -> any
    channel, x, y = image_array.shape
    if not multidim:
        histogram_one = []
        for h_channel, range in zip(image_array, ranges):
            hist = numpy.histogram(h_channel, num_bins, range=range)[0]
            histogram_one.append(hist)
    else:
        h_each_channel = numpy.reshape(image_array, (channel, x * y)).T
        bins_each_channel = numpy.asarray([num_bins] * channel)
        histogram_one = numpy.histogramdd(h_each_channel, bins_each_channel, range=ranges)[0]
    hist = numpy.asarray(histogram_one) / (x * y)
    if threshold_palette is not None:
        palette = numpy.zeros(shape=hist.shape)
        palette[hist > threshold_palette] = 1
        hist = palette
    hist = hist.reshape(-1)
    return hist.astype(image_array.dtype)
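A toy invocation of the joint-histogram branch, assuming make_histogram above is in scope; the random channel-first array merely stands in for an RGB image:

import numpy

# channel-first "RGB image" of random values, cast to float so the final
# astype() does not truncate the normalized counts
arr = numpy.random.randint(0, 256, size=(3, 16, 16)).astype('float64')
h = make_histogram(arr, num_bins=4, multidim=True)
print(h.shape)   # (64,): a flattened 4 x 4 x 4 joint RGB histogram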
def __init__(self, data, names=None):
    self.NROW = data.shape[0]
    self.NVAR = data.shape[1]
    if names is None:
        self.names = range(data.shape[1])
    else:
        assert (len(names) == self.NVAR), 'Passed-in names length must equal number of data columns'
        self.names = names
    self.bins = [len(np.unique(data[:, n])) for n in range(self.NVAR)]
    hist, _ = np.histogramdd(data, bins=self.bins)
    self.counts = hist
    self.joint = (hist / hist.sum()) + 1e-3

    ## COMPUTE MARGINAL FOR EACH VARIABLE ##
    #_range = range(self.NVAR)
    #for i,rv in enumerate(self.names):
    #    _axis = copy(_range)
    #    _axis.remove(i)
    #    self.marginal[rv] = np.sum(self.joint,axis=_axis)
    #self.marginal = dict([(rv, np.sum(self.joint,axis=i)) for i,rv in enumerate(self.names)])

    self.cache = {}
def image_entropy(img):
    w, h = img.shape
    a = np.array(img.reshape((w*h, 1)))
    h, e = np.histogramdd(a, bins=(16,), range=((0, 256),))
    prob = h / np.sum(h)     # normalize
    prob = prob[prob > 0]    # remove zeros
    return -np.sum(prob * np.log2(prob))
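A quick sanity check of the function above, assuming it and its numpy import are in scope; uniform noise should come out close to the maximum entropy of log2(16) = 4 bits:

import numpy as np

# uniform 8-bit noise as a stand-in grayscale image
img = np.random.randint(0, 256, size=(64, 64))
print(image_entropy(img))   # close to 4 bits for uniform noise over 16 bins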
def chist(im):
    '''Compute color histogram of input image

    Parameters
    ----------
    im : ndarray
        should be an RGB image

    Returns
    -------
    c : ndarray
        1-D array of histogram values
    '''
    # Downsample pixel values:
    im = im // 64

    # We can also implement the following by using np.histogramdd
    # im = im.reshape((-1,3))
    # bins = [np.arange(5), np.arange(5), np.arange(5)]
    # hist = np.histogramdd(im, bins=bins)[0]
    # hist = hist.ravel()

    # Separate RGB channels:
    r, g, b = im.transpose((2, 0, 1))

    pixels = 1 * r + 4 * g + 16 * b
    hist = np.bincount(pixels.ravel(), minlength=64)
    hist = hist.astype(float)
    return np.log1p(hist)
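The commented-out histogramdd alternative inside chist can be spelled out as a standalone function; a minimal sketch, with the hypothetical name chist_dd, assuming an 8-bit RGB array as input:

import numpy as np

def chist_dd(im):
    # Downsample 8-bit values to 4 levels per channel (0-3), as in chist.
    im = im // 64
    pixels = im.reshape((-1, 3))
    # One bin per quantized level in each channel -> 4 * 4 * 4 = 64 bins.
    bins = [np.arange(5), np.arange(5), np.arange(5)]
    hist = np.histogramdd(pixels, bins=bins)[0].ravel()
    return np.log1p(hist.astype(float))

im = np.random.randint(0, 256, size=(32, 32, 3))
print(chist_dd(im).shape)   # (64,)

The 64 counts come out in a different bin order than the r + 4g + 16b encoding used by bincount in chist, but they are the same set of values.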