The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.repeat().
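Before the project examples, here is a minimal sketch of the call patterns they all build on. The arrays and repeat counts are illustrative only and do not come from any of the projects below.

import numpy as np

a = np.array([[1, 2], [3, 4]])

np.repeat(3, 4)               # scalar input -> array([3, 3, 3, 3])
np.repeat(a, 2)               # no axis: flatten, repeat each element -> [1 1 2 2 3 3 4 4]
np.repeat(a, 2, axis=0)       # repeat rows -> [[1 2], [1 2], [3 4], [3 4]]
np.repeat(a, [1, 3], axis=1)  # per-column repeat counts -> [[1 2 2 2], [3 4 4 4]]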
def compute_nearest_neighbors(submatrix, balltree, k, row_start):
    """ Compute k nearest neighbors on a submatrix
    Args: submatrix (np.ndarray): Data submatrix
          balltree: Nearest neighbor index (from sklearn)
          k: number of nearest neighbors to compute
          row_start: row offset into larger matrix
    Returns a COO sparse adjacency matrix of nearest neighbor relations as (i,j,x)"""
    nn_dist, nn_idx = balltree.query(submatrix, k=k+1)

    # Remove the self-as-neighbors
    nn_idx = nn_idx[:, 1:]
    nn_dist = nn_dist[:, 1:]

    # Construct a COO sparse matrix of edges and distances
    i = np.repeat(row_start + np.arange(nn_idx.shape[0]), k)
    j = nn_idx.ravel().astype(int)

    return (i, j, nn_dist.ravel())
def standard_case(self):
    """Create standard testcase from Thetas defined in this Testcase.

    The following metrics can be calculated by hand and should match
    the computations:

    precisions: [1, 1, 0, 2/3, 1]
    recalls:    [1, 1, 0, 1, 0.5]
    f1s:        [1, 1, 0, 0.8, 2/3]
    tps: 1 + 1 + 0 + 2 + 1 = 5
    fps: 0 + 0 + 1 + 1 + 0 = 2
    fns: 0 + 0 + 2 + 0 + 1 = 3
    tns: 2 + 2 + 0 + 0 + 1 = 5
    """
    Theta_true = np.vstack([
        np.repeat(self.Theta_true1[nx, :, :], 2, axis=0),
        np.repeat(self.Theta_true2[nx, :, :], 3, axis=0)
    ])
    Theta_pred = np.vstack([
        np.repeat(self.Theta_pred1[nx, :, :], 3, axis=0),
        self.Theta_pred2[nx, :, :],
        self.Theta_pred3[nx, :, :]
    ])
    return Theta_true, Theta_pred
def test_repeat(self):
    """ Test if `repeat` works the same as np.repeat."""
    with tf.Session().as_default():
        # try different tensor types
        for npdtype, tfdtype in [(np.int32, tf.int32), (np.float32, tf.float32)]:
            for init_value in [np.array([0, 1, 2, 3], dtype=npdtype),
                               np.array([[0, 1], [2, 3], [4, 5]], dtype=npdtype)]:
                # and all their axes
                for axis in range(len(init_value.shape)):
                    for repeats in [1, 2, 3, 11]:
                        tensor = tf.constant(init_value, dtype=tfdtype)
                        repeated_value = repeat(tensor, repeats=repeats, axis=axis).eval()
                        expected_value = np.repeat(init_value, repeats=repeats, axis=axis)
                        self.assertTrue(np.all(repeated_value == expected_value))
def check_string_input(self, input_name, input_value):
    # isinstance check: `type(x) is np.array` is always False, since
    # np.array is a function, not a type.
    if isinstance(input_value, np.ndarray):
        if input_value.size == self.P:
            setattr(self, input_name, input_value)
        elif input_value.size == 1:
            setattr(self, input_name, np.repeat(input_value, self.P))
        else:
            raise ValueError("length of %s is %d; should be %d"
                             % (input_name, input_value.size, self.P))
    elif isinstance(input_value, str):
        setattr(self, input_name, float(input_value) * np.ones(self.P))
    elif isinstance(input_value, list):
        if len(input_value) == self.P:
            setattr(self, input_name, np.array([str(x) for x in input_value]))
        elif len(input_value) == 1:
            setattr(self, input_name, np.repeat(input_value, self.P))
        else:
            raise ValueError("length of %s is %d; should be %d"
                             % (input_name, len(input_value), self.P))
    else:
        raise ValueError("user provided %s with an unsupported type" % input_name)
def __mmap_ncs_packet_headers(self, filename):
    """
    Memory map of the Neuralynx .ncs file optimized for extraction of
    data packet headers. Reading standard dtype improves speed, but
    timestamps need to be reconstructed.
    """
    filesize = getsize(self.sessiondir + sep + filename)  # in bytes
    if filesize > 16384:
        data = np.memmap(self.sessiondir + sep + filename,
                         dtype='<u4',
                         shape=(int((filesize - 16384) / 4 / 261), 261),
                         mode='r', offset=16384)

        ts = data[:, 0:2]
        multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data), axis=0)
        timestamps = np.sum(ts * multi, axis=1)
        # timestamps = data[:,0] + (data[:,1] *2**32)
        header_u4 = data[:, 2:5]

        return timestamps, header_u4
    else:
        return None
def __mmap_ncs_packet_timestamps(self, filename):
    """
    Memory map of the Neuralynx .ncs file optimized for extraction of
    data packet headers. Reading standard dtype improves speed, but
    timestamps need to be reconstructed.
    """
    filesize = getsize(self.sessiondir + sep + filename)  # in bytes
    if filesize > 16384:
        data = np.memmap(self.sessiondir + sep + filename,
                         dtype='<u4',
                         shape=(int((filesize - 16384) / 4 / 261), 261),
                         mode='r', offset=16384)

        ts = data[:, 0:2]
        multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data), axis=0)
        timestamps = np.sum(ts * multi, axis=1)
        # timestamps = data[:,0] + data[:,1]*2**32

        return timestamps
    else:
        return None
def _makeflat(self, start=None, end=None, groups=False):
    eeg = list()
    for sub in self.data[start:end]:
        if len(sub) % self.chunk_len == 0:
            eeg.append(sub.reshape([-1, self.chunk_len, 3]))
        else:
            print('ERROR: Please choose a chunk length that is a factor of {}.'
                  ' Current len = {}'.format(self.samples_per_epoch, len(sub)))
            return [0, 0]
    hypno = list()
    group = list()
    # integer division: np.repeat needs an integer repeat count
    hypno_repeat = self.samples_per_epoch // self.chunk_len
    idx = 0
    for sub in self.hypno[start:end]:
        hypno.append(np.repeat(sub, hypno_repeat))
        group.append(np.repeat(idx, len(hypno[-1])))
        idx += 1
    if groups:
        return np.vstack(eeg), np.hstack(hypno), np.hstack(group)
    else:
        return np.vstack(eeg), np.hstack(hypno)
def _get_intercept_stats(self, add_slopes=True):
    # start with mean and variance of Y on the link scale
    mod = sm.GLM(endog=self.model.y.data,
                 exog=np.repeat(1, len(self.model.y.data)),
                 family=self.model.family.smfamily(),
                 missing='drop' if self.model.dropna else 'none').fit()
    mu = mod.params
    # multiply SE by sqrt(N) to turn it into (approx.) SD(Y) on link scale
    sd = (mod.cov_params()[0] * len(mod.mu))**.5

    # modify mu and sd based on means and SDs of slope priors.
    if len(self.model.fixed_terms) > 1 and add_slopes:
        means = np.array([x['mu'] for x in self.priors.values()])
        sds = np.array([x['sd'] for x in self.priors.values()])
        # add to intercept prior
        index = list(self.priors.keys())
        mu -= np.dot(means, self.stats['mean_x'][index])
        sd = (sd**2 + np.dot(sds**2, self.stats['mean_x'][index]**2))**.5

    return mu, sd
def test_slda():
    l = language(10000)
    n_iter = 2000
    KL_thresh = 0.001

    nu2 = l['K']
    sigma2 = 1
    np.random.seed(l['seed'])
    eta = np.random.normal(scale=nu2, size=l['K'])
    y = [np.dot(eta, l['thetas'][i]) for i in range(l['D'])] + \
        np.random.normal(scale=sigma2, size=l['D'])
    _beta = np.repeat(0.01, l['V'])
    _mu = 0
    slda = SLDA(l['K'], l['alpha'], _beta, _mu, nu2, sigma2, n_iter,
                seed=l['seed'], n_report_iter=l['n_report_iters'])
    slda.fit(l['doc_term_matrix'], y)

    assert_probablity_distribution(slda.phi)
    check_KL_divergence(l['topics'], slda.phi, KL_thresh)
def test_blslda():
    l = language(10000)
    n_iter = 1500
    KL_thresh = 0.03

    mu = 0.
    nu2 = 1.
    np.random.seed(l['seed'])
    eta = np.random.normal(loc=mu, scale=nu2, size=l['K'])
    zeta = np.array([np.dot(eta, l['thetas'][i]) for i in range(l['D'])])
    y = (zeta >= 0).astype(int)
    _beta = np.repeat(0.01, l['V'])
    _b = 7.25
    blslda = BLSLDA(l['K'], l['alpha'], _beta, mu, nu2, _b, n_iter,
                    seed=l['seed'], n_report_iter=l['n_report_iters'])
    blslda.fit(l['doc_term_matrix'], y)

    assert_probablity_distribution(blslda.phi)
    check_KL_divergence(l['topics'], blslda.phi, KL_thresh)
def update_photo(data=None, widget=None):
    global Z
    if data is None:
        # By default, assume we're updating with the current value of Z
        data = np.repeat(np.repeat(np.uint8(from_tanh(model.sample_at(np.float32([Z.flatten()]))[0])), 4, 1), 4, 2)
    else:
        data = np.repeat(np.repeat(np.uint8(data), 4, 1), 4, 2)

    if widget is None:
        widget = output
    # Reshape image to canvas
    mshape = (4 * 64, 4 * 64, 1)
    im = Image.fromarray(np.concatenate([np.reshape(data[0], mshape),
                                         np.reshape(data[1], mshape),
                                         np.reshape(data[2], mshape)], axis=2),
                         mode='RGB')

    # Make sure photo is an object of the current widget so the garbage collector doesn't wreck it
    widget.photo = ImageTk.PhotoImage(image=im)
    widget.create_image(0, 0, image=widget.photo, anchor=NW)
    widget.tag_raise(pixel_rect)

# Function to update the latent canvas.
def update_canvas(widget=None):
    global r, Z, res, rects, painted_rects
    if widget is None:
        widget = w
    # Update display values
    r = np.repeat(np.repeat(Z, r.shape[0] // Z.shape[0], 0), r.shape[1] // Z.shape[1], 1)
    # If we're letting freeform painting happen, delete the painted rectangles
    for p in painted_rects:
        w.delete(p)
    painted_rects = []
    for i in range(Z.shape[0]):
        for j in range(Z.shape[1]):
            w.itemconfig(int(rects[i, j]), fill=rb(255 * Z[i, j]), outline=rb(255 * Z[i, j]))

# Function to move the paintbrush
def test_find_multiple_noisy(self):
    """ Test finding multiple particles (noisy) """
    self.atol = 5
    radius = np.random.random() * 15 + 15
    generated_image = self.generate_image(radius, 10, noise=0.2)
    actual_number = len(generated_image.coords)
    fits = find_disks(generated_image.image, (radius / 2.0, radius * 2.0),
                      maximum=actual_number)

    _, coords = sort_positions(generated_image.coords,
                               np.array([fits['y'].values, fits['x'].values]).T)

    if len(fits) == 0:  # Nothing found
        actual = np.repeat([[np.nan, np.nan, np.nan]], actual_number, axis=0)
    else:
        actual = fits[['r', 'y', 'x']].values.astype(np.float64)

    expected = np.array([np.full(actual_number, radius, np.float64),
                         coords[:, 0], coords[:, 1]]).T

    return np.sqrt(((actual - expected)**2).mean(0)), [0] * 3
def make_quantile_df(data, draw_quantiles):
    """
    Return a dataframe with info needed to draw quantile segments
    """
    dens = data['density'].cumsum() / data['density'].sum()
    ecdf = interp1d(dens, data['y'], assume_sorted=True)
    ys = ecdf(draw_quantiles)

    # Get the violin bounds for the requested quantiles
    violin_xminvs = interp1d(data['y'], data['xminv'])(ys)
    violin_xmaxvs = interp1d(data['y'], data['xmaxv'])(ys)

    data = pd.DataFrame({
        'x': interleave(violin_xminvs, violin_xmaxvs),
        'y': np.repeat(ys, 2),
        'group': np.repeat(np.arange(1, len(ys)+1), 2)})

    return data
def draw_group(data, panel_params, coord, ax, **params):
    n = len(data)
    data = data.sort_values('x', kind='mergesort')

    # create stepped path -- interleave x with
    # itself and y with itself
    xs = np.repeat(range(n), 2)[:-1]
    ys = np.repeat(range(0, n), 2)[1:]

    # horizontal first
    if params['direction'] == 'hv':
        xs, ys = ys, xs

    df = pd.DataFrame({'x': data['x'].values[xs],
                       'y': data['y'].values[ys]})
    copy_missing_columns(df, data)
    geom_path.draw_group(df, panel_params, coord, ax, **params)
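The stepped-path index trick above is easiest to see on a tiny case. A sketch with a hypothetical n = 3, not part of the project code:

import numpy as np

n = 3
xs = np.repeat(range(n), 2)[:-1]    # [0 0 1 1 2]
ys = np.repeat(range(0, n), 2)[1:]  # [0 1 1 2 2]
# Pairing xs with ys gives vertices (0,0), (0,1), (1,1), (1,2), (2,2):
# a step path that moves vertically first; the 'hv' branch above swaps
# the index arrays so the path moves horizontally first instead.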
def sample_crop(self, n):
    kx = np.array([len(x) for x in self.maps_with_class])
    class_hist = np.random.multinomial(n, self.class_probs * (kx != 0))
    class_ids = np.repeat(np.arange(class_hist.shape[0]), class_hist)

    X = []
    for class_id in class_ids:
        for i in range(20):
            random_image_idx = np.random.choice(self.maps_with_class[class_id])
            if random_image_idx < 25:
                break
        x = self.kde_samplers[random_image_idx][class_id].sample()[0]
        x /= self.mask_size
        x = np.clip(x, 0., 1.)
        return x, class_id, random_image_idx
        # NOTE: the two lines below are unreachable in the original source
        # because of the return above; kept as-is from the project.
        X.append(x)
    return X
def test_FaceInnerProductAnisotropicDeriv(self):

    def fun(x):
        # fake anisotropy (testing anisotropic implementation with isotropic
        # vector). First order behavior expected for fully anisotropic
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T

        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([eye, zero, eye])])

        MfSig = self.mesh.getFaceInnerProduct(x)
        MfSigDeriv = self.mesh.getFaceInnerProductDeriv(x0)

        return MfSig*self.face_vec, MfSigDeriv(self.face_vec) * P.T

    print('Testing FaceInnerProduct Anisotropic')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_FaceInnerProductAnisotropicDerivInvProp(self):

    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T

        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([eye, zero, eye])])

        MfSig = self.mesh.getFaceInnerProduct(x, invProp=True)
        MfSigDeriv = self.mesh.getFaceInnerProductDeriv(x0, invProp=True)

        return MfSig*self.face_vec, MfSigDeriv(self.face_vec) * P.T

    print('Testing FaceInnerProduct Anisotropic InvProp')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_FaceInnerProductAnisotropicDerivInvMat(self):

    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T

        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([eye, zero, eye])])

        MfSig = self.mesh.getFaceInnerProduct(x, invMat=True)
        MfSigDeriv = self.mesh.getFaceInnerProductDeriv(x0, invMat=True)

        return MfSig*self.face_vec, MfSigDeriv(self.face_vec) * P.T

    print('Testing FaceInnerProduct Anisotropic InvMat')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_EdgeInnerProductAnisotropicDeriv(self):

    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T

        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([zero, eye, zero])])

        MeSig = self.mesh.getEdgeInnerProduct(x.reshape(self.mesh.nC, 3))
        MeSigDeriv = self.mesh.getEdgeInnerProductDeriv(x0)

        return MeSig*self.edge_vec, MeSigDeriv(self.edge_vec) * P.T

    print('Testing EdgeInnerProduct Anisotropic')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_EdgeInnerProductAnisotropicDerivInvProp(self):

    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T

        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([zero, eye, zero])])

        MeSig = self.mesh.getEdgeInnerProduct(x, invProp=True)
        MeSigDeriv = self.mesh.getEdgeInnerProductDeriv(x0, invProp=True)

        return MeSig*self.edge_vec, MeSigDeriv(self.edge_vec) * P.T

    print('Testing EdgeInnerProduct Anisotropic InvProp')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_EdgeInnerProductAnisotropicDerivInvMat(self):

    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T

        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([zero, eye, zero])])

        MeSig = self.mesh.getEdgeInnerProduct(x, invMat=True)
        MeSigDeriv = self.mesh.getEdgeInnerProductDeriv(x0, invMat=True)

        return MeSig*self.edge_vec, MeSigDeriv(self.edge_vec) * P.T

    print('Testing EdgeInnerProduct Anisotropic InvMat')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_EdgeInnerProductAnisotropicDerivInvPropInvMat(self):

    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T

        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([zero, eye, zero])])

        MeSig = self.mesh.getEdgeInnerProduct(x, invProp=True, invMat=True)
        MeSigDeriv = self.mesh.getEdgeInnerProductDeriv(x0, invProp=True, invMat=True)

        return MeSig*self.edge_vec, MeSigDeriv(self.edge_vec) * P.T

    print('Testing EdgeInnerProduct Anisotropic InvProp InvMat')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
    # First figure out what the size of the output should be
    N, C, H, W = x_shape
    assert (H + 2 * padding - field_height) % stride == 0
    assert (W + 2 * padding - field_width) % stride == 0
    # integer division so the arange bounds below are ints
    out_height = (H + 2 * padding - field_height) // stride + 1
    out_width = (W + 2 * padding - field_width) // stride + 1

    i0 = np.repeat(np.arange(field_height), field_width)
    i0 = np.tile(i0, C)
    i1 = stride * np.repeat(np.arange(out_height), out_width)
    j0 = np.tile(np.arange(field_width), field_height * C)
    j1 = stride * np.tile(np.arange(out_width), out_height)
    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
    j = j0.reshape(-1, 1) + j1.reshape(1, -1)

    k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)

    return (k, i, j)
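To see why np.repeat and np.tile are paired in get_im2col_indices, it helps to trace a small case by hand. The sizes below are hypothetical (a 2x2 field, 2x2 output grid, stride 1), not taken from the project above:

import numpy as np

field_height, field_width = 2, 2
out_height, out_width, stride = 2, 2, 1

i0 = np.repeat(np.arange(field_height), field_width)       # [0 0 1 1]: row offset inside a patch
i1 = stride * np.repeat(np.arange(out_height), out_width)  # [0 0 1 1]: top row of each patch
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
# i[r, p] is the input row read by in-patch position r for patch p:
# [[0 0 1 1]
#  [0 0 1 1]
#  [1 1 2 2]
#  [1 1 2 2]]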
def test_two_keys_two_vars(self):
    a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                          np.arange(50, 60), np.arange(10, 20))),
                 dtype=[('k', int), ('a', int), ('b', int), ('c', int)])

    b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                          np.arange(65, 75), np.arange(0, 10))),
                 dtype=[('k', int), ('a', int), ('b', int), ('c', int)])

    control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
                        (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
                        (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
                        (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
                        (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
                       dtype=[('k', int), ('a', int), ('b1', int),
                              ('b2', int), ('c1', int), ('c2', int)])
    test = join_by(['a', 'k'], a, b, r1postfix='1', r2postfix='2',
                   jointype='inner')
    assert_equal(test.dtype, control.dtype)
    assert_equal(test, control)
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
    # First figure out what the size of the output should be
    C, H, W = x_shape
    assert (H + 2 * padding - field_height) % stride == 0
    assert (W + 2 * padding - field_width) % stride == 0
    # integer division so the arange bounds below are ints
    out_height = (H + 2 * padding - field_height) // stride + 1
    out_width = (W + 2 * padding - field_width) // stride + 1

    i0 = np.repeat(np.arange(field_height), field_width)
    i0 = np.tile(i0, C)
    i1 = stride * np.repeat(np.arange(out_height), out_width)
    j0 = np.tile(np.arange(field_width), field_height * C)
    j1 = stride * np.tile(np.arange(out_width), out_height)
    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
    j = j0.reshape(-1, 1) + j1.reshape(1, -1)

    k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)

    return (k, i, j)
def precompute_marginals(self):
    sys.stderr.write('Precomputing marginals...\n')
    self._pdfs = [None] * self._num_instances

    # precomputing all possible marginals
    for i in xrange(self._num_instances):
        mean = self._corrected_means[i]
        cov = self._corrected_covs[i]

        self._pdfs[i] = [None] * (2 ** mean.shape[0])
        for marginal_pattern in itertools.product([False, True], repeat=mean.shape[0]):
            marginal_length = marginal_pattern.count(True)
            if marginal_length == 0:
                continue

            m = np.array(marginal_pattern)
            marginal_mean = mean[m]
            mm = m[:, np.newaxis]
            marginal_cov = cov[np.dot(mm, mm.transpose())].reshape((marginal_length,
                                                                    marginal_length))
            self._pdfs[i][hash_bool_array(m)] = multivariate_normal(mean=marginal_mean,
                                                                    cov=marginal_cov)
def attribute_category(out, ratios):
    '''
    This function assigns each subject to a 'train' or 'test' category.

    Args:
        out (pd.DataFrame): a pd.DataFrame that contains the info of all
            files by subject.
        ratios (list): a list containing the proportions of train/test
            subjects. It should sum to 1 and is assumed to have been
            validated beforehand.

    Returns:
        out (pd.DataFrame): a pd.DataFrame that contains the info of all
            files by subject, where the 'category' column has been set to
            either train or test depending on the result of the random
            draw. The value of test or train is the same for a given
            subject.
    '''
    nSubjects = len(out.subject.unique())
    i_train = np.random.choice(
        np.arange(nSubjects), int(ratios[0] * nSubjects))
    train_or_test_by_subject = [
        'train' if i in i_train else 'test' for i in range(nSubjects)]
    images_per_subject = out.groupby(["subject"]).category.count().values
    out.category = list(np.repeat(train_or_test_by_subject,
                                  images_per_subject))
    return(out)
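The count-vector form of np.repeat that attribute_category relies on, in miniature; the label and count values below are hypothetical:

import numpy as np

labels = ['train', 'test']
images_per_subject = [3, 2]
np.repeat(labels, images_per_subject)
# each label is repeated by its own count:
# array(['train', 'train', 'train', 'test', 'test'], dtype='<U5')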
def get_op(self):
    """Returns all symmetry operations (including inversions and
    subtranslations), but unlike get_symop(), they are returned as
    two ndarrays."""
    if self.centrosymmetric:
        rot = np.tile(np.vstack((self.rotations, -self.rotations)),
                      (self.nsubtrans, 1, 1))
        trans = np.tile(np.vstack((self.translations, -self.translations)),
                        (self.nsubtrans, 1))
        trans += np.repeat(self.subtrans, 2 * len(self.rotations), axis=0)
        trans = np.mod(trans, 1)
    else:
        rot = np.tile(self.rotations, (self.nsubtrans, 1, 1))
        trans = np.tile(self.translations, (self.nsubtrans, 1))
        trans += np.repeat(self.subtrans, len(self.rotations), axis=0)
        trans = np.mod(trans, 1)
    return rot, trans
def test_shape_operations(self):
    # concatenate
    xval = np.random.random((4, 3))
    xth = KTH.variable(xval)
    xtf = KTF.variable(xval)

    yval = np.random.random((4, 2))
    yth = KTH.variable(yval)
    ytf = KTF.variable(yval)

    zth = KTH.eval(KTH.concatenate([xth, yth], axis=-1))
    ztf = KTF.eval(KTF.concatenate([xtf, ytf], axis=-1))
    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)

    check_single_tensor_operation('reshape', (4, 2), shape=(8, 1))
    check_single_tensor_operation('permute_dimensions', (4, 2, 3),
                                  pattern=(2, 0, 1))
    check_single_tensor_operation('repeat', (4, 1), n=3)
    check_single_tensor_operation('flatten', (4, 1))
    check_single_tensor_operation('expand_dims', (4, 3), dim=-1)
    check_single_tensor_operation('expand_dims', (4, 3, 2), dim=1)
    check_single_tensor_operation('squeeze', (4, 3, 1), axis=2)
    check_single_tensor_operation('squeeze', (4, 1, 1), axis=1)
    check_composed_tensor_operations('reshape', {'shape': (4, 3, 1, 1)},
                                     'squeeze', {'axis': 2},
                                     (4, 3, 1, 1))
def __init__(self, name, shape, initial_stdev=2.0, initial_prec=5.0, a0=1.0, b0=1.0):
    mean_std = 1.0 / np.sqrt(shape[-1])
    with tf.variable_scope(name) as scope:
        self.mean = tf.Variable(tf.random_uniform(shape, minval=-mean_std, maxval=mean_std))
        self.logvar = tf.Variable(np.log(initial_stdev**2.0) * np.ones(shape),
                                  name="logvar", dtype=tf.float32)
        self.prec = np.repeat(initial_prec, shape[-1])
        self.prec_ph = tf.placeholder(shape=shape[-1], name="prec", dtype=tf.float32)
        self.var = tf.exp(self.logvar, name="var")
        self.a0 = a0
        self.b0 = b0
        self.shape = shape

# def prec_div(self):
#     return - tf.reduce_sum(gammaPrior(self.prec_a, self.prec_b, self.a0, self.b0))

## outputs E_q[ log N( x | 0, prec^-1) ] + Entropy(q(x))
## where x is the normally distributed variable
def supercell(self, scale_mat):
    """
    Get the supercell of the origin gcell.
    scale_mat plays the same role as the H matrix in the superlattice
    generator.
    """
    # return self.__class__(...)
    sarr_lat = np.matmul(scale_mat, self.lattice)
    # coor_conv_pos = np.matmul(self.positions, self.lattice)
    # o_conv_pos = np.matmul(coor_conv_pos, np.linalg.inv(scale_mat))
    o_conv_pos = np.matmul(self.positions, np.linalg.inv(scale_mat))
    o_pos = self.get_frac_from_mat(scale_mat)

    l_of_positions = [i for i in map(lambda x: x + o_pos, list(o_conv_pos))]
    pos = np.concatenate(l_of_positions, axis=0)

    n = scale_mat.diagonal().prod()
    numbers = np.repeat(self.numbers, n)

    return self.__class__(sarr_lat, pos, numbers)
def in_euclidean_discance(self, pos, center, r):
    """
    A helper function returning True or False: decides whether a
    (fractional) position lies within a distance restriction.
    """
    from scipy.spatial.distance import euclidean as euclidean_discance
    from itertools import product
    cart_cent = self.get_cartesian_from_frac(center)
    trans = np.array([i for i in product([-1, 0, 1], repeat=3)])
    allpos = pos + trans
    for p in allpos:
        cart_p = self.get_cartesian_from_frac(p)
        if euclidean_discance(cart_p, cart_cent) < r:
            return True
    return False
def cochleagram_extractor(xx, sr, win_len, shift_len, channel_number, win_type):
    fcoefs, f = make_erb_filters(sr, channel_number, 50)
    fcoefs = np.flipud(fcoefs)
    xf = erb_frilter_bank(xx, fcoefs)

    if win_type == 'hanning':
        window = np.hanning(channel_number)
    elif win_type == 'hamming':
        window = np.hamming(channel_number)
    elif win_type == 'triangle':
        window = (1 - (np.abs(channel_number - 1 - 2 * np.arange(1, channel_number + 1, 1)) /
                       (channel_number + 1)))
    else:
        window = np.ones(channel_number)
    window = window.reshape((channel_number, 1))

    xe = np.power(xf, 2.0)
    frames = 1 + ((np.size(xe, 1) - win_len) // shift_len)
    cochleagram = np.zeros((channel_number, frames))
    for i in range(frames):
        one_frame = np.multiply(xe[:, i*shift_len:i*shift_len+win_len],
                                np.repeat(window, win_len, 1))
        cochleagram[:, i] = np.sqrt(np.mean(one_frame, 1))

    cochleagram = np.where(cochleagram == 0.0, np.finfo(float).eps, cochleagram)
    return cochleagram
def postaud(x, fmax, fbtype=None):
    if fbtype is None:
        fbtype = 'bark'
    nbands = x.shape[0]
    nframes = x.shape[1]
    nfpts = nbands
    if fbtype == 'bark':
        bancfhz = bark2freq(np.linspace(0, freq2bark(fmax), nfpts))
    fsq = bancfhz * bancfhz
    ftmp = fsq + 1.6e5
    eql = ((fsq/ftmp)**2) * ((fsq + 1.44e6)/(fsq + 9.61e6))
    eql = eql.reshape(np.size(eql), 1)
    z = np.repeat(eql, nframes, axis=1) * x
    z = z ** (1./3.)
    y = np.vstack((z[1, :], z[1:nbands-1, :], z[nbands-2, :]))
    return y
def lpc2cep(a, nout=None):
    nin = np.size(a, 0)
    ncol = np.size(a, 1)
    order = nin - 1
    if nout is None:
        nout = order + 1
    c = np.zeros((nout, ncol))
    c[0, :] = -1. * np.log(a[0, :])
    renormal_coef = np.reshape(a[0, :], (1, ncol))
    renormal_coef = np.repeat(renormal_coef, nin, axis=0)
    a = a / renormal_coef
    for n in range(1, nout):
        sumn = np.zeros(ncol)
        for m in range(1, n+1):
            sumn = sumn + (n-m) * a[m, :] * c[n-m, :]
        c[n, :] = -1. * (a[n, :] + 1. / n * sumn)
    return c
def postaud(x, fmax, fbtype=None):
    if fbtype is None:
        fbtype = 'bark'
    nbands = x.shape[0]
    nframes = x.shape[1]
    nfpts = nbands
    if fbtype == 'bark':
        bancfhz = bark2freq(np.linspace(0, freq2bark(fmax), nfpts))
    fsq = bancfhz * bancfhz
    ftmp = fsq + 1.6e5
    eql = ((fsq/ftmp)**2) * ((fsq + 1.44e6)/(fsq + 9.61e6))
    '''
    plt.figure()
    plt.plot(eql)
    plt.show()
    '''
    eql = eql.reshape(np.size(eql), 1)
    z = np.repeat(eql, nframes, axis=1) * x
    z = z ** (1./3.)
    y = np.vstack((z[1, :], z[1:nbands-1, :], z[nbands-2, :]))
    return y
def get_im2col_indices(x_shape, filter_shape, stride, pad):
    BS, in_D, in_H, in_W = x_shape
    f_H, f_W = filter_shape
    pad_H, pad_W = pad
    stride_H, stride_W = stride
    # note: the height uses the vertical stride, the width the horizontal one
    out_H = int((in_H + 2*pad_H - f_H) / stride_H + 1)
    out_W = int((in_W + 2*pad_W - f_W) / stride_W + 1)

    i_col = np.repeat(np.arange(f_H), f_W)
    i_col = np.tile(i_col, in_D).reshape(-1, 1)
    i_row = stride_H * np.repeat(np.arange(out_H), out_W)
    i = i_col + i_row  # shape=(in_D*f_H*f_W, out_H*out_W)

    j_col = np.tile(np.arange(f_W), f_H)
    j_col = np.tile(j_col, in_D).reshape(-1, 1)
    j_row = stride_W * np.tile(np.arange(out_W), out_H)
    j = j_col + j_row  # shape=(in_D*f_H*f_W, out_W*out_H)

    c = np.repeat(np.arange(in_D), f_H * f_W).reshape(-1, 1)  # shape=(in_D*f_H*f_W, 1)

    return (c, i, j)
def _conform_kernel_to_tensor(kernel, tensor, shape):
    """ Re-shape a convolution kernel to match the given tensor's color dimensions. """

    l = len(kernel)
    channels = shape[-1]

    temp = np.repeat(kernel, channels)
    temp = tf.reshape(temp, (l, l, channels, 1))
    temp = tf.cast(temp, tf.float32)
    temp /= tf.maximum(tf.reduce_max(temp), tf.reduce_min(temp) * -1)

    return temp
def specular_reflection_matrix(self, frequency, eps_1, mu1, npol, compute_coherent_only):
    if npol > 2:
        raise NotImplementedError("active model is not yet implemented, need modification for the third component")

    if self.backscatter_coefficient is not None:
        raise NotImplementedError("backscatter_coefficient to be implemented")

    if self.specular_reflection is None and self.backscatter_coefficient is None:
        self.specular_reflection = 1

    if isinstance(self.specular_reflection, dict):  # we have a dictionary with polarization
        spec_refl_coeff = np.empty(npol*len(mu1))
        spec_refl_coeff[0::npol] = self._get_refl(self.specular_reflection['V'], mu1)
        spec_refl_coeff[1::npol] = self._get_refl(self.specular_reflection['H'], mu1)
    else:  # we have a scalar, both polarizations are the same
        spec_refl_coeff = np.repeat(self._get_refl(self.specular_reflection, mu1), npol)

    return scipy.sparse.diags(spec_refl_coeff, 0)
def absorption_matrix(self, frequency, eps_1, mu1, npol, compute_coherent_only):
    if self.specular_reflection is None and self.backscatter_coefficient is None:
        self.specular_reflection = 1

    if npol > 2:
        raise NotImplementedError("active model is not yet implemented, need modification for the third component")

    if isinstance(self.specular_reflection, dict):  # we have a dictionary with polarization
        abs_coeff = np.empty(npol*len(mu1))
        abs_coeff[0::npol] = 1 - self._get_refl(self.specular_reflection['V'], mu1)
        abs_coeff[1::npol] = 1 - self._get_refl(self.specular_reflection['H'], mu1)
    else:  # we have a scalar, both polarizations are the same
        abs_coeff = 1 - np.repeat(self._get_refl(self.specular_reflection, mu1), npol)

    return scipy.sparse.diags(abs_coeff, 0)
def specular_reflection_matrix(self, frequency, eps_1, mu1, npol, compute_coherent_only):
    if npol > 2 and not hasattr(self, "stop_pol2_warning"):
        print("active model is not yet fully implemented, need modification for the third component")  # !!!
        self.stop_pol2_warning = True

    if self.specular_reflection is None and self.backscattering_coefficient is None:
        self.specular_reflection = 1

    if isinstance(self.specular_reflection, dict):  # we have a dictionary with polarization
        spec_refl_coeff = np.empty(npol*len(mu1))
        spec_refl_coeff[0::npol] = self._get_refl(self.specular_reflection['V'], mu1)
        spec_refl_coeff[1::npol] = self._get_refl(self.specular_reflection['H'], mu1)
    else:  # we have a scalar, both polarizations are the same
        spec_refl_coeff = np.repeat(self._get_refl(self.specular_reflection, mu1), npol)

    return scipy.sparse.diags(spec_refl_coeff, 0)
def absorption_matrix(self, frequency, eps_1, mu1, npol, compute_coherent_only):
    if self.specular_reflection is None and self.backscattering_coefficient is None:
        self.specular_reflection = 1

    if npol > 2 and not hasattr(self, "stop_pol2_warning"):
        print("active model is not yet fully implemented, need modification for the third component")  # !!!
        self.stop_pol2_warning = True

    if isinstance(self.specular_reflection, dict):  # we have a dictionary with polarization
        abs_coeff = np.empty(npol*len(mu1))
        abs_coeff[0::npol] = 1 - self._get_refl(self.specular_reflection['V'], mu1)
        abs_coeff[1::npol] = 1 - self._get_refl(self.specular_reflection['H'], mu1)
    else:  # we have a scalar, both polarizations are the same
        abs_coeff = 1 - np.repeat(self._get_refl(self.specular_reflection, mu1), npol)

    return scipy.sparse.diags(abs_coeff, 0)
def cartesian(arrays, out=None, dtype='f'):
    """http://stackoverflow.com/questions/28684492/numpy-equivalent-of-itertools-product"""
    arrays = [np.asarray(x) for x in arrays]
    # dtype = arrays[0].dtype
    n = np.prod([x.size for x in arrays])
    if out is None:
        out = np.zeros([n, len(arrays)], dtype=dtype)
    m = int(n / arrays[0].size)
    out[:, 0] = np.repeat(arrays[0], m)
    if arrays[1:]:
        cartesian(arrays[1:], out=out[0:m, 1:])
        for j in range(1, arrays[0].size):
            out[j*m:(j+1)*m, 1:] = out[0:m, 1:]
    return out
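A usage sketch for the cartesian helper above; the input lists are arbitrary, and the output follows from tracing the recursion:

cartesian([[1, 2], [4, 5]], dtype=int)
# array([[1, 4],
#        [1, 5],
#        [2, 4],
#        [2, 5]])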
def __call__(self, root, combo):
    subject, session = decode_subject_and_session(combo.subject)
    path = os.path.join(root,
                        'subject%d' % subject,
                        'session%d' % session,
                        'gest%d.mat' % combo.gesture)
    if path not in self.memo:
        data = _get_data(path, self.preprocess)
        self.memo[path] = data
        logger.debug('{}', path)
    else:
        data = self.memo[path]
    assert combo.trial < len(data), str(combo)
    data = data[combo.trial].copy()
    gesture = np.repeat(combo.gesture, len(data))
    subject = np.repeat(combo.subject, len(data))
    return Trial(data=data, gesture=gesture, subject=subject)
def add_vibrational_mode(uni, freqdx):
    displacements = uni.frequency.displacements(freqdx)
    if not all(displacements['symbol'] == uni.atom['symbol']):
        print('Mismatch in ordering of atoms and frequencies.')
        return
    displaced = []
    frames = []
    # Should these only be absolute values?
    factor = np.abs(np.sin(np.linspace(-4*np.pi, 4*np.pi, 200)))
    for fac in factor:
        moved = uni.atom.copy()
        moved['x'] += displacements['dx'].values * fac
        moved['y'] += displacements['dy'].values * fac
        moved['z'] += displacements['dz'].values * fac
        displaced.append(moved)
        frames.append(uni.frame)
    movie = pd.concat(displaced).reset_index()
    movie['frame'] = np.repeat(range(len(factor)), len(uni.atom))
    uni.frame = pd.concat(frames).reset_index()
    uni.atom = movie