The following code examples, extracted from open-source Python projects, illustrate how to use scipy.sparse.vstack().
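Before the extracted examples, here is a minimal self-contained sketch (not taken from any of the projects below) of what scipy.sparse.vstack() does: it stacks sparse matrices vertically, the inputs must agree in column count, and the optional format argument ("csr", "csc", ...) controls the format of the result, which otherwise defaults to COO.

import numpy as np
import scipy.sparse as sp

# Two sparse matrices with the same number of columns (3).
A = sp.csr_matrix(np.array([[1, 0, 2],
                            [0, 3, 0]]))
B = sp.csr_matrix(np.array([[0, 0, 4]]))

# Stack vertically; format="csr" returns CSR instead of the default COO.
C = sp.vstack([A, B], format="csr")

print(C.shape)      # (3, 3)
print(C.toarray())  # dense view, for inspection only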
def refit_model(self):
    """Learns a new surrogate model using the data observed so far."""
    # only fit the model if there is data for it.
    if len(self.known_models) > 0:
        self._build_feature_maps(self.known_models, self.ngram_maxlen, self.thres)

        X = sp.vstack([self._compute_features(mdl)
                       for mdl in self.known_models], "csr")
        y = np.array(self.known_scores, dtype='float64')

        # A = np.dot(X.T, X) + lamb * np.eye(X.shape[1])
        # b = np.dot(X.T, y)

        self.surr_model = lm.Ridge(self.lamb_ridge)
        self.surr_model.fit(X, y)

# NOTE: if the search space has holes, it breaks. Needs a try/except.
def edgeCurl(self):
    """The edgeCurl property."""
    if self.nCy > 1:
        raise NotImplementedError('Edge curl not yet implemented for '
                                  'nCy > 1')
    if getattr(self, '_edgeCurl', None) is None:
        # 1D difference matrices
        dr = sp.spdiags((np.ones((self.nCx+1, 1))*[-1, 1]).T, [-1, 0],
                        self.nCx, self.nCx, format="csr")
        dz = sp.spdiags((np.ones((self.nCz+1, 1))*[-1, 1]).T, [0, 1],
                        self.nCz, self.nCz+1, format="csr")
        # 2D difference matrices
        Dr = sp.kron(sp.identity(self.nNz), dr)
        Dz = -sp.kron(dz, sp.identity(self.nCx))

        A = self.area
        E = self.edge
        # Edge curl operator
        self._edgeCurl = (utils.sdiag(1/A)*sp.vstack((Dz, Dr)) *
                          utils.sdiag(E))
    return self._edgeCurl
def nodalGrad(self):
    """
    Construct gradient operator (nodes to edges).
    """
    if getattr(self, '_nodalGrad', None) is None:
        # The number of cell centers in each direction
        n = self.vnC
        # Compute difference operators along each dimension
        if self.dim == 1:
            G = ddx(n[0])
        elif self.dim == 2:
            D1 = sp.kron(speye(n[1]+1), ddx(n[0]))
            D2 = sp.kron(ddx(n[1]), speye(n[0]+1))
            G = sp.vstack((D1, D2), format="csr")
        elif self.dim == 3:
            D1 = kron3(speye(n[2]+1), speye(n[1]+1), ddx(n[0]))
            D2 = kron3(speye(n[2]+1), ddx(n[1]), speye(n[0]+1))
            D3 = kron3(ddx(n[2]), speye(n[1]+1), speye(n[0]+1))
            G = sp.vstack((D1, D2, D3), format="csr")
        # Compute lengths of cell edges
        L = self.edge
        self._nodalGrad = sdiag(1/L)*G
    return self._nodalGrad
def aveCC2F(self):
    """Construct the averaging operator from cell centers to faces."""
    if getattr(self, '_aveCC2F', None) is None:
        if self.dim == 1:
            self._aveCC2F = av_extrap(self.nCx)
        elif self.dim == 2:
            self._aveCC2F = sp.vstack((
                sp.kron(speye(self.nCy), av_extrap(self.nCx)),
                sp.kron(av_extrap(self.nCy), speye(self.nCx))
            ), format="csr")
        elif self.dim == 3:
            self._aveCC2F = sp.vstack((
                kron3(speye(self.nCz), speye(self.nCy), av_extrap(self.nCx)),
                kron3(speye(self.nCz), av_extrap(self.nCy), speye(self.nCx)),
                kron3(av_extrap(self.nCz), speye(self.nCy), speye(self.nCx))
            ), format="csr")
    return self._aveCC2F
def aveN2F(self):
    """
    Construct the averaging operator from cell nodes to cell faces,
    keeping each dimension separate.
    """
    if getattr(self, '_aveN2F', None) is None:
        # The number of cell centers in each direction
        n = self.vnC
        if self.dim == 1:
            self._aveN2F = av(n[0])
        elif self.dim == 2:
            self._aveN2F = sp.vstack((sp.kron(av(n[1]), speye(n[0]+1)),
                                      sp.kron(speye(n[1]+1), av(n[0]))),
                                     format="csr")
        elif self.dim == 3:
            self._aveN2F = sp.vstack((kron3(av(n[2]), av(n[1]), speye(n[0]+1)),
                                      kron3(av(n[2]), speye(n[1]+1), av(n[0])),
                                      kron3(speye(n[2]+1), av(n[1]), av(n[0]))),
                                     format="csr")
    return self._aveN2F
def test_invXXXBlockDiagonal(self):
    a = [np.random.rand(5, 1) for i in range(4)]

    B = inv2X2BlockDiagonal(*a)
    A = sp.vstack((sp.hstack((sdiag(a[0]), sdiag(a[1]))),
                   sp.hstack((sdiag(a[2]), sdiag(a[3])))))

    Z2 = B*A - sp.identity(10)
    self.assertTrue(np.linalg.norm(Z2.todense().ravel(), 2) < TOL)

    a = [np.random.rand(5, 1) for i in range(9)]

    B = inv3X3BlockDiagonal(*a)
    A = sp.vstack((sp.hstack((sdiag(a[0]), sdiag(a[1]), sdiag(a[2]))),
                   sp.hstack((sdiag(a[3]), sdiag(a[4]), sdiag(a[5]))),
                   sp.hstack((sdiag(a[6]), sdiag(a[7]), sdiag(a[8])))))

    Z3 = B*A - sp.identity(15)
    self.assertTrue(np.linalg.norm(Z3.todense().ravel(), 2) < TOL)
def test_FaceInnerProductAnisotropicDerivInvProp(self):
    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T

        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([eye, zero, eye])])

        MfSig = self.mesh.getFaceInnerProduct(x, invProp=True)
        MfSigDeriv = self.mesh.getFaceInnerProductDeriv(x0, invProp=True)
        return MfSig*self.face_vec, MfSigDeriv(self.face_vec) * P.T

    print('Testing FaceInnerProduct Anisotropic InvProp')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_FaceInnerProductAnisotropicDerivInvMat(self):
    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T

        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([eye, zero, eye])])

        MfSig = self.mesh.getFaceInnerProduct(x, invMat=True)
        MfSigDeriv = self.mesh.getFaceInnerProductDeriv(x0, invMat=True)
        return MfSig*self.face_vec, MfSigDeriv(self.face_vec) * P.T

    print('Testing FaceInnerProduct Anisotropic InvMat')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_FaceInnerProductAnisotropicDerivInvPropInvMat(self):
    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T

        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([eye, zero, eye])])

        MfSig = self.mesh.getFaceInnerProduct(x, invProp=True, invMat=True)
        MfSigDeriv = self.mesh.getFaceInnerProductDeriv(x0, invProp=True,
                                                        invMat=True)
        return MfSig*self.face_vec, MfSigDeriv(self.face_vec) * P.T

    print('Testing FaceInnerProduct Anisotropic InvProp InvMat')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_EdgeInnerProductAnisotropicDeriv(self):
    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T

        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([zero, eye, zero])])

        MeSig = self.mesh.getEdgeInnerProduct(x.reshape(self.mesh.nC, 3))
        MeSigDeriv = self.mesh.getEdgeInnerProductDeriv(x0)
        return MeSig*self.edge_vec, MeSigDeriv(self.edge_vec) * P.T

    print('Testing EdgeInnerProduct Anisotropic')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_EdgeInnerProductAnisotropicDerivInvProp(self):
    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T

        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([zero, eye, zero])])

        MeSig = self.mesh.getEdgeInnerProduct(x, invProp=True)
        MeSigDeriv = self.mesh.getEdgeInnerProductDeriv(x0, invProp=True)
        return MeSig*self.edge_vec, MeSigDeriv(self.edge_vec) * P.T

    print('Testing EdgeInnerProduct Anisotropic InvProp')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_EdgeInnerProductAnisotropicDerivInvPropInvMat(self):
    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T

        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([zero, eye, zero])])

        MeSig = self.mesh.getEdgeInnerProduct(x, invProp=True, invMat=True)
        MeSigDeriv = self.mesh.getEdgeInnerProductDeriv(x0, invProp=True,
                                                        invMat=True)
        return MeSig*self.edge_vec, MeSigDeriv(self.edge_vec) * P.T

    print('Testing EdgeInnerProduct Anisotropic InvProp InvMat')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def generate_offspring(population, fitness, tournament_size=2, crossover_prob=0.9,
                       n_crossovers=1, mutation_rate=0.3, win_condition='highest',
                       no_mutation=[]):
    new_population = []
    while len(new_population) < population.shape[0]:
        # select
        selections = tournament_selection(fitness, tournament_size, win_condition)
        parents = population[selections, :]
        # generate and mutate
        if random.random() < crossover_prob:
            offspring = []
            for generation in range(2):
                child = offspring_crossover(parents, n_crossovers)
                child_mutated = mutate(child, mutation_rate, no_mutation)
                offspring.append(child_mutated)
        else:
            offspring = parents
        # accept
        new_population.extend(offspring)
    return sparse.vstack(new_population)
def balance_data(instances, labels):
    # identify the lowest-frequency label
    unique_labels = list(set(labels))
    label_count_sorted = sorted([(label, labels.count(label)) for label in unique_labels],
                                key=lambda k: k[1])
    least_frequent_indices = [i for i, label in enumerate(labels)
                              if label == label_count_sorted[0][0]]
    least_frequent_count = label_count_sorted[0][1]
    balanced_instances = instances[least_frequent_indices, :]
    balanced_labels = [label_count_sorted[0][0]] * least_frequent_count
    # impose the lowest frequency on the other labels
    for cursorlabel in [lc[0] for lc in label_count_sorted[1:]]:
        label_indices = [i for i, label in enumerate(labels) if label == cursorlabel]
        samples = random.sample(label_indices, least_frequent_count)
        sampled_instances = instances[samples, :]
        balanced_instances = sparse.vstack((balanced_instances, sampled_instances),
                                           format='csr')
        balanced_labels.extend([cursorlabel] * least_frequent_count)
    return balanced_instances, balanced_labels
def combine_sdf_files(run_folder, folders, verbose=False, **kwargs):
    """function for concatenating SparseDataFrames together"""
    combined = SparseDataFrame()
    combined.rows = []
    columns = set()
    for folder in folders:
        filename = os.path.join(run_folder, folder, f'{folder}.mus.cell-gene.npz')
        if verbose:
            print(f'Reading {filename} ...')
        sdf = SparseDataFrame(filename)
        columns.add(tuple(sdf.columns))
        combined.rows.extend(sdf.rows)
        if combined.matrix is None:
            combined.matrix = sdf.matrix
        else:
            combined.matrix = sparse.vstack((combined.matrix, sdf.matrix),
                                            format='csr')
    assert len(columns) == 1
    combined.columns = columns.pop()
    return combined
def predict(self, X, fmt='sparse'):
    assert fmt in ('sparse', 'dict')
    s = []
    num = X.shape[0] if isinstance(X, sp.csr_matrix) else len(X)
    for i in range(num):
        Xi = X[i]
        mean = self.predictor.predict(Xi.data, Xi.indices, self.blend,
                                      self.gamma, self.leaf_probs)
        if fmt == 'sparse':
            s.append(mean)
        else:
            od = OrderedDict()
            for idx in reversed(mean.data.argsort()):
                od[mean.indices[idx]] = mean.data[idx]
            s.append(od)
    if fmt == 'sparse':
        return sp.vstack(s)
    return s
def theta_matrix(coord, adj, preload=True, train=True):
    print("creating adjacent theta matrix ...")
    if preload is True:
        if train is True:
            theta_matrix = np.load('../data/theta_matrix_train_100.npy')
        else:
            theta_matrix = np.load('../data/theta_matrix_test_100.npy')
    else:
        theta_matrix = []
        for i in tqdm(range(coord.shape[0])):
            for j in range(coord.shape[1]):
                theta_row = angle(coord[i, adj[i][j].nonzero()[1], :] - coord[i, j, :])
                col_indice = adj[i][j].nonzero()[1]
                row_indice = np.zeros(col_indice.shape[0]).astype(np.int32)
                if j == 0:
                    theta_matrix_tmp = sp.csc_matrix(
                        (theta_row, (row_indice, col_indice)),
                        shape=(1, coord.shape[1]))
                else:
                    theta_matrix_tmp = sp.vstack(
                        (theta_matrix_tmp,
                         sp.csc_matrix((theta_row, (row_indice, col_indice)),
                                       shape=(1, coord.shape[1]))))
            theta_matrix.append(theta_matrix_tmp)
        theta_matrix = np.array(theta_matrix)
    return theta_matrix
def _partial_fit(self, X, y=None):
    _checkXy(X, y)
    # update index
    self._inv_X = sp.vstack([self._inv_X, self._cv.transform(X)])
    # update source
    # self._fit_X = np.hstack([self._fit_X, np.asarray(X)])
    # try to infer viable doc ids
    if y is None:
        next_id = np.amax(self._y) + 1
        y = np.arange(next_id, next_id + len(X))
    else:
        y = np.asarray(y)
    self._y = np.hstack([self._y, y])
    self.n_docs += len(X)
    return self
def grid_to_adjacency_matrix(grid, neighborhood=8):
    """Convert a boolean grid, where 0's express holes and 1's connected
    pixels, into a sparse adjacency matrix representing the grid-graph.
    The neighborhood of each pixel is calculated from its 4 or 8 immediately
    surrounding neighbors (defaults to 8)."""
    coords = np.argwhere(grid)
    coords_x = coords[:, 0]
    coords_y = coords[:, 1]
    # lil is the most efficient format for building a sparse matrix iteratively
    matrix = sparse.lil_matrix((0, coords.shape[0]), dtype=np.uint8)
    if neighborhood == 4:
        for px, py in coords:
            row = (((px == coords_x) & (np.abs(py - coords_y) == 1)) |
                   ((np.abs(px - coords_x) == 1) & (py == coords_y)))
            matrix = sparse.vstack([matrix, row])
    else:
        for px, py in coords:
            row = (np.abs(px - coords_x) <= 1) & (np.abs(py - coords_y) <= 1)
            matrix = sparse.vstack([matrix, row])
    matrix.setdiag(1)
    # Once built, convert to compressed sparse column (or row) format
    return matrix.tocsc()  # or .tocsr()
def sparse_remove_row(X, to_remove):
    """ Delete rows from a sparse matrix

    Parameters
    ----------
    X : scipy.sparse matrix
    to_remove : a list of row indices to be removed.

    Returns
    -------
    Y : scipy.sparse matrix
    """
    if not sps.isspmatrix_lil(X):
        X = X.tolil()

    to_keep = [i for i in range(X.shape[0]) if i not in to_remove]
    Y = sps.vstack([X.getrowview(i) for i in to_keep])
    return Y
def _concatenate_dense_jac(jac_list):
    # Read all jacobians sequentially.
    # Convert all values to numpy arrays.
    jac_ineq_list = []
    jac_eq_list = []
    for jac_tuple in jac_list:
        J_ineq, J_eq = jac_tuple
        if spc.issparse(J_ineq):
            jac_ineq_list += [J_ineq.toarray()]
        else:
            jac_ineq_list += [np.atleast_2d(J_ineq)]
        if spc.issparse(J_eq):
            jac_eq_list += [J_eq.toarray()]
        else:
            jac_eq_list += [np.atleast_2d(J_eq)]
    # Concatenate all
    J_ineq = np.vstack(jac_ineq_list)
    J_eq = np.vstack(jac_eq_list)
    # Return
    return J_ineq, J_eq
def allMB_multi(casesPerTask, datasets, mbNumber):
    if mbNumber == 0:
        # to begin a new epoch, permute each dataset first,
        # then sequentially use training data in the new order
        for i in range(len(datasets)):
            datasets[i].perm()
    inpsList = []
    targs = num.zeros((sum(casesPerTask), len(datasets)), dtype=num.float32)
    targsMask = num.zeros((sum(casesPerTask), len(datasets)), dtype=num.float32)
    for i in range(len(datasets)):
        # in case we need to use certain datasets multiple times in one epoch
        idx = [xx % (datasets[i].inps.shape[0])
               for xx in range(casesPerTask[i]*(mbNumber-1), casesPerTask[i]*mbNumber)]
        inpsList.append(datasets[i].inps[idx])
        targs[sum(casesPerTask[:i]):sum(casesPerTask[:(i+1)])] = datasets[i].targsFull[idx]
        targsMask[sum(casesPerTask[:i]):sum(casesPerTask[:(i+1)]), i] = 1
    if isinstance(inpsList[0], num.ndarray):
        inps = num.vstack(inpsList)
    else:
        inps = sp.vstack(inpsList)
    return inps, targs, targsMask
def update(self, indices):
    templates = self.template_store.get(indices)
    for t in templates:
        # Add new and updated templates to the dictionary.
        self.norms['1'] = np.concatenate((self.norms['1'], [t.first_component.norm]))
        self.amplitudes = np.vstack((self.amplitudes, t.amplitudes))
        if self.two_components:
            self.norms['2'] = np.concatenate((self.norms['2'], [t.second_component.norm]))
        t.normalize()
        self.first_component = vstack((self.first_component,
                                       t.first_component.to_sparse('csc', flatten=True)),
                                      'csc')
        if self.two_components:
            self.second_component = vstack((self.second_component,
                                            t.second_component.to_sparse('csc', flatten=True)),
                                           'csc')
def predict(self, X): """Predict the class labels for the provided data Parameters ---------- X: array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == ‘precomputed’ Test samples. Returns ------- array of shape [n_samples] or [n_samples, n_outputs] Class labels for each data sample. """ start = default_timer() neighbor_ids = self.knn.kneighbors(X) result = sp.csr_matrix((0, self.y.shape[1])) for n in neighbor_ids: neighbor_labels = self.y[n] result = sp.vstack((result, neighbor_labels)) print('Prediction took ' + str(datetime.timedelta(seconds=default_timer() - start))) return result
def predict(self, X):
    n_query = X.shape[0]

    # Compute Y_probas; its shape should be (n_query, n_topics * len(clfs))
    Y_probas = np.hstack([clf.predict_proba(X) for clf in self.clfs])
    print("Y_probas shape:", Y_probas.shape)

    mean_cut = self.mu
    if self.verbose:
        print("MeanCut = %d" % mean_cut)
    Y_pred = sp.csr_matrix((0, self.n_topics))
    for i in range(n_query):
        # sample-based predictions, can probably be avoided
        indices = np.argsort(Y_probas[i])[-mean_cut:]  # sort probabilities row-wise
        data = np.array([1] * len(indices))
        # indptr must end at the number of stored values so that every
        # selected label is kept in the single-row CSR matrix
        indptr = np.array([0, len(indices)])
        row = sp.csr_matrix((data, indices, indptr), shape=(1, self.n_topics))
        Y_pred = sp.vstack((Y_pred, row))
    return Y_pred
def _b(self, neighbor_ids):
    result = sp.csr_matrix((0, self.y.shape[1]))
    for ns in neighbor_ids:
        average_label_nums = int(np.floor(np.mean([self.y[n].sum() for n in ns])))
        neighbor_labels = self.y[ns]
        labels_sum = np.array(neighbor_labels.sum(0))
        # By squeezing we support matrix output from scipy.sparse.sum
        # and 1D array from np.sum
        divide = np.squeeze(np.divide(labels_sum, len(ns)))
        predicted_indices = np.argsort(divide)[-average_label_nums:]
        predicted_labels = sp.dok_matrix((1, len(divide)))
        # noinspection PyTypeChecker
        for index in predicted_indices:
            predicted_labels[0, index] = 1
        predicted_labels = sp.csr_matrix(predicted_labels)
        result = sp.vstack((result, predicted_labels))
    return result
def test_dataset_append():
    h5_path = mkstemp(suffix=".h5")[1]
    sparse_matrix = ss.csr_matrix([[0, 1, 0],
                                   [0, 0, 1],
                                   [0, 0, 0],
                                   [1, 1, 0]],
                                  dtype=np.float64)
    to_append = ss.csr_matrix([[0, 1, 1],
                               [1, 0, 0]],
                              dtype=np.float64)
    appended_matrix = ss.vstack((sparse_matrix, to_append))

    with h5sparse.File(h5_path) as h5f:
        h5f.create_dataset('matrix', data=sparse_matrix, chunks=(100000,),
                           maxshape=(None,))
        h5f['matrix'].append(to_append)
        assert (h5f['matrix'].value != appended_matrix).size == 0

    os.remove(h5_path)
def decision_function(self, X):
    """Decision function for the OneVsOneClassifier.

    The decision values for the samples are computed by adding the
    normalized sum of pair-wise classification confidence levels to the
    votes in order to disambiguate between the decision values when the
    votes for all the classes are equal, leading to a tie.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    Y : array-like, shape = [n_samples, n_classes]
    """
    check_is_fitted(self, 'estimators_')

    predictions = np.vstack([est.predict(X) for est in self.estimators_]).T
    confidences = np.vstack([_predict_binary(est, X) for est in self.estimators_]).T
    return _ovr_decision_function(predictions, confidences, len(self.classes_))
def test_dataset_append():
    h5_path = mkstemp(suffix=".h5")[1]
    sparse_matrix = ss.csr_matrix([[0, 1, 0],
                                   [0, 0, 1],
                                   [0, 0, 0],
                                   [1, 1, 0]],
                                  dtype=np.float64)
    to_append = ss.csr_matrix([[0, 1, 1],
                               [1, 0, 0]],
                              dtype=np.float64)
    appended_matrix = ss.vstack((sparse_matrix, to_append))

    with h5py.File(h5_path) as h5f:
        h5f.create_dataset('matrix', data=sparse_matrix, chunks=(100000,),
                           maxshape=(None,))
        h5f['matrix'].append(to_append)
        assert (h5f['matrix'].value != appended_matrix).size == 0

    os.remove(h5_path)
def make_linearOperator(shape, Xn, K):
    M, N = shape
    fx = K[0, 0]
    fy = K[1, 1]
    x_hat = Xn[0, :]
    y_hat = Xn[1, :]
    # use one-sided differences with backward diff at image border
    Kx, Ky = make_derivatives_2D_complete(shape)
    Kx = Kx.tocsr()
    Ky = Ky.tocsr()
    spId = sparse.eye(M*N, M*N, format='csr')
    spXhat = sparse.diags(x_hat.flatten(), 0).tocsr()
    spYhat = sparse.diags(y_hat.flatten(), 0).tocsr()
    L = sparse.vstack([-Kx/fy,
                       -Ky/fx,
                       spXhat*Kx/fy + spYhat*Ky/fx + 2*spId/(fx*fy)])
    return L.tocsr()
def score(self, user, candidates, context):
    # i_mat is (n_item_context, n_item) for all possible items;
    # extract only target items
    i_mat = self.i_mat[:, candidates]

    n_target = len(candidates)

    # u_mat will be (n_user_context, n_item) for the target user
    u_vec = np.concatenate((user.feature, context))
    u_vec = np.array([u_vec]).T
    u_mat = sp.csr_matrix(np.repeat(u_vec, n_target, axis=1))

    # stack them into a (p, n_item) matrix
    Y = sp.vstack((u_mat, i_mat))
    Y = self.proj.reduce(Y)
    Y = sp.csr_matrix(preprocessing.normalize(Y, norm='l2', axis=0))

    X = np.identity(self.k) - np.dot(self.U_r, self.U_r.T)
    A = safe_sparse_dot(X, Y, dense_output=True)

    return ln.norm(A, axis=0, ord=2)
def load_data(dataset):
    # load the data: x, tx, allx, graph
    names = ['x', 'tx', 'allx', 'graph']
    objects = []
    for i in range(len(names)):
        # pickled files must be opened in binary mode
        with open("data/ind.{}.{}".format(dataset, names[i]), 'rb') as f:
            objects.append(pkl.load(f))
    x, tx, allx, graph = tuple(objects)
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset))
    test_idx_range = np.sort(test_idx_reorder)

    if dataset == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended

    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))

    return adj, features
def _cellGradStencil(self):
    BC = self.setCellGradBC(self._cellGradBC_list)
    n = self.vnC
    if self.dim == 1:
        G = ddxCellGrad(n[0], BC[0])
    elif self.dim == 2:
        G1 = sp.kron(speye(n[1]), ddxCellGrad(n[0], BC[0]))
        G2 = sp.kron(ddxCellGrad(n[1], BC[1]), speye(n[0]))
        G = sp.vstack((G1, G2), format="csr")
    elif self.dim == 3:
        G1 = kron3(speye(n[2]), speye(n[1]), ddxCellGrad(n[0], BC[0]))
        G2 = kron3(speye(n[2]), ddxCellGrad(n[1], BC[1]), speye(n[0]))
        G3 = kron3(ddxCellGrad(n[2], BC[2]), speye(n[1]), speye(n[0]))
        G = sp.vstack((G1, G2, G3), format="csr")
    return G
def edgeCurl(self): """ Construct the 3D curl operator. """ if getattr(self, '_edgeCurl', None) is None: assert self.dim > 1, "Edge Curl only programed for 2 or 3D." n = self.vnC # The number of cell centers in each direction L = self.edge # Compute lengths of cell edges S = self.area # Compute areas of cell faces # Compute divergence operator on faces if self.dim == 2: D21 = sp.kron(ddx(n[1]), speye(n[0])) D12 = sp.kron(speye(n[1]), ddx(n[0])) C = sp.hstack((-D21, D12), format="csr") self._edgeCurl = C*sdiag(1/S) elif self.dim == 3: D32 = kron3(ddx(n[2]), speye(n[1]), speye(n[0]+1)) D23 = kron3(speye(n[2]), ddx(n[1]), speye(n[0]+1)) D31 = kron3(ddx(n[2]), speye(n[1]+1), speye(n[0])) D13 = kron3(speye(n[2]), speye(n[1]+1), ddx(n[0])) D21 = kron3(speye(n[2]+1), ddx(n[1]), speye(n[0])) D12 = kron3(speye(n[2]+1), speye(n[1]), ddx(n[0])) O1 = spzeros(np.shape(D32)[0], np.shape(D31)[1]) O2 = spzeros(np.shape(D31)[0], np.shape(D32)[1]) O3 = spzeros(np.shape(D21)[0], np.shape(D13)[1]) C = sp.vstack((sp.hstack((O1, -D32, D23)), sp.hstack((D31, O2, -D13)), sp.hstack((-D21, D12, O3))), format="csr") self._edgeCurl = sdiag(1/S)*(C*sdiag(L)) return self._edgeCurl
def inv2X2BlockDiagonal(a11, a12, a21, a22, returnMatrix=True):
    """ B = inv2X2BlockDiagonal(a11, a12, a21, a22)

    Inverts a stack of 2x2 matrices by using the inversion formula

    inv(A) = (1/det(A)) * cof(A)^T

    Input:
    A - a11, a12, a21, a22

    Output:
    B - inverse
    """
    a11 = mkvc(a11)
    a12 = mkvc(a12)
    a21 = mkvc(a21)
    a22 = mkvc(a22)

    # compute inverse of the determinant.
    detAinv = 1./(a11*a22 - a21*a12)

    b11 = +detAinv*a22
    b12 = -detAinv*a12
    b21 = -detAinv*a21
    b22 = +detAinv*a11

    if not returnMatrix:
        return b11, b12, b21, b22

    return sp.vstack((sp.hstack((sdiag(b11), sdiag(b12))),
                      sp.hstack((sdiag(b21), sdiag(b22)))))
def makePropertyTensor(M, tensor):
    if tensor is None:  # default is ones
        tensor = np.ones(M.nC)

    if isScalar(tensor):
        tensor = tensor * np.ones(M.nC)

    propType = TensorType(M, tensor)
    if propType == 1:  # Isotropic!
        Sigma = sp.kron(sp.identity(M.dim), sdiag(mkvc(tensor)))
    elif propType == 2:  # Diagonal tensor
        Sigma = sdiag(mkvc(tensor))
    elif M.dim == 2 and tensor.size == M.nC*3:  # Fully anisotropic, 2D
        tensor = tensor.reshape((M.nC, 3), order='F')
        row1 = sp.hstack((sdiag(tensor[:, 0]), sdiag(tensor[:, 2])))
        row2 = sp.hstack((sdiag(tensor[:, 2]), sdiag(tensor[:, 1])))
        Sigma = sp.vstack((row1, row2))
    elif M.dim == 3 and tensor.size == M.nC*6:  # Fully anisotropic, 3D
        tensor = tensor.reshape((M.nC, 6), order='F')
        row1 = sp.hstack(
            (sdiag(tensor[:, 0]), sdiag(tensor[:, 3]), sdiag(tensor[:, 4]))
        )
        row2 = sp.hstack(
            (sdiag(tensor[:, 3]), sdiag(tensor[:, 1]), sdiag(tensor[:, 5]))
        )
        row3 = sp.hstack(
            (sdiag(tensor[:, 4]), sdiag(tensor[:, 5]), sdiag(tensor[:, 2]))
        )
        Sigma = sp.vstack((row1, row2, row3))
    else:
        raise Exception('Unexpected shape of tensor')

    return Sigma
def stack(X):
    stacker = np.vstack if isinstance(X[0], np.ndarray) else sp.vstack
    return stacker(X)
def compact(self, root, dims):
    # CLS
    Ws = []
    bs = []

    # Tree: index, left, right, isLeaf
    tree = []

    # Payload
    probs = []

    def f(node):
        if node.is_leaf:
            treeIdx = len(probs)
            probs.append(node.probs)
            tree.append([treeIdx, 0, 0, 1])
        else:
            leftIndex = f(node.left)
            rightIndex = f(node.right)
            clfIdx = len(Ws)
            Ws.append(node.w)
            bs.append(node.b[0])
            tree.append([clfIdx, leftIndex, rightIndex, 0])
        curIdx = len(tree) - 1
        return curIdx

    rootIdx = f(root)
    if Ws:
        W_stack = sp.vstack(Ws)
    else:
        W_stack = sp.csr_matrix(([], ([], [])), shape=(0, dims)).astype('float32')
    b = np.array(bs, dtype='float32')
    t = np.array(tree, dtype='uint32')
    return Tree(rootIdx, W_stack, b, t, probs)
def _compute_leaf_probs(self, X, y):
    dd = defaultdict(list)
    norms = compute_unit_norms(X)
    ml = 0
    for Xi, yis in zip(X, y):
        Xin = norm(norms, Xi)
        for yi in yis:
            dd[yi].append(Xin)
            ml = max(yi, ml)

    if self.verbose:
        print("Computing means and radius for hard margin")

    xmeans = []
    xrs = []
    with closing(multiprocessing.Pool(processes=self.n_jobs)) as p:
        it = ((i, dd[i], self.leaf_eps) for i in range(ml + 1))
        for k, ux, r in p.imap(compute_leaf_metrics, it, 100):
            if self.verbose and k % 100 == 0:
                print("Training leaf classifier: %s of %s" % (k, ml))

            if ux is None:
                ux = sp.csr_matrix((1, X[0].shape[1])).astype('float64')

            xmeans.append(ux)
            xrs.append(r)

    return norms, sp.vstack(xmeans), np.array(xrs, dtype=np.float32)
def to_realimag(z):
    """
    Convert a complex hermitian matrix to a real-valued doubled-up
    representation: a complex hermitian matrix ``Z`` with elementwise real
    and imaginary parts ``Z = Z_r + 1j * Z_i`` can be isomorphically
    represented in doubled-up form as::

        R(Z) = [ Z_r   Z_i]
               [-Z_i   Z_r]

        R(X)*R(Y) = [ (X_r*Y_r - X_i*Y_i)    (X_r*Y_i + X_i*Y_r)]
                    [-(X_r*Y_i + X_i*Y_r)    (X_r*Y_r - X_i*Y_i)]

                  = R(X*Y)

    In particular, ``Z`` is complex positive (semi-)definite iff ``R(Z)``
    is real positive (semi-)definite.

    :param (qutip.Qobj|scipy.sparse.base.spmatrix) z: The operator
        representation matrix.
    :returns: R(Z), the doubled-up representation.
    :rtype: scipy.sparse.csr_matrix
    """
    if isinstance(z, qt.Qobj):
        z = z.data
    if not is_hermitian(z):  # pragma no coverage
        raise ValueError("Need a hermitian matrix z")
    # For hermitian z, z.imag is antisymmetric, so z.imag.T equals -z.imag.
    return spvstack([sphstack([z.real, z.imag]),
                     sphstack([z.imag.T, z.real])]).tocsr().real
def partial_fit(self, X, y=None):
    self._partial_fit(X, y)

    Xt = self.tfidf.transform(self._cv.transform(X))
    self._X = sp.vstack([self._X, Xt])
    return self
def vstack(Xs):
    Xs = iter(Xs)
    X = next(Xs)
    Xs = itertools.chain([X], Xs)
    return sparse.vstack(Xs) if sparse.issparse(X) else np.vstack(Xs)
def objective_function_test(self, x, **kwargs):
    start_time = time.time()

    rng = kwargs.get("rng", None)
    self.rng = rng_helper.get_rng(rng=rng, self_rng=self.rng)

    # Concatenate training and validation dataset
    if type(self.train) == sparse.csr.csr_matrix or type(self.valid) == sparse.csr.csr_matrix:
        train = sparse.vstack((self.train, self.valid))
    else:
        train = np.concatenate((self.train, self.valid))

    train_targets = np.concatenate((self.train_targets, self.valid_targets))

    # Transform hyperparameters to linear scale
    C = np.exp(float(x[0]))
    gamma = np.exp(float(x[1]))

    # Train support vector machine
    clf = svm.SVC(gamma=gamma, C=C, random_state=self.rng)
    clf.fit(train, train_targets)

    # Compute test error
    y = 1 - clf.score(self.test, self.test_targets)
    c = time.time() - start_time

    return {'function_value': y, "cost": c}
def vstack(x):
    if any(sp.issparse(p) for p in x):
        return sp.vstack(x, format='csr')
    else:
        return np.vstack(x)
def construct_gradient_matrix(shape_image, return_separate=False):
    h, w = shape_image
    Dx = matrix_from_filter.matrix_from_filter(np.array([[0, -1.0, 1.0]]),
                                               shape=shape_image)
    Dy = matrix_from_filter.matrix_from_filter(np.array([[0, -1.0, 1.0]]).T,
                                               shape=shape_image)
    if return_separate:
        return Dx.tocsr(), Dy.tocsr()
    else:
        G = sparse.vstack((Dx, Dy)).tocsr()
        return G
def construct_2ndorder_matrix(shape_image, return_separate=False):
    h, w = shape_image
    Dxx = matrix_from_filter.matrix_from_filter(np.array([[1.0, -2.0, 1.0]]),
                                                shape=shape_image)
    Dyy = matrix_from_filter.matrix_from_filter(np.array([[1.0, -2.0, 1.0]]).T,
                                                shape=shape_image)
    # filt_xy = np.array([[0.0, 1.0, -1.0], [0.0, -1.0, 1.0], [0.0, 0.0, 0.0]])
    # Dxy = matrix_from_filter.matrix_from_filter(filt_xy, shape=shape_image)
    # G = sparse.vstack((Dxx, Dxy, Dyy)).tocsr()
    if return_separate:
        return Dxx.tocsr(), Dyy.tocsr()
    else:
        G = sparse.vstack((Dxx, Dyy)).tocsr()
        return G
def construct_2ndorder_matrix_full(shape_image):
    h, w = shape_image
    Dxx = matrix_from_filter.matrix_from_filter(np.array([[1.0, -2.0, 1.0]]),
                                                shape=shape_image)
    Dyy = matrix_from_filter.matrix_from_filter(np.array([[1.0, -2.0, 1.0]]).T,
                                                shape=shape_image)
    # filt_xy = np.array([[0.0, 1.0, -1.0], [0.0, -1.0, 1.0], [0.0, 0.0, 0.0]])
    filt_xy = np.array([[0.0, 0.0, 0.0], [0.0, 1.0, -1.0], [0.0, -1.0, 1.0]])
    Dxy = matrix_from_filter.matrix_from_filter(filt_xy, shape=shape_image)
    G = sparse.vstack((Dxx, Dxy, Dyy)).tocsr()
    return G
def construct_2ndorder_matrix_div(shape_image):
    h, w = shape_image
    Dxx = matrix_from_filter.matrix_from_filter(np.array([[1.0, -2.0, 1.0]]),
                                                shape=shape_image)
    Dyy = matrix_from_filter.matrix_from_filter(np.array([[1.0, -2.0, 1.0]]).T,
                                                shape=shape_image)
    # filt_xy = np.array([[0.0, 1.0, -1.0], [0.0, -1.0, 1.0], [0.0, 0.0, 0.0]])
    filt_xy = np.array([[0.0, 0.0, 0.0], [0.0, 1.0, -1.0], [0.0, -1.0, 1.0]])
    Dxy = matrix_from_filter.matrix_from_filter(filt_xy, shape=shape_image)
    G = sparse.vstack((Dxx, Dxy, Dyy)).T.tocsr()
    return G
def visit_VStack(self, node):
    """ VStack( SpMatrices ) => SpMatrix """
    node = self.generic_visit(node)
    if all(isinstance(c, SpMatrix) for c in node._children):
        name = "{}+".format(node._children[0]._name)
        dtype = node._children[0].dtype
        log.debug('realizing vstack %s', ', '.join(c._name for c in node._children))
        m = spp.vstack([c._matrix for c in node._children], dtype=dtype)
        return SpMatrix(node._backend, m, name=name)
    else:
        return node