We extracted the following 6 code examples from Python open-source projects to illustrate how to use scipy.sparse.linalg.splu().
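Before the extracted examples, here is a minimal, self-contained sketch of the basic API: splu() expects a square sparse matrix (CSC format is preferred) and returns a SuperLU object whose solve() method performs the forward/backward substitution. The small matrix and right-hand side below are made up for illustration.

import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import splu

# A small, made-up sparse system A x = b.
A = csc_matrix(np.array([[4.0, 1.0, 0.0],
                         [1.0, 3.0, 1.0],
                         [0.0, 1.0, 2.0]]))
b = np.array([1.0, 2.0, 3.0])

lu = splu(A)       # factorize once; returns a SuperLU object
x = lu.solve(b)    # forward/backward substitution for this right-hand side

print(np.allclose(A @ x, b))   # True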
def preprocess(self, filepath):
    """Remember: row-first ordered csv file only!"""
    self.pp("reading")
    self.node2index, H = read_matrix(filepath, d=-self.d, add_identity=True)
    n, _ = H.shape
    if self.t is None:
        self.t = np.power(n, -0.5)
    elif self.t == 0:
        self.exact = True
    self.pp("sorting H")
    self.perm = degree_reverse_rank_perm(H)
    H = reorder_matrix(H, self.perm).tocsc()
    self.pp("computing LU decomposition")
    if self.exact:
        self.LU = splu(H)
    else:
        self.LU = spilu(H, drop_tol=self.t)
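The example above switches between an exact factorization (splu) and an incomplete one (spilu) controlled by drop_tol. A rough, self-contained comparison of the two on a synthetic diagonally dominant matrix (size, density, and tolerance are chosen arbitrarily for illustration) might look like this:

import numpy as np
from scipy.sparse import identity, random as sparse_random
from scipy.sparse.linalg import splu, spilu

rng = np.random.default_rng(0)
n = 200
# A diagonally dominant random sparse matrix (synthetic, for illustration only).
A = (sparse_random(n, n, density=0.02, random_state=0) + 10 * identity(n)).tocsc()
b = rng.random(n)

x_exact = splu(A).solve(b)                   # exact sparse LU
x_approx = spilu(A, drop_tol=1e-4).solve(b)  # incomplete LU, drops small entries

print(np.linalg.norm(x_exact - x_approx))    # small, but nonzero in general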
def do_newton_lu_cycle(self, prob, max_outer=20, eps=1e-10):
    """Newton iteration using sparse LU decomposition."""
    # compatibility for linear problems
    v = np.ones(prob.rhs.shape)
    F = prob.A
    if type(prob.A) is csc_matrix:
        F = lambda x: prob.A.dot(x)
    r = prob.rhs - F(v)
    while max_outer > 0 and np.linalg.norm(r, np.inf) > eps:
        Jv = specificJacobi(prob.ndofs, prob.gamma, v)
        r = prob.rhs - F(v)
        e = sLA.splu(csc_matrix(Jv)).solve(r)  # solve J(v) e = r via sparse LU
        v += e
        max_outer -= 1
    return v, max_outer
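For readers less familiar with the pattern, here is a stripped-down Newton iteration in the same spirit, applied to a tiny made-up nonlinear system. The residual and Jacobian below are illustrative stand-ins, not the prob object or specificJacobi() from the example above.

import numpy as np
from scipy.sparse import csc_matrix, diags
from scipy.sparse.linalg import splu

# Made-up nonlinear system: F(x) = A x + x**3 - b = 0.
A = csc_matrix(np.array([[3.0, -1.0, 0.0],
                         [-1.0, 3.0, -1.0],
                         [0.0, -1.0, 3.0]]))
b = np.array([1.0, 2.0, 3.0])

def residual(x):
    return A @ x + x**3 - b

def jacobian(x):
    # J(x) = A + diag(3 x^2)
    return (A + diags(3.0 * x**2)).tocsc()

x = np.zeros(3)
for _ in range(20):
    r = residual(x)
    if np.linalg.norm(r, np.inf) < 1e-10:
        break
    x += splu(jacobian(x)).solve(-r)  # Newton step via sparse LU

print(residual(x))  # close to zero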
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spl


def spsolve2(a, b):
    a_lu = spl.splu(a.tocsc())  # LU decomposition for sparse a
    out = sp.lil_matrix((a.shape[1], b.shape[1]), dtype=np.float32)
    b_csc = b.tocsc()
    for j in range(b.shape[1]):
        bb = np.array(b_csc[j, :].todense()).squeeze()
        out[j, j] = a_lu.solve(bb)[j]  # keep only the j-th entry of the solve
    return out.tocsr()
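spsolve2() factors a once and reuses the SuperLU object for every column of b, which is the main reason to call splu() instead of a fresh solve per right-hand side. A minimal sketch of that factor-once, solve-many pattern on made-up matrices:

import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import splu

# Made-up system with several right-hand sides.
A = csc_matrix(np.array([[4.0, 1.0],
                         [1.0, 3.0]]))
B = np.array([[1.0, 0.0, 2.0],
              [0.0, 1.0, 5.0]])

lu = splu(A)   # factor once
X = np.column_stack([lu.solve(B[:, j]) for j in range(B.shape[1])])

print(np.allclose(A @ X, B))   # True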
from scipy.linalg import lu_factor
from scipy.sparse import issparse
from scipy.sparse.linalg import splu


def myfactor(A):
    if issparse(A):
        return splu(A.tocsc())   # SuperLU object exposing .solve()
    else:
        return lu_factor(A)      # (lu, piv) pair for scipy.linalg.lu_solve
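myfactor() returns two different kinds of objects: a SuperLU instance (with a .solve() method) for sparse input, and the (lu, piv) pair from scipy.linalg.lu_factor for dense input. A hypothetical companion mysolve(), not part of the original project, would dispatch on that difference:

import numpy as np
from scipy.linalg import lu_solve

def mysolve(factorization, b):
    """Hypothetical counterpart to myfactor() above; dispatches on the factorization type."""
    if isinstance(factorization, tuple):   # (lu, piv) pair from scipy.linalg.lu_factor
        return lu_solve(factorization, b)
    return factorization.solve(b)          # SuperLU object from splu

# Dense usage with the myfactor() defined above:
A = np.array([[4.0, 1.0], [1.0, 3.0]])
x = mysolve(myfactor(A), np.array([1.0, 2.0]))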
def preprocess(self, filepath):
    """Remember: row-first ordered csv file only!"""
    self.pp("reading")
    self.node2index, H = read_matrix(filepath, d=-self.d, add_identity=True)
    self.n, _ = H.shape
    if self.t is None:
        self.t = np.power(self.n, -0.5)
    elif self.t == 0:
        self.exact = True
    if self.k is None:
        self.k = max(1, int(0.001 * self.n))
    self.pp("running slashburn")
    self.perm_H, wing = slashburn(H, self.k, self.greedy)
    self.body = self.n - wing
    self.pp("sorting H")
    H = reorder_matrix(H, self.perm_H)
    self.pp("partitioning H")
    H11, H12, H21, H22 = matrix_partition(H, self.body)
    del H
    H11 = H11.tocsc()
    self.pp("computing LU decomposition on H11")
    if self.exact:
        self.LU1 = splu(H11)
    else:
        self.LU1 = spilu(H11, drop_tol=self.t)
    del H11
    self.pp("computing LU1 solve")
    S = H22 - H21 @ self.LU1.solve(H12.toarray())  # Schur complement of H11
    S = coo_matrix(S)
    self.pp("sorting S")
    self.perm_S = degree_reverse_rank_perm(S)
    S = reorder_matrix(S, self.perm_S)
    self.H12 = reorder_matrix(H12, self.perm_S, fix_row=True)
    self.H21 = reorder_matrix(H21, self.perm_S, fix_col=True)
    S = S.tocsc()
    del H12, H21, H22
    self.pp("computing LU decomposition on S")
    if self.exact:
        self.LU2 = splu(S)
    else:
        self.LU2 = spilu(S, drop_tol=self.t)
    # issue: this approximation drops accuracy way too much! why?
    """
    if not self.exact:
        H12 = drop_tolerance(self.H12, self.t)
        del self.H12
        self.H12 = H12
        H21 = drop_tolerance(self.H21, self.t)
        del self.H21
        self.H21 = H21
    """
    del S
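The preprocessing above sets up a block-elimination solve: with H partitioned into [[H11, H12], [H21, H22]] and the Schur complement S = H22 - H21 H11^{-1} H12, a right-hand side split into (q1, q2) can be answered from the two factorizations LU1 and LU2. The following is only a sketch of that solve step under the stated partitioning; the permutation bookkeeping (perm_H, perm_S) from the example is deliberately ignored, and block_solve() is not a function from the original project.

import numpy as np

def block_solve(LU1, LU2, H12, H21, q1, q2):
    """Solve [[H11, H12], [H21, H22]] @ [x1; x2] = [q1; q2] given LU
    factorizations of H11 (LU1) and of the Schur complement
    S = H22 - H21 @ H11^{-1} @ H12 (LU2). Sketch only; the reorderings
    used in the example above are not handled here."""
    y1 = LU1.solve(q1)              # H11^{-1} q1
    x2 = LU2.solve(q2 - H21 @ y1)   # S^{-1} (q2 - H21 H11^{-1} q1)
    x1 = LU1.solve(q1 - H12 @ x2)   # H11^{-1} (q1 - H12 x2)
    return np.concatenate([x1, x2])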
def __init__(self, sess, n, filename, jump_prob=0.05, drop_tol=1e-8, verbose=False):
    """
    Computes PPR using LU decomposition.

    Args:
        sess (Session): tensorflow session.
        n (int): Number of nodes.
        filename (str): A csv file denoting the graph.
        jump_prob (float): Jumping probability of PPR.
        drop_tol (float): Drops entries with absolute value lower than this value
            when computing inverse of LU.
        verbose (bool): Prints step messages if True.
    """
    self.alias = 'ludc'
    self.verbose = verbose
    self.pp("initializing")
    self.sess = sess
    self.n = n
    self.c = jump_prob
    d = 1 - self.c
    t = drop_tol
    exact = False
    if t is None:
        t = np.power(n, -0.5)
    elif t == 0:
        exact = True
    self.pp("reading")
    self.node2index, H = read_matrix(filename, d=-d, add_identity=True)
    self.pp("sorting H")
    self.perm = degree_reverse_rank_perm(H)
    H = reorder_matrix(H, self.perm).tocsc()
    self.pp("computing LU decomposition")
    if exact:
        self.LU = splu(H)
    else:
        self.LU = spilu(H, drop_tol=t)
    Linv = inv(self.LU.L).tocoo()
    Uinv = inv(self.LU.U).tocoo()
    self.pp("tf init")
    with tf.variable_scope('ppr_lu_decomposition_tf'):
        t_Linv = tf.SparseTensorValue(list(zip(Linv.row, Linv.col)), Linv.data,
                                      dense_shape=self.LU.L.shape)
        t_Uinv = tf.SparseTensorValue(list(zip(Uinv.row, Uinv.col)), Uinv.data,
                                      dense_shape=self.LU.U.shape)
        self.t_q = tf.placeholder(tf.float64, shape=[self.n, 1])
        self.t_r = _sdmm(t_Uinv, _sdmm(t_Linv, self.c * self.t_q))
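Assuming H here encodes I - (1 - c) * A_tilde (which is how read_matrix() appears to be called, with d = -(1 - c) and add_identity=True), the class ultimately computes a PPR vector of the form r ≈ c * H^{-1} q through the inverted LU factors. Outside TensorFlow, that quantity can be sketched directly with the SuperLU object; everything below (graph, normalization, seed) is made up for illustration and ignores the node reordering used above.

import numpy as np
from scipy.sparse import diags, identity, random as sparse_random
from scipy.sparse.linalg import splu

# Illustrative stand-in for H = I - (1 - c) * A_tilde; A_tilde is a made-up
# random graph with column-normalized weights.
c = 0.05                                   # jump probability
n = 100
A_tilde = sparse_random(n, n, density=0.05, random_state=0).tocsr()
colsum = np.asarray(A_tilde.sum(axis=0)).ravel()
colsum[colsum == 0] = 1.0
A_tilde = A_tilde @ diags(1.0 / colsum)    # column-normalize
H = (identity(n) - (1 - c) * A_tilde).tocsc()

q = np.zeros(n)
q[0] = 1.0                                 # seed node

LU = splu(H)
r = c * LU.solve(q)                        # PPR scores: r = c * H^{-1} q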