The following 50 code examples, extracted from open-source Python projects, illustrate how to use scipy.sparse.eye().
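Before the extracted examples, a minimal sketch of the function itself: scipy.sparse.eye(m, n=None, k=0, dtype=float, format=None) builds a sparse matrix with ones on the k-th diagonal, in any of the standard sparse formats.

import scipy.sparse as sp

I = sp.eye(3)                    # 3x3 identity (DIA format by default)
I_csr = sp.eye(3, format='csr')  # same matrix, stored as CSR
R = sp.eye(3, 5, k=1)            # rectangular, ones on the first superdiagonal

print(I.toarray())
print(R.toarray())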
def sparse_cH(terms, ldim=2):
    """Construct a sparse cyclic nearest-neighbour Hamiltonian

    :param terms: List of nearest-neighbour terms (square array or MPO,
        see return value of :func:`cXY_local_terms`)
    :param ldim: Local dimension
    :returns: The Hamiltonian as sparse matrix
    """
    H = 0
    N = len(terms)
    for pos, term in enumerate(terms[:-1]):
        if hasattr(term, 'lt'):
            # Convert MPO to regular matrix
            term = term.to_array_global().reshape((ldim**2, ldim**2))
        left = sp.eye(ldim**pos)
        right = sp.eye(ldim**(N - pos - 2))
        H += sp.kron(left, sp.kron(term, right))
    # The last term acts on the first and last site.
    cyc = terms[-1]
    middle = sp.eye(ldim**pos)
    for i in range(cyc.ranks[0]):
        H += sp.kron(cyc.lt[0][0, ..., i],
                     sp.kron(middle, cyc.lt[1][i, ..., 0]))
    return H
def test_FaceInnerProductAnisotropicDeriv(self):
    def fun(x):
        # fake anisotropy (testing anisotropic implementation with isotropic
        # vector). First order behavior expected for fully anisotropic
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T
        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([eye, zero, eye])])
        MfSig = self.mesh.getFaceInnerProduct(x)
        MfSigDeriv = self.mesh.getFaceInnerProductDeriv(x0)
        return MfSig*self.face_vec, MfSigDeriv(self.face_vec) * P.T
    print('Testing FaceInnerProduct Anisotropic')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_FaceInnerProductAnisotropicDerivInvProp(self):
    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T
        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([eye, zero, eye])])
        MfSig = self.mesh.getFaceInnerProduct(x, invProp=True)
        MfSigDeriv = self.mesh.getFaceInnerProductDeriv(x0, invProp=True)
        return MfSig*self.face_vec, MfSigDeriv(self.face_vec) * P.T
    print('Testing FaceInnerProduct Anisotropic InvProp')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_FaceInnerProductAnisotropicDerivInvMat(self):
    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T
        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([eye, zero, eye])])
        MfSig = self.mesh.getFaceInnerProduct(x, invMat=True)
        MfSigDeriv = self.mesh.getFaceInnerProductDeriv(x0, invMat=True)
        return MfSig*self.face_vec, MfSigDeriv(self.face_vec) * P.T
    print('Testing FaceInnerProduct Anisotropic InvMat')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_FaceInnerProductAnisotropicDerivInvPropInvMat(self):
    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T
        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([eye, zero, eye])])
        MfSig = self.mesh.getFaceInnerProduct(x, invProp=True, invMat=True)
        MfSigDeriv = self.mesh.getFaceInnerProductDeriv(x0, invProp=True,
                                                        invMat=True)
        return MfSig*self.face_vec, MfSigDeriv(self.face_vec) * P.T
    print('Testing FaceInnerProduct Anisotropic InvProp InvMat')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_EdgeInnerProductAnisotropicDeriv(self):
    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T
        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([zero, eye, zero])])
        MeSig = self.mesh.getEdgeInnerProduct(x.reshape(self.mesh.nC, 3))
        MeSigDeriv = self.mesh.getEdgeInnerProductDeriv(x0)
        return MeSig*self.edge_vec, MeSigDeriv(self.edge_vec) * P.T
    print('Testing EdgeInnerProduct Anisotropic')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_EdgeInnerProductAnisotropicDerivInvMat(self):
    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T
        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([zero, eye, zero])])
        MeSig = self.mesh.getEdgeInnerProduct(x, invMat=True)
        MeSigDeriv = self.mesh.getEdgeInnerProductDeriv(x0, invMat=True)
        return MeSig*self.edge_vec, MeSigDeriv(self.edge_vec) * P.T
    print('Testing EdgeInnerProduct Anisotropic InvMat')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def test_EdgeInnerProductAnisotropicDerivInvPropInvMat(self):
    def fun(x):
        x = np.repeat(np.atleast_2d(x), 3, axis=0).T
        x0 = np.repeat(self.x0, 3, axis=0).T
        zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
        eye = sp.eye(self.mesh.nC)
        P = sp.vstack([sp.hstack([zero, eye, zero])])
        MeSig = self.mesh.getEdgeInnerProduct(x, invProp=True, invMat=True)
        MeSigDeriv = self.mesh.getEdgeInnerProductDeriv(x0, invProp=True,
                                                        invMat=True)
        return MeSig*self.edge_vec, MeSigDeriv(self.edge_vec) * P.T
    print('Testing EdgeInnerProduct Anisotropic InvProp InvMat')
    return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
                                                 tolerance=TOLD, plotIt=False))
def linear_diagonal_solver(observations, mask, H_matrix, n_params,
                           x_forecast, P_forecast, R_mat, the_metadata,
                           approx_diagonal=True):
    LOG.info("Diagonal covariance solver")
    the_mask = np.array([mask.ravel() for i in xrange(n_params)]).ravel()
    S = (H_matrix.dot(P_forecast.dot(H_matrix.T))) + R_mat
    Sd = np.zeros(x_forecast.shape[0]/n_params)
    Sd[mask.ravel()] = 1./(S.diagonal()[mask.ravel()])
    Sinv = sp.eye(Sd.shape[0])
    Sinv.setdiag(Sd)
    kalman_gain = (P_forecast.dot(H_matrix.T)).dot(Sinv)
    innovations = (observations.ravel() - H_matrix.dot(x_forecast))
    innovations[~mask.ravel()] = 0.
    x_analysis = x_forecast + kalman_gain*innovations
    P_analysis = (sp.eye(x_analysis.shape[0])
                  - kalman_gain.dot(H_matrix)).dot(P_forecast)
    P_analysis_prime = None
    LOG.info("Solved!")
    return x_analysis, P_analysis, None, innovations[~mask.ravel()]
def set_trajectory_uncertainty(self, Q):
    """In a Kalman filter, the model that propagates the state from time
    `t` to `t+1` is assumed to be *wrong*, and this is indicated by having
    additive Gaussian noise, which we assume is zero-mean, and controlled by
    a covariance matrix `Q`. Here, you can provide the main diagonal of `Q`.

    Parameters
    -----------
    Q: array
        The main diagonal of the model uncertainty covariance matrix.
    """
    n = self.n_state_elems
    self.trajectory_uncertainty = sp.eye(self.n_params*n, self.n_params*n,
                                         format="csr")
    self.trajectory_uncertainty.setdiag(Q)
def test_random_cholmod(self):
    n_rows = 100
    A0 = 10*sp.rand(n_rows, n_rows, density=0.01, format='csc')
    A = A0*A0.transpose() + sp.eye(n_rows, n_rows)

    [L, L_nonpsd, S] = lchol.lchol(A)
    self.assertTrue(sum((abs(S.T.dot(A.dot(S)) - L.dot(L.T))).data) < 1e-5)
    self.assertEqual(L_nonpsd, 0)

# def test_memory_leak(self):
#     n_rows = 3000
#     A0 = 10*sp.rand(n_rows, n_rows, density=0.001, format='csc')
#     A = A0*A0.transpose() + sp.eye(n_rows, n_rows)
#
#     mem0 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
#     for i in range(50):
#         [chol_L, L_nonpsd, chol_S] = lchol.lchol(A)
#         import gc
#         gc.collect()
#
#     mem1 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
#     #print(mem1 - mem0)
#     self.assertTrue(True)
def sakurai(n):
    """Example taken from
    T. Sakurai, H. Tadano, Y. Inadomi and U. Nagashima,
    "A moment-based method for large-scale generalized eigenvalue problems",
    Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004).
    """
    A = sparse.eye(n, n)
    d0 = array(r_[5, 6*ones(n-2), 5])
    d1 = -4*ones(n)
    d2 = ones(n)
    B = sparse.spdiags([d2, d1, d0, d1, d2], [-2, -1, 0, 1, 2], n, n)

    k = arange(1, n+1)
    w_ex = sort(1./(16.*pow(cos(0.5*k*pi/(n+1)), 4)))  # exact eigenvalues

    return A, B, w_ex
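As an illustrative check (not part of the original snippet), the pencil returned by sakurai can be handed to a dense generalized eigensolver and compared against the analytic eigenvalues w_ex; for small n this is cheap:

import numpy as np
from scipy.linalg import eigh

A, B, w_ex = sakurai(50)
# Dense solve of the generalized problem A x = w B x (fine for small n)
w = eigh(A.toarray(), B.toarray(), eigvals_only=True)
print(np.max(np.abs(np.sort(w) - w_ex)))  # close to machine precision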
def ordinaryWalk(self):
    tick = time.time()
    alpha = self.param['alpha']
    n = self.graph.shape[0]
    c = self.Y.shape[1]
    nf = self.param['normalize_factor']
    #self.graph = self.graph + 3000 * sparse.eye(n, n)
    Di = sparse.diags([np.sqrt(1 / (self.graph.sum(axis=0)
                                    + nf * np.ones(n))).getA1()], [0])
    S = Di.dot(self.graph.dot(Di))
    S_iter = (sparse.eye(n) - alpha * S).tocsc()
    F = np.zeros((n, c))
    for i in range(c):
        F[:, i], info = slin.cg(S_iter, self.Y[:, i], tol=1e-12)
    toc = time.time()
    #print np.where(F > 0)
    self.ElapsedTime = toc - tick
    self.PredictedProbs = F
def _evalPolicyMatrix(self):
    # Evaluate the value function of the policy using linear equations.
    #
    # Arguments
    # ---------
    # Let S = number of states, A = number of actions
    # P(SxSxA) = transition matrix
    #     P could be an array with 3 dimensions or a cell array (1xA),
    #     each cell containing a matrix (SxS) possibly sparse
    # R(SxSxA) or (SxA) = reward matrix
    #     R could be an array with 3 dimensions (SxSxA) or
    #     a cell array (1xA), each cell containing a sparse matrix (SxS) or
    #     a 2D array (SxA) possibly sparse
    # discount = discount rate in ]0; 1[
    # policy(S) = a policy
    #
    # Evaluation
    # ----------
    # Vpolicy(S) = value function of the policy
    #
    Ppolicy, Rpolicy = self._computePpolicyPRpolicy()
    # V = PR + gPV  =>  (I - gP)V = PR  =>  V = inv(I - gP) * PR
    self.V = _np.linalg.solve(
        (_sp.eye(self.S, self.S) - self.discount * Ppolicy), Rpolicy)
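The comment above is the standard policy-evaluation identity V = (I - gP)^-1 PR. As a self-contained numeric illustration (a toy two-state chain; all names here are made up for the example):

import numpy as np
import scipy.sparse as sp

P = np.array([[0.9, 0.1],
              [0.2, 0.8]])   # transition matrix under a fixed policy
R = np.array([1.0, 0.0])     # expected reward per state
discount = 0.95

# Solve (I - discount * P) V = R
V = np.linalg.solve(sp.eye(2).toarray() - discount * P, R)
print(V)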
def chebyshev_polynomial(X, k):
    """Calculate Chebyshev polynomials up to order k.
    Return a list of sparse matrices."""
    print("Calculating Chebyshev polynomials up to order {}...".format(k))

    T_k = list()
    T_k.append(sp.eye(X.shape[0]).tocsr())
    T_k.append(X)

    def chebyshev_recurrence(T_k_minus_one, T_k_minus_two, X):
        X_ = sp.csr_matrix(X, copy=True)
        return 2 * X_.dot(T_k_minus_one) - T_k_minus_two

    for i in range(2, k+1):
        T_k.append(chebyshev_recurrence(T_k[-1], T_k[-2], X))

    return T_k
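A minimal usage sketch for chebyshev_polynomial (assuming the same imports as the snippet, numpy as np and scipy.sparse as sp): the recurrence yields T_0 = I, T_1 = X, and T_k = 2*X*T_{k-1} - T_{k-2}.

import numpy as np
import scipy.sparse as sp

# Toy symmetric matrix standing in for a rescaled graph Laplacian
X = sp.random(6, 6, density=0.5, format='csr')
X = 0.5 * (X + X.T)

T_k = chebyshev_polynomial(X, 3)
assert len(T_k) == 4                             # T_0 .. T_3
assert np.allclose(T_k[0].toarray(), np.eye(6))  # T_0 is the identity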
def iteration(self, user, fixed_vecs):
    num_solve = self.num_users if user else self.num_items
    num_fixed = fixed_vecs.shape[0]
    YTY = fixed_vecs.T.dot(fixed_vecs)
    eye = sparse.eye(num_fixed)
    lambda_eye = self.reg_param * sparse.eye(self.num_factors)
    solve_vecs = np.zeros((num_solve, self.num_factors))

    for i in xrange(num_solve):
        if user:
            matrix_i = self.matrix[i].toarray()
        else:
            matrix_i = self.matrix[:, i].T.toarray()
        CuI = sparse.diags(matrix_i, [0])
        pu = matrix_i.copy()
        pu[np.where(pu != 0)] = 1.0
        YTCuIY = fixed_vecs.T.dot(CuI).dot(fixed_vecs)
        YTCupu = fixed_vecs.T.dot(CuI + eye).dot(sparse.csr_matrix(pu).T)
        xu = spsolve(YTY + YTCuIY + lambda_eye, YTCupu)
        solve_vecs[i] = xu

    return solve_vecs
def update_V(m_opts, m_vars):
    # expectation of xi_{nl} and of omega_{nl} for n = i
    P, N = E_x_omega_col(m_opts, m_vars)
    Kappa = PG_col(m_opts, m_vars)  # polyagamma kappa_{nl} for n = i
    PN = P*N
    PK = P*Kappa
    for i in range(m_vars['n_labels']):
        PN_i = PN[i][:, np.newaxis]
        PK_i = PK[i]
        sigma = m_vars['U_batch'].T.dot(PN_i*m_vars['U_batch'])  # + m_opts['lam_v']*np.eye(m_opts['n_components'])
        x = m_vars['U_batch'].T.dot(PK_i)
        m_vars['sigma_v'][i] = (1-m_vars['gamma'])*m_vars['sigma_v'][i] + m_vars['gamma']*sigma
        m_vars['x_v'][i] = (1-m_vars['gamma'])*m_vars['x_v'][i] + m_vars['gamma']*x
        m_vars['V'][i] = linalg.solve(m_vars['sigma_v'][i], m_vars['x_v'][i])
def learn_embedding(self, graph=None, edge_f=None,
                    is_weighted=False, no_python=False):
    if not graph and not edge_f:
        raise Exception('graph/edge_f needed')
    if not graph:
        graph = graph_util.loadGraphFromEdgeListTxt(edge_f)
    graph = graph.to_undirected()
    t1 = time()
    A = nx.to_scipy_sparse_matrix(graph)
    normalize(A, norm='l1', axis=1, copy=False)
    I_n = sp.eye(graph.number_of_nodes())
    I_min_A = I_n - A
    u, s, vt = lg.svds(I_min_A, k=self._d + 1, which='SM')
    t2 = time()
    self._X = vt.T
    self._X = self._X[:, 1:]
    return self._X, (t2 - t1)
def test_creation():
    AnnData(np.array([[1, 2], [3, 4]]))
    AnnData(np.array([[1, 2], [3, 4]]), {}, {})
    AnnData(ma.array([[1, 2], [3, 4]]), uns={'mask': [0, 1, 1, 0]})
    AnnData(sp.eye(2))
    AnnData(
        np.array([[1, 2, 3], [4, 5, 6]]),
        dict(Smp=['A', 'B']),
        dict(Feat=['a', 'b', 'c']))

    assert AnnData(np.array([1, 2])).X.shape == (2,)

    from pytest import raises
    raises(ValueError, AnnData,
           np.array([[1, 2], [3, 4]]),
           dict(TooLong=[1, 2, 3, 4]))
def make_linearOperator(shape, Xn, K):
    M, N = shape
    fx = K[0, 0]
    fy = K[1, 1]
    x_hat = Xn[0, :]
    y_hat = Xn[1, :]

    # use one-sided differences with backward diff at image border
    Kx, Ky = make_derivatives_2D_complete(shape)
    Kx = Kx.tocsr()
    Ky = Ky.tocsr()

    spId = sparse.eye(M*N, M*N, format='csr')
    spXhat = sparse.diags(x_hat.flatten(), 0).tocsr()
    spYhat = sparse.diags(y_hat.flatten(), 0).tocsr()

    L = sparse.vstack([-Kx/fy,
                       -Ky/fx,
                       spXhat*Kx/fy + spYhat*Ky/fx + 2*spId/(fx*fy)])
    return L.tocsr()
def fit(self, X, y, L):
    """Fit the model according to the given training data.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training data.

    y : array-like, shape = [n_samples]
        Target values (unlabeled points are marked as 0).

    L : array-like, shape = [n_samples, n_samples]
        Graph Laplacian.
    """
    labeled = y != 0
    X_labeled = X[labeled]
    y_labeled = y[labeled]

    n_samples, n_features = X.shape
    n_labeled_samples = y_labeled.size

    I = sp.eye(n_features)
    M = X_labeled.T @ X_labeled \
        + self.gamma_a * n_labeled_samples * I \
        + self.gamma_i * n_labeled_samples / n_samples**2 * X.T @ L**self.p @ X

    # Train a classifier
    self.coef_ = LA.solve(M, X_labeled.T @ y_labeled)

    return self
def fit(self, X, y, L):
    """Fit the model according to the given training data.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training data.

    y : array-like, shape = [n_samples]
        Target values (unlabeled points are marked as 0).

    L : array-like, shape = [n_samples, n_samples]
        Graph Laplacian.
    """
    labeled = y != 0
    y_labeled = y[labeled]

    n_samples, n_features = X.shape
    n_labeled_samples = y_labeled.size

    I = sp.eye(n_samples)
    J = sp.diags(labeled.astype(np.float64))
    K = rbf_kernel(X, gamma=self.gamma_k)
    M = J @ K \
        + self.gamma_a * n_labeled_samples * I \
        + self.gamma_i * n_labeled_samples / n_samples**2 * L**self.p @ K

    # Train a classifier
    self.dual_coef_ = LA.solve(M, y)

    return self
def globalphase(theta, N=1):
    """
    Returns quantum object representing the global phase shift gate.

    Parameters
    ----------
    theta : float
        Phase rotation angle.

    Returns
    -------
    phase_gate : qobj
        Quantum object representation of global phase shift gate.

    Examples
    --------
    >>> globalphase(pi/4)
    Quantum object: dims = [[2], [2]], \
    shape = [2, 2], type = oper, isHerm = False
    Qobj data =
    [[ 0.70710678+0.70710678j  0.00000000+0.j]
     [ 0.00000000+0.j          0.70710678+0.70710678j]]

    """
    data = (np.exp(1.0j * theta) *
            sp.eye(2 ** N, 2 ** N, dtype=complex, format="csr"))
    return Qobj(data, dims=[[2] * N, [2] * N])


#
# Operation on Gates
#
def preprocess_graph(adj):
    adj = sp.coo_matrix(adj)
    adj_ = adj + sp.eye(adj.shape[0])
    rowsum = np.array(adj_.sum(1))
    degree_mat_inv_sqrt = sp.diags(np.power(rowsum, -0.5).flatten())
    adj_normalized = adj_.dot(degree_mat_inv_sqrt).transpose().dot(
        degree_mat_inv_sqrt).tocoo()
    return sparse_to_tuple(adj_normalized)
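preprocess_graph implements the GCN renormalization A_hat = D^{-1/2}(A + I)D^{-1/2}; a quick standalone numeric check of the same formula (sparse_to_tuple is project-specific and omitted here):

import numpy as np
import scipy.sparse as sp

adj = sp.csr_matrix(np.array([[0., 1.], [1., 0.]]))
adj_ = adj + sp.eye(2)                   # add self-loops
deg = np.asarray(adj_.sum(1)).flatten()  # degrees: [2., 2.]
d_inv_sqrt = sp.diags(np.power(deg, -0.5))
print((d_inv_sqrt @ adj_ @ d_inv_sqrt).toarray())  # [[0.5, 0.5], [0.5, 0.5]]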
def getLapCoefRow(win, numPixInWindow, d, _lambda):
    win = np.concatenate((win, np.ones((win.shape[0], numPixInWindow, 1))),
                         axis=2)
    I = np.tile(np.eye(numPixInWindow), (win.shape[0], 1, 1))
    I[:, -1, -1] = 0
    winTProd = np.einsum('...ij,...kj->...ik', win, win)
    winTProd_reg_inv = np.linalg.inv(winTProd + I*_lambda)
    F = np.einsum('...ij,...jk->...ik', winTProd, winTProd_reg_inv)
    I_F = np.eye(numPixInWindow) - F
    return np.einsum('...ji,...jk->...ik', I_F, I_F)
def solveQuadOpt(L, C, alpha_star):
    # different lambda to the one used to calc the lap;
    # this one regularises the alpha calculation
    lbda = 1e-9
    D = sparse.eye(L.shape[0])
    alpha = sparse.linalg.spsolve(L + C + D*lbda, C @ alpha_star.ravel())
    alpha = np.reshape(alpha, alpha_star.shape)
    # rescale alpha to ~ [0, 1] if using [-1, 0, 1] as labels
    if np.min(alpha_star) == -1:
        alpha = alpha/2 + 0.5
    # clip to [0, 1]
    return np.maximum(np.minimum(alpha, 1), 0)
def get_fiedler(A):
    """
    Compute the 2nd lowest eigenvalue of the Laplacian of A and the
    associated eigenvector (Fiedler vector).

    Parameters
    ----------
    A : scipy.sparse matrix (similarity matrix)

    Returns
    ----------
    fidval : real (2nd smallest eigenvalue)
    fidvec : array (associated eigenvector)
    """
    # Construct Laplacian
    L_A = laplacian(A, normed=False, return_diag=False) \
        + 1.e-9 * eye(A.shape[0])
    # Construct M = lambda_max(lap)*I - lap, whose "second largest"
    # eigenvector is the "second smallest" eigenvector of lap (Fiedler vector)
    (evals_max, _) = eigsh(L_A, 1, which='LA', tol=1e-15, ncv=20)
    maxval = float(evals_max)
    m_lap = maxval * eye(L_A.shape[0]) - L_A
    evec0 = np.ones((1, L_A.shape[0]))
    evals_small, evecs_small = eigsh(m_lap, 2, which='LA', v0=evec0,
                                     tol=1e-20, ncv=20)
    fidvec = -evecs_small[:, 0]
    fidval = float(maxval - evals_small[0])
    return fidval, fidvec
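A small illustrative call (assuming the imports used inside get_fiedler: numpy as np, eye from scipy.sparse, eigsh from scipy.sparse.linalg, laplacian from scipy.sparse.csgraph): on a path graph the Fiedler vector is monotone along the path, so sorting by it recovers the path order.

import numpy as np
import scipy.sparse as sp

# Adjacency of a path graph on 6 nodes, presented in shuffled order
n = 6
A = sp.diags([np.ones(n - 1), np.ones(n - 1)], [-1, 1], format='csr')
perm = np.random.permutation(n)
A_shuffled = A[perm][:, perm]

fidval, fidvec = get_fiedler(A_shuffled)
print(fidval)                    # algebraic connectivity (small, > 0)
print(perm[np.argsort(fidvec)])  # recovers the path order (up to reversal)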
def common_mat(mats):
    assert len({i.shape for i in mats}) == 1
    mats = [i.tocoo() for i in mats] + [eye(mats[0].shape[0], format='coo')]
    rows = np.hstack([i.row for i in mats])
    cols = np.hstack([i.col for i in mats])
    data = np.ones(len(rows))
    return csr_matrix((data, (rows, cols)))
def test_solve_identity_ones(dtype):
    b = np.ones(solver.N, dtype=dtype)
    mat = sp.eye(solver.N, format='csr', dtype=dtype)
    obs = solver.solve(mat, b)
    exp = spla.spsolve(mat, b)
    assert np.allclose(exp, obs)
def test_solve_identity_range(dtype):
    b = np.arange(solver.N, dtype=dtype)
    mat = sp.eye(solver.N, format='csr', dtype=dtype)
    obs = solver.solve(mat, b)
    exp = spla.spsolve(mat, b)
    assert np.allclose(exp, obs)
def test_solve_ones_ones(dtype):
    b = np.ones(solver.N, dtype=dtype)
    mat = solver.ones(dtype=dtype) + 9*sp.eye(solver.N, format='csr',
                                              dtype=dtype)
    obs = solver.solve(mat, b)
    exp = spla.spsolve(mat, b)
    assert np.allclose(exp, obs)
def test_solve_ones_range(dtype):
    b = np.arange(solver.N, dtype=dtype)
    mat = solver.ones(dtype=dtype) + 9*sp.eye(solver.N, format='csr',
                                              dtype=dtype)
    obs = solver.solve(mat, b)
    exp = spla.spsolve(mat, b)
    assert np.allclose(exp, obs)
def test_dot(dtype):
    x = np.arange(solver.N, dtype=dtype)
    mat = solver.ones(dtype=dtype) + 9*sp.eye(solver.N, format='csr',
                                              dtype=dtype)
    exp = mat.dot(x)
    obs = solver.dot(mat, x)
    assert np.allclose(exp, obs)
def __cache__(self, N):
    Id = sps.eye(N, format='csc')
    return Id
def preprocesss_adj(adj):
    adj_normalized, sup_normalized = [], []
    print("normalize adjacent matrix...")
    for i in tqdm(range(adj.shape[0]), ascii=True):
        adj_normalized.append(normalize_adj(adj[i]))
        sup_normalized.append(normalize_adj(adj[i] + sp.eye(adj[i].shape[0])))
    return np.array(adj_normalized), np.array(sup_normalized)
def propagate_information_filter_SLOW(x_analysis, P_analysis,
                                      P_analysis_inverse,
                                      M_matrix, Q_matrix):
    """Information filter state propagation using the INVERSE state
    covariance matrix and a linear state transition model. This function
    returns `None` for the forecast covariance matrix (as this takes
    forever). This method is based on the approximation to the inverse of
    the KF covariance matrix.

    Parameters
    -----------
    x_analysis : array
        The analysis state vector. This comes either from the assimilation
        or directly from a previously propagated state.
    P_analysis : 2D sparse array
        The analysis covariance matrix (typically will be a sparse matrix).
        As this is an information filter update, you will typically pass
        `None` to it, as it is unused.
    P_analysis_inverse : 2D sparse array
        The INVERSE analysis covariance matrix (typically a sparse matrix).
    M_matrix : 2D array
        The linear state propagation model.
    Q_matrix: 2D array (sparse)
        The state uncertainty inflation matrix that is added to the
        covariance matrix.

    Returns
    -------
    x_forecast (forecast state vector), `None` and P_forecast_inverse
    (forecast inverse covariance matrix)"""
    logging.info("Starting the propagation...")
    x_forecast = M_matrix.dot(x_analysis)
    n, n = P_analysis_inverse.shape
    S = P_analysis_inverse.dot(Q_matrix)
    A = (sp.eye(n) + S).tocsc()
    P_forecast_inverse = spl.spsolve(A, P_analysis_inverse)
    logging.info("Done with propagation")
    return x_forecast, None, P_forecast_inverse
def set_trajectory_model(self):
    """In a Kalman filter, the state is propagated from time `t` to `t+1`
    using a model. We assume that this model is a matrix, and for the time
    being, the matrix is the identity matrix. That's how we roll!"""
    n = self.n_state_elems
    self.trajectory_model = sp.eye(self.n_params*n, self.n_params*n,
                                   format="csr")
def run(self):
    # Run the linear programming algorithm.
    self.time = _time.time()
    # The objective is to solve: min V / V >= PR + discount*P*V
    # The function linprog of the optimisation Toolbox of Mathworks
    # solves:
    #     min f'*x / M*x <= b
    # So the objective could be expressed as:
    #     min V / (discount*P - I)*V <= -PR
    # To avoid a loop over states, the matrix M is structured following the
    # actions, M(A*S, S)
    f = self._cvxmat(_np.ones((self.S, 1)))
    h = _np.array(self.R).reshape(self.S * self.A, 1, order="F")
    h = self._cvxmat(h, tc='d')
    M = _np.zeros((self.A * self.S, self.S))
    for aa in range(self.A):
        pos = (aa + 1) * self.S
        M[(pos - self.S):pos, :] = (
            self.discount * self.P[aa] - _sp.eye(self.S, self.S))
    M = self._cvxmat(M)
    # Using the glpk option will make this behave more like Octave
    # (Octave uses glpk) and perhaps Matlab. If solver=None (ie using the
    # default cvxopt solver) then V agrees with the Octave equivalent
    # only to 10e-8 places. This assumes glpk is installed of course.
    self.V = _np.array(self._linprog(f, M, -h)['x']).reshape(self.S)
    # apply the Bellman operator
    self.policy, self.V = self._bellmanOperator()
    # update the time spent solving
    self.time = _time.time() - self.time
    # store value and policy as tuples
    self.V = tuple(self.V.tolist())
    self.policy = tuple(self.policy.tolist())
def visit_Eye(self, node):
    node = self.generic_visit(node)
    eye = spp.eye(node.shape[0], dtype=node.dtype)
    return SpMatrix(node._backend, eye, name=node._name)
def test_KronI(backend, M, N, K, density, alpha, beta):
    b = backend()
    mat_h = indigo.util.randM(M, N, density)
    A_h = spp.kron(spp.eye(K), mat_h)

    mat_d = b.SpMatrix(mat_h)
    A = b.KronI(K, mat_d)

    # forward
    x = b.rand_array((A.shape[1], K))
    y = b.rand_array((A.shape[0], K))
    y_exp = beta * y.to_host() + alpha * A_h @ x.to_host()
    A.eval(y, x, alpha=alpha, beta=beta)
    npt.assert_allclose(y.to_host(), y_exp, rtol=1e-5)

    # adjoint
    x = b.rand_array((A.shape[0], K))
    y = b.rand_array((A.shape[1], K))
    y_exp = beta * y.to_host() + alpha * np.conj(A_h.T) @ x.to_host()
    A.H.eval(y, x, alpha=alpha, beta=beta)
    npt.assert_allclose(y.to_host(), y_exp, rtol=1e-5)

    # shape
    assert A.shape == (M*K, N*K)
    assert A.H.shape == (N*K, M*K)

    # dtype
    assert A.dtype == np.dtype('complex64')
def test_nested_kroni(backend, batch, M, N, c1, c2):
    A00_h = indigo.util.randM(M, N, 0.9)
    A0_h = spp.kron(spp.eye(c1), A00_h)
    A_h = spp.kron(spp.eye(c2), A0_h)

    b = backend()
    A00 = b.SpMatrix(A00_h)
    A0 = b.KronI(c1, A00)
    A = b.KronI(c2, A0)

    V, U = A.shape
    u = np.random.rand(U) + 1j*np.random.rand(U)
    u = np.require(u, dtype=np.dtype('complex64'), requirements='F')
    v = np.random.rand(V) + 1j*np.random.rand(V)
    v = np.require(v, dtype=np.dtype('complex64'), requirements='F')

    # forward
    u_d = b.copy_array(u)
    v_d = b.copy_array(v)
    A.eval(v_d, u_d)
    v_act = v_d.to_host()
    v_exp = A_h @ u
    np.testing.assert_allclose(v_act, v_exp, rtol=1e-6)

    # adjoint
    u_d = b.copy_array(u)
    v_d = b.copy_array(v)
    A.H.eval(u_d, v_d)
    u_act = u_d.to_host()
    u_exp = A_h.H @ v
    np.testing.assert_allclose(u_act, u_exp, rtol=1e-6)
def default_scaling(x):
    n, = np.shape(x)
    return spc.eye(n)
def evaluate_and_initialize(self, x0, sparse_jacobian=None):
    x0 = np.atleast_1d(x0).astype(float)
    f0 = x0
    self.n = x0.size
    self.m = f0.size
    if sparse_jacobian or sparse_jacobian is None:
        J0 = spc.eye(self.n).tocsr()
        self.sparse_jacobian = True
    else:
        J0 = np.eye(self.n)
        self.sparse_jacobian = False
    self.J0 = J0
    self.kind = _check_kind(self.kind, self.m)
    self.enforce_feasibility \
        = _check_enforce_feasibility(self.enforce_feasibility, self.m)
    self.isinitialized = True

    if not _is_feasible(self.kind, self.enforce_feasibility, f0):
        warn("The initial point was changed in order "
             "to stay inside box constraints.")
        x0_new = _reinforce_box_constraint(self.kind,
                                           self.enforce_feasibility, x0)
        self.x0 = x0_new
        self.f0 = x0_new
        return x0_new
    else:
        self.x0 = x0
        self.f0 = f0
        return x0
def preprocess_adj(adj, symmetric=True):
    adj = adj + sp.eye(adj.shape[0])
    adj = normalize_adj(adj, symmetric)
    return adj
def normalized_laplacian(adj, symmetric=True):
    adj_normalized = normalize_adj(adj, symmetric)
    laplacian = sp.eye(adj.shape[0]) - adj_normalized
    return laplacian
def rescale_laplacian(laplacian):
    try:
        print('Calculating largest eigenvalue of normalized graph Laplacian...')
        largest_eigval = eigsh(laplacian, 1, which='LM',
                               return_eigenvectors=False)[0]
    except ArpackNoConvergence:
        print('Eigenvalue calculation did not converge! '
              'Using largest_eigval=2 instead.')
        largest_eigval = 2

    scaled_laplacian = (2. / largest_eigval) * laplacian \
        - sp.eye(laplacian.shape[0])
    return scaled_laplacian
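These helpers typically compose into the Chebyshev-filter preprocessing used for graph convolutions, together with chebyshev_polynomial above. A sketch of the pipeline, assuming the eigsh/ArpackNoConvergence imports used by rescale_laplacian; normalize_adj is referenced but not shown in this section, so a minimal symmetric variant is assumed here:

import numpy as np
import scipy.sparse as sp

def normalize_adj(adj, symmetric=True):
    # assumed minimal implementation: D^{-1/2} A D^{-1/2}
    deg = np.asarray(adj.sum(1)).flatten()
    d_inv_sqrt = sp.diags(np.power(deg, -0.5))
    return d_inv_sqrt @ adj @ d_inv_sqrt

adj = sp.csr_matrix(np.array([[0., 1., 0.],
                              [1., 0., 1.],
                              [0., 1., 0.]]))
L = normalized_laplacian(adj)    # I - D^{-1/2} A D^{-1/2}
L_scaled = rescale_laplacian(L)  # spectrum mapped into [-1, 1]
T_k = chebyshev_polynomial(sp.csr_matrix(L_scaled), 2)  # filter basis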
def update_U(m_opts, m_vars):
    for it in range(m_opts['PG_iters']):
        # expectation of xi_{nl} and of omega_{nl} for n = i
        P, N = E_x_omega_row(m_opts, m_vars)
        Kappa = PG_row(m_opts, m_vars)  # polyagamma kappa_{nl} for n = i
        PN = P*N
        PK = P*Kappa
        for i in range(m_opts['batch_size']):
            PN_i = PN[i][:, np.newaxis]
            PK_i = PK[i]
            sigma = m_vars['V'].T.dot(PN_i*m_vars['V']) \
                + m_opts['lam_u']*np.eye(m_opts['n_components'])
            x = m_vars['V'].T.dot(PK_i) + np.asarray(
                (m_opts['lam_u']*m_vars['W']).dot(
                    m_vars['X_batch'][i].todense().T)).reshape(-1)
            m_vars['U_batch'][i] = linalg.solve(sigma, x)
def create_test_A_b_rand(n=1000, density=0.05, matrix=False):
    np.random.seed(27)
    A = sp.csr_matrix(sp.rand(n, n, density) + sp.eye(n))
    if matrix:
        b = np.random.rand(n, 5)
    else:
        b = np.random.rand(n, 1)
    return A, b
def linearMethod(self):
    """
    Determines ``pi`` by solving a system of linear equations using
    :func:`spsolve`. The method has no parameters since it is an exact
    method. The result is stored in the class attribute ``pi``.

    Example
    -------
    >>> P = np.array([[0.5,0.5],[0.6,0.4]])
    >>> mc = markovChain(P)
    >>> mc.linearMethod()
    >>> print(mc.pi)
    [ 0.54545455  0.45454545]

    Remarks
    -------
    For large state spaces, the linear algebra solver may not work well
    due to memory overflow. Code due to
    http://stackoverflow.com/questions/21308848/
    """
    P = self.getIrreducibleTransitionMatrix()

    # if P consists of one element, then set self.pi = 1.0
    if P.shape == (1, 1):
        self.pi = np.array([1.0])
        return

    size = P.shape[0]
    dP = P - eye(size)
    # Replace the first equation by the normalizing condition.
    A = vstack([np.ones(size), dP.T[1:, :]]).tocsr()
    rhs = np.zeros((size,))
    rhs[0] = 1

    self.pi = spsolve(A, rhs)
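To see the trick in isolation, replacing one equation of (P^T - I) pi = 0 with the normalization sum(pi) = 1, here is a self-contained sketch of the same computation (the function name is illustrative):

import numpy as np
from scipy.sparse import csr_matrix, eye, vstack
from scipy.sparse.linalg import spsolve

def stationary_distribution(P):
    """Solve pi P = pi, sum(pi) = 1 for an irreducible chain."""
    P = csr_matrix(P)
    size = P.shape[0]
    dP = P - eye(size)
    # First row enforces sum(pi) = 1; the rest are (P^T - I) pi = 0.
    A = vstack([np.ones(size), dP.T[1:, :]]).tocsr()
    rhs = np.zeros(size)
    rhs[0] = 1
    return spsolve(A, rhs)

print(stationary_distribution(np.array([[0.5, 0.5], [0.6, 0.4]])))
# -> [0.54545455 0.45454545]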