The following are 50 code examples, extracted from open source Python projects, that illustrate how to use numpy.linalg.eig().
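As a baseline, here is a minimal self-contained sketch of the call itself (plain NumPy, nothing project-specific; the example matrix is illustrative):

import numpy as np

# A small non-symmetric matrix: eig() returns possibly complex,
# unsorted eigenvalues w and the corresponding column eigenvectors v.
A = np.array([[0.0, 1.0],
              [-2.0, -3.0]])
w, v = np.linalg.eig(A)

# Each column v[:, i] satisfies A @ v[:, i] == w[i] * v[:, i].
for i in range(len(w)):
    assert np.allclose(A @ v[:, i], w[i] * v[:, i])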
def test_eig_build(self, level=rlevel):
    # Ticket #652: check eig() against reference eigenvalues for a 13x13 matrix
    rva = array([1.03221168e+02 + 0.j,
                 -1.91843603e+01 + 0.j,
                 -6.04004526e-01 + 15.84422474j,
                 -6.04004526e-01 - 15.84422474j,
                 -1.13692929e+01 + 0.j,
                 -6.57612485e-01 + 10.41755503j,
                 -6.57612485e-01 - 10.41755503j,
                 1.82126812e+01 + 0.j,
                 1.06011014e+01 + 0.j,
                 7.80732773e+00 + 0.j,
                 -7.65390898e-01 + 0.j,
                 1.51971555e-15 + 0.j,
                 -1.51308713e-15 + 0.j])
    a = arange(13 * 13, dtype=float64)
    a.shape = (13, 13)
    a = a % 17
    va, ve = linalg.eig(a)
    va.sort()
    rva.sort()
    assert_array_almost_equal(va, rva)
def get_dimensions(self):
    # Calculate within-class scatter
    Sw = np.sum(list(self._scatter_matrix_by_class.values()), axis=0)
    # Calculate between-class scatter
    s = (self._p, self._p)
    Sb = np.zeros(s)
    for k in self._classes:
        a = self._mean_vector_by_class[k] - self._global_mean_vector
        Sb += self._N_by_class[k] * a.dot(a.T)
    # Eigen-decomposition of Sw^-1 Sb; eig() returns eigenvectors as
    # columns, so sort the columns by decreasing eigenvalue
    Sw_inv = inv(Sw)
    A = Sw_inv.dot(Sb)
    eigen_values, eigen_vectors = eig(A)
    idx = np.argsort(eigen_values)[::-1]
    eigen_vectors = eigen_vectors[:, idx]
    return eigen_vectors
def getPrincipalAxes(self):
    X = self.VPos - self.getCentroid()
    XTX = (X.T).dot(X)
    (lambdas, axes) = linalg.eig(XTX)
    # Put the eigenvalues in decreasing order
    idx = lambdas.argsort()[::-1]
    lambdas = lambdas[idx]
    axes = axes[:, idx]
    T = X.dot(axes)
    maxProj = T.max(0)
    minProj = T.min(0)
    axes = axes.T  # Put each axis on each row to be consistent with everything else
    return (axes, maxProj, minProj)

# Delete the parts of the mesh below "plane". If fillHoles
# is true, plug up the holes that result from the cut
def __compute_lanczos(self, H, G):
    """Compute change-point score using the Lanczos method.
    """
    # assuming m = w
    self.q, _, _ = power1(G, self.q, n_iter=1)
    k = 2 * self.r if self.r % 2 == 0 else 2 * self.r - 1
    T = lanczos(np.dot(H, H.T), self.q, k)

    # find eigenvectors and eigenvalues of T
    # eigvals, eigvecs = ln.eig(T)
    eigvals, eigvecs = tridiag_eig(T, n_iter=1)

    # `eig()` returns unordered eigenvalues,
    # so the top-r eigenvectors should be picked carefully
    return 1 - np.sqrt(np.sum(eigvecs[0, np.argsort(eigvals)[::-1][:self.r]] ** 2))
def solve_spectral(prob, *args, **kwargs):
    """Solve the spectral relaxation with lambda = 1.
    """
    # TODO: do this efficiently without SDP lifting
    # NOTE: uses the pre-1.0 cvxpy API (Semidef, sum_entries, mul_elemwise)

    # lifted variables and semidefinite constraint
    X = cvx.Semidef(prob.n + 1)

    W = prob.f0.homogeneous_form()
    rel_obj = cvx.Minimize(cvx.sum_entries(cvx.mul_elemwise(W, X)))

    W1 = sum([f.homogeneous_form() for f in prob.fs if f.relop == '<='])
    W2 = sum([f.homogeneous_form() for f in prob.fs if f.relop == '=='])

    rel_prob = cvx.Problem(
        rel_obj,
        [
            cvx.sum_entries(cvx.mul_elemwise(W1, X)) <= 0,
            cvx.sum_entries(cvx.mul_elemwise(W2, X)) == 0,
            X[-1, -1] == 1
        ]
    )
    rel_prob.solve(*args, **kwargs)

    if rel_prob.status not in [cvx.OPTIMAL, cvx.OPTIMAL_INACCURATE]:
        raise Exception("Relaxation problem status: %s" % rel_prob.status)

    (w, v) = LA.eig(X.value)
    return np.sqrt(np.max(w)) * np.asarray(v[:-1, np.argmax(w)]).flatten(), rel_prob.value
# assumed imports: import numpy as NP; from numpy.linalg import eig, inv
def fit_ellipse(x, y):
    # Direct least-squares conic fit: build the design matrix D for the
    # general conic and solve the constrained problem via eig(inv(S) C)
    x = x[:, NP.newaxis]
    y = y[:, NP.newaxis]
    D = NP.hstack((x*x, x*y, y*y, x, y, NP.ones_like(x)))
    S = NP.dot(D.T, D)
    C = NP.zeros([6, 6])
    C[0, 2] = C[2, 0] = 2; C[1, 1] = -1
    E, V = eig(NP.dot(inv(S), C))
    n = NP.argmax(NP.abs(E))
    a = V[:, n]
    return a
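A hypothetical usage sketch for the fitter above (the sample points, noise level, and seed are illustrative, not from the original project):

import numpy as NP

# Points on an axis-aligned ellipse with semi-axes 3 and 1, plus a little
# noise so the scatter matrix S stays well conditioned.
rng = NP.random.default_rng(0)
t = NP.linspace(0, 2 * NP.pi, 200)
x = 3.0 * NP.cos(t) + rng.normal(scale=0.01, size=t.size)
y = 1.0 * NP.sin(t) + rng.normal(scale=0.01, size=t.size)

# a holds the six conic coefficients [A, B, C, D, E, F] of
# A*x^2 + B*x*y + C*y^2 + D*x + E*y + F = 0, up to scale.
a = fit_ellipse(x, y)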
def train_bbox_regressor(X, bbox, gt):
    # Data, get_examples and solve are project-local helpers;
    # dot, diag, sqrt and eig are assumed to come from numpy / numpy.linalg
    config = Data()
    config.min_overlap = 0.6
    config.delta = 1000
    config.method = 'ridge_reg_chol'
    # get training groundtruth
    Y, O = get_examples(bbox, gt)
    X = X[O > config.min_overlap]
    Y = Y[O > config.min_overlap]
    # add bias
    X = np.c_[X, np.ones([X.shape[0], 1])]
    # center and decorrelate targets
    mu = np.mean(Y, axis=0).reshape(1, -1)
    Y = Y - mu
    S = dot(Y.T, Y) / Y.shape[0]
    D, V = eig(S)
    T = dot(dot(V, diag(1.0 / sqrt(D + 0.001))), V.T)
    T_inv = dot(dot(V, diag(sqrt(D + 0.001))), V.T)
    Y = dot(Y, T)
    model = Data()
    model.mu = mu
    model.T = T
    model.T_inv = T_inv
    model.Beta = np.c_[solve(X, Y[:, 0], config.delta, config.method),
                       solve(X, Y[:, 1], config.delta, config.method),
                       solve(X, Y[:, 2], config.delta, config.method),
                       solve(X, Y[:, 3], config.delta, config.method)]
    # pack
    bbox_reg = Data()
    bbox_reg.model = model
    bbox_reg.config = config
    return bbox_reg
def do(self, a, b):
    ev = linalg.eigvals(a)
    evalues, evectors = linalg.eig(a)
    assert_almost_equal(ev, evalues)
def do(self, a, b):
    evalues, evectors = linalg.eig(a)
    assert_allclose(dot_generalized(a, evectors),
                    np.asarray(evectors) * np.asarray(evalues)[..., None, :],
                    rtol=get_rtol(evalues.dtype))
    assert_(imply(isinstance(a, matrix), isinstance(evectors, matrix)))
def test_types(self):
    def check(dtype):
        # real eigenvalues here, so the result dtype matches the input dtype
        x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
        w, v = np.linalg.eig(x)
        assert_equal(w.dtype, dtype)
        assert_equal(v.dtype, dtype)

        # complex eigenvalues, so the result is promoted to the matching complex dtype
        x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
        w, v = np.linalg.eig(x)
        assert_equal(w.dtype, get_complex_dtype(dtype))
        assert_equal(v.dtype, get_complex_dtype(dtype))

    for dtype in [single, double, csingle, cdouble]:
        yield check, dtype
def do(self, a, b):
    # note that eigenvalue arrays returned by eig must be sorted since
    # their order isn't guaranteed.
    ev = linalg.eigvalsh(a, 'L')
    evalues, evectors = linalg.eig(a)
    evalues.sort(axis=-1)
    assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype))

    ev2 = linalg.eigvalsh(a, 'U')
    assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))
# assumed imports: import numpy as np; from numpy.linalg import eig, inv
def fitEllipse(x, y):
    x = x[:, np.newaxis]
    y = y[:, np.newaxis]
    D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
    S = np.dot(D.T, D)
    C = np.zeros([6, 6])
    C[0, 2] = C[2, 0] = 2; C[1, 1] = -1
    E, V = eig(np.dot(inv(S), C))
    n = np.argmax(np.abs(E))
    a = V[:, n]
    return a
def steady_state(P):
    """
    Calculates the steady state probability vector for a regular Markov
    transition matrix P

    Parameters
    ----------
    P        : matrix (kxk)
               an ergodic Markov transition probability matrix

    Returns
    -------
    implicit : matrix (kx1)
               steady state distribution

    Examples
    --------
    Taken from Kemeny and Snell. [1]_ Land of Oz example where the states are
    Rain, Nice and Snow - so there is 25 percent chance that if it rained in
    Oz today, it will snow tomorrow, while if it snowed today in Oz there is
    a 50 percent chance of snow again tomorrow and a 25 percent chance of a
    nice day (nice, like when the witch with the monkeys is melting).

    >>> import numpy as np
    >>> p=np.matrix([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
    >>> steady_state(p)
    matrix([[ 0.4],
            [ 0.2],
            [ 0.4]])

    Thus, the long run distribution for Oz is to have 40 percent of the days
    classified as Rain, 20 percent as Nice, and 40 percent as Snow (states
    are mutually exclusive).
    """
    v, d = la.eig(np.transpose(P))

    # for a regular P maximum eigenvalue will be 1
    mv = max(v)
    # find its position
    i = v.tolist().index(mv)

    # normalize eigenvector corresponding to the eigenvalue 1
    return d[:, i] / sum(d[:, i])
def normalize_spectral_radius(M, g):
    """reservoirs.normalize_spectral_radius

    Normalize the spectral radius of a given matrix M to scale to g
    """
    # logger.debug('normalize_spectral_radius: M = %s, g = %s', M, g)
    # eig_success = False
    # eig_cnt = 0
    # compute eigenvalues
    # while not eig_success and eig_cnt < 100:
    try:
        [w, v] = LA.eig(M)
        eig_success = True
    except LA.LinAlgError as e:
        # logger.error('normalize_spectral_radius LA.eig(M) failed for N = %d with e = %s', M.shape[0], e)
        # eig_cnt += 1
        pass

    # get maximum absolute eigenvalue
    lae = np.max(np.abs(w))
    # if lae < 1e-3:
    #     lae = 1
    assert lae > 1e-3, "Largest eigenvalue is close to zero with lae = %f" % (lae, )

    # normalize matrix by max ev
    M /= lae
    # scale normalized matrix to desired spectral radius
    M *= g
    # logger.debug('normalize_spectral_radius: M = %s, g = %s, lae = %s', M, g, lae)

    # check for scaling
    [w, v] = LA.eig(M)
    lae = np.max(np.abs(w))
    # print "normalize_spectral_radius: lae post/desired = %f / %f" % (lae, g)
    assert np.abs(g - lae) < 0.1

################################################################################
# input matrix creation
# assumed imports: from numpy import sqrt, arange, diag, argsort
#                  from numpy.linalg import eig
def gauss(N):
    # Golub-Welsch: nodes and weights of the N-point Gauss-Legendre rule
    # from the eigen-decomposition of the Jacobi matrix T
    beta = 0.5 / sqrt(1. - (2. * arange(1, N)) ** (-2))
    T = diag(beta, 1) + diag(beta, -1)
    x, V = eig(T)
    i = argsort(x)
    x = x[i]
    w = 2 * V[0, i] ** 2
    return x, w
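A quick sanity check of the routine above against NumPy's built-in Gauss-Legendre rule (a sketch; assumes the imports noted above the function):

import numpy as np

x, w = gauss(5)                                     # nodes and weights via the Jacobi matrix
x_ref, w_ref = np.polynomial.legendre.leggauss(5)   # NumPy's reference rule
assert np.allclose(x, x_ref) and np.allclose(w, w_ref)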
# assumed imports: import networkx as nx; import numpy as np
# from numpy import linalg as LA
def modularity(graph):
    # convert to numpy adjacency matrix
    A = nx.to_numpy_matrix(graph)
    # compute adjacency matrix A's degree centrality
    degree_centrality = np.sum(A, axis=0, dtype=int)
    m = np.sum(degree_centrality, dtype=int) / 2
    # compute the modularity matrix B
    B = np.zeros(A.shape, dtype=float)
    for i in range(len(A)):
        for j in range(len(A)):
            B[i, j] = A[i, j] - (degree_centrality[0, i] * degree_centrality[0, j]) / float(2 * m)
    # the leading eigenvector of B gives the two-community split
    w, v = LA.eig(B)
    wmax = np.argmax(w)
    s = np.zeros((len(A), 1), dtype=float)
    for i in range(len(A)):
        if v[i, wmax] < 0:
            s[i, 0] = -1
        else:
            s[i, 0] = 1
    Q = s.T.dot(B.dot(s)) / float(4 * m)
    return Q[0, 0], s
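A possible call for the function above, assuming a networkx version that still provides nx.to_numpy_matrix (the karate-club graph is only an illustrative input):

import networkx as nx

G = nx.karate_club_graph()
Q, s = modularity(G)
# Q is the modularity score of the two-way split encoded in the +/-1 vector s
print(Q, s.ravel())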
# assumed imports: import numpy as np; from numpy import linalg as ln
def update(self, Y):
    """Alg. 3: Randomized streaming update of the singular vectors at time t.

    Args:
        Y (numpy array): m-by-n_t matrix which has n_t "normal" unit vectors.
    """
    if not hasattr(self, 'E'):
        # initial sketch
        M = np.empty_like(Y)
        M[:] = Y[:]
    else:
        # combine current sketched matrix with input at time t
        # D: m-by-(n+ell-1) matrix
        M = np.concatenate((self.E[:, :-1], Y), axis=1)

    G = np.random.normal(0., 0.1, (self.m, 100 * self.ell))
    MM = np.dot(M, M.T)
    Q, R = ln.qr(np.dot(MM, G))

    # eig() returns eigen values/vectors with unsorted order
    s, A = ln.eig(np.dot(np.dot(Q.T, MM), Q))
    order = np.argsort(s)[::-1]
    s = s[order]
    A = A[:, order]

    U = np.dot(Q, A)

    # update k orthogonal bases
    self.U_k = U[:, :self.k]

    U_ell = U[:, :self.ell]
    s_ell = s[:self.ell]

    # shrink step in the Frequent Directions algorithm
    delta = s_ell[-1]
    s_ell = np.sqrt(s_ell - delta)

    self.E = np.dot(U_ell, np.diag(s_ell))
# assumed imports: from numpy import arctan2, linspace, sqrt, cos, sin, pi, array, dot
# from numpy.linalg import eig; from scipy.stats import chi2
def get_ellipse_points(self, mean, covariance, confidence=0.85):
    eigenvalues, eigenvectors = eig(covariance)

    # Get the index of the largest eigenvector
    largest_eigenvector_index = eigenvalues.argmax()
    largest_eigenvalue = eigenvalues[largest_eigenvector_index]
    largest_eigenvector = eigenvectors[:, largest_eigenvector_index]

    # Get the smallest eigenvector and eigenvalue
    smallest_eigenvector_index = (largest_eigenvector_index + 1) % 2
    smallest_eigenvalue = eigenvalues[smallest_eigenvector_index]
    smallest_eigenvector = eigenvectors[:, smallest_eigenvector_index]

    # Calculate the angle between the x-axis and the largest eigenvector
    angle = arctan2(largest_eigenvector[1], largest_eigenvector[0])
    # This angle is between -pi and pi.
    # Let's shift it such that the angle is between 0 and 2pi
    if angle < 0:
        angle += 2 * pi

    # Get the confidence interval error ellipse
    chisquare_val = chi2.ppf(confidence, df=2) ** 0.5
    theta_grid = linspace(0, 2 * pi)
    phi = angle
    X0 = mean[0]
    Y0 = mean[1]
    a = chisquare_val * sqrt(largest_eigenvalue)
    b = chisquare_val * sqrt(smallest_eigenvalue)

    # the ellipse in x and y coordinates
    ellipse_x_r = a * cos(theta_grid)
    ellipse_y_r = b * sin(theta_grid)

    # Define a rotation matrix
    R = array([[cos(phi), sin(phi)], [-sin(phi), cos(phi)]])

    # let's rotate the ellipse to some angle phi
    r_ellipse = dot(array([ellipse_x_r, ellipse_y_r]).T, R)

    return r_ellipse + array([X0, Y0])
def test_make_spd_matrix():
    X = make_spd_matrix(n_dim=5, random_state=0)

    assert_equal(X.shape, (5, 5), "X shape mismatch")
    assert_array_almost_equal(X, X.T)

    from numpy.linalg import eig
    eigenvalues, _ = eig(X)
    assert_array_equal(eigenvalues > 0, np.array([True] * 5),
                       "X is not positive-definite")
# assumed import: from numpy import linalg as lg
def biggest_eigen_k(matrix, K):
    """Return the K eigenvector/eigenvalue pairs associated
    with the K largest eigenvalues"""
    eig_val, eig_vec = lg.eig(matrix)
    order = eig_val.argsort()[::-1]
    eig_val = eig_val[order]
    eig_vec = eig_vec[:, order]
    return eig_vec[:, :K], eig_val[:K]
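A tiny illustrative call (assuming lg is numpy.linalg, as noted above; the diagonal test matrix is made up for the example):

import numpy as np

M = np.diag([5.0, 2.0, 1.0])
vecs, vals = biggest_eigen_k(M, 2)
# vals -> array([5., 2.]); vecs holds the two matching unit eigenvectors as columns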