We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.linalg.eigh().
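As a starting point, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic call: for a symmetric or Hermitian matrix, numpy.linalg.eigh() returns the eigenvalues in ascending order together with the corresponding orthonormal eigenvectors as columns.

    import numpy as np

    # A small symmetric matrix; by default eigh reads only its lower triangle (UPLO='L').
    a = np.array([[2.0, 1.0],
                  [1.0, 2.0]])

    w, v = np.linalg.eigh(a)           # w: ascending eigenvalues, v: eigenvectors as columns
    print(w)                           # [1. 3.]
    print(np.allclose(a @ v, v * w))   # True, since A v_i = w_i v_i for each column v_i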
def do(self, a, b):
    # note that eigenvalue arrays returned by eig must be sorted since
    # their order isn't guaranteed.
    ev, evc = linalg.eigh(a)
    evalues, evectors = linalg.eig(a)
    evalues.sort(axis=-1)
    assert_almost_equal(ev, evalues)

    assert_allclose(dot_generalized(a, evc),
                    np.asarray(ev)[..., None, :] * np.asarray(evc),
                    rtol=get_rtol(ev.dtype))

    ev2, evc2 = linalg.eigh(a, 'U')
    assert_almost_equal(ev2, evalues)

    assert_allclose(dot_generalized(a, evc2),
                    np.asarray(ev2)[..., None, :] * np.asarray(evc2),
                    rtol=get_rtol(ev.dtype), err_msg=repr(a))
def test_UPLO(self):
    Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
    Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
    tgt = np.array([-1, 1], dtype=np.double)
    rtol = get_rtol(np.double)

    # Check default is 'L'
    w, v = np.linalg.eigh(Klo)
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'L'
    w, v = np.linalg.eigh(Klo, UPLO='L')
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'l'
    w, v = np.linalg.eigh(Klo, UPLO='l')
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'U'
    w, v = np.linalg.eigh(Kup, UPLO='U')
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'u'
    w, v = np.linalg.eigh(Kup, UPLO='u')
    assert_allclose(w, tgt, rtol=rtol)
def GetBestFitPlane(pts, weights=None):
    if weights is None:
        wSum = len(pts)
        origin = np.sum(pts, 0)
    origin /= wSum

    sums = np.zeros((3, 3), np.double)
    for pt in pts:
        dp = pt - origin
        for i in range(3):
            sums[i, i] += dp[i] * dp[i]
            for j in range(i + 1, 3):
                sums[i, j] += dp[i] * dp[j]
                sums[j, i] += dp[i] * dp[j]
    sums /= wSum

    vals, vects = linalg.eigh(sums)
    order = np.argsort(vals)
    normal = vects[:, order[0]]

    plane = np.zeros((4, ), np.double)
    plane[:3] = normal
    plane[3] = -1 * normal.dot(origin)
    return plane
def __newlambdagammaC(self, theta, l):
    """
    Apply Eqns. 25-27 in Vidal 2003 to update lambda^C and gamma^C
    (lambda and gamma of this qbit).
    """
    gamma_ket = self.coefs[l+1].lam
    gamma_bra = np.conjugate(gamma_ket)
    Gamma_star = np.conjugate(self.coefs[l+1].gamma)

    inputs = [Gamma_star, theta, gamma_bra, gamma_ket]
    Gamma_star_idx = [1, -3, -2]
    theta_idx = [-1, 1, -4, -5]
    gamma_bra_idx = [-6]
    gamma_ket_idx = [-7]
    idx = [Gamma_star_idx, theta_idx, gamma_bra_idx, gamma_ket_idx]

    contract_me = scon(inputs, idx)
    svd_me = np.einsum('agibggg', contract_me)
    evals, evecs = la.eigh(svd_me)
    return evals, evecs
def plot_ellipses(self):
    q = 0.60
    r = ops.get_ellipse_rad(q)
    H = ops.get_hessian(self.X)
    eigv, rotation = la.eigh(H)
    center = self.Bh
    u = np.linspace(0.0, 2.0 * np.pi, 100)
    v = np.linspace(0.0, np.pi, 100)
    x = (r/math.sqrt(eigv[0])) * np.outer(np.cos(u), np.sin(v))
    y = (r/math.sqrt(eigv[1])) * np.outer(np.sin(u), np.sin(v))
    z = (r/math.sqrt(eigv[2])) * np.outer(np.ones_like(u), np.cos(v))
    for i in range(len(x)):
        for j in range(len(x)):
            [x[i, j], y[i, j], z[i, j]] = np.dot([x[i, j], y[i, j], z[i, j]], rotation) + center
    plt.plot(z, x)
    plt.axis('equal')
    plt.show()
def spectral_embedding(mat, target_dim, gramian=True, discard_first=True):
    if discard_first:
        last = -1
        first = target_dim - last
    else:
        first = target_dim
        last = None

    if not gramian:
        mat = mat.dot(mat.T)

    eigvals, eigvecs = eigh(mat)
    sl = slice(-first, last)
    eigvecs = eigvecs[:, sl]
    eigvals_crop = eigvals[sl]

    Y = eigvecs.dot(np.diag(np.sqrt(eigvals_crop)))
    Y = Y[:, ::-1]

    variance_explaned(eigvals, eigvals_crop)
    return Y
def make_eigvals_positive(am, targetprod):
    """For the symmetric square matrix `am`, increase any zero eigenvalues
    such that the total product of eigenvalues is greater or equal to
    `targetprod`. Returns a (possibly) new, non-singular matrix."""
    w, v = linalg.eigh(am)  # use eigh since am is symmetric
    mask = w < 1.e-10
    if np.any(mask):
        nzprod = np.product(w[~mask])  # product of nonzero eigenvalues
        nzeros = mask.sum()            # number of zero eigenvalues
        new_val = max(1.e-10, (targetprod / nzprod) ** (1. / nzeros))
        w[mask] = new_val              # adjust zero eigvals
        am_new = np.dot(np.dot(v, np.diag(w)), linalg.inv(v))  # re-form cov
    else:
        am_new = am
    return am_new
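A quick sanity check of the function above, as a sketch (the 2x2 singular test matrix is made up for illustration; `np` and `linalg` are assumed to be imported as in the source project):

    am = np.array([[1.0, 0.0],
                   [0.0, 0.0]])              # singular: eigenvalues (0, 1)
    am_new = make_eigvals_positive(am, targetprod=0.5)
    w, _ = linalg.eigh(am_new)               # the zero eigenvalue was raised to 0.5
    assert np.prod(w) >= 0.5 - 1e-12         # product of eigenvalues now meets the target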
def inv_sqrth(x):
    """
    Matrix inverse square root

    Parameters
    ----------
    x : ndarray
        Real, symmetric matrix

    Returns
    -------
    invsqrt : ndarray
        Input to the power -1/2
    """
    vals, vecs = eigh(x)
    return vecs @ diag(1 / sqrt(vals)) @ vecs.T
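A short usage sketch for inv_sqrth (assuming the module-level imports the project relies on, i.e. `eigh`, `diag`, and `sqrt` from NumPy): since the result is x**(-1/2), sandwiching x between two copies of it recovers the identity.

    import numpy as np
    from numpy import diag, sqrt
    from numpy.linalg import eigh

    x = np.array([[4.0, 1.0],
                  [1.0, 3.0]])                 # symmetric positive definite
    s = inv_sqrth(x)
    print(np.allclose(s @ x @ s, np.eye(2)))   # True: x**(-1/2) @ x @ x**(-1/2) = I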
def get_whitening_matrix(X, fudge=1E-18):
    from numpy.linalg import eigh
    Xcov = numpy.dot(X.T, X) / X.shape[0]
    d, V = eigh(Xcov)
    D = numpy.diag(1. / numpy.sqrt(d + fudge))
    W = numpy.dot(numpy.dot(V, D), V.T)
    return W
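The W computed above is the symmetric (ZCA-style) whitening transform of the data's second-moment matrix, so the whitened data has approximately identity covariance. A minimal sketch on synthetic data (the shapes and seed are arbitrary):

    import numpy
    numpy.random.seed(0)
    X = numpy.random.randn(1000, 5)
    W = get_whitening_matrix(X)
    Xw = numpy.dot(X, W)                       # W is symmetric, so X @ W whitens the rows
    cov_w = numpy.dot(Xw.T, Xw) / Xw.shape[0]
    print(numpy.allclose(cov_w, numpy.eye(5), atol=1e-6))  # True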
def __init__(self, P, q, r, relop=None):
    self.P, self.q, self.r = P, q, r
    self.qarray = np.squeeze(np.asarray(q.todense()))
    self.relop = relop
    self.eigh = None  # for ADMM

# Evaluates f with a numpy array x.
def dc_split(self, use_eigen_split=False):
    n = self.P.shape[0]
    if self.P.nnz == 0:  # P is zero
        P1, P2 = sp.csr_matrix((n, n)), sp.csr_matrix((n, n))
    if use_eigen_split:
        lmb, Q = LA.eigh(self.P.todense())
        P1 = sum([Q[:, i]*lmb[i]*Q[:, i].T for i in range(n) if lmb[i] > 0])
        P2 = sum([-Q[:, i]*lmb[i]*Q[:, i].T for i in range(n) if lmb[i] < 0])
        assert abs(np.sum(P1 - P2 - self.P)) < 1e-8
    else:
        lmb_min = np.min(LA.eigh(self.P.todense())[0])
        if lmb_min < 0:
            P1 = self.P + (1-lmb_min)*sp.identity(n)
            P2 = (1-lmb_min)*sp.identity(n)
        else:
            P1 = self.P
            P2 = sp.csr_matrix((n, n))
    f1 = QuadraticFunction(P1, self.q, self.r)
    f2 = QuadraticFunction(P2, sp.csc_matrix((n, 1)), 0)
    return (f1, f2)

# Returns the one-variable function when regarding f(x)
# as a quadratic expression in x[k].
# f is an instance of QuadraticFunction
# return value is an instance of OneVarQuadraticFunction
# TODO: speedup
def improve_admm(x0, prob, *args, **kwargs):
    num_iters = kwargs.get('num_iters', 1000)
    viol_lim = kwargs.get('viol_lim', 1e4)
    tol = kwargs.get('tol', 1e-2)
    rho = kwargs.get('rho', None)
    phase1 = kwargs.get('phase1', True)

    if rho is not None:
        lmb0, P0Q = map(np.asmatrix, LA.eigh(prob.f0.P.todense()))
        lmb_min = np.min(lmb0)
        if lmb_min + prob.m*rho < 0:
            logging.error("rho parameter is too small, z-update not convex.")
            logging.error("Minimum possible value of rho: %.3f\n", -lmb_min/prob.m)
            logging.error("Given value of rho: %.3f\n", rho)
            raise Exception("rho parameter is too small, need at least %.3f." % (-lmb_min/prob.m))

    # TODO: find a reasonable auto parameter
    if rho is None:
        lmb0, P0Q = map(np.asmatrix, LA.eigh(prob.f0.P.todense()))
        lmb_min = np.min(lmb0)
        lmb_max = np.max(lmb0)
        if lmb_min < 0:
            rho = 2.*(1.-lmb_min)/prob.m
        else:
            rho = 1./prob.m
        rho *= 50.
        logging.warning("Automatically setting rho to %.3f", rho)

    if phase1:
        x1 = prob.better(x0, admm_phase1(x0, prob, tol, num_iters))
    else:
        x1 = x0
    x2 = prob.better(x1, admm_phase2(x1, prob, rho, tol, num_iters, viol_lim))
    return x2
def test_eigh_build(self, level=rlevel):
    # Ticket 662.
    rvals = [68.60568999, 89.57756725, 106.67185574]

    cov = array([[77.70273908,   3.51489954,  15.64602427],
                 [3.51489954,   88.97013878,  -1.07431931],
                 [15.64602427,  -1.07431931,  98.18223512]])

    vals, vecs = linalg.eigh(cov)
    assert_array_almost_equal(vals, rvals)
def test_types(self):
    def check(dtype):
        x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
        w, v = np.linalg.eigh(x)
        assert_equal(w.dtype, get_real_dtype(dtype))
        assert_equal(v.dtype, dtype)
    for dtype in [single, double, csingle, cdouble]:
        yield check, dtype
def test_invalid(self):
    x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
    assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong")
    assert_raises(ValueError, np.linalg.eigh, x, "lower")
    assert_raises(ValueError, np.linalg.eigh, x, "upper")
def getPCAVideo(I):
    ICov = I.dot(I.T)
    [lam, V] = linalg.eigh(ICov)
    lam[lam < 0] = 0
    V = V * np.sqrt(lam[None, :])
    return V
def average_structure(X):
    """
    Calculate an average structure from an ensemble of structures
    (i.e. X is a rank-3 tensor: X[i] is a (N,3) configuration matrix).

    @param X: m x n x 3 input vector
    @type X: numpy array

    @return: average structure
    @rtype: (n,3) numpy.array
    """
    from numpy.linalg import eigh

    B = csb.numeric.gower_matrix(X)
    v, U = eigh(B)
    if numpy.iscomplex(v).any():
        v = v.real
    if numpy.iscomplex(U).any():
        U = U.real

    indices = numpy.argsort(v)[-3:]
    v = numpy.take(v, indices, 0)
    U = numpy.take(U, indices, 1)

    x = U * numpy.sqrt(v)
    i = 0
    while is_mirror_image(x, X[0]) and i < 2:
        x[:, i] *= -1
        i += 1
    return x
def train(self):
    super().train()
    sigma = self.Sigma_hat
    D_, U = LA.eigh(sigma)
    D = np.diagflat(D_)
    self.A = np.power(LA.pinv(D), 0.5) @ U.T
def train(self):
    super().train()
    W = self.Sigma_hat
    # prior probabilities (K, 1)
    Pi = self.Pi
    # class centroids (K, p)
    Mu = self.Mu
    p = self.p
    # the number of classes
    K = self.n_class
    # the target dimension
    L = self.L

    # Mu is a (K, p) matrix, Pi is (K, 1)
    mu = np.sum(Pi * Mu, axis=0)
    B = np.zeros((p, p))
    for k in range(K):
        # vector @ vector gives a scalar, so use vector[:, None] to promote it to a matrix;
        # vec[:, None] is equivalent to vec.reshape((vec.shape[0], 1))
        B = B + Pi[k]*((Mu[k] - mu)[:, None] @ ((Mu[k] - mu)[None, :]))

    # Be careful: `eigh` returns the eigenvalues in ascending order, which is
    # the opposite of R.
    Dw, Uw = LA.eigh(W)
    # reverse Dw and Uw
    Dw = Dw[::-1]
    Uw = np.fliplr(Uw)
    W_half = self.math.pinv(np.diagflat(Dw**0.5) @ Uw.T)
    B_star = W_half.T @ B @ W_half
    D_, V = LA.eigh(B_star)
    # reverse V
    V = np.fliplr(V)

    # overwrite `self.A` so that we can reuse the `predict` method defined in the parent class
    self.A = np.zeros((L, p))
    for l in range(L):
        self.A[l, :] = W_half @ V[:, l]
def __newgammaD(self, theta, l):
    """
    Apply Eqns. 22-24 in Vidal 2003 to update gamma^D
    (gamma of the next qbit).
    """
    rhoDK = self.__rhoDK(theta, self.coefs[l-1].lam)

    # diagonalize
    idx = self.chi * self.hdim
    rhoDKflat = rhoDK.reshape([idx, idx])
    evals, evecs = la.eigh(rhoDKflat)  # note rho is a density matrix and thus hermitian
    evals = evals[:self.chi]
    evecs = evecs[:, :self.chi]
    return evecs
def plot_ellipsoid(self):
    q = 0.60
    r = ops.get_ellipse_rad(q)
    H = ops.get_hessian(self.X)
    eigv, rotation = la.eigh(H)
    center = self.Bh
    u = np.linspace(0.0, 2.0 * np.pi, 100)
    v = np.linspace(0.0, np.pi, 100)
    x = (r/math.sqrt(eigv[0])) * np.outer(np.cos(u), np.sin(v))
    y = (r/math.sqrt(eigv[1])) * np.outer(np.sin(u), np.sin(v))
    z = (r/math.sqrt(eigv[2])) * np.outer(np.ones_like(u), np.cos(v))
    ux = (r/math.sqrt(eigv[0])) * np.outer(np.cos(u), np.sin(v))
    uy = (r/math.sqrt(eigv[1])) * np.outer(np.sin(u), np.sin(v))
    uz = (r/math.sqrt(eigv[2])) * np.outer(np.ones_like(u), np.cos(v))
    for i in range(len(x)):
        for j in range(len(x)):
            [x[i, j], y[i, j], z[i, j]] = np.dot([x[i, j], y[i, j], z[i, j]], rotation) + center

    # plot
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(x, y, z, rstride=2, cstride=5, color='g', alpha=0.5)
    ax.plot_surface(ux, uy, uz, rstride=2, cstride=5, color='r', alpha=0.5)
    plt.axis('equal')
    plt.show()
def pca_fit(X, var_ratio=1, return_transform=True):
    """
    Parameters
    ----------
    X : array_like
        An array of data samples with shape (n_samples, n_features).
    var_ratio : float
        The variance ratio to be captured (Default value = 1).
    return_transform : bool
        Whether to apply the transformation to the given data.

    Returns
    -------
    array_like
        If return_transform is True, an array with shape (n_samples, n_components)
        which is the input samples projected onto `n_components` principal
        components. Otherwise the first `n_components` eigenvectors of the
        covariance matrix corresponding to the `n_components` largest
        eigenvalues are returned as rows.
    """
    cov_ = np.cov(X, rowvar=False)  # Mean is removed
    evals, evecs = LA.eigh(cov_)    # Get eigenvalues in ascending order, eigenvectors in columns
    evecs = np.fliplr(evecs)        # Flip eigenvectors to get them in descending eigenvalue order

    if var_ratio == 1:
        L = evecs.T
    else:
        evals = np.flip(evals, axis=0)
        var_exp = np.cumsum(evals)
        var_exp = var_exp / var_exp[-1]
        # +1 so that the component which first reaches the requested ratio is included
        n_components = np.argmax(np.greater_equal(var_exp, var_ratio)) + 1
        L = evecs.T[:n_components]  # Set the first n_components eigenvectors as rows of L

    if return_transform:
        return X.dot(L.T)
    else:
        return L
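A usage sketch for pca_fit (synthetic data; the 0.95 ratio and the shapes are arbitrary): with var_ratio < 1 it keeps just enough leading principal components to explain that fraction of the variance.

    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(200, 10)
    X[:, 0] *= 10.0                          # inflate one direction so few components dominate

    Xp = pca_fit(X, var_ratio=0.95)          # projected samples, shape (200, n_components)
    L = pca_fit(X, var_ratio=0.95, return_transform=False)
    print(Xp.shape, L.shape)                 # n_components < 10 here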
def _get_whitening_matrix(self):
    Xcov = numpy.dot(self.silences.T, self.silences) / self.silences.shape[0]
    d, V = eigh(Xcov)
    D = numpy.diag(1. / numpy.sqrt(d + self.fudge))
    self.whitening_matrix = numpy.dot(numpy.dot(V, D), V.T).astype(numpy.float32)
def __init__(self, ctr, am):
    self.n = len(ctr)          # dimension
    self.ctr = np.array(ctr)   # center coordinates
    self.am = np.array(am)     # precision matrix (inverse of covariance)

    # Volume of ellipsoid is the volume of an n-sphere divided
    # by the (determinant of the) Jacobian associated with the
    # transformation, which by definition is the precision matrix.
    self.vol = vol_prefactor(self.n) / np.sqrt(linalg.det(self.am))

    # The eigenvalues (l) of `am` are (a^-2, b^-2, ...) where
    # (a, b, ...) are the lengths of the principal axes.
    # The eigenvectors (v) are the normalized principal axes.
    l, v = linalg.eigh(self.am)
    if np.all((l > 0.) & (np.isfinite(l))):
        self.axlens = 1. / np.sqrt(l)
    else:
        raise ValueError("The input precision matrix defining the "
                         "ellipsoid {0} is apparently singular with "
                         "l={1} and v={2}.".format(self.am, l, v))

    # Scaled eigenvectors are the axes, where `axes[:,i]` is the
    # i-th axis. Multiplying this matrix by a vector will transform a
    # point in the unit n-sphere to a point in the ellipsoid.
    self.axes = np.dot(v, np.diag(self.axlens))

    # Amount by which volume was increased after initialization (i.e.
    # cumulative factor from `scale_to_vol`).
    self.expand = 1.
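To make the geometry above concrete, a small hedged sketch (assuming the class is named `Ellipsoid` and its helper `vol_prefactor` is available, as in the source project): the eigenvalues of the precision matrix are 1/a**2 and 1/b**2 for semi-axes a and b, so `axlens` recovers the axis lengths.

    import numpy as np
    from numpy import linalg

    am = np.diag([1. / 4., 1. / 9.])    # precision matrix of an ellipse with semi-axes 2 and 3
    ell = Ellipsoid([0., 0.], am)
    print(np.sort(ell.axlens))          # [2. 3.]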