The following 50 code examples, extracted from Python open source projects, illustrate how to use scipy.linalg.eigh().
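Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the data is illustrative) of the call patterns that recur throughout: a full decomposition, eigenvalues only, and the generalized problem eigh(A, B).

import numpy as np
from scipy.linalg import eigh

# Illustrative symmetric matrix.
rng = np.random.RandomState(0)
A = rng.randn(5, 5)
A = A + A.T

# Full decomposition: eigenvalues in ascending order, eigenvectors as columns.
w, V = eigh(A)

# Eigenvalues only, which skips computing the eigenvectors.
w_only = eigh(A, eigvals_only=True)

# Generalized problem A x = lambda B x with a symmetric positive definite B,
# as used by the beamforming and Xdawn examples below.
B = np.eye(5) + 0.1 * np.ones((5, 5))
w_gen, V_gen = eigh(A, B)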
def _sym_decorrelation(W):
    """ Symmetric decorrelation
    i.e. W <- (W * W.T) ^{-1/2} * W
    """
    s, u = linalg.eigh(np.dot(W, W.T))
    # u (resp. s) contains the eigenvectors (resp. square roots of
    # the eigenvalues) of W * W.T
    return np.dot(np.dot(u * (1. / np.sqrt(s)), u.T), W)
def removeTopPCs(X, numRemovePCs):
    t0 = time.time()
    X_mean = X.mean(axis=0)
    X -= X_mean
    XXT = symmetrize(blas.dsyrk(1.0, X, lower=0))
    s, U = la.eigh(XXT)
    if (np.min(s) < -1e-4):
        raise Exception('Negative eigenvalues found')
    s[s < 0] = 0
    ind = np.argsort(s)[::-1]
    U = U[:, ind]
    s = s[ind]
    s = np.sqrt(s)

    # remove null PCs
    ind = (s > 1e-6)
    U = U[:, ind]
    s = s[ind]

    V = X.T.dot(U / s)
    # print 'max diff:', np.max(((U*s).dot(V.T) - X)**2)
    X = (U[:, numRemovePCs:] * s[numRemovePCs:]).dot((V.T)[numRemovePCs:, :])
    X += X_mean
    return X
def low_rank_align(X, Y, Cxy, d=None, mu=0.8):
    """Input: data matrices X, Y, correspondence matrix Cxy,
              embedding dimension d, and correspondence weight mu
       Output: embedded X and embedded Y
    """
    nx, dx = X.shape
    ny, dy = Y.shape
    assert Cxy.shape == (nx, ny), \
        'Correspondence matrix must be shape num_X_samples X num_Y_samples.'
    C = np.fliplr(block_diag(np.fliplr(Cxy), np.fliplr(Cxy.T)))
    if d is None:
        d = min(dx, dy)
    Rx = low_rank_repr(X, d)
    Ry = low_rank_repr(Y, d)
    R = block_diag(Rx, Ry)
    tmp = np.eye(R.shape[0]) - R
    M = tmp.T.dot(tmp)
    L = laplacian(C)
    eigen_prob = (1 - mu) * M + 2 * mu * L
    _, F = eigh(eigen_prob, eigvals=(1, d), overwrite_a=True)
    Xembed = F[:nx]
    Yembed = F[nx:]
    return Xembed, Yembed
def nvecs(X, n, rank, do_flipsign=True, dtype=np.float):
    """
    Eigendecomposition of mode-n unfolding of a tensor
    """
    Xn = X.unfold(n)
    if issparse_mat(Xn):
        Xn = csr_matrix(Xn, dtype=dtype)
        Y = Xn.dot(Xn.T)
        _, U = eigsh(Y, rank, which='LM')
    else:
        Y = Xn.dot(Xn.T)
        N = Y.shape[0]
        _, U = eigh(Y, eigvals=(N - rank, N - 1))
        #_, U = eigsh(Y, rank, which='LM')
    # reverse order of eigenvectors such that eigenvalues are decreasing
    U = array(U[:, ::-1])
    # flip sign
    if do_flipsign:
        U = flipsign(U)
    return U
def concurrence(state):
    """Calculate the concurrence.

    Args:
        state (np.array): a quantum state
    Returns:
        concurrence.
    """
    rho = np.array(state)
    if rho.ndim == 1:
        rho = outer(state)
    if len(state) != 4:
        raise Exception("Concurrence is not defined for more than two qubits")

    YY = np.fliplr(np.diag([-1, 1, 1, -1]))
    A = rho.dot(YY).dot(rho.conj()).dot(YY)
    w = la.eigh(A, eigvals_only=True)
    w = np.sqrt(np.maximum(w, 0))
    return max(0.0, w[-1] - np.sum(w[0:-1]))


###############################################################
# Other.
###############################################################
def get_pca_vector(target_psd_matrix):
    """
    Returns the beamforming vector of a PCA beamformer.
    :param target_psd_matrix: Target PSD matrix
        with shape (..., sensors, sensors)
    :return: Set of beamforming vectors with shape (..., sensors)
    """
    # Save the shape of target_psd_matrix
    shape = target_psd_matrix.shape

    # Reduce independent dims to 1 independent dim
    target_psd_matrix = np.reshape(target_psd_matrix, (-1,) + shape[-2:])

    # Calculate eigenvals/vecs
    eigenvals, eigenvecs = np.linalg.eigh(target_psd_matrix)

    # Find max eigenvals
    vals = np.argmax(eigenvals, axis=-1)

    # Select eigenvec for max eigenval
    beamforming_vector = np.array(
        [eigenvecs[i, :, vals[i]] for i in range(eigenvals.shape[0])])

    # Reconstruct original shape
    beamforming_vector = np.reshape(beamforming_vector, shape[:-1])
    return beamforming_vector
def get_gev_vector(target_psd_matrix, noise_psd_matrix):
    """
    Returns the GEV beamforming vector.
    :param target_psd_matrix: Target PSD matrix
        with shape (bins, sensors, sensors)
    :param noise_psd_matrix: Noise PSD matrix
        with shape (bins, sensors, sensors)
    :return: Set of beamforming vectors with shape (bins, sensors)
    """
    bins, sensors, _ = target_psd_matrix.shape
    beamforming_vector = np.empty((bins, sensors), dtype=np.complex)
    for f in range(bins):
        try:
            eigenvals, eigenvecs = eigh(target_psd_matrix[f, :, :],
                                        noise_psd_matrix[f, :, :])
        except np.linalg.LinAlgError:
            eigenvals, eigenvecs = eig(target_psd_matrix[f, :, :],
                                       noise_psd_matrix[f, :, :])
        beamforming_vector[f, :] = eigenvecs[:, np.argmax(eigenvals)]
    return beamforming_vector
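A hedged usage sketch for get_gev_vector above. The random Hermitian positive definite matrices stand in for real target/noise PSD estimates and are purely illustrative; the snippet's own imports (numpy as np, scipy.linalg's eigh and eig) are assumed in scope.

import numpy as np

# Illustrative only: random Hermitian positive definite matrices with the
# (bins, sensors, sensors) shape the function expects; not real audio PSDs.
rng = np.random.RandomState(0)

def random_psd(bins, sensors):
    X = rng.randn(bins, sensors, sensors) + 1j * rng.randn(bins, sensors, sensors)
    # X X^H per bin, plus a small diagonal load to guarantee positive definiteness.
    return np.einsum('fij,fkj->fik', X, X.conj()) + 1e-3 * np.eye(sensors)

target = random_psd(64, 4)
noise = random_psd(64, 4)
w = get_gev_vector(target, noise)
print(w.shape)  # (64, 4)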
def get_gevd_vals_vecs(target_psd_matrix, noise_psd_matrix):
    """
    Returns the eigenvalues and eigenvectors of GEVD
    :param target_psd_matrix: Target PSD matrix
        with shape (bins, sensors, sensors)
    :param noise_psd_matrix: Noise PSD matrix
        with shape (bins, sensors, sensors)
    :return: Set of eigenvalues with shape (bins, sensors) and
        eigenvectors with shape (bins, sensors, sensors)
    """
    bins, sensors, _ = target_psd_matrix.shape
    eigen_values = np.empty((bins, sensors), dtype=np.complex)
    eigen_vectors = np.empty((bins, sensors, sensors), dtype=np.complex)
    for f in range(bins):
        try:
            eigenvals, eigenvecs = eigh(target_psd_matrix[f, :, :],
                                        noise_psd_matrix[f, :, :])
        except np.linalg.LinAlgError:
            eigenvals, eigenvecs = eig(target_psd_matrix[f, :, :],
                                       noise_psd_matrix[f, :, :])
        # values in increasing order
        eigen_values[f, :] = eigenvals
        eigen_vectors[f, :] = eigenvecs
    return eigen_values, eigen_vectors
def test_jw_restrict_operator(self):
    """Test the scheme for restricting JW encoded operators to number"""
    # Make a Hamiltonian that cares mostly about number of electrons
    n_qubits = 6
    target_electrons = 3
    penalty_const = 100.
    number_sparse = jordan_wigner_sparse(number_operator(n_qubits))
    bias_sparse = jordan_wigner_sparse(
        sum([FermionOperator(((i, 1), (i, 0)), 1.0)
             for i in range(n_qubits)], FermionOperator()))
    hamiltonian_sparse = penalty_const * (
        number_sparse - target_electrons *
        scipy.sparse.identity(2**n_qubits)).dot(
        number_sparse - target_electrons *
        scipy.sparse.identity(2**n_qubits)) + bias_sparse

    restricted_hamiltonian = jw_number_restrict_operator(
        hamiltonian_sparse, target_electrons, n_qubits)
    true_eigvals, _ = eigh(hamiltonian_sparse.A)
    test_eigvals, _ = eigh(restricted_hamiltonian.A)

    self.assertAlmostEqual(norm(true_eigvals[:20] - test_eigvals[:20]), 0.0)
def rbf_kernel_pca(X, gamma, n_components):
    # pairwise squared Euclidean distances -> RBF kernel matrix
    sq_dists = pdist(X, 'sqeuclidean')
    mat_sq_dists = squareform(sq_dists)
    K = exp(-gamma * mat_sq_dists)

    # center the kernel matrix
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)

    # eigh returns eigenvalues in ascending order; take the top n_components
    eigenvalues, eigenvectors = eigh(K)
    alphas = np.column_stack((
        eigenvectors[:, -i] for i in range(1, n_components + 1)
    ))
    lambdas = [eigenvalues[-i] for i in range(1, n_components + 1)]
    return alphas, lambdas
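A short, hedged usage sketch for rbf_kernel_pca above; it assumes the helper and its imports (pdist, squareform, exp, eigh, numpy as np) are in scope, and the random data and gamma value are purely illustrative.

import numpy as np

# Illustrative call on random 2-D data.
X = np.random.RandomState(1).randn(100, 2)
alphas, lambdas = rbf_kernel_pca(X, gamma=15.0, n_components=2)
print(alphas.shape)  # (100, 2) -- projections onto the top two components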
def _get_ch_whitener(A, pca, ch_type, rank):
    """Get whitener params for a set of channels."""
    # whitening operator
    eig, eigvec = linalg.eigh(A, overwrite_a=True)
    eigvec = eigvec.T
    eig[:-rank] = 0.0

    logger.info('Setting small %s eigenvalues to zero.' % ch_type)
    if not pca:  # No PCA case.
        logger.info('Not doing PCA for %s.' % ch_type)
    else:
        logger.info('Doing PCA for %s.' % ch_type)
        # This line will reduce the actual number of variables in data
        # and leadfield to the true rank.
        eigvec = eigvec[:-rank].copy()
    return eig, eigvec
def update_ops(self,kspace=None):
    '''
    Update the order parameters of the system.

    Parameters
    ----------
    kspace : BaseSpace, optional
        The Brillouin zone of the system.
    '''
    self.update(**{name:order.value for name,order in self.ops.iteritems()})
    self.mu,nmatrix=super(SCMF,self).mu(self.filling,kspace),self.nmatrix
    f=(lambda e,mu: 1 if e<=mu else 0) if abs(self.temperature)<RZERO else (lambda e,mu: 1/(exp((e-mu)/self.temperature)+1))
    m=zeros((nmatrix,nmatrix),dtype=complex128)
    for matrix in self.matrices(kspace):
        eigs,eigvecs=eigh(matrix)
        for eig,eigvec in zip(eigs,eigvecs.T):
            m+=dot(eigvec.conj().reshape((nmatrix,1)),eigvec.reshape((1,nmatrix)))*f(eig,self.mu)
    nstate=(1 if kspace is None else kspace.rank('k'))*nmatrix/self.config.values()[0].nspin
    for key in self.ops.keys():
        self.ops[key].value=sum(m*self.ops[key].matrix)/nstate
        if self.ops[key].dtype in (float32,float64): self.ops[key].value=self.ops[key].value.real
def eigvals(self,basespace=None,mode='*'):
    '''
    This method returns all the eigenvalues of the Hamiltonian.

    Parameters
    ----------
    basespace : BaseSpace, optional
        The base space on which the Hamiltonian is defined.
    mode : string, optional
        The mode to iterate over the base space.

    Returns
    -------
    1d ndarray
        All the eigenvalues.
    '''
    if basespace is None:
        result=eigh(self.matrix(),eigvals_only=True)
    else:
        result=asarray([eigh(self.matrix(**paras),eigvals_only=True) for paras in basespace(mode)]).reshape(-1)
    return result
def TBAEB(engine,app):
    '''
    This method calculates the energy bands of the Hamiltonian.
    '''
    nmatrix=engine.nmatrix
    if app.path is not None:
        assert len(app.path.tags)==1
        result=zeros((app.path.rank(0),nmatrix+1))
        if app.path.mesh(0).ndim==1:
            result[:,0]=app.path.mesh(0)
        else:
            result[:,0]=array(xrange(app.path.rank(0)))
        for i,paras in enumerate(app.path()):
            result[i,1:]=eigh(engine.matrix(**paras),eigvals_only=True)
    else:
        result=zeros((2,nmatrix+1))
        result[:,0]=array(xrange(2))
        result[0,1:]=eigh(engine.matrix(),eigvals_only=True)
        result[1,1:]=result[0,1:]
    name='%s_%s'%(engine.tostr(mask=() if app.path is None else app.path.tags),app.name)
    if app.savedata: savetxt('%s/%s.dat'%(engine.dout,name),result)
    if app.plot: app.figure('L',result,'%s/%s'%(engine.dout,name))
    if app.returndata: return result
def FBFMEB(engine,app):
    '''
    This method calculates the energy spectra of the spin excitations.
    '''
    path,ne=app.path,min(app.ne or engine.nmatrix,engine.nmatrix)
    if path is not None:
        bz,reciprocals=engine.basis.BZ,engine.lattice.reciprocals
        parameters=list(path('+')) if isinstance(path,HP.BaseSpace) else [{'k':bz[pos]} for pos in bz.path(HP.KMap(reciprocals,path) if isinstance(path,str) else path,mode='I')]
        result=np.zeros((len(parameters),ne+1))
        result[:,0]=path.mesh(0) if isinstance(path,HP.BaseSpace) and path.mesh(0).ndim==1 else np.array(xrange(len(parameters)))
        engine.log<<'%s: '%len(parameters)
        for i,paras in enumerate(parameters):
            engine.log<<'%s%s'%(i,'..' if i<len(parameters)-1 else '')
            m=engine.matrix(**paras)
            result[i,1:]=sl.eigh(m,eigvals_only=False)[0][:ne] if app.method=='eigh' else HM.eigsh(m,k=ne,return_eigenvectors=False)
        engine.log<<'\n'
    else:
        result=np.zeros((2,ne+1))
        result[:,0]=np.array(xrange(2))
        result[0,1:]=sl.eigh(engine.matrix(),eigvals_only=True)[:ne] if app.method=='eigh' else HM.eigsh(engine.matrix(),k=ne,return_eigenvectors=False)
        result[1,1:]=result[0,1:]
    name='%s_%s'%(engine.tostr(mask=path.tags if isinstance(path,HP.BaseSpace) else ()),app.name)
    if app.savedata: np.savetxt('%s/%s.dat'%(engine.dout,name),result)
    if app.plot: app.figure('L',result,'%s/%s'%(engine.dout,name))
    if app.returndata: return result
def FBFMPOS(engine,app):
    '''
    This method calculates the profiles of spin-1-excitation states.
    '''
    result=[]
    table=engine.config.table(mask=['spin','nambu'])
    U1,U2,vs=engine.basis.U1,engine.basis.U2,sl.eigh(engine.matrix(k=app.k),eigvals_only=False)[1]
    for i,index in enumerate(sorted(table,key=table.get)):
        result.append([i])
        gs=np.vdot(U2[table[index],:,:].reshape(-1),U2[table[index],:,:].reshape(-1))*(1 if engine.basis.polarization=='up' else -1)
        dw=optrep(HP.FQuadratic(1.0,(index.replace(spin=0,nambu=HP.CREATION),index.replace(spin=0,nambu=HP.ANNIHILATION)),seqs=(table[index],table[index])),app.k,engine.basis)
        up=optrep(HP.FQuadratic(1.0,(index.replace(spin=1,nambu=HP.CREATION),index.replace(spin=1,nambu=HP.ANNIHILATION)),seqs=(table[index],table[index])),app.k,engine.basis)
        for pos in app.ns or (0,):
            result[-1].append((np.vdot(vs[:,pos],up.dot(vs[:,pos]))-np.vdot(vs[:,pos],dw.dot(vs[:,pos]))-gs)*(-1 if engine.basis.polarization=='up' else 1))
    result=np.asarray(result)
    assert nl.norm(np.asarray(result).imag)<HP.RZERO
    result=result.real
    name='%s_%s'%(engine,app.name)
    if app.savedata: np.savetxt('%s/%s.dat'%(engine.dout,name),result)
    if app.plot: app.figure('L',result,'%s/%s'%(engine.dout,name),legend=['Level %s'%n for n in app.ns or (0,)])
    if app.returndata: return result
def test_spectral_embedding_unnormalized():
    # Test that spectral_embedding is also processing unnormalized
    # laplacian correctly
    random_state = np.random.RandomState(36)
    data = random_state.randn(10, 30)
    sims = rbf_kernel(data)
    n_components = 8
    embedding_1 = spectral_embedding(sims,
                                     norm_laplacian=False,
                                     n_components=n_components,
                                     drop_first=False)

    # Verify using manual computation with dense eigh
    laplacian, dd = graph_laplacian(sims, normed=False, return_diag=True)
    _, diffusion_map = eigh(laplacian)
    embedding_2 = diffusion_map.T[:n_components] * dd
    embedding_2 = _deterministic_vector_sign_flip(embedding_2).T

    assert_array_almost_equal(embedding_1, embedding_2)
def rankRegions(self, X, C, y, pos, regionLength, reml=True):
    # get region list
    regionsList = self.createRegionsList(pos, regionLength)

    # precompute log determinant of covariates
    XX = C.T.dot(C)
    [Sxx, Uxx] = la.eigh(XX)
    logdetXX = np.log(Sxx).sum()

    # score each region
    betas = np.zeros(len(regionsList))
    for r_i, r in enumerate(regionsList):
        regionSize = len(r)
        if (self.verbose and r_i % 1000 == 0):
            print 'Testing region ' + str(r_i+1) + '/' + str(len(regionsList)),
            print 'with', regionSize, 'SNPs\t'
        s, U = self.eigenDecompose(X[:, np.array(r)], None)
        sig2g_kernel, sig2e_kernel, fixedEffects, ll = self.optSigma2(U, s, y, C, logdetXX, reml)
        betas[r_i] = ll
    return regionsList, betas


### this code is taken from the FastLMM package (see attached license) ###
def lleval(self, Uy, UX, Sd, yKy, logdetK, logdetXX, logdelta, UUXUUX, UUXUUy, UUyUUy, numIndividuals, reml):
    N = numIndividuals
    D = UX.shape[1]

    UXS = UX / np.lib.stride_tricks.as_strided(Sd, (Sd.size, D), (Sd.itemsize, 0))
    XKy = UXS.T.dot(Uy)
    XKX = UXS.T.dot(UX)

    if (Sd.shape[0] < numIndividuals):
        delta = np.exp(logdelta)
        denom = delta
        XKX += UUXUUX / denom
        XKy += UUXUUy / denom
        yKy += UUyUUy / denom
        logdetK += (numIndividuals - Sd.shape[0]) * logdelta

    [SxKx, UxKx] = la.eigh(XKX)
    i_pos = SxKx > 1E-10
    beta = np.dot(UxKx[:, i_pos], (np.dot(UxKx[:, i_pos].T, XKy) / SxKx[i_pos]))
    r2 = yKy - XKy.dot(beta)

    if reml:
        logdetXKX = np.log(SxKx).sum()
        sigma2 = (r2 / (N - D))
        ll = -0.5 * (logdetK + (N - D) * np.log(2.0 * np.pi * sigma2) + (N - D) + logdetXKX - logdetXX)
    else:
        sigma2 = r2 / N
        ll = -0.5 * (logdetK + N * np.log(2.0 * np.pi * sigma2) + N)

    return ll, sigma2, beta, r2
def eigenDecompose(self, X, K, normalize=True):
    if (X.shape[1] >= X.shape[0]):
        s, U = la.eigh(K)
    else:
        U, s, _ = la.svd(X, check_finite=False, full_matrices=False)
        if (s.shape[0] < U.shape[1]):
            s = np.concatenate((s, np.zeros(U.shape[1] - s.shape[0])))  # note: can use low-rank formulas here
        s = s**2
        if normalize:
            s /= float(X.shape[1])
    if (np.min(s) < -1e-10):
        raise Exception('Negative eigenvalues found')
    s[s < 0] = 0
    ind = np.argsort(s)[::-1]
    U = U[:, ind]
    s = s[ind]
    return s, U
def solve_dynamic_eigen_model(system):
    from scipy import linalg as sl
    if not system._is_inited:
        system.calc_KG()
        system.calc_deleted_KG_matrix()
        system.calc_MG()
        system.calc_deleted_MG_matrix()
    w1, system.model = sl.eigh(system.KG_keeped, system.MG_keeped)
    system.w = np.sqrt(w1)
    T = 2 * np.pi / system.w
    system.freq = 1 / T
def learn(self, x, y):
    '''
    Function that learns the GMM with ridge regularization from training samples
    Input:
        x : the training samples
        y : the labels
    Output:
        the mean, covariance and proportion of each class, as well as the
        spectral decomposition of the covariance matrix
    '''

    ## Get information from the data
    C = sp.unique(y).shape[0]  # C = int(y.max(0))  # Number of classes
    n = x.shape[0]             # Number of samples
    d = x.shape[1]             # Number of variables
    eps = sp.finfo(sp.float64).eps

    ## Initialization
    self.ni = sp.empty((C, 1))    # Vector of number of samples for each class
    self.prop = sp.empty((C, 1))  # Vector of proportion
    self.mean = sp.empty((C, d))  # Vector of means
    self.cov = sp.empty((C, d, d))  # Matrix of covariance
    self.Q = sp.empty((C, d, d))  # Matrix of eigenvectors
    self.L = sp.empty((C, d))     # Vector of eigenvalues
    self.classnum = sp.empty(C).astype('uint8')

    ## Learn the parameter of the model for each class
    for c, cR in enumerate(sp.unique(y)):
        j = sp.where(y == (cR))[0]
        self.classnum[c] = cR  # Save the right label
        self.ni[c] = float(j.size)
        self.prop[c] = self.ni[c] / n
        self.mean[c, :] = sp.mean(x[j, :], axis=0)
        self.cov[c, :, :] = sp.cov(x[j, :], bias=1, rowvar=0)  # Normalize by ni to be consistent with the update formulae

        # Spectral decomposition
        L, Q = linalg.eigh(self.cov[c, :, :])
        idx = L.argsort()[::-1]
        self.L[c, :] = L[idx]
        self.Q[c, :, :] = Q[:, idx]
def principal_coordinates(D, nd=None):
    """
    Reconstruction of a multidimensional configuration that
    optimally reproduces the input distance matrix.

    See: Gower, J (1966)
    """
    from numpy import clip, sqrt, take, argsort, sort
    from csb.numeric import reverse
    from scipy.linalg import eigh

    ## calculate centered similarity matrix
    B = -clip(D, 1e-150, 1e150) ** 2 / 2.
    b = B.mean(0)
    B = B - b
    B = (B.T - b).T
    B += b.mean()

    ## calculate spectral decomposition
    v, U = eigh(B)
    v = v.real
    U = U.real

    U = take(U, argsort(v), 1)
    v = sort(v)

    U = reverse(U, 1)
    v = reverse(v)

    if nd is None:
        nd = len(v)
    X = U[:, :nd] * sqrt(clip(v[:nd], 0., 1e300))

    return X
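A hedged usage sketch for principal_coordinates above; it assumes the function and its imports are available (including the csb package it pulls reverse from). The points are illustrative: a distance matrix built from known 2-D coordinates should be reproduced up to rotation and reflection.

import numpy as np
from scipy.spatial.distance import pdist, squareform

# Illustrative round trip: pairwise distances of 10 random 2-D points.
pts = np.random.RandomState(0).randn(10, 2)
D = squareform(pdist(pts))
X = principal_coordinates(D, nd=2)
print(X.shape)  # (10, 2)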
def plot_results(X, Y_, means, covariances, index, title):
    splot = plt.subplot(2, 1, 1 + index)
    for i, (mean, covar, color) in enumerate(zip(
            means, covariances, color_iter)):
        v, w = linalg.eigh(covar)
        v = 2. * np.sqrt(2.) * np.sqrt(v)
        u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
        # unless it needs it, we shouldn't plot the redundant
        # components.
        if not np.any(Y_ == i):
            continue
        plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)

        # Plot an ellipse to show the Gaussian component
        angle = np.arctan(u[1] / u[0])
        angle = 180. * angle / np.pi  # convert to degrees
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)

    plt.title(title)


#####################
# Data
def fit(self, X, y=None):
    # X = array2d(X)
    n_samples, n_features = X.shape
    X = as_float_array(X, copy=self.copy)
    # subtract the mean for each feature vector
    self.mean_ = np.mean(X, axis=0)
    X -= self.mean_
    eigs, eigv = eigh(np.dot(X.T, X) / n_samples +
                      self.bias * np.identity(n_features))
    components = np.dot(eigv * np.sqrt(1.0 / eigs), eigv.T)
    self.components_ = components
    # Order the explained variance from greatest to least
    self.explained_variance_ = eigs[::-1]
    return self
def test_linear_operator():
    """Test linear operator."""
    n_times, n_atoms, n_times_atom = 128, 32, 32
    n_times_valid = n_times - n_times_atom + 1

    rng = check_random_state(42)
    ds = rng.randn(n_atoms, n_times_atom)
    some_sample_weights = np.abs(rng.randn(n_times))

    for sample_weights in [None, some_sample_weights]:
        gbc = partial(gram_block_circulant, ds=ds,
                      n_times_valid=n_times_valid,
                      sample_weights=sample_weights)
        DTD_full = gbc(method='full')
        DTD_scipy = gbc(method='scipy')
        DTD_custom = gbc(method='custom')

        z = rng.rand(DTD_full.shape[1])
        assert_allclose(DTD_full.dot(z), DTD_scipy.dot(z))
        assert_allclose(DTD_full.dot(z), DTD_custom.dot(z))

        # test power iterations with linear operator
        mu, _ = linalg.eigh(DTD_full)
        t = []
        for DTD in [DTD_full, DTD_scipy, DTD_custom]:
            start = time.time()
            mu_hat = power_iteration(DTD)
            t.append(time.time() - start)
            assert_allclose(np.max(mu), mu_hat, rtol=1e-2)

        print(t)
        assert_true(t[1] < t[0])
        assert_true(t[2] < t[0])
def _eig_decomposition(self, A, largest=True):
    n_features = A.shape[0]

    # request only the n_components largest (or smallest) eigenpairs
    if (largest):
        eig_range = (n_features - self.n_components, n_features - 1)
    else:
        eig_range = (0, self.n_components - 1)

    eigvals, eigvecs = spla.eigh(A, eigvals=eig_range)

    # eigh returns ascending order; flip so eigenvalues are decreasing
    if (largest):
        eigvals = eigvals[::-1]
        eigvecs = sp.fliplr(eigvecs)

    return eigvals, eigvecs
def calc_W(self, S_b, S_w):
    # W is the array of eigenvectors, where each column is a vector.
    eigenvalues, W = eigh(S_b, S_w)
    return W
def test_optimized_W(cls):
    tolerance = 1e-100
    vals, W = eigh(cls.model.S_b, cls.model.S_w)
    cls.assert_same(cls.model.W, W, tolerance=tolerance)
def test_calc_W(self):
    tolerance = 1e-100
    S_b = [[17.70840444, 17.96889098, 18.19513973],
           [17.96889098, 18.24564939, 18.46561872],
           [18.19513973, 18.46561872, 18.69940039]]
    S_w = [[0.94088804, -0.05751511, 0.01467744],
           [-0.05751511, 1.01617648, -0.03831551],
           [0.01467744, -0.03831551, 0.88440609]]
    W_model = self.model.calc_W(S_b, S_w)
    _, W_truth = eigh(S_b, S_w)
    self.assert_same(W_model, W_truth, tolerance=tolerance)
def _calculate_whitening_transformation_matrix(self, sample_from_prior):
    samples_vec = sp.asarray([self._dict_to_to_vect(x)
                              for x in sample_from_prior])
    # samples_vec is an array of shape nr_samples x nr_features
    means = samples_vec.mean(axis=0)
    centered = samples_vec - means
    covariance = centered.T.dot(centered)
    w, v = la.eigh(covariance)
    self._whitening_transformation_matrix = (
        v.dot(sp.diag(1. / sp.sqrt(w))).dot(v.T))
def _logdet(A):
    """Compute the log det of a symmetric matrix."""
    vals = linalg.eigh(A)[0]
    # avoid negative (numerical errors) or zero (semi-definite matrix) values
    tol = vals.max() * vals.size * np.finfo(np.float64).eps
    vals = np.where(vals > tol, vals, tol)
    return np.sum(np.log(vals))
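A quick sanity check for _logdet above (illustrative, assuming the snippet's imports of numpy as np and scipy's linalg are in scope): for a well-conditioned symmetric positive definite matrix the clipped log-determinant agrees with numpy's slogdet.

import numpy as np

# log det of diag(1, 2, 3) is log(1 * 2 * 3) = log 6.
A = np.diag([1.0, 2.0, 3.0])
print(_logdet(A))               # ~1.7918
print(np.linalg.slogdet(A)[1])  # ~1.7918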
def set(self,gse):
    '''
    Set the Lambda matrix, Q matrix and QT matrix of the block.

    Parameters
    ----------
    gse : number
        The groundstate energy.
    '''
    sign=self.controllers['sign']
    if self.method=='S':
        lczs,Qs=self.controllers['lczs'],self.controllers['Qs']
        self.data['niters']=np.zeros(Qs.shape[0],dtype=np.int32)
        self.data['Lambdas']=np.zeros((Qs.shape[0],Qs.shape[2]),dtype=np.float64)
        self.data['Qs']=np.zeros(Qs.shape,dtype=Qs.dtype)
        self.data['QTs']=np.zeros((Qs.shape[0],Qs.shape[2]),dtype=Qs.dtype)
        for i,(lcz,Q) in enumerate(zip(lczs,Qs)):
            E,V=sl.eigh(lcz.T,eigvals_only=False)
            self.data['niters'][i]=lcz.niter
            self.data['Lambdas'][i,0:lcz.niter]=sign*(E-gse)
            self.data['Qs'][i,:,0:lcz.niter]=Q[:,0:lcz.niter].dot(V)
            self.data['QTs'][i,0:lcz.niter]=lcz.P[0,0]*V[0,:].conjugate()
    else:
        lanczos=self.controllers['lanczos']
        E,V=sl.eigh(lanczos.T,eigvals_only=False)
        self.data['Lambda']=sign*(E-gse)
        self.data['Q']=lanczos.P[:min(lanczos.nv0,lanczos.niter),:].T.conjugate().dot(V[:min(lanczos.nv0,lanczos.niter),:])
        self.data['QT']=HM.dagger(self.data['Q'])
def VCATEB(engine,app):
    '''
    This method calculates the topological Hamiltonian's spectrum.
    '''
    engine.rundependences(app.name)
    engine.gf(omega=app.mu)
    H=lambda kx,ky: -inv(engine.gf(k=[kx,ky]))
    result=np.zeros((app.path.rank('k'),engine.nopt+1))
    for i,paras in enumerate(app.path('+')):
        result[i,0]=i
        result[i,1:]=eigh(H(paras['k'][0],paras['k'][1]),eigvals_only=True)
    name='%s_%s'%(engine,app.name)
    if app.savedata: np.savetxt('%s/%s.dat'%(engine.dout,name),result)
    if app.plot: app.figure('L',result,'%s/%s'%(engine.dout,name))
    if app.returndata: return result
def berry_phase(H,path,ns):
    '''
    Calculate the Berry phase of some bands of a Hamiltonian along a certain path.

    Parameters
    ----------
    H : callable
        Input function which returns the Hamiltonian as a 2D array.
    path : iterable
        The path along which to calculate the Berry phase.
    ns : iterable of int
        The sequences of bands whose Berry phases are wanted.

    Returns
    -------
    1d ndarray
        The wanted Berry phase of the selected bands.
    '''
    ns=np.array(ns)
    for i,parameters in enumerate(path):
        new=eigh(H(**parameters))[1][:,ns]
        if i==0:
            result=np.ones(len(ns),new.dtype)
            evs=new
        else:
            for j in xrange(len(ns)):
                result[j]*=np.vdot(old[:,j],new[:,j])
        old=new
    else:
        # close the loop: overlap between the last and the first eigenvectors
        for j in xrange(len(ns)):
            result[j]*=np.vdot(old[:,j],evs[:,j])
    return np.angle(result)/np.pi
def set(self,matrix):
    '''
    Set the basis.

    Parameters
    ----------
    matrix : callable
        The function to get the single particle matrix.
    '''
    Eup,Uup,Edw,Udw=[],[],[],[]
    for k in [()] if self.BZ is None else self.BZ.mesh('k'):
        m=matrix(k)
        es,us=sl.eigh(m[:m.shape[0]/2,:m.shape[0]/2])
        Edw.append(es)
        Udw.append(us)
        es,us=sl.eigh(m[m.shape[0]/2:,m.shape[0]/2:])
        Eup.append(es)
        Uup.append(us)
    Eup,Uup=np.asarray(Eup),np.asarray(Uup).transpose((1,0,2))
    Edw,Udw=np.asarray(Edw),np.asarray(Udw).transpose((1,0,2))
    if self.polarization=='up':
        self._E1_=Edw
        self._E2_=Eup
        self._U1_=Udw
        self._U2_=Uup
    else:
        self._E1_=Eup
        self._E2_=Edw
        self._U1_=Uup
        self._U2_=Udw
def __init__(self,ne=6,method='eigh',**karg):
    '''
    Constructor.

    Parameters
    ----------
    ne : integer, optional
        The number of energy levels in the spectra.
    method : 'eigh','eigsh'
        The function used to calculate the spectra, 'eigh' for `sl.eigh` and 'eigsh' for `HM.eigsh`.
    '''
    super(EB,self).__init__(**karg)
    self.ne=ne
    self.method=method
def eigs(self):
    '''
    The eigenvalues of the Lanczos matrix.

    Returns
    -------
    1d ndarray
        The eigenvalues of the Lanczos matrix.
    '''
    return sl.eigh(self.T,eigvals_only=True)
def test_lanczos():
    print 'test_lanczos'
    N,Nv,Ne,Niter=4000,15,1,750
    np.random.seed(1)
    m=np.random.random((N,N))+1j*np.random.random((N,N))
    m=m+m.T.conjugate()
    v0=np.random.random((Nv,N))
    exacteigs=sl.eigh(m,eigvals_only=True)[:Ne] if N<1000 else eigsh(m,k=Ne,return_eigenvectors=False,which='SA')[::-1]
    print 'Exact eigs:',exacteigs
    print
    a=Lanczos(m,deepcopy(v0),maxiter=Niter,keepstate=True)
    for i in xrange(Niter): a.iter()
    print 'diff from Hermitian: %s.'%sl.norm(a.T-a.T.T.conjugate())
    h=np.zeros((Niter,Niter),dtype=m.dtype)
    for i in xrange(Niter):
        hi=a.matrix.dot(a.vectors[i]).conjugate()
        for j in xrange(Niter):
            h[i,j]=hi.dot(a.vectors[j])
    print 'diff from h: %s.'%sl.norm(a.T-h)
    V=np.asarray(a.vectors)
    print 'vectors input diff: %s.'%sl.norm(V[0:Nv,:].T.dot(a.P)-v0.T)
    print 'vectors orthonormal diff: %s.'%sl.norm(np.identity(Niter)-V.dot(V.T.conjugate()))
    Leigs=a.eigs()[:Ne]
    print 'Lanczos eigs:',Leigs
    print 'eigenvalues diff: %s.'%sl.norm(exacteigs-Leigs)
    print
def plot_ellipse(splot, mean, cov, color):
    v, w = linalg.eigh(cov)
    u = w[0] / linalg.norm(w[0])
    angle = np.arctan(u[1] / u[0])
    angle = 180 * angle / np.pi  # convert to degrees
    # filled Gaussian at 2 standard deviation
    ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
                              180 + angle, color=color)
    ell.set_clip_box(splot.bbox)
    ell.set_alpha(0.5)
    splot.add_artist(ell)
    splot.set_xticks(())
    splot.set_yticks(())
def _pre_compute(self, X, y):
    # even if X is very sparse, K is usually very dense
    K = safe_sparse_dot(X, X.T, dense_output=True)
    v, Q = linalg.eigh(K)
    QT_y = np.dot(Q.T, y)
    return v, Q, QT_y
def PCA(data, dims_rescaled_data=2):
    """
    returns: data transformed into dims_rescaled_data dims/columns,
             plus the eigenvalues and eigenvectors used
    pass in: data as 2D NumPy array
    """
    import numpy as NP
    from scipy import linalg as LA
    m, n = data.shape
    # mean center the data
    data -= data.mean(axis=0)
    # calculate the covariance matrix
    R = NP.cov(data, rowvar=False)
    # calculate eigenvectors & eigenvalues of the covariance matrix;
    # use 'eigh' rather than 'eig' since R is symmetric,
    # the performance gain is substantial
    evals, evecs = LA.eigh(R)
    # sort eigenvalues in decreasing order
    idx = NP.argsort(evals)[::-1]
    evecs = evecs[:, idx]  # sort eigenvectors according to same index
    evals = evals[idx]
    # select the first n eigenvectors (n is the desired dimension
    # of the rescaled data array, i.e. dims_rescaled_data)
    evecs = evecs[:, :dims_rescaled_data]
    # carry out the transformation on the data using eigenvectors
    # and return the re-scaled data, eigenvalues, and eigenvectors
    return NP.dot(evecs.T, data.T).T, evals, evecs
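A hedged usage sketch for the PCA helper above, on illustrative random data. Note that the function centers `data` in place, which is why a copy is passed.

import numpy as np

# Illustrative call: reduce 200 samples of 5-D data to 2 dimensions.
data = np.random.RandomState(0).randn(200, 5)
reduced, evals, evecs = PCA(data.copy(), dims_rescaled_data=2)
print(reduced.shape)  # (200, 2)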
def solve_eigen_problem(self, A, B, solver):
    N = A.testfunction[0].N
    s = A.testfunction[0].slice()
    self.V = np.zeros((N, N))
    self.lmbda = np.ones(N)
    if solver == 0:
        self.lmbda[s], self.V[s, s] = scipy_la.eigh(A.diags().toarray(),
                                                    B.diags().toarray())
    elif solver == 1:
        #self.lmbda[s], self.V[s, s] = scipy_la.eigh(B.diags().toarray())
        a = np.zeros((3, N-2))
        a[0, :] = B[0]
        a[2, :-2] = B[2]
        self.lmbda[s], self.V[s, s] = scipy_la.eig_banded(a, lower=True)
def fit(self, X, y):
    """Train xdawn spatial filters.

    Parameters
    ----------
    X : ndarray, shape (n_trials, n_channels, n_samples)
        ndarray of trials.
    y : ndarray, shape (n_trials, 1)
        labels corresponding to each trial.

    Returns
    -------
    self : Xdawn instance
        The Xdawn instance.
    """
    Nt, Ne, Ns = X.shape

    self.classes_ = (numpy.unique(y) if self.classes is None
                     else self.classes)

    # FIXME : too many reshape operations
    tmp = X.transpose((1, 2, 0))
    Cx = numpy.matrix(self.estimator(tmp.reshape(Ne, Ns * Nt)))

    self.evokeds_ = []
    self.filters_ = []
    self.patterns_ = []
    for c in self.classes_:
        # Prototyped response for each class
        P = numpy.mean(X[y == c, :, :], axis=0)

        # Covariance matrix of the prototyped response & signal
        C = numpy.matrix(self.estimator(P))

        # Spatial filters
        evals, evecs = eigh(C, Cx)
        evecs = evecs[:, numpy.argsort(evals)[::-1]]  # sort eigenvectors
        evecs /= numpy.apply_along_axis(numpy.linalg.norm, 0, evecs)
        V = evecs
        A = numpy.linalg.pinv(V.T)
        # create the reduced prototyped response
        self.filters_.append(V[:, 0:self.nfilter].T)
        self.patterns_.append(A[:, 0:self.nfilter].T)
        self.evokeds_.append(numpy.dot(V[:, 0:self.nfilter].T, P))

    self.evokeds_ = numpy.concatenate(self.evokeds_, axis=0)
    self.filters_ = numpy.concatenate(self.filters_, axis=0)
    self.patterns_ = numpy.concatenate(self.patterns_, axis=0)
    return self
def test_hamiltonian(self):
    # printing an example from a H2 file
    hfile = self._get_resource_path("H2Equilibrium.txt")
    hamiltonian = make_Hamiltonian(Hamiltonian_from_file(hfile))
    self.log.info(hamiltonian)
    # [[-0.24522469381221926 0 0 0.18093133934472627 ]
    #  [0 -1.0636560168497590 0.18093133934472627 0]
    #  [0 0.18093133934472627 -1.0636560168497592 0]
    #  [0.18093133934472627 0 0 -1.8369675149908681]]
    self.assertSequenceEqual([str(i) for i in hamiltonian[0]],
                             ['(-0.245224693812+0j)', '0j', '0j',
                              '(0.180931339345+0j)'])
    self.assertSequenceEqual([str(i) for i in hamiltonian[1]],
                             ['0j', '(-1.06365601685+0j)',
                              '(0.180931339345+0j)', '0j'])
    self.assertSequenceEqual([str(i) for i in hamiltonian[2]],
                             ['0j', '(0.180931339345+0j)',
                              '(-1.06365601685+0j)', '0j'])
    self.assertSequenceEqual([str(i) for i in hamiltonian[3]],
                             ['(0.180931339345+0j)', '0j', '0j',
                              '(-1.83696751499+0j)'])

    # printing an example from a graph input
    n = 3
    v0 = np.zeros(n)
    v0[2] = 1
    v1 = np.zeros(n)
    v1[0] = 1
    v1[1] = 1
    v2 = np.zeros(n)
    v2[0] = 1
    v2[2] = 1
    v3 = np.zeros(n)
    v3[1] = 1
    v3[2] = 1
    pauli_list = [(1, Pauli(v0, np.zeros(n))), (1, Pauli(v1, np.zeros(n))),
                  (1, Pauli(v2, np.zeros(n))), (1, Pauli(v3, np.zeros(n)))]
    a = make_Hamiltonian(pauli_list)
    self.log.info(a)

    w, v = la.eigh(a, eigvals=(0, 0))
    self.log.info(w)
    self.log.info(v)

    data = {'000': 10}
    self.log.info(Energy_Estimate(data, pauli_list))
    data = {'001': 10}
    self.log.info(Energy_Estimate(data, pauli_list))
    data = {'010': 10}
    self.log.info(Energy_Estimate(data, pauli_list))
    data = {'011': 10}
    self.log.info(Energy_Estimate(data, pauli_list))
    data = {'100': 10}
    self.log.info(Energy_Estimate(data, pauli_list))
    data = {'101': 10}
    self.log.info(Energy_Estimate(data, pauli_list))
    data = {'110': 10}
    self.log.info(Energy_Estimate(data, pauli_list))
    data = {'111': 10}
    self.log.info(Energy_Estimate(data, pauli_list))
def nfiedler(A=None, tol=1e-12):
    """
    :param A: motif adjacency matrix A
    :param tol: not used in this program
    :return: the Fiedler vector of the normalized Laplacian of A
        (Fiedler vector: eigenvector corresponding to the second
        smallest eigenvalue)
    """
    if A is None:
        print('Error! matrix A is None..')
        return None
    L = nlaplacian(A)
    n = A.shape[0]
    # shift by the identity so all eigenvalues are positive; undone below
    eigvalue, eigvector = SLA.eigh(L + np.eye(n))
    # eigenvector of the second smallest eigenvalue
    x = eigvector[:, 1]
    x = x / (np.sqrt(np.sum(A, 1)).T)
    eigvalue = eigvalue[1] - 1
    return x, eigvalue