The following 7 code examples, extracted from open-source Python projects, illustrate how to use scipy.linalg.svdvals().
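Before the examples, here is a minimal standalone sketch (the matrix is illustrative) of what svdvals() returns: the singular values of a matrix, in descending order, without the U and V factors of a full SVD.

import numpy as np
from scipy.linalg import svdvals

A = np.array([[3.0, 0.0],
              [0.0, 2.0],
              [0.0, 0.0]])

# singular values in descending order, no singular vectors
s = svdvals(A)
print(s)  # [3. 2.]

# matches the middle output of a full SVD
_, s_full, _ = np.linalg.svd(A)
print(np.allclose(s, s_full))  # True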
import numpy as np


def rank(X, cond=1.0e-12):
    """
    Return the rank of a matrix X, estimated from its singular values.

    Singular values smaller than `cond`, relative to the largest one,
    are treated as zero.
    """
    X = np.asarray(X)
    if len(X.shape) == 2:
        import scipy.linalg as SL
        D = SL.svdvals(X)
        result = np.add.reduce(np.greater(D / D.max(), cond))
        return int(result.astype(np.int32))
    else:
        # a 1-D (or 0-D) input has rank 1 unless it is identically zero
        return int(not np.alltrue(np.equal(X, 0.)))
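A quick usage sketch for the helper above, assuming the rank() function and numpy are in scope; the 3x3 matrix is deliberately rank-deficient.

import numpy as np

A = np.array([[1.0, 2.0, 3.0],
              [2.0, 4.0, 6.0],   # a multiple of the first row
              [0.0, 1.0, 1.0]])

print(rank(A))                   # 2
print(np.linalg.matrix_rank(A))  # 2, numpy's built-in agrees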
import scipy.linalg as la


def lowRank_distance(A, k):
    '''Spectral-norm distance between A and the best rank-k approximation.'''
    if k >= rank(A):
        raise Exception('Please provide a lower rank k')
    sigma = la.svdvals(A)
    # the distance is the (k+1)-th singular value
    return sigma[k]
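The returned value is the Eckart-Young bound: no rank-k matrix is closer to A in spectral norm than sigma[k]. A small self-contained check of that identity (independent of the helper above, with random data):

import numpy as np
from scipy.linalg import svdvals

rng = np.random.default_rng(0)
A = rng.standard_normal((6, 4))
k = 2

# best rank-k approximation via a truncated SVD
U, s, Vt = np.linalg.svd(A, full_matrices=False)
A_k = (U[:, :k] * s[:k]) @ Vt[:k, :]

# the spectral-norm distance equals the (k+1)-th singular value
dist = np.linalg.norm(A - A_k, ord=2)
print(np.isclose(dist, svdvals(A)[k]))  # True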
import numpy as np
from scipy.linalg import svdvals
from scipy.stats import entropy  # entropy(P, Q) is the KL divergence


def arun_2010(topic_term_dists, doc_topic_dists, doc_lengths, num_topics):
    # singular values of the topic-term matrix
    P = svdvals(topic_term_dists)
    # length-weighted topic distribution of the corpus
    Q = np.matmul(doc_lengths, doc_topic_dists) / np.linalg.norm(doc_lengths)
    return entropy(P, Q)
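A hypothetical call with random but properly normalized inputs; only the shapes matter here, and scipy.stats.entropy is the assumed entropy in scope (lower divergence suggests a better-matched number of topics).

import numpy as np

rng = np.random.default_rng(0)
n_docs, n_topics, n_terms = 100, 5, 50

# each row is a probability distribution (sums to 1)
topic_term = rng.dirichlet(np.ones(n_terms), size=n_topics)
doc_topic = rng.dirichlet(np.ones(n_topics), size=n_docs)
doc_lengths = rng.integers(50, 200, size=n_docs)

print(arun_2010(topic_term, doc_topic, doc_lengths, n_topics))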
def _process(self, X):
    """
    Perform TOPS for a given frame in order to estimate steered response
    spectrum.
    """
    # need more than one frequency band
    if self.num_freq < 2:
        raise ValueError('Need more than one frequency band!')

    # select the reference frequency (the band with the most energy)
    max_bin = np.argmax(np.sum(np.sum(abs(X[:, self.freq_bins, :]),
                                      axis=0), axis=1))
    f0 = self.freq_bins[max_bin]
    freq = list(self.freq_bins)
    freq.remove(f0)

    # compute empirical cross-correlation matrices
    C_hat = self._compute_correlation_matrices(X)

    # compute the signal and noise subspaces for each frequency band
    F = np.zeros((self.num_freq, self.M, self.num_src), dtype='complex64')
    W = np.zeros((self.num_freq, self.M, self.M - self.num_src),
                 dtype='complex64')
    for k in range(self.num_freq):
        # subspace decomposition
        F[k, :, :], W[k, :, :], ws, wn = \
            self._subspace_decomposition(C_hat[k, :, :])

    # create the transformation matrix
    f = 1.0 / self.nfft / self.c * 1j * 2 * np.pi * self.fs * \
        (np.linspace(0, self.nfft / 2, self.nfft // 2 + 1) - f0)

    Phi = np.zeros((len(f), self.M, self.num_loc), dtype='complex64')
    for n in range(self.num_loc):
        p_s = self.loc[:, n]
        proj = np.dot(p_s, self.L)
        for m in range(self.M):
            Phi[:, m, n] = np.exp(f * proj[m])

    # determine the direction using the rank test
    for n in range(self.num_loc):
        # form the D matrix
        D = np.zeros((self.num_src,
                      (self.M - self.num_src) * (self.num_freq - 1)),
                     dtype='complex64')
        for k in range(self.num_freq - 1):
            Uk = np.conjugate(np.dot(np.diag(Phi[k, :, n]),
                                     F[max_bin, :, :])).T
            idx = range(k * (self.M - self.num_src),
                        (k + 1) * (self.M - self.num_src))
            D[:, idx] = np.dot(Uk, W[k, :, :])

        # only the singular values are needed, so svdvals is faster
        # than a full np.linalg.svd
        s = linalg.svdvals(D)
        self.P[n] = 1.0 / s[-1]
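The comment in the original ("FASTER!!!") is the point of svdvals() here: when only the singular values are needed, skipping the singular vectors is cheaper. A standalone illustration with a random complex matrix shaped like D:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
D = rng.standard_normal((8, 40)) + 1j * rng.standard_normal((8, 40))

# identical values; svdvals never forms the singular vectors
s_full = np.linalg.svd(D, compute_uv=False)
s_only = linalg.svdvals(D)
print(np.allclose(s_full, s_only))  # True

# the smallest singular value drives the TOPS pseudo-spectrum
print(1.0 / s_only[-1])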
def compute_depth_prior(G, gain_info, is_fixed_ori, exp=0.8, limit=10.0,
                        patch_areas=None, limit_depth_chs=False):
    """Compute weighting for depth prior."""
    logger.info('Creating the depth weighting matrix...')

    # If possible, pick best depth-weighting channels
    if limit_depth_chs is True:
        G = _restrict_gain_matrix(G, gain_info)

    # Compute the power of the gain matrix at each source location
    if is_fixed_ori:
        d = np.sum(G ** 2, axis=0)
    else:
        n_pos = G.shape[1] // 3
        d = np.zeros(n_pos)
        for k in range(n_pos):
            Gk = G[:, 3 * k:3 * (k + 1)]
            # largest singular value of the 3x3 Gram matrix
            d[k] = linalg.svdvals(np.dot(Gk.T, Gk))[0]

    # XXX Currently the fwd solns never have "patch_areas" defined
    if patch_areas is not None:
        d /= patch_areas ** 2
        logger.info('    Patch areas taken into account in the depth '
                    'weighting')

    w = 1.0 / d
    ws = np.sort(w)
    weight_limit = limit ** 2
    if limit_depth_chs is False:
        # match old mne-python behavior
        ind = np.argmin(ws)
        n_limit = ind
        limit = ws[ind] * weight_limit
        wpp = (np.minimum(w / limit, 1)) ** exp
    else:
        # match C code behavior
        limit = ws[-1]
        n_limit = len(d)
        if ws[-1] > weight_limit * ws[0]:
            ind = np.where(ws > weight_limit * ws[0])[0][0]
            limit = ws[ind]
            n_limit = ind

    logger.info('    limit = %d/%d = %f'
                % (n_limit + 1, len(d), np.sqrt(limit / ws[0])))
    scale = 1.0 / limit
    logger.info('    scale = %g exp = %g' % (scale, exp))
    wpp = np.minimum(w / limit, 1) ** exp

    depth_prior = wpp if is_fixed_ori else np.repeat(wpp, 3)

    return depth_prior
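In the free-orientation branch, svdvals(Gk.T @ Gk)[0] is the largest eigenvalue of the 3x3 Gram matrix, i.e. the squared largest singular value of Gk. A small check, under the assumption that Gk is an (n_channels, 3) gain block:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
Gk = rng.standard_normal((64, 3))  # one source's 3-orientation gain block

# largest singular value of the Gram matrix ...
d_k = linalg.svdvals(np.dot(Gk.T, Gk))[0]

# ... equals the squared largest singular value of Gk itself
print(np.isclose(d_k, linalg.svdvals(Gk)[0] ** 2))  # True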
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
               squared=True):
    """Computes the Mean Squared Error between two covariance estimators
    (in the sense of the Frobenius norm).

    Parameters
    ----------
    comp_cov : array-like, shape = [n_features, n_features]
        The covariance to compare with.

    norm : str
        The type of norm used to compute the error. Available error types:
        - 'frobenius' (default): sqrt(tr(A^t.A))
        - 'spectral': sqrt(max(eigenvalues(A^t.A)))
        where A is the error ``(comp_cov - self.covariance_)``.

    scaling : bool
        If True (default), the squared error norm is divided by n_features.
        If False, the squared error norm is not rescaled.

    squared : bool
        Whether to compute the squared error norm or the error norm.
        If True (default), the squared error norm is returned.
        If False, the error norm is returned.

    Returns
    -------
    The Mean Squared Error (in the sense of the Frobenius norm) between
    `self` and `comp_cov` covariance estimators.
    """
    # compute the error
    error = comp_cov - self.covariance_
    # compute the error norm
    if norm == "frobenius":
        squared_norm = np.sum(error ** 2)
    elif norm == "spectral":
        squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
    else:
        raise NotImplementedError(
            "Only spectral and frobenius norms are implemented")
    # optionally scale the error norm
    if scaling:
        squared_norm = squared_norm / error.shape[0]
    # finally get either the squared norm or the norm
    if squared:
        result = squared_norm
    else:
        result = np.sqrt(squared_norm)

    return result
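A usage sketch via scikit-learn's EmpiricalCovariance, which exposes this error_norm method; the data is random and the true covariance is illustrative.

import numpy as np
from sklearn.covariance import EmpiricalCovariance

rng = np.random.default_rng(0)
true_cov = np.array([[1.0, 0.3],
                     [0.3, 2.0]])
X = rng.multivariate_normal(np.zeros(2), true_cov, size=5000)

est = EmpiricalCovariance().fit(X)

# 'spectral' takes the largest eigenvalue of error.T @ error via
# svdvals, i.e. the squared spectral norm of the error matrix
print(est.error_norm(true_cov, norm='frobenius'))
print(est.error_norm(true_cov, norm='spectral'))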
def fit(self, X, y=None):
    """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training data, where n_samples is the number of samples
        and n_features is the number of features.

    y : not used, present for API consistency purposes.

    Returns
    -------
    self : object
        Returns self.
    """
    X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
    random_state = check_random_state(self.random_state)
    n_samples, n_features = X.shape
    # check that the empirical covariance is full rank
    if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
        warnings.warn("The covariance matrix associated to your dataset "
                      "is not full rank")
    # compute and store raw estimates
    raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
        X, support_fraction=self.support_fraction,
        cov_computation_method=self._nonrobust_covariance,
        random_state=random_state)
    if self.assume_centered:
        raw_location = np.zeros(n_features)
        raw_covariance = self._nonrobust_covariance(X[raw_support],
                                                    assume_centered=True)
        # get precision matrix in an optimized way
        precision = pinvh(raw_covariance)
        raw_dist = np.sum(np.dot(X, precision) * X, 1)
    self.raw_location_ = raw_location
    self.raw_covariance_ = raw_covariance
    self.raw_support_ = raw_support
    self.location_ = raw_location
    self.support_ = raw_support
    self.dist_ = raw_dist
    # obtain consistency at normal models
    self.correct_covariance(X)
    # re-weight estimator
    self.reweight_covariance(X)

    return self
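Finally, a usage sketch for MinCovDet itself; the svdvals-based check above warns when X.T @ X is rank-deficient, which a duplicated column is enough to trigger.

import numpy as np
from sklearn.covariance import MinCovDet

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 3))

mcd = MinCovDet(random_state=0).fit(X)
print(mcd.location_)    # robust location estimate
print(mcd.covariance_)  # robust covariance estimate

# a duplicated column makes X.T @ X rank-deficient and triggers
# the "not full rank" warning in fit()
X_bad = np.hstack([X, X[:, :1]])
MinCovDet(random_state=0).fit(X_bad)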