The following 27 code examples, extracted from open-source Python projects, illustrate how to use scipy.linalg.pinv().
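The snippets below are excerpts from larger projects, so they assume module-level imports such as import numpy as np, import scipy as sp, and from scipy import linalg (aliased in various examples as la, lg, or lin, or with pinv imported directly). As a warm-up, here is a minimal, self-contained sketch of what scipy.linalg.pinv() computes; the matrix and vector are illustrative only:

import numpy as np
from scipy import linalg

# A rank-deficient 4x3 matrix: column 2 is twice column 0,
# so no ordinary inverse exists, but the pseudo-inverse does.
A = np.array([[1.0, 2.0, 2.0],
              [0.0, 1.0, 0.0],
              [1.0, 0.0, 2.0],
              [2.0, 1.0, 4.0]])
A_pinv = linalg.pinv(A)  # Moore-Penrose pseudo-inverse

# A_pinv @ b is the minimum-norm least-squares solution of A x = b.
b = np.array([1.0, 2.0, 3.0, 4.0])
x = A_pinv @ b
print(np.allclose(A @ A_pinv @ A, A))  # True: defining Moore-Penrose property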
def annopred_inf(beta_hats, pr_sigi, n=1000, reference_ld_mats=None, ld_window_size=100):
    """
    Infinitesimal model with SNP-specific heritability derived from annotation,
    used as the initial values for MCMC of the non-infinitesimal model.
    """
    num_betas = len(beta_hats)
    updated_betas = sp.empty(num_betas)
    m = len(beta_hats)
    for i, wi in enumerate(range(0, num_betas, ld_window_size)):
        start_i = wi
        stop_i = min(num_betas, wi + ld_window_size)
        curr_window_size = stop_i - start_i
        Li = 1.0 / pr_sigi[start_i: stop_i]
        D = reference_ld_mats[i]
        A = (n / (1)) * D + sp.diag(Li)
        A_inv = linalg.pinv(A)
        updated_betas[start_i: stop_i] = sp.dot(A_inv / (1.0 / n),
                                                beta_hats[start_i: stop_i])  # Adjust the beta_hats
    return updated_betas
def compute(self, signal):
    check_MD(signal.X)
    N = signal.N
    M, L = signal.X.shape
    sigma2 = signal.sigma2
    H = np.matrix(signal.H)
    X = np.matrix(signal.X)
    Rx = X * X.H / L
    PH = np.eye(N) - H * lg.pinv(H)
    D = np.matrix(np.diag(1j * np.arange(N)))
    M = H.H * D.H * PH * D * H
    CRB = (sigma2 / 2) * lg.inv(np.real(np.multiply(M, Rx)))
    self.w = np.diag(CRB)
    return self
def _make_impute_stats(self, x):
    self.mean = mpiops.mean(x)
    cov = mpiops.covariance(x)
    self.prec, rank = pinv(cov, return_rank=True)  # stable pseudo-inverse
    # if rank < len(self.mean):
    #     raise RuntimeError("This imputation method does not work on low "
    #                        "rank problems!")
def ridge_regression(data, labels, mu=0.0):
    r"""Implementation of the Regularized Least Squares solver.

    It solves the ridge regression problem with parameter ``mu`` on the
    `l2-norm`.

    Parameters
    ----------
    data : (N, P) ndarray
        Data matrix.
    labels : (N,) or (N, 1) ndarray
        Labels vector.
    mu : float, optional (default is `0.0`)
        `l2-norm` penalty.

    Returns
    -------
    beta : (P, 1) ndarray
        Ridge regression solution.

    Examples
    --------
    >>> X = numpy.array([[0.1, 1.1, 0.3], [0.2, 1.2, 1.6], [0.3, 1.3, -0.6]])
    >>> beta = numpy.array([0.1, 0.1, 0.0])
    >>> Y = numpy.dot(X, beta)
    >>> beta = l1l2py.algorithms.ridge_regression(X, Y, 1e3).T
    >>> len(numpy.flatnonzero(beta))
    3

    """
    n, p = data.shape

    if n < p:
        tmp = np.dot(data, data.T)
        if mu:
            tmp += mu * n * np.eye(n)
        tmp = la.pinv(tmp)
        return np.dot(np.dot(data.T, tmp), labels.reshape(-1, 1))
    else:
        tmp = np.dot(data.T, data)
        if mu:
            tmp += mu * n * np.eye(p)
        tmp = la.pinv(tmp)
        return np.dot(tmp, np.dot(data.T, labels.reshape(-1, 1)))
def inverse(self):
    return la.pinv(self._Sigma)
def OSP(M, E, t):
    """
    Performs the orthogonal subspace projection algorithm for target
    detection.

    Parameters:
        M: `numpy array`
            2d matrix of HSI data (N x p).
        E: `numpy array`
            2d matrix of background endmembers (p x q).
        t: `numpy array`
            A target endmember (p).

    Returns: `numpy array`
        Vector of detector output (N).

    References:
        Qian Du, Hsuan Ren, and Chein-I Chang. "A Comparative Study of
        Orthogonal Subspace Projection and Constrained Energy Minimization."
        IEEE TGRS. Volume 41. Number 6. June 2003.
    """
    N, p = M.shape
    # Projector onto the orthogonal complement of the background subspace
    P_U = np.eye(p, dtype=float) - np.dot(E.T, lin.pinv(E.T))
    tmp = np.dot(t.T, np.dot(P_U, t))
    nu = np.zeros(N, dtype=float)
    for k in range(N):
        nu[k] = np.dot(t.T, np.dot(P_U, M[k])) / tmp
    return nu
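A quick usage sketch for OSP with synthetic data (all shapes and values are hypothetical). Note that the shape of E follows what the code actually expects, one endmember per row, i.e. (q x p), rather than the (p x q) stated in the docstring:

import numpy as np
from scipy import linalg as lin

p, N, q = 20, 100, 3                # bands, pixels, background endmembers
rng = np.random.default_rng(0)
E = rng.random((q, p))              # background endmembers, one per row
t = rng.random(p)                   # target endmember
M = rng.random((N, p))              # HSI data, one pixel per row
nu = OSP(M, E, t)                   # detector output, shape (N,)
print(nu.shape)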
def pseudo_inverse_scipy(tensor):
    dtype = tensor.dtype
    # Wrap scipy's pinv as a TensorFlow op (TF1-style py_func)
    result = tf.py_func(linalg.pinv, [tensor], [dtype])[0]
    result.set_shape(tensor.shape)
    return result
def kl2(arr1, arr2):
    """Symmetric Kullback-Leibler distance between Gaussians fitted to
    the rows of arr1 and arr2."""
    S1 = np.cov(arr1, rowvar=0)
    S2 = np.cov(arr2, rowvar=0)
    m1 = np.mean(arr1, 0)
    m2 = np.mean(arr2, 0)
    delta = m1 - m2
    # Matrix products (not elementwise `*`) are required inside the traces.
    d = 0.5 * np.trace(np.dot(S1 - S2, pinv(S2) - pinv(S1))) + \
        0.5 * np.trace(np.dot(pinv(S1) + pinv(S2), np.outer(delta, delta)))
    return d
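A small sanity check for kl2 on synthetic Gaussian samples (data and seed are illustrative, and kl2 from the snippet above is assumed to be in scope): the distance is zero for identical empirical moments and positive otherwise.

import numpy as np
from scipy.linalg import pinv

rng = np.random.default_rng(0)
a = rng.normal(0.0, 1.0, size=(500, 4))   # 500 samples, 4 features
b = rng.normal(0.5, 1.2, size=(500, 4))   # shifted and rescaled
print(kl2(a, a[::-1]))  # 0.0: same empirical mean and covariance
print(kl2(a, b))        # > 0: the two fitted Gaussians differ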
def cost_function(self, non_linear_parameter_value, Y):
    self.N, L = Y.shape
    setattr(self, self.non_linear_parameter_name, non_linear_parameter_value)
    H = np.matrix(self.H)
    cost = np.real(np.trace(Y.H * H * lg.pinv(H) * Y))
    return cost
def find_frame_of_reference(target, source):
    target = copy(target)
    source = copy(source[:, :target.shape[1]])
    target /= target[3, :]
    source /= source[3, :]
    return dot(linalg.pinv(source.T), target.T).T
def reconstruct_columns(W, B):
    W = W.copy()
    for i in range(W.shape[1]):
        C = W[:, i]
        M = isntnan(C)
        if sum(M) != C.shape[0]:
            coeffs = dot(linalg.pinv(B[M, :]), C[M])
            W[:, i] = dot(B, coeffs)
    return W
def invert_svd(w_matrix, b_vector):
    # Compute the Moore-Penrose pseudo-inverse of a matrix from SVD
    inverse_w_matrix = np.matrix(linalg.pinv(w_matrix))
    # reshape b_vector into a column vector
    b_vector = b_vector.reshape((len(b_vector), 1))
    inverted_x_vector = (inverse_w_matrix * b_vector).flatten()
    return np.asarray(inverted_x_vector).flatten()
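A minimal usage sketch for invert_svd (the system below is hypothetical): for a matrix with full column rank it recovers the least-squares solution of W x = b.

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
W = rng.random((5, 3))                 # overdetermined system
x_true = np.array([1.0, -2.0, 0.5])
b = W @ x_true
x = invert_svd(W, b)                   # minimum-norm least-squares solution
print(np.allclose(x, x_true))          # True when W has full column rank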
def __MR_affinity_matrix(self, img, labels):
    W, D = self.__MR_W_D_matrix(img, labels)
    aff = pinv(D - self.weight_parameters['alpha'] * W)
    aff[sp.eye(sp.amax(labels) + 1).astype(bool)] = 0.0  # set diagonal elements to 0
    return aff
def _fit(self, X, compute_sources=False):
    """Fit the model.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data, where n_samples is the number of samples
        and n_features is the number of features.
    compute_sources : bool
        If False, the sources are not computed, only the rotation matrix.
        This can save memory when working with big data. Defaults to False.

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    fun_args = {} if self.fun_args is None else self.fun_args
    whitening, unmixing, sources, X_mean, self.n_iter_ = fastica(
        X=X, n_components=self.n_components, algorithm=self.algorithm,
        whiten=self.whiten, fun=self.fun, fun_args=fun_args,
        max_iter=self.max_iter, tol=self.tol, w_init=self.w_init,
        random_state=self.random_state, return_X_mean=True,
        compute_sources=compute_sources, return_n_iter=True)

    if self.whiten:
        self.components_ = np.dot(unmixing, whitening)
        self.mean_ = X_mean
        self.whitening_ = whitening
    else:
        self.components_ = unmixing

    self.mixing_ = linalg.pinv(self.components_)

    if compute_sources:
        self.__sources = sources

    return sources
def semi_nmf(x, iter=30):
    '''
    Semi Nonnegative Matrix Factorization.

    It returns a feature matrix F and a representation matrix G by
    minimizing the Frobenius norm ||X - FG^T||^2. The only constraint is
    that the elements of G be positive.

    Args:
        x: input matrix X
        iter (int): number of iterations of the optimization algorithm

    Returns:
        f: feature matrix F
        g: representation matrix G
    '''
    x = x.numpy()  # n * m
    f, g, p = svd_initialization(x)
    if p < 2:  # assumption: p is the component count returned by svd_initialization
        raise ValueError("The number of components (r) has to be >= 2.")

    for i in range(iter):
        f = np.dot(x, np.dot(g, la.pinv(np.dot(g.T, g))))
        f = np.nan_to_num(f)

        Ap = (abs(np.dot(x.T, f)) + np.dot(x.T, f)) / 2  # m * r
        An = (abs(np.dot(x.T, f)) - np.dot(x.T, f)) / 2
        Bp = (abs(np.dot(g, np.dot(f.T, f))) + np.dot(g, np.dot(f.T, f))) / 2
        Bn = (abs(np.dot(g, np.dot(f.T, f))) - np.dot(g, np.dot(f.T, f))) / 2

        C = An + Bp
        for m in range(C.shape[0]):
            for n in range(C.shape[1]):
                if C[m, n] == 0:  # `is 0` compared identity, not value
                    C[m, n] += 0.0001

        for j in range(g.shape[0]):
            for k in range(g.shape[1]):
                g[j, k] = g[j, k] * np.sqrt((Ap + Bn)[j, k] / (An + Bp)[j, k])
        g = np.nan_to_num(g)

    return torch.from_numpy(f), torch.from_numpy(g)
def _make_interpolation_matrix(pos_from, pos_to, alpha=1e-5):
    """Compute interpolation matrix based on spherical splines.

    Implementation based on [1].

    Parameters
    ----------
    pos_from : np.ndarray of float, shape(n_good_sensors, 3)
        The positions to interpolate from.
    pos_to : np.ndarray of float, shape(n_bad_sensors, 3)
        The positions to interpolate to.
    alpha : float
        Regularization parameter. Defaults to 1e-5.

    Returns
    -------
    interpolation : np.ndarray of float, shape(len(pos_to), len(pos_from))
        The interpolation matrix that maps good signals to the location
        of bad signals.

    References
    ----------
    [1] Perrin, F., Pernier, J., Bertrand, O. and Echallier, JF. (1989).
        Spherical splines for scalp potential and current density mapping.
        Electroencephalography and Clinical Neurophysiology, Feb; 72(2):184-7.
    """
    pos_from = pos_from.copy()
    pos_to = pos_to.copy()

    # normalize sensor positions to sphere
    _normalize_vectors(pos_from)
    _normalize_vectors(pos_to)

    # cosine angles between source positions
    cosang_from = pos_from.dot(pos_from.T)
    cosang_to_from = pos_to.dot(pos_from.T)
    G_from = _calc_g(cosang_from)
    G_to_from = _calc_g(cosang_to_from)

    if alpha is not None:
        G_from.flat[::len(G_from) + 1] += alpha

    n_channels = G_from.shape[0]  # G_from should be a square matrix
    C = np.r_[np.c_[G_from, np.ones((n_channels, 1))],
              np.c_[np.ones((1, n_channels)), 0]]
    C_inv = linalg.pinv(C)

    interpolation = np.c_[G_to_from,
                          np.ones((G_to_from.shape[0], 1))].dot(C_inv[:, :-1])
    return interpolation
def _pick_sources(self, data, include, exclude):
    """Aux function."""
    fast_dot = _get_fast_dot()
    if exclude is None:
        exclude = self.exclude
    else:
        exclude = list(set(self.exclude + list(exclude)))

    _n_pca_comp = self._check_n_pca_components(self.n_pca_components)

    if not(self.n_components_ <= _n_pca_comp <= self.max_pca_components):
        raise ValueError('n_pca_components must be >= '
                         'n_components and <= max_pca_components.')

    n_components = self.n_components_
    logger.info('Transforming to ICA space (%i components)' % n_components)

    # Apply first PCA
    if self.pca_mean_ is not None:
        data -= self.pca_mean_[:, None]

    sel_keep = np.arange(n_components)
    if include not in (None, []):
        sel_keep = np.unique(include)
    elif exclude not in (None, []):
        sel_keep = np.setdiff1d(np.arange(n_components), exclude)

    logger.info('Zeroing out %i ICA components'
                % (n_components - len(sel_keep)))

    unmixing = np.eye(_n_pca_comp)
    unmixing[:n_components, :n_components] = self.unmixing_matrix_
    unmixing = np.dot(unmixing, self.pca_components_[:_n_pca_comp])

    mixing = np.eye(_n_pca_comp)
    mixing[:n_components, :n_components] = self.mixing_matrix_
    mixing = np.dot(self.pca_components_[:_n_pca_comp].T, mixing)

    if _n_pca_comp > n_components:
        sel_keep = np.concatenate(
            (sel_keep, range(n_components, _n_pca_comp)))

    proj_mat = np.dot(mixing[:, sel_keep], unmixing[sel_keep, :])

    data = fast_dot(proj_mat, data)

    if self.pca_mean_ is not None:
        data += self.pca_mean_[:, None]

    # restore scaling
    if self.noise_cov is None:  # revert standardization
        data *= self._pre_whitener
    else:
        data = fast_dot(linalg.pinv(self._pre_whitener), data)

    return data
def _least_square_evoked(data, events, event_id, tmin, tmax, sfreq):
    """Least-squares estimation of the evoked response from data.

    Parameters
    ----------
    data : ndarray, shape (n_channels, n_times)
        The data from which to estimate the evoked response.
    events : ndarray, shape (n_events, 3)
        The events typically returned by the read_events function.
        If some events don't match the events of interest as specified
        by event_id, they will be ignored.
    event_id : dict
        The id of the events to consider.
    tmin : float
        Start time before event.
    tmax : float
        End time after event.
    sfreq : float
        Sampling frequency.

    Returns
    -------
    evokeds_data : dict of ndarray
        A dict of evoked data for each event type in event_id.
    toeplitz : dict of ndarray
        A dict of toeplitz matrices for each event type in event_id.
    """
    nmin = int(tmin * sfreq)
    nmax = int(tmax * sfreq)

    window = nmax - nmin
    n_samples = data.shape[1]
    toeplitz_mat = dict()
    full_toep = list()
    for eid in event_id:
        # select events by type
        ix_ev = events[:, -1] == event_id[eid]

        # build toeplitz matrix
        trig = np.zeros((n_samples, 1))
        ix_trig = (events[ix_ev, 0]) + nmin
        trig[ix_trig] = 1
        toep_mat = linalg.toeplitz(trig[0:window], trig)
        toeplitz_mat[eid] = toep_mat
        full_toep.append(toep_mat)

    # Concatenate toeplitz
    full_toep = np.concatenate(full_toep)

    # least square estimation
    predictor = np.dot(linalg.pinv(np.dot(full_toep, full_toep.T)), full_toep)
    all_evokeds = np.dot(predictor, data.T)
    all_evokeds = np.vsplit(all_evokeds, len(event_id))

    # parse evoked response
    evoked_data = dict()
    for idx, eid in enumerate(event_id):
        evoked_data[eid] = all_evokeds[idx].T

    return evoked_data, toeplitz_mat