The following 13 code examples, extracted from open-source Python projects, illustrate how to use numpy.linalg.qr().
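As a quick orientation before the extracted examples, here is a minimal sketch (plain NumPy, arbitrary test values) of the shapes returned by the three most common modes of numpy.linalg.qr():

import numpy as np

a = np.arange(6, dtype=float).reshape(3, 2)    # 3x2 test matrix

q, r = np.linalg.qr(a)                         # default mode='reduced': q is 3x2, r is 2x2
qc, rc = np.linalg.qr(a, mode='complete')      # qc is 3x3, rc is 3x2
r_only = np.linalg.qr(a, mode='r')             # only the 2x2 triangular factor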
def _lcanonicalize(self, to_site):
    """Left-canonicalizes all local tensors _ltens[:to_site] in place

    :param to_site: Index of the site up to which canonicalization is to
        be performed

    """
    assert 0 <= to_site < len(self), 'to_site={!r}'.format(to_site)

    lcanon, rcanon = self._lt.canonical_form
    for site in range(lcanon, to_site):
        ltens = self._lt[site]
        q, r = qr(ltens.reshape((-1, ltens.shape[-1])))
        # if ltens.shape[-1] > prod(ltens.phys_shape) --> trivial comp.
        # can be accounted by adapting rank here
        newtens = (q.reshape(ltens.shape[:-1] + (-1,)),
                   matdot(r, self._lt[site + 1]))
        self._lt.update(slice(site, site + 2), newtens,
                        canonicalization=('left', None))
def _rcanonicalize(self, to_site):
    """Right-canonicalizes all local tensors _ltens[to_site:] in place

    :param to_site: Index of the site up to which canonicalization is to
        be performed

    """
    assert 0 < to_site <= len(self), 'to_site={!r}'.format(to_site)

    lcanon, rcanon = self.canonical_form
    for site in range(rcanon - 1, to_site - 1, -1):
        ltens = self._lt[site]
        q, r = qr(ltens.reshape((ltens.shape[0], -1)).T)
        # if ltens.shape[-1] > prod(ltens.phys_shape) --> trivial comp.
        # can be accounted by adapting rank here
        newtens = (matdot(self._lt[site - 1], r.T),
                   q.T.reshape((-1,) + ltens.shape[1:]))
        self._lt.update(slice(site - 1, site + 1), newtens,
                        canonicalization=(None, 'right'))
def _extract_factors(tens, ndims):
    """Extract iteratively the leftmost MPO tensor with given number of
    legs by a QR decomposition

    :param np.ndarray tens: Full tensor to be factorized
    :param ndims: Number of physical legs per site or iterator over number
        of physical legs
    :returns: List of local tensors with given number of legs yielding a
        factorization of tens

    """
    current = next(ndims) if isinstance(ndims, collections.abc.Iterator) else ndims
    if tens.ndim == current + 2:
        return [tens]
    elif tens.ndim < current + 2:
        raise AssertionError("Number of remaining legs insufficient.")
    else:
        unitary, rest = qr(tens.reshape((np.prod(tens.shape[:current + 1]), -1)))
        unitary = unitary.reshape(tens.shape[:current + 1] + rest.shape[:1])
        rest = rest.reshape(rest.shape[:1] + tens.shape[current + 1:])
        return [unitary] + _extract_factors(rest, ndims)
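The three snippets above share one pattern: reshape a tensor into a matrix, QR-decompose it, keep the orthonormal factor q as the new left-canonical local tensor, and push r to the right. Below is a standalone sketch of that pattern in plain NumPy (tensor_train is a hypothetical helper name; one physical leg per site is assumed):

import numpy as np

def tensor_train(tens):
    # Split a d-leg tensor into d left-orthogonal 3-leg tensors
    # by repeated QR decompositions, mirroring _extract_factors above.
    factors = []
    rest = tens.reshape((1,) + tens.shape + (1,))  # dummy outer bonds of size 1
    while rest.ndim > 3:
        bond, phys = rest.shape[0], rest.shape[1]
        q, r = np.linalg.qr(rest.reshape(bond * phys, -1))
        factors.append(q.reshape(bond, phys, -1))  # left-orthogonal local tensor
        rest = r.reshape((q.shape[1],) + rest.shape[2:])
    factors.append(rest)
    return factors

factors = tensor_train(np.random.rand(2, 3, 4, 5))
print([f.shape for f in factors])  # [(1, 2, 2), (2, 3, 6), (6, 4, 5), (5, 5, 1)]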
def test_mode_raw(self):
    # The factorization is not unique and varies between libraries,
    # so it is not possible to check against known values. Functional
    # testing is a possibility, but awaits the exposure of more
    # of the functions in lapack_lite. Consequently, this test is
    # very limited in scope. Note that the results are in FORTRAN
    # order, hence the h arrays are transposed.
    a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double)

    # Test double
    h, tau = linalg.qr(a, mode='raw')
    assert_(h.dtype == np.double)
    assert_(tau.dtype == np.double)
    assert_(h.shape == (2, 3))
    assert_(tau.shape == (2,))

    h, tau = linalg.qr(a.T, mode='raw')
    assert_(h.dtype == np.double)
    assert_(tau.dtype == np.double)
    assert_(h.shape == (3, 2))
    assert_(tau.shape == (2,))
def check_qr(self, a):
    # This test expects the argument `a` to be an ndarray or
    # a subclass of an ndarray of inexact type.
    a_type = type(a)
    a_dtype = a.dtype
    m, n = a.shape
    k = min(m, n)

    # mode == 'complete'
    q, r = linalg.qr(a, mode='complete')
    assert_(q.dtype == a_dtype)
    assert_(r.dtype == a_dtype)
    assert_(isinstance(q, a_type))
    assert_(isinstance(r, a_type))
    assert_(q.shape == (m, m))
    assert_(r.shape == (m, n))
    assert_almost_equal(dot(q, r), a)
    assert_almost_equal(dot(q.T.conj(), q), np.eye(m))
    assert_almost_equal(np.triu(r), r)

    # mode == 'reduced'
    q1, r1 = linalg.qr(a, mode='reduced')
    assert_(q1.dtype == a_dtype)
    assert_(r1.dtype == a_dtype)
    assert_(isinstance(q1, a_type))
    assert_(isinstance(r1, a_type))
    assert_(q1.shape == (m, k))
    assert_(r1.shape == (k, n))
    assert_almost_equal(dot(q1, r1), a)
    assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k))
    assert_almost_equal(np.triu(r1), r1)

    # mode == 'r'
    r2 = linalg.qr(a, mode='r')
    assert_(r2.dtype == a_dtype)
    assert_(isinstance(r2, a_type))
    assert_almost_equal(r2, r1)
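Stripped of the test harness, the properties check_qr verifies can be demonstrated directly; a minimal sketch on a random matrix:

import numpy as np

a = np.random.rand(5, 3)
q, r = np.linalg.qr(a, mode='reduced')

assert np.allclose(q @ r, a)                    # the factors reproduce a
assert np.allclose(q.T.conj() @ q, np.eye(3))   # q has orthonormal columns
assert np.allclose(np.triu(r), r)               # r is upper triangular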
def test_qr_empty(self):
    a = np.zeros((0, 2))
    assert_raises(linalg.LinAlgError, linalg.qr, a)
def qr(a):
    return matlabarray(_qr(np.asarray(a)))
def update(self, Y):
    """Alg. 3: Randomized streaming update of the singular vectors at time t.

    Args:
        Y (numpy array): m-by-n_t matrix which has n_t "normal" unit vectors.

    """
    if not hasattr(self, 'E'):
        # initial sketch
        M = np.empty_like(Y)
        M[:] = Y[:]
    else:
        # combine current sketched matrix with input at time t
        # D: m-by-(n+ell-1) matrix
        M = np.concatenate((self.E[:, :-1], Y), axis=1)

    G = np.random.normal(0., 0.1, (self.m, 100 * self.ell))

    MM = np.dot(M, M.T)
    Q, R = ln.qr(np.dot(MM, G))

    # eig() returns eigenvalues/eigenvectors in unsorted order
    s, A = ln.eig(np.dot(np.dot(Q.T, MM), Q))
    order = np.argsort(s)[::-1]
    s = s[order]
    A = A[:, order]

    U = np.dot(Q, A)

    # update k orthogonal bases
    self.U_k = U[:, :self.k]

    U_ell = U[:, :self.ell]
    s_ell = s[:self.ell]

    # shrink step in the Frequent Directions algorithm
    delta = s_ell[-1]
    s_ell = np.sqrt(s_ell - delta)

    self.E = np.dot(U_ell, np.diag(s_ell))
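The QR call above implements randomized range finding: multiplying the Gram matrix by a Gaussian test matrix and orthonormalizing the product yields a basis Q that approximately spans the dominant eigenspace, so the eigenproblem can then be solved in the small projected space. A minimal isolated sketch (dimensions are illustrative; eigh replaces eig here since the projected matrix is symmetric):

import numpy as np

rng = np.random.default_rng(0)
M = rng.standard_normal((50, 200))     # sketched data matrix
MM = M @ M.T                           # 50x50 Gram matrix, as in the update above

G = rng.standard_normal((50, 10))      # Gaussian test matrix: 10 probe vectors
Q, _ = np.linalg.qr(MM @ G)            # orthonormal basis for the approximate range

s, A = np.linalg.eigh(Q.T @ MM @ Q)    # small 10x10 eigenproblem (Rayleigh-Ritz)
order = np.argsort(s)[::-1]
U = Q @ A[:, order]                    # approximate dominant eigenvectors of MM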
def tucker_tensor(shape, rank, full=False, random_state=None):
    """Generates a random Tucker tensor

    Parameters
    ----------
    shape : tuple
        shape of the tensor to generate
    rank : int or int list
        rank of the Tucker decomposition
        if int, the same rank is used for each mode
        otherwise, dimension of each mode
    full : bool, optional, default is False
        if True, a full tensor is returned
        otherwise, the decomposed tensor is returned
    random_state : `np.random.RandomState`

    Returns
    -------
    tucker_tensor : ND-array or (ND-array, 2D-array list)
        ND-array : full tensor if `full` is True
        (ND-array, 2D-array list) : core tensor and list of factors otherwise

    """
    rns = check_random_state(random_state)
    if isinstance(rank, int):
        rank = [rank for _ in shape]

    for i, (s, r) in enumerate(zip(shape, rank)):
        if r > s:
            raise ValueError('The rank should be smaller than the tensor size, '
                             'yet rank[{0}]={1} > shape[{0}]={2}.'.format(i, r, s))

    factors = []
    for (s, r) in zip(shape, rank):
        Q, _ = qr(rns.random_sample((s, s)))
        factors.append(T.tensor(Q[:, :r]))

    core = T.tensor(rns.random_sample(rank))

    if full:
        return tucker_to_tensor(core, factors)
    else:
        return core, factors
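The Q, _ = qr(...) line above is the standard trick for drawing a random matrix with orthonormal columns: QR-factorize a random square matrix and keep the first r columns of Q. In isolation:

import numpy as np

rng = np.random.default_rng(0)
Q, _ = np.linalg.qr(rng.random((5, 5)))   # 5x5 orthogonal matrix
factor = Q[:, :3]                         # keep r=3 columns: a random 5x3 orthonormal factor
assert np.allclose(factor.T @ factor, np.eye(3))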
def update_model(self, y):
    y = self.proj.reduce(np.array([y]).T)
    y = np.ravel(preprocessing.normalize(y, norm='l2', axis=0))

    if not hasattr(self, 'E'):
        self.E = np.zeros((self.k, self.ell))

    # combine current sketched matrix with input at time t
    zero_cols = np.nonzero([np.isclose(s_col, 0.0)
                            for s_col in np.sum(self.E, axis=0)])[0]
    j = zero_cols[0] if zero_cols.size != 0 else self.ell - 1  # left-most all-zero column in E
    self.E[:, j] = y

    Gaussian = np.random.normal(0., 0.1, (self.k, 100 * self.ell))

    MM = np.dot(self.E, self.E.T)
    Q, R = ln.qr(np.dot(MM, Gaussian))

    # eig() returns eigenvalues/eigenvectors in unsorted order
    s, A = ln.eig(np.dot(np.dot(Q.T, MM), Q))
    order = np.argsort(s)[::-1]
    s = s[order]
    A = A[:, order]

    U = np.dot(Q, A)

    # update the tracked orthonormal bases
    self.U_r = U[:, :self.r]

    # update ell orthogonal bases
    U_ell = U[:, :self.ell]
    s_ell = s[:self.ell]

    # shrink step in the Frequent Directions algorithm
    delta = s_ell[-1]
    s_ell = np.sqrt(s_ell - delta)

    self.E = np.dot(U_ell, np.diag(s_ell))
def __simultaneous_iteration(self, A, k, eps):
    n, d = A.shape
    q = int(np.log((n / eps) / eps))

    G = np.random.normal(0., 1., (d, k))

    # Gram-Schmidt
    Y = np.dot(np.dot(ln.matrix_power(np.dot(A, A.T), q), A), G)  # (n, k)
    Q, R = ln.qr(Y, mode='complete')

    return Q
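For context, the same routine as a free function with a usage example (function name and test values are illustrative). Note that mode='complete' makes qr return a full n-by-n orthonormal basis even though Y only has k columns:

import numpy as np

def simultaneous_iteration(A, k, eps=0.1):
    # Randomized subspace (power) iteration, as in the method above:
    # the power (A A^T)^q sharpens the spectral gap before the QR step.
    n, d = A.shape
    q = int(np.log((n / eps) / eps))
    G = np.random.normal(0., 1., (d, k))
    Y = np.linalg.matrix_power(A @ A.T, q) @ A @ G   # (n, k)
    Q, _ = np.linalg.qr(Y, mode='complete')
    return Q

Q = simultaneous_iteration(np.random.rand(20, 8), k=3)
print(Q.shape)   # (20, 20)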
def mils(A, B, y, p=1):
    # x_hat, z_hat = mils(A, B, y, p) produces p pairs of optimal solutions
    # to the mixed integer least squares problem min_{x,z} ||y - Ax - Bz||,
    # where x and z are real and integer vectors, respectively.
    #
    # Input arguments:
    #    A - m by k real matrix
    #    B - m by n real matrix
    #        [A,B] has full column rank
    #    y - m-dimensional real vector
    #    p - the number of optimal solutions
    #
    # Output arguments:
    #    x_hat - k by p real matrix
    #    z_hat - n by p integer matrix (in double precision).
    #            The pair {x_hat(:,j), z_hat(:,j)} is the j-th optimal solution,
    #            i.e., its residual is the j-th smallest, so
    #            ||y-A*x_hat(:,1)-B*z_hat(:,1)|| <= ... <= ||y-A*x_hat(:,p)-B*z_hat(:,p)||

    m, k = A.shape
    m2, n = B.shape
    if m != m2 or y.shape != (m, 1):
        raise ValueError("Input arguments have a matrix dimension error!")

    if rank(A) + rank(B) < k + n:
        raise ValueError("[A, B] must have full column rank!")

    Q, R = qr(A, mode='complete')
    Q_A = Q[:, :k]
    Q_Abar = Q[:, k:]
    R_A = R[:k, :]

    # Compute the p optimal integer least squares solutions
    z_hat = ils(dot(Q_Abar.T, B), dot(Q_Abar.T, y), p)

    # Compute the corresponding real least squares solutions
    x_hat = lstsq(R_A, dot(Q_A.T, (dot(y, ones((1, p))) - dot(B, z_hat))))[0]

    return x_hat, z_hat
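The Q_A/Q_Abar split above is the textbook QR reduction of least squares: the residual component in range(A) is zeroed by a triangular solve, leaving only the component in the orthogonal complement for the integer search. A minimal sketch of the real-valued special case (no integer block B; random test data):

import numpy as np

rng = np.random.default_rng(1)
A = rng.standard_normal((6, 3))
y = rng.standard_normal((6, 1))

Q, R = np.linalg.qr(A, mode='complete')
Q_A, R_A = Q[:, :3], R[:3, :]              # thin factors, like Q_A and R_A in mils
x_hat = np.linalg.solve(R_A, Q_A.T @ y)    # minimizes ||y - A x||

assert np.allclose(x_hat, np.linalg.lstsq(A, y, rcond=None)[0])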
def projector_splitting(self, eta=5e-6, d=100,
                        MAX_ITER=1, from_iter=0, display=0,
                        init=(False, None, None), save=(False, None)):
    """
    Projector splitting algorithm for word2vec matrix factorization.
    """

    # Initialization
    if init[0]:
        self.C = init[1]
        self.W = init[2]
    else:
        self.C = np.random.rand(d, self.D.shape[0])
        self.W = np.random.rand(d, self.D.shape[1])

    if save[0] and from_iter == 0:
        self.save_CW(save[1], 0)

    X = (self.C).T.dot(self.W)

    for it in range(from_iter, from_iter + MAX_ITER):
        if display:
            print("Iter #:", it + 1)

        U, S, V = svds(X, d)
        S = np.diag(S)
        V = V.T

        self.C = U.dot(np.sqrt(S)).T
        self.W = np.sqrt(S).dot(V.T)

        if save[0]:
            self.save_CW(save[1], it + 1)

        F = self.grad_MF(self.C, self.W)
        #mask = np.random.binomial(1, .5, size=F.shape)
        #F = F * mask

        U, _ = qr((X + eta * F).dot(V))
        V, S = qr((X + eta * F).T.dot(U))
        V = V.T
        S = S.T

        X = U.dot(S).dot(V)