The following code examples, extracted from open-source Python projects, illustrate how to use scipy.linalg.svd().
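Before the examples, here is a minimal sketch of the basic call pattern (the matrix A and its shape are made up for illustration): scipy.linalg.svd returns U, the singular values s as a 1-D array, and Vh (the conjugate transpose of V), and the product of the three reconstructs the input.

import numpy as np
from scipy.linalg import svd

A = np.random.randn(6, 4)               # illustrative matrix
U, s, Vh = svd(A, full_matrices=False)  # economy SVD: U is 6x4, Vh is 4x4
A_rec = U @ np.diag(s) @ Vh             # reconstruct A from its factors
assert np.allclose(A, A_rec)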
def fit(self, graphs, y=None):
    rnd = check_random_state(self.random_state)
    n_samples = len(graphs)

    # get basis vectors
    if self.n_components > n_samples:
        n_components = n_samples
    else:
        n_components = self.n_components
    n_components = min(n_samples, n_components)
    inds = rnd.permutation(n_samples)
    basis_inds = inds[:n_components]
    basis = []
    for ind in basis_inds:
        basis.append(graphs[ind])

    basis_kernel = self.kernel(basis, basis, **self._get_kernel_params())

    # sqrt of kernel matrix on basis vectors
    U, S, V = svd(basis_kernel)
    S = np.maximum(S, 1e-12)
    self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
    self.components_ = basis
    self.component_indices_ = inds
    return self
def truncated_svd(A, k):
    """Compute the truncated SVD of the matrix `A` i.e. the `k` largest
    singular values as well as the corresponding singular vectors. It might
    return fewer singular values/vectors if one dimension of `A` is smaller
    than `k`.

    In the background it performs a full SVD. Therefore, it might be
    inefficient when `k` is much smaller than the dimensions of `A`.

    :param A: A real or complex matrix
    :param k: Number of singular values/vectors to compute
    :returns: u, s, v, where
        u: left-singular vectors
        s: singular values in descending order
        v: right-singular vectors
    """
    u, s, v = np.linalg.svd(A)
    k_prime = min(k, len(s))
    return u[:, :k_prime], s[:k_prime], v[:k_prime]


####################
#  Randomized SVD  #
####################
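A quick usage sketch for truncated_svd above, with an illustrative matrix: by the Eckart-Young theorem, the truncated factors give the best rank-k approximation of A in the Frobenius and spectral norms.

import numpy as np

A = np.random.randn(8, 5)
u, s, v = truncated_svd(A, k=2)
A_2 = u @ np.diag(s) @ v          # best rank-2 approximation of A
assert np.linalg.matrix_rank(A_2) <= 2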
def fit(self, X):
    '''Required for featurewise_center, featurewise_std_normalization
    and zca_whitening.

    # Arguments
        X: Numpy array, the data to fit on.
    '''
    if self.featurewise_center:
        self.mean = np.mean(X, axis=0)
        X -= self.mean

    if self.featurewise_std_normalization:
        self.std = np.std(X, axis=0)
        X /= (self.std + 1e-7)

    if self.zca_whitening:
        flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
        # note: this normalizes by the feature count; a sample-covariance
        # estimate would divide by flatX.shape[0] instead (compare the
        # other variants of this method elsewhere in this listing)
        sigma = np.dot(flatX.T, flatX) / flatX.shape[1]
        U, S, V = linalg.svd(sigma)
        self.principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
def csvd(arr):
    """
    Do the complex SVD of a 2D array, returning real valued U, S, VT

    http://stemblab.github.io/complex-svd/
    """
    C_r = arr.real
    C_i = arr.imag
    block_x = C_r.shape[0]
    block_y = C_r.shape[1]
    K = np.zeros((2 * block_x, 2 * block_y))
    # Upper left
    K[:block_x, :block_y] = C_r
    # Upper right
    K[:block_x, block_y:] = C_i
    # Lower left
    K[block_x:, :block_y] = -C_i
    # Lower right
    K[block_x:, block_y:] = C_r
    return svd(K, full_matrices=False)
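Because the real block embedding represents each complex entry twice, each singular value of the complex matrix should appear with multiplicity two in csvd's output. A hedged verification sketch, with a made-up test matrix:

import numpy as np

arr = np.random.randn(3, 4) + 1j * np.random.randn(3, 4)
_, S_real, _ = csvd(arr)
S_complex = np.linalg.svd(arr, compute_uv=False)
# singular values come back in descending order as duplicated pairs,
# so taking every other one recovers the complex singular values
assert np.allclose(S_real[::2], S_complex)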
def funm_svd(a, func):
    """Apply real scalar function to singular values of a matrix.

    Args:
        a : (N, N) array_like
            Matrix at which to evaluate the function.
        func : callable
            Callable object that evaluates a scalar function f.

    Returns:
        funm : (N, N) ndarray
            Value of the matrix function specified by func evaluated at `a`.
    """
    U, s, Vh = la.svd(a, lapack_driver='gesvd')
    S = np.diag(func(s))
    return U.dot(S).dot(Vh)
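For a symmetric positive semi-definite matrix the SVD coincides with the eigendecomposition, so funm_svd gives, for example, a matrix square root. A small sketch, assuming the example's `la` is scipy.linalg and `np` is NumPy; the PSD matrix is constructed purely for illustration:

import numpy as np

B = np.random.randn(4, 4)
a = B @ B.T                    # symmetric PSD, so SVD equals eigendecomposition
sqrt_a = funm_svd(a, np.sqrt)  # elementwise sqrt of the singular values
assert np.allclose(sqrt_a @ sqrt_a, a)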
def orthogonal(size, sparsity=-1, scale=1, dtype=numpy.float32):
    sizeX = size
    sizeY = size

    if sparsity < 0:
        sparsity = sizeY
    else:
        sparsity = numpy.minimum(sizeY, sparsity)

    values = numpy.zeros((sizeX, sizeY), dtype=dtype)
    for dx in range(sizeX):
        perm = numpy.random.permutation(sizeY)
        new_vals = numpy.random.normal(loc=0, scale=scale, size=(sparsity,))
        values[dx, perm[:sparsity]] = new_vals

    u, s, v = svd(values)
    values = u * scale
    return values.astype(dtype)
def get_zca_whitening_principal_components_img(X):
    """Return the ZCA whitening principal components matrix.

    Parameters
    -----------
    X : numpy array
        Batch of images with dimension of [n_example, row, col, channel] (default).
    """
    flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
    print("zca : computing sigma ..")
    sigma = np.dot(flatX.T, flatX) / flatX.shape[0]
    print("zca : computing U, S and V ..")
    U, S, V = linalg.svd(sigma)
    print("zca : computing principal components ..")
    principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
    return principal_components
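To actually whiten data with the returned matrix, each flattened sample is multiplied by it, just as the Keras-style fit methods elsewhere in this listing do. A hedged sketch with made-up image dimensions:

import numpy as np

X = np.random.rand(16, 8, 8, 3)                       # illustrative image batch
pc = get_zca_whitening_principal_components_img(X)
flatX = X.reshape(X.shape[0], -1)
whitened = flatX @ pc                                 # ZCA-whitened samples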
def update_scipy_svd(self):
    sess = u.get_default_session()
    target0 = sess.run(self.target)

    # A = u.diag(s).v', singular vectors are columns
    # TODO: catch "ValueError: array must not contain infs or NaNs"
    try:
        u0, s0, vt0 = linalg.svd(target0)
        v0 = vt0.T
    except Exception as e:
        print("Got error %s" % (repr(e),))
        if DUMP_BAD_SVD:
            dump32(target0, "badsvd")
        print("gesdd failed, trying gesvd")
        u0, s0, vt0 = linalg.svd(target0, lapack_driver="gesvd")
        v0 = vt0.T

    feed_dict = {self.holder.u: u0,
                 self.holder.v: v0,
                 self.holder.s: s0}
    sess.run(self.update_external_op, feed_dict=feed_dict)
def flatten(self, ind):
    '''
    Transform the set of points so that the subset of markers given as
    argument is as close to flat (w.r.t. the z-axis) as possible.

    Parameters
    ----------
    ind : list of bools
        Lists of marker indices that should all be in the same subspace
    '''
    # Transform references to indices if needed
    ind = [self.key2ind(s) for s in ind]

    # center point cloud around the group of indices
    centroid = self.X[:, ind].mean(axis=1, keepdims=True)
    X_centered = self.X - centroid

    # The rotation is given by the left matrix of the SVD
    U, S, V = la.svd(X_centered[:, ind], full_matrices=False)

    self.X = np.dot(U.T, X_centered) + centroid
def __init__(self, n_components, solver='svd'):
    """Principal component analysis (PCA) implementation.

    Transforms a dataset of possibly correlated values into n linearly
    uncorrelated components. The components are ordered such that the first
    has the largest possible variance and each following component has the
    largest possible variance given the previous components. This causes
    the early components to contain most of the variability in the dataset.

    Parameters
    ----------
    n_components : int
    solver : str, default 'svd'
        {'svd', 'eigen'}
    """
    self.solver = solver
    self.n_components = n_components
    self.components = None
    self.mean = None
def ITQtrain(data, nbit, niter):
    data_mean = npy.mean(data, axis=0)
    data = data - data_mean
    objPCA = PCA(copy=True, n_components=nbit, whiten=False)
    dataTrans = objPCA.fit_transform(data)
    codeITQ = npy.ones(dataTrans.shape, dtype=npy.float32)
    codeITQ[dataTrans < 0] = -1
    for tau in range(niter):
        dataTemp1 = npy.dot(codeITQ.T, dataTrans)
        matL, sig, matR = svd(dataTemp1)
        matRot = npy.dot(matR.T, matL.T)
        dataTemp2 = dataTrans.dot(matRot)
        codeITQ = npy.ones(dataTrans.shape, dtype=npy.float32)
        codeITQ[dataTemp2 < 0] = -1
    modelITQ = {"mu": data_mean, "objPCA": objPCA, "matRot": matRot}
    return modelITQ
def fullrank(X):
    '''
    return full-rank decomposition of X = FG^T
    '''
    rankX = rank(X)
    U, eigvals, Vh = la.svd(X)
    # construct an r-rank sigma-square-root matrix
    sigma = np.eye(rankX)
    for i in range(sigma.shape[0]):
        sigma[i, i] = np.sqrt(eigvals[i])
    F = U.dot(np.vstack((sigma, np.zeros((X.shape[0] - rankX, rankX)))))
    Gh = np.hstack((sigma, np.zeros((rankX, X.shape[1] - rankX)))).dot(Vh)
    return F, Gh
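A quick check of the factorization, with an illustrative rank-deficient matrix; this assumes the example's `rank` refers to numpy.linalg.matrix_rank (or an equivalent) and `la` is scipy.linalg:

import numpy as np

X = np.random.randn(5, 2) @ np.random.randn(2, 4)   # rank-2, 5x4 matrix
F, Gh = fullrank(X)
assert F.shape == (5, 2) and Gh.shape == (2, 4)
assert np.allclose(F.dot(Gh), X)                     # F Gh reproduces X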
def rotation_measure(A, B):
    '''
    function rotationMeasure measures how close a matrix can be to another
    matrix by rotation
    @param1: Matrix A
    @param2: Matrix B
    find the orthogonal matrix Q that minimizes ||A - BQ||_2
    '''
    # check that the two matrices have the same shape
    if A.shape != B.shape:
        raise Exception('Two matrices do not have the same shape')
    # C = B^T * A
    C = B.conjugate().T.dot(A)
    U, _, Vh = la.svd(C)
    # Q = U Vh
    return U.dot(Vh)


# compute the minimal angle between subspaces
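This is the classical orthogonal Procrustes solution. A usage sketch under made-up data, assuming `la` is scipy.linalg as in the example: a known orthogonal matrix is applied to B and then recovered.

import numpy as np

B = np.random.randn(6, 3)
Q_true, _ = np.linalg.qr(np.random.randn(3, 3))  # a random orthogonal matrix
A = B @ Q_true
Q = rotation_measure(A, B)
assert np.allclose(Q.T @ Q, np.eye(3))           # Q is orthogonal
assert np.allclose(B @ Q, A)                     # the rotation is recovered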
def __init__(self, sizes, problemnumber=None, matrices=[]):
    if sizes[0] != sizes[1] or sizes[2] != sizes[3]:
        raise Exception("Currently only square problems are supported.")
    elif sizes[0] % sizes[2] != 0:
        raise Exception("Dimensions are incompatible: m must be "
                        "divisible by p.")
    else:
        self.sizes = sizes
    self.problemnumber = problemnumber
    self._setproblem(matrices, problemnumber)

    # stats will be filled when the optimization is finished and
    # will be returned in the OptimizeResult instance.
    self.stats = dict([])
    # { "nbiter": 0,
    #   "svd": 0,
    #   "fev": 0,
    #   "gradient": 0,
    #   "blocksteps": 0,
    #   "full_results": [None]}
def _compute_linear_parameters(mu, u):
    """Compute the best-fitting linear parameters"""
    _compose_linear_fitting_data(mu, u)
    uu, sing, vv = linalg.svd(u['M'], full_matrices=False)

    # Compute the residuals
    u['resi'] = u['y'].copy()
    vec = np.empty(u['nfit'] - 1)
    for p in range(u['nfit'] - 1):
        vec[p] = np.dot(uu[:, p], u['y'])
        for k in range(u['nterms'] - 1):
            u['resi'][k] -= uu[k, p] * vec[p]
        vec[p] = vec[p] / sing[p]

    lambda_ = np.zeros(u['nfit'])
    for p in range(u['nfit'] - 1):
        sum_ = 0.
        for q in range(u['nfit'] - 1):
            sum_ += vv[q, p] * vec[q]
        lambda_[p + 1] = sum_
    lambda_[0] = u['fn'][0] - np.sum(lambda_[1:])
    rv = np.dot(u['resi'], u['resi']) / np.dot(u['y'], u['y'])
    return rv, lambda_
def _one_step(mu, u):
    """Evaluate the residual sum of squares fit for one set of mu values"""
    if np.abs(mu).max() > 1.0:
        return 1.0

    # Compose the data for the linear fitting, compute SVD, then residuals
    _compose_linear_fitting_data(mu, u)
    u['uu'], u['sing'], u['vv'] = linalg.svd(u['M'])
    u['resi'][:] = u['y'][:]
    for p in range(u['nfit'] - 1):
        dot = np.dot(u['uu'][p], u['y'])
        for k in range(u['nterms'] - 1):
            u['resi'][k] = u['resi'][k] - u['uu'][p, k] * dot

    # Return their sum of squares
    return np.dot(u['resi'], u['resi'])
def test_svd_flip():
    # Check that svd_flip works in both situations, and reconstructs input.
    rs = np.random.RandomState(1999)
    n_samples = 20
    n_features = 10
    X = rs.randn(n_samples, n_features)

    # Check matrix reconstruction
    U, S, V = linalg.svd(X, full_matrices=False)
    U1, V1 = svd_flip(U, V, u_based_decision=False)
    assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)

    # Check transposed matrix reconstruction
    XT = X.T
    U, S, V = linalg.svd(XT, full_matrices=False)
    U2, V2 = svd_flip(U, V, u_based_decision=True)
    assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)

    # Check that different flip methods are equivalent under reconstruction
    U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
    assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
    U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
    assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def fit(self, x):
    s = x.shape
    x = x.copy().reshape((s[0], np.prod(s[1:])))
    m = np.mean(x, axis=0)
    x -= m
    sigma = np.dot(x.T, x) / x.shape[0]
    U, S, V = linalg.svd(sigma)
    tmp = np.dot(U, np.diag(1. / np.sqrt(S + self.regularization)))
    tmp2 = np.dot(U, np.diag(np.sqrt(S + self.regularization)))
    self.ZCA_mat = th.shared(np.dot(tmp, U.T).astype(th.config.floatX))
    self.inv_ZCA_mat = th.shared(np.dot(tmp2, U.T).astype(th.config.floatX))
    self.mean = th.shared(m.astype(th.config.floatX))
def eigenDecompose(self, X, K, normalize=True):
    if (X.shape[1] >= X.shape[0]):
        s, U = la.eigh(K)
    else:
        U, s, _ = la.svd(X, check_finite=False, full_matrices=False)
        if (s.shape[0] < U.shape[1]):
            # note: can use low-rank formulas here
            s = np.concatenate((s, np.zeros(U.shape[1] - s.shape[0])))
        s = s ** 2
    if normalize:
        s /= float(X.shape[1])
    if (np.min(s) < -1e-10):
        raise Exception('Negative eigenvalues found')
    s[s < 0] = 0
    ind = np.argsort(s)[::-1]
    U = U[:, ind]
    s = s[ind]
    return s, U
def compute_epipole(F):
    """ Computes the (right) epipole from a
        fundamental matrix F.
        (Use with F.T for left epipole.) """

    # return null space of F (Fx=0)
    U, S, V = linalg.svd(F)
    e = V[-1]
    return e / e[2]
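Since the epipole is the null vector of F, it should satisfy F e = 0 (up to numerical error). A sketch with an illustrative rank-2 matrix standing in for a real fundamental matrix, assuming `linalg` is scipy.linalg as in the example:

import numpy as np

# a rank-2 3x3 matrix, the defining property of a fundamental matrix
F = np.random.randn(3, 2) @ np.random.randn(2, 3)
e = compute_epipole(F)
assert np.allclose(F @ e, 0)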
def fit(self, X, augment=False, rounds=1, seed=None):
    '''Required for featurewise_center, featurewise_std_normalization
    and zca_whitening.

    # Arguments
        X: Numpy array, the data to fit on.
        augment: whether to fit on randomly augmented samples
        rounds: if `augment`, how many augmentation passes to do over the data
        seed: random seed.
    '''
    if seed is not None:
        np.random.seed(seed)

    X = np.copy(X)
    if augment:
        aX = np.zeros(tuple([rounds * X.shape[0]] + list(X.shape)[1:]))
        for r in range(rounds):
            for i in range(X.shape[0]):
                aX[i + r * X.shape[0]] = self.random_transform(X[i])
        X = aX

    if self.featurewise_center:
        self.mean = np.mean(X, axis=0)
        X -= self.mean

    if self.featurewise_std_normalization:
        self.std = np.std(X, axis=0)
        X /= (self.std + 1e-7)

    if self.zca_whitening:
        flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
        sigma = np.dot(flatX.T, flatX) / flatX.shape[1]
        U, S, V = linalg.svd(sigma)
        self.principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
def random_orthogonal_matrix(n):
    """Returns a random, orthogonal matrix of n by n."""
    a = np.random.randn(n, n)
    U, _, _ = linalg.svd(a)
    assert U.shape == (n, n)
    return U
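The left singular vectors of a full SVD form an orthonormal basis, so the result satisfies both U^T U = I and U U^T = I. A quick check, assuming the example's imports (np as NumPy, linalg as scipy.linalg):

import numpy as np

U = random_orthogonal_matrix(5)
assert np.allclose(U.T @ U, np.eye(5))
assert np.allclose(U @ U.T, np.eye(5))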
def fit(self, X, augment=False, rounds=1, seed=None):
    '''Required for featurewise_center, featurewise_std_normalization
    and zca_whitening.

    # Arguments
        X: Numpy array, the data to fit on.
        augment: whether to fit on randomly augmented samples
        rounds: if `augment`, how many augmentation passes to do over the data
        seed: random seed.
    '''
    if seed is not None:
        np.random.seed(seed)

    X = np.copy(X)
    if augment:
        aX = np.zeros(tuple([rounds * X.shape[0]] + list(X.shape)[1:]))
        for r in range(rounds):
            for i in range(X.shape[0]):
                aX[i + r * X.shape[0]] = self.random_transform(X[i])
        X = aX

    if self.featurewise_center:
        self.mean = np.mean(X, axis=0)
        X -= self.mean

    if self.featurewise_std_normalization:
        self.std = np.std(X, axis=0)
        X /= (self.std + 1e-7)

    if self.zca_whitening:
        flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
        sigma = np.dot(flatX.T, flatX) / flatX.shape[0]
        U, S, V = linalg.svd(sigma)
        self.principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
def svd_wrapper(X, rank=None):
    """
    Computes the (possibly partial) SVD of a matrix.
    Handles the case where X is either dense or sparse.

    Parameters
    ----------
    X: either dense or sparse
    rank: rank of the desired SVD (required for sparse matrices)

    Output
    ------
    U, D, V
    the columns of U are the left singular vectors
    the COLUMNS of V are the right singular vectors
    """
    if isinstance(X, LinearOperator):
        scipy_svds = svds(convert2scipy(X), rank)
        U, D, V = fix_scipy_svds(scipy_svds)
        V = V.T
    elif issparse(X):
        scipy_svds = svds(X, rank)
        U, D, V = fix_scipy_svds(scipy_svds)
        V = V.T
    else:
        # TODO: implement partial SVD
        U, D, V = full_svd(X, full_matrices=False)
        V = V.T

    if rank:
        U = U[:, :rank]
        D = D[:rank]
        V = V[:, :rank]

    return U, D, V
def low_rank_repr(X, n_dim):
    U, S, V = svd(X.T, full_matrices=False)
    mask = S > 1
    V = V[mask]
    S = S[mask]
    R = (V.T * (1 - S ** -2)).dot(V)
    return R
def _hrchy(DB, classes, src_idx, trg_idx):
    from hierarchy import load

    def simplex(num_vecs):
        from scipy.linalg import svd
        cw = np.eye(num_vecs)
        cw -= cw.mean(axis=1, keepdims=True)
        cw, _, _ = svd(cw)
        return cw[:, :num_vecs - 1]

    hrchy_fn = 'data/%s/wordnet_hierarchy.txt' % DB
    hrchy = load(hrchy_fn)
    constrains = OrderedDict()
    k = 0
    for q in hrchy.nodes:
        if len(q.fpointer) <= 1:
            continue
        labs = len(q.fpointer) * np.ones((len(classes)))
        for s, cls in enumerate(classes):
            if cls in q.name['classes']:
                labs[s] = [cls in hrchy[cid].name['classes']
                           for cid in q.fpointer].index(True)
        assigns = np.zeros((len(q.fpointer) + 1, len(classes)))
        for c in range(len(q.fpointer) + 1):
            assigns[c, labs == c] = 1
        constrains[q.identifier] = {
            'codewords': simplex(len(q.fpointer) + 1).T,
            'idxDim': np.arange(k, k + len(q.fpointer)),
            'labels': {'source': assigns[:, src_idx],
                       'target': assigns[:, trg_idx]}}
        k += len(q.fpointer)
    source_deg = [key for key in constrains.keys()
                  if constrains[key]['labels']['source'].sum(axis=1).max() == len(src_idx)]
    [constrains.pop(key) for key in source_deg]
    return constrains
def load_caltech101_30(folder=CALTECH101_30_DIR, tiny_problem=False):
    caltech = scio.loadmat(folder + '/caltech101-30.matlab')
    k_train, k_test = caltech['Ktrain'], caltech['Ktest']
    label_tr, label_te = caltech['tr_label'], caltech['te_label']
    file_tr, file_te = caltech['tr_files'], caltech['te_files']
    if tiny_problem:
        pattern_step = 5
        fraction_limit = 0.2
        k_train = k_train[:int(len(label_tr) * fraction_limit):pattern_step,
                          :int(len(label_tr) * fraction_limit):pattern_step]
        label_tr = label_tr[:int(len(label_tr) * fraction_limit):pattern_step]
    U, s, Vh = linalg.svd(k_train)
    S_sqrt = linalg.diagsvd(s ** 0.5, len(s), len(s))
    X = np.dot(U, S_sqrt)  # examples in rows
    train_x, val_x, test_x = X[0:len(X):3, :], X[1:len(X):3, :], X[2:len(X):3, :]
    label_tr_enc = to_one_hot_enc(np.array(label_tr) - 1)
    train_y, val_y, test_y = (label_tr_enc[0:len(X):3, :],
                              label_tr_enc[1:len(X):3, :],
                              label_tr_enc[2:len(X):3, :])
    train_file, val_file, test_file = (file_tr[0:len(X):3],
                                       file_tr[1:len(X):3],
                                       file_tr[2:len(X):3])
    test_dataset = Dataset(data=test_x, target=test_y, info={'files': test_file})
    validation_dataset = Dataset(data=val_x, target=val_y, info={'files': val_file})
    training_dataset = Dataset(data=train_x, target=train_y, info={'files': train_file})
    return Datasets(train=training_dataset, validation=validation_dataset, test=test_dataset)
def test():
    data = np.random.random((5000, 100))
    u, s, v = linalg.svd(data, full_matrices=False)
    pca = np.dot(u[:, :10].T, data)
    results = fastica(pca.T, whiten=False)
def test():
    data = np.random.random((5000, 100))
    u, s, v = linalg.svd(data)
    pca = np.dot(u[:, :10].T, data)
    results = fastica(pca.T, whiten=False)
def main():
    parser = argparse.ArgumentParser('Transforms the space by applying the SVD')
    parser.add_argument('--input', '-i', help='Input file')
    parser.add_argument('--output', '-o', help='Output file')
    args = parser.parse_args()

    space = load_mikolov_text(args.input)
    U, s, Vh = svd(space.matrix, full_matrices=False)
    transformed = U.dot(np.diag(s))
    newspace = VectorSpace(transformed, space.vocab)
    newspace.save_mikolov_text(args.output)
def fit(self, X,
        augment=False,  # fit on randomly augmented samples
        rounds=1,       # if augment, how many augmentation passes over the data do we use
        seed=None):
    '''
    Required for featurewise_center, featurewise_std_normalization
    and zca_whitening.
    '''
    X = np.copy(X)
    if augment:
        aX = np.zeros(tuple([rounds * X.shape[0]] + list(X.shape)[1:]))
        for r in range(rounds):
            for i in range(X.shape[0]):
                img = array_to_img(X[i])
                img = self.random_transform(img)
                aX[i + r * X.shape[0]] = img_to_array(img)
        X = aX

    if self.featurewise_center:
        self.mean = np.mean(X, axis=0)
        X -= self.mean
    if self.featurewise_std_normalization:
        self.std = np.std(X, axis=0)
        X /= self.std

    if self.zca_whitening:
        flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
        fudge = 10e-6
        sigma = np.dot(flatX.T, flatX) / flatX.shape[1]
        U, S, V = linalg.svd(sigma)
        self.principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + fudge))), U.T)
def condensed_svd(self, M, tol=1e-3, store_singular_vals=False):
    U, S, Vt = linalg.svd(M, full_matrices=False)
    if store_singular_vals:
        self.singular_vals = S
    # want tolerance on fraction of variance in singular value;
    # when not norm_covariance, need to normalize singular values
    S_norm = 1.0 if self.norm_covariance else np.sum(S)
    rank = np.sum((S / S_norm) > tol)
    return U[:, :rank], S[:rank], Vt[:rank, :], S_norm
def condensed_svd(self, M, tol=1e-3, store_singular_vals=False):
    U, S, Vt = linalg.svd(M, full_matrices=False)
    if store_singular_vals:
        self.singular_vals = S
    # want tolerance on fraction of variance in singular value;
    # when not norm_covariance, need to normalize singular values
    S_norm = np.sum(S)
    rank = np.sum((S / S_norm) > tol)
    return U[:, :rank], S[:rank], Vt[:rank, :], S_norm
def make_orthogonal(self):
    # orthogonalize the columns
    U, s, V = svd(self.Tm, full_matrices=False)
    self.Tm = np.diag(s).dot(V)
def ComputeCCA(X, Y):
    assert X.shape[0] == Y.shape[0], (X.shape, Y.shape, "Unequal number of rows")
    assert X.shape[0] > 1, (X.shape, "Must have more than 1 row")

    X = NormCenterMatrix(X)
    Y = NormCenterMatrix(Y)
    X_q, _, _ = decomp_qr.qr(X, overwrite_a=True, mode='economic', pivoting=True)
    Y_q, _, _ = decomp_qr.qr(Y, overwrite_a=True, mode='economic', pivoting=True)
    C = np.dot(X_q.T, Y_q)
    r = linalg.svd(C, full_matrices=False, compute_uv=False)
    d = min(X.shape[1], Y.shape[1])
    r = r[:d]
    r = np.minimum(np.maximum(r, 0.0), 1.0)  # remove roundoff errs
    return r.mean()
def pseudo_inverse(mat, eps=1e-10):
    """Computes pseudo-inverse of mat, treating eigenvalues below eps as 0."""
    s, u, v = tf.svd(mat)
    # eps is the zero threshold for eigenvalues: values below it are left
    # (approximately) zero rather than inverted
    si = tf.where(tf.less(s, eps), s, 1. / s)
    return u @ tf.diag(si) @ tf.transpose(v)
def symsqrt(mat, eps=1e-7):
    """Symmetric square root."""
    s, u, v = tf.svd(mat)
    # sqrt is unstable around 0, just use 0 in such case
    print("Warning, cutting off at eps")
    si = tf.where(tf.less(s, eps), s, tf.sqrt(s))
    return u @ tf.diag(si) @ tf.transpose(v)
def pseudo_inverse_sqrt(mat, eps=1e-7):
    """Half pseudo-inverse."""
    s, u, v = tf.svd(mat)
    # zero threshold for eigenvalues
    si = tf.where(tf.less(s, eps), s, 1. / tf.sqrt(s))
    return u @ tf.diag(si) @ tf.transpose(v)
def pseudo_inverse_sqrt2(svd, eps=1e-7):
    """Half pseudo-inverse, accepting existing SVD values."""
    # zero threshold for eigenvalues
    if svd.__class__.__name__ == 'SvdTuple':
        (s, u, v) = (svd.s, svd.u, svd.v)
    elif svd.__class__.__name__ == 'SvdWrapper':
        (s, u, v) = (svd.s, svd.u, svd.v)
    else:
        assert False, "Unknown type"
    si = tf.where(tf.less(s, eps), s, 1. / tf.sqrt(s))
    return u @ tf.diag(si) @ tf.transpose(v)
def pseudo_inverse2(svd, eps=1e-7):
    """Pseudo-inverse, accepting existing SVD values."""
    # use float32 machine precision as cut-off (works for MKL)
    # https://www.wolframcloud.com/objects/927b2aa5-de9c-46f5-89fe-c4a58aa4c04b
    if svd.__class__.__name__ == 'SvdTuple':
        (s, u, v) = (svd.s, svd.u, svd.v)
    elif svd.__class__.__name__ == 'SvdWrapper':
        (s, u, v) = (svd.s, svd.u, svd.v)
    else:
        assert False, "Unknown type"
    max_eigen = tf.reduce_max(s)
    si = tf.where(s / max_eigen < eps, 0. * s, 1. / s)
    return u @ tf.diag(si) @ tf.transpose(v)
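The last few examples use the TensorFlow 1.x API (tf.svd, tf.diag). As a hedged sketch only, here is how the same pseudo-inverse might look under TensorFlow 2.x, where those calls moved to tf.linalg.svd and tf.linalg.diag; like the originals, it is intended for a symmetric positive semi-definite mat, so u and v coincide.

import tensorflow as tf

def pseudo_inverse_tf2(mat, eps=1e-10):
    # tf.linalg.svd returns (s, u, v), with v rather than its transpose
    s, u, v = tf.linalg.svd(mat)
    # zero out the reciprocals of singular values below the threshold
    si = tf.where(s < eps, tf.zeros_like(s), 1. / s)
    return u @ tf.linalg.diag(si) @ tf.transpose(v)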