We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.tril().
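Before the project examples, here is a minimal sketch of what numpy.tril() does (the 3x3 array below is purely illustrative): it returns a copy of its input with every element above the k-th diagonal set to zero.

import numpy as np

a = np.arange(1, 10).reshape(3, 3)

# k=0 (the default) keeps the main diagonal and everything below it.
print(np.tril(a))
# [[1 0 0]
#  [4 5 0]
#  [7 8 9]]

# k=-1 keeps only the strictly lower triangle; several snippets below
# use this to count each off-diagonal entry of a symmetric matrix once.
print(np.tril(a, k=-1))
# [[0 0 0]
#  [4 0 0]
#  [7 8 0]]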
def alpha(self):
    # Cronbach's alpha for each block of manifest variables
    alpha = pd.DataFrame(0, index=np.arange(1), columns=self.latent)

    for i in range(self.lenlatent):
        block = self.data_[self.Variables['measurement'][
            self.Variables['latent'] == self.latent[i]]]
        p = len(block.columns)
        if p != 1:
            p_ = len(block)
            correction = np.sqrt((p_ - 1) / p_)
            soma = np.var(np.sum(block, axis=1))
            cor_ = pd.DataFrame.corr(block)
            denominador = soma * correction ** 2
            # sum each off-diagonal correlation once via the lower triangle
            numerador = 2 * np.sum(np.tril(cor_) - np.diag(np.diag(cor_)))
            alpha_ = (numerador / denominador) * (p / (p - 1))
            alpha[self.latent[i]] = alpha_
        else:
            alpha[self.latent[i]] = 1

    return alpha.T
def getMedianDistanceBetweenSamples(self, sampleSet=None):
    """
    Jaakkola's heuristic method for setting the width parameter of the
    Gaussian radial basis function kernel is to pick a quantile (usually
    the median) of the distribution of Euclidean distances between points
    having different labels.

    Reference:
    Jaakkola, M. Diekhaus, and D. Haussler. Using the Fisher kernel
    method to detect remote protein homologies. In T. Lengauer,
    R. Schneider, P. Bork, D. Brutlad, J. Glasgow, H.-W. Mewes, and
    R. Zimmer, editors, Proceedings of the Seventh International
    Conference on Intelligent Systems for Molecular Biology.
    """
    numrows = sampleSet.shape[0]
    samples = sampleSet

    # row-wise squared norms (numpy.sum, not the builtin sum, so that
    # axis=1 is honored)
    G = numpy.sum(samples * samples, axis=1)
    Q = numpy.tile(G[:, None], (1, numrows))
    R = numpy.tile(G, (numrows, 1))

    distances = Q + R - 2 * numpy.dot(samples, samples.T)
    # keep each pairwise distance once (strictly upper triangle)
    distances = distances - numpy.tril(distances)
    distances = distances.reshape(numrows ** 2, 1, order="F").copy()

    return numpy.sqrt(0.5 * numpy.median(distances[distances > 0]))
def test_tril_triu_ndim3():
    for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
        a = np.array([
            [[1, 1], [1, 1]],
            [[1, 1], [1, 0]],
            [[1, 1], [0, 0]],
        ], dtype=dtype)
        a_tril_desired = np.array([
            [[1, 0], [1, 1]],
            [[1, 0], [1, 0]],
            [[1, 0], [0, 0]],
        ], dtype=dtype)
        a_triu_desired = np.array([
            [[1, 1], [0, 1]],
            [[1, 1], [0, 0]],
            [[1, 1], [0, 0]],
        ], dtype=dtype)
        a_triu_observed = np.triu(a)
        a_tril_observed = np.tril(a)
        yield assert_array_equal, a_triu_observed, a_triu_desired
        yield assert_array_equal, a_tril_observed, a_tril_desired
        yield assert_equal, a_triu_observed.dtype, a.dtype
        yield assert_equal, a_tril_observed.dtype, a.dtype
def test_tril_triu_dtype():
    # Issue 4916
    # tril and triu should return the same dtype as input
    for c in np.typecodes['All']:
        if c == 'V':
            continue
        arr = np.zeros((3, 3), dtype=c)
        assert_equal(np.triu(arr).dtype, arr.dtype)
        assert_equal(np.tril(arr).dtype, arr.dtype)

    # check special cases
    arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'],
                    ['2004-01-01T12:00', '2003-01-03T13:45']],
                   dtype='datetime64')
    assert_equal(np.triu(arr).dtype, arr.dtype)
    assert_equal(np.tril(arr).dtype, arr.dtype)

    arr = np.zeros((3, 3), dtype='f4,f4')
    assert_equal(np.triu(arr).dtype, arr.dtype)
    assert_equal(np.tril(arr).dtype, arr.dtype)
def two_time_state_to_results(state):
    """Convert the internal state of the two time generator into usable results

    Parameters
    ----------
    state : namedtuple
        The internal state that is yielded from `lazy_two_time`

    Returns
    -------
    results : namedtuple
        A results object that contains the two time correlation results
        and the lag steps
    """
    for q in range(np.max(state.label_array)):
        x0 = (state.g2)[q, :, :]
        # symmetrize each two-time plane: mirror the lower triangle
        # across the diagonal without doubling the diagonal itself
        (state.g2)[q, :, :] = (np.tril(x0) + np.tril(x0).T -
                               np.diag(np.diag(x0)))
    return results(state.g2, state.lag_steps, state)
def setUp(self):
    with self.test_session():
        N = 4
        M = 5
        self.mu = tf.placeholder(settings.float_type, [M, N])
        self.sqrt = tf.placeholder(settings.float_type, [M, N])
        self.chol = tf.placeholder(settings.float_type, [M, M, N])
        self.I = tf.placeholder(settings.float_type, [M, M])
        self.rng = np.random.RandomState(0)
        self.mu_data = self.rng.randn(M, N)
        self.sqrt_data = self.rng.randn(M, N)
        q_sqrt = np.rollaxis(
            np.array([np.tril(self.rng.randn(M, M)) for _ in range(N)]),
            0, 3)
        self.chol_data = q_sqrt
        self.feed_dict = {
            self.mu: self.mu_data,
            self.sqrt: self.sqrt_data,
            self.chol: self.chol_data,
            self.I: np.eye(M),
        }
def setUp(self):
    with self.test_session():
        N = 4
        M = 5
        self.mu = tf.placeholder(settings.float_type, [M, N])
        self.sqrt = tf.placeholder(settings.float_type, [M, N])
        self.chol = tf.placeholder(settings.float_type, [M, M, N])
        self.K = tf.placeholder(settings.float_type, [M, M])
        self.Kdiag = tf.placeholder(settings.float_type, [M, M])
        self.rng = np.random.RandomState(0)
        self.mu_data = self.rng.randn(M, N)
        sqrt_diag = self.rng.randn(M)
        self.sqrt_data = np.array([sqrt_diag for _ in range(N)]).T
        sqrt_chol = np.tril(self.rng.randn(M, M))
        self.chol_data = np.rollaxis(
            np.array([sqrt_chol for _ in range(N)]), 0, 3)
        self.feed_dict = {
            self.mu: np.zeros((M, N)),
            self.sqrt: self.sqrt_data,
            self.chol: self.chol_data,
            self.K: squareT(sqrt_chol),
            self.Kdiag: np.diag(sqrt_diag ** 2),
        }
def consensusCDF(self, K, matrix):
    self.count = 0
    lEN = len(matrix)
    Denominator = (lEN * (lEN - 1)) / 2
    CDF = dict()
    matrix = np.tril(matrix)
    for c in self.HistogramValues.keys():
        Sum = 0
        for cumulativeC in self.HistogramValues.keys():
            if cumulativeC <= c:
                Sum += self.HistogramValues[cumulativeC]
        CDF[c] = Sum / Denominator
    self.GlobalCDF[K] = copy.deepcopy(CDF)
    del lEN, matrix
def read_mongodb_matrix(tickers, matrix_name):
    mis = MatrixItem.objects(i__in=tickers,
                             j__in=tickers,
                             matrix_name=matrix_name)
    n = len(tickers)
    available_tickers = set([mi.i for mi in mis])
    np.random.seed(n)
    a = np.absolute(np.random.normal(0, 0.001, [n, n]))
    a_triu = np.triu(a, k=0)
    a_tril = np.tril(a, k=0)
    a_diag = np.diag(np.diag(a))
    a_sym_triu = a_triu + a_triu.T - a_diag
    matrix = pd.DataFrame(a_sym_triu,
                          index=tickers,
                          columns=tickers)
    for mi in mis:
        if abs(mi.v) > 10:
            mi.v = 0.001
        matrix.set_value(mi.i, mi.j, mi.v)
        matrix.set_value(mi.j, mi.i, mi.v)
    matrix = matrix.round(6)
    return matrix
def make_symmetric_lower(mat):
    '''
    Copies the matrix entries below the main diagonal to the upper
    triangle half of the matrix. Leaves the diagonal unchanged. Returns
    a `NumPy` matrix object.

    **mat** : `numpy.matrix`
        A lower triangular matrix.

    returns : `numpy.matrix`
        The symmetrized matrix.
    '''
    # extract lower triangle from matrix (including diagonal)
    tmp_mat = np.tril(mat)

    # if the matrix given wasn't a lower triangle matrix, raise an error
    # (any() rather than all(): a single nonzero entry above the
    # diagonal already disqualifies the input)
    if (mat != tmp_mat).any():
        raise Exception('Matrix to symmetrize is not a lower diagonal matrix.')

    # add its transpose to itself, zeroing the diagonal to avoid doubling
    tmp_mat += np.triu(tmp_mat.transpose(), 1)
    return np.asmatrix(tmp_mat)
def get_masked(self, percent_hole, diag_off=1):
    """ Construct a random mask.
        Random training set on 20% of the data / debug5 - debug11 -- Unbalanced
    """
    data = self.data
    if type(data) is np.ndarray:
        #self.data_mat = sp.sparse.csr_matrix(data)
        pass
    else:
        raise NotImplementedError('type %s unknown as corpus' % type(data))

    n = int(data.size * percent_hole)
    mask_index = np.unravel_index(
        np.random.permutation(data.size)[:n], data.shape)
    mask = np.zeros(data.shape, dtype=data.dtype)
    mask[mask_index] = 1

    if self.is_symmetric():
        mask = np.tril(mask) + np.tril(mask, -1).T

    data_ma = ma.array(data, mask=mask)
    if diag_off == 1:
        np.fill_diagonal(data_ma, ma.masked)

    return data_ma
def get_masked_zeros(self, diag_off=1):
    ''' Take out all zeros '''
    data = self.data
    if type(data) is np.ndarray:
        #self.data_mat = sp.sparse.csr_matrix(data)
        pass
    else:
        raise NotImplementedError('type %s unknown as corpus' % type(data))

    mask = np.zeros(data.shape, dtype=data.dtype)
    mask[data == 0] = 1

    if self.is_symmetric():
        mask = np.tril(mask) + np.tril(mask, -1).T

    data_ma = ma.array(data, mask=mask)
    if diag_off == 1:
        np.fill_diagonal(data_ma, ma.masked)

    return data_ma
def verify_solve_grad(self, m, n, A_structure, lower, rng):
    # ensure diagonal elements of A are relatively large to avoid
    # numerical precision issues
    A_val = (rng.normal(size=(m, m)) * 0.5 +
             numpy.eye(m)).astype(config.floatX)
    if A_structure == 'lower_triangular':
        A_val = numpy.tril(A_val)
    elif A_structure == 'upper_triangular':
        A_val = numpy.triu(A_val)
    if n is None:
        b_val = rng.normal(size=m).astype(config.floatX)
    else:
        b_val = rng.normal(size=(m, n)).astype(config.floatX)
    eps = None
    if config.floatX == "float64":
        eps = 2e-8
    solve_op = Solve(A_structure=A_structure, lower=lower)
    utt.verify_grad(solve_op, [A_val, b_val], 3, rng, eps=eps)
def testCholesky(self):
    # Tests the cholesky function
    np.random.seed(8)

    # generating two symmetric positive-definite tt-cores
    L_1 = np.tril(np.random.normal(scale=2., size=(2, 2)))
    L_2 = np.tril(np.random.normal(scale=2., size=(3, 3)))
    K_1 = L_1.dot(L_1.T)
    K_2 = L_2.dot(L_2.T)
    K = np.kron(K_1, K_2)
    initializer = tensor_train.TensorTrain(
        [K_1[None, :, :, None], K_2[None, :, :, None]], tt_ranks=7 * [1])
    kron_mat = variables.get_variable('kron_mat', initializer=initializer)

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
        sess.run(init_op)
        desired = np.linalg.cholesky(K)
        actual = ops.full(kr.cholesky(kron_mat)).eval()
        self.assertAllClose(desired, actual)
def htmt(self):
    htmt_ = pd.DataFrame(pd.DataFrame.corr(self.data_),
                         index=self.manifests, columns=self.manifests)

    mean = []
    allBlocks = []
    for i in range(self.lenlatent):
        block_ = self.Variables['measurement'][
            self.Variables['latent'] == self.latent[i]]
        allBlocks.append(list(block_.values))
        block = htmt_.ix[block_, block_]
        mean_ = (block - np.diag(np.diag(block))).values
        mean_[mean_ == 0] = np.nan
        mean.append(np.nanmean(mean_))

    comb = [[k, j] for k in range(self.lenlatent)
            for j in range(self.lenlatent)]

    comb_ = [(np.sqrt(mean[comb[i][1]] * mean[comb[i][0]]))
             for i in range(self.lenlatent ** 2)]

    comb__ = []
    for i in range(self.lenlatent ** 2):
        block = (htmt_.ix[allBlocks[comb[i][1]],
                          allBlocks[comb[i][0]]]).values
        # block[block == 1] = np.nan
        comb__.append(np.nanmean(block))

    htmt__ = np.divide(comb__, comb_)
    where_are_NaNs = np.isnan(htmt__)
    htmt__[where_are_NaNs] = 0

    htmt = pd.DataFrame(np.tril(htmt__.reshape(
        (self.lenlatent, self.lenlatent)), k=-1),
        index=self.latent, columns=self.latent)

    return htmt
def corLVs(self):
    # correlations between latent variables (lower triangle only)
    corLVs_ = np.tril(pd.DataFrame.corr(self.fscores))
    return pd.DataFrame(corLVs_, index=self.latent, columns=self.latent)
def check_pd(A, lower=True):
    """
    Checks if A is PD. If so returns True and the Cholesky decomposition,
    otherwise returns False and None.
    """
    try:
        # cho_factor leaves arbitrary data in the unused triangle,
        # so zero it out with np.tril
        return True, np.tril(cho_factor(A, lower=lower)[0])
    except LinAlgError as err:
        if 'not positive definite' in str(err):
            return False, None
def test_tril_triu_ndim2():
    for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
        a = np.ones((2, 2), dtype=dtype)
        b = np.tril(a)
        c = np.triu(a)
        yield assert_array_equal, b, [[1, 0], [1, 1]]
        yield assert_array_equal, c, b.T
        # should return the same dtype as the original array
        yield assert_equal, b.dtype, a.dtype
        yield assert_equal, c.dtype, a.dtype
def test_tril_triu_with_inf():
    # Issue 4859
    arr = np.array([[1, 1, np.inf],
                    [1, 1, 1],
                    [np.inf, 1, 1]])
    out_tril = np.array([[1, 0, 0],
                         [1, 1, 0],
                         [np.inf, 1, 1]])
    out_triu = out_tril.T
    assert_array_equal(np.triu(arr), out_triu)
    assert_array_equal(np.tril(arr), out_tril)
def __init__(self, batch_size, mem_size, hidden_size):
    self.hidden_size = hidden_size
    self.mem_size = mem_size
    self.batch_size = batch_size
    N, M, d = batch_size, mem_size, hidden_size
    # lower-triangular masks: L includes the diagonal, sL excludes it
    self.L = np.tril(np.ones([M, M], dtype='float32'))
    self.sL = np.tril(np.ones([M, M], dtype='float32'), k=-1)
def grad_AQ(A, Gamma, B1_B3, B2, T, q, prior_A, prior_Q,
            flag_A_time_vary, A_flag=True, Q_flag=True):
    grad_A = np.zeros(A.shape)
    grad_Gamma = np.zeros(Gamma.shape)
    eig_Q_sqrt, inv_Q = _util_obj_grad_AQ(Gamma)
    if A_flag:
        if flag_A_time_vary:
            for t in range(T):
                # 2 Q^{-1} (-B2 + A B3)
                grad_A[t] = 2.0 * np.dot(inv_Q, (-B2[t] + A[t].dot(B1_B3[t])))
                if prior_A is not None:
                    # gradient of lambda0 ||A_t||_F^2 + lambda1 ||A_t - A_{t-1}||_F^2:
                    # 2 lambda0 A_t + 2 lambda1 (2 A_t - A_{t-1} - A_{t+1})
                    grad_A[t] += 2.0 * prior_A['lambda0'] * A[t]
                    if t > 0 and t < T - 1:
                        # grad_A[t] += 2.0*prior_A['lambda1']*(A[t-1]+A[t+1]-2.0*A[t])
                        # correction on May 19
                        grad_A[t] += 2.0 * prior_A['lambda1'] * (
                            2.0 * A[t] - A[t - 1] - A[t + 1])
                    elif t == 0:
                        grad_A[t] += 2.0 * prior_A['lambda1'] * (A[0] - A[1])
                    else:  # t == T-1
                        grad_A[t] += 2.0 * prior_A['lambda1'] * (A[t] - A[t - 1])
        else:
            grad_A = 2.0 * np.dot(inv_Q, (-B2.sum(axis=0) +
                                          A.dot((B1_B3[0:T]).sum(axis=0))))
            if prior_A is not None:
                # d/dA of lambda0 ||A||_F^2, matching the time-varying branch
                grad_A += 2.0 * prior_A['lambda0'] * A
    if Q_flag:
        # gradient = Q^{-1} (q T I - sum_t (B1_t - A_t B2_t^T - B2_t A_t^T
        #            + A_t B3_t A_t^T) Q^{-1}) Gamma
        tmp = _util_obj_grad_AQ_sum_B123(A, B1_B3, B2, T, flag_A_time_vary)
        grad_Gamma = reduce(np.dot, [
            inv_Q,
            (np.eye(inv_Q.shape[0]) * np.float(q) * np.float(T)
             - tmp.dot(inv_Q)),
            Gamma])
        if prior_Q is not None:
            grad_Gamma += grad_prior_Q(Gamma, prior_Q)
        # Gamma is a lower-triangular Cholesky factor, so keep only the
        # lower triangle of its gradient
        grad_Gamma = np.tril(grad_Gamma)
    return grad_A, grad_Gamma

#=========== gradient descent with back tracking for AQ =============
def grad_Q0(Gamma0, B7, q, prior_Q0):
    eig_Q_sqrt, inv_Q = _util_obj_grad_AQ(Gamma0)
    grad_Gamma0 = reduce(np.dot, [
        inv_Q,
        np.float(q) * np.eye(Gamma0.shape[0]) - B7.dot(inv_Q),
        Gamma0])
    if prior_Q0 is not None:
        grad_Gamma0 += grad_prior_Q(Gamma0, prior_Q0)
    grad_Gamma0 = np.tril(grad_Gamma0)
    return grad_Gamma0

# gradient descent
def get_neg_log_post_grad_Qu(Phi, G, MMT, q, Sigma_E, L, Sigma_J,
                             nu, V, GL, prior_on=False):
    """
    Just get the gradient for Qu; ignore the other variables.
    """
    p = Phi.shape[0]
    Qu = Phi.dot(Phi.T)
    G_Sigma_G = np.zeros(MMT.shape)
    for i in range(Sigma_J.size):
        G_Sigma_G += Sigma_J[i] * np.outer(G[:, i], G[:, i])
    cov = Sigma_E + G_Sigma_G + GL.dot(Qu).dot(GL.T)
    inv_cov = np.linalg.inv(cov)
    GLT_inv_cov = np.dot(GL.T, inv_cov)
    invQ = np.linalg.inv(Qu)
    if prior_on:
        grad0 = (q * GL.T.dot(inv_cov).dot(GL)
                 - GLT_inv_cov.dot(MMT).dot(GLT_inv_cov.T)
                 + invQ.dot((nu + p + 1) * np.eye(p) - V.dot(invQ)))
    else:
        grad0 = (q * GL.T.dot(inv_cov).dot(GL)
                 - GLT_inv_cov.dot(MMT).dot(GLT_inv_cov.T))
    grad1 = 2.0 * grad0.dot(Phi)
    # the Cholesky factor Phi is lower triangular, so keep only the
    # lower triangle of the gradient
    grad = np.tril(grad1)
    return grad

#==============================================================================
# gradient descent optimization, using back tracking
# only update Qu
def plomp(f1, f2):
    # constants of the Plomp-Levelt dissonance model
    b1 = 3.51
    b2 = 5.75
    xstar = 0.24
    s1 = 0.0207
    s2 = 18.96
    s = np.tril(xstar / ((s1 * np.minimum(f1, f2)) + s2))
    pd = (np.exp(-b1 * s * np.abs(f2 - f1)) -
          np.exp(-b2 * s * np.abs(f2 - f1)))
    return pd
def prof_instance(nz, neq, nineq, nIter, cuda):
    L = np.tril(npr.uniform(0, 1, (nz, nz))) + np.eye(nz, nz)
    G = npr.randn(nineq, nz)
    A = npr.randn(neq, nz)
    z0 = npr.randn(nz)
    s0 = np.ones(nineq)
    p = npr.randn(nz)

    p, L, G, A, z0, s0 = [torch.Tensor(x) for x in [p, L, G, A, z0, s0]]
    Q = torch.mm(L, L.t()) + 0.001 * torch.eye(nz).type_as(L)
    if cuda:
        p, L, Q, G, A, z0, s0 = [x.cuda() for x in [p, L, Q, G, A, z0, s0]]

    af = adact.AdactFunction()

    start = time.time()
    # One-time cost for numpy conversion.
    p_np, L_np, G_np, A_np, z0_np, s0_np = [
        adact.toNp(v) for v in [p, L, G, A, z0, s0]]
    cp = time.time() - start
    for i in range(nIter):
        start = time.time()
        zhat, nu, lam = af.forward_single_np(p_np, L_np, G_np, A_np,
                                             z0_np, s0_np)
        cp += time.time() - start

    b = torch.mv(A, z0) if neq > 0 else None
    h = torch.mv(G, z0) + s0
    L_Q, L_S, R = aip.pre_factor_kkt(Q, G, A, nineq, neq)
    pdipm = []
    for i in range(nIter):
        start = time.time()
        zhat_ip, nu_ip, lam_ip = aip.forward_single(p, Q, G, A, b, h,
                                                    L_Q, L_S, R)
        pdipm.append(time.time() - start)
    return cp, np.sum(pdipm)
def gen_L(rng, n, *shape):
    return np.array([np.tril(rng.randn(*shape)) for _ in range(n)])
def gen_q_sqrt(rng, D_out, *shape):
    q_sqrt = np.array([np.tril(rng.randn(*shape)) for _ in range(D_out)])
    return np.transpose(q_sqrt, [1, 2, 0])
def update(self, x):
    """Single step learning update"""
    # print x.shape
    x = x.reshape((self.ndims, 1))
    y = np.dot(self.w.T, x)
    # GHA rule in matrix form
    d_w = self.anneal(self.cnt) * self.eta * \
        (np.dot(x, y.T) - np.dot(self.w, np.tril(np.dot(y, y.T))))
    self.w += d_w
    self.cnt += 1
    return y
def _cost_strictly_lower_triangular(b):
    # np.tril(b, -1) - b negates everything on and above the diagonal,
    # so this penalizes any mass outside the strictly lower triangle
    return np.sum((np.tril(b, -1) - b) ** 2)
def histogram(self, matrix):
    matrix = np.tril(matrix)
    matrix1 = matrix.flatten()
    nonZeroValues = np.flatnonzero(matrix1)
    matrix1 = matrix1[nonZeroValues]
    unique, counts = np.unique(matrix1, return_counts=True)
    self.HistogramValues = dict()
    for i, j in np.asarray((unique, counts)).T:
        self.HistogramValues[float(format(i, '.1f'))] = j
def Find_HighlightedEdges(self, weight=0):
    self.ThresholdData = np.copy(self.data)
    # low_values_indices = self.ThresholdData < weight  # where values are low
    # self.ThresholdData[low_values_indices] = 0
    # graterindices = [(i, j) for i, j in np.ndenumerate(self.ThresholdData) if any(i > j)]
    # self.ThresholdData[graterindices[:1]] = 0
    # self.ThresholdData = np.tril(self.ThresholdData)
    # print self.ThresholdData, "is the data same??"
    """ test 2 highlighted edges there """
    # np.savetxt('test2.txt', self.ThresholdData, delimiter=',', fmt='%1.4e')
    self.g = nx.from_numpy_matrix(self.ThresholdData)
def feed_forward(self, sigma=.1):
    '''Generate a random feedforward W_rec (lower triangular)'''
    N_in = self.N_in
    N_rec = self.N_rec
    N_out = self.N_out
    weights_path = self.init_weights_path

    # uniform between -.1 and .1
    W_in = .2 * np.random.rand(N_rec, N_in) - .1
    W_out = .2 * np.random.rand(N_out, N_rec) - .1

    b_rec = np.zeros(N_rec)
    b_out = np.zeros(N_out)

    init_state = .1 + .01 * np.random.randn(N_rec)

    # strictly lower-triangular recurrent weights give purely
    # feedforward dynamics
    W_rec = np.tril(sigma * np.random.randn(N_rec, N_rec), -1)

    input_Connectivity = np.ones([N_rec, N_in])
    rec_Connectivity = np.ones([N_rec, N_rec])
    output_Connectivity = np.ones([N_out, N_rec])

    if not self.autapses:
        W_rec[np.eye(N_rec) == 1] = 0
        rec_Connectivity[np.eye(N_rec) == 1] = 0

    np.savez(weights_path,
             W_in=W_in,
             W_rec=W_rec,
             W_out=W_out,
             b_rec=b_rec,
             b_out=b_out,
             init_state=init_state,
             input_Connectivity=input_Connectivity,
             rec_Connectivity=rec_Connectivity,
             output_Connectivity=output_Connectivity)

    return weights_path