The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.diag_indices().
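A quick primer before the examples (a minimal sketch, not taken from any of the projects below): numpy.diag_indices(n) returns a tuple of index arrays addressing the main diagonal of an n x n array, and the pair can be used both to read and to write the diagonal in place.

import numpy as np

a = np.arange(9).reshape(3, 3)
rows, cols = np.diag_indices(3)  # (array([0, 1, 2]), array([0, 1, 2]))
print(a[rows, cols])             # [0 4 8] -- the main diagonal
a[rows, cols] = -1               # assignment writes back into `a`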
def test_covariances_and_eigenvalues(self):
    reader = FeatureReader(self.trajnames, self.temppdb)
    for tau in [1, 10, 100, 1000, 2000]:
        trans = tica(lag=tau, dim=self.dim, kinetic_map=False)
        trans.data_producer = reader
        log.info('number of trajectories reported by tica %d' % trans.number_of_trajectories())
        trans.parametrize()
        data = trans.get_output()
        log.info('max. eigenvalue: %f' % np.max(trans.eigenvalues))
        self.assertTrue(np.all(trans.eigenvalues <= 1.0))
        # check ICs
        check = tica(data=data, lag=tau, dim=self.dim)
        np.testing.assert_allclose(np.eye(self.dim), check.cov, atol=1e-8)
        np.testing.assert_allclose(check.mean, 0.0, atol=1e-8)
        ic_cov_tau = np.zeros((self.dim, self.dim))
        ic_cov_tau[np.diag_indices(self.dim)] = trans.eigenvalues
        np.testing.assert_allclose(ic_cov_tau, check.cov_tau, atol=1e-8)
def update_hypers(self, params):
    for i in range(self.nolayers):
        layeri = self.layers[i]
        Mi = layeri.M
        Dini = layeri.Din
        Douti = layeri.Dout
        layeri.ls.set_value(params['ls'][i])
        layeri.sf.set_value(params['sf'][i])
        layeri.sn.set_value(params['sn'][i])
        triu_ind = np.triu_indices(Mi)
        diag_ind = np.diag_indices(Mi)
        for d in range(Douti):
            layeri.zu[d].set_value(params['zu'][i][d])
            theta_m_d = params['eta2'][i][d]
            theta_R_d = params['eta1_R'][i][d]
            R = np.zeros((Mi, Mi))
            R[triu_ind] = theta_R_d.reshape(theta_R_d.shape[0], )
            R[diag_ind] = np.exp(R[diag_ind])
            layeri.theta_1_R[d] = R
            layeri.theta_1[d] = np.dot(R.T, R)
            layeri.theta_2[d] = theta_m_d
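The packing convention in the example above recurs throughout this page, so here it is in isolation (a hedged, self-contained sketch; the names theta_R, R, and theta_1 mirror the example): an unconstrained vector of length M(M+1)/2 is scattered into the upper triangle via np.triu_indices, and np.diag_indices is used to exponentiate the diagonal so the factor R has strictly positive diagonal entries.

import numpy as np

M = 3
theta_R = np.arange(M * (M + 1) // 2, dtype=np.float64)  # unconstrained parameters
triu_ind = np.triu_indices(M)
diag_ind = np.diag_indices(M)

R = np.zeros((M, M))
R[triu_ind] = theta_R
R[diag_ind] = np.exp(R[diag_ind])  # positive diagonal => R is invertible
theta_1 = np.dot(R.T, R)           # symmetric positive definite

# Invert the packing: take logs on the diagonal, read back the upper triangle.
R_log = R.copy()
R_log[diag_ind] = np.log(R_log[diag_ind])
assert np.allclose(R_log[triu_ind], theta_R)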
def _passthrough_delay(m, c):
    """Analytically derived state-space when p = q = m.

    We use this because it is numerically stable for high m.
    """
    j = np.arange(1, m+1, dtype=np.float64)
    u = (m + j) * (m - j + 1) / (c * j)

    A = np.zeros((m, m))
    B = np.zeros((m, 1))
    C = np.zeros((1, m))
    D = np.zeros((1,))

    A[0, :] = B[0, 0] = -u[0]
    A[1:, :-1][np.diag_indices(m-1)] = u[1:]
    D[0] = (-1)**m
    C[0, np.arange(m) % 2 == 0] = 2*D[0]
    return LinearSystem((A, B, C, D), analog=True)
def _proper_delay(q, c):
    """Analytically derived state-space when p = q - 1.

    We use this because it is numerically stable for high q
    and doesn't have a passthrough.
    """
    j = np.arange(q, dtype=np.float64)
    u = (q + j) * (q - j) / (c * (j + 1))

    A = np.zeros((q, q))
    B = np.zeros((q, 1))
    C = np.zeros((1, q))
    D = np.zeros((1,))

    A[0, :] = -u[0]
    B[0, 0] = u[0]
    A[1:, :-1][np.diag_indices(q-1)] = u[1:]
    C[0, :] = (j + 1) / float(q) * (-1) ** (q - 1 - j)
    return LinearSystem((A, B, C, D), analog=True)
def convert_solution(z, Cs):
    if issparse(Cs):
        Cs = Cs.toarray()
    M = Cs.shape[0]
    x = z[0:M]
    y = z[M:]
    w = np.exp(y)
    pi = w / w.sum()
    X = pi[:, np.newaxis] * x[np.newaxis, :]
    Y = X + np.transpose(X)
    denom = Y
    enum = Cs * np.transpose(pi)
    P = enum / denom
    ind = np.diag_indices(Cs.shape[0])
    P[ind] = 0.0
    rowsums = P.sum(axis=1)
    P[ind] = 1.0 - rowsums
    return pi, P

###############################################################################
# Objective, Gradient, and Hessian
###############################################################################
def get_connectivity_matrix_nodiag(self):
    """Returns a similar matrix as in Ontology.get_connectivity_matrix(),
    but the diagonal of the matrix is 0.

    Note: d[a, a] == 0 instead of 1
    """
    if not hasattr(self, 'd_nodiag'):
        d = self.get_connectivity_matrix()
        self.d_nodiag = d.copy()
        self.d_nodiag[np.diag_indices(self.d_nodiag.shape[0])] = 0
        assert not np.isfortran(self.d_nodiag)
    return self.d_nodiag
def corrsort(features, use_tsp=False):
    """Given a features array, one feature per axis=0 entry, return axis=0
    indices such that adjacent indices correspond to features that are
    correlated. cf. Traveling Salesman Problem. Not an optimal solution.

    use_tsp: Use tsp solver. See the tsp_solver.greedy module that is used
        for this. Slows run-time considerably: O(N^4) computation, O(N^2)
        memory. Without use_tsp, both computation and memory are O(N^2).
    """
    correlations = np.ma.corrcoef(arrays.plane(features))
    if use_tsp:
        return tsp.solve_tsp(-correlations)
    size = features.shape[0]
    correlations.mask[np.diag_indices(size)] = True

    # initialize results with the pair with the highest correlations.
    largest = np.argmax(correlations)
    results = [largest // size, largest % size]
    correlations.mask[:, results[0]] = True

    while len(results) < size:
        correlations.mask[:, results[-1]] = True
        results.append(np.argmax(correlations[results[-1]]))
    return results
def _get_batch_diagonal_cpu(array):
    batch_size, m, n = array.shape
    assert m == n
    rows, cols = np.diag_indices(n)
    return array[:, rows, cols]
def _set_batch_diagonal_cpu(array, diag_val):
    batch_size, m, n = array.shape
    assert m == n
    rows, cols = np.diag_indices(n)
    array[:, rows, cols] = diag_val
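Standalone behavior of these two helpers (a hedged sketch; the batch data is made up): indexing with [:, rows, cols] broadcasts the diagonal index pair across the batch axis.

import numpy as np

batch = np.arange(8.0).reshape(2, 2, 2)  # two 2x2 matrices
rows, cols = np.diag_indices(2)
print(batch[:, rows, cols])  # per-matrix diagonals: [[0. 3.] [4. 7.]]
batch[:, rows, cols] = 0.0   # zeros every diagonal in place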
def _diagonal_idx_array(batch_size, n):
    idx_offsets = np.arange(
        start=0, stop=batch_size * n * n, step=n * n,
        dtype=np.int32).reshape((batch_size, 1))
    idx = np.ravel_multi_index(
        np.diag_indices(n), (n, n)).reshape((1, n)).astype(np.int32)
    return cuda.to_gpu(idx + idx_offsets)
def check_forward(self, diag_data, non_diag_data):
    diag = chainer.Variable(diag_data)
    non_diag = chainer.Variable(non_diag_data)
    y = lower_triangular_matrix(diag, non_diag)

    correct_y = numpy.zeros(
        (self.batch_size, self.n, self.n), dtype=numpy.float32)

    tril_rows, tril_cols = numpy.tril_indices(self.n, -1)
    correct_y[:, tril_rows, tril_cols] = cuda.to_cpu(non_diag_data)

    diag_rows, diag_cols = numpy.diag_indices(self.n)
    correct_y[:, diag_rows, diag_cols] = cuda.to_cpu(diag_data)

    gradient_check.assert_allclose(correct_y, cuda.to_cpu(y.data))
def kth_diag_indices(n, k):
    """Return indices of bins k steps away from the diagonal.

    (from http://stackoverflow.com/questions/10925671/numpy-k-th-diagonal-indices)
    """
    rows, cols = np.diag_indices(n)
    if k < 0:
        return rows[:k], cols[-k:]
    elif k > 0:
        return rows[k:], cols[:-k]
    else:
        return rows, cols
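A quick check of this helper (hedged sketch; kth_diag_indices as defined above). Note the sign convention: positive k selects the diagonal below the main one here, the mirror of numpy's np.diag(a, k=1).

import numpy as np

a = np.arange(16).reshape(4, 4)
rows, cols = np.diag_indices(4)
print(a[rows, cols])           # main diagonal: [ 0  5 10 15]
r, c = kth_diag_indices(4, 1)
print(a[r, c])                 # one step below: [ 4  9 14]
r, c = kth_diag_indices(4, -1)
print(a[r, c])                 # one step above: [ 1  6 11]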
def compute_posterior_grad_u(self, dmu, dSu):
    # return grads wrt u params and Kuuinv
    triu_ind = np.triu_indices(self.M)
    diag_ind = np.diag_indices(self.M)
    if self.nat_param:
        dSu_via_m = np.einsum('da,db->dab', dmu, self.theta_2)
        dSu += dSu_via_m
        dSuinv = -np.einsum('dab,dbc,dce->dae', self.Su, dSu, self.Su)
        dKuuinv = np.sum(dSuinv, axis=0)
        dtheta1 = dSuinv
        deta2 = np.einsum('dab,db->da', self.Su, dmu)
    else:
        deta2 = dmu
        dtheta1 = dSu
        dKuuinv = 0

    dtheta1T = np.transpose(dtheta1, [0, 2, 1])
    dtheta1_R = np.einsum('dab,dbc->dac', self.theta_1_R, dtheta1 + dtheta1T)
    deta1_R = np.zeros([self.Dout, self.M * (self.M + 1) // 2])
    for d in range(self.Dout):
        dtheta1_R_d = dtheta1_R[d, :, :]
        theta1_R_d = self.theta_1_R[d, :, :]
        dtheta1_R_d[diag_ind] = dtheta1_R_d[diag_ind] * theta1_R_d[diag_ind]
        dtheta1_R_d = dtheta1_R_d[triu_ind]
        deta1_R[d, :] = dtheta1_R_d.reshape((dtheta1_R_d.shape[0], ))
    return deta1_R, deta2, dKuuinv
def get_hypers(self, key_suffix=''):
    """Summary

    Args:
        key_suffix (str, optional): Description

    Returns:
        TYPE: Description
    """
    params = {}
    M = self.M
    Din = self.Din
    Dout = self.Dout
    params['ls' + key_suffix] = self.ls
    params['sf' + key_suffix] = self.sf
    triu_ind = np.triu_indices(M)
    diag_ind = np.diag_indices(M)
    params_eta2 = self.theta_2
    params_eta1_R = np.zeros((Dout, M * (M + 1) // 2))
    params_zu_i = self.zu
    for d in range(Dout):
        Rd = np.copy(self.theta_1_R[d, :, :])
        Rd[diag_ind] = np.log(Rd[diag_ind])
        params_eta1_R[d, :] = np.copy(Rd[triu_ind])
    params['zu' + key_suffix] = self.zu
    params['eta1_R' + key_suffix] = params_eta1_R
    params['eta2' + key_suffix] = params_eta2
    return params
def update_hypers(self, params, key_suffix=''):
    """Summary

    Args:
        params (TYPE): Description
        key_suffix (str, optional): Description
    """
    M = self.M
    self.ls = params['ls' + key_suffix]
    self.sf = params['sf' + key_suffix]
    triu_ind = np.triu_indices(M)
    diag_ind = np.diag_indices(M)
    zu = params['zu' + key_suffix]
    self.zu = zu
    for d in range(self.Dout):
        theta_m_d = params['eta2' + key_suffix][d, :]
        theta_R_d = params['eta1_R' + key_suffix][d, :]
        R = np.zeros((M, M))
        R[triu_ind] = np.copy(theta_R_d.reshape(theta_R_d.shape[0], ))
        R[diag_ind] = np.exp(R[diag_ind])
        self.theta_1_R[d, :, :] = R
        self.theta_1[d, :, :] = np.dot(R.T, R)
        self.theta_2[d, :] = theta_m_d
    # update Kuu given new hypers
    self.compute_kuu()
    # compute mu and Su for each layer
    self.update_posterior()
def compute_cav_grad_u(self, dmu, dSu, alpha):
    # return grads wrt u params and Kuuinv
    triu_ind = np.triu_indices(self.M)
    diag_ind = np.diag_indices(self.M)
    beta = (self.N - alpha) * 1.0 / self.N
    if self.nat_param:
        dSu_via_m = np.einsum('da,db->dab', dmu, beta * self.theta_2)
        dSu += dSu_via_m
        dSuinv = -np.einsum('dab,dbc,dce->dae', self.Suhat, dSu, self.Suhat)
        dKuuinv = np.sum(dSuinv, axis=0)
        dtheta1 = beta * dSuinv
        deta2 = beta * np.einsum('dab,db->da', self.Suhat, dmu)
    else:
        Suhat = self.Suhat
        Suinv = self.Suinv
        mu = self.mu
        data_f_2 = np.einsum('dab,db->da', Suinv, mu)
        dSuhat_via_mhat = np.einsum('da,db->dab', dmu, beta * data_f_2)
        dSuhat = dSu + dSuhat_via_mhat
        dmuhat = dmu
        dSuhatinv = -np.einsum('dab,dbc,dce->dae', Suhat, dSuhat, Suhat)
        dSuinv_1 = beta * dSuhatinv
        Suhatdmu = np.einsum('dab,db->da', Suhat, dmuhat)
        dSuinv = dSuinv_1 + beta * np.einsum('da,db->dab', Suhatdmu, mu)
        dtheta1 = -np.einsum('dab,dbc,dce->dae', Suinv, dSuinv, Suinv)
        deta2 = beta * np.einsum('dab,db->da', Suinv, Suhatdmu)
        dKuuinv = (1 - beta) / beta * np.sum(dSuinv_1, axis=0)

    dtheta1T = np.transpose(dtheta1, [0, 2, 1])
    dtheta1_R = np.einsum('dab,dbc->dac', self.theta_1_R, dtheta1 + dtheta1T)
    deta1_R = np.zeros([self.Dout, self.M * (self.M + 1) // 2])
    for d in range(self.Dout):
        dtheta1_R_d = dtheta1_R[d, :, :]
        theta1_R_d = self.theta_1_R[d, :, :]
        dtheta1_R_d[diag_ind] = dtheta1_R_d[diag_ind] * theta1_R_d[diag_ind]
        dtheta1_R_d = dtheta1_R_d[triu_ind]
        deta1_R[d, :] = dtheta1_R_d.reshape((dtheta1_R_d.shape[0], ))
    return deta1_R, deta2, dKuuinv
def test_integration_adaptive_graph_lasso(self, params_in):
    '''Just tests inputs/outputs (not validity of result).'''
    n_features = 20
    n_samples = 25
    cov, prec, adj = ClusterGraph(
        n_blocks=1,
        chain_blocks=False,
        seed=1,
    ).create(n_features, 0.8)
    prng = np.random.RandomState(2)
    X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)

    model = AdaptiveGraphLasso(**params_in)
    model.fit(X)
    assert model.estimator_ is not None
    assert model.lam_ is not None
    assert np.sum(model.lam_[np.diag_indices(n_features)]) == 0

    if params_in['method'] == 'binary':
        uvals = set(model.lam_.flat)
        assert len(uvals) == 2
        assert 0 in uvals
        assert 1 in uvals
    elif params_in['method'] in ('inverse', 'inverse_squared'):
        uvals = set(model.lam_.flat[model.lam_.flat != 0])
        assert len(uvals) > 0
def _nonzero_intersection(m, m_hat):
    '''Count the number of nonzeros in and between m and m_hat.

    Returns
    -------
    m_nnz : number of nonzeros in m (w/o diagonal)

    m_hat_nnz : number of nonzeros in m_hat (w/o diagonal)

    intersection_nnz : number of nonzeros in the intersection of m/m_hat
        (w/o diagonal)
    '''
    n_features, _ = m.shape

    m_no_diag = m.copy()
    m_no_diag[np.diag_indices(n_features)] = 0
    m_hat_no_diag = m_hat.copy()
    m_hat_no_diag[np.diag_indices(n_features)] = 0

    m_hat_nnz = len(np.nonzero(m_hat_no_diag.flat)[0])
    m_nnz = len(np.nonzero(m_no_diag.flat)[0])

    intersection_nnz = len(np.intersect1d(
        np.nonzero(m_no_diag.flat)[0],
        np.nonzero(m_hat_no_diag.flat)[0]
    ))

    return m_nnz, m_hat_nnz, intersection_nnz
def _binary_weights(self, estimator):
    n_features, _ = estimator.precision_.shape
    lam = np.zeros((n_features, n_features))
    lam[estimator.precision_ == 0] = 1
    lam[np.diag_indices(n_features)] = 0
    return lam
def _inverse_squared_weights(self, estimator):
    n_features, _ = estimator.precision_.shape
    lam = np.zeros((n_features, n_features))
    mask = estimator.precision_ != 0
    lam[mask] = 1. / (np.abs(estimator.precision_[mask]) ** 2)
    mask_0 = estimator.precision_ == 0
    lam[mask_0] = np.max(lam[mask].flat)  # non-zero in appropriate range
    lam[np.diag_indices(n_features)] = 0
    return lam
def _inverse_weights(self, estimator):
    n_features, _ = estimator.precision_.shape
    lam = np.zeros((n_features, n_features))
    mask = estimator.precision_ != 0
    lam[mask] = 1. / np.abs(estimator.precision_[mask])
    mask_0 = estimator.precision_ == 0
    lam[mask_0] = np.max(lam[mask].flat)  # non-zero in appropriate range
    lam[np.diag_indices(n_features)] = 0
    return lam
def _count_support_diff(m, m_hat):
    n_features, _ = m.shape

    m_no_diag = m.copy()
    m_no_diag[np.diag_indices(n_features)] = 0
    m_hat_no_diag = m_hat.copy()
    m_hat_no_diag[np.diag_indices(n_features)] = 0

    m_nnz = len(np.nonzero(m_no_diag.flat)[0])
    m_hat_nnz = len(np.nonzero(m_hat_no_diag.flat)[0])

    nnz_intersect = len(np.intersect1d(np.nonzero(m_no_diag.flat)[0],
                                       np.nonzero(m_hat_no_diag.flat)[0]))

    return m_nnz + m_hat_nnz - (2 * nnz_intersect)
def dist_diff(self, geom):
    diff = self.coords[:, None, :] - geom.coords[None, :, :]
    dist = np.sqrt(np.sum(diff**2, 2))
    dist[np.diag_indices(len(self))] = np.inf
    return dist, diff
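The np.inf trick above, in isolation (a hedged sketch with made-up coordinates): setting self-distances to infinity lets argmin find each point's nearest neighbor without excluding the diagonal by hand.

import numpy as np

coords = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0]])
diff = coords[:, None, :] - coords[None, :, :]
dist = np.sqrt(np.sum(diff**2, axis=2))
dist[np.diag_indices(len(coords))] = np.inf
print(dist.argmin(axis=1))  # nearest neighbor of each point: [1 0 0]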
def forward(self, x):
    # create diagonal matrices
    m = np.zeros((x.size * self.dim)).reshape(-1, self.dim, self.dim)
    x = x.reshape(-1, self.dim)
    m[(np.s_[:],) + np.diag_indices(x.shape[1])] = x
    return m
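The indexing idiom above, standalone (a hedged sketch; dim=3 is arbitrary): prepending np.s_[:] to the np.diag_indices pair writes each row of x onto the diagonal of the corresponding matrix in the batch.

import numpy as np

dim = 3
x = np.arange(6.0)  # two rows of length 3
m = np.zeros((x.size * dim)).reshape(-1, dim, dim)
m[(np.s_[:],) + np.diag_indices(dim)] = x.reshape(-1, dim)
print(m[1])
# [[3. 0. 0.]
#  [0. 4. 0.]
#  [0. 0. 5.]]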
def __init__(self, n=10, l=None):
    self.n = n
    self.l = l
    self.W = randfilt(self.W if hasattr(self, 'W') else None, (self.l, self.n))
    self.C = np.zeros((n, n), dtype='float32')
    self.C[np.diag_indices(n)] = 1
def __init__(self, n=10, l=None):
    self.n = n
    self.l = l
    self.W = randfilt(self.W if hasattr(self, 'W') else None, (self.l, self.n))
    self.C = np.zeros((n, n), dtype='float32') - 1
    self.C[np.diag_indices(n)] = 1
    self.SII = None
    self.SIO = None
def test_naf_layer_full():
    batch_size = 2
    for nb_actions in (1, 3):
        # Construct single model with NAF as the only layer, hence it is fully
        # deterministic since no weights are used, which would be randomly initialized.
        L_flat_input = Input(shape=((nb_actions * nb_actions + nb_actions) // 2,))
        mu_input = Input(shape=(nb_actions,))
        action_input = Input(shape=(nb_actions,))
        x = NAFLayer(nb_actions, mode='full')([L_flat_input, mu_input, action_input])
        model = Model(inputs=[L_flat_input, mu_input, action_input], outputs=x)
        model.compile(loss='mse', optimizer='sgd')

        # Create random test data.
        L_flat = np.random.random((batch_size, (nb_actions * nb_actions + nb_actions) // 2)).astype('float32')
        mu = np.random.random((batch_size, nb_actions)).astype('float32')
        action = np.random.random((batch_size, nb_actions)).astype('float32')

        # Perform reference computations in numpy since these are much easier to verify.
        L = np.zeros((batch_size, nb_actions, nb_actions)).astype('float32')
        LT = np.copy(L)
        for l, l_T, l_flat in zip(L, LT, L_flat):
            l[np.tril_indices(nb_actions)] = l_flat
            l[np.diag_indices(nb_actions)] = np.exp(l[np.diag_indices(nb_actions)])
            l_T[:, :] = l.T
        P = np.array([np.dot(l, l_T) for l, l_T in zip(L, LT)]).astype('float32')
        A_ref = np.array([np.dot(np.dot(a - m, p), a - m)
                          for a, m, p in zip(action, mu, P)]).astype('float32')
        A_ref *= -.5

        # Finally, compute the output of the net, which should be identical to the
        # previously computed reference.
        A_net = model.predict([L_flat, mu, action]).flatten()
        assert_allclose(A_net, A_ref, rtol=1e-5)
def test_naf_layer_diag():
    batch_size = 2
    for nb_actions in (1, 3):
        # Construct single model with NAF as the only layer, hence it is fully
        # deterministic since no weights are used, which would be randomly initialized.
        L_flat_input = Input(shape=(nb_actions,))
        mu_input = Input(shape=(nb_actions,))
        action_input = Input(shape=(nb_actions,))
        x = NAFLayer(nb_actions, mode='diag')([L_flat_input, mu_input, action_input])
        model = Model(inputs=[L_flat_input, mu_input, action_input], outputs=x)
        model.compile(loss='mse', optimizer='sgd')

        # Create random test data.
        L_flat = np.random.random((batch_size, nb_actions)).astype('float32')
        mu = np.random.random((batch_size, nb_actions)).astype('float32')
        action = np.random.random((batch_size, nb_actions)).astype('float32')

        # Perform reference computations in numpy since these are much easier to verify.
        P = np.zeros((batch_size, nb_actions, nb_actions)).astype('float32')
        for p, l_flat in zip(P, L_flat):
            p[np.diag_indices(nb_actions)] = l_flat
        A_ref = np.array([np.dot(np.dot(a - m, p), a - m)
                          for a, m, p in zip(action, mu, P)]).astype('float32')
        A_ref *= -.5

        # Finally, compute the output of the net, which should be identical to the
        # previously computed reference.
        A_net = model.predict([L_flat, mu, action]).flatten()
        assert_allclose(A_net, A_ref, rtol=1e-5)
def setupMatrices(dim, mult=0.05):
    di = np.diag_indices(dim)
    bt = np.random.randn(dim,) * mult
    Wt = np.zeros((dim, dim))
    Wt[di] = np.random.randn(dim,) * mult
    We = np.zeros((dim, dim))
    We[di] = np.random.randn(dim,) * mult
    return Wt, bt, We
def toPartialCorr(Prec):
    D = Prec[np.diag_indices(Prec.shape[0])]
    P = Prec.copy()
    D = np.outer(D, D)
    return -P / np.sqrt(D)
def toPartialCorr(Prec):
    D = Prec[np.diag_indices(Prec.shape[0])]
    P = Prec.copy()
    D = np.outer(D, D)
    D[D == 0] = 1
    if np.sum(D == 0):
        print('Ds', np.sum(D == 0))
    if np.sum(D < 0):
        print('Dsminus', np.sum(D < 0))
    return -P / np.sqrt(D)
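A numeric check of the formula shared by both variants (a hedged sketch with a made-up 2x2 precision matrix): off-diagonal entries become partial correlations -Prec[i, j] / sqrt(Prec[i, i] * Prec[j, j]); the diagonal comes out as -1 rather than the conventional +1.

import numpy as np

Prec = np.array([[2.0, -0.8],
                 [-0.8, 1.0]])
D = Prec[np.diag_indices(2)]
print(-Prec / np.sqrt(np.outer(D, D)))
# [[-1.          0.56568542]
#  [ 0.56568542 -1.        ]]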
def corrcoef_matrix(matrix):
    # Code originating from http://stackoverflow.com/a/24547964
    # by http://stackoverflow.com/users/2455058/jingchao
    r = np.corrcoef(matrix)
    rf = r[np.triu_indices(r.shape[0], 1)]
    df = matrix.shape[1] - 2
    ts = rf * rf * (df / (1 - rf * rf))
    pf = betai(0.5 * df, 0.5, df / (df + ts))
    p = np.zeros(shape=r.shape)
    p[np.triu_indices(p.shape[0], 1)] = pf
    p[np.tril_indices(p.shape[0], -1)] = pf
    p[np.diag_indices(p.shape[0])] = np.ones(p.shape[0])
    return r, p
def get_hypers(self):
    params = {'ls': [], 'sf': [], 'zu': [], 'sn': [], 'eta1_R': [], 'eta2': []}
    for i in range(self.nolayers):
        layeri = self.layers[i]
        Mi = layeri.M
        Dini = layeri.Din
        Douti = layeri.Dout
        params['ls'].append(layeri.ls.get_value())
        params['sf'].append(layeri.sf.get_value())
        params['sn'].append(layeri.sn.get_value())
        triu_ind = np.triu_indices(Mi)
        diag_ind = np.diag_indices(Mi)
        params_zu_i = []
        params_eta2_i = []
        params_eta1_Ri = []
        for d in range(Douti):
            params_zu_i.append(layeri.zu[d].get_value())
            params_eta2_i.append(layeri.theta_2[d])
            Rd = layeri.theta_1_R[d]
            Rd[diag_ind] = np.log(Rd[diag_ind])
            params_eta1_Ri.append(Rd[triu_ind].reshape((Mi * (Mi + 1) // 2, 1)))
        params['zu'].append(params_zu_i)
        params['eta1_R'].append(params_eta1_Ri)
        params['eta2'].append(params_eta2_i)
    return params
def get_hypers(self):
    params = {'ls': [], 'sf': [], 'zu': [], 'sn': [], 'eta1_R': [], 'eta2': []}
    for i in range(self.no_layers):
        Mi = self.no_pseudos[i]
        Dini = self.layer_sizes[i]
        Douti = self.layer_sizes[i+1]
        params['ls'].append(self.ls[i])
        params['sf'].append(self.sf[i])
        if not (self.no_output_noise and (i == self.no_layers - 1)):
            params['sn'].append(self.sn[i])
        triu_ind = np.triu_indices(Mi)
        diag_ind = np.diag_indices(Mi)
        params_zu_i = []
        params_eta2_i = []
        params_eta1_Ri = []
        if self.zu_tied:
            params_zu_i = self.zu[i]
        else:
            for d in range(Douti):
                params_zu_i.append(self.zu[i][d])
        for d in range(Douti):
            params_eta2_i.append(self.theta_2[i][d])
            Rd = self.theta_1_R[i][d]
            Rd[diag_ind] = np.log(Rd[diag_ind])
            params_eta1_Ri.append(Rd[triu_ind].reshape((Mi * (Mi + 1) // 2,)))
        params['zu'].append(params_zu_i)
        params['eta1_R'].append(params_eta1_Ri)
        params['eta2'].append(params_eta2_i)
    return params
def update_hypers(self, params):
    for i in range(self.no_layers):
        Mi = self.no_pseudos[i]
        Dini = self.layer_sizes[i]
        Douti = self.layer_sizes[i+1]
        self.ls[i] = params['ls'][i]
        self.ones_M_ls[i] = np.outer(self.ones_M[i], 1.0 / np.exp(2 * self.ls[i]))
        self.sf[i] = params['sf'][i]
        if not (self.no_output_noise and (i == self.no_layers - 1)):
            self.sn[i] = params['sn'][i]
        triu_ind = np.triu_indices(Mi)
        diag_ind = np.diag_indices(Mi)
        if self.zu_tied:
            zi = params['zu'][i]
            self.zu[i] = zi
        else:
            for d in range(Douti):
                zid = params['zu'][i][d]
                self.zu[i][d] = zid
        for d in range(Douti):
            theta_m_d = params['eta2'][i][d]
            theta_R_d = params['eta1_R'][i][d]
            R = np.zeros((Mi, Mi))
            R[triu_ind] = theta_R_d.reshape(theta_R_d.shape[0], )
            R[diag_ind] = np.exp(R[diag_ind])
            self.theta_1_R[i][d] = R
            self.theta_1[i][d] = np.dot(R.T, R)
            self.theta_2[i][d] = theta_m_d
def test_balreal():
    isys = Lowpass(0.05)
    noise = 0.5*Lowpass(0.01) + 0.5*Alpha(0.005)
    p = 0.8
    sys = p*isys + (1-p)*noise

    T, Tinv, S = balanced_transformation(sys)
    assert np.allclose(inv(T), Tinv)
    assert np.allclose(S, hsvd(sys))

    balsys = sys.transform(T, Tinv)
    assert balsys == sys

    assert np.all(S >= 0)
    assert np.all(S[0] > 0.3)
    assert np.all(S[1:] < 0.05)
    assert np.allclose(sorted(S, reverse=True), S)

    P = control_gram(balsys)
    Q = observe_gram(balsys)

    diag = np.diag_indices(len(P))
    offdiag = np.ones_like(P, dtype=bool)
    offdiag[diag] = False
    offdiag = np.where(offdiag)

    assert np.allclose(P[diag], S)
    assert np.allclose(P[offdiag], 0)
    assert np.allclose(Q[diag], S)
    assert np.allclose(Q[offdiag], 0)
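The off-diagonal mask built in this test is a useful counterpart to np.diag_indices; in isolation (a hedged sketch with made-up data):

import numpy as np

P = np.arange(9.0).reshape(3, 3)
diag = np.diag_indices(3)
offdiag = np.ones_like(P, dtype=bool)
offdiag[diag] = False          # True everywhere except the diagonal
print(P[diag])                 # [0. 4. 8.]
print(P[np.where(offdiag)])    # [1. 2. 3. 5. 6. 7.]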
def __add_third_in_line(matrix, player, win_or_defend):
    """Look for 2 in line and try to win or defend in 1 move"""
    done = False
    opponent = model.opponent(player)

    # = horizontals
    for row in range(3):
        if not done:
            done, line = win_or_defend(matrix[row, :], player)
            if done:
                matrix[row, :] = line

    # || verticals
    for col in range(3):
        if not done:
            done, line = win_or_defend(matrix[:, col], player)
            if done:
                matrix[:, col] = line

    if not done:
        # \ diagonal
        done, line = win_or_defend(matrix.diagonal(), player)
        if done:
            matrix[np.diag_indices(3)] = line

    if not done:
        # / diagonal
        done, line = win_or_defend(np.fliplr(matrix).diagonal(), player)
        if done:
            np.fliplr(matrix)[np.diag_indices(3)] = line

    if not done:
        return matrix, player
    else:
        return matrix, opponent
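Writing to the anti-diagonal works above because np.fliplr returns a view into the original array; standalone (a hedged sketch):

import numpy as np

m = np.zeros((3, 3), dtype=int)
np.fliplr(m)[np.diag_indices(3)] = 1  # fliplr is a view, so this writes into m
print(m)
# [[0 0 1]
#  [0 1 0]
#  [1 0 0]]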
def _get_diagIDs(K):
    if K in diagIDsDict:
        return diagIDsDict[K]
    else:
        diagIDs = np.diag_indices(K)
        diagIDsDict[K] = diagIDs
        return diagIDs
def _calcWordTypeCooccurMatrix(self, Q, sameWordVec, nDoc):
    """Transform building blocks into the final Q matrix.

    Returns
    -------
    Q : 2D array, size W x W (where W is vocab_size)
    """
    Q /= np.float32(nDoc)
    sameWordVec /= np.float32(nDoc)
    diagIDs = np.diag_indices(self.vocab_size)
    Q[diagIDs] -= sameWordVec

    # Fix small numerical issues (like diag entries of -1e-15 instead of 0)
    np.maximum(Q, 1e-100, out=Q)
    return Q
def add_preference(hdf5_file, preference):
    """Assign the value 'preference' to the diagonal entries
    of the matrix of similarities stored in the HDF5 data structure
    at 'hdf5_file'.
    """
    Worker.hdf5_lock.acquire()
    with tables.open_file(hdf5_file, 'r+') as fileh:
        S = fileh.root.aff_prop_group.similarities
        diag_ind = np.diag_indices(S.nrows)
        S[diag_ind] = preference
    Worker.hdf5_lock.release()
def check_convergence(hdf5_file, iteration, convergence_iter, max_iter):
    """If the estimated number of clusters has not changed for
    'convergence_iter' consecutive iterations in a total of 'max_iter'
    rounds of message-passing, the procedure herewith returns 'True'.
    Otherwise, returns 'False'.
    Parameter 'iteration' identifies the run of message-passing
    that has just completed.
    """
    Worker.hdf5_lock.acquire()
    with tables.open_file(hdf5_file, 'r+') as fileh:
        A = fileh.root.aff_prop_group.availabilities
        R = fileh.root.aff_prop_group.responsibilities
        P = fileh.root.aff_prop_group.parallel_updates
        N = A.nrows
        diag_ind = np.diag_indices(N)
        E = (A[diag_ind] + R[diag_ind]) > 0
        P[:, iteration % convergence_iter] = E
        e_mat = P[:]
        K = E.sum(axis=0)
    Worker.hdf5_lock.release()

    if iteration >= convergence_iter:
        se = e_mat.sum(axis=1)
        unconverged = (np.sum((se == convergence_iter) + (se == 0)) != N)
        if (not unconverged and (K > 0)) or (iteration == max_iter):
            return True
    return False
def _gt_weights(W):
    """Computes the weights V for a Guttman transform V X = B(X) Z."""
    V = -W
    V[np.diag_indices(V.shape[0])] = W.sum(axis=1) - W.diagonal()
    return V
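A property check for these weights (a hedged sketch with a made-up symmetric W): V has the structure of a graph Laplacian, so every row sums to zero.

import numpy as np

W = np.array([[0.0, 1.0, 2.0],
              [1.0, 0.0, 3.0],
              [2.0, 3.0, 0.0]])
V = -W
V[np.diag_indices(3)] = W.sum(axis=1) - W.diagonal()
print(V.sum(axis=1))  # [0. 0. 0.]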
def _gt_mapping(D, W, Z):
    """Computes the mapping B(X) for a Guttman transform V X = B(X) Z."""
    # Compute the Euclidean distances between all pairs of points
    Dz = distance.cdist(Z, Z)
    # Fill the diagonal of Dz, because *we don't want a division by zero*
    np.fill_diagonal(Dz, 1e-5)

    B = -W * D / Dz
    np.fill_diagonal(B, 0.0)
    B[np.diag_indices(B.shape[0])] = -np.sum(B, axis=1)
    return B
def binning_as_matrix(bin_count, array, minimum=None, maximum=None,
                      axis=[], remove_small_cycles=True):
    """
    :param bin_count:
    :param array:
    :param maximum: maximum value to be recognized. Values bigger than max
        will be filtered
    :param minimum: minimum value to be recognized. Values smaller than min
        will be filtered
    :param axis: list of placements for axis. Possible list-elements are:
        * 'bottom'
        * 'left'
        * 'right'
        * 'top'
    :param remove_small_cycles: if True, cycles where start and end are
        identical after binning will be removed
    :return: data matrix with start in rows and target in columns
    """
    # Building a 2D histogram
    if minimum is None:
        minimum = np.nanmin(array)
    if maximum is None:
        maximum = np.nanmax(array)
    minimum = float(minimum)
    maximum = float(maximum)

    start = array[:, 0]
    target = array[:, 1]
    classified_data = np.histogram2d(
        start, target, bins=bin_count,
        range=[[minimum, maximum], [minimum, maximum]])

    res_matrix = classified_data[0]
    intervall_edges = classified_data[1]
    axis_value = np.diff(intervall_edges) / 2.0 + intervall_edges[0:-1]

    if remove_small_cycles:
        indexes = np.diag_indices(bin_count)
        res_matrix[indexes] = 0.0

    if axis:
        res_matrix = append_axis(res_matrix, horizontal_axis=axis_value,
                                 vertical_axis=axis_value, axis=axis)
    return res_matrix
def get_displacement_tensor(self):
    """A matrix where the entry A[i, j, :] is the vector
    self.cartesian_pos[i] - self.cartesian_pos[j].

    For periodic systems the distance of an atom from itself is the
    smallest displacement of an atom from one of its periodic copies,
    and the distance of two different atoms is the distance of the two
    closest copies.

    Returns:
        np.array: 3D matrix containing the pairwise distance vectors.
    """
    if self._displacement_tensor is None:
        if self.pbc.any():
            pos = self.get_scaled_positions()
            disp_tensor = pos[:, None, :] - pos[None, :, :]

            # Take periodicity into account by wrapping coordinate elements
            # that are bigger than 0.5 or smaller than -0.5
            indices = np.where(disp_tensor > 0.5)
            disp_tensor[indices] = 1 - disp_tensor[indices]
            indices = np.where(disp_tensor < -0.5)
            disp_tensor[indices] = disp_tensor[indices] + 1

            # Transform to cartesian
            disp_tensor = self.to_cartesian(disp_tensor)

            # Figure out the smallest basis vector and set it as
            # displacement for diagonal
            cell = self.get_cell()
            basis_lengths = np.linalg.norm(cell, axis=1)
            min_index = np.argmin(basis_lengths)
            min_basis = cell[min_index]
            diag_indices = np.diag_indices(len(disp_tensor))
            disp_tensor[diag_indices] = min_basis
        else:
            pos = self.get_positions()
            disp_tensor = pos[:, None, :] - pos[None, :, :]

        self._displacement_tensor = disp_tensor

    return self._displacement_tensor
def test_parameterized_orf(self):
    T1 = 3.16e8
    pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12),
                        gamma=parameter.Uniform(1, 7))
    orf = hd_orf_generic(a=parameter.Uniform(0, 5),
                         b=parameter.Uniform(0, 5),
                         c=parameter.Uniform(0, 5))
    rn = gp_signals.FourierBasisGP(spectrum=pl, Tspan=T1, components=30)
    crn = gp_signals.FourierBasisCommonGP(spectrum=pl, orf=orf,
                                          components=30, name='gw', Tspan=T1)
    model = rn + crn
    pta = model(self.psrs[0]) + model(self.psrs[1])

    lA1, gamma1 = -13, 1e-15
    lA2, gamma2 = -13.3, 1e-15
    lAc, gammac = -13.1, 1e-15
    a, b, c = 1.9, 0.4, 0.23
    params = {'gw_log10_A': lAc, 'gw_gamma': gammac,
              'gw_a': a, 'gw_b': b, 'gw_c': c,
              'B1855+09_log10_A': lA1, 'B1855+09_gamma': gamma1,
              'J1909-3744_log10_A': lA2, 'J1909-3744_gamma': gamma2}

    phi = pta.get_phi(params)
    phiinv = pta.get_phiinv(params)

    F1, f1 = utils.createfourierdesignmatrix_red(
        self.psrs[0].toas, nmodes=30, Tspan=T1)
    F2, f2 = utils.createfourierdesignmatrix_red(
        self.psrs[1].toas, nmodes=30, Tspan=T1)

    msg = 'F matrix incorrect'
    assert np.allclose(pta.get_basis(params)[0], F1, rtol=1e-10), msg
    assert np.allclose(pta.get_basis(params)[1], F2, rtol=1e-10), msg

    nftot = 120
    phidiag = np.zeros(nftot)
    phit = np.zeros((nftot, nftot))
    phidiag[:60] = utils.powerlaw(f1, lA1, gamma1)
    phidiag[:60] += utils.powerlaw(f1, lAc, gammac)
    phidiag[60:] = utils.powerlaw(f2, lA2, gamma2)
    phidiag[60:] += utils.powerlaw(f2, lAc, gammac)
    phit[np.diag_indices(nftot)] = phidiag
    orf = hd_orf_generic(self.psrs[0].pos, self.psrs[1].pos, a=a, b=b, c=c)
    spec = utils.powerlaw(f1, log10_A=lAc, gamma=gammac)
    phit[:60, 60:] = np.diag(orf * spec)
    phit[60:, :60] = phit[:60, 60:]

    msg = '{} {}'.format(np.diag(phi), np.diag(phit))
    assert np.allclose(phi, phit, rtol=1e-15, atol=1e-17), msg

    msg = 'PTA Phi inverse is incorrect {}.'.format(params)
    assert np.allclose(phiinv, np.linalg.inv(phit),
                       rtol=1e-15, atol=1e-17), msg