The following code examples, extracted from open-source Python projects, illustrate how to use numpy.triu_indices().
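Before the project examples, here is a minimal sketch (not taken from any of the projects below) of the two patterns that recur throughout: packing the upper triangle of a matrix into a 1-D vector, and zeroing the strict upper triangle via the row/column index arrays returned by numpy.triu_indices().

import numpy as np

A = np.arange(16).reshape(4, 4)

# Indices of the upper triangle including the diagonal (k=0).
rows, cols = np.triu_indices(4)
packed = A[rows, cols]      # 1-D vector of the 10 upper-triangular entries

# Indices of the strict upper triangle (k=1), often used to zero redundant entries.
iu = np.triu_indices(4, 1)
A[iu[0], iu[1]] = 0         # A now keeps only its lower triangle and diagonal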
def run_test_matmul_aa_correlator_kernel(self, ntime, nstand, nchan, misalign=0):
    x_shape = (ntime, nchan, nstand*2)
    perm = [1,0,2]
    x8 = ((np.random.random(size=x_shape+(2,))*2-1)*127).astype(np.int8)
    x = x8.astype(np.float32).view(np.complex64).reshape(x_shape)
    x = x.transpose(perm)
    x = x[..., misalign:]
    b_gold = np.matmul(H(x), x)
    triu = np.triu_indices(x.shape[-1], 1)
    b_gold[..., triu[0], triu[1]] = 0
    x = x8.view(bf.DataType.ci8).reshape(x_shape)
    x = bf.asarray(x, space='cuda')
    x = x.transpose(perm)
    x = x[..., misalign:]
    b = bf.zeros_like(b_gold, space='cuda')
    self.linalg.matmul(1, None, x, 0, b)
    b = b.copy('system')
    np.testing.assert_allclose(b, b_gold, RTOL*10, ATOL)
def get_close_markers(markers, centroids=None, min_distance=20):
    if centroids is None:
        centroids = [m['centroid'] for m in markers]
    centroids = np.array(centroids)

    ti = np.triu_indices(centroids.shape[0], 1)

    def full_idx(i):
        # get the pair from the condensed matrix index
        # defined inline because ti changes every time
        return np.array([ti[0][i], ti[1][i]])

    # calculate pairwise distances, returned as a condensed distance matrix (upper triangle)
    distances = pdist(centroids, 'euclidean')
    close_pairs = np.where(distances < min_distance)
    return full_idx(close_pairs)
def doTotalOrderExperiment(N, binaryWeights = False):
    I, J = np.meshgrid(np.arange(N), np.arange(N))
    I = I[np.triu_indices(N, 1)]
    J = J[np.triu_indices(N, 1)]
    #[I, J] = [I[0:N-1], J[0:N-1]]
    NEdges = len(I)
    R = np.zeros((NEdges, 2))
    R[:, 0] = J
    R[:, 1] = I

    #W = np.random.rand(NEdges)
    W = np.ones(NEdges)

    #Note: When using binary weights, Y is not necessarily a cocycle
    Y = I - J
    if binaryWeights:
        Y = np.ones(NEdges)
    (s, I, H) = doHodge(R, W, Y, verbose = True)
    printConsistencyRatios(Y, I, H, W)

#Random flip experiment with linear order
def from_tri_2_sym(tri, dim):
    """Convert an upper triangular matrix in 1D format to a 2D symmetric matrix

    Parameters
    ----------
    tri : 1D array
        Contains elements of the upper triangular matrix

    dim : int
        The dimension of the target matrix.

    Returns
    -------
    symm : 2D array
        Symmetric matrix in shape=[dim, dim]
    """
    symm = np.zeros((dim, dim))
    symm[np.triu_indices(dim)] = tri
    return symm
def run_test_matmul_aa_ci8_shape(self, shape, transpose=False):
    # **TODO: This currently never triggers the transpose path in the backend
    shape_complex = shape[:-1] + (shape[-1] * 2,)
    # Note: The xGPU-like correlation kernel does not support input values of -128 (only [-127:127])
    a8 = ((np.random.random(size=shape_complex) * 2 - 1) * 127).astype(np.int8)
    a_gold = a8.astype(np.float32).view(np.complex64)
    if transpose:
        a_gold = H(a_gold)
    # Note: np.matmul seems to be slow and inaccurate when there are batch dims
    c_gold = np.matmul(a_gold, H(a_gold))
    triu = np.triu_indices(shape[-2] if not transpose else shape[-1], 1)
    c_gold[..., triu[0], triu[1]] = 0
    a = a8.view(bf.DataType.ci8)
    a = bf.asarray(a, space='cuda')
    if transpose:
        a = H(a)
    c = bf.zeros_like(c_gold, space='cuda')
    self.linalg.matmul(1, a, None, 0, c)
    c = c.copy('system')
    np.testing.assert_allclose(c, c_gold, RTOL, ATOL)
def run_test_matmul_aa_dtype_shape(self, shape, dtype, axes=None, conj=False):
    a = ((np.random.random(size=shape)) * 127).astype(dtype)
    if axes is None:
        axes = range(len(shape))
    aa = a.transpose(axes)
    if conj:
        aa = aa.conj()
    c_gold = np.matmul(aa, H(aa))
    triu = np.triu_indices(shape[axes[-2]], 1)
    c_gold[..., triu[0], triu[1]] = 0
    a = bf.asarray(a, space='cuda')
    aa = a.transpose(axes)
    if conj:
        aa = aa.conj()
    c = bf.zeros_like(c_gold, space='cuda')
    self.linalg.matmul(1, aa, None, 0, c)
    c = c.copy('system')
    np.testing.assert_allclose(c, c_gold, RTOL, ATOL)
def run_benchmark_matmul_aa_correlator_kernel(self, ntime, nstand, nchan):
    x_shape = (ntime, nchan, nstand*2)
    perm = [1,0,2]
    x8 = ((np.random.random(size=x_shape+(2,))*2-1)*127).astype(np.int8)
    x = x8.astype(np.float32).view(np.complex64).reshape(x_shape)
    x = x.transpose(perm)
    b_gold = np.matmul(H(x[:,[0],:]), x[:,[0],:])
    triu = np.triu_indices(x_shape[-1], 1)
    b_gold[..., triu[0], triu[1]] = 0
    x = x8.view(bf.DataType.ci8).reshape(x_shape)
    x = bf.asarray(x, space='cuda')
    x = x.transpose(perm)
    b = bf.zeros_like(b_gold, space='cuda')
    bf.device.stream_synchronize()
    t0 = time.time()
    nrep = 200
    for _ in range(nrep):
        self.linalg.matmul(1, None, x, 0, b)
    bf.device.stream_synchronize()
    dt = time.time() - t0
    nflop = nrep * nchan * ntime * nstand*(nstand+1)/2 * 2*2 * 8
    print(nstand, '\t', nflop / dt / 1e9, 'GFLOP/s')
    print('\t\t', nrep*ntime*nchan / dt / 1e6, 'MHz')
def _compute_dispersion_matrix(X, labels):
    n = len(np.unique(labels))
    dist = np.zeros((n, n))
    ITR = list(itertools.combinations_with_replacement(range(n), 2))
    for i, j in tqdm(ITR):
        if i == j:
            d = pdist(X[labels == i], metric='cosine')
        else:
            d = cdist(X[labels == i], X[labels == j], metric='cosine')
            # Only take upper diagonal (+diagonal elements)
            d = d[np.triu_indices(n=d.shape[0], m=d.shape[1], k=0)]
        dist[i, j] = dist[j, i] = d.mean()
    return dist
def get_gcovmat(h2, rg):
    """
    Args:
        h2: vector with SNP heritabilities
        rg: vector with genetic correlations

    Returns:
        numpy trait-by-trait array with h2 on the diagonal and genetic covariances on the off-diagonals
    """
    mat = numpy.zeros((len(h2), len(h2)))
    mat[numpy.triu_indices(len(h2), 1)] = rg
    mat = mat + mat.T
    mat = mat * numpy.sqrt(numpy.outer(h2, h2))
    numpy.fill_diagonal(mat, h2)
    return numpy.array(mat)

# When input files are score files, not beta files, mtot may be unknown.
# Here mtot=1e6 is assumed. The absolute value of the expected variances for each trait is
# irrelevant for the multi-trait weighting, so it doesn't matter too much what this value is,
# especially if M > N.
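A small, hedged usage sketch for get_gcovmat (the numbers are illustrative, not from the source project): with two traits, the result carries the heritabilities on the diagonal and rg * sqrt(h2_i * h2_j) on the off-diagonals.

import numpy

h2 = numpy.array([0.5, 0.2])   # SNP heritabilities of two traits
rg = numpy.array([0.3])        # genetic correlation of the single trait pair

gcov = get_gcovmat(h2, rg)
# gcov is approximately
# [[0.5    0.0949]
#  [0.0949 0.2   ]]
# since the off-diagonal is 0.3 * sqrt(0.5 * 0.2) ≈ 0.0949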
def applyNNBig(Xin, model, msize=500, start=150):
    # Returns an adjacency matrix
    n_features = Xin.shape[1]
    X = Xin.copy()

    # Center
    X -= X.mean(axis=0)
    std = X.std(axis=0)
    std[std == 0] = 1
    X /= std

    larger = np.zeros((msize, msize))
    larger[start:start+n_features, start:start+n_features] = X.T.dot(X)/X.shape[0]
    emp_cov_matrix = np.expand_dims(larger, 0)
    pred = model.predict(np.expand_dims(emp_cov_matrix, 0))
    pred = pred.reshape(msize, msize)[start:start+n_features, start:start+n_features]
    C = np.zeros((X.shape[1], X.shape[1]))
    C[np.triu_indices(n_features, k=1)] = pred[np.triu_indices(n_features, k=1)]
    C = C + C.T
    return C
def linkage(df, n_groups):
    # create the distance matrix based on the Frobenius norm: |A-B|_F, where A is
    # a 24 x N matrix with N the number of timeseries inside the dataframe df
    # TODO: We could save half the time, since the distance matrix is symmetric
    # and we only need the upper triangle
    if True:
        Y = np.empty((n_groups, n_groups,))
        Y[:] = np.NAN
        for i in range(len(Y)):
            for j in range(len(Y[i,:])):
                A = df.loc[i+1].values
                B = df.loc[j+1].values
                #print('Computing distance of:{},{}'.format(i,j))
                Y[i,j] = norm(A-B, ord='fro')

    # condensed distance matrix as vector for linkage (upper triangle as a vector)
    y = Y[np.triu_indices(n_groups, 1)]

    # create linkage matrix with Ward's algorithm and the Euclidean norm
    Z = hac.linkage(y, method='ward', metric='euclidean')
    # R = hac.inconsistent(Z, d=10)
    return Z
def estimate_ls(self, X):
    Ntrain = X.shape[0]
    if Ntrain < 10000:
        X1 = np.copy(X)
    else:
        randind = np.random.permutation(Ntrain)
        X1 = X[randind[1:10000], :]
    d2 = compute_distance_matrix(X1)
    D = X1.shape[1]
    N = X1.shape[0]
    triu_ind = np.triu_indices(N)
    ls = np.zeros((D, ))
    for i in range(D):
        d2i = d2[:, :, i]
        d2imed = np.median(d2i[triu_ind])
        ls[i] = np.log(d2imed)
    return ls
def update_hypers(self, params):
    for i in range(self.nolayers):
        layeri = self.layers[i]
        Mi = layeri.M
        Dini = layeri.Din
        Douti = layeri.Dout
        layeri.ls.set_value(params['ls'][i])
        layeri.sf.set_value(params['sf'][i])
        layeri.sn.set_value(params['sn'][i])
        triu_ind = np.triu_indices(Mi)
        diag_ind = np.diag_indices(Mi)
        for d in range(Douti):
            layeri.zu[d].set_value(params['zu'][i][d])
            theta_m_d = params['eta2'][i][d]
            theta_R_d = params['eta1_R'][i][d]
            R = np.zeros((Mi, Mi))
            R[triu_ind] = theta_R_d.reshape(theta_R_d.shape[0], )
            R[diag_ind] = np.exp(R[diag_ind])
            layeri.theta_1_R[d] = R
            layeri.theta_1[d] = np.dot(R.T, R)
            layeri.theta_2[d] = theta_m_d
def estimate_ls(self, X):
    Ntrain = X.shape[0]
    if Ntrain < 10000:
        X1 = np.copy(X)
    else:
        randind = np.random.permutation(Ntrain)
        X1 = X[randind[0:(5*self.no_pseudos[0])], :]
    # d2 = compute_distance_matrix(X1)
    D = X1.shape[1]
    N = X1.shape[0]
    triu_ind = np.triu_indices(N)
    ls = np.zeros((D, ))
    for i in range(D):
        X1i = np.reshape(X1[:, i], (N, 1))
        d2i = cdist(X1i, X1i, 'euclidean')
        # d2i = d2[:, :, i]
        d2imed = np.median(d2i[triu_ind])
        # d2imed = 0.01
        # print d2imed,
        ls[i] = np.log(d2imed + 1e-16)
    return ls
def array2tree(d, names, outbase="", method="ward"):
    """Return tree representation for array"""
    import ete3
    Z = fastcluster.linkage(d[np.triu_indices(d.shape[0], 1)], method=method)
    tree = sch.to_tree(Z, False)
    t = ete3.Tree(getNewick(tree, "", tree.dist, names))

    # save tree & newick
    if outbase:
        pdf, nw = outbase+".nw.pdf", outbase+".nw"
        with open(nw, "w") as out:
            out.write(t.write())

        ts = ete3.TreeStyle()
        ts.show_leaf_name = False
        ts.layout_fn = mylayout
        t.render(pdf, tree_style=ts)
    return t
def xyz2U(x, y, z):
    """
    compute the potential
    """
    dsq = 0.
    indices = np.triu_indices(x.size, k=1)
    for coord in [x, y, z]:
        coord_i = np.tile(coord, (coord.size, 1))
        coord_j = coord_i.T
        dsq += (coord_i[indices]-coord_j[indices])**2
    d = np.sqrt(dsq)
    U = np.sum(1./d)
    return U
def ang_potential(x0):
    """
    If distance is computed along the sphere rather than through 3-space.
    """
    theta = x0[0:x0.size//2]
    phi = np.pi/2 - x0[x0.size//2:]

    indices = np.triu_indices(theta.size, k=1)

    theta_i = np.tile(theta, (theta.size, 1))
    theta_j = theta_i.T
    phi_i = np.tile(phi, (phi.size, 1))
    phi_j = phi_i.T
    d = _angularSeparation(theta_i[indices], phi_i[indices],
                           theta_j[indices], phi_j[indices])
    U = np.sum(1./d)
    return U
def elec_potential_xyz(x0):
    x0 = x0.reshape(3, x0.size//3)
    x = x0[0, :]
    y = x0[1, :]
    z = x0[2, :]
    dsq = 0.
    r = np.sqrt(x**2 + y**2 + z**2)
    x = x/r
    y = y/r
    z = z/r
    indices = np.triu_indices(x.size, k=1)
    for coord in [x, y, z]:
        coord_i = np.tile(coord, (coord.size, 1))
        coord_j = coord_i.T
        d = (coord_i[indices]-coord_j[indices])**2
        dsq += d
    U = np.sum(1./np.sqrt(dsq))
    return U
def prepare_pairs(X, y, site, indices):
    """
    Prepare the graph pairs before feeding them to the network
    """
    N, M, F = X.shape
    n_pairs = int(len(indices) * (len(indices) - 1) / 2)
    triu_pairs = np.triu_indices(len(indices), 1)

    X_pairs = np.ones((n_pairs, M, F, 2))
    X_pairs[:, :, :, 0] = X[indices][triu_pairs[0]]
    X_pairs[:, :, :, 1] = X[indices][triu_pairs[1]]

    site_pairs = np.ones(int(n_pairs))
    site_pairs[site[indices][triu_pairs[0]] != site[indices][triu_pairs[1]]] = 0

    y_pairs = np.ones(int(n_pairs))
    y_pairs[y[indices][triu_pairs[0]] != y[indices][triu_pairs[1]]] = 0  # -1

    print(n_pairs)
    return X_pairs, y_pairs, site_pairs
def plot(self, fig=None, ax=None):
    if fig is None:
        fig = plt.figure()
    if ax is None:
        ax = fig.gca()

    fmax = self.fs / 2.0
    bicoherence = np.copy(self.bicoherence)
    n_freq = bicoherence.shape[0]
    np.flipud(bicoherence)[np.triu_indices(n_freq, 1)] = 0
    bicoherence[np.triu_indices(n_freq, 1)] = 0
    bicoherence = bicoherence[:, :n_freq // 2 + 1]

    vmin, vmax = compute_vmin_vmax(bicoherence, tick=1e-15, percentile=1)
    ax.imshow(bicoherence, cmap=plt.cm.viridis, aspect='auto', vmin=vmin,
              vmax=vmax, origin='lower', extent=(0, fmax // 2, 0, fmax),
              interpolation='none')
    ax.set_title('Bicoherence (%s)' % self.method)
    ax.set_xlabel('Frequency (Hz)')
    ax.set_ylabel('Frequency (Hz)')
    # add_colorbar(fig, cax, vmin, vmax, unit='', ax=ax)
    return ax
def cluster_words(words, service_name, size):
    stopwords = ["GET", "POST", "total", "http-requests", service_name, "-", "_"]
    cleaned_words = []
    for word in words:
        for stopword in stopwords:
            word = word.replace(stopword, "")
        cleaned_words.append(word)

    def distance(coord):
        i, j = coord
        return 1 - jaro_distance(cleaned_words[i], cleaned_words[j])

    indices = np.triu_indices(len(words), 1)
    distances = np.apply_along_axis(distance, 0, indices)
    return cluster_of_size(linkage(distances), size)
def doRandomFlipExperiment(N, PercentFlips):
    I, J = np.meshgrid(np.arange(N), np.arange(N))
    I = I[np.triu_indices(N, 1)]
    J = J[np.triu_indices(N, 1)]
    NEdges = len(I)
    R = np.zeros((NEdges, 2))
    R[:, 0] = J
    R[:, 1] = I
    # toKeep = int(NEdges/200)
    # R = R[np.random.permutation(NEdges)[0:toKeep], :]
    # NEdges = toKeep
    W = np.random.rand(NEdges)
    #W = np.ones(NEdges)
    Y = np.ones(NEdges)
    NFlips = int(PercentFlips*len(Y))
    Y[np.random.permutation(NEdges)[0:NFlips]] = -1
    (s, I, H) = doHodge(R, W, Y)
    [INorm, HNorm] = [getWNorm(I, W), getWNorm(H, W)]
    return (INorm, HNorm)

#Do a bunch of random flip experiments, varying the percentage
#of flips, and take the mean consistency norms for each percentage
#over a number of trials
def doBatchExperiments(N, omissions, flips, NTrials = 10):
    I, J = np.meshgrid(np.arange(N), np.arange(N))
    I = I[np.triu_indices(N, 1)]
    J = J[np.triu_indices(N, 1)]
    Rankings = np.zeros((len(omissions), len(flips), NTrials, N))
    INorms = np.zeros((len(omissions), len(flips), NTrials))
    HNorms = np.zeros((len(omissions), len(flips), NTrials))
    for t in range(NTrials):
        for i in range(0, len(omissions)):
            om = omissions[i]
            NEdges = int(len(I)*(1-om))
            for j in range(len(flips)):
                print("Doing trial %i of %i, omission %i of %i, flip %i of %i"%(t, NTrials, i, len(omissions), j, len(flips)))
                R = np.zeros((NEdges, 2))
                idx = np.random.permutation(len(I))[0:NEdges]
                R[:, 0] = I[idx]
                R[:, 1] = J[idx]
                f = flips[j]
                Y = np.ones(NEdges)
                W = 0.5 + 0.5*np.random.rand(NEdges)
                #W = np.ones(NEdges)
                NFlips = int(f*len(Y))
                Y[np.random.permutation(NEdges)[0:NFlips]] = -1
                (s, IM, H) = doHodge(R, W, Y, verbose = True)
                Rankings[i, j, t, :] = s
                [INorms[i, j, t], HNorms[i, j, t]] = [getWNorm(IM, W), getWNorm(H, W)]
    KTScores = scoreBatchRankings(Rankings)
    sio.savemat("BatchResults.mat", {"Rankings":Rankings, "INorms":INorms, "HNorms":HNorms,
                                     "omissions":omissions, "flips":flips, "KTScores":KTScores})
def triu_indices(n, k=0):
    m = numpy.ones((n, n), int)
    a = triu(m, k)
    return numpy.where(a != 0)
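This helper mirrors what numpy itself does: numpy.triu_indices is essentially the nonzero pattern of a triangular matrix of ones. A quick sanity check, assuming the triu_indices function above and its imports (numpy and numpy.triu) are in scope:

import numpy
from numpy import triu

ours = triu_indices(5, k=1)          # the helper defined above
ref = numpy.triu_indices(5, k=1)     # numpy's built-in
assert all(numpy.array_equal(a, b) for a, b in zip(ours, ref))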
def __call__(self, row):
    '''
    Compute partition function stats over each document.
    '''
    text = row['text']

    stat_names = [
        'Z_mu', 'Z_std', 'Z_skew', 'Z_kurtosis',
        'I_mu', 'I_std', 'I_skew', 'I_kurtosis',
    ]

    stats = {}
    for key in stat_names:
        stats[key] = 0.0

    # Only keep words that are defined in the embedding
    valid_tokens = [w for w in text.split() if w in self.Z]

    # Take only the unique words in the document
    all_tokens = np.array(list(set(valid_tokens)))

    if len(all_tokens) > 3:
        # Possibly clip the values here as very large Z don't contribute
        doc_z = np.array([self.Z[w] for w in all_tokens])
        compute_stats(doc_z, stats, "Z")

        # Take top x% most descriptive words
        z_sort_idx = np.argsort(doc_z)[::-1]
        z_cut = max(int(self.intra_document_cutoff * len(doc_z)), 3)
        important_index = z_sort_idx[:z_cut]

        sub_tokens = all_tokens[important_index]
        doc_v = np.array([self.model[w] for w in sub_tokens])

        upper_idx = np.triu_indices(doc_v.shape[0], k=1)
        dist = np.dot(doc_v, doc_v.T)[upper_idx]
        compute_stats(dist, stats, "I")

    stats['_ref'] = row['_ref']
    return stats
def init_hypers(self, y_train):
    """Summary

    Returns:
        TYPE: Description

    Args:
        y_train (TYPE): Description
    """
    N = self.N
    M = self.M
    Din = self.Din
    Dout = self.Dout
    x_train = self.x_train
    if N < 10000:
        centroids, label = kmeans2(x_train, M, minit='points')
    else:
        randind = np.random.permutation(N)
        centroids = x_train[randind[0:M], :]
    zu = centroids

    if N < 10000:
        X1 = np.copy(x_train)
    else:
        randind = np.random.permutation(N)
        X1 = x_train[randind[:5000], :]

    x_dist = cdist(X1, X1, 'euclidean')
    triu_ind = np.triu_indices(N)
    ls = np.zeros((Din, ))
    d2imed = np.median(x_dist[triu_ind])
    for i in range(Din):
        ls[i] = 2 * np.log(d2imed + 1e-16)
    sf = np.log(np.array([0.5]))

    params = dict()
    params['sf'] = sf
    params['ls'] = ls
    params['zu'] = zu
    params['sn'] = np.log(0.01)
    return params
def compute_posterior_grad_u(self, dmu, dSu):
    # return grads wrt u params and Kuuinv
    triu_ind = np.triu_indices(self.M)
    diag_ind = np.diag_indices(self.M)
    if self.nat_param:
        dSu_via_m = np.einsum('da,db->dab', dmu, self.theta_2)
        dSu += dSu_via_m
        dSuinv = - np.einsum('dab,dbc,dce->dae', self.Su, dSu, self.Su)
        dKuuinv = np.sum(dSuinv, axis=0)
        dtheta1 = dSuinv
        deta2 = np.einsum('dab,db->da', self.Su, dmu)
    else:
        deta2 = dmu
        dtheta1 = dSu
        dKuuinv = 0

    dtheta1T = np.transpose(dtheta1, [0, 2, 1])
    dtheta1_R = np.einsum('dab,dbc->dac', self.theta_1_R, dtheta1 + dtheta1T)
    deta1_R = np.zeros([self.Dout, self.M * (self.M + 1) // 2])
    for d in range(self.Dout):
        dtheta1_R_d = dtheta1_R[d, :, :]
        theta1_R_d = self.theta_1_R[d, :, :]
        dtheta1_R_d[diag_ind] = dtheta1_R_d[diag_ind] * theta1_R_d[diag_ind]
        dtheta1_R_d = dtheta1_R_d[triu_ind]
        deta1_R[d, :] = dtheta1_R_d.reshape((dtheta1_R_d.shape[0], ))
    return deta1_R, deta2, dKuuinv
def get_hypers(self, key_suffix=''):
    """Summary

    Args:
        key_suffix (str, optional): Description

    Returns:
        TYPE: Description
    """
    params = {}
    M = self.M
    Din = self.Din
    Dout = self.Dout
    params['ls' + key_suffix] = self.ls
    params['sf' + key_suffix] = self.sf
    triu_ind = np.triu_indices(M)
    diag_ind = np.diag_indices(M)
    params_eta2 = self.theta_2
    params_eta1_R = np.zeros((Dout, M * (M + 1) // 2))
    params_zu_i = self.zu
    for d in range(Dout):
        Rd = np.copy(self.theta_1_R[d, :, :])
        Rd[diag_ind] = np.log(Rd[diag_ind])
        params_eta1_R[d, :] = np.copy(Rd[triu_ind])
    params['zu' + key_suffix] = self.zu
    params['eta1_R' + key_suffix] = params_eta1_R
    params['eta2' + key_suffix] = params_eta2
    return params
def update_hypers(self, params, key_suffix=''):
    """Summary

    Args:
        params (TYPE): Description
        key_suffix (str, optional): Description
    """
    M = self.M
    self.ls = params['ls' + key_suffix]
    self.sf = params['sf' + key_suffix]
    triu_ind = np.triu_indices(M)
    diag_ind = np.diag_indices(M)
    zu = params['zu' + key_suffix]
    self.zu = zu

    for d in range(self.Dout):
        theta_m_d = params['eta2' + key_suffix][d, :]
        theta_R_d = params['eta1_R' + key_suffix][d, :]
        R = np.zeros((M, M))
        R[triu_ind] = np.copy(theta_R_d.reshape(theta_R_d.shape[0], ))
        R[diag_ind] = np.exp(R[diag_ind])
        self.theta_1_R[d, :, :] = R
        self.theta_1[d, :, :] = np.dot(R.T, R)
        self.theta_2[d, :] = theta_m_d

    # update Kuu given new hypers
    self.compute_kuu()

    # compute mu and Su for each layer
    self.update_posterior()
def compute_cav_grad_u(self, dmu, dSu, alpha):
    # return grads wrt u params and Kuuinv
    triu_ind = np.triu_indices(self.M)
    diag_ind = np.diag_indices(self.M)
    beta = (self.N - alpha) * 1.0 / self.N
    if self.nat_param:
        dSu_via_m = np.einsum('da,db->dab', dmu, beta * self.theta_2)
        dSu += dSu_via_m
        dSuinv = - np.einsum('dab,dbc,dce->dae', self.Suhat, dSu, self.Suhat)
        dKuuinv = np.sum(dSuinv, axis=0)
        dtheta1 = beta * dSuinv
        deta2 = beta * np.einsum('dab,db->da', self.Suhat, dmu)
    else:
        Suhat = self.Suhat
        Suinv = self.Suinv
        mu = self.mu
        data_f_2 = np.einsum('dab,db->da', Suinv, mu)
        dSuhat_via_mhat = np.einsum('da,db->dab', dmu, beta * data_f_2)
        dSuhat = dSu + dSuhat_via_mhat
        dmuhat = dmu
        dSuhatinv = - np.einsum('dab,dbc,dce->dae', Suhat, dSuhat, Suhat)
        dSuinv_1 = beta * dSuhatinv
        Suhatdmu = np.einsum('dab,db->da', Suhat, dmuhat)
        dSuinv = dSuinv_1 + beta * np.einsum('da,db->dab', Suhatdmu, mu)
        dtheta1 = - np.einsum('dab,dbc,dce->dae', Suinv, dSuinv, Suinv)
        deta2 = beta * np.einsum('dab,db->da', Suinv, Suhatdmu)
        dKuuinv = (1 - beta) / beta * np.sum(dSuinv_1, axis=0)

    dtheta1T = np.transpose(dtheta1, [0, 2, 1])
    dtheta1_R = np.einsum('dab,dbc->dac', self.theta_1_R, dtheta1 + dtheta1T)
    deta1_R = np.zeros([self.Dout, self.M * (self.M + 1) // 2])
    for d in range(self.Dout):
        dtheta1_R_d = dtheta1_R[d, :, :]
        theta1_R_d = self.theta_1_R[d, :, :]
        dtheta1_R_d[diag_ind] = dtheta1_R_d[diag_ind] * theta1_R_d[diag_ind]
        dtheta1_R_d = dtheta1_R_d[triu_ind]
        deta1_R[d, :] = dtheta1_R_d.reshape((dtheta1_R_d.shape[0], ))
    return deta1_R, deta2, dKuuinv
def getImageDescriptors_HOG_cdist(self, all_emb, ref_emb, ref_mask):
    # unnormalized cosine distance for HOG
    dist = numpy.dot(all_emb, ref_emb.T)
    # normalize by length of query descriptor projected on reference
    norm = numpy.sqrt(numpy.dot(numpy.square(all_emb), ref_mask.T))
    dist /= norm
    dist[numpy.isinf(dist)] = 0.
    dist[numpy.isnan(dist)] = 0.
    # dist[numpy.triu_indices(dist.shape[0], 1)] = numpy.maximum(dist[numpy.triu_indices(dist.shape[0], 1)],
    #                                                            dist.T[numpy.triu_indices(dist.shape[0], 1)])
    # dist[numpy.tril_indices(dist.shape[0], -1)] = 0.
    # dist += dist.T
    return dist
def _fully_random_weights(n_features, lam_scale, prng):
    """Generate a symmetric random matrix with zeros along the diagonal."""
    weights = np.zeros((n_features, n_features))
    n_off_diag = int((n_features ** 2 - n_features) / 2)
    weights[np.triu_indices(n_features, k=1)] =\
        0.1 * lam_scale * prng.randn(n_off_diag) + (0.25 * lam_scale)
    weights[weights < 0] = 0
    weights = weights + weights.T
    return weights
def _random_weights(n_features, lam, lam_perturb, prng):
    """Generate a symmetric random matrix with zeros along the diagonal whose
    non-zero elements take the value {lam * lam_perturb, lam / lam_perturb}
    with probability 1/2.
    """
    weights = np.zeros((n_features, n_features))
    n_off_diag = int((n_features ** 2 - n_features) / 2)
    berns = prng.binomial(1, 0.5, size=n_off_diag)
    vals = np.zeros(berns.shape)
    vals[berns == 0] = 1. * lam * lam_perturb
    vals[berns == 1] = 1. * lam / lam_perturb
    weights[np.triu_indices(n_features, k=1)] = vals
    weights[weights < 0] = 0
    weights = weights + weights.T
    return weights
def vech_kh(X, stack_cols=True, conserve_norm=False):
    assert X.shape[0] == X.shape[1]

    # Scale off-diagonal indexes if norm has to be preserved
    d = X.shape[0]
    if conserve_norm:
        # Scale off-diagonal
        tmp = np.copy(X)
        triu_scale_idx = np.triu_indices(d, 1)
        tmp[triu_scale_idx] = np.sqrt(2) * tmp[triu_scale_idx]
    else:
        tmp = X

    triu_idx_r = []
    triu_idx_c = []

    # Find appropriate indexes
    if stack_cols:
        for c in range(0, d):
            for r in range(0, c+1):
                triu_idx_r.append(r)
                triu_idx_c.append(c)
    else:
        for r in range(0, d):
            for c in range(r, d):
                triu_idx_r.append(r)
                triu_idx_c.append(c)

    # Extract and return upper triangular
    triu_idx = (triu_idx_r, triu_idx_c)
    return tmp[triu_idx]
def unvech_kh(v, cols_stacked=True, norm_conserved=False):
    # Restore matrix dimension and add triangular
    v = v.flatten()
    d = int(0.5 * (np.sqrt(8 * len(v) + 1) - 1))
    X = np.empty((d, d))
    triu_idx_r = []
    triu_idx_c = []

    # Find appropriate indexes
    if cols_stacked:
        for c in range(0, d):
            for r in range(0, c+1):
                triu_idx_r.append(r)
                triu_idx_c.append(c)
    else:
        for r in range(0, d):
            for c in range(r, d):
                triu_idx_r.append(r)
                triu_idx_c.append(c)

    # Restore original matrix
    triu_idx = (triu_idx_r, triu_idx_c)
    X[triu_idx] = v
    X[np.tril_indices(d)] = X.T[np.tril_indices(d)]

    # Undo rescaling on off-diagonal elements
    if norm_conserved:
        X[np.triu_indices(d, 1)] /= np.sqrt(2)
        X[np.tril_indices(d, -1)] /= np.sqrt(2)

    return X
def upper2Full(a, eps = 0):
    ind = (a < eps) & (a > -eps)
    a[ind] = 0
    n = int((-1 + np.sqrt(1 + 8*a.shape[0]))/2)
    A = np.zeros([n, n])
    A[np.triu_indices(n)] = a
    temp = A.diagonal()
    A = np.asarray((A + A.T) - np.diag(temp))
    return A
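upper2Full inverts the packing done with np.triu_indices: given the n*(n+1)/2 upper-triangular entries of a symmetric matrix stacked row by row, it rebuilds the full matrix. A short round-trip check, assuming upper2Full from the example above and numpy (as np) are in scope:

import numpy as np

# Symmetric test matrix and its packed upper triangle (row-major order).
S = np.array([[2., 1., 0.],
              [1., 3., 4.],
              [0., 4., 5.]])
packed = S[np.triu_indices(3)]   # length 3 * (3 + 1) / 2 == 6

S_back = upper2Full(packed)      # rebuild the full symmetric matrix
assert np.allclose(S, S_back)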
def upper2Full(a):
    n = int((-1 + numpy.sqrt(1 + 8*a.shape[0]))/2)
    A = numpy.zeros([n, n])
    A[numpy.triu_indices(n)] = a
    temp = A.diagonal()
    A = (A + A.T) - numpy.diag(temp)
    return A
def upper2Full(self, a):
    n = int((-1 + numpy.sqrt(1 + 8*a.shape[0]))/2)
    A = numpy.zeros([n, n])
    A[numpy.triu_indices(n)] = a
    temp = A.diagonal()
    A = (A + A.T) - numpy.diag(temp)
    return A
def Prox_logdet(self, S, A, eta):
    d, q = numpy.linalg.eigh(eta*A - S)
    q = numpy.matrix(q)
    X_var = (1/(2*float(eta))) * q * (numpy.diag(d + numpy.sqrt(numpy.square(d) + (4*eta)*numpy.ones(d.shape)))) * q.T
    x_var = X_var[numpy.triu_indices(S.shape[1])]  # extract upper triangular part as update variable
    return numpy.matrix(x_var).T
def upperToFull(a, eps = 0):
    ind = (a < eps) & (a > -eps)
    a[ind] = 0
    n = int((-1 + np.sqrt(1 + 8*a.shape[0]))/2)
    A = np.zeros([n, n])
    A[np.triu_indices(n)] = a
    temp = A.diagonal()
    A = np.asarray((A + A.T) - np.diag(temp))
    return A
def spd_to_vector(M):
    return M[np.triu_indices(M.shape[0])]
def spd_to_vector_nondiag(M, scale=False):
    result = M[np.triu_indices(M.shape[0], k=1)]
    if scale:
        result = np.abs(result)/np.max(np.abs(result))
    return result
def applyNNBigRandom(Xin, model, reps=10, msize=500, start=150):
    # Returns an adjacency matrix
    n_features = Xin.shape[1]
    X = Xin.copy()

    # Center
    X -= X.mean(axis=0)
    std = X.std(axis=0)
    std[std == 0] = 1
    X /= std

    C_Final = np.zeros((X.shape[1], X.shape[1]))
    ind = np.arange(0, X.shape[1])
    larger = np.zeros((msize, msize))
    for i in range(reps):
        np.random.shuffle(ind)
        I = np.eye(X.shape[1])
        P = I[:, ind]
        larger[start:start+n_features, start:start+n_features] = P.T.dot(X.T.dot(X)).dot(P)/X.shape[0]
        emp_cov_matrix = np.expand_dims(larger, 0)
        pred = model.predict(np.expand_dims(emp_cov_matrix, 0))
        pred = pred.reshape(msize, msize)[start:start+n_features, start:start+n_features]
        C = np.zeros((X.shape[1], X.shape[1]))
        C[np.triu_indices(n_features, k=1)] = pred[np.triu_indices(n_features, k=1)]
        C = C + C.T
        C = P.dot(C).dot(P.T)
        C_Final += C
    C_Final = C_Final/float(reps)
    return C_Final