We have extracted the following 50 code examples from open source Python projects to illustrate how to use numpy.diagonal().
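As a quick orientation before the project examples (a minimal sketch, not taken from any of the quoted projects): numpy.diagonal() extracts a diagonal with an optional offset, and on arrays of three or more dimensions the axis1/axis2 arguments select which pair of axes forms the matrices.

import numpy as np

a = np.arange(9).reshape(3, 3)
print(np.diagonal(a))       # [0 4 8]  (main diagonal)
print(np.diagonal(a, 1))    # [1 5]    (first super-diagonal)
print(np.diagonal(a, -1))   # [3 7]    (first sub-diagonal)
# In NumPy >= 1.9 the result is a read-only view, so copy before writing:
d = np.diagonal(a).copy()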
def test_diagonal(self):
    a = np.arange(12).reshape((3, 4))
    assert_equal(a.diagonal(), [0, 5, 10])
    assert_equal(a.diagonal(0), [0, 5, 10])
    assert_equal(a.diagonal(1), [1, 6, 11])
    assert_equal(a.diagonal(-1), [4, 9])

    b = np.arange(8).reshape((2, 2, 2))
    assert_equal(b.diagonal(), [[0, 6], [1, 7]])
    assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
    assert_equal(b.diagonal(1), [[2], [3]])
    assert_equal(b.diagonal(-1), [[4], [5]])
    assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
    assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
    assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
    assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
    # Order of axis argument doesn't matter:
    assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def zz(matrix, nb):
    r"""Zig-zag traversal of the input matrix

    :param matrix: input matrix
    :param nb: number of coefficients to keep
    :return: an array of nb coefficients
    """
    flipped = np.fliplr(matrix)
    rows, cols = flipped.shape  # nb of columns
    coefficient_list = []

    for loop, i in enumerate(range(cols - 1, -rows, -1)):
        anti_diagonal = np.diagonal(flipped, i)
        # reversing even diagonals prioritizes the X resolution
        # reversing odd diagonals prioritizes the Y resolution
        # for square matrices, the information content is the same only when
        # nb covers half of the matrix, e.g. nb = n*(n+1)/2
        if loop % 2 == 0:
            anti_diagonal = anti_diagonal[::-1]  # reverse anti_diagonal
        coefficient_list.extend([x for x in anti_diagonal])

    # flattened = [val for sublist in coefficient_list for val in sublist]
    return coefficient_list[:nb]
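As a worked example (not part of the original project): on a 3x3 matrix, flipping left-right and walking np.diagonal() offsets from cols-1 down to -(rows-1) visits the anti-diagonals in the familiar JPEG zig-zag order.

import numpy as np

m = np.array([[1, 2, 3],
              [4, 5, 6],
              [7, 8, 9]])
print(zz(m, 9))  # [1, 2, 4, 7, 5, 3, 6, 8, 9]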
def predict_y(self, inputs):
    """Summary

    Args:
        inputs (TYPE): Description

    Returns:
        TYPE: Description
    """
    mf, vf = self.dyn_layer.forward_prop_thru_post(inputs)
    if self.gp_emi:
        mg, vg = self.emi_layer.forward_prop_thru_post(mf, vf)
        my, vy = self.lik_layer.output_probabilistic(mg, vg)
    else:
        my, _, vy = self.emi_layer.output_probabilistic(mf, vf)
        vy = np.diagonal(vy, axis1=1, axis2=2)
    return my, vy
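In the non-GP-emission branch above, vy comes back as a stack of covariance matrices, and np.diagonal(..., axis1=1, axis2=2) keeps only the per-output variances. A minimal illustration of this batched-diagonal pattern (not from the quoted project):

import numpy as np

stack = np.arange(2 * 3 * 3).reshape(2, 3, 3)  # a batch of two 3x3 matrices
print(np.diagonal(stack, axis1=1, axis2=2))    # one diagonal per matrix
# [[ 0  4  8]
#  [ 9 13 17]]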
def get_posterior_y(self):
    """Summary

    Returns:
        TYPE: Description
    """
    mx, vx = self.get_posterior_x()
    if self.Dcon_emi > 0:
        mx = np.hstack((mx, self.x_control))
        vx = np.hstack((vx, np.zeros((self.N, self.Dcon_emi))))
    if self.gp_emi:
        mf, vf = self.emi_layer.forward_prop_thru_post(mx, vx)
        my, vyn = self.lik_layer.output_probabilistic(mf, vf)
    else:
        my, vy, vyn = self.emi_layer.output_probabilistic(mx, vx)
        vf = np.diagonal(vy, axis1=1, axis2=2)
        vyn = np.diagonal(vyn, axis1=1, axis2=2)
    return my, vf, vyn
def lsInversion(self):
    """
    LS Inversion from Hwang et al (2002)
    """
    At = np.transpose(self.A)
    St = np.transpose(self.S)
    N = At.dot(self.P).dot(self.A)

    # solution:
    self.X = np.linalg.inv(N + self.S.dot(St)).dot(At).dot(self.P).dot(self.Obs)

    self.r = self.A.dot(self.X) - self.Obs
    rt = np.transpose(self.r)
    self.VtPV = rt.dot(self.P).dot(self.r)
    var_post_norm = self.VtPV / self.dof
    self.SDaposteriori = np.sqrt(var_post_norm)

    cov_post = np.linalg.inv(N) * var_post_norm
    self.var = np.diagonal(cov_post)
def _fisher_vector(self, img_descriptors):
    """
    :param img_descriptors: X
    :return: fisher vector
    :rtype: np.array
    """
    means, covariances, weights = self.gmm.means, self.gmm.covariances, self.gmm.weights
    s0, s1, s2 = self._likelihood_statistics(img_descriptors)
    T = img_descriptors.shape[0]
    diagonal_covariances = np.float32(
        [np.diagonal(covariances[k]) for k in range(0, covariances.shape[0])])

    # Refer page 4, first column of reference [1]
    g_weights = self._fisher_vector_weights(s0, s1, s2, means, diagonal_covariances, weights, T)
    g_means = self._fisher_vector_means(s0, s1, s2, means, diagonal_covariances, weights, T)
    g_sigma = self._fisher_vector_sigma(s0, s1, s2, means, diagonal_covariances, weights, T)

    fv = np.concatenate([np.concatenate(g_weights),
                         np.concatenate(g_means),
                         np.concatenate(g_sigma)])
    fv = self.normalize(fv)
    return fv
def compute_mar_likelihood(X_train, X_test, y_train, sigma, l):
    """
    compute log marginal likelihood for tuning parameters using Bayesian optimization

    :param X_train: training data
    :param X_test: test data
    :param y_train: training targets
    :param sigma: output variance
    :param l: lengthscale
    :return: log marginal likelihood
    """
    s = 0.0005  # noise variance and zero mean for noise
    n = len(X_train)
    # choose RBF kernel in this regression case
    K_train = RBF_kernel(X_train, X_train, sigma, l)
    L = np.linalg.cholesky(K_train + s * np.eye(n))
    m = np.linalg.solve(L, y_train)
    alpha = np.linalg.solve(L.T, m)
    # compute log marginal likelihood
    log_marg_likelihood = -.5 * np.dot(y_train.T, alpha) \
        - np.log(np.diagonal(L)).sum(0) - n / 2.0 * np.log(2 * np.pi)
    return log_marg_likelihood
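The np.log(np.diagonal(L)).sum(0) term above stands in for half the log-determinant of the kernel matrix: if K = L L^T with L lower-triangular, then log|K| = 2 * sum(log(L_ii)). A quick self-contained check of that identity (illustrative only, not from the quoted project):

import numpy as np

A = np.random.randn(5, 5)
K = A @ A.T + 5 * np.eye(5)  # symmetric positive-definite
L = np.linalg.cholesky(K)
assert np.isclose(np.log(np.linalg.det(K)),
                  2 * np.log(np.diagonal(L)).sum())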
def print_performance(cm):
    tp = np.diagonal(cm).astype(float)
    tpfp = np.sum(cm, axis=0).astype(float)  # sum of each col
    tpfn = np.sum(cm, axis=1).astype(float)  # sum of each row
    acc = np.sum(tp) / np.sum(cm)
    precision = tp / tpfp
    recall = tp / tpfn
    f1 = (2 * precision * recall) / (precision + recall)
    mf1 = np.mean(f1)

    print("Sample: {}".format(np.sum(cm)))
    print("W: {}".format(tpfn[W]))
    print("N1: {}".format(tpfn[N1]))
    print("N2: {}".format(tpfn[N2]))
    print("N3: {}".format(tpfn[N3]))
    print("REM: {}".format(tpfn[REM]))
    print("Confusion matrix:")
    print(cm)
    print("Precision: {}".format(precision))
    print("Recall: {}".format(recall))
    print("F1: {}".format(f1))
    print("Overall accuracy: {}".format(acc))
    print("Macro-F1 accuracy: {}".format(mf1))
def compute_precision_and_recall(confusion_matrix):
    correct_predictions = np.diagonal(confusion_matrix)
    samples_per_class = np.sum(confusion_matrix, axis=0)
    false_positives = np.sum(confusion_matrix, axis=1) - correct_predictions
    false_negatives = samples_per_class - correct_predictions

    prectmp = correct_predictions / (correct_predictions + false_positives)
    prectmp[np.where(correct_predictions == 0)[0]] = 0
    prectmp[np.where(samples_per_class == 0)[0]] = float('nan')
    precision = np.nanmean(prectmp)

    rectmp = correct_predictions / (correct_predictions + false_negatives)
    rectmp[np.where(correct_predictions == 0)[0]] = 0
    rectmp[np.where(samples_per_class == 0)[0]] = float('nan')
    recall = np.nanmean(rectmp)

    return precision, recall
def GetClassMetric(gtLabal, testLabel, numClass=-1, labelSet=npy.array([])):
    if numClass > 0:
        labelSet = npy.arange(numClass)
    else:
        # .size is an attribute, not a method
        if labelSet.size == 0 or npy.min(labelSet) < 0:
            return
        numClass = npy.max(labelSet) + 1

    confMat = npy.zeros((numClass, numClass), dtype=npy.float32)
    vecOnes = npy.ones(len(gtLabal))
    for ii in labelSet:
        for jj in labelSet:
            confMat[ii, jj] = npy.sum(vecOnes[npy.logical_and(testLabel == ii, gtLabal == jj)])

    ccn = npy.diagonal(confMat)
    oa = npy.sum(ccn) / npy.sum(confMat)
    pa = ccn / npy.sum(confMat, axis=0)
    ua = ccn / npy.sum(confMat, axis=1)
    temp1 = npy.sum(confMat) * npy.sum(ccn) - npy.sum(npy.sum(confMat, axis=1) * npy.sum(confMat, axis=0))
    temp2 = npy.power(npy.sum(confMat), 2) - npy.sum(npy.sum(confMat, axis=1) * npy.sum(confMat, axis=0))
    kappa = temp1 / temp2

    confMat = confMat.astype(npy.int32)
    accMetric = {"confMat": confMat, "oa": oa, "pa": pa, "ua": ua, "kappa": kappa}
    return accMetric
def write_overlap_densities(
        path_hdf5: str, paths_fragment_overlaps: List, swaps: Matrix, dt: int = 1):
    """
    Write the diagonal of the overlap matrices
    """
    logger.info("writing densities in human readable format")

    # Track the crossing between MOs
    for paths_overlaps in paths_fragment_overlaps:
        overlaps = np.stack(retrieve_hdf5_data(path_hdf5, paths_overlaps))
        for k, mtx in enumerate(np.rollaxis(overlaps, 0)):
            overlaps[k] = mtx[:, swaps[k]][swaps[k]]

    # Print to file the densities for each fragment on a given MO
    for ifrag, paths_overlaps in enumerate(paths_fragment_overlaps):
        # time frame
        frames = overlaps.shape[0]
        ts = np.arange(1, frames + 1).reshape(frames, 1) * dt
        # Diagonal of the 3D-tensor
        densities = np.diagonal(overlaps, axis1=1, axis2=2)
        data = np.hstack((ts, densities))
        # Save data in human readable format
        file_name = 'densities_fragment_{}.txt'.format(ifrag)
        np.savetxt(file_name, data, fmt='{:^3}'.format('%e'))
def tril(m, k=0):
    """
    Lower triangle of an array.

    Return a copy of an array with elements above the `k`-th diagonal zeroed.

    Parameters
    ----------
    m : array_like, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements.  `k = 0` (the default) is the
        main diagonal, `k < 0` is below it and `k > 0` is above.

    Returns
    -------
    array, shape (M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.

    See Also
    --------
    triu : Same thing, only for the upper triangle.
    """
    return m * tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype)
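NumPy's built-in np.tril follows the same `k` convention, so the docstring is easy to check directly:

import numpy as np

m = np.arange(12).reshape(3, 4)
print(np.tril(m, k=-1))  # zero the main diagonal and everything above it
# [[ 0  0  0  0]
#  [ 4  0  0  0]
#  [ 8  9  0  0]]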
def test_log_normal_matrix_full():
    n_points, n_components, n_features = 10, 5, 2

    points = np.random.randn(n_points, n_features)
    means = np.random.randn(n_components, n_features)
    cov = generate_covariance_matrices_full(n_components, n_features)

    # Beginning of the test
    log_det_cov = np.log(np.linalg.det(cov))
    precisions = np.linalg.inv(cov)
    log_prob = np.empty((n_points, n_components))
    for i in range(n_components):
        diff = points - means[i]
        y = np.dot(diff, np.dot(precisions[i], diff.T))
        log_prob[:, i] = np.diagonal(y)

    expected_log_normal_matrix = -0.5 * (n_features * np.log(2 * np.pi) +
                                         log_prob + log_det_cov)

    predicted_log_normal_matrix = _log_normal_matrix(points, means, cov, 'full')

    assert_almost_equal(expected_log_normal_matrix, predicted_log_normal_matrix)
def test_log_normal_matrix_full():
    n_points, n_components, n_features = 10, 5, 2

    points = np.random.randn(n_points, n_features)
    means = np.random.randn(n_components, n_features)
    cov = generate.generate_covariance_matrices_full(n_components, n_features)
    cov_chol = np.empty((n_components, n_features, n_features))
    for i in range(n_components):
        cov_chol[i] = linalg.cholesky(cov[i], lower=True)

    # Beginning of the test
    log_det_cov = np.log(np.linalg.det(cov))
    precisions = np.linalg.inv(cov)
    log_prob = np.empty((n_points, n_components))
    for i in range(n_components):
        diff = points - means[i]
        y = np.dot(diff, np.dot(precisions[i], diff.T))
        log_prob[:, i] = np.diagonal(y)

    expected_log_normal_matrix = -0.5 * (n_features * np.log(2 * np.pi) +
                                         log_prob + log_det_cov)

    predicted_log_normal_matrix = _log_normal_matrix(points, means, cov_chol, 'full')

    assert_almost_equal(expected_log_normal_matrix, predicted_log_normal_matrix)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices."""
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component.
            try:
                cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                          lower=True)
            except linalg.LinAlgError:
                raise ValueError("'covars' must be symmetric, "
                                 "positive-definite")

        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob
def test_item_is_self_similar(self):
    sim_matrix, _ = self.out
    diagonal = np.diagonal(sim_matrix)
    self.assertEqual(diagonal.tolist(), [2.0, 1.0, 2.0, 2.0, 1.0])
def diagonal_mpa(entries, sites):
    """Returns an MPA with ``entries`` on the diagonal and zeros otherwise.

    :param numpy.ndarray entries: one-dimensional array
    :returns: :class:`~mpnum.mparray.MPArray` with rank ``len(entries)``.
    """
    assert sites > 0

    if entries.ndim != 1:
        raise NotImplementedError("Currently only supports diagonal MPA with "
                                  "one leg per site.")

    if sites < 2:
        return mp.MPArray.from_array(entries)

    ldim = len(entries)
    leftmost_ltens = np.eye(ldim).reshape((1, ldim, ldim))
    rightmost_ltens = np.diag(entries).reshape((ldim, ldim, 1))
    center_ltens = np.zeros((ldim,) * 3)
    np.fill_diagonal(center_ltens, 1)
    ltens = it.chain((leftmost_ltens,), it.repeat(center_ltens, sites - 2),
                     (rightmost_ltens,))
    return mp.MPArray(LocalTensors(ltens, cform=(sites - 1, sites)))


#########################
#  More physical stuff  #
#########################
def _unitary_haar(dim, randstate=None):
    """Returns a sample from the Haar measure of the unitary group of given
    dimension.

    :param int dim: Dimension
    :param randstate: Function to create real N(0,1) distributed random
        variables. It should take the shape of the output as
        numpy.random.randn does (default: numpy.random.randn)
    """
    z = _zrandn((dim, dim), randstate) / np.sqrt(2.0)
    q, r = qr(z)
    d = np.diagonal(r)
    ph = d / np.abs(d)
    return q * ph
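The q * ph step rescales each column of the QR factor by a unit phase taken from np.diagonal(r), which removes the phase ambiguity of the factorization so the sample is Haar-distributed. A self-contained sketch using plain NumPy in place of the module's _zrandn helper (an assumption for illustration, not the original code):

import numpy as np

dim = 4
z = (np.random.randn(dim, dim) + 1j * np.random.randn(dim, dim)) / np.sqrt(2.0)
q, r = np.linalg.qr(z)
d = np.diagonal(r)
q = q * (d / np.abs(d))  # fix the phases of the diagonal of r
assert np.allclose(q @ q.conj().T, np.eye(dim))  # q is unitary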
def test_diagonal(self):
    a = [[0, 1, 2, 3],
         [4, 5, 6, 7],
         [8, 9, 10, 11]]
    out = np.diagonal(a)
    tgt = [0, 5, 10]

    assert_equal(out, tgt)
def test_diagonal_view_notwriteable(self):
    # this test is only for 1.9, the diagonal view will be
    # writeable in 1.10.
    a = np.eye(3).diagonal()
    assert_(not a.flags.writeable)
    assert_(not a.flags.owndata)

    a = np.diagonal(np.eye(3))
    assert_(not a.flags.writeable)
    assert_(not a.flags.owndata)

    a = np.diag(np.eye(3))
    assert_(not a.flags.writeable)
    assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
    # Regression test for a bug that crept in at one point
    a = np.zeros((100, 100))
    assert_(sys.getrefcount(a) < 50)
    for i in range(100):
        a.diagonal()
    assert_(sys.getrefcount(a) < 50)
def _half_log_det(self, M):
    """Return log(|M|)*0.5. For a positive definite matrix M of more than
    2 dimensions, calculate this for the last two dimensions and return a
    value corresponding to each element in the first few dimensions.
    """
    chol = np.linalg.cholesky(M)
    if M.ndim == 2:
        return np.sum(np.log(np.abs(np.diag(chol))))
    else:
        return np.sum(np.log(np.abs(np.diagonal(
            chol, axis1=-2, axis2=-1))), axis=-1)
def predict_forward_mm(self, T, x_control):
    """Summary

    Args:
        T (TYPE): Description
        x_control (None, optional): Description

    Returns:
        TYPE: Description
    """
    mx = np.zeros((T, self.Din))
    vx = np.zeros((T, self.Din))
    my = np.zeros((T, self.Dout))
    vy_noiseless = np.zeros((T, self.Dout))
    vy = np.zeros((T, self.Dout))
    post_m, post_v = self.get_posterior_x()
    mtm1 = post_m[[-1], :]
    vtm1 = post_v[[-1], :]
    for t in range(T):
        if self.Dcon_dyn > 0:
            mtm1 = np.hstack((mtm1, x_control[[t], :]))
            vtm1 = np.hstack((vtm1, np.zeros((1, self.Dcon_dyn))))
        mt, vt = self.dyn_layer.forward_prop_thru_post(mtm1, vtm1)
        if self.Dcon_emi > 0:
            mtc = np.hstack((mt, x_control[[t], :]))
            vtc = np.hstack((vt, np.zeros((1, self.Dcon_emi))))
        else:
            mtc, vtc = mt, vt
        if self.gp_emi:
            mft, vft = self.emi_layer.forward_prop_thru_post(mtc, vtc)
            myt, vyt_n = self.lik_layer.output_probabilistic(mft, vft)
        else:
            myt, vyt, vyt_n = self.emi_layer.output_probabilistic(mt, vt)
            vft = np.diagonal(vyt, axis1=1, axis2=2)
            vyt_n = np.diagonal(vyt_n, axis1=1, axis2=2)
        mx[t, :], vx[t, :] = mt, vt
        my[t, :], vy_noiseless[t, :], vy[t, :] = myt, vft, vyt_n
        mtm1 = mt
        vtm1 = vt
    return mx, vx, my, vy_noiseless, vy
def predict_forward_mc(self, T, x_control, no_samples):
    """Summary

    Args:
        T (TYPE): Description
        x_control (None, optional): Description

    Returns:
        TYPE: Description
    """
    x = np.zeros((T, no_samples, self.Din))
    my = np.zeros((T, no_samples, self.Dout))
    vy = np.zeros((T, no_samples, self.Dout))
    post_m, post_v = self.get_posterior_x()
    mtm1 = post_m[[-1], :]
    vtm1 = post_v[[-1], :]
    eps = np.random.randn(no_samples, self.Din)
    x_samples = eps * np.sqrt(vtm1) + mtm1
    for t in range(T):
        if self.Dcon_dyn > 0:
            xc_samples = np.hstack((x_samples,
                                    np.tile(x_control[[t], :], [no_samples, 1])))
        else:
            xc_samples = x_samples
        mt, vt = self.dyn_layer.forward_prop_thru_post(xc_samples)
        eps = np.random.randn(no_samples, self.Din)
        x_samples = eps * np.sqrt(vt) + mt
        if self.Dcon_emi > 0:
            xc_samples = np.hstack((x_samples,
                                    np.tile(x_control[[t], :], [no_samples, 1])))
        else:
            xc_samples = x_samples
        if self.gp_emi:
            mft, vft = self.emi_layer.forward_prop_thru_post(xc_samples)
            myt, vyt_n = self.lik_layer.output_probabilistic(mft, vft)
        else:
            myt, _, vyt_n = self.emi_layer.output_probabilistic(
                xc_samples, np.zeros_like(x_samples))
            vyt_n = np.diagonal(vyt_n, axis1=1, axis2=2)
        x[t, :, :] = x_samples
        my[t, :, :], vy[t, :, :] = myt, vyt_n
    return x, my, vy
def update_clustered_homogeneous_block_sizes(self, x, weight=1.0, block_size=None,
                                             include_self_loops=True):
    print("update_clustered_homogeneous_block_sizes ")
    if block_size is None:
        er = "error, block_size not specified!!!!"
        raise Exception(er)
        # block_size = self.block_size

    if isinstance(block_size, numpy.ndarray):
        er = "Error: inhomogeneous block sizes not supported by this function"
        raise Exception(er)

    # Assuming block_size is an integer:
    num_samples, dim = x.shape
    if num_samples % block_size > 0:
        err = "Inconsistency error: num_samples (%d) is not a multiple of block_size (%d)" % \
              (num_samples, block_size)
        raise Exception(err)
    num_blocks = num_samples // block_size  # integer division, used by range() below

    # warning, plenty of dtype missing!!!!!!!!
    sum_x = x.sum(axis=0)
    sum_prod_x = mdp.utils.mult(x.T, x)
    self.AddSamples(sum_prod_x, sum_x, num_samples, weight)

    self.last_block = None
    # DCorrelation Matrix. Compute the mean signal of each block
    media = numpy.zeros((num_blocks, dim))
    for i in range(num_blocks):
        media[i] = x[i * block_size:(i + 1) * block_size].sum(axis=0) * (1.0 / block_size)
    sum_prod_meds = mdp.utils.mult(media.T, media)
    # FIX1: AFTER DT in (0,4) normalization
    num_diffs = num_blocks * block_size  # ## * (block_size-1+1) / (block_size-1)
    print("num_diffs in block:", num_diffs, " num_samples:", num_samples)
    if include_self_loops:
        sum_prod_diffs = 2.0 * block_size * (sum_prod_x - block_size * sum_prod_meds) / block_size
    else:
        sum_prod_diffs = 2.0 * block_size * (sum_prod_x - block_size * sum_prod_meds) / (block_size - 1)
    self.AddDiffs(sum_prod_diffs, num_diffs, weight)
    print("(Diag(complete)/num_diffs.avg)**0.5 =",
          ((numpy.diagonal(sum_prod_diffs) / num_diffs).mean()) ** 0.5)
def diagonal(self):
    return numpy.diagonal(self.__mat)
def __animate_1d(self, mat, **kwargs):
    delta_list = numpy.diagonal(mat)
    for (idx, val) in enumerate(delta_list):
        if val is True:
            self.activate_1d(idx, **kwargs)
        elif val is False:
            self.deactivate_1d(idx, **kwargs)
    self.commit(**kwargs)
def _predict(self, steps, exog, alpha):
    assert 0 < alpha < 1
    y = (exog if exog is not None else self._endog)[-self.results.k_ar:]
    forecast = self.results.forecast(y, steps)
    # FIXME: The following is adapted from statsmodels's
    # VAR.forecast_interval() as the original doesn't work
    q = norm.ppf(1 - alpha / 2)
    sigma = np.sqrt(np.abs(np.diagonal(self.results.mse(steps), axis1=2)))
    err = q * sigma
    return np.asarray([forecast, forecast - err, forecast + err])
def __str__(self):
    return "x: {}, var: {}".format(self._x, np.diagonal(self._P))
def fisher_vector(samples, means, covs, w):
    s0, s1, s2 = likelihood_statistics(samples, means, covs, w)
    T = len(samples)
    covs = np.float32([np.diagonal(covs[k]) for k in range(0, covs.shape[0])])
    # pdb.set_trace()
    a = fisher_vector_weights(s0, s1, s2, means, covs, w, T)
    b = fisher_vector_means(s0, s1, s2, means, covs, w, T)
    c = fisher_vector_sigma(s0, s1, s2, means, covs, w, T)
    fv = np.concatenate([np.concatenate(a), np.concatenate(b), np.concatenate(c)])
    fv = normalize(fv)
    return fv
def _update_iteration_data(self, itr, algorithm, costs, pol_sample_lists):
    """
    Update iteration data information: iteration, average cost, and for each
    condition the mean cost over samples, step size, linear Gaussian
    controller entropies, and initial/final KL divergences for BADMM.
    """
    avg_cost = np.mean(costs)
    if pol_sample_lists is not None:
        test_idx = algorithm._hyperparams['test_conditions']
        # pol_sample_lists is a list of singletons
        samples = [sl[0] for sl in pol_sample_lists]
        pol_costs = [np.sum(algorithm.cost[idx].eval(s)[0])
                     for s, idx in zip(samples, test_idx)]
        itr_data = '%3d | %8.2f %12.2f' % (itr, avg_cost, np.mean(pol_costs))
    else:
        itr_data = '%3d | %8.2f' % (itr, avg_cost)
    for m in range(algorithm.M):
        cost = costs[m]
        step = np.mean(algorithm.prev[m].step_mult * algorithm.base_kl_step)
        entropy = 2 * np.sum(np.log(np.diagonal(
            algorithm.prev[m].traj_distr.chol_pol_covar, axis1=1, axis2=2)))
        itr_data += ' | %8.2f %8.2f %8.2f' % (cost, step, entropy)
        if isinstance(algorithm, AlgorithmBADMM):
            kl_div_i = algorithm.cur[m].pol_info.init_kl.mean()
            kl_div_f = algorithm.cur[m].pol_info.prev_kl.mean()
            itr_data += ' %8.2f %8.2f %8.2f' % (pol_costs[m], kl_div_i, kl_div_f)
        elif isinstance(algorithm, AlgorithmMDGPS):
            # TODO: Change for test/train better.
            if test_idx == algorithm._hyperparams['train_conditions']:
                itr_data += ' %8.2f' % (pol_costs[m])
            else:
                itr_data += ' %8s' % ("N/A")
    self.append_output_text(itr_data)
def f1_score(confusion):
    tps = np.diagonal(confusion)
    supports = confusion.sum(axis=1)
    # TODO remove this ignore divide by 0, shouldn't happen
    with np.errstate(divide='ignore', invalid='ignore'):
        precisions = np.true_divide(tps, confusion.sum(axis=0))
        recalls = np.true_divide(tps, supports)
        f1s = 2 * np.true_divide((precisions * recalls), (precisions + recalls))
        f1s[f1s == np.inf] = 0
        f1s = np.nan_to_num(f1s)
    f1 = np.average(f1s, weights=supports)
    return f1

# TODO remove duplicated code, same as intent model utils
def gradient_ascent(a, b, sigma, l, alpha, K_y):
    """
    tune hyperparameters sigma and l for RBF kernel

    :param a: input vector a
    :param b: input vector b
    :param sigma: output variance, determines the average distance of your
        function away from its mean
    :param l: lengthscale, determines the length of the 'wiggles' in your function
    :param alpha: equals K_inv * y
    :param K_y: K_inv
    :return: current sigma and l
    """
    step_size = 0.01
    sqdist = ((a[:, :, None] - b[:, :, None].T) ** 2).sum(1)
    # fix the output variance of RBF kernel in order to visualize it in one dimension
    '''
    # tune hyperparameter sigma
    sigma_grad = 2 * sigma * np.exp(-.5 * sqdist / (l ** 2))
    sigma_matrix = np.dot(np.dot(alpha, alpha.T) - K_y, sigma_grad)
    tr_sigma = np.diagonal(sigma_matrix).sum()
    sigma_var = .5 * tr_sigma
    '''
    # tune hyperparameter l
    l_grad = sigma ** 2 * np.exp(-.5 * sqdist / (l ** 2)) * (sqdist / l ** 3)
    l_matrix = np.dot(np.dot(alpha, alpha.T) - K_y, l_grad)
    tr_l = np.diagonal(l_matrix).sum()
    l_var = .5 * tr_l
    # gradient ascent to maximize the log marginal likelihood
    '''
    sigma = sigma + step_size * sigma_var
    '''
    l = l + step_size * l_var
    return sigma, l
def test_diagonal():
    b1 = np.matrix([[1, 2], [3, 4]])
    diag_b1 = np.matrix([[1, 4]])
    array_b1 = np.array([1, 4])

    assert_equal(b1.diagonal(), diag_b1)
    assert_equal(np.diagonal(b1), array_b1)
    assert_equal(np.diag(b1), array_b1)
def _compute_output_errors(traj, x, P, output_stamps,
                           gyro_model, accel_model):
    T = _errors_transform_matrix(traj.loc[output_stamps])
    y = util.mv_prod(T, x[:, :N_BASE_STATES])
    Py = util.mm_prod(T, P[:, :N_BASE_STATES, :N_BASE_STATES])
    Py = util.mm_prod(Py, T, bt=True)
    sd_y = np.diagonal(Py, axis1=1, axis2=2) ** 0.5

    err = pd.DataFrame(index=output_stamps)
    err['lat'] = y[:, DRN]
    err['lon'] = y[:, DRE]
    err['VE'] = y[:, DVE]
    err['VN'] = y[:, DVN]
    err['h'] = np.rad2deg(y[:, DH])
    err['p'] = np.rad2deg(y[:, DP])
    err['r'] = np.rad2deg(y[:, DR])

    sd = pd.DataFrame(index=output_stamps)
    sd['lat'] = sd_y[:, DRN]
    sd['lon'] = sd_y[:, DRE]
    sd['VE'] = sd_y[:, DVE]
    sd['VN'] = sd_y[:, DVN]
    sd['h'] = np.rad2deg(sd_y[:, DH])
    sd['p'] = np.rad2deg(sd_y[:, DP])
    sd['r'] = np.rad2deg(sd_y[:, DR])

    gyro_err = pd.DataFrame(index=output_stamps)
    gyro_sd = pd.DataFrame(index=output_stamps)
    n = N_BASE_STATES
    for i, name in enumerate(gyro_model.states):
        gyro_err[name] = x[:, n + i]
        gyro_sd[name] = P[:, n + i, n + i] ** 0.5

    accel_err = pd.DataFrame(index=output_stamps)
    accel_sd = pd.DataFrame(index=output_stamps)
    ng = gyro_model.n_states
    for i, name in enumerate(accel_model.states):
        accel_err[name] = x[:, n + ng + i]
        accel_sd[name] = P[:, n + ng + i, n + ng + i] ** 0.5

    return err, sd, gyro_err, gyro_sd, accel_err, accel_sd