The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.linalg.det().
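Before the project snippets, a minimal standalone sketch of the basic call (illustration only, not drawn from any of the projects below):

import numpy as np

a = np.array([[1.0, 2.0],
              [3.0, 4.0]])
print(np.linalg.det(a))          # -2.0000000000000004

# det() also accepts a stack of matrices; the last two axes are the matrix axes.
stack = np.array([[[1.0, 2.0], [3.0, 4.0]],
                  [[1.0, 2.0], [2.0, 1.0]]])
print(np.linalg.det(stack))      # [-2. -3.]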
def __call__(self, params):
    print('???', params)
    sd1 = params[0]
    sd2 = params[1]
    cor = params[2]
    if sd1 < 0. or sd1 > 10. or sd2 < 0. or sd2 > 10. or cor < -1. or cor > 1.:
        return np.inf
    bandwidth = maths.stats.choleskysqrt2d(sd1, sd2, cor)
    bandwidthdet = la.det(bandwidth)
    bandwidthinv = la.inv(bandwidth)
    diff = sample[self.__iidx] - sample[self.__jidx]
    temp = diff.dot(bandwidthinv.T)
    temp *= temp
    e = np.exp(np.sum(temp, axis=1))
    s = np.sum(e**(-.25) - 4 * e**(-.5))
    cost = self.__n / bandwidthdet + (2. / bandwidthdet) * s
    print('!!!', cost)
    return cost / 10000.
def do(self, a, b):
    d = linalg.det(a)
    (s, ld) = linalg.slogdet(a)
    if asarray(a).dtype.type in (single, double):
        ad = asarray(a).astype(double)
    else:
        ad = asarray(a).astype(cdouble)
    ev = linalg.eigvals(ad)
    assert_almost_equal(d, multiply.reduce(ev, axis=-1))
    assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))

    s = np.atleast_1d(s)
    ld = np.atleast_1d(ld)
    m = (s != 0)
    assert_almost_equal(np.abs(s[m]), 1)
    assert_equal(ld[~m], -inf)
def test_byteorder_check():
    # Byte order check should pass for native order
    if sys.byteorder == 'little':
        native = '<'
    else:
        native = '>'

    for dtt in (np.float32, np.float64):
        arr = np.eye(4, dtype=dtt)
        n_arr = arr.newbyteorder(native)
        sw_arr = arr.newbyteorder('S').byteswap()
        assert_equal(arr.dtype.byteorder, '=')
        for routine in (linalg.inv, linalg.det, linalg.pinv):
            # Normal call
            res = routine(arr)
            # Native but not '='
            assert_array_equal(res, routine(n_arr))
            # Swapped
            assert_array_equal(res, routine(sw_arr))
def F_value_multivariate(ER, EF, dfnum, dfden):
    """
    Returns an F-statistic given the following:
        ER  = error associated with the null hypothesis (the Restricted model)
        EF  = error associated with the alternate hypothesis (the Full model)
        dfR = degrees of freedom of the Restricted model
        dfF = degrees of freedom associated with the Full model
    where ER and EF are matrices from a multivariate F calculation.
    """
    if type(ER) in [IntType, FloatType]:
        ER = N.array([[ER]])
    if type(EF) in [IntType, FloatType]:
        EF = N.array([[EF]])
    n_um = (LA.det(ER) - LA.det(EF)) / float(dfnum)
    d_en = LA.det(EF) / float(dfden)
    return n_um / d_en


#####################################
#######  SUPPORT FUNCTIONS  #########
#####################################
def _int_var_rbf(self, X, hyp, jitter=1e-8):
    """
    Posterior integral variance of the Gaussian Process quadrature.
    X - vector (1, 2*xdim**2+xdim)
    hyp - kernel hyperparameters [s2, el_1, ... el_d]
    """
    # reshape X to SP matrix
    X = np.reshape(X, (self.n, self.d))
    # set kernel hyper-parameters
    s2, el = hyp[0], hyp[1:]
    self.kern.param_array[0] = s2   # variance
    self.kern.param_array[1:] = el  # lengthscale
    K = self.kern.K(X)
    L = np.diag(el ** 2)
    # posterior variance of the integral
    ks = s2 * np.sqrt(det(L + np.eye(self.d))) * multivariate_normal(mean=np.zeros(self.d), cov=L).pdf(X)
    postvar = -ks.dot(solve(K + jitter * np.eye(self.n), ks.T))
    return postvar
def _int_var_rbf_hyp(self, hyp, X, jitter=1e-8):
    """
    Posterior integral variance as a function of hyper-parameters
    :param hyp: RBF kernel hyper-parameters [s2, el_1, ..., el_d]
    :param X: sigma-points
    :param jitter: numerical jitter (for stabilizing computations)
    :return: posterior integral variance
    """
    # reshape X to SP matrix
    X = np.reshape(X, (self.n, self.d))
    # set kernel hyper-parameters
    s2, el = 1, hyp  # sig_var hyper always set to 1
    self.kern.param_array[0] = s2   # variance
    self.kern.param_array[1:] = el  # lengthscale
    K = self.kern.K(X)
    L = np.diag(el ** 2)
    # posterior variance of the integral
    ks = s2 * np.sqrt(det(L + np.eye(self.d))) * multivariate_normal(mean=np.zeros(self.d), cov=L).pdf(X)
    postvar = s2 * np.sqrt(det(2 * inv(L) + np.eye(self.d))) ** -1 - ks.dot(
        solve(K + jitter * np.eye(self.n), ks.T))
    return postvar
def genCovariace(size):
    MaxIter = 1e+6
    S = np.zeros((size, size))
    itn = 0
    while (alg.det(S) <= 1e-3 and itn < MaxIter):
        itn = itn + 1
        # print int(numpy.log2(size))*size
        G6 = GenRndGnm(PUNGraph, size, int((size*(size-1))*0.05))
        S = np.zeros((size, size))
        for EI in G6.Edges():
            S[EI.GetSrcNId(), EI.GetDstNId()] = 0.6
        S = S + S.T + S.max()*np.matrix(np.eye(size))
    if itn == MaxIter:
        print('fail to find an invertible sparse inverse covariance matrix')
    S = np.asarray(S)
    return S
def genCovariace(size):
    MaxIter = 1e+6
    S = np.zeros((size, size))
    itn = 0
    while (alg.det(S) <= 1e-3 and itn < MaxIter):
        itn = itn + 1
        # print int(numpy.log2(size))*size
        G6 = GenRndGnm(PUNGraph, size, int((size*(size-1))*0.05))
        # G6 = snap.GenRndGnm(snap.PUNGraph, 5, 5)
        S = np.zeros((size, size))
        for EI in G6.Edges():
            S[EI.GetSrcNId(), EI.GetDstNId()] = 0.6
        S = S + S.T + S.max()*np.matrix(np.eye(size))
    if itn == MaxIter:
        print('fail to find an invertible sparse inverse covariance matrix')
    S = np.asarray(S)
    return S
def F(self, other):
    """
    Compute the fundamental matrix with respect to other camera
    http://www.robots.ox.ac.uk/~vgg/hzbook/code/vgg_multiview/vgg_F_from_P.m

    The computed fundamental matrix, given by the formula 17.3 (p. 412) in
    Hartley & Zisserman book (2nd ed.).

    Use as:
        F_10 = poses[0].F(poses[1])
        l_1 = F_10 * x_0
    """
    X1 = self.P[[1, 2], :]
    X2 = self.P[[2, 0], :]
    X3 = self.P[[0, 1], :]
    Y1 = other.P[[1, 2], :]
    Y2 = other.P[[2, 0], :]
    Y3 = other.P[[0, 1], :]

    F = np.float64([
        [det(np.vstack([X1, Y1])), det(np.vstack([X2, Y1])), det(np.vstack([X3, Y1]))],
        [det(np.vstack([X1, Y2])), det(np.vstack([X2, Y2])), det(np.vstack([X3, Y2]))],
        [det(np.vstack([X1, Y3])), det(np.vstack([X2, Y3])), det(np.vstack([X3, Y3]))]
    ])
    return F  # / F[2,2]
def __init__(self, density, bandwidth):
    self.__density = density
    self.__bandwidth = bandwidth
    self.__bandwidthinv = la.inv(bandwidth)
    self.__bandwidthdet = la.det(bandwidth)
def test_zero(self):
    assert_equal(linalg.det([[0.0]]), 0.0)
    assert_equal(type(linalg.det([[0.0]])), double)
    assert_equal(linalg.det([[0.0j]]), 0.0)
    assert_equal(type(linalg.det([[0.0j]])), cdouble)

    assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
    assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
    assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
    assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
    assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
    assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
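The test above leans on the relationship between det() and slogdet(); a standalone sketch of that relationship (illustration only, not from the project above):

import numpy as np

a = np.array([[2.0, 0.0],
              [0.0, 3.0]])
sign, logabsdet = np.linalg.slogdet(a)
# slogdet returns the sign and the log of the absolute determinant,
# so det(a) can be recovered as sign * exp(logabsdet).
assert np.allclose(np.linalg.det(a), sign * np.exp(logabsdet))  # det = 6.0

# For a singular matrix the sign is 0 and the log-determinant is -inf.
print(np.linalg.slogdet(np.zeros((2, 2))))  # (0.0, -inf)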
def test_types(self):
    def check(dtype):
        x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
        assert_equal(np.linalg.det(x).dtype, dtype)
        ph, s = np.linalg.slogdet(x)
        assert_equal(s.dtype, get_real_dtype(dtype))
        assert_equal(ph.dtype, dtype)
    for dtype in [single, double, csingle, cdouble]:
        yield check, dtype
def is_left(x0, x1, x2):
    """Returns True if x0 is left of the line between x1 and x2,
    False otherwise. Ref: https://stackoverflow.com/questions/1560492"""
    assert x1.shape == x2.shape == (2,)
    matrix = array([x1-x0, x2-x0])
    if len(x0.shape) == 2:
        matrix = matrix.transpose((1, 2, 0))
    return det(matrix) > 0
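A small usage sketch for is_left() above (standalone; assumes the definition above plus the numpy imports it relies on):

from numpy import array
from numpy.linalg import det

# (0, 1) lies to the left of the directed line from (0, 0) to (1, 0):
# the 2x2 determinant of the edge vectors is positive.
print(is_left(array([0.0, 1.0]), array([0.0, 0.0]), array([1.0, 0.0])))   # True
print(is_left(array([0.0, -1.0]), array([0.0, 0.0]), array([1.0, 0.0])))  # False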
def log_prob(self, x):
    from numpy.linalg import det

    mu = self.mu
    S = self.sigma
    D = len(mu)
    q = self.__q(x)
    return -0.5 * (D * log(2 * pi) + log(abs(det(S)))) - 0.5 * q ** 2
def rmsd(X, Y):
    """
    Calculate the root mean squared deviation (RMSD) using Kabsch' formula.

    @param X: (n, d) input vector
    @type X: numpy array

    @param Y: (n, d) input vector
    @type Y: numpy array

    @return: rmsd value between the input vectors
    @rtype: float
    """
    from numpy import sum, dot, sqrt, clip, average
    from numpy.linalg import svd, det

    X = X - X.mean(0)
    Y = Y - Y.mean(0)

    R_x = sum(X ** 2)
    R_y = sum(Y ** 2)

    V, L, U = svd(dot(Y.T, X))

    if det(dot(V, U)) < 0.:
        L[-1] *= -1

    return sqrt(clip(R_x + R_y - 2 * sum(L), 0., 1e300) / len(X))
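A minimal usage sketch for rmsd() above (standalone; the point sets are made-up illustration data):

import numpy as np

# Two (n, d) point clouds; Y is X rotated by 90 degrees about the z-axis.
X = np.array([[1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0],
              [0.0, 0.0, 1.0],
              [1.0, 1.0, 1.0]])
R = np.array([[0.0, -1.0, 0.0],
              [1.0,  0.0, 0.0],
              [0.0,  0.0, 1.0]])
Y = X.dot(R.T)

# Kabsch' formula gives the RMSD after optimal superposition,
# so a pure rotation should yield (numerically) zero.
print(rmsd(X, Y))  # ~0.0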
def gauss2D(x, mean, cov):
    # x, mean, cov are numpy arrays
    z = -np.dot(np.dot((x-mean).T, inv(cov)), (x-mean)) / 2.0
    temp = pow(sqrt(2.0*pi), len(x)) * sqrt(det(cov))
    return (1.0/temp) * exp(z)
def emit_prob_updated(self, X, post_state):
    # update the Gaussian emission parameters (means and covariances)
    for k in range(self.n_state):
        for j in range(self.x_size):
            self.emit_means[k][j] = np.sum(post_state[:, k] * X[:, j]) / np.sum(post_state[:, k])

        X_cov = np.dot((X - self.emit_means[k]).T, (post_state[:, k] * (X - self.emit_means[k]).T).T)
        self.emit_covars[k] = X_cov / np.sum(post_state[:, k])
        if det(self.emit_covars[k]) == 0:
            # covariance matrix is singular; add a small regularizer
            self.emit_covars[k] = self.emit_covars[k] + 0.01 * np.eye(len(X[0]))
def V(self):
    n1, n2, n3, n4 = self.getNodes()
    V = np.array([[1, n1.x, n1.y, n1.z],
                  [1, n2.x, n2.y, n2.z],
                  [1, n3.x, n3.y, n3.z],
                  [1, n4.x, n4.y, n4.z]])
    return la.det(V)/6
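A quick standalone check of the det(V)/6 tetrahedron-volume formula used above (illustration only, bypassing the project's node objects):

import numpy as np

# Tetrahedron with vertices (0,0,0), (1,0,0), (0,1,0), (0,0,1):
V = np.array([[1, 0, 0, 0],
              [1, 1, 0, 0],
              [1, 0, 1, 0],
              [1, 0, 0, 1]])
# Its volume is 1/6, which matches det(V)/6 for the homogeneous-coordinate matrix.
print(np.linalg.det(V) / 6)  # 0.1666...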
def get_gauss_pdf_value(x, mu, cov):
    p = len(mu)
    xs = x - mu
    covi = inv(cov)
    arg = -0.5 * (xs.T).dot(covi.dot(xs))

    # Normalization constant
    C = (((2.0 * np.pi)**p) * det(cov))**(-0.5)
    prob = C * np.exp(arg)
    return prob
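A standalone cross-check of the density above against SciPy's reference implementation (assumes the function above is in scope together with the np/inv/det imports it relies on; the numbers are made-up illustration data):

import numpy as np
from numpy.linalg import inv, det
from scipy.stats import multivariate_normal

mu = np.array([0.0, 0.0])
cov = np.array([[2.0, 0.3],
                [0.3, 1.0]])
x = np.array([0.5, -0.2])

# Both values should agree, since get_gauss_pdf_value() implements the
# standard multivariate normal density with det(cov) in the normalizer.
print(get_gauss_pdf_value(x, mu, cov))
print(multivariate_normal(mean=mu, cov=cov).pdf(x))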
def calc_log_Z(a, b, V_inv):
    # Equation 19.
    return gammaln(a) + log(sqrt(1. / det(V_inv))) - a * np.log(b)
def dispersion_relation_analytical(omega, beta, tau, Tpar_Tperp, kperp, kpar, gam, eta, nb, theta, k):
    k2 = kperp**2 + kpar**2
    b = 0.5*kperp**2/Tpar_Tperp
    inv_kpar = 1./kpar
    inv_kperp = 1./kperp
    inv_b = 1./b
    summand = get_sums_analytical(kperp, kpar, omega, Tpar_Tperp, nb)

    M = 1j*eta*k2*inv_kpar + omega - 0.5*inv_kpar*(1j*eta*kperp**2*inv_kpar+omega)*(summand[6]-summand[5]) + 0.5*(Tpar_Tperp-1.)/Tpar_Tperp*inv_kpar*(summand[8]-summand[7]) + 1j*eta*inv_kpar*(summand[2]-summand[3])
    N = 1j/beta*k2*inv_kperp + 1j*omega*inv_kperp*(summand[9]+inv_b*summand[10]-0.5*(summand[6]+3*summand[5])) - 1j*inv_kperp*(Tpar_Tperp-1.)/Tpar_Tperp*(summand[11]+inv_b*summand[12]-0.5*(summand[8]+3*summand[7]))
    O = 0.5j*gam/tau*(-inv_kperp*(summand[2]-summand[3]) + 0.5*kperp*inv_kpar*(summand[6]-summand[5]))
    P = 1j*kpar/beta - 1j*inv_kpar*(1j*eta*kperp**2*inv_kpar+omega)*(summand[9]+inv_b*summand[10]+0.5*(summand[6]-summand[5])) + 1j*inv_kpar*(Tpar_Tperp-1.)/Tpar_Tperp*(summand[11]+inv_b*summand[12]+0.5*(summand[8]-summand[7])) + eta*inv_kpar*(summand[2]+summand[3])
    Q = -(1j*eta*k2+omega*kpar)*inv_kperp + 0.5*omega*inv_kperp*(summand[6]-summand[5]) - 0.5*(Tpar_Tperp-1.)/Tpar_Tperp*inv_kperp*(summand[8]-summand[7])
    R = -0.5*gam/tau*(inv_kperp*(summand[2]+summand[3]) + kperp*inv_kpar*(summand[9] + inv_b*summand[10] + 0.5*(summand[6]-summand[5])))
    S = -(1j*eta*kperp**2*inv_kpar+omega)*1j*inv_kperp*inv_kpar*Tpar_Tperp*(summand[0]+summand[1]) + 1j*inv_kpar*inv_kperp*(Tpar_Tperp-1)*(summand[2]+summand[3]) + 2*eta*kperp*inv_kpar*summand[4]
    T = -Tpar_Tperp*omega*inv_kperp**2*(summand[0]-summand[1]) + inv_kperp**2*(Tpar_Tperp-1)*(summand[2]-summand[3])
    U = -1 - 0.5*gam/tau*(2*summand[4]+Tpar_Tperp*inv_kpar*(summand[0]+summand[1]))

    global mat
    mat = [[M, N, O], [P, Q, R], [S, T, U]]
    if det(mat) > 1:
        print(summand[6], -summand[5], file=outfile)
    return det(mat)


# wrapper for analytical or numerical dispersion relation
def update(self):
    pass

#    def execute(self, refholder):
#        input_matrix = refholder.matrices[self.inputs[0].links[0].from_socket.matrix_ref]
#        answer_matrix = np.zeros(16)
#        if la.det(input_matrix) == 0:
#            print("Matrix has no inverse")
#        else:
#            answer_matrix = la.inv(input_matrix)
#        self.outputs[0].matrix_ref = refholder.getRefForMatrix(answer_matrix)
def __init__(self, ctr, am):
    self.n = len(ctr)           # dimension
    self.ctr = np.array(ctr)    # center coordinates
    self.am = np.array(am)      # precision matrix (inverse of covariance)

    # Volume of ellipsoid is the volume of an n-sphere divided
    # by the (determinant of the) Jacobian associated with the
    # transformation, which by definition is the precision matrix.
    self.vol = vol_prefactor(self.n) / np.sqrt(linalg.det(self.am))

    # The eigenvalues (l) of `a` are (a^-2, b^-2, ...) where
    # (a, b, ...) are the lengths of principle axes.
    # The eigenvectors (v) are the normalized principle axes.
    l, v = linalg.eigh(self.am)
    if np.all((l > 0.) & (np.isfinite(l))):
        self.axlens = 1. / np.sqrt(l)
    else:
        raise ValueError("The input precision matrix defining the "
                         "ellipsoid {0} is apparently singular with "
                         "l={1} and v={2}.".format(self.am, l, v))

    # Scaled eigenvectors are the axes, where `axes[:,i]` is the
    # i-th axis. Multiplying this matrix by a vector will transform a
    # point in the unit n-sphere to a point in the ellipsoid.
    self.axes = np.dot(v, np.diag(self.axlens))

    # Amount by which volume was increased after initialization (i.e.
    # cumulative factor from `scale_to_vol`).
    self.expand = 1.
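A standalone sketch of the volume relation used in the constructor above; unit_ball_volume() here is a hypothetical stand-in for the project's vol_prefactor() helper:

import numpy as np
from scipy.special import gamma

def unit_ball_volume(n):
    # volume of the unit n-ball (stand-in for vol_prefactor)
    return np.pi ** (n / 2.0) / gamma(n / 2.0 + 1.0)

# Ellipsoid x^T A x <= 1 with precision matrix A = diag(1/a^2, 1/b^2):
# its volume pi*a*b equals unit-ball volume / sqrt(det(A)).
a_len, b_len = 2.0, 3.0
A = np.diag([1.0 / a_len**2, 1.0 / b_len**2])
vol = unit_ball_volume(2) / np.sqrt(np.linalg.det(A))
print(vol, np.pi * a_len * b_len)  # both ~18.85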
def correct(self, r):
    """Perform correction (measurement) update."""
    zhat, H = r.mfn(self.x)
    dz = r.z - zhat
    S = H @ self.P @ H.T + r.R
    SI = inv(S)
    K = self.P @ H.T @ SI
    self.x += K @ dz
    self.P -= K @ H @ self.P

    score = dz.T @ SI @ dz / 2.0 + ln(2 * pi * sqrt(det(S)))

    self._calc_bbox()
    return float(score)
def nll(self, r):
    """Get the nll score of assigning a measurement to the filter."""
    zhat, H = r.mfn(self.x)
    dz = r.z - zhat
    S = H @ self.P @ H.T + r.R
    score = dz.T @ inv(S) @ dz / 2.0 + ln(2 * pi * sqrt(det(S)))
    return float(score)
def _cov_and_inv(self, n, indices):
    """
    Calculate covariance around local support vector and also the inverse
    """
    cov = self._cov(indices, n)
    det = la.det(cov)
    while det <= 0:
        cov += sp.identity(cov.shape[0]) * self.EPS
        det = la.det(cov)
    inv_cov = la.inv(cov)
    return cov, inv_cov, det
def weights_rbf(self, unit_sp, hypers):
    # BQ weights for RBF kernel with given hypers, computations adopted from the GP-ADF code [Deisenroth] with
    # the following assumptions:
    #   (A1) the uncertain input is zero-mean with unit covariance
    #   (A2) one set of hyper-parameters is used for all output dimensions (one GP models all outputs)
    d, n = unit_sp.shape
    # GP kernel hyper-parameters
    alpha, el, jitter = hypers['sig_var'], hypers['lengthscale'], hypers['noise_var']
    assert len(el) == d
    # pre-allocation for convenience
    eye_d, eye_n = np.eye(d), np.eye(n)
    iLam1 = np.atleast_2d(np.diag(el ** -1))  # sqrt(Lambda^-1)
    iLam2 = np.atleast_2d(np.diag(el ** -2))

    inp = unit_sp.T.dot(iLam1)  # sigmas / el[:, na] (x - m)^T*sqrt(Lambda^-1) # (numSP, xdim)
    K = np.exp(2 * np.log(alpha) - 0.5 * maha(inp, inp))
    iK = cho_solve(cho_factor(K + jitter * eye_n), eye_n)
    B = iLam2 + eye_d  # (D, D)
    c = alpha ** 2 / np.sqrt(det(B))
    t = inp.dot(inv(B))  # inn*(P + Lambda)^-1
    l = np.exp(-0.5 * np.sum(inp * t, 1))  # (N, 1)
    zet = 2 * np.log(alpha) - 0.5 * np.sum(inp * inp, 1)
    inp = inp.dot(iLam1)
    R = 2 * iLam2 + eye_d
    t = 1 / np.sqrt(det(R))
    L = np.exp((zet[:, na] + zet[:, na].T) + maha(inp, -inp, V=0.5 * inv(R)))
    q = c * l  # evaluations of the kernel mean map (from the viewpoint of RKHS methods)
    # mean weights
    wm_f = q.dot(iK)
    iKQ = iK.dot(t * L)
    # covariance weights
    wc_f = iKQ.dot(iK)
    # cross-covariance "weights"
    wc_fx = np.diag(q).dot(iK)
    # used for self.D.dot(x - mean).dot(wc_fx).dot(fx)
    self.D = inv(eye_d + np.diag(el ** 2))  # S(S+Lam)^-1; for S=I, (I+Lam)^-1
    # model variance; to be added to the covariance
    # this diagonal form assumes independent GP outputs (cov(f^a, f^b) = 0 for all a, b: a neq b)
    self.model_var = np.diag((alpha ** 2 - np.trace(iKQ)) * np.ones((d, 1)))
    return wm_f, wc_f, wc_fx
def weights_rbf(self, unit_sp, hypers):
    # BQ weights for RBF kernel with given hypers, computations adopted from the GP-ADF code [Deisenroth] with
    # the following assumptions:
    #   (A1) the uncertain input is zero-mean with unit covariance
    #   (A2) one set of hyper-parameters is used for all output dimensions (one GP models all outputs)
    d, n = unit_sp.shape
    # GP kernel hyper-parameters
    alpha, el, jitter = hypers['sig_var'], hypers['lengthscale'], hypers['noise_var']
    assert len(el) == d
    # pre-allocation for convenience
    eye_d, eye_n = np.eye(d), np.eye(n)
    iLam1 = np.atleast_2d(np.diag(el ** -1))  # sqrt(Lambda^-1)
    iLam2 = np.atleast_2d(np.diag(el ** -2))

    inp = unit_sp.T.dot(iLam1)  # sigmas / el[:, na] (x - m)^T*sqrt(Lambda^-1) # (numSP, xdim)
    K = np.exp(2 * np.log(alpha) - 0.5 * maha(inp, inp))
    iK = cho_solve(cho_factor(K + jitter * eye_n), eye_n)
    B = iLam2 + eye_d  # (D, D)
    c = alpha ** 2 / np.sqrt(det(B))
    t = inp.dot(inv(B))  # inn*(P + Lambda)^-1
    l = np.exp(-0.5 * np.sum(inp * t, 1))  # (N, 1)
    zet = 2 * np.log(alpha) - 0.5 * np.sum(inp * inp, 1)
    inp = inp.dot(iLam1)
    R = 2 * iLam2 + eye_d
    t = 1 / np.sqrt(det(R))
    L = np.exp((zet[:, na] + zet[:, na].T) + maha(inp, -inp, V=0.5 * inv(R)))
    q = c * l  # evaluations of the kernel mean map (from the viewpoint of RKHS methods)
    # mean weights
    wm_f = q.dot(iK)
    iKQ = iK.dot(t * L)
    # covariance weights
    wc_f = iKQ.dot(iK)
    # cross-covariance "weights"
    wc_fx = np.diag(q).dot(iK)
    self.iK = iK
    # used for self.D.dot(x - mean).dot(wc_fx).dot(fx)
    self.D = inv(eye_d + np.diag(el ** 2))  # S(S+Lam)^-1; for S=I, (I+Lam)^-1
    # model variance; to be added to the covariance
    # this diagonal form assumes independent GP outputs (cov(f^a, f^b) = 0 for all a, b: a neq b)
    self.model_var = np.diag((alpha ** 2 - np.trace(iKQ)) * np.ones((d, 1)))
    return wm_f, wc_f, wc_fx