Python scipy.linalg module: inv() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use scipy.linalg.inv().

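Before the project examples, here is a minimal usage sketch of scipy.linalg.inv() itself (the matrix values are illustrative):

import numpy as np
from scipy import linalg

A = np.array([[3., 1.],
              [2., 4.]])
A_inv = linalg.inv(A)
print(np.allclose(A @ A_inv, np.eye(2)))   # True

# To solve A x = b, prefer linalg.solve(A, b); it is cheaper and more
# numerically stable than forming inv(A) @ b explicitly.
x = linalg.solve(A, np.array([1., 0.]))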
Project: MKLMM    Author: omerwe
def infExact_scipy_post(self, K, covars, y, sig2e, fixedEffects):
        n = y.shape[0]

        #mean vector
        m = covars.dot(fixedEffects)

        if (K.shape[1] < K.shape[0]): K_true = K.dot(K.T)
        else: K_true = K

        if sig2e<1e-6:
            L = la.cholesky(K_true + sig2e*np.eye(n), overwrite_a=True, check_finite=False)      #Cholesky factor of covariance with noise
            sl =   1
            pL = -self.solveChol(L, np.eye(n))                                                   #L = -inv(K+inv(sW^2))
        else:
            L = la.cholesky(K_true/sig2e + np.eye(n), overwrite_a=True, check_finite=False)      #Cholesky factor of B
            sl = sig2e                     
            pL = L                                                                               #L = chol(eye(n)+sW*sW'.*K)
        alpha = self.solveChol(L, y-m, overwrite_b=False) / sl

        post = dict([]) 
        post['alpha'] = alpha                                                                   #return the posterior parameters
        post['sW'] = np.ones(n) / np.sqrt(sig2e)                                                #sqrt of noise precision vector
        post['L'] = pL
        return post
Project: Neural_Artistic_Style    Author: everfor
def transfer_color(content, style):
    import scipy.linalg as sl
    # Mean and covariance of content
    content_mean = np.mean(content, axis = (0, 1))
    content_diff = content - content_mean
    content_diff = np.reshape(content_diff, (-1, content_diff.shape[2]))
    content_covariance = np.matmul(content_diff.T, content_diff) / (content_diff.shape[0])

    # Mean and covariance of style
    style_mean = np.mean(style, axis = (0, 1))
    style_diff = style - style_mean
    style_diff = np.reshape(style_diff, (-1, style_diff.shape[2]))
    style_covariance = np.matmul(style_diff.T, style_diff) / (style_diff.shape[0])

    # Calculate A and b
    A = np.matmul(sl.sqrtm(content_covariance), sl.inv(sl.sqrtm(style_covariance)))
    b = content_mean - np.matmul(A, style_mean)

    # Construct new style
    new_style = np.reshape(style, (-1, style.shape[2])).T
    new_style = np.matmul(A, new_style).T
    new_style = np.reshape(new_style, style.shape)
    new_style = new_style + b

    return new_style
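A hypothetical usage sketch for the function above, with random arrays standing in for real images (float arrays of shape (H, W, 3)):

import numpy as np

content = np.random.rand(64, 64, 3)
style = np.random.rand(64, 64, 3)
styled = transfer_color(content, style)
# By construction the transformed style matches the content's per-channel mean:
# E[A s + b] = A E[s] + (E[content] - A E[style]) = E[content]
print(styled.reshape(-1, 3).mean(axis=0))
print(content.reshape(-1, 3).mean(axis=0))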
Project: PersonalizedMultitaskLearning    Author: mitmedialab
def updateGamma(self):
        task_matrices = np.zeros((self.n_tasks, self.num_feats, self.num_feats))
        for m in range(self.n_tasks):
            rho_vector = rhoFunction(np.array(self.xi[m]))
            rho_vector = rho_vector.reshape((1,-1))             # Make rho vector 2D
            task_X = self.task_dict[m]['X']
            # Note that the transposing doesn't exactly match the paper because our data format is slightly different
            rho_matrix = abs(rho_vector) * task_X.T
            task_matrices[m,:,:] = np.dot(rho_matrix, task_X)  

        for k in range(self.K):
            inner_sum = np.zeros((self.num_feats,self.num_feats))
            for m in range(self.n_tasks):
                inner_sum = inner_sum + self.phi[m,k] * task_matrices[m,:,:]
            self.gamma[k] = la.inv(la.inv(self.sigma) + 2*inner_sum)
            if self.debug: 
                print "gamma computation {0}".format(k), la.det(la.inv(self.sigma) + 2*inner_sum)
Project: adversarial-variational-bayes    Author: gdikov
def normal_density_at_zero(m, c):
    """ Compute the normal density with given mean and covariance at zero. 

    Parameters
    ----------    
    m : vector
        Mean.
    c : ndarray
        Covariance matrix. Assumption: c is square matrix and its size is 
        compatible with that of m.

    Returns
    -------
    g : float
        Computed density value.

    """

    dim = len(m)
    g = 1 / ((2 * pi)**(dim / 2) * sqrt(absolute(det(c)))) *\
        exp(-1/2 * dot(dot(m, inv(c)), m))

    return g
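A quick cross-check of this closed form against scipy.stats (values illustrative; the snippet assumes NumPy-style names such as pi, sqrt, absolute, det, dot, exp and inv imported at module level):

import numpy as np
from scipy.stats import multivariate_normal

m = np.array([0.5, -1.0])
c = np.array([[2.0, 0.3],
              [0.3, 1.0]])
# density of N(m, c) evaluated at the origin
ref = multivariate_normal(mean=m, cov=c).pdf(np.zeros(2))
# normal_density_at_zero(m, c) should agree with ref up to float rounding.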
Project: pyGPGO    Author: hawk31
def fit(self, X, y):
        """
        Fits a t-Student Process regressor

        Parameters
        ----------
        X: np.ndarray, shape=(nsamples, nfeatures)
            Training instances to fit the GP.
        y: np.ndarray, shape=(nsamples,)
            Corresponding continuous target values to `X`.

        """
        self.X = X
        self.y = y
        self.n1 = X.shape[0]

        if self.optimize:
            self.optHyp(param_key=self.covfunc.parameters, param_bounds=self.covfunc.bounds)

        self.K11 = self.covfunc.K(self.X, self.X)
        self.beta1 = np.dot(np.dot(self.y.T, inv(self.K11)), self.y)
        self.logp = logpdf(self.y, self.nu, mu=np.zeros(self.n1), Sigma=self.K11)
Project: QuantEcon.lectures.code    Author: QuantEcon
def predict(self, a_hist, t):
        """
        This function implements the prediction formula discussed in section 6 (1.59).
        It takes a realization for a^N, and the period in which the prediction is formed.

        Output:  E[abar | a_t, a_{t-1}, ..., a_1, a_0]
        """

        N = np.asarray(a_hist).shape[0] - 1        
        a_hist = np.asarray(a_hist).reshape(N + 1, 1)
        V = self.construct_V(N + 1)

        aux_matrix = np.zeros((N + 1, N + 1))
        aux_matrix[:(t + 1), :(t + 1)] = np.eye(t + 1)
        L = la.cholesky(V).T
        Ea_hist = la.inv(L) @ aux_matrix @ L @ a_hist

        return Ea_hist
Project: QuantEcon.lectures.code    Author: QuantEcon
def plot4():
    # Density 1
    Z = gen_gaussian_plot_vals(x_hat, Σ)
    cs1 = ax.contour(X, Y, Z, 6, colors="black")
    ax.clabel(cs1, inline=1, fontsize=10)
    # Density 2
    M = Σ * G.T * linalg.inv(G * Σ * G.T + R)
    x_hat_F = x_hat + M * (y - G * x_hat)
    Σ_F = Σ - M * G * Σ
    Z_F = gen_gaussian_plot_vals(x_hat_F, Σ_F)
    cs2 = ax.contour(X, Y, Z_F, 6, colors="black")
    ax.clabel(cs2, inline=1, fontsize=10)
    # Density 3
    new_x_hat = A * x_hat_F
    new_Σ = A * Σ_F * A.T + Q
    new_Z = gen_gaussian_plot_vals(new_x_hat, new_Σ)
    cs3 = ax.contour(X, Y, new_Z, 6, colors="black")
    ax.clabel(cs3, inline=1, fontsize=10)
    ax.contourf(X, Y, new_Z, 6, alpha=0.6, cmap=cm.jet)
    ax.text(float(y[0]), float(y[1]), r"$y$", fontsize=20, color="black")

# == Choose a plot to generate == #
Project: car-detection    Author: mmetcalfe
def factor(self):
        """    Factorize the camera matrix into K,R,t as P = K[R|t]. """

        # factor first 3*3 part
        K,R = linalg.rq(self.P[:,:3])

        # make diagonal of K positive
        T = diag(sign(diag(K)))
        if linalg.det(T) < 0:
            T[1,1] *= -1

        self.K = dot(K,T)
        self.R = dot(T,R) # T is its own inverse
        self.t = dot(linalg.inv(self.K),self.P[:,3])

        return self.K, self.R, self.t
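A standalone sketch of the same RQ-based factorization, verifying that a camera matrix built from known (hypothetical) K, R, t is recovered:

import numpy as np
from scipy import linalg

K = np.array([[800., 0., 320.],
              [0., 800., 240.],
              [0., 0., 1.]])
th = 0.1
R = np.array([[np.cos(th), -np.sin(th), 0.],
              [np.sin(th), np.cos(th), 0.],
              [0., 0., 1.]])
t = np.array([0.5, -0.2, 2.0])
P = K @ np.hstack([R, t[:, None]])

K2, R2 = linalg.rq(P[:, :3])
T = np.diag(np.sign(np.diag(K2)))      # flip signs so diag(K2) > 0
K2, R2 = K2 @ T, T @ R2                # T is its own inverse
t2 = linalg.inv(K2) @ P[:, 3]
assert np.allclose(K2, K) and np.allclose(R2, R) and np.allclose(t2, t)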
Project: stuff    Author: yaroslavvb
  def __init__(self, suvi, *args):
    # Accepts either a (s, u, v[, inv]) tuple or the factors as separate
    # positional arguments; the inverse defaults to an Identity of matching size.
    if list_or_tuple(suvi):
      if len(suvi) == 3:
        s, u, v = suvi
        inv = Identity(s.shape[0])
      else:
        s, u, v, inv = suvi
    else:
      s = suvi
      u = args[0]
      v = args[1]
      if len(args)>2:
        inv = args[2]
      else:
        inv = Identity(s.shape[0])
    self.s = s
    self.u = u
    self.v = v
    self.inv = inv
Project: plda    Author: RaviSoji
def test_optimized_A_and_Φ_w_make_eye(cls):
        """ I = (V^T)(Φ_w)(V), where A = inv(V^T)
        NOTES:
        (1) There are two ways to compute Φ_w:
            Φ_w = (A)(A^T)
            Φ_w = n / (n-1) * S_w
        (2) *** COMPUTE PHI WITH S_w: Φ_w = n/(n-1) * S_w. ***
        (3) Do NOT use Φ_w = (A)(A^T) because that is trivially true:
             (V^T)(Φ_w)(V), where V = inv(A^T), which gives
             (inv(A))(A)(A^T)(inv(A^T)) = (I)(I) = I.
        """
        tolerance = 1e-13  # Should be smaller than n / (n - 1).

        S_w = cls.model.S_w
        n = cls.model.n_avg
        V = inv(cls.model.A.T)
        cls.assertTrue(tolerance < (n / (n - 1)))

        Φ_w = n / (n - 1) * S_w
        result = np.matmul(np.matmul(V.T, Φ_w), V)
        cls.assert_same(result, np.eye(cls.dims), tolerance=tolerance)
Project: plda    Author: RaviSoji
def test_calc_A(self):
        tolerance = 1e-100
        dims = self.dims

        Φ_w = np.diag(np.ones(dims))
        W = np.random.randint(0, 9, self.dims ** 2).reshape(dims, dims) + \
            np.eye(dims)
        n_avg = 9

        A_truth = Φ_w * n_avg / (n_avg - 1)
        A_truth = np.sqrt(A_truth)
        A_truth = np.matmul(np.linalg.inv(W).T, A_truth)

        A_model = self.model.calc_A(n_avg, Φ_w, W)

        self.assert_same(A_model, A_truth, tolerance=tolerance)
        self.assert_invertible(self.model.W)
Project: parametrix    Author: vincentchoqueuse
def compute(self,signal):

        check_MD(signal.X)

        N=signal.N
        M,L=signal.X.shape
        sigma2=signal.sigma2
        H=np.matrix(signal.H)
        X=np.matrix(signal.X)
        Rx=X*X.H/L
        PH=np.eye(N)-H*lg.pinv(H)
        D=np.matrix(np.diag(1j*np.arange(N)))
        M=H.H*D.H*PH*D*H

        CRB=(sigma2/2)*lg.inv(np.real(np.multiply(M,Rx)))
        self.w=np.diag(CRB)

        return self
Project: parametrix    Author: vincentchoqueuse
def compute_criterion(self,y):

        self.N=len(y)

        #construct matrices
        A=np.matrix(self.A)
        b=np.matrix(self.b).T
        H=np.matrix(self.H)

        x=lg.inv(H.T*H)*H.T*y                           #estimation of x

        if self.estimate_sigma2 is True:
            r,p=self.A.shape
            coef=(self.N-p)/r
            den=lg.norm(y-H*x)**2
        else:
            den=self.sigma2
            coef=1

        term1=A*x-b
        num=term1.T*lg.inv(A*lg.inv(H.T*H)*A.T)*term1
        self.criterion=coef*num/den  ## See page 274 / 345
Project: nengolib    Author: arvoelke
def test_func_realize(radii):
    sys = Alpha(0.1)

    T = np.asarray([[1., 2.], [0, -1.]])

    for Tinv in (None, inv(T)):
        realize_result = _realize(sys, radii, T, Tinv)

        assert realize_result.sys is sys
        assert np.allclose(inv(realize_result.T), realize_result.Tinv)

        rsys = realize_result.realization
        assert ss_equal(rsys, sys.transform(realize_result.T))

        # Check that the state vector are related by T
        length = 1000
        dt = 0.001
        x_old = np.asarray([sub.impulse(length, dt) for sub in sys])
        x_new = np.asarray([sub.impulse(length, dt) for sub in rsys])

        r = np.atleast_2d(np.asarray(radii).T).T
        assert np.allclose(np.dot(T, x_new * r), x_old)
Project: nengolib    Author: arvoelke
def test_identity(radii):
    sys = Alpha(0.1)

    identity = Identity()
    assert repr(identity) == "Identity()"

    I = np.eye(len(sys))
    realize_result = identity(sys, radii)
    assert realize_result.sys is sys
    assert np.allclose(realize_result.T, I * radii)
    assert np.allclose(realize_result.Tinv, inv(I * radii))

    rsys = realize_result.realization
    assert ss_equal(rsys, sys.transform(realize_result.T))

    # Check that it's still the same system, even though different matrices
    assert sys_equal(sys, rsys)
    if radii == 1:
        assert ss_equal(sys, rsys)
    else:
        assert not np.allclose(sys.B, rsys.B)
        assert not np.allclose(sys.C, rsys.C)

    # Check that the state vectors have scaled power
    assert np.allclose(state_norm(sys) / radii, state_norm(rsys))
Project: nengolib    Author: arvoelke
def _realize(sys, radii, T, Tinv=None):
    """Helper function for producing a RealizerResult."""
    sys = LinearSystem(sys)
    r = np.asarray(radii, dtype=np.float64)
    if r.ndim == 0:
        r = np.ones(len(sys)) * r
    elif r.ndim > 1:
        raise ValueError("radii (%s) must be a 1-dim array or scalar" % (
            radii,))
    elif len(r) != len(sys):
        raise ValueError("radii (%s) length must match state dimension %d" % (
            radii, len(sys)))

    T = T * r[None, :]
    if Tinv is None:  # this needs to be computed eventually anyways
        Tinv = inv(T)
    else:
        Tinv = Tinv / r[:, None]

    return RealizerResult(sys, T, Tinv, sys.transform(T, Tinv))
Project: calibration    Author: ciechowoj
def decompose(P):
    M = P[:3, :3]
    T = P[:3, 3]

    K, R = scipy.linalg.rq(M)

    for i in range(2):
        if K[i,i] < 0:
            K[:, i] *= -1
            R[i, :] *= -1

    if K[2,2] > 0:
        K[:, 2] *= -1
        R[2, :] *= -1

    if det(R) < 0:
        R *= -1

    T = linalg.inv(dot(K, -R)).dot(T.reshape((3, 1)))
    K /= -K[2,2]

    return K, R, T
Project: decoding_challenge_cortana_2016_3rd    Author: kingjr
def _fwd_bem_multi_solution(solids, gamma, nps):
    """Do multi surface solution

      * Invert I - solids/(2*M_PI)
      * Take deflation into account
      * The matrix is destroyed after inversion
      * This is the general multilayer case

    """
    pi2 = 1.0 / (2 * np.pi)
    n_tot = np.sum(nps)
    assert solids.shape == (n_tot, n_tot)
    nsurf = len(nps)
    defl = 1.0 / n_tot
    # Modify the matrix
    offsets = np.cumsum(np.concatenate(([0], nps)))
    for si_1 in range(nsurf):
        for si_2 in range(nsurf):
            mult = pi2 if gamma is None else pi2 * gamma[si_1, si_2]
            slice_j = slice(offsets[si_1], offsets[si_1 + 1])
            slice_k = slice(offsets[si_2], offsets[si_2 + 1])
            solids[slice_j, slice_k] = defl - solids[slice_j, slice_k] * mult
    solids += np.eye(n_tot)
    return linalg.inv(solids, overwrite_a=True)
Project: mne-hcp    Author: mne-tools
def _check_infos_trans(infos):
    """XXX this goes to tests later, currently not used"""
    chan_max_idx = np.argmax([c['nchan'] for c in infos])
    chan_template = infos[chan_max_idx]['ch_names']
    channels = [c['ch_names'] for c in infos]
    common_channels = set(chan_template).intersection(*channels)

    common_chs = [[c['chs'][c['ch_names'].index(ch)] for ch in common_channels]
                  for c in infos]
    dev_ctf_trans = [i['dev_ctf_t']['trans'] for i in infos]
    cns = [[c['ch_name'] for c in cc] for cc in common_chs]
    for cn1, cn2 in itt.combinations(cns, 2):
        assert cn1 == cn2
    # BTI stores data in head coords, as a consequence the coordinates
    # change across run, we apply the ctf->ctf_head transform here
    # to check that all transforms are correct.
    cts = [np.array([linalg.inv(_loc_to_coil_trans(c['loc'])).dot(t)
                    for c in cc])
           for t, cc in zip(dev_ctf_trans, common_chs)]
    for ct1, ct2 in itt.combinations(cts, 2):
        np.testing.assert_array_almost_equal(ct1, ct2, 12)
Project: Parallel-SGD    Author: angadgill
def test_graph_lasso_cv(random_state=1):
    # Sample data from a sparse multivariate normal
    dim = 5
    n_samples = 6
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.96,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    # Capture stdout, to smoke test the verbose mode
    orig_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        # We need verbose very high so that Parallel prints on stdout
        GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
    finally:
        sys.stdout = orig_stdout

    # Smoke test with specified alphas
    GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
Project: spyking-circus    Author: spyking-circus
def get_precision(self):
        """Compute data precision matrix with the generative model.
        Equals the inverse of the covariance but computed with
        the matrix inversion lemma for efficiency.
        Returns
        -------
        precision : array, shape=(n_features, n_features)
            Estimated precision of data.
        """
        n_features = self.components_.shape[1]

        # handle corner cases first
        if self.n_components_ == 0:
            return np.eye(n_features) / self.noise_variance_
        if self.n_components_ == n_features:
            return linalg.inv(self.get_covariance())

        # Get precision using matrix inversion lemma
        components_ = self.components_
        exp_var = self.explained_variance_
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        precision = np.dot(components_, components_.T) / self.noise_variance_
        precision.flat[::len(precision) + 1] += 1. / exp_var_diff
        precision = np.dot(components_.T,
                           np.dot(linalg.inv(precision), components_))
        precision /= -(self.noise_variance_ ** 2)
        precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
        return precision
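The docstring's claim is easy to verify against the direct inverse; a hedged sanity check, assuming this snippet mirrors scikit-learn's PCA.get_precision:

import numpy as np
from scipy import linalg
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(200, 10)
pca = PCA(n_components=4).fit(X)
# The lemma-based precision equals the plain inverse of the model covariance.
assert np.allclose(pca.get_precision(), linalg.inv(pca.get_covariance()))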
Project: PersonalizedMultitaskLearning    Author: mitmedialab
def updateTheta(self):
        for k in range(self.K):
            inner_sum = np.zeros((1,self.num_feats))
            for m in range(self.n_tasks):
                inner_sum = inner_sum + self.phi[m,k] * np.atleast_2d(self.task_vectors[m,:])
            self.theta[k,:] = (np.dot(self.gamma[k],(np.dot(la.inv(self.sigma),self.mu.T) + inner_sum.T)  )).T
Project: Least-Squared-Error-Based-FIR-Filters    Author: fourier-being
def lpfls2notch(N,wp,ws,wn1,wn2,W):
    M = (N-1)//2  # integer division, so the slicing below works under Python 3
    nq = np.arange(0,2*M+1)
    nb = np.arange(0,M+1)
    q = (wp/np.pi)*np.sinc((wp/np.pi)*nq) - W*(ws/np.pi)*np.sinc((ws/np.pi)*nq)
    b = (wp/np.pi)*np.sinc((wp/np.pi)*nb)
    q[0] = wp/np.pi + W*(1-ws/np.pi) # since sin(pi*n)/pi*n = 1, not 0
    b = np.asmatrix(b)
    b = b.transpose()

    Q1 = ln.toeplitz(q[0:M+1])
    Q2 = ln.hankel(q[0:M+1],q[M:])
    Q = Q1+Q2

    G1 = np.cos(wn1*nb)
    G2 = np.cos(wn2*nb)
    G = np.matrix([G1,G2])

    d = np.array([0,0])
    d = np.asmatrix(d)
    d = d.transpose()

    c = np.asmatrix(ln.solve(Q,b))

    mu = ln.solve(G*ln.inv(Q)*G.transpose(),G*c - d)

    a = c - ln.solve(Q,G.transpose()*mu)
    h = np.zeros(N)
    for i in nb:
        h[i] = 0.5*a[M-i]
        h[N-1-i] = h[i]
    h[M] = 2*h[M]
    hmax = max(np.absolute(h))
    for i in nq:
        h[i] = (8191/hmax)*h[i]
    return h
Project: Least-Squared-Error-Based-FIR-Filters    Author: fourier-being
def lpfls1notch(N,wp,ws,wn1,W):
    M = (N-1)//2  # integer division, so the slicing below works under Python 3
    nq = np.arange(0,2*M+1)
    nb = np.arange(0,M+1)
    q = (wp/np.pi)*np.sinc((wp/np.pi)*nq) - W*(ws/np.pi)*np.sinc((ws/np.pi)*nq)
    b = (wp/np.pi)*np.sinc((wp/np.pi)*nb)
    q[0] = wp/np.pi + W*(1-ws/np.pi) # since sin(pi*n)/pi*n = 1, not 0
    b = np.asmatrix(b)
    b = b.transpose()

    Q1 = ln.toeplitz(q[0:M+1])
    Q2 = ln.hankel(q[0:M+1],q[M:])
    Q = Q1+Q2

    G1 = np.cos(wn1*nb)
    G = np.matrix([G1])

    d = np.array([0])
    d = np.asmatrix(d)

    c = np.asmatrix(ln.solve(Q,b))

    mu = ln.solve(G*ln.inv(Q)*G.transpose(),G*c - d)

    a = c - ln.solve(Q,G.transpose()*mu)
    h = np.zeros(N)
    for i in nb:
        h[i] = 0.5*a[M-i]
        h[N-1-i] = h[i]
    h[M] = 2*h[M]
    hmax = max(np.absolute(h))
    for i in nq:
        h[i] = (8191/hmax)*h[i]
    return h
Project: Stereo-Pose-Machines    Author: ppwwyyxx
def __init__(self, K,R,t):
        self.K = K
        self.R = R
        self.t = t
        self.invR = self.R.T
        self.P = np.matmul(self.K,
                np.concatenate((self.R, self.t.reshape((3,1))), axis=1))
        self.invP3 = la.inv(self.P[:3,:3])
        self.center = np.matmul(-self.invP3, self.P[:3,3])
Project: Stereo-Pose-Machines    Author: ppwwyyxx
def triangulate(cam1, cam2, p1, p2):
    p1 = np.asarray([p1[0],p1[1],1])
    p2 = np.asarray([p2[0],p2[1],1])
    c1, v1 = cam_center_vector(cam1, p1)
    c2, v2 = cam_center_vector(cam2, p2)
    t = c2 - c1
    v3 = np.cross(v1, v2)
    X = np.stack((v1, v3, -v2), axis=1)

    alpha = np.matmul(la.inv(X), t)
    output = c1 + v1 * alpha[0] + alpha[1]*0.5*v3
    return output
Project: DAB_analyzer    Author: meklon
def calc_deconv_matrix(matrix_vector_dab_he):
    """
    Custom calculated matrix of lab's stains DAB + Hematoxylin
    The raw matrix was moved to the global scope before main() function as a constant
    """

    matrix_vector_dab_he[2, :] = np.cross(matrix_vector_dab_he[0, :], matrix_vector_dab_he[1, :])
    matrix_dh = linalg.inv(matrix_vector_dab_he)
    return matrix_dh
Project: adversarial-variational-bayes    Author: gdikov
def analytical_value_c_cross_entropy(distr1, distr2, par1, par2):
    """ Analytical value of the cross-entropy for the given distributions.

    Parameters
    ----------    
    distr1, distr2 : str
                     Name of the distributions.
    par1, par2 : dictionaries
                 Parameters of the distribution. If distr1 = distr2 =
                 'normal': par1["mean"], par1["cov"] and par2["mean"],
                 par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    c : float
        Analytical value of the cross-entropy.

    """

    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']
        dim = len(m1)

        invc2 = inv(c2)
        diffm = m1 - m2

        c = 1/2 * (dim * log(2*pi) + log(det(c2)) + trace(dot(invc2, c1)) +
                   dot(diffm, dot(invc2, diffm)))
    else:
        raise Exception('Distribution=?')

    return c
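A hedged Monte Carlo cross-check of the closed form above (sample size and parameters are illustrative):

import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.RandomState(0)
m1, c1 = np.zeros(2), np.array([[1.0, 0.2], [0.2, 0.5]])
m2, c2 = np.ones(2), np.array([[1.5, 0.0], [0.0, 1.0]])

# cross-entropy C(p1, p2) = -E_{x ~ p1}[log p2(x)]
x = rng.multivariate_normal(m1, c1, size=200000)
mc = -multivariate_normal(mean=m2, cov=c2).logpdf(x).mean()
ref = analytical_value_c_cross_entropy('normal', 'normal',
                                       {'mean': m1, 'cov': c1},
                                       {'mean': m2, 'cov': c2})
print(mc, ref)  # should agree to roughly two decimal places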
Project: adversarial-variational-bayes    Author: gdikov
def analytical_value_d_kullback_leibler(distr1, distr2, par1, par2):
    """ Analytical value of the KL divergence for the given distributions.

    Parameters
    ----------    
    distr1, distr2 : str-s
                    Names of the distributions.
    par1, par2 : dictionary-s
                 Parameters of the distributions. If distr1 = distr2 =
                 'normal': par1["mean"], par1["cov"] and par2["mean"],
                 par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    d : float
        Analytical value of the Kullback-Leibler divergence.

    """

    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']
        dim = len(m1)    

        invc2 = inv(c2)
        diffm = m1 - m2

        d = 1/2 * (log(det(c2)/det(c1)) + trace(dot(invc2, c1)) +
                   dot(diffm, dot(invc2, diffm)) - dim)
    else:
        raise Exception('Distribution=?')

    return d
Project: adversarial-variational-bayes    Author: gdikov
def analytical_value_i_renyi(distr, alpha, par):
    """ Analytical value of the Renyi mutual information.

    Parameters
    ----------    
    distr : str
            Name of the distribution.
    alpha : float
            Parameter of the Renyi mutual information.
    par : dictionary
          Parameters of the distribution. If distr = 'normal': par["cov"]
          is the covariance matrix.

    Returns
    -------
    i : float
        Analytical value of the Renyi mutual information.

    """

    if distr == 'normal':
        c = par["cov"]        

        t1 = -alpha / 2 * log(det(c))
        t2 = -(1 - alpha) / 2 * log(prod(diag(c)))
        t3 = log(det(alpha * inv(c) + (1 - alpha) * diag(1 / diag(c)))) / 2
        i = 1 / (alpha - 1) * (t1 + t2 - t3)            
    else:
        raise Exception('Distribution=?')

    return i
Project: adversarial-variational-bayes    Author: gdikov
def analytical_value_d_hellinger(distr1, distr2, par1, par2):
    """ Analytical value of Hellinger distance for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str-s
                    Names of the distributions.
    par1, par2 : dictionary-s
                 Parameters of the distributions. If distr1 = distr2 =
                 'normal': par1["mean"], par1["cov"] and par2["mean"],
                 par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    d : float
        Analytical value of the Hellinger distance.

    """

    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']

        # "https://en.wikipedia.org/wiki/Hellinger_distance": Examples:
        diffm = m1 - m2
        avgc = (c1 + c2) / 2
        inv_avgc = inv(avgc)
        d = 1 - det(c1)**(1/4) * det(c2)**(1/4) / sqrt(det(avgc)) * \
            exp(-dot(diffm, dot(inv_avgc, diffm))/8)  # D^2

        d = sqrt(d)
    else:
        raise Exception('Distribution=?')

    return d
Project: pyGPGO    Author: hawk31
def logpdf(x, df, mu, Sigma):
    """
    Marginal log-likelihood of a Student-t Process

    Parameters
    ----------
    x: array-like
        Point to be evaluated
    df: float
        Degrees of freedom (>2.0)
    mu: array-like
        Mean of the process.
    Sigma: array-like
        Covariance matrix of the process.

    Returns
    -------
    logp: float
        log-likelihood 

    """
    d = len(x)
    x = np.atleast_2d(x)
    xm = x - mu
    V = df * Sigma
    V_inv = np.linalg.inv(V)
    _, logdet = slogdet(np.pi * V)

    logz = -gamma(df / 2.0 + d / 2.0) + gamma(df / 2.0) + 0.5 * logdet
    logp = -0.5 * (df + d) * np.log(1 + np.sum(np.dot(xm, V_inv) * xm, axis=1))

    logp = logp - logz

    return logp[0]
Project: pyGPGO    Author: hawk31
def predict(self, Xstar, return_std=False):
        """
        Returns mean and covariances for the posterior t-Student process.

        Parameters
        ----------
        Xstar: np.ndarray, shape=((nsamples, nfeatures))
            Testing instances to predict.
        return_std: bool
            Whether to return the standard deviation of the posterior process. Otherwise,
            it returns the whole covariance matrix of the posterior process.

        Returns
        -------
        np.ndarray
            Mean of the posterior process for testing instances.
        np.ndarray
            Covariance of the posterior process for testing instances.
        """
        Xstar = np.atleast_2d(Xstar)
        self.K21 = self.covfunc.K(Xstar, self.X)
        self.K22 = self.covfunc.K(Xstar, Xstar)
        self.K12 = self.covfunc.K(self.X, Xstar)
        self.K22_tilde = self.K22 - np.dot(np.dot(self.K21, inv(self.K11)), self.K12)

        phi2 = np.dot(np.dot(self.K21, inv(self.K11)), self.y)
        cov = (self.nu + self.beta1 - 2) / (self.nu + self.n1 - 2) * self.K22_tilde
        if return_std:
            return phi2, np.diag(cov)
        return phi2, cov
Project: astrology    Author: mattsgithub
def train(self, X, y, **kwargs):
        features = kwargs.get('features')
        self.fit_intercept = kwargs.get('fit_intercept')

        self.y = y
        self.N = y.shape[0]
        # Ignore column vector
        if self.fit_intercept:
            self.features = ['bias'] + features
            X = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1)
            self.p = X.shape[1] - (1 if self.fit_intercept else 0)
        else:
            self.features = features
            self.p = X.shape[1]

        self.X = X
        XT = X.T
        std_error_matrix = inv(XT.dot(X))
        self.beta = std_error_matrix.dot(XT).dot(y)

        # Prediction
        self.y_hat = X.dot(self.beta)

        # Residual sum of squares
        self.rss = np.sum((y - self.y_hat)**2)

        # Estimated variance
        self.df = (self.N - self.p - 1)
        self.pop_var = self.rss / self.df

        # Standard error
        self.std_error = np.sqrt(std_error_matrix.diagonal() * self.pop_var)

        # t scores
        self.t = self.beta / self.std_error
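The normal-equations step above can be checked standalone against np.linalg.lstsq (synthetic data, illustrative coefficients):

import numpy as np
from scipy.linalg import inv

rng = np.random.RandomState(0)
X = np.column_stack([np.ones(50), rng.randn(50, 2)])  # intercept + 2 features
y = X @ np.array([1.0, 2.0, -0.5]) + 0.1 * rng.randn(50)

std_error_matrix = inv(X.T @ X)
beta = std_error_matrix @ X.T @ y
assert np.allclose(beta, np.linalg.lstsq(X, y, rcond=None)[0])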
Project: astrology    Author: mattsgithub
def plot_f_distrib_for_many_coefficients(self, features):
        from scipy.stats import f

        # Remove a particular subset of features
        X = np.delete(self.X, [self.features.index(_) for _ in features], 1)

        # Prediction from reduced model
        XT = X.T
        std_error_matrix = inv(XT.dot(X))
        beta = std_error_matrix.dot(XT).dot(self.y)
        y_hat = X.dot(beta)
        rss_reduced_model = np.sum((self.y - y_hat)**2)

        dfn = len(features)
        dfd = self.df

        # This should be distributed as chi squared
        # with degrees of freedom equal to number
        # of dropped features
        rss_diff = (rss_reduced_model - self.rss)
        chi_1 = rss_diff / dfn
        chi_2 = self.pop_var
        f_score = chi_1 / chi_2

        # 5% and 95% percentile
        f_05, f_95 = f.ppf([0.05, 0.95], dfn, dfd)

        x = np.linspace(0.001, 5.0)

        plt.axvline(x=f_05)
        plt.axvline(x=f_95)

        plt.scatter(f_score, f.pdf(f_score, dfn, dfd), marker='o', color='red')
        plt.plot(x, f.pdf(x, dfn, dfd), color='gray', lw=5, alpha=0.6)
    plt.title('f-distribution for dropping features: {0}'.format(features))
        plt.show()
Project: pysptools    Author: ctherien
def ACE(M, t):
    """
    Performs the adaptive cosine/coherent estimator algorithm for target
    detection.

    Parameters:
        M: `numpy array`
            2d matrix of HSI data (N x p).

        t: `numpy array`
            A target endmember (p).

    Returns: `numpy array`
        Vector of detector output (N).

    References:
      X Jin, S Paswater, H Cline.  "A Comparative Study of Target Detection
      Algorithms for Hyperspectral Imagery."  SPIE Algorithms and Technologies
      for Multispectral, Hyperspectral, and Ultraspectral Imagery XV.  Vol
      7334.  2009.
    """
    N, p = M.shape
    # Remove mean from data
    u = M.mean(axis=0)
    M = M - np.kron(np.ones((N, 1)), u)
    t = t - u

    R_hat = np.cov(M.T)
    G = lin.inv(R_hat)

    results = np.zeros(N, dtype=np.float32)
    ##% From Broadwater's paper
    ##%tmp = G*S*inv(S.'*G*S)*S.'*G;
    tmp = np.array(np.dot(t.T, np.dot(G, t)))
    dot_G_M = np.dot(G, M[0:,:].T)
    num = np.square(np.dot(t, dot_G_M))
    for k in range(N):
        denom = np.dot(tmp, np.dot(M[k], dot_G_M[:,k]))
        results[k] = num[k] / denom
    return results
Project: pysptools    Author: ctherien
def MatchedFilter(M, t):
    """
    Performs the matched filter algorithm for target
    detection.

    Parameters:
        M: `numpy array`
            2d matrix of HSI data (N x p).

        t: `numpy array`
            A target endmember (p).

    Returns: `numpy array`
        Vector of detector output (N).

    References:
      X Jin, S Paswater, H Cline.  "A Comparative Study of Target Detection
     Algorithms for Hyperspectral Imagery."  SPIE Algorithms and Technologies
     for Multispectral, Hyperspectral, and Ultraspectral Imagery XV.  Vol
     7334.  2009.
    """

    N, p = M.shape
    # Remove mean from data
    u = M.mean(axis=0)
    M = M - np.kron(np.ones((N, 1)), u)
    t = t - u

    R_hat = np.cov(M.T)
    G = lin.inv(R_hat)

    tmp = np.array(np.dot(t.T, np.dot(G, t)))
    w = np.dot(G, t)
    return np.dot(w, M.T) / tmp
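A hypothetical usage sketch with synthetic data, planting the target signature in the first pixels:

import numpy as np

rng = np.random.RandomState(0)
N, p = 500, 20
M = rng.randn(N, p)
t = rng.randn(p)
M[:50] += 2.0 * t              # first 50 pixels contain the target
scores = MatchedFilter(M, t)
print(scores[:50].mean() > scores[50:].mean())  # True: target pixels score higher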
Project: pysptools    Author: ctherien
def CEM(M, t):
    """
    Performs the constrained energy minimization algorithm for target
    detection.

    Parameters:
        M: `numpy array`
            2d matrix of HSI data (N x p).

        t: `numpy array`
            A target endmember (p).

    Returns: `numpy array`
        Vector of detector output (N).

    References:
        Qian Du, Hsuan Ren, and Chein-I Chang. A Comparative Study of
        Orthogonal Subspace Projection and Constrained Energy Minimization.
        IEEE TGRS. Volume 41. Number 6. June 2003.
    """
    def corr(M):
        p, N = M.shape
        return np.dot(M, M.T) / N

    N, p = M.shape
    R_hat = corr(M.T)
    Rinv = lin.inv(R_hat)
    denom = np.dot(t.T, np.dot(Rinv, t))
    t_Rinv = np.dot(t.T, Rinv)
    return np.dot(t_Rinv , M[0:,:].T) / denom
Project: pysptools    Author: ctherien
def GLRT(M, t):
    """
    Performs the generalized likelihood test ratio algorithm for target
    detection.

    Parameters:
        M: `numpy array`
            2d matrix of HSI data (N x p).

        t: `numpy array`
            A target endmember (p).

    Returns: `numpy array`
        Vector of detector output (N).

    References:
        T F Ayoub, "Modified GLRT Signal Detection Algorithm," IEEE
        Transactions on Aerospace and Electronic Systems, Vol 36, No 3, July
        2000.
    """
    N, p = M.shape

    # Remove mean from data
    u = M.mean(axis=0)
    M = M - np.kron(np.ones((N, 1)), u)
    t = t - u

    R = lin.inv(np.cov(M.T))
    results = np.zeros(N, dtype=float)  # np.float was removed in NumPy 1.24

    t_R_t_dot = np.dot(t, np.dot(R, t.T))
    for k in range(N):
        x = M[k]
        R_x_dot = np.dot(R, x)
        num = np.dot(t, R_x_dot)**2
        denom = t_R_t_dot * (1 + np.dot(x.T, R_x_dot))
        results[k] = num / denom
    return results
Project: ThermoCodeLib    Author: longlevan
def MultiDimNewtRaph(f,x0,dx=1e-6,args=(),ytol=1e-5,w=1.0,JustOneStep=False):
    """
    A Newton-Raphson solver where the Jacobian is always re-evaluated rather than
    re-using the information as in the fsolve method of scipy.optimize
    """
    #Convert to numpy array, force type conversion to float
    x=np.array(x0,dtype=float)
    error=999
    J=np.zeros((len(x),len(x)))

    #If a float or an int is passed in for dx, convert it to an array the
    #same shape as x
    if isinstance(dx,int):
        dx=float(dx)*np.ones_like(x)
    elif isinstance(dx,float):
        dx=dx*np.ones_like(x)

    r0=np.array(f(x,*args))
    while abs(error)>ytol:
        #Build the Jacobian matrix by columns
        for i in range(len(x)):
            epsilon=np.zeros_like(x)
            epsilon[i]=dx[i]
            J[:,i]=(np.array(f(x+epsilon,*args))-r0)/epsilon[i]
        v=np.dot(-inv(J),r0)
        x=x+w*v
        #Calculate the residual vector at the new step
        r0=f(x,*args)
        error = np.max(np.abs(r0))
        #Just do one step and stop
        if JustOneStep==True:
            return x
    return x
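A hypothetical usage sketch, intersecting the unit circle with the line y = x (the function body assumes numpy and scipy.linalg.inv imported at module level):

import numpy as np

def residuals(x):
    return [x[0]**2 + x[1]**2 - 1.0, x[0] - x[1]]

root = MultiDimNewtRaph(residuals, [1.0, 0.5])
print(root)  # approximately [0.7071, 0.7071]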
Project: kerpy    Author: oxmlcs
def compute_induced_kernel_matrix_on_data(self,data_x,data_y):
        '''Z follows the same distribution as X; W follows that of Y.
        The current data generating methods we use 
        generate X and Y at the same time. '''
        size_induced_set = max(self.num_inducex,self.num_inducey)
        #print "size_induce_set", size_induced_set
        if self.data_generator is None:
            subsample_idx = np.random.randint(self.num_samples, size=size_induced_set)
            self.data_z = data_x[subsample_idx,:]
            self.data_w = data_y[subsample_idx,:]
        else:
            self.data_z, self.data_w = self.data_generator(size_induced_set)
            self.data_z[[range(self.num_inducex)],:]
            self.data_w[[range(self.num_inducey)],:]
        print('Induce Set')
        if self.kernelX_use_median:
            sigmax = self.kernelX.get_sigma_median_heuristic(data_x)
            self.kernelX.set_width(float(sigmax))
        if self.kernelY_use_median:
            sigmay = self.kernelY.get_sigma_median_heuristic(data_y)
            self.kernelY.set_width(float(sigmay))
        Kxz = self.kernelX.kernel(data_x,self.data_z)
        Kzz = self.kernelX.kernel(self.data_z)
        #R = inv(sqrtm(Kzz))
        R = inv(sqrtm(Kzz + np.eye(np.shape(Kzz)[0])*10**(-6)))
        phix = Kxz.dot(R)
        Kyw = self.kernelY.kernel(data_y,self.data_w)
        Kww = self.kernelY.kernel(self.data_w)
        #S = inv(sqrtm(Kww))
        S = inv(sqrtm(Kww + np.eye(np.shape(Kww)[0])*10**(-6)))
        phiy = Kyw.dot(S)
        return phix, phiy
Project: Boxy-disky-parameter-of-E-galaxies    Author: spica095
def fitEllipse(x, y):
    x = x[:, np.newaxis]
    y = y[:, np.newaxis]
    D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
    S = np.dot(D.T, D)
    C = np.zeros([6, 6])
    C[0, 2] = C[2, 0] = 2
    C[1, 1] = -1
    E, V = eig(np.dot(inv(S), C))
    n = np.argmax(np.abs(E))
    a = V[:, n]
    if a[0] < 0:
        a = -a
    return a
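A hypothetical usage sketch (the snippet assumes eig and inv imported from numpy.linalg or scipy.linalg at module level); the returned vector holds the conic coefficients:

import numpy as np

rng = np.random.RandomState(0)
theta = np.linspace(0, 2 * np.pi, 200)
x = 3.0 * np.cos(theta) + 0.05 * rng.randn(200)
y = 1.5 * np.sin(theta) + 0.05 * rng.randn(200)
a = fitEllipse(x, y)
# a = [A, B, C, D, E, F] for the conic A x^2 + B xy + C y^2 + D x + E y + F = 0
print(a)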
Project: QuantEcon.lectures.code    Author: QuantEcon
def price_single_beliefs(transition, dividend_payoff, β=.75):
    """
    Function to Solve Single Beliefs
    """
    # First compute inverse piece
    imbq_inv = la.inv(np.eye(transition.shape[0]) - β*transition)

    # Next compute prices
    prices = β * np.dot(np.dot(imbq_inv, transition), dividend_payoff)

    return prices
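A worked example with a two-state Markov chain (numbers illustrative). Since prices = β(I - βP)^(-1) P d, the output satisfies the fixed point p = βP(d + p):

import numpy as np

P = np.array([[0.9, 0.1],
              [0.3, 0.7]])
d = np.array([1.0, 0.5])           # dividend paid in each state
p = price_single_beliefs(P, d, β=0.75)
assert np.allclose(p, 0.75 * P @ (d + p))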
Project: QuantEcon.lectures.code    Author: QuantEcon
def plot3():
    Z = gen_gaussian_plot_vals(x_hat, Σ)
    cs1 = ax.contour(X, Y, Z, 6, colors="black")
    ax.clabel(cs1, inline=1, fontsize=10)
    M = Σ * G.T * linalg.inv(G * Σ * G.T + R)
    x_hat_F = x_hat + M * (y - G * x_hat)
    Σ_F = Σ - M * G * Σ
    new_Z = gen_gaussian_plot_vals(x_hat_F, Σ_F)
    cs2 = ax.contour(X, Y, new_Z, 6, colors="black")
    ax.clabel(cs2, inline=1, fontsize=10)
    ax.contourf(X, Y, new_Z, 6, alpha=0.6, cmap=cm.jet)
    ax.text(float(y[0]), float(y[1]), r"$y$", fontsize=20, color="black")
Project: odin    Author: imito
def extract(self, N, F):
    L = np.zeros((self.tv_dim, self.tv_dim))
    L[self.itril] = N.dot(self.T_iS_Tt)
    L += np.tril(L, -1).T + self.Im
    Cxx = inv(L)
    B = self.T_iS.dot(F)
    Ex = Cxx.dot(B)
    return Ex
Project: odin    Author: imito
def expectation_tv(self, data_list):
    N, F = read_data(data_list, self.nmix, self.ndim)
    nfiles = F.shape[0]
    nframes = N.sum()
    LU = np.zeros((self.nmix, self.tv_dim * (self.tv_dim + 1) // 2))
    RU = np.zeros((self.tv_dim, self.nmix * self.ndim))
    LLK = 0.
    T_invS = self.Tm / self.Sigma
    T_iS_Tt = self.comp_T_invS_Tt()
    parts = 2500  # modify this based on your HW resources (e.g., memory)
    nbatch = int(nfiles / parts + 0.99999)
    for batch in range(nbatch):
      start = batch * parts
      fin = min((batch + 1) * parts, nfiles)
      length = fin - start
      N1 = N[start:fin]
      F1 = F[start:fin]
      L1 = N1.dot(T_iS_Tt)
      B1 = F1.dot(T_invS.T)
      Ex = np.empty((length, self.tv_dim))
      Exx = np.empty((length, self.tv_dim * (self.tv_dim + 1) // 2))
      llk = np.zeros((length, 1))
      for ix in range(length):
        L = np.zeros((self.tv_dim, self.tv_dim))
        L[self.itril] = L1[ix]
        L += np.tril(L, -1).T + self.Im
        Cxx = inv(L)
        B = B1[ix][:, np.newaxis]
        this_Ex = Cxx.dot(B)
        llk[ix] = self.res_llk(this_Ex, B)
        Ex[ix] = this_Ex.T
        Exx[ix] = (Cxx + this_Ex.dot(this_Ex.T))[self.itril]
      RU += Ex.T.dot(F1)
      LU += N1.T.dot(Exx)
      LLK += llk.sum()
    self.Tm = None
    tmp_string = ''.join(random.choices(string.ascii_uppercase + string.digits, k=16))
    tmpfile = self.tmpdir + 'tmat_' + tmp_string + '.h5'
    h5write(tmpfile, LU, 'LU')
    return RU, LLK, nframes
Project: prml    Author: Yevgnen
def predict_one(self, X, T, x, beta, C):
        """Predict one single new point ``x``.
        The hyperparameter ``beta`` and Gram matrix ``C`` should be estimated
        and calculated from the training data, respectively. """

        kXx = self.kernel.inner(X, x)
        kxx = self.kernel.inner(x, x)

        inv_C = linalg.inv(C)
        c = kxx + 1 / beta

        self.pred_mean = kXx.T.dot(inv_C).dot(T)
        self.pred_cov = c - kXx.T.dot(inv_C).dot(kXx)  # Why the cov so small ?

        return self.pred_mean, self.pred_cov