Python scipy.linalg module: det() code examples

We have extracted the following 35 code examples from open-source Python projects to illustrate how to use scipy.linalg.det().

Project: PersonalizedMultitaskLearning    Author: mitmedialab    | project source | file source
def updateGamma(self):
        task_matrices = np.zeros((self.n_tasks, self.num_feats, self.num_feats))
        for m in range(self.n_tasks):
            rho_vector = rhoFunction(np.array(self.xi[m]))
            rho_vector = rho_vector.reshape((1,-1))             # Make rho vector 2D
            task_X = self.task_dict[m]['X']
            # Note that the transposing doesn't exactly match the paper because our data format is slightly different
            rho_matrix = abs(rho_vector) * task_X.T
            task_matrices[m,:,:] = np.dot(rho_matrix, task_X)  

        for k in range(self.K):
            inner_sum = np.zeros((self.num_feats,self.num_feats))
            for m in range(self.n_tasks):
                inner_sum = inner_sum + self.phi[m,k] * task_matrices[m,:,:]
            self.gamma[k] = la.inv(la.inv(self.sigma) + 2*inner_sum)
            if self.debug: 
                print "gamma computation {0}".format(k), la.det(la.inv(self.sigma) + 2*inner_sum)
Project: MarkovModels    Author: pmontalb    | project source | file source
def build_gradient_matrix(self, x):
        """
        :param x: grid index
        :return:
        """
        if x == 0 or x == self.d - 1:
            raise ValueError("Boundary Conditions not treated here")

        nabla_matrix = np.zeros((2, 2))

        nabla_minus = self.grid[x - 1] - self.grid[x]
        nabla_plus = self.grid[x + 1] - self.grid[x]

        nabla_matrix[0, 0] = nabla_minus
        nabla_matrix[0, 1] = nabla_plus
        nabla_matrix[1, 0] = nabla_minus * nabla_minus
        nabla_matrix[1, 1] = nabla_plus * nabla_plus

        if linalg.det(nabla_matrix) == 0:
            raise ValueError("Nabla is not invertible")

        return nabla_matrix
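
The determinant guard can be seen in isolation: on a non-uniform grid, the nabla matrix is invertible whenever the two spacings are distinct and nonzero. A minimal sketch (the grid and interior index here are hypothetical, not project data):

import numpy as np
from scipy import linalg

grid = np.array([0.0, 0.5, 1.5])   # toy non-uniform grid; x = 1 is interior
nabla_minus = grid[0] - grid[1]
nabla_plus = grid[2] - grid[1]
nabla_matrix = np.array([[nabla_minus, nabla_plus],
                         [nabla_minus ** 2, nabla_plus ** 2]])
print(linalg.det(nabla_matrix))    # -0.75: nonzero, so Nabla is invertible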
Project: adversarial-variational-bayes    Author: gdikov    | project source | file source
def normal_density_at_zero(m, c):
    """ Compute the normal density with given mean and covariance at zero. 

    Parameters
    ----------    
    m : vector
        Mean.
    c : ndarray
        Covariance matrix. Assumption: c is a square matrix and its size is
        compatible with that of m.

    Returns
    -------
    g : float
        Computed density value.

    """

    dim = len(m)
    g = 1 / ((2 * pi)**(dim / 2) * sqrt(absolute(det(c)))) *\
        exp(-1/2 * dot(dot(m, inv(c)), m))

    return g
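
The value this helper returns can be cross-checked against scipy's own multivariate normal pdf evaluated at the origin. A minimal sketch, assuming plain numpy/scipy imports in place of the module's star imports:

import numpy as np
from scipy.linalg import det, inv
from scipy.stats import multivariate_normal

m = np.array([0.5, -1.0])
c = np.array([[2.0, 0.3], [0.3, 1.0]])
dim = len(m)

g = 1 / ((2 * np.pi) ** (dim / 2) * np.sqrt(abs(det(c)))) * \
    np.exp(-0.5 * m @ inv(c) @ m)

# same quantity: the pdf of N(m, c) evaluated at zero
assert np.isclose(g, multivariate_normal(mean=m, cov=c).pdf(np.zeros(dim)))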
Project: car-detection    Author: mmetcalfe    | project source | file source
def factor(self):
        """    Factorize the camera matrix into K,R,t as P = K[R|t]. """

        # factor first 3*3 part
        K,R = linalg.rq(self.P[:,:3])

        # make diagonal of K positive
        T = diag(sign(diag(K)))
        if linalg.det(T) < 0:
            T[1,1] *= -1

        self.K = dot(K,T)
        self.R = dot(T,R) # T is its own inverse
        self.t = dot(linalg.inv(self.K),self.P[:,3])

        return self.K, self.R, self.t
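
The factorization can be exercised end to end on a toy camera matrix. A sketch with a hypothetical P (not taken from the project):

import numpy as np
from scipy import linalg

P = np.hstack([np.eye(3), [[1.0], [2.0], [3.0]]])  # toy 3x4 camera matrix

K, R = linalg.rq(P[:, :3])
T = np.diag(np.sign(np.diag(K)))   # sign flips that make diag(K) positive
if linalg.det(T) < 0:
    T[1, 1] *= -1

K, R = np.dot(K, T), np.dot(T, R)  # T is its own inverse
t = np.dot(linalg.inv(K), P[:, 3])
assert np.allclose(np.dot(K, np.hstack([R, t[:, None]])), P)  # P = K[R|t]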
Project: speaker-diarization    Author: aalto-speech    | project source | file source
def bic(arr1, arr2, epsilon=1e-50):
    """Bayes Information Criterion"""
    # Notes: In the seminal paper "Speakers, environment and channel
    # change detection and clustering via the Bayesian Information
    # Criterion" by Chen and Gopalakrishnan, they use a growing window
    # approach, so it's not directly comparable when using a fixed
    # sliding window.
    arr = np.concatenate((arr1, arr2))
    detS1 = det(np.cov(arr1, rowvar=0))
    detS2 = det(np.cov(arr2, rowvar=0))
    N1 = arr1.shape[0]
    N2 = arr2.shape[0]
    N = arr.shape[0]
    detS = det(np.cov(arr, rowvar=0))
    d = 0.5 * N * np.log(detS) - 0.5 * N1 * np.log(detS1)\
        - 0.5 * N2 * np.log(detS2)
    p = arr.shape[1]
    corr = args.lambdac * 0.5 * (p + 0.5 * p * (p + 1)) * np.log(N)
    d -= corr
    return d
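
The snippet depends on a module-level args.lambdac. A self-contained variant with lambdac as an explicit parameter (a sketch on synthetic data; bic_distance is a hypothetical name) behaves as expected: a mean shift between the two windows raises the BIC distance.

import numpy as np
from scipy.linalg import det

def bic_distance(arr1, arr2, lambdac=1.0):
    arr = np.concatenate((arr1, arr2))
    N1, N2, N = len(arr1), len(arr2), len(arr)
    d = 0.5 * N * np.log(det(np.cov(arr, rowvar=0))) \
        - 0.5 * N1 * np.log(det(np.cov(arr1, rowvar=0))) \
        - 0.5 * N2 * np.log(det(np.cov(arr2, rowvar=0)))
    p = arr.shape[1]
    # penalty: p mean parameters plus p(p+1)/2 covariance parameters
    return d - lambdac * 0.5 * (p + 0.5 * p * (p + 1)) * np.log(N)

rng = np.random.default_rng(0)
a = rng.normal(size=(200, 4))
assert bic_distance(a, rng.normal(size=(200, 4))) < \
    bic_distance(a, rng.normal(5.0, size=(200, 4)))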
Project: speaker-diarization    Author: aalto-speech    | project source | file source
def glr(arr1, arr2):
    """Generalized Likelihood Ratio"""
    S1 = np.cov(arr1, rowvar=0)
    S2 = np.cov(arr2, rowvar=0)
    N1 = arr1.shape[0]
    N2 = arr2.shape[0]
    N = float(N1 + N2)
    # This is the covariance-only version: not optimized (revise), but more
    # robust to environmental noise conditions.
    # See Ulpu thesis pages 30-31, also Gish et al. "Segregation of
    # Speakers for Speech Recognition and Speaker Identification"
    d = -(N / 2.0) * ((N1 / N) * np.log(det(S1)) + (N2 / N) * np.log(det(S2))
                      - np.log(det((N1 / N) * S1 + (N2 / N) * S2)))
    # Ulpu version:
    # Includes the mean, theoretically less robust
    # arr = features[start:start+2*winsize]
    # S = cov(arr, rowvar=0)
    # d = -0.5*(N1*log(det(S1))+N2*log(det(S2))-N*log(det(S)))
    return d
Project: speaker-diarization    Author: aalto-speech    | project source | file source
def bic(arr1, arr2):
    """Bayes Information Criterion."""
    # Notes: In the seminal paper "Speakers, environment and channel
    # change detection and clustering via the Bayesian Information
    # Criterion" by Chen and Gopalakrishnan, they use a growing window
    # approach, so it's not directly comparable when using a fixed
    # sliding window.
    arr = np.concatenate((arr1, arr2))
    N1 = arr1.shape[0]
    N2 = arr2.shape[0]
    S1 = np.cov(arr1, rowvar=0)
    S2 = np.cov(arr2, rowvar=0)
    N = arr.shape[0]
    S = np.cov(arr, rowvar=0)
    d = 0.5 * N * np.log(det(S)) - 0.5 * N1 * np.log(det(S1))\
        - 0.5 * N2 * np.log(det(S2))
    p = arr.shape[1]
    corr = args.lambdac * 0.5 * (p + 0.5 * p * (p + 1)) * np.log(N)
    d -= corr
    return d
Project: speaker-diarization    Author: aalto-speech    | project source | file source
def glr(arr1, arr2):
    """Generalized Likelihood Ratio"""
    N1 = arr1.shape[0]
    N2 = arr2.shape[0]
    S1 = np.cov(arr1, rowvar=0)
    S2 = np.cov(arr2, rowvar=0)
    N = float(N1 + N2)
    # This is the covariance-only version: not optimized (revise), but more
    # robust to environmental noise conditions.
    # See Ulpu thesis pages 30-31, also Gish et al. "Segregation of
    # Speakers for Speech Recognition and Speaker Identification"
    d = -(N / 2.0) * ((N1 / N) * np.log(det(S1)) + (N2 / N) * np.log(det(S2))
                      - np.log(det((N1 / N) * S1 + (N2 / N) * S2)))
    # Ulpu version:
    # Includes the mean, theoretically less robust
    # arr = features[start:start+2*winsize]
    # S = cov(arr, rowvar=0)
    # d = -0.5*(N1*log(det(S1))+N2*log(det(S2))-N*log(det(S)))
    return d
Project: adversarial-variational-bayes    Author: gdikov    | project source | file source
def estimation(self, y, ds):
        """ Estimate KGV.

        Parameters
        ----------
        y : (number of samples, dimension)-ndarray
             One row of y corresponds to one sample.
        ds : int vector
             Dimensions of the individual subspaces in y; ds[i] = i^th
             subspace dimension.

        Returns
        -------
        i : float
            Estimated value of KGV.

        References
        ----------
        Francis Bach, Michael I. Jordan. Kernel Independent Component
        Analysis. Journal of Machine Learning Research, 3: 1-48, 2002.

        Francis Bach, Michael I. Jordan. Learning graphical models with
        Mercer kernels. International Conference on Neural Information
        Processing Systems (NIPS), pages 1033-1040, 2002.

        Examples
        --------
        i = co.estimation(y,ds)

        """

        # verification:
        self.verification_compatible_subspace_dimensions(y, ds)

        num_of_samples = y.shape[0]
        tol = num_of_samples * self.eta

        r = compute_matrix_r_kcca_kgv(y, ds, self.kernel, tol, self.kappa)
        i = -log(det(r)) / 2

        return i
Project: adversarial-variational-bayes    Author: gdikov    | project source | file source
def analytical_value_h_shannon(distr, par):
    """ Analytical value of the Shannon entropy for the given distribution.

    Parameters
    ----------    
    distr : str
            Name of the distribution.
    par : dictionary
          Parameters of the distribution. If distr = 'uniform': par["a"], 
          par["b"], par["l"] <- lxU[a,b]. If distr = 'normal' : par["cov"] 
          is the covariance matrix.

    Returns
    -------
    h : float
        Analytical value of the Shannon entropy.

    """

    if distr == 'uniform':
        # par = {"a": a, "b": b, "l": l}
        h = log(prod(par["b"] - par["a"])) + log(absolute(det(par["l"]))) 
    elif distr == 'normal':
        # par = {"cov": c}
        dim = par["cov"].shape[0]  # =c.shape[1]
        h = 1/2 * log((2 * pi * exp(1))**dim * det(par["cov"]))
        # = 1/2 * log(det(c)) + d / 2 * log(2*pi) + d / 2
    else:
        raise Exception('Distribution=?')

    return h
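
The normal branch is the classic closed form h = log((2*pi*e)^dim * det(cov)) / 2, which scipy also implements; a quick check (sketch, plain imports):

import numpy as np
from scipy.linalg import det
from scipy.stats import multivariate_normal

c = np.array([[2.0, 0.5], [0.5, 1.0]])
dim = c.shape[0]
h = 1 / 2 * np.log((2 * np.pi * np.e) ** dim * det(c))

assert np.isclose(h, multivariate_normal(mean=np.zeros(dim), cov=c).entropy())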
Project: adversarial-variational-bayes    Author: gdikov    | project source | file source
def analytical_value_c_cross_entropy(distr1, distr2, par1, par2):
    """ Analytical value of the cross-entropy for the given distributions.

    Parameters
    ----------    
    distr1, distr2 : str
                     Name of the distributions.
    par1, par2 : dictionaries
                 Parameters of the distribution. If distr1 = distr2 =
                 'normal': par1["mean"], par1["cov"] and par2["mean"],
                 par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    c : float
        Analytical value of the cross-entropy.

    """

    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']
        dim = len(m1)

        invc2 = inv(c2)
        diffm = m1 - m2

        c = 1/2 * (dim * log(2*pi) + log(det(c2)) + trace(dot(invc2, c1)) +
                   dot(diffm, dot(invc2, diffm)))
    else:
        raise Exception('Distribution=?')

    return c
Project: adversarial-variational-bayes    Author: gdikov    | project source | file source
def analytical_value_i_shannon(distr, par):
    """ Analytical value of mutual information for the given distribution.

    Parameters
    ----------    
    distr : str
            Name of the distribution.
    par : dictionary
          Parameters of the distribution. If distr = 'normal': par["ds"], 
          par["cov"] are the vector of component dimensions and the (joint) 
          covariance matrix. 

    Returns
    -------
    i : float
        Analytical value of the Shannon mutual information.

    """

    if distr == 'normal':
        c, ds = par["cov"], par["ds"]
        # 0,d_1,d_1+d_2,...,d_1+...+d_{M-1}; starting indices of the
        # subspaces:
        cum_ds = cumsum(hstack((0, ds[:-1])))
        i = 1
        for m in range(len(ds)):
            idx = range(cum_ds[m], cum_ds[m] + ds[m])
            i *= det(c[ix_(idx, idx)])

        i = log(i / det(c)) / 2     
    else:
        raise Exception('Distribution=?')

    return i
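
For two one-dimensional subspaces the loop reduces to the familiar Gaussian mutual information -log(1 - rho^2) / 2, which makes a cheap sanity check (sketch):

import numpy as np
from scipy.linalg import det

rho = 0.6
c = np.array([[1.0, rho], [rho, 1.0]])       # joint covariance, ds = [1, 1]
i = np.log(c[0, 0] * c[1, 1] / det(c)) / 2   # marginal dets over joint det
assert np.isclose(i, -np.log(1 - rho ** 2) / 2)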
Project: adversarial-variational-bayes    Author: gdikov    | project source | file source
def analytical_value_h_renyi(distr, alpha, par):
    """ Analytical value of the Renyi entropy for the given distribution.

    Parameters
    ----------    
    distr : str
            Name of the distribution.
    alpha : float, alpha \ne 1
            Parameter of the Renyi entropy.
    par : dictionary
          Parameters of the distribution. If distr = 'uniform': par["a"], 
          par["b"], par["l"] <- lxU[a,b]. If distr = 'normal' : par["cov"]
          is the covariance matrix.

    Returns
    -------
    h : float
        Analytical value of the Renyi entropy.

    References
    ----------
    Kai-Sheng Song. Renyi information, loglikelihood and an intrinsic 
    distribution measure. Journal of Statistical Planning and Inference
    93: 51-69, 2001.

    """

    if distr == 'uniform':
        # par = {"a": a, "b": b, "l": l}
        # We also apply the transformation rule of the Renyi entropy in
        # case of linear transformations:
        h = log(prod(par["b"] - par["a"])) + log(absolute(det(par["l"]))) 
    elif distr == 'normal': 
        # par = {"cov": c}
        dim = par["cov"].shape[0]  # =c.shape[1]
        h = log((2*pi)**(dim / 2) * sqrt(absolute(det(par["cov"])))) -\
            dim * log(alpha) / 2 / (1 - alpha)        
    else:
        raise Exception('Distribution=?')

    return h
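
As alpha -> 1 the Renyi entropy should recover the Shannon entropy of the same Gaussian, which gives a cheap regression test (sketch, plain imports):

import numpy as np
from scipy.linalg import det

c = np.array([[2.0, 0.5], [0.5, 1.0]])
dim = c.shape[0]

def h_renyi(alpha):
    return np.log((2 * np.pi) ** (dim / 2) * np.sqrt(abs(det(c)))) \
        - dim * np.log(alpha) / 2 / (1 - alpha)

h_shannon = 0.5 * np.log((2 * np.pi * np.e) ** dim * det(c))
assert np.isclose(h_renyi(1.0 + 1e-9), h_shannon)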
Project: adversarial-variational-bayes    Author: gdikov    | project source | file source
def analytical_value_h_sharma_mittal(distr, alpha, beta, par):
    """ Analytical value of the Sharma-Mittal entropy.

    Parameters
    ----------    
    distr : str
            Name of the distribution.
    alpha : float, 0 < alpha \ne 1
            Parameter of the Sharma-Mittal entropy.
    beta : float, beta \ne 1
           Parameter of the Sharma-Mittal entropy.

    par : dictionary
          Parameters of the distribution. If distr = 'normal' : par["cov"] 
          = covariance matrix.

    Returns
    -------
    h : float
        Analytical value of the Sharma-Mittal entropy.

    References
    ----------   
    Frank Nielsen and Richard Nock. A closed-form expression for the 
    Sharma-Mittal entropy of exponential families. Journal of Physics A: 
    Mathematical and Theoretical, 45:032003, 2012.

    """

    if distr == 'normal': 
        # par = {"cov": c}
        c = par['cov']
        dim = c.shape[0]  # =c.shape[1]
        h = (((2*pi)**(dim / 2) * sqrt(absolute(det(c))))**(1 - beta) /
             alpha**(dim * (1 - beta) / (2 * (1 - alpha))) - 1) / \
            (1 - beta)

    else:
        raise Exception('Distribution=?')

    return h
Project: adversarial-variational-bayes    Author: gdikov    | project source | file source
def analytical_value_i_renyi(distr, alpha, par):
    """ Analytical value of the Renyi mutual information.

    Parameters
    ----------    
    distr : str
            Name of the distribution.
    alpha : float
            Parameter of the Renyi mutual information.
    par : dictionary
          Parameters of the distribution. If distr = 'normal': par["cov"]
          is the covariance matrix.

    Returns
    -------
    i : float
        Analytical value of the Renyi mutual information.

    """

    if distr == 'normal':
        c = par["cov"]        

        t1 = -alpha / 2 * log(det(c))
        t2 = -(1 - alpha) / 2 * log(prod(diag(c)))
        t3 = log(det(alpha * inv(c) + (1 - alpha) * diag(1 / diag(c)))) / 2
        i = 1 / (alpha - 1) * (t1 + t2 - t3)            
    else:
        raise Exception('Distribution=?')

    return i
Project: adversarial-variational-bayes    Author: gdikov    | project source | file source
def analytical_value_d_hellinger(distr1, distr2, par1, par2):
    """ Analytical value of Hellinger distance for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str-s
                    Names of the distributions.
    par1, par2 : dictionary-s
                 Parameters of the distributions. If distr1 = distr2 =
                 'normal': par1["mean"], par1["cov"] and par2["mean"],
                 par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    d : float
        Analytical value of the Hellinger distance.

    """

    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']

        # "https://en.wikipedia.org/wiki/Hellinger_distance": Examples:
        diffm = m1 - m2
        avgc = (c1 + c2) / 2
        inv_avgc = inv(avgc)
        d = 1 - det(c1)**(1/4) * det(c2)**(1/4) / sqrt(det(avgc)) * \
            exp(-dot(diffm, dot(inv_avgc, diffm))/8)  # D^2

        d = sqrt(d)
    else:
        raise Exception('Distribution=?')

    return d
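
In one dimension with equal variances the cited formula collapses to H^2 = 1 - exp(-(m1 - m2)^2 / (8 s^2)); a minimal check (sketch, plain imports):

import numpy as np
from scipy.linalg import det, inv

m1, m2 = np.array([0.0]), np.array([1.0])
c1 = c2 = np.array([[1.0]])

diffm, avgc = m1 - m2, (c1 + c2) / 2
d2 = 1 - det(c1) ** 0.25 * det(c2) ** 0.25 / np.sqrt(det(avgc)) * \
    np.exp(-diffm @ inv(avgc) @ diffm / 8)
assert np.isclose(np.sqrt(d2), np.sqrt(1 - np.exp(-1 / 8)))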
Project: cgpm    Author: probcomp    | project source | file source
def logsqrtdet(self):
    # Sigma = L^T L -- but be careful: only the lower triangle and
    # diagonal of L are actually initialized; the upper triangle is
    # garbage.
    L, _lower = self._cholesky

    # det Sigma = det L^T L = det L^T det L = (det L)^2.  Since L is
    # triangular, its determinant is the product of its diagonal.  To
    # compute log sqrt(det Sigma) = log det L, we sum the logs of its
    # diagonal.
    return np.sum(np.log(np.diag(L)))
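
The identity in the comment is easy to confirm standalone (a sketch with a hypothetical Sigma):

import numpy as np
from scipy import linalg

Sigma = np.array([[4.0, 1.0], [1.0, 3.0]])
L = linalg.cholesky(Sigma, lower=True)   # Sigma = L L^T, L lower-triangular

# log sqrt(det Sigma) = log det L = sum of the log-diagonal of L
assert np.isclose(np.sum(np.log(np.diag(L))),
                  0.5 * np.log(linalg.det(Sigma)))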
Project: cgpm    Author: probcomp    | project source | file source
def logsqrtdet(self):
    return (1/2)*np.log(la.det(self._Sigma))
Project: cgpm    Author: probcomp    | project source | file source
def logpdf(X, Mu, Sigma):
  """Multivariate normal log pdf."""
  # This is the multivariate normal log pdf for an array X of n
  # outputs, an array Mu of n means, and an n-by-n positive-definite
  # covariance matrix Sigma.  The direct-space density is:
  #
  #     P(X | Mu, Sigma)
  #       = ((2 pi)^n det Sigma)^(-1/2)
  #         exp((-1/2) (X - Mu)^T Sigma^-1 (X - Mu)),
  #
  # We want this in log-space, so we have
  #
  #     log P(X | Mu, Sigma)
  #     = (-1/2) (X - Mu)^T Sigma^-1 (X - Mu) - log ((2 pi)^n det Sigma)^(1/2)
  #     = (-1/2) (X - Mu)^T Sigma^-1 (X - Mu)
  #         - (n/2) log (2 pi) - (1/2) log det Sigma.
  #
  n = len(X)
  assert X.shape == (n,)
  assert Mu.shape == (n,)
  assert Sigma.shape == (n, n)
  assert np.all(np.isfinite(X))
  assert np.all(np.isfinite(Mu))
  assert np.all(np.isfinite(Sigma))

  X_ = X - Mu
  covf = _covariance_factor(Sigma)

  logp = -np.dot(X_.T, covf.solve(X_)/2.)
  logp -= (n/2.)*np.log(2*np.pi)
  logp -= covf.logsqrtdet()

  # Convert 1x1 matrix to float.
  return float(logp)
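
The derivation in the comments can be replayed with a dense solve standing in for the project's _covariance_factor helper (a sketch; the helper itself is not shown here):

import numpy as np
from scipy import linalg
from scipy.stats import multivariate_normal

n = 3
X, Mu = np.array([0.2, -0.1, 0.5]), np.zeros(n)
Sigma = np.diag([1.0, 2.0, 0.5])

X_ = X - Mu
logp = -0.5 * X_ @ linalg.solve(Sigma, X_, assume_a='pos') \
    - (n / 2) * np.log(2 * np.pi) - 0.5 * np.log(linalg.det(Sigma))
assert np.isclose(logp, multivariate_normal(Mu, Sigma).logpdf(X))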
Project: QuantEcon.lectures.code    Author: QuantEcon    | project source | file source
def loglikelihood_path(self, x, y):
        A, B, D, F = self.A, self.B, self.D, self.F
        k, T = y.shape
        FF = F @ F.T
        FFinv = la.inv(FF)
        temp = y[:, 1:] - y[:, :-1] - D @ x[:, :-1]
        obs =  temp * FFinv * temp
        obssum = np.cumsum(obs)
        scalar = (np.log(la.det(FF)) + k*np.log(2*np.pi))*np.arange(1, T)

        return -(.5)*(obssum + scalar)
Project: basicCV    Author: chenminhua    | project source | file source
def factor(self):
        # factor first 3*3 part
        K,R = linalg.rq(self.P[:,:3])
        # make diagonal of K positive
        T = diag(sign(diag(K)))
        if linalg.det(T) < 0:
            T[1,1] *= -1

        self.K = dot(K,T)
        self.R = dot(T,R) # T is its own inverse
        self.t = dot(linalg.inv(self.K),self.P[:,3])

        return self.K, self.R, self.t
Project: speaker-diarization    Author: aalto-speech    | project source | file source
def bic(arr1, arr2, arr, i=0, saved={}):
    """Bayes Information Criterion

    Notes: In the seminal paper "Speakers, environment and channel change
    detection and clustering via the Bayesian Information Criterion" by Chen
    and Gopalakrishnan, they use a growing window approach, so it's not
    directly comparable when using a fixed sliding window.

    In BIC, we can save the first matrix calculations since in growing windows
    these keep repeating all the time, and we are saving just one float so
    it's also memory efficient and saves a lot of time (Antonio)"""

    if i in saved:
        c1 = saved[i]
    else:
        S1 = np.cov(arr1, rowvar=0)
        N1 = arr1.shape[0]
        c1 = 0.5 * N1 * np.log(det(S1))
        saved[i] = c1
    S2 = np.cov(arr2, rowvar=0)
    N2 = arr2.shape[0]
    N = arr.shape[0]
    S = np.cov(arr, rowvar=0)
    d = 0.5 * N * np.log(det(S)) - c1\
        - 0.5 * N2 * np.log(det(S2))
    p = arr.shape[1]
    corr = args.lambdac * 0.5 * (p + 0.5 * p * (p + 1)) * np.log(N)
    d -= corr
    return d
Project: PyDiatomic    Author: stggh    | project source | file source
def matching_point(en, rot, V, R, mu):
    """ estimate matching point for inward and outward solutions position
    based on the determinant of the R-matrix.

    Parameters
    ----------
    en : float
        potential energy of the solution
    rot : int
        rotational quantum number J
    V : numpy 3d array
        potential curve and couplings matrix
    R : numpy 1d array
        internuclear distance grid
    mu : float
        reduced mass in kg

    Returns
    -------
    mx : int
        matching point grid index

    """

    oo, n, m = V.shape

    Vm = min([V[-1][j][j].min() for j in range(n)])  # lowest dissociation energy

    if en > Vm:
        return oo-1
    else:
        Vnn = np.transpose(V)[-1][-1]  # -1 -1 highest PEC?
        mx = np.abs(Vnn - en).argmin()

        WI = WImat(en, rot, V, R, mu)
        Rm = RImat(WI, mx)
        while linalg.det(Rm[mx]) > 1:
            mx -= 1

    return mx
Project: PyDiatomic    Author: stggh    | project source | file source
def eigen(energy, rot, mx, V, R, mu):
    """ determine eigen energy solution based.

    Parameters
    ----------
    energy : float
        energy (eV) of the attempted solution
    rot : int
        rotational quantum number
    mx : int
        matching point index, for inward and outward solutions
    V : numpy 3d array
        potential energy curve and coupling matrix
    R : numpy 1d array
        internuclear distance grid
    mu : float
        reduced mass in kg

    Returns
    -------
    eigenvalue : float
        energy of the solution

    """

    WI = WImat(energy, rot, V, R, mu)
    RI = RImat(WI, mx)

    # | R_mx - R^-1_mx+1 |
    return linalg.det(linalg.inv(RI[mx])-RI[mx+1])
Project: deepGP_approxEP    Author: thangbui    | project source | file source
def compute_phi_prior(self):
        phi = 0
        for d in range(self.Dout):
            # s, a = npalg.slogdet(self.Kuu[d])
            a = np.log(npalg.det(self.Kuu[d]))
            phi += 0.5 * a
        return phi
Project: deepGP_approxEP    Author: thangbui    | project source | file source
def compute_phi_posterior(self):
        phi = 0
        for d in range(self.Dout):
            mud_val = self.mu[d].get_value()
            Sud_val = self.Su[d].get_value()
            # s, a = npalg.slogdet(Sud_val)
            a = np.log(npalg.det(Sud_val))
            phi += 0.5 * np.dot(mud_val.T, np.dot(npalg.inv(Sud_val), mud_val))[0, 0]
            phi += 0.5 * a
        return phi
Project: deepGP_approxEP    Author: thangbui    | project source | file source
def compute_phi_cavity(self):
        phi = 0
        for d in range(self.Dout):
            muhatd_val = self.muhat[d].get_value()
            Suhatd_val = self.Suhat[d].get_value()
            # s, a = npalg.slogdet(Sud_val)
            a = np.log(npalg.det(Suhatd_val))
            phi += 0.5 * np.dot(muhatd_val.T, np.dot(npalg.inv(Suhatd_val), muhatd_val))[0, 0]
            phi += 0.5 * a
        return phi
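
All three phi terms take np.log(npalg.det(...)) of a covariance-sized matrix; the commented-out slogdet is the numerically safer route, since det itself silently under- or overflows. A toy illustration (sketch):

import numpy as np

K = 0.01 * np.eye(500)             # det(K) = 0.01**500 underflows to 0.0
print(np.log(np.linalg.det(K)))    # -inf, with a divide-by-zero warning
sign, logdet = np.linalg.slogdet(K)
print(sign, logdet)                # 1.0, 500 * log(0.01) ~ -2302.59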
Project: Matrix-Analysis    Author: kingofspace0wzz    | project source | file source
def projection(X, Y):

    rankX = rank(X)
    rankY = rank(Y)

    # rank, or dimension, of the original space
    rankO = rankX + rankY

    # check if two subspaces have the same shapes
    if X.shape != Y.shape:
        raise Exception('The two subspaces do not have the same shapes')

    # check if O is singular
    if la.det(np.hstack((X, Y))) == 0:
        raise Exception('X + Y is not the direct sum of the original space')

    # check whether each subspace is of full column/row rank
    if rankX < min(X.shape):
        raise Exception('subspace X is not of full rank')

    elif rankY < min(Y.shape):
        raise Exception('subspace Y is not of full rank')
    # X and Y are of full column rank
    elif rankX == X.shape[1] & rankY == Y.shape[1]:
        return np.hstack((X, np.zeros((X.shape[0], rankO - rankX)))).dot(la.inv(np.hstack((X, Y))))
    # X and Y are of full row rank
    elif rankX == X.shape[0] & rankY == Y.shape[0]:
        return np.vstack((X, np.zeros((rankO - rankX, X.shape[1])))).dot(la.inv(np.vstack(X, Y)))

# orthogonal projection matrix
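
A usage sketch for the full-column-rank branch of projection() above (hypothetical 2-D data): the returned matrix projects onto span{x} along span{y}.

import numpy as np
from scipy import linalg

x = np.array([[1.0], [0.0]])
y = np.array([[1.0], [1.0]])
P = np.hstack((x, np.zeros((2, 1)))).dot(linalg.inv(np.hstack((x, y))))

v = np.array([3.0, 2.0])   # v = 1*x + 2*y
print(P.dot(v))            # [1. 0.]: only the x-component survives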
Project: adversarial-variational-bayes    Author: gdikov    | project source | file source
def estimation(self, y):
        """ Estimate Shannon entropy.

        Parameters
        ----------
        y : (number of samples, dimension)-ndarray
             One row of y corresponds to one sample.

        Returns
        -------
        h : float
            Estimated Shannon entropy.

        References
        ----------
        Qing Wang, Sanjeev R. Kulkarni, and Sergio Verdu. Universal
        estimation of information measures for analog sources. Foundations
        And Trends In Communications And Information Theory, 5:265-353,
        2009.

        Examples
        --------
        h = co.estimation(y)

        """

        num_of_samples, dim = y.shape  # number of samples, dimension

        # estimate the mean and the covariance of y:
        m = mean(y, axis=0)
        c = cov(y, rowvar=False)  # 'rowvar=False': 1 row = 1 observation

        # entropy of N(m,c):
        if dim == 1: 
            det_c = c  # det(): 'expected square matrix' exception
            # multivariate_normal(): 'cov must be 2 dimensional and square'
            # exception:
            c = array([[c]])

        else:
            det_c = det(c)

        h_normal = 1/2 * log((2*pi*exp(1))**dim * det_c)

        # generate samples from N(m,c):
        y_normal = multivariate_normal(m, c, num_of_samples)

        h = h_normal - self.kl_co.estimation(y, y_normal)

        return h
Project: adversarial-variational-bayes    Author: gdikov    | project source | file source
def analytical_value_k_prob_product(distr1, distr2, rho, par1, par2):
    """ Analytical value of the probability product kernel.

    Parameters
    ----------    
    distr1, distr2 : str
                     Name of the distributions.
    rho: float, >0
         Parameter of the probability product kernel.
    par1, par2 : dictionary-s
                 Parameters of the distributions. If distr1 = distr2 = 
                 'normal': par1["mean"], par1["cov"] and par2["mean"], 
                 par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    k : float
         Analytical value of the probability product kernel.

    """

    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']
        dim = len(m1)

        # inv1, inv2, inv12:
        inv1, inv2 = inv(c1), inv(c2)
        inv12 = inv(inv1+inv2)

        m12 = dot(inv1, m1) + dot(inv2, m2)
        exp_arg = \
            dot(m1, dot(inv1, m1)) + dot(m2, dot(inv2, m2)) -\
            dot(m12, dot(inv12, m12))

        k = (2 * pi)**((1 - 2 * rho) * dim / 2) * rho**(-dim / 2) *\
            absolute(det(inv12))**(1 / 2) * \
            absolute(det(c1))**(-rho / 2) * \
            absolute(det(c2))**(-rho / 2) * exp(-rho / 2 * exp_arg)
    else:
        raise Exception('Distribution=?')

    return k
Project: adversarial-variational-bayes    Author: gdikov    | project source | file source
def analytical_value_k_expected(distr1, distr2, sigma, par1, par2):
    """ Analytical value of expected kernel for the given distributions.

    Parameters
    ----------    
    distr1, distr2 : str
                     Names of the distributions.
    sigma: float, >0
           Std parameter of the expected kernel.
    par1, par2 : dictionary-s
                 Parameters of the distributions. If distr1 = distr2 = 
                 'normal': par1["mean"], par1["cov"] and par2["mean"], 
                 par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    k : float
        Analytical value of the expected kernel.

    References
    ----------
    Krikamol Muandet, Kenji Fukumizu, Francesco Dinuzzo, and Bernhard 
    Scholkopf. Learning from distributions via support measure machines.
    In Advances in Neural Information Processing Systems (NIPS), pages
    10-18, 2011.

    """

    if distr1 == 'normal' and distr2 == 'normal':

        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']
        dim = len(m1)

        gam = 1 / sigma**2
        diffm = m1 - m2 
        exp_arg = dot(dot(diffm, inv(c1 + c2 + eye(dim) / gam)), diffm)
        k = exp(-exp_arg / 2) / \
            sqrt(absolute(det(gam * c1 + gam * c2 + eye(dim))))
    else:
        raise Exception('Distribution=?')

    return k
Project: adversarial-variational-bayes    Author: gdikov    | project source | file source
def analytical_value_d_sharma_mittal(distr1, distr2, alpha, beta, par1,
                                     par2):
    """ Analytical value of the Sharma-Mittal divergence.

    Parameters
    ----------    
    distr1, distr2 : str-s
                     Names of distributions.
    alpha : float, 0 < alpha \ne 1
            Parameter of the Sharma-Mittal divergence.
    beta : float, beta \ne 1
           Parameter of the Sharma-Mittal divergence.
    par1, par2 : dictionary-s
                 Parameters of distributions.
                 If (distr1,distr2) = ('normal','normal'), then distr1 =
                 N(m1,c1), where m1 = par1['mean'], c1 = par1['cov'],
                 distr2 = N(m2,c2), where m2 = par2['mean'], c2 =
                 par2['cov'].

    Returns
    -------
    D : float
        Analytical value of the Sharma-Mittal divergence.

    References
    ----------          
    Frank Nielsen and Richard Nock. A closed-form expression for the 
    Sharma-Mittal entropy of exponential families. Journal of Physics A: 
    Mathematical and Theoretical, 45:032003, 2012.

    """

    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']

        c = inv(alpha * inv(c1) + (1 - alpha) * inv(c2))
        diffm = m1 - m2

        # Jensen difference divergence, c2:
        j = (log(absolute(det(c1))**alpha * absolute(det(c2))**(1 -
                                                                alpha) /
                 absolute(det(c))) + alpha * (1 - alpha) *
             dot(dot(diffm, inv(c)), diffm)) / 2
        c2 = exp(-j)

        d = (c2**((1 - beta) / (1 - alpha)) - 1) / (beta - 1)

    else:
        raise Exception('Distribution=?')

    return d
Project: prml    Author: Yevgnen    | project source | file source
def fit(self, X, T, max_iter=int(1e2), tol=1e-3, bound=1e10):
        """Fit a RVM model with the training data ``(X, T)``."""
        # Initialize the hyperparameters
        self._init_hyperparameters(X, T)

        # Compute design matrix
        n_samples = X.shape[0]
        phi = sp.c_[sp.ones(n_samples), self._compute_design_matrix(X)]  # Add x0

        alpha = self.cov
        beta = self.beta

        log_evidence = -1e10
        for iter in range(max_iter):
            alpha[alpha >= bound] = bound
            rv_indices = sp.nonzero(alpha < bound)[0]
            rv_phi = phi[:, rv_indices]
            rv_alpha = alpha[rv_indices]

            # Compute the posterior distribution
            post_cov = spla.inv(sp.diag(rv_alpha) + beta * sp.dot(rv_phi.T, rv_phi))
            post_mean = beta * sp.dot(post_cov, sp.dot(rv_phi.T, T))

            # Re-estimate the hyperparameters
            gamma = 1 - rv_alpha * post_cov.diagonal()
            rv_alpha = gamma / (post_mean * post_mean)
            beta = (n_samples + 1 - gamma.sum()) / spla.norm(T - sp.dot(rv_phi, post_mean))**2

            # Evaluate the log evidence and test the relative change
            C = sp.eye(rv_phi.shape[0]) / beta + rv_phi.dot(sp.diag(1.0 / rv_alpha)).dot(rv_phi.T)
            log_evidence_new = -0.5 * (sp.log(spla.det(C)) + T.dot(spla.inv(C)).dot((T)))
            diff = spla.norm(log_evidence_new - log_evidence)
            if (diff < tol * spla.norm(log_evidence)):
                break

            log_evidence = log_evidence_new
            alpha[rv_indices] = rv_alpha

        # Should re-compute the posterior distribution
        self.rv_indices = rv_indices
        self.cov = post_cov
        self.mean = post_mean
        self.beta = beta

        return self
Project: PyDiatomic    Author: stggh    | project source | file source
def amplitude(wf, R, edash, mu):
    # Mies    F ~ JA + NB       J ~ sin(kR)/kR
    # normalization sqrt(2 mu/(pi hbar^2)) = zz
    zz = np.sqrt(2*mu/const.pi)/const.hbar

    oo, n, nopen = wf.shape

    # two asymptotic points on wavefunction wf[:, j]
    i1 = oo-5
    i2 = i1-1
    x1 = R[i1]*1.0e-10
    x2 = R[i2]*1.0e-10

    A = np.zeros((nopen, nopen))
    B = np.zeros((nopen, nopen))
    oc = 0
    for j in range(n):
        if edash[j] < 0:
            continue
        # open channel
        ke = np.sqrt(2*mu*edash[j]*const.e)/const.hbar
        rtk = np.sqrt(ke)
        kex1 = ke*x1
        kex2 = ke*x2

        j1 = sph_jn(0, kex1)[0]*x1*rtk*zz
        y1 = sph_yn(0, kex1)[0]*x1*rtk*zz

        j2 = sph_jn(0, kex2)[0]*x2*rtk*zz
        y2 = sph_yn(0, kex2)[0]*x2*rtk*zz

        det = j1*y2 - j2*y1

        for k in range(nopen):
            A[oc, k] = (y2*wf[i1, j, k] - y1*wf[i2, j, k])/det
            B[oc, k] = (j1*wf[i2, j, k] - j2*wf[i1, j, k])/det

        oc += 1

    AI = linalg.inv(A)
    K = B @ AI

    return K, AI, B
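
Note that sph_jn and sph_yn were removed from scipy.special in SciPy 1.0; the modern equivalents are spherical_jn and spherical_yn, which return the value directly instead of a (values, derivatives) pair. A sketch with toy numbers:

import numpy as np
from scipy.special import spherical_jn, spherical_yn

ke, x1 = 2.0e10, 3.0e-10           # toy wavenumber (1/m) and radius (m)
j1 = spherical_jn(0, ke * x1)      # replaces sph_jn(0, ke*x1)[0]
y1 = spherical_yn(0, ke * x1)      # replaces sph_yn(0, ke*x1)[0]
assert np.isclose(j1, np.sin(ke * x1) / (ke * x1))   # j0(z) = sin(z)/z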
Project: Parallel-SGD    Author: angadgill    | project source | file source
def _update_precisions(self, X, z):
        """Update the variational distributions for the precisions"""
        n_features = X.shape[1]
        if self.covariance_type == 'spherical':
            self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
            for k in range(self.n_components):
                # could be more memory efficient ?
                sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
                self.scale_[k] = 1.
                self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
                self.bound_prec_[k] = (
                    0.5 * n_features * (
                        digamma(self.dof_[k]) - np.log(self.scale_[k])))
            self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T

        elif self.covariance_type == 'diag':
            for k in range(self.n_components):
                self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
                sq_diff = (X - self.means_[k]) ** 2  # see comment above
                self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
                    z.T[k], (sq_diff + 1))
                self.precs_[k] = self.dof_[k] / self.scale_[k]
                self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
                                                   - np.log(self.scale_[k]))
                self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])

        elif self.covariance_type == 'tied':
            self.dof_ = 2 + X.shape[0] + n_features
            self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
            for k in range(self.n_components):
                diff = X - self.means_[k]
                self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
            self.scale_ = pinvh(self.scale_)
            self.precs_ = self.dof_ * self.scale_
            self.det_scale_ = linalg.det(self.scale_)
            self.bound_prec_ = 0.5 * wishart_log_det(
                self.dof_, self.scale_, self.det_scale_, n_features)
            self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)

        elif self.covariance_type == 'full':
            for k in range(self.n_components):
                sum_resp = np.sum(z.T[k])
                self.dof_[k] = 2 + sum_resp + n_features
                self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
                diff = X - self.means_[k]
                self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
                self.scale_[k] = pinvh(self.scale_[k])
                self.precs_[k] = self.dof_[k] * self.scale_[k]
                self.det_scale_[k] = linalg.det(self.scale_[k])
                self.bound_prec_[k] = 0.5 * wishart_log_det(
                    self.dof_[k], self.scale_[k], self.det_scale_[k],
                    n_features)
                self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
                    self.scale_[k])