Python scipy.special module: betaln() example source code

The following 13 code examples, extracted from open-source Python projects, illustrate how to use scipy.special.betaln().
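
Before the project snippets, here is a minimal, self-contained sketch (not taken from any of the projects below) showing what betaln() computes: the natural logarithm of the Beta function, evaluated stably in log space.

import numpy as np
from scipy.special import betaln, gammaln

a, b = 2.5, 7.0
# log B(a, b) = log Gamma(a) + log Gamma(b) - log Gamma(a + b)
assert np.isclose(betaln(a, b), gammaln(a) + gammaln(b) - gammaln(a + b))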

Project: siHMM    Author: Ardavans
def _get_statistics(self,data):
        # NOTE: since this isn't really in exponential family, this method needs
        # to look at hyperparameters. form posterior hyperparameters for the p
        # parameters here so we can integrate them out and get the r statistics
        n, tot = super(NegativeBinomialIntegerR,self)._get_statistics(data)
        if n > 0:
            alpha_n, betas_n = self.alpha_0 + tot, self.beta_0 + self.r_support*n
            data = flattendata(data)
            log_marg_likelihoods = \
                    special.betaln(alpha_n, betas_n) \
                        - special.betaln(self.alpha_0, self.beta_0) \
                    + (special.gammaln(data[:,na]+self.r_support)
                        - special.gammaln(data[:,na]+1) \
                        - special.gammaln(self.r_support)).sum(0)
        else:
            log_marg_likelihoods = np.zeros_like(self.r_support)

        return n, tot, log_marg_likelihoods
Project: cgpm    Author: probcomp
def calc_logpdf_marginal(N, x_sum, alpha, beta):
        return betaln(x_sum + alpha, N - x_sum + beta) - betaln(alpha, beta)
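
As a hedged usage sketch (the values below are illustrative, not from cgpm): this is the closed-form log marginal likelihood of a Beta-Bernoulli model, for N binary observations of which x_sum are ones.

import math
import numpy as np
from scipy.special import betaln

N, x_sum, alpha, beta = 10, 7, 1.0, 1.0
logp = betaln(x_sum + alpha, N - x_sum + beta) - betaln(alpha, beta)
# Under the uniform Beta(1, 1) prior this equals log(7! * 3! / 11!).
assert np.isclose(logp, math.log(math.factorial(7) * math.factorial(3) / math.factorial(11)))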
Project: cgpm    Author: probcomp
def calc_log_Z(a, b):
        return betaln(a, b)
Project: pgsm    Author: aroth85
def log_marginal_likelihood(self, params):
        return np.sum(log_beta(params.a, params.b) - log_beta(self.priors.a, self.priors.b))
Project: pgsm    Author: aroth85
def log_predictive_likelihood(self, data_point, params):
        ll = log_beta(params.a + data_point, params.b + (1 - data_point))

        ll -= log_beta(params.a, params.b)

        return np.sum(ll)
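
Assuming log_beta here wraps scipy.special.betaln (the snippet does not show the import), the predictive likelihood of a single Bernoulli observation has a simple closed form; this hedged check illustrates it for x = 1.

import numpy as np
from scipy.special import betaln

a, b = 3.0, 5.0
# B(a + 1, b) / B(a, b) = a / (a + b), so the log predictive probability of x = 1 is log(a / (a + b)).
assert np.isclose(betaln(a + 1, b) - betaln(a, b), np.log(a / (a + b)))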
Project: siHMM    Author: Ardavans
def _log_partition_function(self,alpha,beta):
        return special.betaln(alpha,beta)
Project: siHMM    Author: Ardavans
def _log_partition_function(self,alpha,beta):
        return special.betaln(alpha,beta)
Project: siHMM    Author: Ardavans
def get_vlb(self):
        Elnp, Eln1mp = self._mf_expected_statistics()
        p_avgengy = (self.alpha_0-1)*Elnp + (self.beta_0-1)*Eln1mp \
                - (special.gammaln(self.alpha_0) + special.gammaln(self.beta_0)
                        - special.gammaln(self.alpha_0 + self.beta_0))
        q_entropy = special.betaln(self.alpha_mf,self.beta_mf) \
                - (self.alpha_mf-1)*special.digamma(self.alpha_mf) \
                - (self.beta_mf-1)*special.digamma(self.beta_mf) \
                + (self.alpha_mf+self.beta_mf-2)*special.digamma(self.alpha_mf+self.beta_mf)
        return p_avgengy + q_entropy
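
The q_entropy term above is the differential entropy of a Beta(alpha_mf, beta_mf) distribution. A hedged, standalone cross-check against scipy's reference implementation (parameter values are illustrative):

import numpy as np
from scipy import special, stats

a, b = 2.3, 4.7
q_entropy = special.betaln(a, b) \
        - (a - 1) * special.digamma(a) \
        - (b - 1) * special.digamma(b) \
        + (a + b - 2) * special.digamma(a + b)
assert np.isclose(q_entropy, stats.beta(a, b).entropy())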
Project: siHMM    Author: Ardavans
def _get_statistics(self,data=[]):
        n, tot = self._fixedr_distns[0]._get_statistics(data)
        if n > 0:
            data = flattendata(data)
            alphas_n, betas_n = self.alphas_0 + tot, self.betas_0 + self.r_support*n
            log_marg_likelihoods = \
                    special.betaln(alphas_n, betas_n) \
                        - special.betaln(self.alphas_0, self.betas_0) \
                    + (special.gammaln(data[:,na]+self.r_support)
                        - special.gammaln(data[:,na]+1) \
                        - special.gammaln(self.r_support)).sum(0)
        else:
            log_marg_likelihoods = np.zeros_like(self.r_support)
        return log_marg_likelihoods
Project: siHMM    Author: Ardavans
def _posterior_hypparams(self,n,tot,normalizers,feasible):
        if n == 0:
            return n, self.alpha_0, self.r_probs, self.r_support
        else:
            r_probs = self.r_probs[feasible]
            r_support = self.r_support[feasible]
            log_marg_likelihoods = special.betaln(self.alpha_0 + tot - n*r_support,
                                                        self.beta_0 + r_support*n) \
                                    - special.betaln(self.alpha_0, self.beta_0) \
                                    + normalizers
            log_marg_probs = np.log(r_probs) + log_marg_likelihoods
            log_marg_probs -= log_marg_probs.max()
            marg_probs = np.exp(log_marg_probs)

            return n, self.alpha_0 + tot, marg_probs, r_support
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testBetaBetaKL(self):
    with self.test_session() as sess:
      for shape in [(10,), (4, 5)]:
        a1 = 6.0 * np.random.random(size=shape) + 1e-4
        b1 = 6.0 * np.random.random(size=shape) + 1e-4
        a2 = 6.0 * np.random.random(size=shape) + 1e-4
        b2 = 6.0 * np.random.random(size=shape) + 1e-4
        # Take inverse softplus of values to test BetaWithSoftplusAB
        a1_sp = np.log(np.exp(a1) - 1.0)
        b1_sp = np.log(np.exp(b1) - 1.0)
        a2_sp = np.log(np.exp(a2) - 1.0)
        b2_sp = np.log(np.exp(b2) - 1.0)

        d1 = beta_lib.Beta(a=a1, b=b1)
        d2 = beta_lib.Beta(a=a2, b=b2)
        d1_sp = beta_lib.BetaWithSoftplusAB(a=a1_sp, b=b1_sp)
        d2_sp = beta_lib.BetaWithSoftplusAB(a=a2_sp, b=b2_sp)

        kl_expected = (special.betaln(a2, b2) - special.betaln(a1, b1) +
                       (a1 - a2) * special.digamma(a1) +
                       (b1 - b2) * special.digamma(b1) +
                       (a2 - a1 + b2 - b1) * special.digamma(a1 + b1))

        for dist1 in [d1, d1_sp]:
          for dist2 in [d2, d2_sp]:
            kl = kullback_leibler.kl(dist1, dist2)
            kl_val = sess.run(kl)
            self.assertEqual(kl.get_shape(), shape)
            self.assertAllClose(kl_val, kl_expected)

        # Make sure KL(d1||d1) is 0
        kl_same = sess.run(kullback_leibler.kl(d1, d1))
        self.assertAllClose(kl_same, np.zeros_like(kl_expected))
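
The kl_expected expression is the standard closed-form KL divergence between two Beta distributions. A hedged, framework-free check against numerical integration (parameter values are illustrative):

import numpy as np
from scipy import special, stats
from scipy.integrate import quad

a1, b1, a2, b2 = 2.0, 3.0, 4.0, 1.5
kl_closed = (special.betaln(a2, b2) - special.betaln(a1, b1)
             + (a1 - a2) * special.digamma(a1)
             + (b1 - b2) * special.digamma(b1)
             + (a2 - a1 + b2 - b1) * special.digamma(a1 + b1))
p, q = stats.beta(a1, b1), stats.beta(a2, b2)
# KL(p || q) = integral over (0, 1) of p(x) * (log p(x) - log q(x)) dx
kl_numeric, _ = quad(lambda x: p.pdf(x) * (p.logpdf(x) - q.logpdf(x)), 0, 1)
assert np.isclose(kl_closed, kl_numeric)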
Project: python-psignifit    Author: wichmann-lab
def my_betapdf(x,a,b):
    ''' this implements the beta pdf with fewer input checks '''

    if isinstance(x, (int, float)):
        x = array(x)

    # Initialize y to zero.
    y = zeros(shape(x))

    if len(ravel(a)) == 1:
        a = tile(a,shape(x))

    if len(ravel(b)) == 1:
        b = tile(b,shape(x))

    # Special cases
    y[logical_and(a==1, x==0)] = b[logical_and(a==1 , x==0)]
    y[logical_and(b==1 , x==1)] = a[logical_and(b==1 , x==1)]
    y[logical_and(a<1 , x==0)] = inf
    y[logical_and(b<1 , x==1)] = inf

    # Return NaN for out of range parameters.
    y[a<=0] = nan
    y[b<=0] = nan
    y[logical_or(logical_or(isnan(a), isnan(b)), isnan(x))] = nan

    # Normal values
    k = logical_and(logical_and(a>0, b>0),logical_and(x>0 , x<1))
    a = a[k]
    b = b[k]
    x = x[k]

    # Compute logs
    smallx = x<0.1

    loga = (a-1)*log(x)

    logb = zeros(shape(x))
    logb[smallx] = (b[smallx]-1) * log1p(-x[smallx])
    logb[~smallx] = (b[~smallx]-1) * log(1-x[~smallx])

    y[k] = exp(loga+logb - betaln(a,b))

    return y
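
A hedged cross-check (not part of psignifit): on the interior of (0, 1), the log-space computation above matches scipy's reference Beta density.

import numpy as np
from scipy.special import betaln
from scipy.stats import beta as beta_dist

x = np.linspace(0.05, 0.95, 7)
a, b = 2.0, 3.5
y = np.exp((a - 1) * np.log(x) + (b - 1) * np.log1p(-x) - betaln(a, b))
assert np.allclose(y, beta_dist.pdf(x, a, b))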
Project: megamix    Author: 14thibea
def _convergence_criterion_simplified(self,points,log_resp,log_prob_norm):
        """
        Compute the lower bound of the likelihood using the simplified Blei and
        Jordan formula. It can only be used with the data the model was fitted on.


        Parameters
        ----------
        points : an array (n_points,dim)

        log_resp: an array (n_points,n_components)
            an array containing the logarithm of the responsibilities.

        log_prob_norm : an array (n_points,)
            logarithm of the probability of each sample in points

        Returns
        -------
        result : float
            the lower bound of the likelihood

        """

        resp = np.exp(log_resp)
        n_points,dim = points.shape

        prec = np.linalg.inv(self._inv_prec)
        prec_prior = np.linalg.inv(self._inv_prec_prior)

        lower_bound = np.zeros(self.n_components)

        for i in range(self.n_components):

            lower_bound[i] = _log_B(prec_prior,self.nu_0) - _log_B(prec[i],self.nu[i])

            resp_i = resp[:,i:i+1]
            log_resp_i = log_resp[:,i:i+1]

            lower_bound[i] -= np.sum(resp_i*log_resp_i)
            lower_bound[i] += dim*0.5*(np.log(self.beta_0) - np.log(self.beta[i]))

        result = np.sum(lower_bound)
        result -= self.n_components * betaln(1,self.alpha_0)
        result += np.sum(betaln(self.alpha.T[0],self.alpha.T[1]))
        result -= n_points * dim * 0.5 * np.log(2*np.pi)

        return result
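
A hedged side note (illustrative, not from megamix): the betaln(1, self.alpha_0) term is presumably the log normalizer of the stick-breaking prior Beta(1, alpha_0), and since B(1, a) = 1 / a it reduces to -log(alpha_0).

import numpy as np
from scipy.special import betaln

alpha_0 = 3.0
assert np.isclose(betaln(1.0, alpha_0), -np.log(alpha_0))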