The following 22 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.lgamma().
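Before the examples, a quick orientation: tf.lgamma computes the log of the absolute value of the Gamma function elementwise, so tf.lgamma(n + 1) equals log(n!) for non-negative n, which is why it appears in so many of the log-probability computations below. The sketch that follows is illustrative and assumes the TensorFlow 1.x API (where tf.lgamma is a top-level alias; TensorFlow 2.x exposes the same op as tf.math.lgamma). Note that tf.lgamma only accepts floating-point inputs, so integer counts must be cast first.

import tensorflow as tf

# lgamma(n + 1) == log(n!): a numerically stable way to get log-factorials.
n = tf.constant([0., 1., 2., 3., 4., 5.])
log_factorial = tf.lgamma(n + 1)

with tf.Session() as sess:
    print(sess.run(log_factorial))
    # [0.        0.        0.6931472 1.7917595 3.1780539 4.787492 ]
    # i.e. log(0!), log(1!), log(2!), log(3!), log(4!), log(5!)
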
Example 1:

def _log_prob(self, given):
    # TODO: not right when given=0 or 1
    alpha, beta = self.alpha, self.beta
    log_given = tf.log(given)
    log_1_minus_given = tf.log(1 - given)
    lgamma_alpha, lgamma_beta = tf.lgamma(alpha), tf.lgamma(beta)
    lgamma_alpha_plus_beta = tf.lgamma(alpha + beta)

    if self._check_numerics:
        log_given = tf.check_numerics(log_given, "log(given)")
        log_1_minus_given = tf.check_numerics(
            log_1_minus_given, "log(1 - given)")
        lgamma_alpha = tf.check_numerics(lgamma_alpha, "lgamma(alpha)")
        lgamma_beta = tf.check_numerics(lgamma_beta, "lgamma(beta)")
        lgamma_alpha_plus_beta = tf.check_numerics(
            lgamma_alpha_plus_beta, "lgamma(alpha + beta)")

    return (alpha - 1) * log_given + (beta - 1) * log_1_minus_given - (
        lgamma_alpha + lgamma_beta - lgamma_alpha_plus_beta)

Example 2:

def _log_prob(self, given):
    logits = self.logits
    n = tf.cast(self.n_experiments, self.param_dtype)
    given = tf.cast(given, self.param_dtype)

    log_1_minus_p = -tf.nn.softplus(logits)
    lgamma_n_plus_1 = tf.lgamma(n + 1)
    lgamma_given_plus_1 = tf.lgamma(given + 1)
    lgamma_n_minus_given_plus_1 = tf.lgamma(n - given + 1)

    if self._check_numerics:
        lgamma_given_plus_1 = tf.check_numerics(
            lgamma_given_plus_1, "lgamma(given + 1)")
        lgamma_n_minus_given_plus_1 = tf.check_numerics(
            lgamma_n_minus_given_plus_1, "lgamma(n - given + 1)")

    return lgamma_n_plus_1 - lgamma_n_minus_given_plus_1 - \
        lgamma_given_plus_1 + given * logits + n * log_1_minus_p

Example 3:

def _log_prob(self, given):
    logits, temperature = self.path_param(self.logits), \
                          self.path_param(self.temperature)
    log_given = tf.log(given)
    log_temperature = tf.log(temperature)
    n = tf.cast(self.n_categories, self.dtype)

    if self._check_numerics:
        log_given = tf.check_numerics(log_given, "log(given)")
        log_temperature = tf.check_numerics(
            log_temperature, "log(temperature)")

    temp = logits - temperature * log_given

    return tf.lgamma(n) + (n - 1) * log_temperature + \
        tf.reduce_sum(temp - log_given, axis=-1) - \
        n * tf.reduce_logsumexp(temp, axis=-1)

Example 4:

def Poisson(lambda_, name=None):
    k = tf.placeholder(config.int_dtype, name=name)

    # FIXME tf.lgamma only supports floats so cast before
    Distribution.logp = (
        tf.cast(k, config.dtype) * tf.log(lambda_) -
        lambda_ -
        tf.lgamma(tf.cast(k + 1, config.dtype))
    )

    # TODO Distribution.integral = ...
    def integral(l, u):
        return tf.constant(1, dtype=config.dtype)

    Distribution.integral = integral

    return k

Example 5:

def tf_parameterize(self, x):
    # Softplus to ensure alpha and beta >= 1
    # epsilon < 1.0, hence negative
    log_eps = log(util.epsilon)

    alpha = self.alpha.apply(x=x)
    alpha = tf.clip_by_value(t=alpha, clip_value_min=log_eps, clip_value_max=-log_eps)
    alpha = tf.log(x=(tf.exp(x=alpha) + 1.0)) + 1.0

    beta = self.beta.apply(x=x)
    beta = tf.clip_by_value(t=beta, clip_value_min=log_eps, clip_value_max=-log_eps)
    beta = tf.log(x=(tf.exp(x=beta) + 1.0)) + 1.0

    shape = (-1,) + self.shape
    alpha = tf.reshape(tensor=alpha, shape=shape)
    beta = tf.reshape(tensor=beta, shape=shape)

    alpha_beta = tf.maximum(x=(alpha + beta), y=util.epsilon)
    log_norm = tf.lgamma(x=alpha) + tf.lgamma(x=beta) - tf.lgamma(x=alpha_beta)

    return alpha, beta, alpha_beta, log_norm

Example 6:

def GumbelSoftmaxLogDensity(y, p, tau):
    # EPS = tf.constant(1e-10)
    k = tf.shape(y)[-1]
    k = tf.cast(k, tf.float32)
    # y = y + EPS
    # y = tf.divide(y, tf.reduce_sum(y, -1, keep_dims=True))
    y = normalize_to_unit_sum(y)
    sum_p_over_y = tf.reduce_sum(tf.divide(p, tf.pow(y, tau)), -1)
    logp = tf.lgamma(k)
    logp = logp + (k - 1) * tf.log(tau)
    logp = logp - k * tf.log(sum_p_over_y)
    logp = logp + sum_p_over_y
    return logp

Example 7:

def gammaPrior(alpha, beta, n, m):
    return - (alpha - n) * tf.digamma(alpha) + tf.lgamma(alpha) \
        - scipy.special.gammaln(n) - n * (tf.log(beta) - np.log(m)) \
        - alpha * (m / beta - 1.0)

def _log_prob(self, given): alpha, beta = self.alpha, self.beta log_given = tf.log(given) log_beta = tf.log(beta) lgamma_alpha = tf.lgamma(alpha) if self._check_numerics: log_given = tf.check_numerics(log_given, "log(given)") log_beta = tf.check_numerics(log_beta, "log(beta)") lgamma_alpha = tf.check_numerics(lgamma_alpha, "lgamma(alpha)") return alpha * log_beta - lgamma_alpha + (alpha - 1) * log_given - \ beta * given
Example 9:

def _log_prob(self, given):
    rate = self.rate
    given = tf.cast(given, self.param_dtype)
    log_rate = tf.log(rate)
    lgamma_given_plus_1 = tf.lgamma(given + 1)

    if self._check_numerics:
        log_rate = tf.check_numerics(log_rate, "log(rate)")
        lgamma_given_plus_1 = tf.check_numerics(
            lgamma_given_plus_1, "lgamma(given + 1)")

    return given * log_rate - rate - lgamma_given_plus_1

Example 10:

def _log_prob(self, given):
    alpha, beta = self.alpha, self.beta
    log_given = tf.log(given)
    log_beta = tf.log(beta)
    lgamma_alpha = tf.lgamma(alpha)

    if self._check_numerics:
        log_given = tf.check_numerics(log_given, "log(given)")
        log_beta = tf.check_numerics(log_beta, "log(beta)")
        lgamma_alpha = tf.check_numerics(lgamma_alpha, "lgamma(alpha)")

    return alpha * log_beta - lgamma_alpha - (alpha + 1) * log_given - \
        beta / given

Example 11:

def log_combination(n, ks):
    """
    Compute the log combination function.

    .. math::

        \\log \\binom{n}{k_1, k_2, \\dots} = \\log n! - \\sum_{i}\\log k_i!

    :param n: A N-D `float` Tensor. Can broadcast to match `ks[:-1]`.
    :param ks: A (N + 1)-D `float` Tensor. Each slice `[i, j, ..., k, :]` is
        a vector of `[k_1, k_2, ...]`.

    :return: A N-D Tensor of type same as `n`.
    """
    return tf.lgamma(n + 1) - tf.reduce_sum(tf.lgamma(ks + 1), axis=-1)

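A hypothetical usage sketch for log_combination above (values are illustrative; TF 1.x session API as before): with n = 5 and ks = [2, 3], the multinomial coefficient is 5! / (2! 3!) = 10, so the result should be log 10 ≈ 2.3026.

n = tf.constant(5.)
ks = tf.constant([2., 3.])
with tf.Session() as sess:
    print(sess.run(log_combination(n, ks)))  # ~2.3026 == log(10)
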
Example 12:

def variational_expectations(self, Fmu, Fvar, Y):
    if self.invlink is tf.exp:
        return Y * Fmu - tf.exp(Fmu + Fvar / 2) * self.binsize \
            - tf.lgamma(Y + 1) + Y * tf.log(self.binsize)
    return super(Poisson, self).variational_expectations(Fmu, Fvar, Y)

Example 13:

def variational_expectations(self, Fmu, Fvar, Y):
    if self.invlink is tf.exp:
        return -self.shape * Fmu - tf.lgamma(self.shape) \
            + (self.shape - 1.) * tf.log(Y) - Y * tf.exp(-Fmu + Fvar / 2.)
    else:
        return Likelihood.variational_expectations(self, Fmu, Fvar, Y)

Example 14:

def poisson(lamb, y):
    return y * tf.log(lamb) - lamb - tf.lgamma(y + 1.)

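As a sanity check, the expression above can be compared against scipy.stats.poisson.logpmf; this sketch assumes SciPy is available and uses illustrative values:

from scipy.stats import poisson as sp_poisson

with tf.Session() as sess:
    tf_val = sess.run(poisson(tf.constant(3.0), tf.constant(4.0)))
print(tf_val)                        # ~ -1.7836
print(sp_poisson.logpmf(4, mu=3.0))  # ~ -1.7836
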
Example 15:

def gamma(shape, scale, x):
    return -shape * tf.log(scale) - tf.lgamma(shape) \
        + (shape - 1.) * tf.log(x) - x / scale

Example 16:

def student_t(x, mean, scale, deg_free):
    const = tf.lgamma(tf.cast((deg_free + 1.) * 0.5, settings.float_type)) \
        - tf.lgamma(tf.cast(deg_free * 0.5, settings.float_type)) \
        - 0.5 * (tf.log(tf.square(scale)) +
                 tf.cast(tf.log(deg_free), settings.float_type) +
                 np.log(np.pi))
    const = tf.cast(const, settings.float_type)
    return const - 0.5 * (deg_free + 1.) * \
        tf.log(1. + (1. / deg_free) * (tf.square((x - mean) / scale)))

Example 17:

def setUp(self):
    super(CoreUnaryOpsTest, self).setUp()

    self.ops = [
        ('abs', operator.abs, tf.abs, core.abs_function),
        ('neg', operator.neg, tf.neg, core.neg),
        # TODO(shoyer): add unary + to core TensorFlow
        ('pos', None, None, None),
        ('sign', None, tf.sign, core.sign),
        ('reciprocal', None, tf.reciprocal, core.reciprocal),
        ('square', None, tf.square, core.square),
        ('round', None, tf.round, core.round_function),
        ('sqrt', None, tf.sqrt, core.sqrt),
        ('rsqrt', None, tf.rsqrt, core.rsqrt),
        ('log', None, tf.log, core.log),
        ('exp', None, tf.exp, core.exp),
        ('ceil', None, tf.ceil, core.ceil),
        ('floor', None, tf.floor, core.floor),
        ('cos', None, tf.cos, core.cos),
        ('sin', None, tf.sin, core.sin),
        ('tan', None, tf.tan, core.tan),
        ('acos', None, tf.acos, core.acos),
        ('asin', None, tf.asin, core.asin),
        ('atan', None, tf.atan, core.atan),
        ('lgamma', None, tf.lgamma, core.lgamma),
        ('digamma', None, tf.digamma, core.digamma),
        ('erf', None, tf.erf, core.erf),
        ('erfc', None, tf.erfc, core.erfc),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    self.test_lt = core.LabeledTensor(
        tf.cast(self.original_lt, tf.float32) / total_size,
        self.original_lt.axes)

Example 18:

def poisson_loss(y_true, y_pred):
    y_pred = tf.cast(y_pred, tf.float32)
    y_true = tf.cast(y_true, tf.float32)

    # we can use the Poisson PMF from TensorFlow as well
    # dist = tf.contrib.distributions
    # return -tf.reduce_mean(dist.Poisson(y_pred).log_pmf(y_true))

    nelem = _nelem(y_true)
    y_true = _nan2zero(y_true)

    # last term can be avoided since it doesn't depend on y_pred
    # however keeping it gives a nice lower bound to zero
    ret = y_pred - y_true * tf.log(y_pred + 1e-10) + tf.lgamma(y_true + 1.0)

    return tf.divide(tf.reduce_sum(ret), nelem)


# We need a class (or closure) here,
# because it's not possible to
# pass extra arguments to Keras loss functions
# See https://github.com/fchollet/keras/issues/2121

# dispersion (theta) parameter is a scalar by default.
# scale_factor scales the nbinom mean before the
# calculation of the loss to balance the
# learning rates of theta and network weights

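A minimal usage sketch: Keras accepts any callable with the (y_true, y_pred) signature as a loss, so the function above can be passed directly. The model here is a hypothetical, already-built Keras model with a non-negative output:

# Hypothetical model; poisson_loss is used like any built-in loss.
model.compile(optimizer='rmsprop', loss=poisson_loss)
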
Example 19:

def test_Lgamma(self):
    t = tf.lgamma(self.random(4, 3))
    self.check(t)

Example 20:

def kl_Beta(alpha, beta, alpha_0, beta_0):
    return tf.reduce_sum(
        tf.lgamma(alpha_0) + tf.lgamma(beta_0) - tf.lgamma(alpha_0 + beta_0)
        + tf.lgamma(alpha + beta) - tf.lgamma(alpha) - tf.lgamma(beta)
        + (alpha - alpha_0) * tf.digamma(alpha)
        + (beta - beta_0) * tf.digamma(beta)
        - (alpha + beta - alpha_0 - beta_0) * tf.digamma(alpha + beta))

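A quick sanity check of the formula above (illustrative values, TF 1.x session API): the KL divergence of a Beta distribution from itself is zero, and KL(Beta(2, 2) || Beta(1, 1)) works out to log 6 + 2*psi(2) - 2*psi(4) ≈ 0.1251.

a, b = tf.constant(2.0), tf.constant(3.0)
with tf.Session() as sess:
    print(sess.run(kl_Beta(a, b, a, b)))          # 0.0
    print(sess.run(kl_Beta(2.0, 2.0, 1.0, 1.0)))  # ~0.1251
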
Example 21:

def kl_Beta(alpha, beta, alpha_0, beta_0):
    return tf.reduce_sum(
        math.lgamma(alpha_0) + math.lgamma(beta_0)
        - math.lgamma(alpha_0 + beta_0)
        + tf.lgamma(alpha + beta) - tf.lgamma(alpha) - tf.lgamma(beta)
        + (alpha - alpha_0) * tf.digamma(alpha)
        + (beta - beta_0) * tf.digamma(beta)
        - (alpha + beta - alpha_0 - beta_0) * tf.digamma(alpha + beta)
    )

Example 22:

def loss(self, y_true, y_pred, mean=True):
    scale_factor = self.scale_factor
    eps = self.eps

    with tf.name_scope(self.scope):
        y_true = tf.cast(y_true, tf.float32)
        y_pred = tf.cast(y_pred, tf.float32) * scale_factor

        if self.masking:
            nelem = _nelem(y_true)
            y_true = _nan2zero(y_true)

        # Clip theta
        theta = tf.minimum(self.theta, 1e6)

        t1 = tf.lgamma(theta + eps) + tf.lgamma(y_true + 1.0) - \
            tf.lgamma(y_true + theta + eps)
        t2 = (theta + y_true) * tf.log(1.0 + (y_pred / (theta + eps))) + \
            (y_true * (tf.log(theta + eps) - tf.log(y_pred + eps)))

        if self.debug:
            assert_ops = [
                tf.verify_tensor_all_finite(y_pred, 'y_pred has inf/nans'),
                tf.verify_tensor_all_finite(t1, 't1 has inf/nans'),
                tf.verify_tensor_all_finite(t2, 't2 has inf/nans')]

            tf.summary.histogram('t1', t1)
            tf.summary.histogram('t2', t2)

            with tf.control_dependencies(assert_ops):
                final = t1 + t2
        else:
            final = t1 + t2

        final = _nan2inf(final)

        if mean:
            if self.masking:
                final = tf.divide(tf.reduce_sum(final), nelem)
            else:
                final = tf.reduce_mean(final)

    return final