This article collects typical usage examples of the Python function tensorflow.lgamma. If you are wondering what lgamma does, how to call it, or what real code that uses it looks like, the hand-picked examples below may help.
The following shows 15 code examples of the lgamma function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
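Before the examples, a minimal sketch (not part of the original collection) of what tf.lgamma computes: the natural log of the absolute value of the gamma function, so tf.lgamma(n + 1.0) equals log(n!) for non-negative integers. The snippet assumes TensorFlow 1.x, where tf.lgamma, tf.log and tf.Session are top-level; in TensorFlow 2.x the math ops live under tf.math.

import numpy as np
import tensorflow as tf

x = tf.constant([1.0, 5.0, 0.5])
log_gamma = tf.lgamma(x)  # elementwise log|Gamma(x)|

with tf.Session() as sess:
    print(sess.run(log_gamma))
    # ~[0.  3.178  0.572]: lgamma(1) = 0, lgamma(5) = log(4!) = log(24),
    # lgamma(0.5) = log(sqrt(pi))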
Example 1: beta_log_prob

def beta_log_prob(self, val):
    conc0 = self.parameters['concentration0']
    conc1 = self.parameters['concentration1']
    result = (conc1 - 1.0) * tf.log(val)
    result += (conc0 - 1.0) * tf.log(1.0 - val)
    # the lgamma terms together form -log B(conc1, conc0)
    result += -tf.lgamma(conc1) - tf.lgamma(conc0) + tf.lgamma(conc1 + conc0)
    return result
Example 2: logpmf

def logpmf(self, x, n, p):
    """Log of the probability mass function.

    Parameters
    ----------
    x : tf.Tensor
        An n-D tensor for n > 1, where the inner (right-most)
        dimension represents the multivariate dimension. Each
        element is the number of outcomes in a bucket, not a
        one-hot encoding.
    n : tf.Tensor
        A tensor of one dimension less than ``x``, representing
        the total number of outcomes, equal to the sum of x[i]
        along the inner (right-most) dimension.
    p : tf.Tensor
        A tensor of one dimension less than ``x``, representing
        probabilities which sum to 1.

    Returns
    -------
    tf.Tensor
        A tensor of one dimension less than the input.
    """
    x = tf.cast(x, dtype=tf.float32)
    n = tf.cast(n, dtype=tf.float32)
    p = tf.cast(p, dtype=tf.float32)
    multivariate_idx = len(get_dims(x)) - 1
    if multivariate_idx == 0:
        return tf.lgamma(n + 1.0) - \
            tf.reduce_sum(tf.lgamma(x + 1.0)) + \
            tf.reduce_sum(x * tf.log(p))
    else:
        return tf.lgamma(n + 1.0) - \
            tf.reduce_sum(tf.lgamma(x + 1.0), multivariate_idx) + \
            tf.reduce_sum(x * tf.log(p), multivariate_idx)
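As a quick sanity check of the formula above (an added sketch, not part of the original snippet), the same lgamma-based expression can be evaluated in NumPy and compared against scipy.stats.multinomial; the counts x, total n, and probabilities p below are made up.

import numpy as np
from scipy.special import gammaln
from scipy.stats import multinomial

x = np.array([2., 3., 5.])      # counts per bucket
n = x.sum()                     # total number of trials (10)
p = np.array([0.2, 0.3, 0.5])   # bucket probabilities summing to 1

# lgamma-based multinomial log pmf, mirroring the TensorFlow code above
logpmf = gammaln(n + 1.) - gammaln(x + 1.).sum() + (x * np.log(p)).sum()

print(np.allclose(logpmf, multinomial.logpmf(x, n=10, p=p)))  # True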
Example 3: logpdf

def logpdf(self, x, df, loc=0, scale=1):
    """Log of the probability density function.

    Parameters
    ----------
    x : tf.Tensor
        An n-D tensor.
    df : tf.Tensor
        A tensor of the same shape as ``x``, with all elements
        constrained to :math:`df > 0`.
    loc : tf.Tensor
        A tensor of the same shape as ``x``.
    scale : tf.Tensor
        A tensor of the same shape as ``x``, with all elements
        constrained to :math:`scale > 0`.

    Returns
    -------
    tf.Tensor
        A tensor of the same shape as the input.
    """
    x = tf.cast(x, dtype=tf.float32)
    df = tf.cast(df, dtype=tf.float32)
    loc = tf.cast(loc, dtype=tf.float32)
    scale = tf.cast(scale, dtype=tf.float32)
    z = (x - loc) / scale
    return tf.lgamma(0.5 * (df + 1.0)) - tf.lgamma(0.5 * df) - \
        0.5 * (tf.log(np.pi) + tf.log(df)) - tf.log(scale) - \
        0.5 * (df + 1.0) * tf.log(1.0 + (1.0 / df) * tf.square(z))
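For reference (an added check, not from the original), the same Student-t log density can be verified against SciPy; x, df, loc, and scale below are arbitrary values.

import numpy as np
from scipy.special import gammaln
from scipy.stats import t

x, df, loc, scale = 1.3, 4.0, 0.5, 2.0
z = (x - loc) / scale

# lgamma-based Student-t log pdf, mirroring the TensorFlow code above
logpdf = (gammaln(0.5 * (df + 1.)) - gammaln(0.5 * df)
          - 0.5 * (np.log(np.pi) + np.log(df)) - np.log(scale)
          - 0.5 * (df + 1.) * np.log(1. + (1. / df) * z ** 2))

print(np.allclose(logpdf, t.logpdf(x, df, loc=loc, scale=scale)))  # True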
Example 4: beta

def beta(alpha, beta, y):
    # need to clip y, since log of 0 is nan...
    y = tf.clip_by_value(y, 1e-6, 1 - 1e-6)
    return (alpha - 1.) * tf.log(y) + (beta - 1.) * tf.log(1. - y) \
        + tf.lgamma(alpha + beta) \
        - tf.lgamma(alpha) \
        - tf.lgamma(beta)
Example 5: beta

def beta(x, alpha, beta):
    # need to clip x, since log of 0 is nan...
    x = tf.clip_by_value(x, 1e-6, 1 - 1e-6)
    return (alpha - 1.) * tf.log(x) + (beta - 1.) * tf.log(1. - x) \
        + tf.lgamma(alpha + beta) \
        - tf.lgamma(alpha) \
        - tf.lgamma(beta)
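Examples 4 and 5 compute the same Beta log density; the three lgamma terms together form -log B(alpha, beta). A small NumPy cross-check against SciPy (an added sketch with arbitrary values):

import numpy as np
from scipy.special import gammaln
from scipy.stats import beta as beta_dist

x, a, b = 0.3, 2.0, 5.0

# (a-1)*log(x) + (b-1)*log(1-x) - log B(a, b), with log B expanded via gammaln
logpdf = ((a - 1.) * np.log(x) + (b - 1.) * np.log(1. - x)
          + gammaln(a + b) - gammaln(a) - gammaln(b))

print(np.allclose(logpdf, beta_dist.logpdf(x, a, b)))  # True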
Example 6: multinomial_log_prob

def multinomial_log_prob(self, val):
    n = self.parameters['total_count']
    probs = self.parameters['probs']
    f_n = tf.cast(n, tf.float32)
    f_val = tf.cast(val, tf.float32)
    result = tf.reduce_sum(tf.log(probs) * f_val, -1)
    result += tf.lgamma(f_n + 1) - tf.reduce_sum(tf.lgamma(f_val + 1), -1)
    return result
Example 7: student_t

def student_t(x, mean, scale, deg_free):
    const = tf.lgamma(tf.cast((deg_free + 1.) * 0.5, tf.float64)) \
        - tf.lgamma(tf.cast(deg_free * 0.5, tf.float64)) \
        - 0.5 * (tf.log(tf.square(scale)) + tf.cast(tf.log(deg_free), tf.float64)
                 + np.log(np.pi))
    const = tf.cast(const, tf.float64)
    return const - 0.5 * (deg_free + 1.) * \
        tf.log(1. + (1. / deg_free) * (tf.square((x - mean) / scale)))
Example 8: binomial_log_prob

def binomial_log_prob(self, val):
    n = self.parameters['total_count']
    probs = self.parameters['probs']
    f_n = tf.cast(n, tf.float32)
    f_val = tf.cast(val, tf.float32)
    result = f_val * tf.log(probs) + (f_n - f_val) * tf.log(1.0 - probs)
    result += tf.lgamma(f_n + 1) - tf.lgamma(f_val + 1) - \
        tf.lgamma(f_n - f_val + 1)
    return result
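The three lgamma terms above are the log binomial coefficient, log C(n, k) = lgamma(n + 1) - lgamma(k + 1) - lgamma(n - k + 1). A short worked check (an added snippet) with n = 5, k = 2, where C(5, 2) = 10:

import numpy as np
from scipy.special import gammaln

n, k = 5., 2.
log_choose = gammaln(n + 1.) - gammaln(k + 1.) - gammaln(n - k + 1.)
print(np.exp(log_choose))  # ~10.0, i.e. C(5, 2)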
Example 9: log_nb_positive

def log_nb_positive(x, mu, theta, eps=1e-8):
    """Log likelihood (scalar) of a minibatch according to a negative binomial model.

    Variables:
        mu: mean of the negative binomial (must have positive support) (shape: minibatch x genes)
        theta: inverse dispersion parameter (must have positive support) (shape: minibatch x genes)
        eps: numerical stability constant
    """
    res = tf.lgamma(x + theta) - tf.lgamma(theta) - tf.lgamma(x + 1) + x * tf.log(mu + eps) \
        - x * tf.log(theta + mu + eps) + theta * tf.log(theta + eps) \
        - theta * tf.log(theta + mu + eps)
    return tf.reduce_sum(res, axis=-1)
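For intuition (an added sketch, not part of the original snippet): in this mean/inverse-dispersion parameterization, each per-element term equals scipy.stats.nbinom.logpmf(x, n=theta, p=theta / (theta + mu)); the values below are arbitrary.

import numpy as np
from scipy.special import gammaln
from scipy.stats import nbinom

x, mu, theta = 7., 4., 2.0  # count, NB mean, inverse dispersion

# per-element term from log_nb_positive above, with eps = 0
res = (gammaln(x + theta) - gammaln(theta) - gammaln(x + 1.)
       + x * np.log(mu) - x * np.log(theta + mu)
       + theta * np.log(theta) - theta * np.log(theta + mu))

print(np.allclose(res, nbinom.logpmf(x, n=theta, p=theta / (theta + mu))))  # True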
Example 10: chi2_log_prob

def chi2_log_prob(self, val):
    df = self.parameters['df']
    eta = 0.5 * df - 1
    # log p(val) = (df/2 - 1) * log(val) - val/2 - lgamma(df/2) - (df/2) * log(2)
    result = eta * tf.log(val)
    result -= 0.5 * val
    result -= tf.lgamma(eta + 1) + (eta + 1) * tf.log(2.0)
    return result
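Since the chi-squared distribution is a Gamma(df/2, rate=1/2) distribution, the expression above can be cross-checked against scipy.stats.chi2 (an added snippet with arbitrary values):

import numpy as np
from scipy.special import gammaln
from scipy.stats import chi2

val, df = 3.0, 5.0
eta = 0.5 * df - 1

logpdf = eta * np.log(val) - 0.5 * val - gammaln(eta + 1) - (eta + 1) * np.log(2.0)
print(np.allclose(logpdf, chi2.logpdf(val, df)))  # True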
Example 11: gamma_log_prob

def gamma_log_prob(self, val):
    conc = self.parameters['concentration']
    rate = self.parameters['rate']
    result = (conc - 1.0) * tf.log(val)
    result -= rate * val
    result += -tf.lgamma(conc) + conc * tf.log(rate)
    return result
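A NumPy check of the same Gamma log density (an added snippet); note that SciPy's gamma distribution is parameterized by a scale, the reciprocal of the rate used here. The values are arbitrary.

import numpy as np
from scipy.special import gammaln
from scipy.stats import gamma

val, conc, rate = 3.0, 2.5, 1.5

logpdf = (conc - 1.) * np.log(val) - rate * val - gammaln(conc) + conc * np.log(rate)
print(np.allclose(logpdf, gamma.logpdf(val, a=conc, scale=1. / rate)))  # True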
Example 12: inverse_gamma_log_prob

def inverse_gamma_log_prob(self, val):
    conc = self.parameters['concentration']
    rate = self.parameters['rate']
    result = -(conc + 1) * tf.log(val)
    result -= rate * tf.reciprocal(val)
    result += -tf.lgamma(conc) + conc * tf.log(rate)
    return result
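Likewise for the inverse-Gamma density (an added check): SciPy's invgamma uses a shape a and a scale that plays the role of the rate above. Arbitrary values again.

import numpy as np
from scipy.special import gammaln
from scipy.stats import invgamma

val, conc, rate = 2.0, 3.0, 1.5

logpdf = -(conc + 1) * np.log(val) - rate / val - gammaln(conc) + conc * np.log(rate)
print(np.allclose(logpdf, invgamma.logpdf(val, a=conc, scale=rate)))  # True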
Example 13: testPoissonLogPmfContinuousRelaxation

def testPoissonLogPmfContinuousRelaxation(self):
    batch_size = 12
    lam = tf.constant([3.0] * batch_size)
    x = np.array([-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.]).astype(
        np.float32)
    poisson = self._make_poisson(rate=lam,
                                 interpolate_nondiscrete=True)
    expected_continuous_log_pmf = (x * poisson.log_rate - tf.lgamma(1. + x)
                                   - poisson.rate)
    neg_inf = tf.fill(
        tf.shape(expected_continuous_log_pmf),
        value=np.array(-np.inf,
                       dtype=expected_continuous_log_pmf.dtype.as_numpy_dtype))
    expected_continuous_log_pmf = tf.where(x >= 0.,
                                           expected_continuous_log_pmf,
                                           neg_inf)
    expected_continuous_pmf = tf.exp(expected_continuous_log_pmf)

    log_pmf = poisson.log_prob(x)
    self.assertEqual(log_pmf.get_shape(), (batch_size,))
    self.assertAllClose(self.evaluate(log_pmf),
                        self.evaluate(expected_continuous_log_pmf))

    pmf = poisson.prob(x)
    self.assertEqual(pmf.get_shape(), (batch_size,))
    self.assertAllClose(self.evaluate(pmf),
                        self.evaluate(expected_continuous_pmf))
Example 14: Poisson

def Poisson(lambda_, name=None):
    k = tf.placeholder(config.int_dtype, name=name)
    f_k = tf.cast(k, tf.float32)
    # Poisson log pmf: k * log(lambda) - lambda - log(k!)
    Distribution.logp = f_k * tf.log(lambda_) - lambda_ - tf.lgamma(f_k + 1)
    # TODO Distribution.integral = ...
    return k
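A quick check of the Poisson log pmf formula used above (an added snippet with arbitrary values):

import numpy as np
from scipy.special import gammaln
from scipy.stats import poisson

k, lam = 4, 2.5
logp = k * np.log(lam) - lam - gammaln(k + 1.)
print(np.allclose(logp, poisson.logpmf(k, mu=lam)))  # True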
Example 15: actual_hypersphere_volume

def actual_hypersphere_volume(dims, radius):
    # https://en.wikipedia.org/wiki/Volume_of_an_n-ball
    # Using tf.lgamma because we'd otherwise have to use SciPy, which is not
    # a required dependency of core.
    radius = np.asarray(radius)
    dims = tf.cast(dims, dtype=radius.dtype)
    return tf.exp((dims / 2.) * np.log(np.pi) - tf.lgamma(1. + dims / 2.) +
                  dims * tf.log(radius))
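This computes V_n(r) = pi^(n/2) * r^n / Gamma(n/2 + 1) in log space. A short worked check of the same formula in plain NumPy (an added snippet): for dims = 3 and radius = 2 it should match the familiar (4/3) * pi * r^3.

import numpy as np
from scipy.special import gammaln

def hypersphere_volume(dims, radius):
    # V_n(r) = pi**(n/2) * r**n / Gamma(n/2 + 1), evaluated in log space
    return np.exp((dims / 2.) * np.log(np.pi) - gammaln(1. + dims / 2.)
                  + dims * np.log(radius))

print(hypersphere_volume(3, 2.0))   # ~33.51
print(4. / 3. * np.pi * 2.0 ** 3)   # ~33.51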