This article collects typical usage examples of the Python function tensorflow.lbeta. If you are wondering what tf.lbeta does, how to call it, or what it is typically used for, the curated code examples below should help.
The following sections present 15 code examples of the lbeta function, ordered by popularity by default.
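As a quick orientation (not one of the collected examples), tf.lbeta(x) returns the natural log of the multivariate Beta function, reducing over the last dimension of x; it is equivalent to sum(lgamma(x), -1) - lgamma(sum(x, -1)). A minimal sketch, assuming the TensorFlow 1.x API that the examples below use:

import tensorflow as tf

x = tf.constant([[1., 1.], [2., 1.]])
log_beta = tf.lbeta(x)  # reduces over the last axis, so the result has shape (2,)

with tf.Session() as sess:
    # B(1, 1) = 1 and B(2, 1) = 1/2, so exp(lbeta(x)) is approximately [1.0, 0.5]
    print(sess.run(tf.exp(log_beta)))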
Example 1: entropy
def entropy(self, alpha):
    """Entropy of probability distribution.

    Parameters
    ----------
    alpha : tf.Tensor
        An n-D tensor with each :math:`\\alpha` constrained to
        :math:`\\alpha_i > 0`.

    Returns
    -------
    tf.Tensor
        A tensor of one dimension less than the input.
    """
    alpha = tf.cast(alpha, dtype=tf.float32)
    multivariate_idx = len(get_dims(alpha)) - 1
    K = get_dims(alpha)[multivariate_idx]
    if multivariate_idx == 0:
        a = tf.reduce_sum(alpha)
        return tf.lbeta(alpha) + \
            (a - K) * tf.digamma(a) - \
            tf.reduce_sum((alpha - 1.0) * tf.digamma(alpha))
    else:
        a = tf.reduce_sum(alpha, multivariate_idx)
        return tf.lbeta(alpha) + \
            (a - K) * tf.digamma(a) - \
            tf.reduce_sum((alpha - 1.0) * tf.digamma(alpha), multivariate_idx)
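For reference, the return value above is the standard entropy of a Dirichlet distribution with concentration vector alpha (K components, with a = sum_i alpha_i), where tf.lbeta supplies the log B(alpha) term and psi is the digamma function:

H(\alpha) = \log B(\alpha) + (a - K)\,\psi(a) - \sum_{i=1}^{K} (\alpha_i - 1)\,\psi(\alpha_i)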
Example 2: logpdf
def logpdf(self, x, alpha):
    """Log of the probability density function.

    Parameters
    ----------
    x : tf.Tensor
        An n-D tensor for n > 1, where the inner (right-most)
        dimension represents the multivariate dimension.
    alpha : tf.Tensor
        A tensor of the same shape as ``x``, with each
        :math:`\\alpha` constrained to :math:`\\alpha_i > 0`.

    Returns
    -------
    tf.Tensor
        A tensor of one dimension less than the input.
    """
    x = tf.cast(x, dtype=tf.float32)
    alpha = tf.cast(alpha, dtype=tf.float32)
    multivariate_idx = len(get_dims(x)) - 1
    if multivariate_idx == 0:
        return -tf.lbeta(alpha) + tf.reduce_sum((alpha - 1.0) * tf.log(x))
    else:
        return -tf.lbeta(alpha) + \
            tf.reduce_sum((alpha - 1.0) * tf.log(x), multivariate_idx)
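As a sanity check (not part of the original snippet), the same Dirichlet log density, log Dir(x; alpha) = sum_i (alpha_i - 1) log x_i - log B(alpha), can be evaluated with SciPy:

import numpy as np
from scipy.stats import dirichlet

x = np.array([0.2, 0.3, 0.5])      # a point on the probability simplex
alpha = np.array([1.0, 2.0, 3.0])  # concentration parameters
# scipy.stats.dirichlet.logpdf takes the simplex point first, then alpha
print(dirichlet.logpdf(x, alpha))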
Example 3: test_two_dimensional_proper_shape
def test_two_dimensional_proper_shape(self):
    # Should evaluate to 1/2.
    x_one_half = [[2, 1.], [2, 1.]]
    with self.test_session(use_gpu=self._use_gpu):
        self.assertAllClose([0.5, 0.5], tf.exp(tf.lbeta(x_one_half)).eval())
        self.assertEqual((2,), tf.shape(tf.lbeta(x_one_half)).eval())
        self.assertEqual(tf.TensorShape([2]), tf.lbeta(x_one_half).get_shape())
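The expected value follows directly from the definition of the Beta function: B(2, 1) = Gamma(2) * Gamma(1) / Gamma(3) = 1 / 2. Because lbeta reduces over the last axis, the (2, 2) input yields a result of shape (2,), one value per row.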
Example 4: test_one_dimensional_arg
def test_one_dimensional_arg(self):
    # Should evaluate to 1 and 1/2.
    x_one = [1, 1.]
    x_one_half = [2, 1.]
    with self.test_session(use_gpu=self._use_gpu):
        self.assertAllClose(1, tf.exp(tf.lbeta(x_one)).eval())
        self.assertAllClose(0.5, tf.exp(tf.lbeta(x_one_half)).eval())
        self.assertEqual([], tf.lbeta(x_one).get_shape())
Example 5: test_length_1_last_dimension_results_in_one
def test_length_1_last_dimension_results_in_one(self):
    # If there is only one coefficient, the formula still works, and we get
    # one as the answer, always.
    x_a = [5.5]
    x_b = [0.1]
    with self.test_session(use_gpu=self._use_gpu):
        self.assertAllClose(1, tf.exp(tf.lbeta(x_a)).eval())
        self.assertAllClose(1, tf.exp(tf.lbeta(x_b)).eval())
        self.assertEqual((), tf.lbeta(x_a).get_shape())
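This holds because a length-1 argument gives B(a) = Gamma(a) / Gamma(a) = 1, so exp(lbeta([a])) is 1 for any positive a.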
Example 6: test_two_dimensional_arg_dynamic_alloc
def test_two_dimensional_arg_dynamic_alloc(self):
    # Should evaluate to 1/2.
    x_one_half = [[2, 1.], [2, 1.]]
    with self.test_session(use_gpu=self._use_gpu):
        ph = tf.placeholder(tf.float32)
        beta_ph = tf.exp(tf.lbeta(ph))
        self.assertAllClose([0.5, 0.5], beta_ph.eval(feed_dict={ph: x_one_half}))
Example 7: _entropy
def _entropy(self):
    v = tf.ones(self.batch_shape_tensor(),
                dtype=self.dtype)[..., tf.newaxis]
    u = v * self.df[..., tf.newaxis]
    beta_arg = tf.concat([u, v], -1) / 2.
    return (tf.log(tf.abs(self.scale)) +
            0.5 * tf.log(self.df) +
            tf.lbeta(beta_arg) +
            0.5 * (self.df + 1.) *
            (tf.digamma(0.5 * (self.df + 1.)) -
             tf.digamma(0.5 * self.df)))
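Here beta_arg stacks df/2 and 1/2 along the last axis, so tf.lbeta(beta_arg) is log B(nu/2, 1/2). The whole expression matches the differential entropy of a location-scale Student's t distribution with degrees of freedom nu = df and scale sigma:

H = \log|\sigma| + \tfrac{1}{2}\log\nu + \log B\!\left(\tfrac{\nu}{2}, \tfrac{1}{2}\right) + \tfrac{\nu + 1}{2}\left[\psi\!\left(\tfrac{\nu + 1}{2}\right) - \psi\!\left(\tfrac{\nu}{2}\right)\right]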
Example 8: _moment
def _moment(self, n):
    """Compute the n'th (uncentered) moment."""
    total_concentration = self.concentration1 + self.concentration0
    expanded_concentration1 = tf.ones_like(
        total_concentration, dtype=self.dtype) * self.concentration1
    expanded_concentration0 = tf.ones_like(
        total_concentration, dtype=self.dtype) * self.concentration0
    beta_arg0 = 1 + n / expanded_concentration1
    beta_arg = tf.stack([beta_arg0, expanded_concentration0], -1)
    log_moment = tf.log(expanded_concentration0) + tf.lbeta(beta_arg)
    return tf.exp(log_moment)
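For context: assuming concentration1 and concentration0 are the a and b parameters of a Kumaraswamy distribution (which is what this snippet appears to implement), this is the log-space form of the standard raw-moment identity m_n = b * B(1 + n/a, b), i.e. log m_n = log b + lbeta([1 + n/a, b]).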
Example 9: test_empty_rank2_or_greater_input_gives_empty_output_dynamic_alloc
def test_empty_rank2_or_greater_input_gives_empty_output_dynamic_alloc(self):
    with self.test_session(use_gpu=self._use_gpu):
        ph = tf.placeholder(tf.float32)
        self.assertAllEqual([], tf.lbeta(ph).eval(feed_dict={ph: [[]]}))
        self.assertAllEqual([[]], tf.lbeta(ph).eval(feed_dict={ph: [[[]]]}))
Example 10: test_empty_rank2_or_greater_input_gives_empty_output
def test_empty_rank2_or_greater_input_gives_empty_output(self):
    with self.test_session(use_gpu=self._use_gpu):
        self.assertAllEqual([], tf.lbeta([[]]).eval())
        self.assertEqual((0,), tf.lbeta([[]]).get_shape())
        self.assertAllEqual([[]], tf.lbeta([[[]]]).eval())
        self.assertEqual((1, 0), tf.lbeta([[[]]]).get_shape())
Example 11: test_complicated_shape
def test_complicated_shape(self):
    with self.test_session(use_gpu=self._use_gpu):
        x = tf.convert_to_tensor(np.random.rand(3, 2, 2))
        self.assertAllEqual((3, 2), tf.shape(tf.lbeta(x)).eval())
        self.assertEqual(tf.TensorShape([3, 2]), tf.lbeta(x).get_shape())
Example 12: _kl_dirichlet_dirichlet
def _kl_dirichlet_dirichlet(d1, d2, name=None):
    """Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet.

    Args:
      d1: instance of a Dirichlet distribution object.
      d2: instance of a Dirichlet distribution object.
      name: (optional) Name to use for created operations.
        Default is "kl_dirichlet_dirichlet".

    Returns:
      Batchwise KL(d1 || d2)
    """
    with tf.name_scope(name, "kl_dirichlet_dirichlet", values=[
        d1.concentration, d2.concentration]):
        # The KL between Dirichlet distributions can be derived as follows. We have
        #
        #   Dir(x; a) = 1 / B(a) * prod_i[x[i]^(a[i] - 1)]
        #
        # where B(a) is the multivariate Beta function:
        #
        #   B(a) = Gamma(a[1]) * ... * Gamma(a[n]) / Gamma(a[1] + ... + a[n])
        #
        # The KL is
        #
        #   KL(Dir(x; a), Dir(x; b)) = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))}
        #
        # so we'll need to know the log density of the Dirichlet. This is
        #
        #   log(Dir(x; a)) = sum_i[(a[i] - 1) log(x[i])] - log B(a)
        #
        # The only term that matters for the expectations is the log(x[i]). To
        # compute the expectation of this term over the Dirichlet density, we can
        # use the following facts about the Dirichlet in exponential family form:
        #   1. log(x[i]) is a sufficient statistic
        #   2. expected sufficient statistics (of any exp family distribution) are
        #      equal to derivatives of the log normalizer with respect to the
        #      corresponding natural parameters: E{T[i](x)} = dA/d(eta[i])
        #
        # To proceed, we can rewrite the Dirichlet density in exponential family
        # form as follows:
        #
        #   Dir(x; a) = exp{eta(a) . T(x) - A(a)}
        #
        # where '.' is the dot product of vectors eta and T, and A is a scalar:
        #
        #   eta[i](a) = a[i] - 1
        #   T[i](x)   = log(x[i])
        #   A(a)      = log B(a)
        #
        # Now, we can use fact (2) above to write
        #
        #   E_Dir(x; a)[log(x[i])]
        #       = dA(a) / da[i]
        #       = d/da[i] log B(a)
        #       = d/da[i] (sum_j lgamma(a[j]) - lgamma(sum_j a[j]))
        #       = digamma(a[i]) - digamma(sum_j a[j])
        #
        # Putting it all together, we have
        #
        #   KL[Dir(x; a) || Dir(x; b)]
        #       = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))}
        #       = E_Dir(x; a){sum_i[(a[i] - b[i]) log(x[i])]} - (lbeta(a) - lbeta(b))
        #       = sum_i[(a[i] - b[i]) * E_Dir(x; a){log(x[i])}] - lbeta(a) + lbeta(b)
        #       = sum_i[(a[i] - b[i]) * (digamma(a[i]) - digamma(sum_j a[j]))]
        #         - lbeta(a) + lbeta(b)
        digamma_sum_d1 = tf.digamma(
            tf.reduce_sum(d1.concentration, axis=-1, keepdims=True))
        digamma_diff = tf.digamma(d1.concentration) - digamma_sum_d1
        concentration_diff = d1.concentration - d2.concentration
        return (tf.reduce_sum(concentration_diff * digamma_diff, axis=-1) -
                tf.lbeta(d1.concentration) +
                tf.lbeta(d2.concentration))
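A minimal NumPy/SciPy sketch of the same closed form (a hypothetical helper, not part of the TensorFlow code), which can be handy for checking results outside a graph:

import numpy as np
from scipy.special import digamma, gammaln

def kl_dirichlet(a, b):
    # log B(x) = sum_i lgamma(x_i) - lgamma(sum_i x_i), reduced over the last axis
    lbeta = lambda x: gammaln(x).sum(-1) - gammaln(x.sum(-1))
    digamma_diff = digamma(a) - digamma(a.sum(-1, keepdims=True))
    return ((a - b) * digamma_diff).sum(-1) - lbeta(a) + lbeta(b)

print(kl_dirichlet(np.array([1., 2., 3.]), np.array([2., 2., 2.])))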
Example 13: test_empty_rank1_input_raises_value_error
def test_empty_rank1_input_raises_value_error(self):
    with self.test_session(use_gpu=self._use_gpu):
        with self.assertRaisesRegexp(ValueError, 'rank'):
            tf.lbeta([])
Example 14: test_empty_rank1_dynamic_alloc_input_raises_op_error
def test_empty_rank1_dynamic_alloc_input_raises_op_error(self):
    with self.test_session(use_gpu=self._use_gpu):
        ph = tf.placeholder(tf.float32)
        with self.assertRaisesOpError('rank'):
            tf.lbeta(ph).eval(feed_dict={ph: []})
Example 15: _log_normalization
def _log_normalization(self):
    return tf.lbeta(self.concentration)
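This one-liner works because the log normalizer of a Dirichlet with concentration alpha is exactly the log multivariate Beta function, which tf.lbeta computes over the last axis:

\log B(\alpha) = \sum_i \log \Gamma(\alpha_i) - \log \Gamma\Bigl(\sum_i \alpha_i\Bigr)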