This article collects typical usage examples of the kl_divergence function from tensorflow.python.ops.distributions.kullback_leibler in Python. If you are wondering what kl_divergence does, how to call it, or what real-world usage looks like, the curated examples below should help.
A total of 15 code examples of the kl_divergence function are shown below.
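Before the examples, here is a minimal sketch (not taken from the examples below) of calling kl_divergence directly on a pair of distributions that have a registered analytic KL, assuming a TensorFlow 1.x environment where these internal modules are importable:

from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal

# Two univariate Gaussians; Normal/Normal has a registered closed-form KL.
p = normal.Normal(loc=0.0, scale=1.0)
q = normal.Normal(loc=1.0, scale=2.0)

# Returns a Tensor holding the analytic KL(p || q); passing
# allow_nan_stats=False adds a runtime check that the result contains no
# NaNs (see Example 1 below).
kl = kullback_leibler.kl_divergence(p, q)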
Example 1: testDomainErrorExceptions
def testDomainErrorExceptions(self):

  class MyDistException(normal.Normal):
    pass

  # Register a KL implementation that always returns NaN.
  @kullback_leibler.RegisterKL(MyDistException, MyDistException)
  def _kl(a, b, name=None):  # pylint: disable=unused-argument,unused-variable
    return array_ops.identity([float("nan")])

  with self.cached_session():
    a = MyDistException(loc=0.0, scale=1.0, allow_nan_stats=False)
    kl = kullback_leibler.kl_divergence(a, a, allow_nan_stats=False)
    with self.assertRaisesOpError(
        "KL calculation between .* and .* returned NaN values"):
      self.evaluate(kl)
    with self.assertRaisesOpError(
        "KL calculation between .* and .* returned NaN values"):
      a.kl_divergence(a).eval()

    a = MyDistException(loc=0.0, scale=1.0, allow_nan_stats=True)
    kl_ok = kullback_leibler.kl_divergence(a, a)
    self.assertAllEqual([float("nan")], self.evaluate(kl_ok))
    self_kl_ok = a.kl_divergence(a)
    self.assertAllEqual([float("nan")], self.evaluate(self_kl_ok))
    cross_ok = a.cross_entropy(a)
    self.assertAllEqual([float("nan")], self.evaluate(cross_ok))
Example 2: testKLRaises
def testKLRaises(self):
  ind1 = independent_lib.Independent(
      distribution=normal_lib.Normal(
          loc=np.float32([-1., 1]),
          scale=np.float32([0.1, 0.5])),
      reinterpreted_batch_ndims=1)
  ind2 = independent_lib.Independent(
      distribution=normal_lib.Normal(
          loc=np.float32(-1),
          scale=np.float32(0.5)),
      reinterpreted_batch_ndims=0)

  with self.assertRaisesRegexp(
      ValueError, "Event shapes do not match"):
    kullback_leibler.kl_divergence(ind1, ind2)

  ind1 = independent_lib.Independent(
      distribution=normal_lib.Normal(
          loc=np.float32([-1., 1]),
          scale=np.float32([0.1, 0.5])),
      reinterpreted_batch_ndims=1)
  ind2 = independent_lib.Independent(
      distribution=mvn_diag_lib.MultivariateNormalDiag(
          loc=np.float32([-1., 1]),
          scale_diag=np.float32([0.1, 0.5])),
      reinterpreted_batch_ndims=0)

  with self.assertRaisesRegexp(
      NotImplementedError, "different event shapes"):
    kullback_leibler.kl_divergence(ind1, ind2)
Example 3: testBetaBetaKL
def testBetaBetaKL(self):
  with self.test_session() as sess:
    for shape in [(10,), (4, 5)]:
      a1 = 6.0 * np.random.random(size=shape) + 1e-4
      b1 = 6.0 * np.random.random(size=shape) + 1e-4
      a2 = 6.0 * np.random.random(size=shape) + 1e-4
      b2 = 6.0 * np.random.random(size=shape) + 1e-4

      # Take inverse softplus of values to test BetaWithSoftplusConcentration
      a1_sp = np.log(np.exp(a1) - 1.0)
      b1_sp = np.log(np.exp(b1) - 1.0)
      a2_sp = np.log(np.exp(a2) - 1.0)
      b2_sp = np.log(np.exp(b2) - 1.0)

      d1 = beta_lib.Beta(concentration1=a1, concentration0=b1)
      d2 = beta_lib.Beta(concentration1=a2, concentration0=b2)
      d1_sp = beta_lib.BetaWithSoftplusConcentration(concentration1=a1_sp,
                                                     concentration0=b1_sp)
      d2_sp = beta_lib.BetaWithSoftplusConcentration(concentration1=a2_sp,
                                                     concentration0=b2_sp)

      kl_expected = (special.betaln(a2, b2) - special.betaln(a1, b1) +
                     (a1 - a2) * special.digamma(a1) +
                     (b1 - b2) * special.digamma(b1) +
                     (a2 - a1 + b2 - b1) * special.digamma(a1 + b1))

      for dist1 in [d1, d1_sp]:
        for dist2 in [d2, d2_sp]:
          kl = kullback_leibler.kl_divergence(dist1, dist2)
          kl_val = sess.run(kl)
          self.assertEqual(kl.get_shape(), shape)
          self.assertAllClose(kl_val, kl_expected)

      # Make sure KL(d1||d1) is 0
      kl_same = sess.run(kullback_leibler.kl_divergence(d1, d1))
      self.assertAllClose(kl_same, np.zeros_like(kl_expected))
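For reference, kl_expected above implements the standard closed form for the KL divergence between two Beta distributions, with B the Beta function and \psi the digamma function:

KL\bigl(\mathrm{Beta}(a_1, b_1) \,\|\, \mathrm{Beta}(a_2, b_2)\bigr) = \ln B(a_2, b_2) - \ln B(a_1, b_1) + (a_1 - a_2)\,\psi(a_1) + (b_1 - b_2)\,\psi(b_1) + (a_2 - a_1 + b_2 - b_1)\,\psi(a_1 + b_1)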
Example 4: testDirichletDirichletKL
def testDirichletDirichletKL(self):
  conc1 = np.array([[1., 2., 3., 1.5, 2.5, 3.5],
                    [1.5, 2.5, 3.5, 4.5, 5.5, 6.5]])
  conc2 = np.array([[0.5, 1., 1.5, 2., 2.5, 3.]])

  d1 = dirichlet_lib.Dirichlet(conc1)
  d2 = dirichlet_lib.Dirichlet(conc2)
  x = d1.sample(int(1e4), seed=0)
  kl_sample = math_ops.reduce_mean(d1.log_prob(x) - d2.log_prob(x), 0)
  kl_actual = kullback_leibler.kl_divergence(d1, d2)

  kl_sample_val = self.evaluate(kl_sample)
  kl_actual_val = self.evaluate(kl_actual)

  self.assertEqual(conc1.shape[:-1], kl_actual.get_shape())

  if not special:
    return

  kl_expected = (
      special.gammaln(np.sum(conc1, -1))
      - special.gammaln(np.sum(conc2, -1))
      - np.sum(special.gammaln(conc1) - special.gammaln(conc2), -1)
      + np.sum((conc1 - conc2) * (special.digamma(conc1) - special.digamma(
          np.sum(conc1, -1, keepdims=True))), -1))

  self.assertAllClose(kl_expected, kl_actual_val, atol=0., rtol=1e-6)
  self.assertAllClose(kl_sample_val, kl_actual_val, atol=0., rtol=1e-1)

  # Make sure KL(d1||d1) is 0
  kl_same = self.evaluate(kullback_leibler.kl_divergence(d1, d1))
  self.assertAllClose(kl_same, np.zeros_like(kl_expected))
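The kl_expected expression is the standard closed form for the KL divergence between two Dirichlet distributions with concentration vectors \alpha and \beta, writing \alpha_0 = \sum_i \alpha_i and \beta_0 = \sum_i \beta_i:

KL\bigl(\mathrm{Dir}(\alpha) \,\|\, \mathrm{Dir}(\beta)\bigr) = \ln\Gamma(\alpha_0) - \ln\Gamma(\beta_0) - \sum_i\bigl[\ln\Gamma(\alpha_i) - \ln\Gamma(\beta_i)\bigr] + \sum_i (\alpha_i - \beta_i)\bigl(\psi(\alpha_i) - \psi(\alpha_0)\bigr)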
Example 5: testCategoricalCategoricalKL
def testCategoricalCategoricalKL(self):

  def np_softmax(logits):
    exp_logits = np.exp(logits)
    return exp_logits / exp_logits.sum(axis=-1, keepdims=True)

  with self.cached_session() as sess:
    for categories in [2, 10]:
      for batch_size in [1, 2]:
        p_logits = self._rng.random_sample((batch_size, categories))
        q_logits = self._rng.random_sample((batch_size, categories))
        p = onehot_categorical.OneHotCategorical(logits=p_logits)
        q = onehot_categorical.OneHotCategorical(logits=q_logits)
        prob_p = np_softmax(p_logits)
        prob_q = np_softmax(q_logits)
        kl_expected = np.sum(
            prob_p * (np.log(prob_p) - np.log(prob_q)), axis=-1)

        kl_actual = kullback_leibler.kl_divergence(p, q)
        kl_same = kullback_leibler.kl_divergence(p, p)
        x = p.sample(int(2e4), seed=0)
        x = math_ops.cast(x, dtype=dtypes.float32)
        # Compute empirical KL(p||q).
        kl_sample = math_ops.reduce_mean(p.log_prob(x) - q.log_prob(x), 0)

        [kl_sample_, kl_actual_, kl_same_] = sess.run([kl_sample, kl_actual,
                                                       kl_same])

        self.assertEqual(kl_actual.get_shape(), (batch_size,))
        self.assertAllClose(kl_same_, np.zeros_like(kl_expected))
        self.assertAllClose(kl_actual_, kl_expected, atol=0., rtol=1e-6)
        self.assertAllClose(kl_sample_, kl_expected, atol=1e-2, rtol=0.)
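The reference value kl_expected is simply the definition of KL divergence for discrete distributions, applied to the softmax probabilities p = softmax(p_logits) and q = softmax(q_logits); Example 6 below checks the same identity for Categorical:

KL(P \,\|\, Q) = \sum_k p_k \bigl(\ln p_k - \ln q_k\bigr)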
Example 6: testCategoricalCategoricalKL
def testCategoricalCategoricalKL(self):

  def np_softmax(logits):
    exp_logits = np.exp(logits)
    return exp_logits / exp_logits.sum(axis=-1, keepdims=True)

  with self.cached_session() as sess:
    for categories in [2, 4]:
      for batch_size in [1, 10]:
        a_logits = np.random.randn(batch_size, categories)
        b_logits = np.random.randn(batch_size, categories)

        a = categorical.Categorical(logits=a_logits)
        b = categorical.Categorical(logits=b_logits)

        kl = kullback_leibler.kl_divergence(a, b)
        kl_val = sess.run(kl)
        # Make sure KL(a||a) is 0
        kl_same = sess.run(kullback_leibler.kl_divergence(a, a))

        prob_a = np_softmax(a_logits)
        prob_b = np_softmax(b_logits)
        kl_expected = np.sum(prob_a * (np.log(prob_a) - np.log(prob_b)),
                             axis=-1)

        self.assertEqual(kl.get_shape(), (batch_size,))
        self.assertAllClose(kl_val, kl_expected)
        self.assertAllClose(kl_same, np.zeros_like(kl_expected))
Example 7: _kl_independent
def _kl_independent(a, b, name="kl_independent"):
  """Batched KL divergence `KL(a || b)` for Independent distributions.

  We can leverage the fact that

  ```
  KL(Independent(a) || Independent(b)) = sum(KL(a || b))
  ```

  where the sum is over the `reinterpreted_batch_ndims`.

  Args:
    a: Instance of `Independent`.
    b: Instance of `Independent`.
    name: (optional) name to use for created ops. Default "kl_independent".

  Returns:
    Batchwise `KL(a || b)`.

  Raises:
    ValueError: If the event spaces of `a` and `b`, or of their underlying
      distributions, don't match.
  """
  p = a.distribution
  q = b.distribution

  # The KL between any two (non)-batched distributions is a scalar.
  # Given that the KL between two factored distributions is the sum, i.e.
  # KL(p1(x)p2(y) || q1(x)q2(y)) = KL(p1 || q1) + KL(p2 || q2), we compute
  # KL(p || q) and do a `reduce_sum` on the reinterpreted batch dimensions.
  if a.event_shape.is_fully_defined() and b.event_shape.is_fully_defined():
    if a.event_shape == b.event_shape:
      if p.event_shape == q.event_shape:
        num_reduce_dims = a.event_shape.ndims - p.event_shape.ndims
        reduce_dims = [-i - 1 for i in range(0, num_reduce_dims)]
        return math_ops.reduce_sum(
            kullback_leibler.kl_divergence(p, q, name=name), axis=reduce_dims)
      else:
        raise NotImplementedError("KL between Independents with different "
                                  "event shapes not supported.")
    else:
      raise ValueError("Event shapes do not match.")
  else:
    with ops.control_dependencies([
        check_ops.assert_equal(a.event_shape_tensor(), b.event_shape_tensor()),
        check_ops.assert_equal(p.event_shape_tensor(), q.event_shape_tensor())
    ]):
      # Mirror the static-shape branch: reduce over the trailing dimensions
      # that `a` reinterprets beyond the event rank of the underlying `p`.
      num_reduce_dims = (
          array_ops.shape(a.event_shape_tensor())[0] -
          array_ops.shape(p.event_shape_tensor())[0])
      reduce_dims = math_ops.range(-num_reduce_dims, 0, 1)
      return math_ops.reduce_sum(
          kullback_leibler.kl_divergence(p, q, name=name), axis=reduce_dims)
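The factorization identity quoted in the comment above follows in one line from the definition of KL divergence applied to independent factors:

KL(p_1 p_2 \,\|\, q_1 q_2) = \mathbb{E}_{p_1 p_2}\!\left[\ln\frac{p_1 p_2}{q_1 q_2}\right] = \mathbb{E}_{p_1}\!\left[\ln\frac{p_1}{q_1}\right] + \mathbb{E}_{p_2}\!\left[\ln\frac{p_2}{q_2}\right] = KL(p_1 \,\|\, q_1) + KL(p_2 \,\|\, q_2)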
Example 8: test_kl_reverse_multidim
def test_kl_reverse_multidim(self):
  with self.test_session() as sess:
    d = 5  # Dimension
    p = mvn_full_lib.MultivariateNormalFullCovariance(
        covariance_matrix=self._tridiag(d, diag_value=1, offdiag_value=0.5))
    q = mvn_diag_lib.MultivariateNormalDiag(scale_diag=[0.5] * d)

    approx_kl = cd.monte_carlo_csiszar_f_divergence(
        f=cd.kl_reverse,
        p=p,
        q=q,
        num_draws=int(1e5),
        seed=1)

    approx_kl_self_normalized = cd.monte_carlo_csiszar_f_divergence(
        f=lambda logu: cd.kl_reverse(logu, self_normalized=True),
        p=p,
        q=q,
        num_draws=int(1e5),
        seed=1)

    exact_kl = kullback_leibler.kl_divergence(q, p)

    [approx_kl_, approx_kl_self_normalized_, exact_kl_] = sess.run([
        approx_kl, approx_kl_self_normalized, exact_kl])

    self.assertAllClose(approx_kl_, exact_kl_,
                        rtol=0.02, atol=0.)
    self.assertAllClose(approx_kl_self_normalized_, exact_kl_,
                        rtol=0.08, atol=0.)
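The comparison against kl_divergence(q, p) reflects the Csiszar f-divergence convention apparently used here: the Monte Carlo estimate averages f over samples drawn from q, and kl_reverse corresponds to f(u) = -ln u, so (assuming that convention) the estimated quantity is the reverse KL:

D_f(p \,\|\, q) = \mathbb{E}_{x \sim q}\!\left[f\!\left(\frac{p(x)}{q(x)}\right)\right] \;\overset{f(u) = -\ln u}{=}\; \mathbb{E}_{x \sim q}\!\left[\ln\frac{q(x)}{p(x)}\right] = KL(q \,\|\, p)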
Example 9: testGammaGammaKL
def testGammaGammaKL(self):
  alpha0 = np.array([3.])
  beta0 = np.array([1., 2., 3., 1.5, 2.5, 3.5])
  alpha1 = np.array([0.4])
  beta1 = np.array([0.5, 1., 1.5, 2., 2.5, 3.])

  # Build graph.
  with self.test_session() as sess:
    g0 = gamma_lib.Gamma(concentration=alpha0, rate=beta0)
    g1 = gamma_lib.Gamma(concentration=alpha1, rate=beta1)
    x = g0.sample(int(1e4), seed=0)
    kl_sample = math_ops.reduce_mean(g0.log_prob(x) - g1.log_prob(x), 0)
    kl_actual = kullback_leibler.kl_divergence(g0, g1)

    # Execute graph.
    [kl_sample_, kl_actual_] = sess.run([kl_sample, kl_actual])

    kl_expected = ((alpha0 - alpha1) * special.digamma(alpha0)
                   + special.gammaln(alpha1)
                   - special.gammaln(alpha0)
                   + alpha1 * np.log(beta0)
                   - alpha1 * np.log(beta1)
                   + alpha0 * (beta1 / beta0 - 1.))

    self.assertEqual(beta0.shape, kl_actual.get_shape())
    self.assertAllClose(kl_expected, kl_actual_, atol=0., rtol=1e-6)
    self.assertAllClose(kl_sample_, kl_actual_, atol=0., rtol=1e-2)
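kl_expected implements the standard closed form for the KL divergence between two Gamma distributions parameterized by concentration \alpha and rate \beta:

KL\bigl(\mathrm{Ga}(\alpha_0, \beta_0) \,\|\, \mathrm{Ga}(\alpha_1, \beta_1)\bigr) = (\alpha_0 - \alpha_1)\,\psi(\alpha_0) - \ln\Gamma(\alpha_0) + \ln\Gamma(\alpha_1) + \alpha_1\,(\ln\beta_0 - \ln\beta_1) + \alpha_0\,\frac{\beta_1 - \beta_0}{\beta_0}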
Example 10: test_kl_reverse
def test_kl_reverse(self):
  with self.test_session() as sess:
    q = normal_lib.Normal(
        loc=np.ones(6),
        scale=np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0]))

    p = normal_lib.Normal(loc=q.loc + 0.1, scale=q.scale - 0.2)

    approx_kl = cd.monte_carlo_csiszar_f_divergence(
        f=cd.kl_reverse,
        p=p,
        q=q,
        num_draws=int(1e5),
        seed=1)

    approx_kl_self_normalized = cd.monte_carlo_csiszar_f_divergence(
        f=lambda logu: cd.kl_reverse(logu, self_normalized=True),
        p=p,
        q=q,
        num_draws=int(1e5),
        seed=1)

    exact_kl = kullback_leibler.kl_divergence(q, p)

    [approx_kl_, approx_kl_self_normalized_, exact_kl_] = sess.run([
        approx_kl, approx_kl_self_normalized, exact_kl])

    self.assertAllClose(approx_kl_, exact_kl_,
                        rtol=0.07, atol=0.)
    self.assertAllClose(approx_kl_self_normalized_, exact_kl_,
                        rtol=0.02, atol=0.)
Example 11: test_convergence_to_kl_using_sample_form_on_3dim_normal
def test_convergence_to_kl_using_sample_form_on_3dim_normal(self):
  # Test that the sample mean KL is the same as analytic when we use samples
  # to estimate every part of the KL divergence ratio.
  vector_shape = (2, 3)
  n_samples = 5000

  with self.test_session():
    q = mvn_diag_lib.MultivariateNormalDiag(
        loc=self._rng.rand(*vector_shape),
        scale_diag=self._rng.rand(*vector_shape))
    p = mvn_diag_lib.MultivariateNormalDiag(
        loc=self._rng.rand(*vector_shape),
        scale_diag=self._rng.rand(*vector_shape))

    # In this case, the log_ratio is the KL.
    sample_kl = -1 * entropy.elbo_ratio(
        log_p=p.log_prob,
        q=q,
        n=n_samples,
        form=entropy.ELBOForms.sample,
        seed=42)
    actual_kl = kullback_leibler_lib.kl_divergence(q, p)

    # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
    # pass.
    self.assertEqual((2,), sample_kl.get_shape())
    self.assertAllClose(actual_kl.eval(), sample_kl.eval(), rtol=0.05)
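The -1 factor comes from the identity the test relies on: as the in-code comment indicates, elbo_ratio with ELBOForms.sample estimates the log ratio E_q[log p(Z) - log q(Z)] from samples, and negating that sample average gives a Monte Carlo estimate of the KL:

KL(q \,\|\, p) = \mathbb{E}_{q}\!\left[\ln q(Z) - \ln p(Z)\right] \approx \frac{1}{n}\sum_{i=1}^{n}\bigl[\ln q(z_i) - \ln p(z_i)\bigr], \quad z_i \sim q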
Example 12: testDefaultVariationalAndPrior
def testDefaultVariationalAndPrior(self):
  _, prior, variational, _, log_likelihood = mini_vae()
  elbo = vi.elbo(log_likelihood)
  expected_elbo = log_likelihood - kullback_leibler.kl_divergence(
      variational.distribution, prior)
  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    self.assertAllEqual(*sess.run([expected_elbo, elbo]))
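The expected value follows the usual evidence-lower-bound decomposition, which is exactly what expected_elbo encodes, with log_likelihood playing the role of the reconstruction term:

\mathrm{ELBO} = \mathbb{E}_{q(z)}\!\left[\ln p(x \mid z)\right] - KL\bigl(q(z) \,\|\, p(z)\bigr)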
Example 13: testKLScalarToMultivariate
def testKLScalarToMultivariate(self):
  normal1 = normal_lib.Normal(
      loc=np.float32([-1., 1]),
      scale=np.float32([0.1, 0.5]))
  ind1 = independent_lib.Independent(
      distribution=normal1, reinterpreted_batch_ndims=1)

  normal2 = normal_lib.Normal(
      loc=np.float32([-3., 3]),
      scale=np.float32([0.3, 0.3]))
  ind2 = independent_lib.Independent(
      distribution=normal2, reinterpreted_batch_ndims=1)

  normal_kl = kullback_leibler.kl_divergence(normal1, normal2)
  ind_kl = kullback_leibler.kl_divergence(ind1, ind2)
  self.assertAllClose(
      self.evaluate(math_ops.reduce_sum(normal_kl, axis=-1)),
      self.evaluate(ind_kl))
Example 14: testExplicitVariationalAndPrior
def testExplicitVariationalAndPrior(self):
  with self.test_session() as sess:
    _, _, variational, _, log_likelihood = mini_vae()
    prior = normal.Normal(loc=3., scale=2.)
    elbo = vi.elbo(
        log_likelihood, variational_with_prior={variational: prior})
    expected_elbo = log_likelihood - kullback_leibler.kl_divergence(
        variational.distribution, prior)
    sess.run(variables.global_variables_initializer())
    self.assertAllEqual(*sess.run([expected_elbo, elbo]))
Example 15: testRegistration
def testRegistration(self):

  class MyDist(normal.Normal):
    pass

  # Register KL to a lambda that spits out the name parameter
  @kullback_leibler.RegisterKL(MyDist, MyDist)
  def _kl(a, b, name=None):  # pylint: disable=unused-argument,unused-variable
    return name

  a = MyDist(loc=0.0, scale=1.0)
  self.assertEqual("OK", kullback_leibler.kl_divergence(a, a, name="OK"))