This article collects typical Python usage examples of the tensorflow.distributions method. If you are unsure what tensorflow.distributions does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore other usage examples from the tensorflow module.
Five code examples of tensorflow.distributions are listed below, sorted by popularity by default.
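Before the examples, here is a minimal sketch (TensorFlow 1.x is assumed) of the basic pattern tf.distributions follows: construct a distribution object from its parameters, then call sample() and log_prob() on it.

import tensorflow as tf

# A unit Normal: the constructor takes the distribution parameters,
# and the resulting object exposes sample() and log_prob().
dist = tf.distributions.Normal(loc=0.0, scale=1.0)
samples = dist.sample(5)
log_p = dist.log_prob(samples)

with tf.Session() as sess:
    print(sess.run([samples, log_p]))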
Example 1: sample_categorical
# Required import: import tensorflow [as alias]
# Or: from tensorflow import distributions [as alias]
def sample_categorical(p):
    # TODO: switch to tf.distributions once the TF version is updated
    dist = tf.contrib.distributions.Categorical(probs=p)
    sample = dist.sample()
    return sample
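The TODO above points at the non-contrib namespace; a minimal equivalent sketch, assuming a TensorFlow 1.x version where tf.distributions.Categorical is available (the _v2 name is just for illustration):

import tensorflow as tf

def sample_categorical_v2(p):
    # Same behaviour, using tf.distributions instead of tf.contrib.distributions.
    dist = tf.distributions.Categorical(probs=p)
    return dist.sample()

# Usage sketch: p holds a batch of probability vectors (names are illustrative).
p = tf.placeholder(tf.float32, shape=[None, 4])
sample = sample_categorical_v2(p)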
Example 2: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import distributions [as alias]
# Also requires: from copy import copy
def __init__(self, name, **kwargs):
    super().__init__(name, **kwargs)
    name = _get_method_by_alias(name, 'tf', tf.distributions)
    self.name = name
    self._params = copy(kwargs)

    self.graph = tf.Graph()
    with self.graph.as_default():
        config = tf.ConfigProto(device_count={'GPU': 0})
        self.sess = tf.Session(config=config)
        _ = kwargs.pop('dim', None)
        self.sampler = getattr(tf.distributions, self.name)(**kwargs)
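The constructor above only builds the sampler inside its own graph and session; a hedged sketch of how a companion sampling method might use it (the method name, the size argument, and the TfSampler class name are assumptions for illustration, not part of the original source):

def sample(self, size):
    # Hypothetical companion method: draw `size` samples from the wrapped
    # tf.distributions object inside the class's private graph and session.
    with self.graph.as_default():
        draws = self.sampler.sample(size)
    return self.sess.run(draws)

# Usage sketch (names illustrative):
# sampler = TfSampler('Normal', loc=0.0, scale=1.0)
# print(sampler.sample(10))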
Example 3: elbo
# Required import: import tensorflow [as alias]
# Or: from tensorflow import distributions [as alias]
def elbo(log_likelihood, KL, N):
    r"""Build the evidence lower bound (ELBO) loss for a neural net.

    Parameters
    ----------
    log_likelihood : Tensor
        the log-likelihood Tensor that takes neural network(s) and targets as
        an input. We recommend using a ``tf.distributions`` object's
        ``log_prob()`` method to obtain this tensor. The shape of this Tensor
        should be ``(n_samples, N, ...)``, where ``n_samples`` is the number
        of log-likelihood samples (defined by ab.InputLayer) and ``N`` is the
        number of observations (can be ``?`` if you are using a placeholder
        and mini-batching). These likelihoods can also be weighted, for
        example to adjust for class imbalance; this weighting is left up to
        the user.
    KL : float, Tensor
        the Kullback-Leibler divergence between the posterior and prior
        parameters of the model (:math:`\text{KL}[q\|p]`).
    N : int, Tensor
        the total size of the dataset (i.e. number of observations).

    Returns
    -------
    nelbo : Tensor
        the loss function of the Bayesian neural net (negative ELBO).

    Example
    -------
    This is how we would typically generate a likelihood for this objective:

    .. code-block:: python

        noise = ab.pos_variable(1.0)
        likelihood = tf.distributions.Normal(loc=NN, scale=noise)
        log_likelihood = likelihood.log_prob(Y)

    where ``NN`` is our neural network, and ``Y`` are our targets.

    Note
    ----
    The way ``tf.distributions.Bernoulli`` and ``tf.distributions.Categorical``
    are implemented is a little confusing... it is worth noting that you
    should use a target array, ``Y``, of shape ``(N, 1)`` of ints with the
    Bernoulli likelihood, and a target array of shape ``(N,)`` of ints with
    the Categorical likelihood.
    """
    # Batch amplification factor
    B = N / tf.to_float(tf.shape(log_likelihood)[1])

    # Averaging over samples
    n_samples = tf.to_float(tf.shape(log_likelihood)[0])

    # Just mean over samples for expected log-likelihood
    ELL = tf.squeeze(tf.reduce_sum(log_likelihood, axis=[0, 1])) / n_samples

    # Negative ELBO is batch-weighted ELL plus KL
    nELBO = - B * ELL + KL

    return nELBO
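A hedged sketch of wiring this objective up end to end, following the docstring's likelihood example (TensorFlow 1.x; the shapes, the stand-in for ab.pos_variable, and the dataset size are assumptions for illustration):

import tensorflow as tf

# Sketch only: shapes and names below are assumptions.
n_samples, D = 5, 1
NN = tf.placeholder(tf.float32, [n_samples, None, D])   # stacked network outputs
Y = tf.placeholder(tf.float32, [None, D])                # targets, broadcast over samples
kl_term = tf.constant(0.0)                               # KL[q||p] from the model's variational layers

noise = tf.nn.softplus(tf.Variable(1.0))                 # positive noise std (stand-in for ab.pos_variable)
likelihood = tf.distributions.Normal(loc=NN, scale=noise)
log_likelihood = likelihood.log_prob(Y)                  # shape (n_samples, N, D)
loss = elbo(log_likelihood, KL=kl_term, N=60000)         # N = total dataset size (example value)
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)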
Example 4: max_posterior
# Required import: import tensorflow [as alias]
# Or: from tensorflow import distributions [as alias]
def max_posterior(log_likelihood, regulariser):
    r"""Build the maximum a-posteriori (MAP) loss for a neural net.

    Parameters
    ----------
    log_likelihood : Tensor
        the log-likelihood Tensor that takes neural network(s) and targets as
        an input. We recommend using a ``tf.distributions`` object's
        ``log_prob()`` method to obtain this tensor. The shape of this Tensor
        should be ``(n_samples, N, ...)``, where ``n_samples`` is the number
        of log-likelihood samples (defined by ab.InputLayer) and ``N`` is the
        number of observations (can be ``?`` if you are using a placeholder
        and mini-batching). These likelihoods can also be weighted, for
        example to adjust for class imbalance; this weighting is left up to
        the user.
    regulariser : float, Tensor
        the regulariser on the parameters of the model to penalise model
        complexity.

    Returns
    -------
    map : Tensor
        the loss function of the MAP neural net.

    Example
    -------
    This is how we would typically generate a likelihood for this objective:

    .. code-block:: python

        noise = ab.pos_variable(1.0)
        likelihood = tf.distributions.Normal(loc=NN, scale=noise)
        log_likelihood = likelihood.log_prob(Y)

    where ``NN`` is our neural network, and ``Y`` are our targets.

    Note
    ----
    The way ``tf.distributions.Bernoulli`` and ``tf.distributions.Categorical``
    are implemented is a little confusing... it is worth noting that you
    should use a target array, ``Y``, of shape ``(N, 1)`` of ints with the
    Bernoulli likelihood, and a target array of shape ``(N,)`` of ints with
    the Categorical likelihood.
    """
    # Average likelihood for the batch
    AVLL = tf.squeeze(tf.reduce_mean(log_likelihood, axis=[0, 1]))

    # MAP objective
    MAP = - AVLL + regulariser

    return MAP
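Correspondingly, a minimal sketch of using this MAP objective (the L2 regulariser shown here is an assumption, since the docstring leaves the choice of regulariser to the user; log_likelihood is built exactly as in the docstring example):

# Sketch only: regularise all trainable weights with an L2 penalty.
weights = tf.trainable_variables()
regulariser = 0.01 * tf.add_n([tf.nn.l2_loss(w) for w in weights])
loss = max_posterior(log_likelihood, regulariser)
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)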
Example 5: train
# Required import: import tensorflow [as alias]
# Or: from tensorflow import distributions [as alias]
# Note: kl_divergence below is assumed to be distributions.kl_divergence
def train(self):
    img, sym = self.read_data_sets()

    with tf.variable_scope("beta_VAE"):
        img_q_mu, img_q_sigma = self.img_encoder(img)
        img_z = distributions.Normal(img_q_mu, img_q_sigma)
        img_gen = self.img_decoder(img_z.sample(self.cfg.batch_size))
        img_reconstruct_error = tf.reduce_mean(img_gen)
        # Prior on the image latent code (parameters assumed: standard Normal)
        img_z_prior = distributions.Normal(tf.zeros_like(img_q_mu),
                                           tf.ones_like(img_q_sigma))
        KL_divergence = kl_divergence(img_z, img_z_prior)
        KL_divergence = self.cfg.beta_vae * KL_divergence
        loss = img_reconstruct_error - KL_divergence

        # train beta VAE
        optimizer = tf.train.AdamOptimizer(self.cfg.learning_rate)
        train_op = optimizer.minimize(loss)
        for step in range(self.cfg.epoch):
            self.sess.run(train_op)

    with tf.variable_scope("SCAN"):
        sym_q_mu, sym_q_sigma = self.sym_encoder(sym)
        sym_z = distributions.Normal(sym_q_mu, sym_q_sigma)
        sym_gen = self.sym_decoder(sym_z.sample(self.cfg.batch_size))
        # Reconstruction term for the symbol branch (mirrors the image branch)
        sym_reconstruct_error = tf.reduce_mean(sym_gen)
        # Prior on the symbol latent code (parameters assumed: standard Normal)
        sym_z_prior = distributions.Normal(tf.zeros_like(sym_q_mu),
                                           tf.ones_like(sym_q_sigma))
        beta_KL_divergence = kl_divergence(sym_z, sym_z_prior)
        beta_KL_divergence = self.cfg.beta_scan * beta_KL_divergence
        lambda_KL_divergence = kl_divergence(img_z, sym_z)
        loss = sym_reconstruct_error - beta_KL_divergence
        loss -= self.cfg.lambda_scan * lambda_KL_divergence

        # train SCAN
        optimizer = tf.train.AdamOptimizer(self.cfg.learning_rate)
        train_op = optimizer.minimize(loss)
        for step in range(self.cfg.epoch):
            self.sess.run(train_op)
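The KL terms above rely on tf.distributions.kl_divergence, which has a registered analytic form for pairs of Normal distributions; a minimal standalone sketch (TensorFlow 1.x):

import tensorflow as tf
from tensorflow import distributions

# Two diagonal Normals; kl_divergence returns the analytic KL[p || q].
p = distributions.Normal(loc=0.0, scale=1.0)
q = distributions.Normal(loc=1.0, scale=2.0)
kl = distributions.kl_divergence(p, q)

with tf.Session() as sess:
    print(sess.run(kl))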