

Python tensorflow.distributions Method Code Examples

This article collects typical usage examples of the Python method tensorflow.distributions. If you are wondering what exactly tensorflow.distributions does, how to call it, or how others use it, the curated code examples below may help. You can also explore further usage examples from the tensorflow package in which the method lives.


The following shows 5 code examples of the tensorflow.distributions method, sorted by popularity by default.

Example 1: sample_categorical

# Required import: import tensorflow [as alias]
# Or: from tensorflow import distributions [as alias]
def sample_categorical(p):
    """Draw a single sample index from a categorical distribution with probabilities ``p``."""
    # TODO: switch to tf.distributions once the TF version is updated
    dist = tf.contrib.distributions.Categorical(probs=p)
    sample = dist.sample()
    return sample

Developer: gkahn13 | Project: GtS | Lines: 11 | Source: tf_utils.py
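
A minimal usage sketch (not from the original project; it assumes a TF 1.x session, and the probability vector is illustrative):

import tensorflow as tf

p = tf.constant([0.2, 0.3, 0.5])   # illustrative class probabilities
sample = sample_categorical(p)     # scalar int tensor in {0, 1, 2}

with tf.Session() as sess:
    print(sess.run(sample))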

Example 2: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import distributions [as alias]
def __init__(self, name, **kwargs):
    # assumes: import tensorflow as tf; from copy import copy
    super().__init__(name, **kwargs)
    # resolve the alias to the name of a tf.distributions class
    name = _get_method_by_alias(name, 'tf', tf.distributions)
    self.name = name
    self._params = copy(kwargs)
    # build the sampler in its own graph with a CPU-only session
    self.graph = tf.Graph()
    with self.graph.as_default():
        config = tf.ConfigProto(device_count={'GPU': 0})
        self.sess = tf.Session(config=config)
        _ = kwargs.pop('dim', None)  # 'dim' is not a distribution parameter
        self.sampler = getattr(tf.distributions, self.name)(**kwargs)
Developer: analysiscenter | Project: batchflow | Lines: 13 | Source: tf_sampler.py
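
A rough sketch of what the constructor above wires up, with a concrete class substituted for the resolved alias (the ``Normal`` parameters are illustrative, not batchflow defaults):

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))
    sampler = tf.distributions.Normal(loc=0.0, scale=1.0)  # as if name == 'Normal'
    draws = sampler.sample(5)
    print(sess.run(draws))  # five standard-normal draws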

Example 3: elbo

# Required import: import tensorflow [as alias]
# Or: from tensorflow import distributions [as alias]
def elbo(log_likelihood, KL, N):
    r"""Build the evidence lower bound (ELBO) loss for a neural net.

    Parameters
    ----------
    log_likelihood : Tensor
        the log-likelihood Tensor built from the neural network(s) and the
        targets. We recommend using a ``tf.distributions`` object's
        ``log_prob()`` method to obtain this tensor. The shape of this Tensor
        should be ``(n_samples, N, ...)``, where ``n_samples`` is the number
        of log-likelihood samples (defined by ab.InputLayer) and ``N`` is the
        number of observations (it can be ``?`` if you are using a placeholder
        and mini-batching). These likelihoods can also be weighted, for
        example to adjust for class imbalance; this weighting is left up to
        the user.
    KL : float, Tensor
        the Kullback-Leibler divergence between the posterior and prior
        parameters of the model (:math:`\text{KL}[q\|p]`).
    N : int, Tensor
        the total size of the dataset (i.e. number of observations).

    Returns
    -------
    nelbo : Tensor
        the loss function of the Bayesian neural net (negative ELBO).

    Example
    -------
    This is how we would typically generate a likelihood for this objective,

    .. code-block:: python

        noise = ab.pos_variable(1.0)
        likelihood = tf.distributions.Normal(loc=NN, scale=noise)
        log_likelihood = likelihood.log_prob(Y)

    where ``NN`` is our neural network, and ``Y`` are our targets.

    Note
    ----
    The way ``tf.distributions.Bernoulli`` and ``tf.distributions.Categorical``
    are implemented is a little confusing. It is worth noting that you
    should use a target array, ``Y``, of shape ``(N, 1)`` of ints with the
    Bernoulli likelihood, and a target array of shape ``(N,)`` of ints with
    the Categorical likelihood.

    """
    # Batch amplification factor
    B = N / tf.to_float(tf.shape(log_likelihood)[1])

    # averaging over samples
    n_samples = tf.to_float(tf.shape(log_likelihood)[0])

    # Mean over samples for the expected log-likelihood
    ELL = tf.squeeze(tf.reduce_sum(log_likelihood, axis=[0, 1])) / n_samples

    # negative ELBO is batch weighted ELL and KL
    nELBO = - B * ELL + KL

    return nELBO 
Developer: gradientinstitute | Project: aboleth | Lines: 61 | Source: losses.py
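
To show how this loss plugs together end to end, here is a hedged sketch with stand-in tensors: ``NN`` and ``Y`` substitute for a real network output and targets, and ``tf.nn.softplus`` stands in for aboleth's ``ab.pos_variable``:

import tensorflow as tf

n_samples, N = 5, 100
NN = tf.random_normal([n_samples, N, 1])    # stand-in network output
Y = tf.zeros([N, 1])                        # stand-in targets
noise = tf.nn.softplus(tf.Variable(1.0))    # positive noise scale
likelihood = tf.distributions.Normal(loc=NN, scale=noise)
log_likelihood = likelihood.log_prob(Y)     # shape (n_samples, N, 1)
KL = tf.constant(0.1)                       # stand-in for the model's KL term
loss = elbo(log_likelihood, KL, N)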

Example 4: max_posterior

# Required import: import tensorflow [as alias]
# Or: from tensorflow import distributions [as alias]
def max_posterior(log_likelihood, regulariser):
    r"""Build maximum a-posteriori (MAP) loss for a neural net.

    Parameters
    ----------
    log_likelihood : Tensor
        the log-likelihood Tensor built from the neural network(s) and the
        targets. We recommend using a ``tf.distributions`` object's
        ``log_prob()`` method to obtain this tensor. The shape of this Tensor
        should be ``(n_samples, N, ...)``, where ``n_samples`` is the number
        of log-likelihood samples (defined by ab.InputLayer) and ``N`` is the
        number of observations (it can be ``?`` if you are using a placeholder
        and mini-batching). These likelihoods can also be weighted, for
        example to adjust for class imbalance; this weighting is left up to
        the user.
    regulariser : float, Tensor
        the regulariser on the parameters of the model to penalise model
        complexity.

    Returns
    -------
    map : Tensor
        the loss function of the MAP neural net.

    Example
    -------
    This is how we would typically generate a likelihood for this objective,

    .. code-block:: python

        noise = ab.pos_variable(1.0)
        likelihood = tf.distributions.Normal(loc=NN, scale=noise)
        log_likelihood = likelihood.log_prob(Y)

    where ``NN`` is our neural network, and ``Y`` are our targets.

    Note
    ----
    The way ``tf.distributions.Bernoulli`` and ``tf.distributions.Categorical``
    are implemented is a little confusing. It is worth noting that you
    should use a target array, ``Y``, of shape ``(N, 1)`` of ints with the
    Bernoulli likelihood, and a target array of shape ``(N,)`` of ints with
    the Categorical likelihood.

    """
    # Average log-likelihood over samples and batch
    AVLL = tf.squeeze(tf.reduce_mean(log_likelihood, axis=[0, 1]))

    # MAP objective
    MAP = - AVLL + regulariser

    return MAP 
Developer: gradientinstitute | Project: aboleth | Lines: 53 | Source: losses.py
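
A companion sketch for the MAP objective, reusing ``log_likelihood`` from the ELBO sketch above; the L2 penalty over a hypothetical weight variable stands in for a real model regulariser:

import tensorflow as tf

weights = tf.Variable(tf.zeros([10]))       # hypothetical model weights
regulariser = 0.01 * tf.reduce_sum(tf.square(weights))
loss = max_posterior(log_likelihood, regulariser)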

Example 5: train

# Required import: import tensorflow [as alias]
# Or: from tensorflow import distributions [as alias]
def train(self):
    # assumes: import tensorflow as tf; from tensorflow import distributions;
    # from tensorflow.distributions import kl_divergence
    img, sym = self.read_data_sets()

    with tf.variable_scope("beta_VAE"):
        img_q_mu, img_q_sigma = self.img_encoder(img)
        img_z = distributions.Normal(img_q_mu, img_q_sigma)
        img_gen = self.img_decoder(img_z.sample())  # one reparameterised sample per example

        # squared error as the reconstruction term
        img_reconstruct_error = tf.reduce_mean(tf.squared_difference(img, img_gen))

        # standard-normal prior over the latent code
        img_z_prior = distributions.Normal(loc=0., scale=1.)
        KL_divergence = tf.reduce_mean(kl_divergence(img_z, img_z_prior))
        KL_divergence = self.cfg.beta_vae * KL_divergence

        # minimise reconstruction error plus the beta-weighted KL
        loss = img_reconstruct_error + KL_divergence

    # train beta-VAE
    optimizer = tf.train.AdamOptimizer(self.cfg.learning_rate)
    train_op = optimizer.minimize(loss)

    for step in range(self.cfg.epoch):
        self.sess.run(train_op)

    with tf.variable_scope("SCAN"):
        sym_q_mu, sym_q_sigma = self.sym_encoder(sym)
        sym_z = distributions.Normal(sym_q_mu, sym_q_sigma)
        sym_gen = self.sym_decoder(sym_z.sample())

        sym_reconstruct_error = tf.reduce_mean(tf.squared_difference(sym, sym_gen))

        sym_z_prior = distributions.Normal(loc=0., scale=1.)
        beta_KL_divergence = tf.reduce_mean(kl_divergence(sym_z, sym_z_prior))
        beta_KL_divergence = self.cfg.beta_scan * beta_KL_divergence

        # pull the symbol posterior towards the image posterior
        lambda_KL_divergence = tf.reduce_mean(kl_divergence(img_z, sym_z))

        loss = sym_reconstruct_error + beta_KL_divergence
        loss += self.cfg.lambda_scan * lambda_KL_divergence

    # train SCAN only, leaving the beta-VAE weights fixed
    optimizer = tf.train.AdamOptimizer(self.cfg.learning_rate)
    scan_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="SCAN")
    train_op = optimizer.minimize(loss, var_list=scan_vars)

    for step in range(self.cfg.epoch):
        self.sess.run(train_op)
Developer: naturomics | Project: SCAN-tensorflow | Lines: 47 | Source: SCAN.py
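
The ``kl_divergence`` helper used above exists in TF 1.x as ``tf.distributions.kl_divergence``; here is a standalone sketch with illustrative parameters:

import tensorflow as tf
from tensorflow import distributions

q = distributions.Normal(loc=[0.5, -0.2], scale=[1.2, 0.8])  # posterior
p = distributions.Normal(loc=0.0, scale=1.0)                 # standard-normal prior
kl = distributions.kl_divergence(q, p)                       # elementwise KL, shape (2,)

with tf.Session() as sess:
    print(sess.run(kl))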


Note: The tensorflow.distributions examples in this article were curated by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by various developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.