

Python tensorflow.distributions Code Examples

This article collects and summarizes typical code examples of the tensorflow.distributions module in Python. If you are wondering how exactly to use tensorflow.distributions, or are looking for examples of it in practice, the curated code samples below may help. You can also explore other usage examples from tensorflow, the package in which this module lives.


Five code examples of tensorflow.distributions are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.

Example 1: sample_categorical

# Required module: import tensorflow [as alias]
# Or: from tensorflow import distributions [as alias]
def sample_categorical(p):
    # TODO: change to tf.distributions once the TF version is updated
    dist = tf.contrib.distributions.Categorical(probs=p)
    sample = dist.sample()
    return sample

Developer ID: gkahn13, Project: GtS, Lines of code: 11, Source file: tf_utils.py
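The TODO in this example points at the core tf.distributions namespace, which in later TF 1.x releases exposes the same Categorical constructor with a probs argument. A minimal sketch of the drop-in replacement, assuming a TF version that ships tf.distributions:

import tensorflow as tf

def sample_categorical_core(p):
    # Same sampling logic as above, using the core namespace instead of contrib.
    dist = tf.distributions.Categorical(probs=p)
    return dist.sample()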

Example 2: __init__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import distributions [as alias]
def __init__(self, name, **kwargs):
        super().__init__(name, **kwargs)
        # _get_method_by_alias (a batchflow helper) resolves a possibly
        # aliased name to the matching class name in tf.distributions.
        name = _get_method_by_alias(name, 'tf', tf.distributions)
        self.name = name
        self._params = copy(kwargs)  # requires `from copy import copy`
        self.graph = tf.Graph()
        with self.graph.as_default():
            # CPU-only session for sampling.
            config = tf.ConfigProto(device_count={'GPU': 0})
            self.sess = tf.Session(config=config)
            _ = kwargs.pop('dim', None)  # 'dim' is not a distribution argument
            self.sampler = getattr(tf.distributions, self.name)(**kwargs)
Developer ID: analysiscenter, Project: batchflow, Lines of code: 13, Source file: tf_sampler.py
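For context, the getattr(tf.distributions, self.name)(**kwargs) line resolves a distribution class from its name string. A standalone sketch of the same pattern outside the wrapper class (the 'Normal' name and its parameters are illustrative, not batchflow defaults):

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    # Resolve the distribution class by name, as the wrapper does.
    dist_cls = getattr(tf.distributions, 'Normal')
    sampler = dist_cls(loc=0.0, scale=1.0)
    sample_op = sampler.sample(5)
    # CPU-only session, mirroring device_count={'GPU': 0} above.
    config = tf.ConfigProto(device_count={'GPU': 0})
    with tf.Session(config=config) as sess:
        print(sess.run(sample_op))  # five draws from N(0, 1)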

Example 3: elbo

# Required module: import tensorflow [as alias]
# Or: from tensorflow import distributions [as alias]
def elbo(log_likelihood, KL, N):
    r"""Build the evidence lower bound (ELBO) loss for a neural net.

    Parameters
    ----------
    log_likelihood : Tensor
        the log-likelihood Tensor that takes neural network(s) and targets as
        an input. We recommend using a ``tf.distributions`` object's
        ``log_prob()`` method to obtain this tensor. The shape of this Tensor
        should be ``(n_samples, N, ...)``, where ``n_samples`` is the number of
        log-likelihood samples (defined by ab.InputLayer) and ``N`` is the
        number of observations (can be ``?`` if you are using a placeholder and
mini-batching). These likelihoods can also be weighted, for example to
adjust for class imbalance; this weighting is left up to the user.
    KL : float, Tensor
the Kullback-Leibler divergence between the posterior and prior
        parameters of the model (:math:`\text{KL}[q\|p]`).
    N : int, Tensor
        the total size of the dataset (i.e. number of observations).

    Returns
    -------
    nelbo : Tensor
        the loss function of the Bayesian neural net (negative ELBO).

    Example
    -------
    This is how we would typically generate a likelihood for this objective,

    .. code-block:: python

        noise = ab.pos_variable(1.0)
        likelihood = tf.distributions.Normal(loc=NN, scale=noise)
        log_likelihood = likelihood.log_prob(Y)

    where ``NN`` is our neural network, and ``Y`` are our targets.

    Note
    ----
The way ``tf.distributions.Bernoulli`` and ``tf.distributions.Categorical``
are implemented is a little confusing; it is worth noting that you should
use a target array, ``Y``, of shape ``(N, 1)`` of ints with the Bernoulli
likelihood, and a target array of shape ``(N,)`` of ints with the
Categorical likelihood.

    """
    # Batch amplification factor
    B = N / tf.to_float(tf.shape(log_likelihood)[1])

    # averaging over samples
    n_samples = tf.to_float(tf.shape(log_likelihood)[0])

    # Mean over samples for the expected log-likelihood
    ELL = tf.squeeze(tf.reduce_sum(log_likelihood, axis=[0, 1])) / n_samples

    # negative ELBO is batch weighted ELL and KL
    nELBO = - B * ELL + KL

    return nELBO 
Developer ID: gradientinstitute, Project: aboleth, Lines of code: 61, Source file: losses.py
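The shape caveat in the Note above is easiest to see in code. A minimal sketch of both target layouts (the placeholder shapes and the sizes n_samples and K are illustrative, not part of aboleth's API):

import tensorflow as tf

n_samples, K = 5, 3  # illustrative sample and class counts

# Bernoulli: targets of shape (N, 1) with ints in {0, 1}; the target
# broadcasts against the (n_samples, N, 1) network output.
NN = tf.placeholder(tf.float32, shape=(n_samples, None, 1))
Y_bern = tf.placeholder(tf.int32, shape=(None, 1))
ll_bern = tf.distributions.Bernoulli(logits=NN).log_prob(Y_bern)

# Categorical: targets of shape (N,) with ints in {0, ..., K-1};
# log_prob takes the class index directly, not a one-hot column.
logits = tf.placeholder(tf.float32, shape=(n_samples, None, K))
Y_cat = tf.placeholder(tf.int32, shape=(None,))
ll_cat = tf.distributions.Categorical(logits=logits).log_prob(Y_cat)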

Example 4: max_posterior

# Required module: import tensorflow [as alias]
# Or: from tensorflow import distributions [as alias]
def max_posterior(log_likelihood, regulariser):
    r"""Build maximum a-posteriori (MAP) loss for a neural net.

    Parameters
    ----------
    log_likelihood : Tensor
        the log-likelihood Tensor that takes neural network(s) and targets as
        an input. We recommend using a ``tf.distributions`` object's
        ``log_prob()`` method to obtain this tensor.  The shape of this Tensor
        should be ``(n_samples, N, ...)``, where ``n_samples`` is the number of
        log-likelihood samples (defined by ab.InputLayer) and ``N`` is the
        number of observations (can be ``?`` if you are using a placeholder and
mini-batching). These likelihoods can also be weighted, for example to
adjust for class imbalance; this weighting is left up to the user.
    regulariser : float, Tensor
        the regulariser on the parameters of the model to penalise model
        complexity.

    Returns
    -------
    map : Tensor
        the loss function of the MAP neural net.

    Example
    -------
    This is how we would typically generate a likelihood for this objective,

    .. code-block:: python

        noise = ab.pos_variable(1.0)
        likelihood = tf.distributions.Normal(loc=NN, scale=noise)
        log_likelihood = likelihood.log_prob(Y)

    where ``NN`` is our neural network, and ``Y`` are our targets.

    Note
    ----
The way ``tf.distributions.Bernoulli`` and ``tf.distributions.Categorical``
are implemented is a little confusing; it is worth noting that you should
use a target array, ``Y``, of shape ``(N, 1)`` of ints with the Bernoulli
likelihood, and a target array of shape ``(N,)`` of ints with the
Categorical likelihood.

    """
    # Average likelihood for batch
    AVLL = tf.squeeze(tf.reduce_mean(log_likelihood, axis=[0, 1]))

    # MAP objective
    MAP = - AVLL + regulariser

    return MAP 
Developer ID: gradientinstitute, Project: aboleth, Lines of code: 53, Source file: losses.py
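To make the regulariser argument concrete, here is a hedged sketch that wires max_posterior up with a plain L2 penalty (NN, Y and weights are illustrative stand-ins, not part of aboleth's API):

import tensorflow as tf

# Stand-ins for a network output, targets, and trainable weights.
NN = tf.placeholder(tf.float32, shape=(1, None, 1))
Y = tf.placeholder(tf.float32, shape=(None, 1))
weights = tf.Variable(tf.random_normal((10, 1)))

likelihood = tf.distributions.Normal(loc=NN, scale=1.0)
log_likelihood = likelihood.log_prob(Y)  # shape (1, N, 1)

# A simple L2 complexity penalty serves as the regulariser.
regulariser = 0.01 * tf.nn.l2_loss(weights)
loss = max_posterior(log_likelihood, regulariser)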

Example 5: train

# Required module: import tensorflow [as alias]
# Or: from tensorflow import distributions [as alias]
def train(self):
        img, sym = self.read_data_sets()

        with tf.variable_scope("beta_VAE"):
            img_q_mu, img_q_sigma = self.img_encoder(img)
            img_z = distributions.Normal(img_q_mu, img_q_sigma)
            img_gen = self.img_decoder(img_z.sample(self.cfg.batch_size))

            # NOTE: a stand-in reconstruction term; a full beta-VAE would
            # score `img` under a pixel-wise likelihood of the decoder output.
            img_reconstruct_error = tf.reduce_mean(img_gen)

            # Standard normal prior. The original called distributions.Normal()
            # with no arguments, which does not run; zeros/ones are assumed.
            img_z_prior = distributions.Normal(tf.zeros_like(img_q_mu),
                                               tf.ones_like(img_q_sigma))
            # Reduced to a scalar so the loss below is a scalar.
            KL_divergence = tf.reduce_mean(kl_divergence(img_z, img_z_prior))
            KL_divergence = self.cfg.beta_vae * KL_divergence

            loss = img_reconstruct_error - KL_divergence

        # train beta-VAE
        optimizer = tf.train.AdamOptimizer(self.cfg.learning_rate)
        train_op = optimizer.minimize(loss)

        for step in range(self.cfg.epoch):
            self.sess.run(train_op)

        with tf.variable_scope("SCAN"):
            sym_q_mu, sym_q_sigma = self.sym_encoder(sym)
            sym_z = distributions.Normal(sym_q_mu, sym_q_sigma)
            sym_gen = self.sym_decoder(sym_z.sample(self.cfg.batch_size))

            # NOTE: the original computed tf.reduce_mean() with no argument;
            # reducing over the decoder output is assumed here.
            sym_reconstruct_error = tf.reduce_mean(sym_gen)

            # Standard normal prior, assumed as above.
            sym_z_prior = distributions.Normal(tf.zeros_like(sym_q_mu),
                                               tf.ones_like(sym_q_sigma))
            beta_KL_divergence = tf.reduce_mean(kl_divergence(sym_z, sym_z_prior))
            beta_KL_divergence = self.cfg.beta_scan * beta_KL_divergence

            lambda_KL_divergence = tf.reduce_mean(kl_divergence(img_z, sym_z))

            loss = sym_reconstruct_error - beta_KL_divergence
            loss -= self.cfg.lambda_scan * lambda_KL_divergence

        # train SCAN
        optimizer = tf.train.AdamOptimizer(self.cfg.learning_rate)
        train_op = optimizer.minimize(loss)

        for step in range(self.cfg.epoch):
            self.sess.run(train_op)
Developer ID: naturomics, Project: SCAN-tensorflow, Lines of code: 47, Source file: SCAN.py
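For reference, the kl_divergence calls in this example correspond to tf.distributions.kl_divergence in TF 1.x, which has a registered closed form for pairs of Normals. A minimal standalone sketch (the parameter values are illustrative):

import tensorflow as tf
from tensorflow import distributions

# Closed-form KL between two diagonal Gaussians, as in the beta-VAE
# and SCAN KL terms above.
q = distributions.Normal(loc=[0.5, -0.2], scale=[1.0, 0.8])
p = distributions.Normal(loc=[0.0, 0.0], scale=[1.0, 1.0])  # unit prior

kl = distributions.kl_divergence(q, p)  # element-wise KL, shape (2,)
kl_total = tf.reduce_sum(kl)

with tf.Session() as sess:
    print(sess.run(kl_total))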


Note: The tensorflow.distributions examples in this article were compiled by 純淨天空 from Github/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please follow each project's License when distributing or using the code, and do not reproduce without permission.