

Python v1.reduce_logsumexp Method Code Examples

This article collects typical usage examples of the Python method tensorflow.compat.v1.reduce_logsumexp. If you are unsure what v1.reduce_logsumexp does or how to call it, the curated examples below should help. You can also explore further usage examples from the tensorflow.compat.v1 module.


The following presents 8 code examples of the v1.reduce_logsumexp method, sorted by popularity by default.
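Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of what tf.reduce_logsumexp computes: log(sum(exp(x))) along an axis, evaluated without the overflow that the naive formula suffers from.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.constant([[1000.0, 1000.0], [0.0, 0.0]])
stable = tf.reduce_logsumexp(x, axis=-1)             # no overflow
naive = tf.log(tf.reduce_sum(tf.exp(x), axis=-1))    # overflows to inf

with tf.Session() as sess:
  print(sess.run(stable))  # ~[1000.693, 0.693]
  print(sess.run(naive))   # [inf, 0.693]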

Example 1: log_prob_from_logits

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def log_prob_from_logits(logits, reduce_axis=-1):
  return logits - tf.reduce_logsumexp(logits, axis=reduce_axis, keepdims=True) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 4, Source file: common_layers.py
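Subtracting the log-sum-exp of the logits along the class axis leaves normalized log probabilities, i.e. a numerically stable log-softmax. A small standalone usage sketch (input values invented for illustration):

import tensorflow.compat.v1 as tf

logits = tf.constant([[2.0, 1.0, 0.1]])
# Same normalization as log_prob_from_logits above:
log_probs = logits - tf.reduce_logsumexp(logits, axis=-1, keepdims=True)
# exp(log_probs) sums to 1 along the last axis.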

Example 2: call

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def call(self, x, translations, blend_terms, points):
    """Construct object by assembling convex polytopes differentiably.

    Args:
      x: Tensor, [batch_size, n_parts, n_half_planes, dims], hyperplane
        parameters.
      translations: Tensor, [batch_size, n_parts, dims], translation vectors.
      blend_terms: Tensor, [batch_size, n_parts], smoothness terms for blending
        hyperplanes.
      points: Tensor, [batch_size, n_points, dims], query points.

    Returns:
      indicator: Tensor, [batch_size, n_points, 1], indicators for query points.
      extra: list, contains:
        trans: Tensor, [batch_size, n_parts, dims], translations.
        imgsum: Tensor, [batch_size, n_points, 1], sum of indicators.
        offset: Tensor, [batch_size, n_parts, n_half_planes, 1], offset of
        hyperplanes.
        image_indica: Tensor, [batch_size, n_parts, n_points, 1], per part
        indicators.
    """
    points = tf.concat([points, translations], axis=1)
    signed_dis, transform, blend_planes, offset = self._compute_sdf(
        x, translations, blend_terms, points)

    # Generate convex shapes (use logsumexp as the intersection of half-spaces)
    part_logits = tf.reduce_logsumexp(
        signed_dis * tf.reshape(blend_planes, [-1, self._n_parts, 1, 1, 1]),
        axis=2)
    part_logits = (-part_logits /
                   tf.reshape(blend_planes, [-1, self._n_parts, 1, 1]))

    part_indica_full = tf.nn.sigmoid(part_logits * self._sharpness)
    part_indica = part_indica_full[:, :, :-self._n_parts]

    image_indica_sum = tf.reduce_sum(part_indica_full, axis=1)
    image_indica_max = tf.reduce_max(part_indica, axis=1)

    return image_indica_max, (transform, image_indica_sum, offset, part_indica) 
Developer ID: tensorflow, Project: graphics, Lines of code: 41, Source file: models.py
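The key step above is the blending trick: reduce_logsumexp over the hyperplane axis, scaled by the blend term, acts as a smooth maximum of the signed distances, which is then negated and rescaled to form the convex indicator logits. A standalone sketch of that property (the values and the beta name are invented for illustration):

import tensorflow.compat.v1 as tf

d = tf.constant([0.3, -0.2, 0.9])   # signed distances to three half-planes
beta = 10.0                          # blend/sharpness term
soft_max = tf.reduce_logsumexp(beta * d) / beta  # close to max(d) = 0.9;
# larger beta makes the approximation to the hard maximum tighter.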

Example 3: _get_mdn_loss

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def _get_mdn_loss(logmix, mean, logstd, y, batch_mask, dont_reduce_loss):
  """Computes MDN loss term for svg decoder model."""
  logsqrttwopi = np.log(np.sqrt(2.0 * np.pi))

  v = logmix + _tf_lognormal(y, mean, logstd, logsqrttwopi)
  v = tf.reduce_logsumexp(v, 1, keepdims=True)
  v = tf.reshape(v, [-1, 51, 1, 6])

  # mask out unimportant terms given the ground truth commands
  v = tf.multiply(v, batch_mask)
  if dont_reduce_loss:
    return -tf.reduce_mean(tf.reduce_sum(v, axis=3), [1, 2])
  return -tf.reduce_mean(tf.reduce_sum(v, axis=3)) 
Developer ID: magenta, Project: magenta, Lines of code: 15, Source file: svg_decoder_loss.py
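The reduce_logsumexp over axis 1 combines the per-component log mixture weights (logmix) and the per-component log densities into the log-likelihood of y under the mixture. A minimal standalone sketch with invented values:

import tensorflow.compat.v1 as tf

log_mix = tf.log(tf.constant([[0.7, 0.3]]))   # [batch, K] log mixture weights
log_dens = tf.constant([[-1.2, -3.4]])        # [batch, K] per-component log densities
# log p(y) = logsumexp_k(log w_k + log N(y; mu_k, sigma_k))
log_likelihood = tf.reduce_logsumexp(log_mix + log_dens, axis=1)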

Example 4: _get_mdn_coef

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def _get_mdn_coef(output):
  logmix, mean, logstd = tf.split(output, 3, -1)
  logmix = logmix - tf.reduce_logsumexp(logmix, -1, keepdims=True)
  return logmix, mean, logstd 
Developer ID: magenta, Project: magenta, Lines of code: 6, Source file: svg_decoder_loss.py
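Here reduce_logsumexp normalizes the raw mixture logits so that exp(logmix) sums to 1 over the components, i.e. a log-softmax over the mixture dimension. A standalone sketch with invented values:

import tensorflow.compat.v1 as tf

raw = tf.constant([[2.0, 0.5, -1.0]])
logmix = raw - tf.reduce_logsumexp(raw, axis=-1, keepdims=True)
weights = tf.exp(logmix)  # each row sums to 1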

Example 5: _log_prob_from_logits

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def _log_prob_from_logits(logits):
  return logits - tf.reduce_logsumexp(logits, axis=2, keep_dims=True) 
Developer ID: google-research, Project: language, Lines of code: 4, Source file: beam_search.py

Example 6: _log_prob_from_logits

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def _log_prob_from_logits(logits):
  return logits - tf.reduce_logsumexp(logits, axis=2, keepdims=True) 
Developer ID: tensorflow, Project: models, Lines of code: 4, Source file: beam_search_v1.py
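Examples 5 and 6 perform the same normalization over the vocabulary axis of beam-search logits; keep_dims in Example 5 is simply the older, deprecated alias of keepdims in tf.compat.v1. A standalone sketch with invented beam-search shapes:

import tensorflow.compat.v1 as tf

logits = tf.random.normal([2, 4, 100])   # [batch, beam, vocab]
log_probs = logits - tf.reduce_logsumexp(logits, axis=2, keepdims=True)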

Example 7: mixture_of_softmaxes

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def mixture_of_softmaxes(x, k, e, to_logits):
  """A slower, but supposedly more flexible softmax.

  See "Breaking the Softmax Bottleneck: A High-Rank RNN Language Model"
  by Yang et al., 2017.

  Args:
    x: A 2d tensor of shape [b, *]. Typically the output of an RNN cell.
    k: The number of mixture components.
    e: The embedding size. Often the same as the second dimension of x.
    to_logits: A function that takes a [b*k, e] tensor as its argument and
        transforms it into shape [b*k, v] where v is the vocabulary size.

  Returns:
    A [b, v] tensor of log probabilities. Each element is computed from
    the mixture of the k components. The components share most of the
    parameters (i.e. those in to_logits), but they have a smaller number
    of non-shared parameters (those in the projections).
  """
  # TODO(melisgl): For training where the entire output distribution is not
  # needed, maybe sparse_softmax_cross_entropy_with_logits would be more
  # efficient.
  if True:  # pylint: disable=using-constant-test
    # This log-domain implementation seems preferable, but it uses much more
    # memory for some reason.
    b = tf.shape(x)[0]
    p_b_ke = tf.tanh(linear(x, k*e, True, scope='projection'))
    p_bk_e = tf.reshape(p_b_ke, [b*k, e])
    log_mixture_weights_b_k = tf.nn.log_softmax(
        linear(x, k, False, scope='mos_weights'))
    log_mixture_weights_b_k_1 = tf.reshape(log_mixture_weights_b_k, [b, k, 1])
    logits_bk_v = to_logits(p_bk_e)
    logprobs_bk_v = tf.nn.log_softmax(logits_bk_v)
    logprobs_b_k_v = tf.reshape(logprobs_bk_v, [b, k, -1])
    logprobs_b_v = tf.reduce_logsumexp(
        logprobs_b_k_v + log_mixture_weights_b_k_1,
        axis=1)
    return logprobs_b_v
  else:
    # Alternatively, calculate with probabilities directly.
    b = tf.shape(x)[0]
    p_b_ke = tf.tanh(linear(x, k*e, True, scope='projection'))
    p_bk_e = tf.reshape(p_b_ke, [b*k, e])
    mixture_weights_b_k = tf.nn.softmax(
        linear(x, k, False, scope='mos_weights'))
    mixture_weights_b_k_1 = tf.reshape(mixture_weights_b_k, [b, k, 1])
    logits_bk_v = to_logits(p_bk_e)
    probs_bk_v = tf.nn.softmax(logits_bk_v)
    probs_b_k_v = tf.reshape(probs_bk_v, [b, k, -1])
    probs_b_v = tf.reduce_sum(
        probs_b_k_v * mixture_weights_b_k_1,
        axis=1)
    return tf.log(probs_b_v+1e-8) 
Developer ID: deepmind, Project: lamb, Lines of code: 55, Source file: utils.py
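The log-domain branch above ends with the defining step of the mixture of softmaxes: a logsumexp over the k per-component log-softmaxes shifted by their log mixture weights. A standalone sketch of just that step (the shapes are invented; the project-specific linear helper used above is not reproduced here):

import tensorflow.compat.v1 as tf

b, k, v = 2, 3, 5
logprobs_b_k_v = tf.nn.log_softmax(tf.random.normal([b, k, v]), axis=-1)
log_weights_b_k_1 = tf.nn.log_softmax(tf.random.normal([b, k]), axis=-1)[:, :, None]
logprobs_b_v = tf.reduce_logsumexp(logprobs_b_k_v + log_weights_b_k_1, axis=1)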

Example 8: compute_iw_marginal

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def compute_iw_marginal(
      self, targets, targets_mask, decoder_self_attention_bias, features,
      n_samples, reduce_mean=True, **kwargs):
    hparams = self._hparams
    z_q, log_q_z, _ = self.sample_q(
        targets, targets_mask, decoder_self_attention_bias,
        n_samples=n_samples, temp=1.0, **kwargs)  # [K*B, L, C]
    iw_kwargs = {key: ops.prepare_for_iw(value, n_samples) for (
        key, value) in kwargs.items()}
    iw_targets_mask = ops.prepare_for_iw(targets_mask, n_samples)
    iw_decoder_self_attention_bias = (
        common_attention.attention_bias_ignore_padding(1.0 - iw_targets_mask))
    iw_features = copy.copy(features)
    iw_features["targets"] = ops.prepare_for_iw(
        features["targets"], n_samples)

    log_p_z_base, log_abs_det = self.compute_prior_log_prob(
        z_q, iw_targets_mask, iw_decoder_self_attention_bias,
        check_invertibility=False, **iw_kwargs)
    log_p_z = log_p_z_base + log_abs_det

    body_output = ops.decoder(
        "decoder", z_q, hparams, iw_decoder_self_attention_bias, **iw_kwargs)
    logits = self.top(body_output, iw_features)
    numerator, denominator = self.loss_iw(logits, iw_features)
    numerator = tf.reduce_sum(numerator[..., 0, 0], 1)  # [K*B]
    denominator = tf.reduce_sum(denominator[..., 0, 0], 1)  # [K*B]
    log_p_x = -1 * numerator / denominator
    log_q_z = gops.reduce_mean_over_l_sum_over_c(log_q_z, iw_targets_mask)
    log_p_z = log_p_z / tf.reduce_sum(iw_targets_mask, 1)

    log_p_x, log_q_z, log_p_z = [ops.unprepare_for_iw(ii, n_samples) for ii in [
        log_p_x, log_q_z, log_p_z]]

    log_w_n = log_p_z - log_q_z
    log_w_n = tf.nn.log_softmax(log_w_n, axis=0)  # [K, B]

    iw_marginal = log_p_x + log_w_n
    iw_marginal = tf.reduce_logsumexp(iw_marginal, 0)  # [B]

    if reduce_mean:
      iw_marginal = tf.cast(tf.reduce_mean(iw_marginal, 0), tf.float32)  # [1]
    else:
      iw_marginal = tf.cast(iw_marginal, tf.float32)  # [B]
    return iw_marginal 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 47, Source file: transformer_vae_flow_prior.py
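The final reduce_logsumexp over the sample axis is the importance-weighting step: with K samples per example, log_softmax over axis 0 gives normalized log importance weights, and logsumexp combines them with the per-sample log-likelihoods into a single marginal estimate per example. A standalone sketch with invented values:

import tensorflow.compat.v1 as tf

log_p_x = tf.constant([[-3.0, -2.5], [-4.0, -3.5]])   # [K, B] per-sample log-likelihoods
log_w = tf.constant([[-0.1, -0.4], [-0.6, -0.2]])     # [K, B] unnormalized log weights
log_w_n = tf.nn.log_softmax(log_w, axis=0)             # normalize over the K samples
iw_marginal = tf.reduce_logsumexp(log_p_x + log_w_n, axis=0)  # [B]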


Note: The tensorflow.compat.v1.reduce_logsumexp examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, who retain the copyright to the source code. Please follow each project's license when redistributing or using the code; do not reproduce without permission.