

Python v1.reduce_logsumexp Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.reduce_logsumexp method in Python. If you are unsure what v1.reduce_logsumexp does or how to use it, the curated code examples below may help. You can also explore further usage examples of tensorflow.compat.v1, the module the method belongs to.


The following presents 8 code examples of the v1.reduce_logsumexp method, sorted by popularity by default.
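Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; values are illustrative) of what tf.compat.v1.reduce_logsumexp computes: a numerically stable log(sum(exp(x))) along an axis.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # the examples below are written against the v1 graph API

logits = tf.constant([[1.0, 2.0, 3.0], [10.0, 10.0, 10.0]])
# Equivalent to tf.log(tf.reduce_sum(tf.exp(logits), axis=-1)), but the
# per-row maximum is subtracted internally to avoid overflow/underflow.
lse = tf.reduce_logsumexp(logits, axis=-1)

with tf.Session() as sess:
  print(sess.run(lse))  # approximately [3.4076, 11.0986]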

Example 1: log_prob_from_logits

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def log_prob_from_logits(logits, reduce_axis=-1):
  return logits - tf.reduce_logsumexp(logits, axis=reduce_axis, keepdims=True) 
Developer: tensorflow, Project: tensor2tensor, Lines: 4, Source: common_layers.py
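A quick usage sketch for the helper above (assuming the function from Example 1 is in scope and tf is the tensorflow.compat.v1 alias; the tensor values are illustrative, not from tensor2tensor): subtracting the logsumexp turns raw logits into normalized log-probabilities, so tf.exp of the result sums to 1 along the reduced axis.

import tensorflow.compat.v1 as tf

logits = tf.constant([[2.0, 1.0, 0.1]])
log_probs = log_prob_from_logits(logits)         # same shape as logits
prob_sum = tf.reduce_sum(tf.exp(log_probs), -1)  # ~1.0 per row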

Example 2: call

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def call(self, x, translations, blend_terms, points):
    """Construct object by assembling convex polytopes differentiably.

    Args:
      x: Tensor, [batch_size, n_parts, n_half_planes, dims], hyperplane
        parameters.
      translations: Tensor, [batch_size, n_parts, dims], translation vectors.
      blend_terms: Tensor, [batch_size, n_parts], smoothness terms for blending
        hyperplanes.
      points: Tensor, [batch_size, n_points, dims], query points.

    Returns:
      indicator: Tensor, [batch_size, n_points, 1], indicators for query points.
      extra: list, contains:
        trans: Tensor, [batch_size, n_parts, dims], translations.
        imgsum: Tensor, [batch_size, n_points, 1], sum of indicators.
        offset: Tensor, [batch_size, n_parts, n_half_planes, 1], offset of
        hyperplanes.
        image_indica: Tensor, [batch_size, n_parts, n_points, 1], per part
        indicators.
    """
    points = tf.concat([points, translations], axis=1)
    signed_dis, transform, blend_planes, offset = self._compute_sdf(
        x, translations, blend_terms, points)

    # Generate convex shapes (use logsumexp as the intersection of half-spaces)
    part_logits = tf.reduce_logsumexp(
        signed_dis * tf.reshape(blend_planes, [-1, self._n_parts, 1, 1, 1]),
        axis=2)
    part_logits = (-part_logits /
                   tf.reshape(blend_planes, [-1, self._n_parts, 1, 1]))

    part_indica_full = tf.nn.sigmoid(part_logits * self._sharpness)
    part_indica = part_indica_full[:, :, :-self._n_parts]

    image_indica_sum = tf.reduce_sum(part_indica_full, axis=1)
    image_indica_max = tf.reduce_max(part_indica, axis=1)

    return image_indica_max, (transform, image_indica_sum, offset, part_indica) 
Developer: tensorflow, Project: graphics, Lines: 41, Source: models.py
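The call above uses reduce_logsumexp as a smooth, differentiable surrogate for the max over half-plane distances. A standalone sketch of that blending trick, with made-up shapes independent of the graphics project:

import tensorflow.compat.v1 as tf

signed_dis = tf.random.normal([2, 4, 8, 16, 1])  # [batch, parts, half_planes, points, 1]
blend = tf.fill([2, 4, 1, 1, 1], 10.0)           # larger blend -> closer to a hard max

# (1/b) * logsumexp(b * d) approaches max(d) as b grows, but stays differentiable.
soft_max = tf.reduce_logsumexp(signed_dis * blend, axis=2) / tf.reshape(blend, [2, 4, 1, 1])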

Example 3: _get_mdn_loss

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def _get_mdn_loss(logmix, mean, logstd, y, batch_mask, dont_reduce_loss):
  """Computes MDN loss term for svg decoder model."""
  logsqrttwopi = np.log(np.sqrt(2.0 * np.pi))

  v = logmix + _tf_lognormal(y, mean, logstd, logsqrttwopi)
  v = tf.reduce_logsumexp(v, 1, keepdims=True)
  v = tf.reshape(v, [-1, 51, 1, 6])

  # mask out unimportant terms given the ground truth commands
  v = tf.multiply(v, batch_mask)
  if dont_reduce_loss:
    return -tf.reduce_mean(tf.reduce_sum(v, axis=3), [1, 2])
  return -tf.reduce_mean(tf.reduce_sum(v, axis=3)) 
Developer: magenta, Project: magenta, Lines: 15, Source: svg_decoder_loss.py
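The snippet assumes a companion helper _tf_lognormal that returns the per-component Gaussian log-density. A plausible sketch consistent with how it is called above (the exact implementation in svg_decoder_loss.py may differ):

import tensorflow.compat.v1 as tf

def _tf_lognormal(y, mean, logstd, logsqrttwopi):
  # Elementwise log N(y | mean, exp(logstd)^2).
  return -0.5 * ((y - mean) / tf.exp(logstd)) ** 2 - logstd - logsqrttwopi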

Example 4: _get_mdn_coef

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def _get_mdn_coef(output):
  logmix, mean, logstd = tf.split(output, 3, -1)
  logmix = logmix - tf.reduce_logsumexp(logmix, -1, keepdims=True)
  return logmix, mean, logstd 
Developer: magenta, Project: magenta, Lines: 6, Source: svg_decoder_loss.py
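Subtracting reduce_logsumexp here is exactly log_softmax: it normalizes the mixture logits so that tf.exp(logmix) sums to 1 over the components. A small hypothetical check (shapes chosen for illustration, assuming _get_mdn_coef from Example 4 is in scope):

import tensorflow.compat.v1 as tf

output = tf.random.normal([32, 6 * 3])           # 6 mixture components x (logmix, mean, logstd)
logmix, mean, logstd = _get_mdn_coef(output)     # each of shape [32, 6]
weights_sum = tf.reduce_sum(tf.exp(logmix), -1)  # ~1.0 for every row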

Example 5: _log_prob_from_logits

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def _log_prob_from_logits(logits):
  return logits - tf.reduce_logsumexp(logits, axis=2, keep_dims=True) 
Developer: google-research, Project: language, Lines: 4, Source: beam_search.py

Example 6: _log_prob_from_logits

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def _log_prob_from_logits(logits):
  return logits - tf.reduce_logsumexp(logits, axis=2, keepdims=True) 
Developer: tensorflow, Project: models, Lines: 4, Source: beam_search_v1.py
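Examples 5 and 6 perform the same beam-search normalization; the only difference is the argument name, since keep_dims is the older, deprecated spelling of keepdims in tf.compat.v1. A hedged sketch of how either function would be applied to beam-search logits (shapes are illustrative, assuming the function above is in scope):

import tensorflow.compat.v1 as tf

logits = tf.random.normal([8, 4, 32000])     # [batch, beam_size, vocab]
log_probs = _log_prob_from_logits(logits)    # normalized over the vocab axis (axis=2)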

Example 7: mixture_of_softmaxes

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def mixture_of_softmaxes(x, k, e, to_logits):
  """A slower, but supposedly more flexible softmax.

  See "Breaking the Softmax Bottleneck: A High-Rank RNN Language Model"
  by Yang et al, 2017.

  Args:
    x: A 2d tensor of shape [b, *]. Typically the output of an RNN cell.
    k: The number of mixture components.
    e: The embedding size. Often the same as the second dimension of x.
    to_logits: A function that takes a [b*k, e] tensor as its argument and
        transforms it into shape [b*k, v] where v is the vocabulary size.

  Returns:
    A [b, v] tensor of log probabilities. Each element is computed from
    the mixture of the k components. The components share most of the
    parameters (i.e. those in to_logits), but they have a smaller number
    of non-shared parameters (those in the projections).
  """
  # TODO(melisgl): For training where the entire output distribution is not
  # needed, maybe sparse_softmax_cross_entropy_with_logits would be more
  # efficient.
  if True:  # pylint: disable=using-constant-test
    # This log-domain implementation seems preferable, but it uses much more
    # memory for some reason.
    b = tf.shape(x)[0]
    p_b_ke = tf.tanh(linear(x, k*e, True, scope='projection'))
    p_bk_e = tf.reshape(p_b_ke, [b*k, e])
    log_mixture_weights_b_k = tf.nn.log_softmax(
        linear(x, k, False, scope='mos_weights'))
    log_mixture_weights_b_k_1 = tf.reshape(log_mixture_weights_b_k, [b, k, 1])
    logits_bk_v = to_logits(p_bk_e)
    logprobs_bk_v = tf.nn.log_softmax(logits_bk_v)
    logprobs_b_k_v = tf.reshape(logprobs_bk_v, [b, k, -1])
    logprobs_b_v = tf.reduce_logsumexp(
        logprobs_b_k_v + log_mixture_weights_b_k_1,
        axis=1)
    return logprobs_b_v
  else:
    # Alternatively, calculate with probabilities directly.
    b = tf.shape(x)[0]
    p_b_ke = tf.tanh(linear(x, k*e, True, scope='projection'))
    p_bk_e = tf.reshape(p_b_ke, [b*k, e])
    mixture_weights_b_k = tf.nn.softmax(
        linear(x, k, False, scope='mos_weights'))
    mixture_weights_b_k_1 = tf.reshape(mixture_weights_b_k, [b, k, 1])
    logits_bk_v = to_logits(p_bk_e)
    probs_bk_v = tf.nn.softmax(logits_bk_v)
    probs_b_k_v = tf.reshape(probs_bk_v, [b, k, -1])
    probs_b_v = tf.reduce_sum(
        probs_b_k_v * mixture_weights_b_k_1,
        axis=1)
    return tf.log(probs_b_v+1e-8) 
Developer: deepmind, Project: lamb, Lines: 55, Source: utils.py
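mixture_of_softmaxes calls a linear(x, size, bias, scope) helper that is not shown here. A minimal sketch of what such a projection could look like (hypothetical; the real utils.py in lamb may differ):

import tensorflow.compat.v1 as tf

def linear(x, output_size, use_bias, scope=None):
  """Hypothetical dense projection: x @ W (+ b) under a variable scope."""
  with tf.variable_scope(scope or 'linear'):
    input_size = x.get_shape().as_list()[-1]
    w = tf.get_variable('w', [input_size, output_size])
    y = tf.matmul(x, w)
    if use_bias:
      y += tf.get_variable('b', [output_size], initializer=tf.zeros_initializer())
    return y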

Example 8: compute_iw_marginal

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_logsumexp [as alias]
def compute_iw_marginal(
      self, targets, targets_mask, decoder_self_attention_bias, features,
      n_samples, reduce_mean=True, **kwargs):
    hparams = self._hparams
    z_q, log_q_z, _ = self.sample_q(
        targets, targets_mask, decoder_self_attention_bias,
        n_samples=n_samples, temp=1.0, **kwargs)  # [K*B, L, C]
    iw_kwargs = {key: ops.prepare_for_iw(value, n_samples) for (
        key, value) in kwargs.items()}
    iw_targets_mask = ops.prepare_for_iw(targets_mask, n_samples)
    iw_decoder_self_attention_bias = (
        common_attention.attention_bias_ignore_padding(1.0 - iw_targets_mask))
    iw_features = copy.copy(features)
    iw_features["targets"] = ops.prepare_for_iw(
        features["targets"], n_samples)

    log_p_z_base, log_abs_det = self.compute_prior_log_prob(
        z_q, iw_targets_mask, iw_decoder_self_attention_bias,
        check_invertibility=False, **iw_kwargs)
    log_p_z = log_p_z_base + log_abs_det

    body_output = ops.decoder(
        "decoder", z_q, hparams, iw_decoder_self_attention_bias, **iw_kwargs)
    logits = self.top(body_output, iw_features)
    numerator, denominator = self.loss_iw(logits, iw_features)
    numerator = tf.reduce_sum(numerator[..., 0, 0], 1)  # [K*B]
    denominator = tf.reduce_sum(denominator[..., 0, 0], 1)  # [K*B]
    log_p_x = -1 * numerator / denominator
    log_q_z = gops.reduce_mean_over_l_sum_over_c(log_q_z, iw_targets_mask)
    log_p_z = log_p_z / tf.reduce_sum(iw_targets_mask, 1)

    log_p_x, log_q_z, log_p_z = [ops.unprepare_for_iw(ii, n_samples) for ii in [
        log_p_x, log_q_z, log_p_z]]

    log_w_n = log_p_z - log_q_z
    log_w_n = tf.nn.log_softmax(log_w_n, axis=0)  # [K, B]

    iw_marginal = log_p_x + log_w_n
    iw_marginal = tf.reduce_logsumexp(iw_marginal, 0)  # [B]

    if reduce_mean:
      iw_marginal = tf.cast(tf.reduce_mean(iw_marginal, 0), tf.float32)  # [1]
    else:
      iw_marginal = tf.cast(iw_marginal, tf.float32)  # [B]
    return iw_marginal 
Developer: tensorflow, Project: tensor2tensor, Lines: 47, Source: transformer_vae_flow_prior.py
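Stripped of the model-specific plumbing, the final steps above implement a self-normalized importance-weighted estimate: log-softmax the log-weights over the K samples, add the per-sample log-likelihoods, and reduce with logsumexp over the sample axis. A standalone sketch under those assumptions (tensors are random placeholders, not model outputs):

import tensorflow.compat.v1 as tf

log_p_x = tf.random.normal([16, 8])          # [K samples, B] hypothetical log-likelihoods
log_w = tf.random.normal([16, 8])            # [K, B] unnormalized log importance weights

log_w_n = tf.nn.log_softmax(log_w, axis=0)   # normalize the weights over K
iw_marginal = tf.reduce_logsumexp(log_p_x + log_w_n, 0)  # [B]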


Note: The tensorflow.compat.v1.reduce_logsumexp examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license.