

Python v1.exp Method Code Examples

This article collects typical usage examples of the Python method tensorflow.compat.v1.exp. If you have been wondering what exactly v1.exp does, or how to use it in practice, the curated code examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module.


Below are 15 code examples of the v1.exp method, sorted by popularity by default.
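
To orient readers first, here is a minimal, self-contained sketch of calling the method directly (assuming TensorFlow 2.x with the compat.v1 API, run in graph mode to match the TF1-style examples below):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # graph mode, as in the examples below

x = tf.constant([0.0, 1.0, 2.0])
y = tf.exp(x)  # element-wise e**x

with tf.Session() as sess:
    print(sess.run(y))  # approximately [1.0, 2.7182817, 7.389056]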

Example 1: _learning_rate_warmup

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import exp [as alias]
def _learning_rate_warmup(warmup_steps, warmup_schedule="exp", hparams=None):
  """Learning rate warmup multiplier."""
  if not warmup_steps:
    return tf.constant(1.)

  tf.logging.info("Applying %s learning rate warmup for %d steps",
                  warmup_schedule, warmup_steps)

  warmup_steps = tf.to_float(warmup_steps)
  global_step = _global_step(hparams)

  if warmup_schedule == "exp":
    return tf.exp(tf.log(0.01) / warmup_steps)**(warmup_steps - global_step)
  else:
    assert warmup_schedule == "linear"
    start = tf.constant(0.35)
    return ((tf.constant(1.) - start) / warmup_steps) * global_step + start 
Developer: tensorflow, Project: tensor2tensor, Lines: 19, Source: learning_rate.py
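
The exponential branch above is chosen so the multiplier starts at 0.01 when global_step is 0 and reaches exactly 1.0 at warmup_steps. A plain-Python check of the same formula (illustrative, not part of tensor2tensor):

import math

def exp_warmup(step, warmup_steps):
    # exp(log(0.01) / warmup_steps) ** (warmup_steps - step), as in the code above.
    return math.exp(math.log(0.01) / warmup_steps) ** (warmup_steps - step)

print(exp_warmup(0, 1000))     # 0.01 -- training starts at 1% of the base rate
print(exp_warmup(500, 1000))   # 0.1  -- i.e. 0.01 ** 0.5 at the halfway point
print(exp_warmup(1000, 1000))  # 1.0  -- warmup complete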

Example 2: single_conv_dist

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import exp [as alias]
def single_conv_dist(name, x, output_channels=None):
  """A 3x3 convolution mapping x to a standard normal distribution at init.

  Args:
    name: variable scope.
    x: 4-D Tensor.
    output_channels: number of channels of the mean and std.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    x_shape = common_layers.shape_list(x)
    if output_channels is None:
      output_channels = x_shape[-1]
    mean_log_scale = conv("conv2d", x, output_channels=2*output_channels,
                          conv_init="zeros", apply_actnorm=False)
    mean = mean_log_scale[:, :, :, 0::2]
    log_scale = mean_log_scale[:, :, :, 1::2]
    return tf.distributions.Normal(mean, tf.exp(log_scale)) 
Developer: tensorflow, Project: tensor2tensor, Lines: 19, Source: glow_ops.py

Example 3: scale_gaussian_prior

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import exp [as alias]
def scale_gaussian_prior(name, z, logscale_factor=3.0, trainable=True):
  """Returns N(s^i * z^i, std^i) where s^i and std^i are pre-component.

  s^i is a learnable parameter with identity initialization.
  std^i is optionally learnable with identity initialization.

  Args:
    name: variable scope.
    z: input tensor.
    logscale_factor: equivalent to scaling up the learning_rate by a factor
                     of logscale_factor.
    trainable: Whether or not std^i is learnt.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    z_shape = common_layers.shape_list(z)
    latent_multiplier = tf.get_variable(
        "latent_multiplier", shape=z_shape, dtype=tf.float32,
        initializer=tf.ones_initializer())
    log_scale = tf.get_variable(
        "log_scale_latent", shape=z_shape, dtype=tf.float32,
        initializer=tf.zeros_initializer(), trainable=trainable)
    log_scale = log_scale * logscale_factor
    return tfp.distributions.Normal(
        loc=latent_multiplier * z, scale=tf.exp(log_scale)) 
Developer: tensorflow, Project: tensor2tensor, Lines: 26, Source: glow_ops.py

Example 4: bottleneck

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import exp [as alias]
def bottleneck(self, x):  # pylint: disable=arguments-differ
    hparams = self.hparams
    if hparams.unordered:
      return super(AutoencoderOrderedDiscrete, self).bottleneck(x)
    noise = hparams.bottleneck_noise
    hparams.bottleneck_noise = 0.0  # We'll add noise below.
    x, loss = discretization.parametrized_bottleneck(x, hparams)
    hparams.bottleneck_noise = noise
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
      # We want a number p such that p^bottleneck_bits = 1 - noise / 2.
      # So log(p) = log(1 - noise / 2) / bottleneck_bits, computed via log1p.
      log_p = tf.log1p(-float(noise) / 2) / float(hparams.bottleneck_bits)
      # Probabilities of keeping a bit are p, p^2, p^3, ..., p^bottleneck_bits.
      noise_mask = 1.0 - tf.exp(tf.cumsum(tf.zeros_like(x) + log_p, axis=-1))
      # Having the no-noise mask, we can make noise just uniformly at random.
      ordered_noise = tf.random_uniform(tf.shape(x))
      # We want our noise to be 1s at the start and random {-1, 1} bits later.
      ordered_noise = tf.to_float(tf.less(noise_mask, ordered_noise))
      # Now we flip the bits of x on the noisy positions (ordered and normal).
      x *= 2.0 * ordered_noise - 1
    return x, loss 
Developer: tensorflow, Project: tensor2tensor, Lines: 23, Source: autoencoders.py
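
The cumulative sum builds per-bit keep probabilities p, p^2, ..., p^bottleneck_bits, so later (less significant) bits are flipped more often. A numpy sketch of those probabilities (illustrative values, not from the project):

import numpy as np

noise, bottleneck_bits = 0.1, 8
log_p = np.log1p(-noise / 2) / bottleneck_bits
# exp of the running sum gives p, p**2, ..., p**bottleneck_bits.
keep_prob = np.exp(np.cumsum(np.full(bottleneck_bits, log_p)))
print(keep_prob)      # monotonically decreasing: early bits are kept most often
print(keep_prob[-1])  # p ** bottleneck_bits == 1 - noise / 2 == 0.95 here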

Example 5: actnorm

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import exp [as alias]
def actnorm(name, x, x_mask, inverse, init, logscale_factor=3.0):
  """Activation normalization, returns logabsdet of shape [B]."""
  eps = tf.keras.backend.epsilon()
  n_channels = common_layers.shape_list(x)[2]

  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    x_mean, x_var = gops.moments_over_bl(x, x_mask)
    b = gops.get_variable_ddi(
        "b", (n_channels), -x_mean, init, tf.zeros_initializer)
    log_w_init = -0.5 * tf.log(x_var + eps) / logscale_factor
    log_w = gops.get_variable_ddi(
        "log_w", (n_channels), log_w_init, init,
        tf.zeros_initializer) * logscale_factor

    if not inverse:
      x = (x + b) * tf.exp(log_w)
    else:
      x = x * tf.exp(-log_w) - b

    x_length = tf.reduce_sum(x_mask, -1)
    logabsdet = x_length * tf.reduce_sum(log_w)
    if inverse:
      logabsdet *= -1
    return x, logabsdet 
Developer: tensorflow, Project: tensor2tensor, Lines: 26, Source: transformer_glow_layers.py

Example 6: kl_divergence

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import exp [as alias]
def kl_divergence(mu, log_var, mu_p=0.0, log_var_p=0.0):
  """KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1).

  Args:
    mu: mu parameter of the distribution.
    log_var: log(var) parameter of the distribution.
    mu_p: optional mu from a learned prior distribution
    log_var_p: optional log(var) from a learned prior distribution
  Returns:
    the KL loss.
  """

  batch_size = shape_list(mu)[0]
  prior_distribution = tfp.distributions.Normal(
      mu_p, tf.exp(tf.multiply(0.5, log_var_p)))
  posterior_distribution = tfp.distributions.Normal(
      mu, tf.exp(tf.multiply(0.5, log_var)))
  kld = tfp.distributions.kl_divergence(posterior_distribution,
                                        prior_distribution)
  return tf.reduce_sum(kld) / to_float(batch_size) 
Developer: tensorflow, Project: tensor2tensor, Lines: 22, Source: common_layers.py
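
For a standard-normal prior (mu_p = 0, log_var_p = 0) this KL has the familiar per-dimension closed form 0.5 * (exp(log_var) + mu**2 - 1 - log_var). A numpy check of that identity against a Monte Carlo estimate (illustrative only):

import numpy as np

rng = np.random.default_rng(0)
mu, log_var = 0.5, -1.0
sigma = np.exp(0.5 * log_var)

closed_form = 0.5 * (np.exp(log_var) + mu**2 - 1.0 - log_var)

# Monte Carlo: KL(q || p) = E_q[log q(z) - log p(z)] with z ~ q = N(mu, sigma^2).
z = rng.normal(mu, sigma, size=1_000_000)
log_q = -0.5 * ((z - mu) ** 2 / sigma**2 + log_var + np.log(2 * np.pi))
log_p = -0.5 * (z**2 + np.log(2 * np.pi))
monte_carlo = np.mean(log_q - log_p)

print(closed_form, monte_carlo)  # the two agree to about three decimal places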

Example 7: vae

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import exp [as alias]
def vae(x, z_size, name=None):
  """Simple variational autoencoder without discretization.

  Args:
    x: Input to the discretization bottleneck.
    z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
    name: Name for the bottleneck scope.

  Returns:
    The latent z, the KL loss, mu and log_sigma.
  """
  with tf.variable_scope(name, default_name="vae"):
    mu = tf.layers.dense(x, z_size, name="mu")
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    shape = common_layers.shape_list(x)
    epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss, mu, log_sigma 
Developer: tensorflow, Project: tensor2tensor, Lines: 24, Source: discretization.py
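
The sampling line z = mu + tf.exp(log_sigma / 2) * epsilon is the standard reparameterization trick: the randomness comes from epsilon alone, so gradients can flow through mu and log_sigma. A numpy sketch (note that, as in the code above, log_sigma actually holds the log-variance):

import numpy as np

rng = np.random.default_rng(0)
mu, log_sigma = 1.0, -2.0

# Sample eps ~ N(0, 1), then shift and scale deterministically.
eps = rng.normal(size=100_000)
z = mu + np.exp(log_sigma / 2) * eps

print(z.mean(), z.std())  # approximately mu and exp(log_sigma / 2)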

Example 8: call

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import exp [as alias]
def call(self, x, training=False):
    """Forward method.

    Args:
      x: `[batch, in_grid_res, in_grid_res, in_grid_res, in_features]` tensor,
        input voxel grid.
      training: bool, flag indicating whether model is in training mode.

    Returns:
      `[batch, codelen]` tensor, output voxel grid.
    """
    x = self.conv_in(x)
    x = tf.nn.relu(x)
    for conv in self.down_conv:
      x = conv(x, training=training)
      x = self.down_pool(x, training=training)  # [batch, res, res, res, c]
    x = tf.squeeze(x, axis=(1, 2, 3))  # [batch, c]
    x = self.fc_out(x)  # [batch, code_len*2]
    mu, logvar = x[:, :self.codelen], x[:, self.codelen:]
    noise = tf.random.normal(mu.shape)
    std = tf.exp(0.5 * logvar)
    x_out = mu + noise * std
    return x_out, mu, logvar 
Developer: tensorflow, Project: graphics, Lines: 25, Source: model_g2v.py

Example 9: bottleneck

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import exp [as alias]
def bottleneck(self, x):
    z_size = self.hparams.bottleneck_bits
    x_shape = common_layers.shape_list(x)
    with tf.variable_scope('bottleneck', reuse=tf.AUTO_REUSE):
      mu = x[..., :self.hparams.bottleneck_bits]
      if self.hparams.mode != tf.estimator.ModeKeys.TRAIN:
        return mu, 0.0  # No sampling or kl loss on eval.
      log_sigma = x[..., self.hparams.bottleneck_bits:]
      epsilon = tf.random_normal(x_shape[:-1] + [z_size])
      z = mu + tf.exp(log_sigma / 2) * epsilon
      kl = 0.5 * tf.reduce_mean(
          tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1)
      # This is the 'free bits' trick mentioned in Kingma et al. (2016)
      free_bits = self.hparams.free_bits
      kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss * self.hparams.kl_beta 
Developer: magenta, Project: magenta, Lines: 18, Source: image_vae.py

Example 10: specgrams_to_stfts

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import exp [as alias]
def specgrams_to_stfts(self, specgrams):
    """Converts specgrams to stfts.

    Args:
      specgrams: Tensor of log magnitudes and instantaneous frequencies,
        shape [batch, time, freq, 2].

    Returns:
      stfts: Complex64 tensor of stft, shape [batch, time, freq, 1].
    """
    logmag = specgrams[:, :, :, 0]
    p = specgrams[:, :, :, 1]

    mag = tf.exp(logmag)

    if self._ifreq:
      phase_angle = tf.cumsum(p * np.pi, axis=-2)
    else:
      phase_angle = p * np.pi

    return spectral_ops.polar2rect(mag, phase_angle)[:, :, :, tf.newaxis] 
Developer: magenta, Project: magenta, Lines: 23, Source: specgrams_helper.py
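
When _ifreq is set, the second channel stores instantaneous frequency: the per-frame phase difference rescaled by pi. The cumulative sum times pi therefore recovers the unwrapped phase, as in this numpy sketch (illustrative values, not from the project):

import numpy as np

# A tone advancing by pi/4 per frame has a constant instantaneous frequency of 0.25.
ifreq = np.array([0.25, 0.25, 0.25, 0.25])
phase = np.cumsum(ifreq) * np.pi
print(phase / np.pi)  # [0.25 0.5 0.75 1.0] -- the unwrapped phase in units of pi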

Example 11: testSlicewiseOperationAndGenericGradOperation

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import exp [as alias]
def testSlicewiseOperationAndGenericGradOperation(self):
    slicewise_operation = mtf.SlicewiseOperation(
        tf.exp,
        [self.x],
        [self.x.shape],
        [self.x.dtype],
        splittable_dims=[self.a_dim],  # pretend only dim "a" can be split.
        grad_function=lambda op, dy: [dy * op.outputs[0]],
        name="component-wise exp")

    self.assertEqual(slicewise_operation.splittable_dims, frozenset(["a"]))
    self.assertEqual(slicewise_operation.unsplittable_dims, frozenset(["b"]))

    generic_grad_operation = mtf.GenericGradOperation(slicewise_operation,
                                                      [self.x])

    self.assertEqual(generic_grad_operation.splittable_dims,
                     frozenset(["a", "b"]))
    self.assertEqual(generic_grad_operation.unsplittable_dims,
                     frozenset()) 
Developer: tensorflow, Project: mesh, Lines: 22, Source: ops_test.py

Example 12: get_timing_signal

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import exp [as alias]
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. Number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int
  Returns:
    Tensor of shape [length, 2 * num_timescales].
  """
  positions = tf.to_float(tf.range(length))
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / (num_timescales - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) 
Developer: google-research, Project: language, Lines: 23, Source: tensor_utils.py
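
A numpy port of the same computation makes the output shape concrete (a sketch, not from the project):

import numpy as np

length, num_timescales = 4, 3
min_timescale, max_timescale = 1.0, 1e4

positions = np.arange(length, dtype=np.float32)
log_increment = np.log(max_timescale / min_timescale) / (num_timescales - 1)
inv_timescales = min_timescale * np.exp(np.arange(num_timescales) * -log_increment)
scaled_time = positions[:, None] * inv_timescales[None, :]
signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
print(signal.shape)  # (4, 6), i.e. [length, 2 * num_timescales]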

Example 13: safe_cumprod

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import exp [as alias]
def safe_cumprod(x, *args, **kwargs):
  """Computes cumprod of x in logspace using cumsum to avoid underflow.

  The cumprod function and its gradient can result in numerical instabilities
  when its argument has very small and/or zero values.  As long as the argument
  is all positive, we can instead compute the cumulative product as
  exp(cumsum(log(x))).  This function can be called identically to tf.cumprod.

  Args:
    x: Tensor to take the cumulative product of.
    *args: Passed on to cumsum; these are identical to those in cumprod.
    **kwargs: Passed on to cumsum; these are identical to those in cumprod.
  Returns:
    Cumulative product of x.
  """
  with tf.name_scope(None, "SafeCumprod", [x]):
    x = tf.convert_to_tensor(x, name="x")
    tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
    return tf.exp(
        tf.cumsum(tf.log(tf.clip_by_value(x, tiny, 1)), *args, **kwargs)) 
Developer: google-research, Project: language, Lines: 22, Source: attention.py
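
For positive x the identity cumprod(x) == exp(cumsum(log(x))) holds exactly; the log-space form simply keeps the gradient finite when entries are tiny. A quick numpy check (illustrative):

import numpy as np

x = np.array([0.5, 0.2, 1e-30, 0.9])
tiny = np.finfo(x.dtype).tiny

direct = np.cumprod(x)
in_logspace = np.exp(np.cumsum(np.log(np.clip(x, tiny, 1.0))))

print(direct)       # [5.0e-01 1.0e-01 1.0e-31 9.0e-32]
print(in_logspace)  # same forward values; the benefit is gradient stability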

Example 14: _build

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import exp [as alias]
def _build(self, a, b):  # pylint: disable=arguments-differ
    # Normalize inputs.
    a_normed = tf.nn.l2_normalize(a, axis=-1)
    b_normed = tf.nn.l2_normalize(b, axis=-1)
    # <float32> [batch_size, seq_len_a, seq_len_b].
    cosine_similarity = tf.matmul(a_normed, b_normed, transpose_b=True)
    pairwise_distances = 0.5 * (1. - cosine_similarity)
    # Compute log attention distributions.
    # <float32> [batch_size, seq_len_a, seq_len_b].
    att_a_b = tf.nn.softmax(pairwise_distances, axis=2)
    # <float32> [batch_size, seq_len_b, seq_len_a].
    att_b_a = tf.transpose(tf.nn.softmax(pairwise_distances, axis=1), [0, 2, 1])
    # Compute cross-attention contexts.
    # <float32> [batch_size, seq_len_a, size].
    ctx_a_b = tf.matmul(att_a_b, b)
    # <float32> [batch_size, seq_len_b, size].
    ctx_b_a = tf.matmul(att_b_a, a)
    # Compute entropy loss.
    loss = tf.reduce_mean(
        self._dist_fn(a, ctx_a_b, reduce_axis=[1, 2]) +
        self._dist_fn(b, ctx_b_a, reduce_axis=[1, 2]))
    # loss = - tf.reduce_mean(
    #     tf.reduce_sum(log_att_input1 * tf.exp(log_att_input1), axis=[1, 2]) +
    #     tf.reduce_sum(log_att_input2 * tf.exp(log_att_input2), axis=[1, 2]))
    return loss 
Developer: google-research, Project: language, Lines: 27, Source: losses.py

Example 15: apply_box_deltas_graph

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import exp [as alias]
def apply_box_deltas_graph(boxes, deltas):
    """Applies the given deltas to the given boxes.
    boxes: [N, (y1, x1, y2, x2)] boxes to update
    deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply
    """
    # Convert to y, x, h, w
    height = boxes[:, 2] - boxes[:, 0]
    width = boxes[:, 3] - boxes[:, 1]
    center_y = boxes[:, 0] + 0.5 * height
    center_x = boxes[:, 1] + 0.5 * width
    # Apply deltas
    center_y += deltas[:, 0] * height
    center_x += deltas[:, 1] * width
    height *= tf.exp(deltas[:, 2])
    width *= tf.exp(deltas[:, 3])
    # Convert back to y1, x1, y2, x2
    y1 = center_y - 0.5 * height
    x1 = center_x - 0.5 * width
    y2 = y1 + height
    x2 = x1 + width
    result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
    return result 
Developer: OCR-D, Project: ocrd_anybaseocr, Lines: 24, Source: model.py
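
A worked example with a single box shows the arithmetic (hypothetical numbers):

import numpy as np

# One box (y1, x1, y2, x2) of height 10 and width 20; the delta shifts the
# center by +10% of each side and doubles both sides (log(dh) = log(dw) = log 2).
box = np.array([0.0, 0.0, 10.0, 20.0])
dy, dx, log_dh, log_dw = 0.1, 0.1, np.log(2.0), np.log(2.0)

h, w = box[2] - box[0], box[3] - box[1]
cy, cx = box[0] + 0.5 * h, box[1] + 0.5 * w
cy, cx = cy + dy * h, cx + dx * w
h, w = h * np.exp(log_dh), w * np.exp(log_dw)
print([cy - 0.5 * h, cx - 0.5 * w, cy + 0.5 * h, cx + 0.5 * w])
# [-4.0, -8.0, 16.0, 32.0]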


Note: The tensorflow.compat.v1.exp method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.