

Python v1.random_normal Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.random_normal method in Python. If you are wondering what v1.random_normal does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore other usage examples from the tensorflow.compat.v1 module.


The 15 code examples of v1.random_normal below are sorted by popularity by default.
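Before the individual examples, here is a minimal sketch of the method itself (the shape, mean, stddev, and seed values are illustrative, not taken from any of the projects below). v1.random_normal returns a tensor of the given shape filled with samples drawn from a normal distribution:

# A minimal sketch of tf.compat.v1.random_normal in TF1-style graph mode.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Draw a [2, 3] tensor of samples from N(mean=0.0, stddev=1.0);
# `seed` makes the draw reproducible within a graph.
noise = tf.random_normal([2, 3], mean=0.0, stddev=1.0, dtype=tf.float32, seed=42)

with tf.Session() as sess:
  print(sess.run(noise))  # a 2x3 array of Gaussian samples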

Example 1: sample_q

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def sample_q(
      self, targets, targets_mask, decoder_self_attention_bias, n_samples,
      temp, **kwargs):
    hparams = self._hparams
    batch_size, targets_max_length = common_layers.shape_list(targets_mask)[:2]
    q_params = ops.posterior("posterior", hparams, targets, targets_mask,
                             decoder_self_attention_bias, **kwargs)
    q_dist = gops.diagonal_normal(q_params, "posterior")
    loc, scale = q_dist.loc, q_dist.scale
    z_shape = [batch_size, targets_max_length, hparams.latent_size]
    iw_z_shape = [n_samples*batch_size, targets_max_length, hparams.latent_size]
    if n_samples == 1:
      noise = tf.random_normal(z_shape, stddev=temp)
      z_q = loc + scale * noise
      log_q_z = q_dist.log_prob(z_q)  # [B, L, C]
    else:
      noise = tf.random_normal([n_samples] + z_shape, stddev=temp)
      z_q = loc[tf.newaxis, ...] + scale[tf.newaxis, ...] * noise
      log_q_z = q_dist.log_prob(z_q)  # [K, B, L, C]
      z_q = tf.reshape(z_q, iw_z_shape)
      log_q_z = tf.reshape(log_q_z, iw_z_shape)
    return z_q, log_q_z, q_dist 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 24, Source file: transformer_vae_flow_prior.py

Example 2: bottleneck

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def bottleneck(self, x):
    hparams = self.hparams
    z_size = hparams.bottleneck_bits
    x_shape = common_layers.shape_list(x)
    with tf.variable_scope("vae"):
      mu = tf.layers.dense(x, z_size, name="mu")
      if hparams.mode != tf.estimator.ModeKeys.TRAIN:
        return mu, 0.0  # No sampling or kl loss on eval.
      log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
      epsilon = tf.random_normal(x_shape[:-1] + [z_size])
      z = mu + tf.exp(log_sigma / 2) * epsilon
      kl = 0.5 * tf.reduce_mean(
          tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
      free_bits = z_size // 4
      kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss * hparams.kl_beta 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 18, Source file: autoencoders.py
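A note on the KL term above: since exp(log_sigma / 2) is used as the scale, log_sigma here is $\log\sigma^{2}$, and tf.expm1(x) computes $e^{x}-1$ stably, so the expression is exactly the closed-form KL divergence between the diagonal Gaussian posterior and a standard normal prior:

$$\mathrm{KL}\!\left(\mathcal{N}(\mu,\sigma^{2})\,\|\,\mathcal{N}(0,1)\right) = \tfrac{1}{2}\left(\sigma^{2} + \mu^{2} - 1 - \log\sigma^{2}\right) = \tfrac{1}{2}\left(\operatorname{expm1}(\log\sigma^{2}) + \mu^{2} - \log\sigma^{2}\right)$$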

Example 3: testDmlLoss

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def testDmlLoss(self, batch, height, width, num_mixtures, reduce_sum):
    channels = 3
    pred = tf.random_normal([batch, height, width, num_mixtures * 10])
    labels = tf.random_uniform([batch, height, width, channels],
                               minval=0, maxval=256, dtype=tf.int32)
    actual_loss_num, actual_loss_den = common_layers.dml_loss(
        pred=pred, labels=labels, reduce_sum=reduce_sum)
    actual_loss = actual_loss_num / actual_loss_den

    real_labels = common_layers.convert_rgb_to_symmetric_real(labels)
    expected_loss = common_layers.discretized_mix_logistic_loss(
        pred=pred, labels=real_labels) / channels
    if reduce_sum:
      expected_loss = tf.reduce_mean(expected_loss)

    actual_loss_val, expected_loss_val = self.evaluate(
        [actual_loss, expected_loss])
    self.assertAllClose(actual_loss_val, expected_loss_val) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 20, Source file: common_layers_test.py

Example 4: vae

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def vae(x, z_size, name=None):
  """Simple variational autoencoder without discretization.

  Args:
    x: Input to the bottleneck.
    z_size: Dimensionality of the latent z.
    name: Name for the bottleneck scope.

  Returns:
    Sampled latent z, KL loss, mu and log_sigma.
  """
  with tf.variable_scope(name, default_name="vae"):
    mu = tf.layers.dense(x, z_size, name="mu")
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    shape = common_layers.shape_list(x)
    epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss, mu, log_sigma 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 24, Source file: discretization.py

Example 5: testLocalUnmaskedAttention2D

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def testLocalUnmaskedAttention2D(self, batch, heads, length,
                                   depth_k, depth_v, query_shape):
    if batch is None:
      batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)
    q = tf.random_normal([batch, heads, length, length, depth_k])
    k = tf.random_normal([batch, heads, length, length, depth_k])
    v = tf.random_normal([batch, heads, length, length, depth_v])
    output = common_attention.local_attention_2d(
        q,
        k,
        v,
        query_shape=query_shape,
        memory_flange=(3, 3))
    if isinstance(batch, tf.Tensor):
      batch, res = self.evaluate([batch, output])
    else:
      res = self.evaluate(output)

    self.assertEqual(res.shape, (batch, heads, length, length, depth_v)) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 21, Source file: common_attention_test.py

Example 6: testDilatedAttention

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def testDilatedAttention(self, batch, heads, length, depth_v, block_length):
    if batch is None:
      batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)
    q = tf.random_normal([batch, heads, length, depth_v])
    k = tf.random_normal([batch, heads, length, depth_v])
    v = tf.random_normal([batch, heads, length, depth_v])
    output = common_attention.dilated_self_attention_1d(
        q, k, v,
        query_block_size=block_length,
        memory_block_size=block_length,
        gap_size=2,
        num_memory_blocks=2)
    if isinstance(batch, tf.Tensor):
      batch, res = self.evaluate([batch, output])
    else:
      res = self.evaluate(output)

    self.assertEqual(res.shape, (batch, heads, length, depth_v)) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 20, Source file: common_attention_test.py

Example 7: testMaskedDilatedAttention

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def testMaskedDilatedAttention(self, batch, heads, length, depth_v,
                                 block_length):
    if batch is None:
      batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)
    q = tf.random_normal([batch, heads, length, depth_v])
    k = tf.random_normal([batch, heads, length, depth_v])
    v = tf.random_normal([batch, heads, length, depth_v])
    output = common_attention.masked_dilated_self_attention_1d(
        q, k, v,
        query_block_size=block_length,
        memory_block_size=block_length,
        gap_size=2,
        num_memory_blocks=2)
    if isinstance(batch, tf.Tensor):
      batch, res = self.evaluate([batch, output])
    else:
      res = self.evaluate(output)

    self.assertEqual(res.shape, (batch, heads, length, depth_v)) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 21, Source file: common_attention_test.py

Example 8: bottleneck

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def bottleneck(self, x):
    z_size = self.hparams.bottleneck_bits
    x_shape = common_layers.shape_list(x)
    with tf.variable_scope('bottleneck', reuse=tf.AUTO_REUSE):
      mu = x[..., :self.hparams.bottleneck_bits]
      if self.hparams.mode != tf.estimator.ModeKeys.TRAIN:
        return mu, 0.0  # No sampling or kl loss on eval.
      log_sigma = x[..., self.hparams.bottleneck_bits:]
      epsilon = tf.random_normal(x_shape[:-1] + [z_size])
      z = mu + tf.exp(log_sigma / 2) * epsilon
      kl = 0.5 * tf.reduce_mean(
          tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1)
      # This is the 'free bits' trick mentioned in Kingma et al. (2016)
      free_bits = self.hparams.free_bits
      kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss * self.hparams.kl_beta 
Developer ID: magenta, Project: magenta, Lines of code: 18, Source file: image_vae.py

Example 9: testExternalBias

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def testExternalBias(self):
    batch_size = 4
    num_hidden = 6
    num_dims = 8
    test_inputs = tf.random_normal(shape=(batch_size, num_dims))
    test_b_enc = tf.random_normal(shape=(batch_size, num_hidden))
    test_b_dec = tf.random_normal(shape=(batch_size, num_dims))

    nade = Nade(num_dims, num_hidden)
    log_prob, cond_probs = nade.log_prob(test_inputs, test_b_enc, test_b_dec)
    sample, sample_prob = nade.sample(b_enc=test_b_enc, b_dec=test_b_dec)
    with self.test_session() as sess:
      sess.run([tf.global_variables_initializer()])
      self.assertEqual(log_prob.eval().shape, (batch_size,))
      self.assertEqual(cond_probs.eval().shape, (batch_size, num_dims))
      self.assertEqual(sample.eval().shape, (batch_size, num_dims))
      self.assertEqual(sample_prob.eval().shape, (batch_size,)) 
Developer ID: magenta, Project: magenta, Lines of code: 19, Source file: nade_test.py

Example 10: conv_kernel_initializer

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def conv_kernel_initializer(shape, dtype=None, partition_info=None):
  """Initialization for convolutional kernels.

  The main difference with tf.variance_scaling_initializer is that
  tf.variance_scaling_initializer uses a truncated normal with an uncorrected
  standard deviation, whereas here we use a normal distribution. Similarly,
  tf.initializers.variance_scaling uses a truncated normal with
  a corrected standard deviation.

  Args:
    shape: shape of variable
    dtype: dtype of variable
    partition_info: unused

  Returns:
    an initialization for the variable
  """
  del partition_info
  kernel_height, kernel_width, _, out_filters = shape
  fan_out = int(kernel_height * kernel_width * out_filters)
  return tf.random_normal(
      shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype) 
Developer ID: JunweiLiang, Project: Object_Detection_Tracking, Lines of code: 24, Source file: efficientnet_model.py
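A usage note (a hedged sketch, not from the original project): because conv_kernel_initializer follows the standard TF1 (shape, dtype, partition_info) initializer signature, it can be passed directly as a kernel_initializer; the layer shapes below are arbitrary.

import tensorflow.compat.v1 as tf

# Hypothetical usage: the layer calls conv_kernel_initializer with the
# kernel shape [3, 3, 3, 64] when it creates its variables.
images = tf.placeholder(tf.float32, [None, 32, 32, 3])
conv = tf.layers.conv2d(
    images, filters=64, kernel_size=3, padding="same",
    kernel_initializer=conv_kernel_initializer)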

Example 11: __call__

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def __call__(self, shape, dtype=None, partition_info=None):
    if partition_info is not None:
      raise ValueError('partition_info not supported.')
    if dtype is None:
      dtype = self.dtype

    # Calculate number of non-zero weights
    nnz = 1.
    for d in shape:
      nnz *= d
    nnz *= (1. - self.sparsity)

    input_channels = shape[-2]
    n = nnz / input_channels

    variance = (2. / n)**.5

    return tf.random_normal(shape, 0, variance, dtype, seed=self.seed) 
Developer ID: google-research, Project: rigl, Lines of code: 20, Source file: resnet_model.py

Example 12: _ensure_keep_mask

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def _ensure_keep_mask(self, x):
    if self._keep_mask is None or not self._share_mask:
      shape = tf.shape(x)
      # Calculate the stddev for the normal distribution that
      # matches the stddev of the bernoulli with p=keep_prob.
      stddev = tf.sqrt((1 - self._keep_prob) / self._keep_prob)
      self._keep_mask = tf.random_normal(shape, mean=1.0, stddev=stddev,
                                         dtype=x.dtype)
      self._keep_mask.set_shape(x.get_shape())
    return self._keep_mask 
Developer ID: deepmind, Project: lamb, Lines of code: 12, Source file: dropout.py
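As a quick numerical check of the stddev formula above (a hedged NumPy sketch, not part of the original project): an inverted-dropout Bernoulli mask with keep probability p takes values in {0, 1/p}, has mean 1, and has stddev sqrt((1 - p) / p), which is exactly the stddev given to the Gaussian mask.

import numpy as np

keep_prob = 0.8
# Inverted-dropout mask: values in {0, 1/keep_prob}, mean 1.0.
mask = (np.random.rand(1_000_000) < keep_prob) / keep_prob
# Both numbers agree up to sampling noise: ~0.5 for keep_prob=0.8.
print(mask.std(), np.sqrt((1 - keep_prob) / keep_prob))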

Example 13: random_mask

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def random_mask(shape, k):
  x = tf.random_normal(shape=shape)
  kth_largest = tf.nn.top_k(x, k)[0][:, k-1]
  return tf.to_float(tf.greater_equal(x, tf.expand_dims(kth_largest, 1))) 
Developer ID: deepmind, Project: lamb, Lines of code: 6, Source file: utils.py
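For illustration (a hedged sketch; the shape and k are arbitrary): because the Gaussian draws are continuous, ties occur with probability zero, so each row of the returned mask contains exactly k ones, at the positions of the k largest draws.

# Hypothetical usage: a [4, 10] float mask with exactly 3 ones per row.
mask = random_mask([4, 10], k=3)  # tf.reduce_sum(mask, axis=1) == 3.0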

Example 14: random_mask2

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def random_mask2(shape, k):
  x = tf.random_normal(shape=shape)
  x = tf.transpose(x)
  kth_largest = tf.nn.top_k(x, k)[0][:, k-1]
  mask = tf.to_float(tf.greater_equal(x, tf.expand_dims(kth_largest, 1)))
  return tf.transpose(mask) 
Developer ID: deepmind, Project: lamb, Lines of code: 8, Source file: utils.py

Example 15: testSummarizeLosses

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def testSummarizeLosses(self):
    with tf.Graph().as_default():
      model = t2t_model.T2TModel(hparam.HParams())
      losses = {"training": tf.random_normal([]),
                "extra": tf.random_normal([])}
      outputs = model._summarize_losses(losses)
      self.assertIsNone(outputs, None)
      self.assertEqual(
          len(tf.get_collection(tf.GraphKeys.SUMMARIES, scope="losses")),
          len(losses)) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 12, Source file: t2t_model_test.py


Note: The tensorflow.compat.v1.random_normal examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For distribution and use, please refer to each project's license. Do not republish without permission.