This page collects typical usage examples of the Python method tensorflow.compat.v1.random_normal, gathered from open-source projects. If you have been wondering what v1.random_normal does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of the containing module, tensorflow.compat.v1.
The following 15 code examples of v1.random_normal are shown, sorted by popularity by default.
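Before the examples, here is a minimal sketch of the call itself. The shape, seed, and session workflow are illustrative assumptions (a TF 1.x-style graph/session setup), not taken from any project below:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Draw a [2, 3] tensor of samples from N(mean=0.0, stddev=1.0).
noise = tf.random_normal([2, 3], mean=0.0, stddev=1.0, dtype=tf.float32, seed=42)
with tf.Session() as sess:
  print(sess.run(noise))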
Example 1: sample_q
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def sample_q(
    self, targets, targets_mask, decoder_self_attention_bias, n_samples,
    temp, **kwargs):
  hparams = self._hparams
  batch_size, targets_max_length = common_layers.shape_list(targets_mask)[:2]
  q_params = ops.posterior("posterior", hparams, targets, targets_mask,
                           decoder_self_attention_bias, **kwargs)
  q_dist = gops.diagonal_normal(q_params, "posterior")
  loc, scale = q_dist.loc, q_dist.scale
  z_shape = [batch_size, targets_max_length, hparams.latent_size]
  iw_z_shape = [n_samples * batch_size, targets_max_length,
                hparams.latent_size]
  if n_samples == 1:
    # Reparameterized sample from the diagonal-Gaussian posterior.
    noise = tf.random_normal(z_shape, stddev=temp)
    z_q = loc + scale * noise
    log_q_z = q_dist.log_prob(z_q)  # [B, L, C]
  else:
    # Draw K samples per example for importance weighting, then fold
    # the sample dimension into the batch dimension.
    noise = tf.random_normal([n_samples] + z_shape, stddev=temp)
    z_q = loc[tf.newaxis, ...] + scale[tf.newaxis, ...] * noise
    log_q_z = q_dist.log_prob(z_q)  # [K, B, L, C]
    z_q = tf.reshape(z_q, iw_z_shape)
    log_q_z = tf.reshape(log_q_z, iw_z_shape)
  return z_q, log_q_z, q_dist
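A quick way to see what the temp argument does (a standalone numpy check, not part of the original project): drawing the noise with stddev=temp and forming loc + scale * noise yields samples from N(loc, (temp * scale)^2), so temp=0.0 collapses sampling to the posterior mean.

import numpy as np
loc, scale, temp = 2.0, 0.5, 0.7
noise = np.random.randn(1_000_000) * temp
z = loc + scale * noise
print(z.mean(), z.std())  # approx 2.0 and 0.35 (= scale * temp)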
Example 2: bottleneck
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def bottleneck(self, x):
  hparams = self.hparams
  z_size = hparams.bottleneck_bits
  x_shape = common_layers.shape_list(x)
  with tf.variable_scope("vae"):
    mu = tf.layers.dense(x, z_size, name="mu")
    if hparams.mode != tf.estimator.ModeKeys.TRAIN:
      return mu, 0.0  # No sampling or kl loss on eval.
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    epsilon = tf.random_normal(x_shape[:-1] + [z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
  return z, kl_loss * hparams.kl_beta
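The sampling line is the standard reparameterization trick; log_sigma here parameterizes log(sigma^2), so the multiplier exp(log_sigma / 2) equals sigma. A standalone numpy check with made-up values (not from the source):

import numpy as np
mu, log_sigma = 1.5, np.log(0.25)  # sigma^2 = 0.25, so sigma = 0.5
eps = np.random.randn(1_000_000)
z = mu + np.exp(log_sigma / 2) * eps
print(z.mean(), z.std())  # approx 1.5 and 0.5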
Example 3: testDmlLoss
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def testDmlLoss(self, batch, height, width, num_mixtures, reduce_sum):
  channels = 3
  pred = tf.random_normal([batch, height, width, num_mixtures * 10])
  labels = tf.random_uniform([batch, height, width, channels],
                             minval=0, maxval=256, dtype=tf.int32)
  actual_loss_num, actual_loss_den = common_layers.dml_loss(
      pred=pred, labels=labels, reduce_sum=reduce_sum)
  actual_loss = actual_loss_num / actual_loss_den
  real_labels = common_layers.convert_rgb_to_symmetric_real(labels)
  expected_loss = common_layers.discretized_mix_logistic_loss(
      pred=pred, labels=real_labels) / channels
  if reduce_sum:
    expected_loss = tf.reduce_mean(expected_loss)
  actual_loss_val, expected_loss_val = self.evaluate(
      [actual_loss, expected_loss])
  self.assertAllClose(actual_loss_val, expected_loss_val)
Example 4: vae
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def vae(x, z_size, name=None):
  """Simple variational autoencoder without discretization.

  Args:
    x: Input to the discretization bottleneck.
    z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
    name: Name for the bottleneck scope.

  Returns:
    Sampled latent z, kl loss, mu and log_sigma.
  """
  with tf.variable_scope(name, default_name="vae"):
    mu = tf.layers.dense(x, z_size, name="mu")
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    shape = common_layers.shape_list(x)
    epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
  return z, kl_loss, mu, log_sigma
Example 5: testLocalUnmaskedAttention2D
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def testLocalUnmaskedAttention2D(self, batch, heads, length,
                                 depth_k, depth_v, query_shape):
  if batch is None:
    batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)
  q = tf.random_normal([batch, heads, length, length, depth_k])
  k = tf.random_normal([batch, heads, length, length, depth_k])
  v = tf.random_normal([batch, heads, length, length, depth_v])
  output = common_attention.local_attention_2d(
      q,
      k,
      v,
      query_shape=query_shape,
      memory_flange=(3, 3))
  if isinstance(batch, tf.Tensor):
    batch, res = self.evaluate([batch, output])
  else:
    res = self.evaluate(output)
  self.assertEqual(res.shape, (batch, heads, length, length, depth_v))
Example 6: testDilatedAttention
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def testDilatedAttention(self, batch, heads, length, depth_v, block_length):
  if batch is None:
    batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)
  q = tf.random_normal([batch, heads, length, depth_v])
  k = tf.random_normal([batch, heads, length, depth_v])
  v = tf.random_normal([batch, heads, length, depth_v])
  output = common_attention.dilated_self_attention_1d(
      q, k, v,
      query_block_size=block_length,
      memory_block_size=block_length,
      gap_size=2,
      num_memory_blocks=2)
  if isinstance(batch, tf.Tensor):
    batch, res = self.evaluate([batch, output])
  else:
    res = self.evaluate(output)
  self.assertEqual(res.shape, (batch, heads, length, depth_v))
Example 7: testMaskedDilatedAttention
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def testMaskedDilatedAttention(self, batch, heads, length, depth_v,
                               block_length):
  if batch is None:
    batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)
  q = tf.random_normal([batch, heads, length, depth_v])
  k = tf.random_normal([batch, heads, length, depth_v])
  v = tf.random_normal([batch, heads, length, depth_v])
  output = common_attention.masked_dilated_self_attention_1d(
      q, k, v,
      query_block_size=block_length,
      memory_block_size=block_length,
      gap_size=2,
      num_memory_blocks=2)
  if isinstance(batch, tf.Tensor):
    batch, res = self.evaluate([batch, output])
  else:
    res = self.evaluate(output)
  self.assertEqual(res.shape, (batch, heads, length, depth_v))
Example 8: bottleneck
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def bottleneck(self, x):
  z_size = self.hparams.bottleneck_bits
  x_shape = common_layers.shape_list(x)
  with tf.variable_scope('bottleneck', reuse=tf.AUTO_REUSE):
    mu = x[..., :self.hparams.bottleneck_bits]
    if self.hparams.mode != tf.estimator.ModeKeys.TRAIN:
      return mu, 0.0  # No sampling or kl loss on eval.
    log_sigma = x[..., self.hparams.bottleneck_bits:]
    epsilon = tf.random_normal(x_shape[:-1] + [z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1)
    # This is the 'free bits' trick mentioned in Kingma et al. (2016)
    free_bits = self.hparams.free_bits
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
  return z, kl_loss * self.hparams.kl_beta
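For context (a standard VAE identity, not stated in the source): the kl term above is the closed-form KL divergence between the diagonal posterior N(mu, sigma^2) and a standard-normal prior, 0.5 * (sigma^2 + mu^2 - 1 - log sigma^2) per dimension, with sigma^2 = exp(log_sigma); the free-bits clipping then stops penalizing dimensions whose KL is already below the threshold. A standalone numeric check with made-up values:

import numpy as np
mu, log_sigma, free_bits = 0.3, np.log(0.5), 0.1
kl = 0.5 * (np.exp(log_sigma) + mu**2 - 1.0 - log_sigma)
print(kl, max(kl - free_bits, 0.0))  # per-dimension KL, then clipped loss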
Example 9: testExternalBias
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def testExternalBias(self):
  batch_size = 4
  num_hidden = 6
  num_dims = 8
  test_inputs = tf.random_normal(shape=(batch_size, num_dims))
  test_b_enc = tf.random_normal(shape=(batch_size, num_hidden))
  test_b_dec = tf.random_normal(shape=(batch_size, num_dims))
  nade = Nade(num_dims, num_hidden)
  log_prob, cond_probs = nade.log_prob(test_inputs, test_b_enc, test_b_dec)
  sample, sample_prob = nade.sample(b_enc=test_b_enc, b_dec=test_b_dec)
  with self.test_session() as sess:
    sess.run([tf.global_variables_initializer()])
    self.assertEqual(log_prob.eval().shape, (batch_size,))
    self.assertEqual(cond_probs.eval().shape, (batch_size, num_dims))
    self.assertEqual(sample.eval().shape, (batch_size, num_dims))
    self.assertEqual(sample_prob.eval().shape, (batch_size,))
Example 10: conv_kernel_initializer
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def conv_kernel_initializer(shape, dtype=None, partition_info=None):
  """Initialization for convolutional kernels.

  The main difference with tf.variance_scaling_initializer is that
  tf.variance_scaling_initializer uses a truncated normal with an uncorrected
  standard deviation, whereas here we use a normal distribution. Similarly,
  tf.initializers.variance_scaling uses a truncated normal with
  a corrected standard deviation.

  Args:
    shape: shape of variable
    dtype: dtype of variable
    partition_info: unused

  Returns:
    an initialization for the variable
  """
  del partition_info
  kernel_height, kernel_width, _, out_filters = shape
  fan_out = int(kernel_height * kernel_width * out_filters)
  return tf.random_normal(
      shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype)
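Since fan_out = kernel_height * kernel_width * out_filters, the draw follows He-style initialization with stddev sqrt(2 / fan_out). A quick check with assumed kernel dimensions (3x3 kernel, 64 output filters; these numbers are illustrative):

import numpy as np
fan_out = 3 * 3 * 64
print(np.sqrt(2.0 / fan_out))  # ≈ 0.0589, the stddev used for the draw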
Example 11: __call__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def __call__(self, shape, dtype=None, partition_info=None):
  if partition_info is not None:
    raise ValueError('partition_info not supported.')
  if dtype is None:
    dtype = self.dtype
  # Calculate the number of non-zero weights.
  nnz = 1.
  for d in shape:
    nnz *= d
  nnz *= (1. - self.sparsity)
  input_channels = shape[-2]
  n = nnz / input_channels
  # Note: despite its name, `variance` holds the standard deviation
  # sqrt(2 / n); it is passed to tf.random_normal's stddev argument.
  variance = (2. / n)**.5
  return tf.random_normal(shape, 0, variance, dtype, seed=self.seed)
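Worked numbers for the initializer above, under assumed values (a [3, 3, 16, 64] kernel with sparsity 0.9, i.e. 90% of weights expected to be zeroed elsewhere; none of these numbers come from the source):

import numpy as np
nnz = 3 * 3 * 16 * 64 * (1.0 - 0.9)  # expected non-zero weights: 921.6
n = nnz / 16                         # input_channels = shape[-2] = 16
print((2.0 / n) ** 0.5)              # ≈ 0.186, the stddev of the draws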
Example 12: _ensure_keep_mask
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def _ensure_keep_mask(self, x):
  if self._keep_mask is None or not self._share_mask:
    shape = tf.shape(x)
    # Calculate the stddev for the normal distribution that
    # matches the stddev of the bernoulli with p=keep_prob.
    stddev = tf.sqrt((1 - self._keep_prob) / self._keep_prob)
    self._keep_mask = tf.random_normal(shape, mean=1.0, stddev=stddev,
                                       dtype=x.dtype)
    self._keep_mask.set_shape(x.get_shape())
  return self._keep_mask
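Why stddev = sqrt((1 - p) / p) (my reading, verified numerically below, not stated in the source): an inverted-dropout Bernoulli mask that keeps with probability p and scales by 1/p has mean 1 and variance (1 - p) / p, so a Gaussian mask with mean 1.0 and that stddev matches its first two moments.

import numpy as np
p = 0.8
bern = (np.random.rand(1_000_000) < p) / p
print(bern.mean(), bern.std())  # approx 1.0 and 0.5
print(np.sqrt((1 - p) / p))     # 0.5, the matching Gaussian stddev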
Example 13: random_mask
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def random_mask(shape, k):
  x = tf.random_normal(shape=shape)
  kth_largest = tf.nn.top_k(x, k)[0][:, k-1]
  return tf.to_float(tf.greater_equal(x, tf.expand_dims(kth_largest, 1)))
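Illustrative usage (the shape, k, and TF1 session workflow are assumptions): tf.nn.top_k operates on the last axis, so each row of the result contains exactly k ones marking that row's k largest Gaussian draws; ties have probability zero with continuous noise.

mask = random_mask([2, 5], k=2)
with tf.Session() as sess:
  print(sess.run(mask))  # each row sums to 2.0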
Example 14: random_mask2
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def random_mask2(shape, k):
  x = tf.random_normal(shape=shape)
  x = tf.transpose(x)
  kth_largest = tf.nn.top_k(x, k)[0][:, k-1]
  mask = tf.to_float(tf.greater_equal(x, tf.expand_dims(kth_largest, 1)))
  return tf.transpose(mask)
Example 15: testSummarizeLosses
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_normal [as alias]
def testSummarizeLosses(self):
  with tf.Graph().as_default():
    model = t2t_model.T2TModel(hparam.HParams())
    losses = {"training": tf.random_normal([]),
              "extra": tf.random_normal([])}
    outputs = model._summarize_losses(losses)
    self.assertIsNone(outputs)
    self.assertEqual(
        len(tf.get_collection(tf.GraphKeys.SUMMARIES, scope="losses")),
        len(losses))