This article collects typical usage examples of the tensorflow.random_normal method in Python. If you have been wondering what tensorflow.random_normal does, how to call it, or what real uses look like, the curated examples below should help. You can also explore further usage examples from the tensorflow module it belongs to.
The following 15 code examples of tensorflow.random_normal are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
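Before the examples, a minimal sketch of the call itself (TensorFlow 1.x API; in TensorFlow 2.x the op was renamed tf.random.normal):

import tensorflow as tf

# Draw i.i.d. samples from a normal distribution.
# Signature: tf.random_normal(shape, mean=0.0, stddev=1.0,
#                             dtype=tf.float32, seed=None, name=None)
noise = tf.random_normal([3, 4], mean=0.0, stddev=1.0, seed=42)
with tf.Session() as sess:
    print(sess.run(noise))  # a 3x4 array of N(0, 1) samples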
Example 1: set_input_shape
# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal [as alias]
def set_input_shape(self, input_shape):
    batch_size, rows, cols, input_channels = input_shape
    kernel_shape = tuple(self.kernel_shape) + (input_channels,
                                               self.output_channels)
    assert len(kernel_shape) == 4
    assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
    init = tf.random_normal(kernel_shape, dtype=tf.float32)
    init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
                                               axis=(0, 1, 2)))
    self.kernels = tf.Variable(init)
    self.b = tf.Variable(
        np.zeros((self.output_channels,)).astype('float32'))
    input_shape = list(input_shape)
    input_shape[0] = 1
    dummy_batch = tf.zeros(input_shape)
    dummy_output = self.fprop(dummy_batch)
    output_shape = [int(e) for e in dummy_output.get_shape()]
    output_shape[0] = batch_size
    self.output_shape = tuple(output_shape)
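The division by tf.sqrt(1e-7 + ...) above L2-normalizes each output channel's kernel. A standalone sketch of just that step (shape values hypothetical):

import tensorflow as tf

kernel_shape = (3, 3, 16, 32)  # rows, cols, input_channels, output_channels
init = tf.random_normal(kernel_shape, dtype=tf.float32)
init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=(0, 1, 2)))
norms = tf.sqrt(tf.reduce_sum(tf.square(init), axis=(0, 1, 2)))
with tf.Session() as sess:
    print(sess.run(norms))  # ~1.0 for each of the 32 output channels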
Example 2: set_input_shape
# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal [as alias]
def set_input_shape(self, input_shape):
    batch_size, dim = input_shape
    self.input_shape = [batch_size, dim]
    self.output_shape = [batch_size, self.num_hid]
    if self.init_mode == "norm":
        init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
        init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
                                                   keep_dims=True))
        init = init * self.init_scale
    elif self.init_mode == "uniform_unit_scaling":
        scale = np.sqrt(3. / dim)
        init = tf.random_uniform([dim, self.num_hid], dtype=tf.float32,
                                 minval=-scale, maxval=scale)
    else:
        raise ValueError(self.init_mode)
    self.W = PV(init)
    if self.use_bias:
        self.b = PV((np.zeros((self.num_hid,))
                     + self.init_b).astype('float32'))
Example 3: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal [as alias]
def __init__(self, batch_size, z_size, mean, logvar):
    """Create a diagonal Gaussian distribution.

    Args:
        batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.
        z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
        mean: The N-D mean of the distribution.
        logvar: The N-D log variance of the diagonal distribution.
    """
    size__xz = [None, z_size]
    self.mean = mean  # bxn already
    self.logvar = logvar  # bxn already
    self.noise = noise = tf.random_normal(tf.shape(logvar))
    self.sample = mean + tf.exp(0.5 * logvar) * noise
    mean.set_shape(size__xz)
    logvar.set_shape(size__xz)
    self.sample.set_shape(size__xz)
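The line sample = mean + tf.exp(0.5 * logvar) * noise is the reparameterization trick: the randomness comes from a parameter-free N(0, I) draw, so gradients flow through mean and logvar. A quick statistical check (constants hypothetical):

import tensorflow as tf

mean = tf.constant([[1.0, -2.0]])
logvar = tf.constant([[0.0, 2.0]])  # variances 1.0 and e**2
noise = tf.random_normal([100000, 2])
samples = mean + tf.exp(0.5 * logvar) * noise
with tf.Session() as sess:
    s = sess.run(samples)
print(s.mean(axis=0))  # approx [ 1.0, -2.0]
print(s.std(axis=0))   # approx [ 1.0,  2.72]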
Example 4: testLinearShared
# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal [as alias]
def testLinearShared(self):
    # Create a linear map which is applied twice on different inputs
    # (i.e. the weights of the map are shared).
    linear_map = blocks_std.Linear(6)
    x1 = tf.random_normal(shape=[1, 5])
    x2 = tf.random_normal(shape=[1, 5])
    xs = x1 + x2

    # Apply the transform with the same weights.
    y1 = linear_map(x1)
    y2 = linear_map(x2)
    ys = linear_map(xs)

    with self.test_session() as sess:
        # Initialize all the variables of the graph.
        tf.global_variables_initializer().run()
        y1_res, y2_res, ys_res = sess.run([y1, y2, ys])
        self.assertAllClose(y1_res + y2_res, ys_res)
Example 5: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal [as alias]
def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus,
             optimizer=tf.train.AdamOptimizer(), scale=0.1):
    self.n_input = n_input
    self.n_hidden = n_hidden
    self.transfer = transfer_function
    self.scale = tf.placeholder(tf.float32)
    self.training_scale = scale
    network_weights = self._initialize_weights()
    self.weights = network_weights

    # model
    self.x = tf.placeholder(tf.float32, [None, self.n_input])
    self.hidden = self.transfer(tf.add(
        tf.matmul(self.x + scale * tf.random_normal((n_input,)),
                  self.weights['w1']),
        self.weights['b1']))
    self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']),
                                 self.weights['b2'])

    # cost
    self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
    self.optimizer = optimizer.minimize(self.cost)

    init = tf.global_variables_initializer()
    self.sess = tf.Session()
    self.sess.run(init)
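A minimal usage sketch for this denoising autoencoder, assuming the surrounding class is named AdditiveGaussianNoiseAutoencoder (a guess from the noise term; the class statement and _initialize_weights are omitted from the excerpt). Note that the hidden layer uses the Python-level scale argument, so feeding the self.scale placeholder mirrors the usual training loop rather than changing the noise level:

import numpy as np

ae = AdditiveGaussianNoiseAutoencoder(n_input=784, n_hidden=200, scale=0.1)  # hypothetical class name
batch = np.random.rand(64, 784).astype('float32')
cost, _ = ae.sess.run([ae.cost, ae.optimizer],
                      feed_dict={ae.x: batch, ae.scale: ae.training_scale})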
Example 6: sample_action
# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal [as alias]
def sample_action(self, logits, sampling_dim,
                  act_dim, act_type, greedy=False):
    """Sample an action from a distribution."""
    if self.env_spec.is_discrete(act_type):
        if greedy:
            act = tf.argmax(logits, 1)
        else:
            act = tf.reshape(tf.multinomial(logits, 1), [-1])
    elif self.env_spec.is_box(act_type):
        # Integer division: plain '/' yields a float and breaks the slice
        # under Python 3.
        means = logits[:, :sampling_dim // 2]
        std = logits[:, sampling_dim // 2:]
        if greedy:
            act = means
        else:
            batch_size = tf.shape(logits)[0]
            act = means + std * tf.random_normal([batch_size, act_dim])
    else:
        assert False
    return act
Example 7: sample_action
# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal [as alias]
def sample_action(self, policy_parameters):
    """Constructs a symbolic operation for stochastically sampling from the
    policy distribution.

    arguments:
        policy_parameters: (mean, log_std) of a Gaussian distribution over
            actions
            sy_mean: (batch_size, self.ac_dim)
            sy_logstd: (batch_size, self.ac_dim)

    returns:
        sy_sampled_ac: (batch_size, self.ac_dim)
    """
    sy_mean, sy_logstd = policy_parameters
    sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean), 0, 1)
    return sy_sampled_ac
Example 8: fc
# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal [as alias]
def fc(inputs, output_size, init_bias=0.0, activation_func=tf.nn.relu, stddev=0.01):
    input_shape = inputs.get_shape().as_list()
    if len(input_shape) == 4:
        fc_weights = tf.Variable(
            tf.random_normal([input_shape[1] * input_shape[2] * input_shape[3], output_size],
                             dtype=tf.float32, stddev=stddev),
            name='weights')
        inputs = tf.reshape(inputs, [-1, fc_weights.get_shape().as_list()[0]])
    else:
        fc_weights = tf.Variable(
            tf.random_normal([input_shape[-1], output_size], dtype=tf.float32, stddev=stddev),
            name='weights')

    fc_biases = tf.Variable(tf.constant(init_bias, shape=[output_size], dtype=tf.float32),
                            name='biases')
    fc_layer = tf.matmul(inputs, fc_weights)
    fc_layer = tf.nn.bias_add(fc_layer, fc_biases)
    if activation_func:
        fc_layer = activation_func(fc_layer)
    return fc_layer
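A brief usage sketch of fc with a 4-D NHWC feature map (shapes hypothetical); the 4-D branch flattens height, width and channels before the matmul:

import tensorflow as tf

feature_map = tf.random_normal([8, 7, 7, 64])  # e.g. a conv layer's output
logits = fc(feature_map, output_size=10, activation_func=None)
print(logits.shape)  # (8, 10): input was flattened to [-1, 7 * 7 * 64]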
Example 9: testDmlLoss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal [as alias]
def testDmlLoss(self, batch, height, width, num_mixtures, reduce_sum):
    channels = 3
    pred = tf.random_normal([batch, height, width, num_mixtures * 10])
    labels = tf.random_uniform([batch, height, width, channels],
                               minval=0, maxval=256, dtype=tf.int32)
    actual_loss_num, actual_loss_den = common_layers.dml_loss(
        pred=pred, labels=labels, reduce_sum=reduce_sum)
    actual_loss = actual_loss_num / actual_loss_den

    real_labels = common_layers.convert_rgb_to_symmetric_real(labels)
    expected_loss = common_layers.discretized_mix_logistic_loss(
        pred=pred, labels=real_labels) / channels
    if reduce_sum:
        expected_loss = tf.reduce_mean(expected_loss)

    with self.test_session() as sess:
        actual_loss_val, expected_loss_val = sess.run(
            [actual_loss, expected_loss])
    self.assertAllClose(actual_loss_val, expected_loss_val)
Example 10: testCreateOutputTrainMode
# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal [as alias]
def testCreateOutputTrainMode(self, likelihood, num_mixtures, depth):
    batch = 1
    height = 8
    width = 8
    channels = 3
    rows = height
    if likelihood == common_image_attention.DistributionType.CAT:
        cols = channels * width
    else:
        cols = width
    hparams = tf.contrib.training.HParams(
        hidden_size=2,
        likelihood=likelihood,
        mode=tf.estimator.ModeKeys.TRAIN,
        num_mixtures=num_mixtures,
    )
    decoder_output = tf.random_normal([batch, rows, cols, hparams.hidden_size])
    targets = tf.random_uniform([batch, height, width, channels],
                                minval=-1., maxval=1.)
    output = common_image_attention.create_output(
        decoder_output, rows, cols, targets, hparams)
    if hparams.likelihood == common_image_attention.DistributionType.CAT:
        self.assertEqual(output.shape, (batch, height, width, channels, depth))
    else:
        self.assertEqual(output.shape, (batch, height, width, depth))
Example 11: vae
# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal [as alias]
def vae(x, name, z_size):
    """Simple variational autoencoder without discretization.

    Args:
        x: Input to the discretization bottleneck.
        name: Name for the bottleneck scope.
        z_size: Number of bits used to produce a discrete code; discrete
            codes range from 1 to 2**z_size.

    Returns:
        The sampled latent z, the KL loss, mu and log_sigma.
    """
    with tf.variable_scope(name):
        mu = tf.layers.dense(x, z_size, name="mu")
        log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
        shape = common_layers.shape_list(x)
        epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
        z = mu + tf.exp(log_sigma / 2) * epsilon
        kl = 0.5 * tf.reduce_mean(
            tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1)
        free_bits = z_size // 4
        kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
        return z, kl_loss, mu, log_sigma
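For reference, the kl term above is the closed-form KL divergence from N(mu, sigma^2) to N(0, 1) per dimension, with log_sigma holding log(sigma^2); the tf.maximum(kl - free_bits, 0.0) clamp is the "free bits" trick, which waives the KL penalty below the threshold. A quick numeric check in plain NumPy:

import numpy as np

mu, log_sigma = 0.5, np.log(0.25)  # one latent dimension, sigma**2 = 0.25
kl = 0.5 * (np.exp(log_sigma) + mu ** 2 - 1.0 - log_sigma)
print(kl)  # ~0.443 nats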
Example 12: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal [as alias]
def __init__(self, ob_dim, ac_dim):  # pylint: disable=W0613
    X = tf.placeholder(tf.float32, shape=[None, ob_dim * 2 + ac_dim * 2 + 2])  # batch of observations
    vtarg_n = tf.placeholder(tf.float32, shape=[None], name='vtarg')
    wd_dict = {}
    h1 = tf.nn.elu(dense(X, 64, "h1", weight_init=U.normc_initializer(1.0),
                         bias_init=0, weight_loss_dict=wd_dict))
    h2 = tf.nn.elu(dense(h1, 64, "h2", weight_init=U.normc_initializer(1.0),
                         bias_init=0, weight_loss_dict=wd_dict))
    vpred_n = dense(h2, 1, "hfinal", weight_init=U.normc_initializer(1.0),
                    bias_init=0, weight_loss_dict=wd_dict)[:, 0]
    sample_vpred_n = vpred_n + tf.random_normal(tf.shape(vpred_n))
    wd_loss = tf.get_collection("vf_losses", None)
    loss = tf.reduce_mean(tf.square(vpred_n - vtarg_n)) + tf.add_n(wd_loss)
    loss_sampled = tf.reduce_mean(tf.square(vpred_n - tf.stop_gradient(sample_vpred_n)))
    self._predict = U.function([X], vpred_n)
    # NOTE: 'async' became a reserved keyword in Python 3.7; this excerpt
    # targets the older Pythons the original codebase supported.
    optim = kfac.KfacOptimizer(learning_rate=0.001, cold_lr=0.001 * (1 - 0.9), momentum=0.9,
                               clip_kl=0.3, epsilon=0.1, stats_decay=0.95,
                               async=1, kfac_update=2, cold_iter=50,
                               weight_decay_dict=wd_dict, max_grad_norm=None)
    vf_var_list = []
    for var in tf.trainable_variables():
        if "vf" in var.name:
            vf_var_list.append(var)
    update_op, self.q_runner = optim.minimize(loss, loss_sampled, var_list=vf_var_list)
    self.do_update = U.function([X, vtarg_n], update_op)  # pylint: disable=E1101
    U.initialize()  # Initialize uninitialized TF variables
Example 13: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal [as alias]
def __init__(self, name, state_size, output_size):
    self.state_size = state_size
    self.output_size = output_size

    with tf.variable_scope(name):
        self.input = tf.placeholder(tf.float32, shape=[None, self.state_size])
        self.action = tf.placeholder(tf.float32, shape=[None, self.output_size])
        self.l1 = tf.layers.dense(inputs=self.input, units=128, activation=tf.nn.relu)
        self.l2 = tf.layers.dense(inputs=self.l1, units=128, activation=tf.nn.relu)
        self.l3 = tf.layers.dense(inputs=self.l2, units=128, activation=tf.nn.relu)
        self.mu = tf.layers.dense(inputs=self.l3, units=self.output_size, activation=None)
        self.log_std = tf.get_variable(
            name='log_std',
            initializer=-0.5 * np.ones(self.output_size, dtype=np.float32))
        self.std = tf.exp(self.log_std)
        self.pi = self.mu + tf.random_normal(tf.shape(self.mu)) * self.std
        self.logp = gaussian_likelihood(self.action, self.mu, self.log_std)
        self.logp_pi = gaussian_likelihood(self.pi, self.mu, self.log_std)
        self.scope = tf.get_variable_scope().name
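Example 13 calls a gaussian_likelihood helper that the excerpt does not show; a standard sketch of the diagonal-Gaussian log-density it is usually defined as (an assumption, not the verbatim source):

import numpy as np
import tensorflow as tf

def gaussian_likelihood(x, mu, log_std):
    # Log-density of x under a diagonal Gaussian N(mu, exp(log_std)**2),
    # summed over the action dimensions.
    pre_sum = -0.5 * (((x - mu) / (tf.exp(log_std) + 1e-8)) ** 2
                      + 2 * log_std + np.log(2 * np.pi))
    return tf.reduce_sum(pre_sum, axis=1)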
Example 14: test_generator_graph
# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal [as alias]
def test_generator_graph(self):
    # xrange comes from six.moves when run under Python 3.
    tf.set_random_seed(1234)
    # Check graph construction for a number of image size/depths and batch
    # sizes.
    for i, batch_size in zip(xrange(3, 7), xrange(3, 8)):
        tf.reset_default_graph()
        final_size = 2 ** i
        noise = tf.random_normal([batch_size, 64])
        image, end_points = dcgan.generator(
            noise,
            depth=32,
            final_size=final_size)

        self.assertAllEqual([batch_size, final_size, final_size, 3],
                            image.shape.as_list())

        expected_names = ['deconv%i' % j for j in xrange(1, i)] + ['logits']
        self.assertSetEqual(set(expected_names), set(end_points.keys()))

        # Check layer depths.
        for j in range(1, i):
            layer = end_points['deconv%i' % j]
            self.assertEqual(32 * 2 ** (i - j - 1), layer.get_shape().as_list()[-1])
Example 15: _test_tf_hvp
# Required import: import tensorflow [as alias]
# Or: from tensorflow import random_normal [as alias]
def _test_tf_hvp(func, optimized, tf):
    a = tf.random_normal(shape=(300,))
    v = tf.reshape(a, shape=(-1,))

    modes = ['forward', 'reverse']
    for mode1 in modes:
        for mode2 in modes:
            if mode1 == mode2 == 'forward':
                continue
            df = tangent.autodiff(
                func,
                mode=mode1,
                motion='joint',
                optimized=optimized,
                check_dims=False)
            ddf = tangent.autodiff(
                df, mode=mode2, motion='joint', optimized=optimized,
                check_dims=False)
            dx = ddf(a, tf.constant(1.0), v)
            # We just ensure it computes something in this case.
            assert dx.shape == a.shape