This page collects typical usage examples of the Python method tensorflow.compat.v1.random_normal_initializer. If you are unsure what v1.random_normal_initializer does, how to call it, or what it looks like in practice, the curated examples below may help. You can also explore further usage examples of its containing module, tensorflow.compat.v1.
The sections below present 15 code examples of the v1.random_normal_initializer method, sorted by popularity by default.
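Before diving into the examples, here is a minimal self-contained sketch (not taken from any example below; the variable name and shape are purely illustrative) of how the initializer is typically constructed and attached to a variable in TF1-style graph mode:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # run in v1-style graph mode

# Draw initial values from a normal distribution N(0.0, 0.02^2).
init = tf.random_normal_initializer(mean=0.0, stddev=0.02)
w = tf.get_variable("w", shape=[128, 64], initializer=init)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(w).std())  # should be close to 0.02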
Example 1: add_depth_embedding
# Required import (aliased as tf below):
from tensorflow.compat import v1 as tf
# (alternatively: from tensorflow.compat.v1 import random_normal_initializer)
# common_layers comes from tensor2tensor.layers.common_layers.

def add_depth_embedding(x):
  """Add n-dimensional embedding as the depth embedding (timing signal).

  Adds embeddings to represent the position of the step in the recurrent
  tower.

  Args:
    x: a tensor with shape [max_step, batch, length, depth]

  Returns:
    a Tensor the same shape as x.
  """
  x_shape = common_layers.shape_list(x)
  depth = x_shape[-1]
  num_steps = x_shape[0]
  shape = [num_steps, 1, 1, depth]
  depth_embedding = (
      tf.get_variable(
          "depth_embedding",
          shape,
          initializer=tf.random_normal_initializer(0, depth**-0.5)) *
      (depth**0.5))
  x += depth_embedding
  return x
Example 2: dense_weightnorm
# Required import (aliased as tf below):
from tensorflow.compat import v1 as tf
# (alternatively: from tensorflow.compat.v1 import random_normal_initializer)

def dense_weightnorm(
    name, x, n_out, x_mask, init_scale, init, dtype=tf.float32):
  """Dense layer with weight normalization."""
  n_in = common_layers.shape_list(x)[2]
  eps = tf.keras.backend.epsilon()
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    v = tf.get_variable(
        "v", [n_in, n_out], dtype,
        initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
    v = v / tf.norm(v, axis=0, keepdims=True)
    t = tf.matmul(x, v)  # [B, L, n_out]
    mean, var = moments_over_bl(t, x_mask)
    g_init = init_scale / (tf.sqrt(var) + eps)
    g = get_variable_ddi(
        "g", [n_out], g_init, init,
        initializer=tf.zeros_initializer, dtype=dtype, trainable=True)
    b = get_variable_ddi(
        "b", [n_out], -mean * g_init, init,
        initializer=tf.zeros_initializer, dtype=dtype, trainable=True)
    w = g * v
    y = tf.matmul(x, w) + b
    tf.summary.histogram("_g", g)
    return y
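As a side note on Example 2, the decomposition it implements is w = g * v / ||v|| (per output column), so each column of w ends up with norm |g|. Below is a minimal NumPy check of that identity, independent of the example's helpers (moments_over_bl and get_variable_ddi are assumed to come from the surrounding codebase):

import numpy as np

rng = np.random.default_rng(0)
v = rng.normal(0.0, 0.05, size=(8, 4))            # [n_in, n_out], same init as above
v = v / np.linalg.norm(v, axis=0, keepdims=True)  # unit-norm columns
g = np.array([1.0, 2.0, 0.5, 3.0])                # per-output gain
w = g * v
print(np.linalg.norm(w, axis=0))                  # -> [1.  2.  0.5 3. ]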
Example 3: _get_weights
# Required import (aliased as tf below):
from tensorflow.compat import v1 as tf
# (alternatively: from tensorflow.compat.v1 import random_normal_initializer)

def _get_weights(model_hparams, vocab_size, hidden_dim=None):
  """Copied from tensor2tensor/layers/modalities.py but uses total vocab."""
  if hidden_dim is None:
    hidden_dim = model_hparams.hidden_size
  num_shards = model_hparams.symbol_modality_num_shards
  shards = []
  for i in range(num_shards):
    shard_size = (sum(vocab_size) // num_shards) + (
        1 if i < sum(vocab_size) % num_shards else 0)
    var_name = 'weights_%d' % i
    shards.append(
        tf.get_variable(
            var_name, [shard_size, hidden_dim],
            initializer=tf.random_normal_initializer(0.0, hidden_dim**-0.5)))
  if num_shards == 1:
    ret = shards[0]
  else:
    ret = tf.concat(shards, 0)
  # Convert ret to tensor.
  if not tf.executing_eagerly():
    ret = common_layers.convert_gradient_to_tensor(ret)
  return ret
Example 4: get_noised_result
# Required import (aliased as tf below):
from tensorflow.compat import v1 as tf
# (alternatively: from tensorflow.compat.v1 import random_normal_initializer)
# Also used: from distutils.version import LooseVersion

def get_noised_result(self, sample_state, global_state):
  """See base class."""
  if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
    def add_noise(v):
      return v + tf.random.normal(
          tf.shape(input=v), stddev=global_state.stddev, dtype=v.dtype)
  else:
    random_normal = tf.random_normal_initializer(
        stddev=global_state.stddev)

    def add_noise(v):
      return v + tf.cast(random_normal(tf.shape(input=v)), dtype=v.dtype)

  if self._ledger:
    dependencies = [
        self._ledger.record_sum_query(
            global_state.l2_norm_clip, global_state.stddev)
    ]
  else:
    dependencies = []
  with tf.control_dependencies(dependencies):
    return tf.nest.map_structure(add_noise, sample_state), global_state
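The TF2 branch above relies on the fact that a random_normal_initializer instance is callable on a shape. A tiny sketch of just that behavior (shape and stddev are illustrative only):

import tensorflow as tf  # TF 2.x, eager mode

init = tf.random_normal_initializer(stddev=0.1)
noise = init(shape=(3, 3))  # a [3, 3] tensor of N(0, 0.1^2) samples
print(noise.numpy().shape)  # (3, 3)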
Example 5: embedding_weights
# Required import (aliased as tf below):
from tensorflow.compat import v1 as tf
# (alternatively: from tensorflow.compat.v1 import random_normal_initializer)
# mtf is mesh-tensorflow (commonly: import mesh_tensorflow as mtf).

def embedding_weights(mesh,
                      vocab_dim,
                      output_dim,
                      variable_dtype,
                      name="embedding",
                      ensemble_dim=None,
                      initializer=None):
  """Embedding weights."""
  if not ensemble_dim:
    ensemble_dim = []
  elif not isinstance(ensemble_dim, list):
    ensemble_dim = [ensemble_dim]
  shape = mtf.Shape(ensemble_dim) + [vocab_dim, output_dim]
  if initializer is None:
    initializer = tf.random_normal_initializer()
  ret = mtf.get_variable(
      mesh, name, shape, dtype=variable_dtype, initializer=initializer)
  return ret
Example 6: pix2pix_arg_scope
# Required import (aliased as tf below):
from tensorflow.compat import v1 as tf
# (alternatively: from tensorflow.compat.v1 import random_normal_initializer)
# slim is the TF-Slim library (commonly: import tf_slim as slim).

def pix2pix_arg_scope():
  """Returns a default argument scope for isola_net.

  Returns:
    An arg scope.
  """
  # These parameters come from the online port, which don't necessarily match
  # those in the paper.
  # TODO(nsilberman): confirm these values with Philip.
  instance_norm_params = {
      'center': True,
      'scale': True,
      'epsilon': 0.00001,
  }

  with slim.arg_scope(
      [slim.conv2d, slim.conv2d_transpose],
      normalizer_fn=slim.instance_norm,
      normalizer_params=instance_norm_params,
      weights_initializer=tf.random_normal_initializer(0, 0.02)) as sc:
    return sc
Example 7: feed_forward_gaussian_fun
# Required import (aliased as tf below):
from tensorflow.compat import v1 as tf
# (alternatively: from tensorflow.compat.v1 import random_normal_initializer)
# Also used: gym, functools, operator, and tensorflow_probability as tfp.

def feed_forward_gaussian_fun(action_space, config, observations):
  """Feed-forward Gaussian."""
  if not isinstance(action_space, gym.spaces.box.Box):
    raise ValueError("Expecting continuous action space.")

  mean_weights_initializer = tf.initializers.variance_scaling(
      scale=config.init_mean_factor)
  logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)

  flat_observations = tf.reshape(observations, [
      tf.shape(observations)[0], tf.shape(observations)[1],
      functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])

  with tf.variable_scope("network_parameters"):
    with tf.variable_scope("policy"):
      x = flat_observations
      for size in config.policy_layers:
        x = tf.layers.dense(x, size, activation=tf.nn.relu)
      mean = tf.layers.dense(
          x, action_space.shape[0], activation=tf.tanh,
          kernel_initializer=mean_weights_initializer)
      logstd = tf.get_variable(
          "logstd", mean.shape[2:], tf.float32, logstd_initializer)
      logstd = tf.tile(
          logstd[None, None],
          [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
    with tf.variable_scope("value"):
      x = flat_observations
      for size in config.value_layers:
        x = tf.layers.dense(x, size, activation=tf.nn.relu)
      value = tf.layers.dense(x, 1)[..., 0]
  mean = tf.check_numerics(mean, "mean")
  logstd = tf.check_numerics(logstd, "logstd")
  value = tf.check_numerics(value, "value")
  policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd))
  return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2.))
Example 8: default_initializer
# Required import (aliased as tf below):
from tensorflow.compat import v1 as tf
# (alternatively: from tensorflow.compat.v1 import random_normal_initializer)

def default_initializer(std=0.05):
  return tf.random_normal_initializer(0., std)
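A hypothetical one-liner showing how such a helper would typically be consumed (the variable name and shape here are made up for illustration):

w = tf.get_variable("w", [16, 16], initializer=default_initializer(0.02))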
Example 9: embed
# Required import (aliased as tf below):
from tensorflow.compat import v1 as tf
# (alternatively: from tensorflow.compat.v1 import random_normal_initializer)

def embed(self, x, name="embedding"):
  """Input embedding with a non-zero bias for uniform inputs."""
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    x_shape = common_layers.shape_list(x)
    # Merge channels and depth before embedding.
    x = tf.reshape(x, x_shape[:-2] + [x_shape[-2] * x_shape[-1]])
    x = tf.layers.dense(
        x,
        self.hparams.hidden_size,
        name="embed",
        activation=common_layers.belu,
        bias_initializer=tf.random_normal_initializer(stddev=0.01))
    x = common_layers.layer_norm(x, name="ln_embed")
    return common_attention.add_timing_signal_nd(x)
Example 10: discriminator
# Required import (aliased as tf below):
from tensorflow.compat import v1 as tf
# (alternatively: from tensorflow.compat.v1 import random_normal_initializer)
# lrelu is a leaky-ReLU helper defined elsewhere in the source module.

def discriminator(self, x, is_training, reuse=False):
  """Discriminator architecture based on InfoGAN.

  Args:
    x: input images, shape [bs, h, w, channels].
    is_training: boolean, whether we are in train or eval mode.
    reuse: boolean, whether params should be re-used.

  Returns:
    out_logit: the output logits (before sigmoid).
  """
  hparams = self.hparams
  with tf.variable_scope(
      "discriminator", reuse=reuse,
      initializer=tf.random_normal_initializer(stddev=0.02)):
    batch_size, height, width = common_layers.shape_list(x)[:3]
    # Map x from [bs, h, w, c] to [bs, 1].
    net = tf.layers.conv2d(x, 64, (4, 4), strides=(2, 2),
                           padding="SAME", name="d_conv1")
    # [bs, h/2, w/2, 64]
    net = lrelu(net)
    net = tf.layers.conv2d(net, 128, (4, 4), strides=(2, 2),
                           padding="SAME", name="d_conv2")
    # [bs, h/4, w/4, 128]
    if hparams.discriminator_batchnorm:
      net = tf.layers.batch_normalization(net, training=is_training,
                                          momentum=0.999, name="d_bn2")
    net = lrelu(net)
    size = height * width
    net = tf.reshape(net, [batch_size, size * 8])  # [bs, h * w * 8]
    net = tf.layers.dense(net, 1024, name="d_fc3")  # [bs, 1024]
    if hparams.discriminator_batchnorm:
      net = tf.layers.batch_normalization(net, training=is_training,
                                          momentum=0.999, name="d_bn3")
    net = lrelu(net)
    return net
Example 11: generator
# Required import (aliased as tf below):
from tensorflow.compat import v1 as tf
# (alternatively: from tensorflow.compat.v1 import random_normal_initializer)
# lrelu and deconv2d are helpers defined elsewhere in the source module.

def generator(self, z, is_training, out_shape):
  """Generator outputting image in [0, 1]."""
  hparams = self.hparams
  height, width, c_dim = out_shape
  batch_size = hparams.batch_size
  with tf.variable_scope(
      "generator",
      initializer=tf.random_normal_initializer(stddev=0.02)):
    net = tf.layers.dense(z, 1024, name="g_fc1")
    net = tf.layers.batch_normalization(net, training=is_training,
                                        momentum=0.999, name="g_bn1")
    net = lrelu(net)
    net = tf.layers.dense(net, 128 * (height // 4) * (width // 4),
                          name="g_fc2")
    net = tf.layers.batch_normalization(net, training=is_training,
                                        momentum=0.999, name="g_bn2")
    net = lrelu(net)
    net = tf.reshape(net, [batch_size, height // 4, width // 4, 128])
    net = deconv2d(net, [batch_size, height // 2, width // 2, 64],
                   4, 4, 2, 2, name="g_dc3")
    net = tf.layers.batch_normalization(net, training=is_training,
                                        momentum=0.999, name="g_bn3")
    net = lrelu(net)
    net = deconv2d(net, [batch_size, height, width, c_dim],
                   4, 4, 2, 2, name="g_dc4")
    out = tf.nn.sigmoid(net)
    return common_layers.convert_real_to_rgb(out)
Example 12: create_positional_emb_2d
# Required import (aliased as tf below):
from tensorflow.compat import v1 as tf
# (alternatively: from tensorflow.compat.v1 import random_normal_initializer)
# mtf is mesh-tensorflow (commonly: import mesh_tensorflow as mtf).

def create_positional_emb_2d(self, targets):
  """Learned 2d positional embedding for images."""
  mesh = targets.mesh

  positional_emb_rows_var = mtf.get_variable(
      mesh, "positional_emb_rows",
      mtf.Shape([self.pos_dim, self.model_dim]),
      initializer=tf.random_normal_initializer(),
      activation_dtype=self.activation_type)
  positional_emb_cols_var = mtf.get_variable(
      mesh, "positional_emb_cols",
      mtf.Shape([self.pos_dim, self.model_dim]),
      initializer=tf.random_normal_initializer(),
      activation_dtype=self.activation_type)

  targets_position_x = mtf.range(mesh, self.rows_dim, dtype=tf.int32)
  targets_position_y = mtf.range(mesh, self.cols_dim, dtype=tf.int32)
  position_x = mtf.broadcast(
      mtf.gather(positional_emb_rows_var, targets_position_x,
                 self.pos_dim),
      mtf.Shape([self.rows_dim, self.cols_dim, self.model_dim]))
  position_y = mtf.broadcast(
      mtf.gather(positional_emb_cols_var, targets_position_y,
                 self.pos_dim),
      mtf.Shape([self.rows_dim, self.cols_dim, self.model_dim]))
  return position_x + position_y
Example 13: get_weights
# Required import (aliased as tf below):
from tensorflow.compat import v1 as tf
# (alternatively: from tensorflow.compat.v1 import random_normal_initializer)

def get_weights(model_hparams, vocab_size, hidden_dim=None):
  """Create or get concatenated embedding or softmax variable.

  Args:
    model_hparams: HParams, model hyperparameters.
    vocab_size: int, vocabulary size.
    hidden_dim: dim of the variable. Defaults to model_hparams' hidden_size.

  Returns:
    a list of num_shards Tensors.
  """
  if hidden_dim is None:
    hidden_dim = model_hparams.hidden_size
  num_shards = model_hparams.symbol_modality_num_shards
  shards = []
  for i in range(num_shards):
    shard_size = (vocab_size // num_shards) + (
        1 if i < vocab_size % num_shards else 0)
    var_name = "weights_%d" % i
    shards.append(
        tf.get_variable(
            var_name, [shard_size, hidden_dim],
            initializer=tf.random_normal_initializer(0.0, hidden_dim**-0.5)))
  if num_shards == 1:
    ret = shards[0]
  else:
    ret = tf.concat(shards, 0)
  # Convert ret to tensor.
  if not tf.executing_eagerly():
    ret = common_layers.convert_gradient_to_tensor(ret)
  return ret
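A quick sanity check of the shard-size arithmetic in the loop above: a vocabulary of 10 split across 3 shards yields sizes [4, 3, 3], which sum back to the full vocabulary (the numbers are chosen only for illustration):

vocab, num_shards = 10, 3
sizes = [vocab // num_shards + (1 if i < vocab % num_shards else 0)
         for i in range(num_shards)]
print(sizes, sum(sizes))  # [4, 3, 3] 10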
Example 14: dense
# Required import (aliased as tf below):
from tensorflow.compat import v1 as tf
# (alternatively: from tensorflow.compat.v1 import random_normal_initializer)

def dense(name, x, n_out, dtype=tf.float32, init_w=0.05):
  """Dense layer."""
  n_in = common_layers.shape_list(x)[2]
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    w = tf.get_variable(
        "w", [n_in, n_out], dtype,
        initializer=tf.random_normal_initializer(0.0, init_w), trainable=True)
    b = tf.get_variable(
        "b", [n_out,], dtype, initializer=tf.zeros_initializer, trainable=True)
    x = tf.matmul(x, w) + b
    return x
Example 15: make_edge_vectors
# Required import (aliased as tf below):
from tensorflow.compat import v1 as tf
# (alternatively: from tensorflow.compat.v1 import random_normal_initializer)

def make_edge_vectors(adjacency_matrix,
                      num_edge_types,
                      depth,
                      name=None):
  """Gets edge vectors for the edge types in the adjacency matrix.

  Args:
    adjacency_matrix: A [batch, num_nodes, num_nodes, num_edge_types] tensor.
    num_edge_types: Number of different edge types.
    depth: Number of channels.
    name: An optional string name for scoping.

  Returns:
    A [batch, num_nodes, num_nodes, depth] tensor of edge vectors.
  """
  with tf.variable_scope(name, default_name="edge_vectors"):
    att_adj_vectors_shape = [num_edge_types, depth]
    adjacency_matrix_shape = common_layers.shape_list(adjacency_matrix)
    adj_vectors = (
        tf.get_variable(
            "adj_vectors",
            att_adj_vectors_shape,
            initializer=tf.random_normal_initializer(0, depth**-0.5)) *
        (depth**0.5))
    att_adj_vectors = tf.matmul(
        tf.reshape(tf.to_float(adjacency_matrix), [-1, num_edge_types]),
        adj_vectors)
    # Reshape to be [batch, num_nodes, num_nodes, depth].
    att_adj_vectors = tf.reshape(att_adj_vectors, [
        adjacency_matrix_shape[0], adjacency_matrix_shape[1],
        adjacency_matrix_shape[2], depth
    ])
    return att_adj_vectors
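A tiny NumPy illustration of the matmul in Example 15: a one-hot edge-type row selects that type's embedding vector (the numbers are illustrative only):

import numpy as np

num_edge_types, depth = 3, 4
adj_vectors = np.arange(num_edge_types * depth, dtype=float).reshape(
    num_edge_types, depth)
one_hot_edge = np.array([[0.0, 1.0, 0.0]])  # an edge of type 1
print(one_hot_edge @ adj_vectors)  # row 1 of adj_vectors: [[4. 5. 6. 7.]]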