This article collects typical usage examples of the Python method tensor2tensor.layers.common_layers.layer_norm. If you have been wondering what common_layers.layer_norm does and how to use it, the curated examples below should help; you can also read further into its containing module, tensor2tensor.layers.common_layers.
Below are 14 code examples of common_layers.layer_norm, sorted by popularity.
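Before the examples, a minimal standalone sketch of calling layer_norm directly may help. The input shape, the scope name "demo_ln", and the TF1-style session setup are illustrative assumptions, not taken from any of the examples below:

import numpy as np
import tensorflow as tf
from tensor2tensor.layers import common_layers

# Normalize a [batch, length, hidden] tensor over its last axis.
x = tf.constant(np.random.rand(2, 8, 16), dtype=tf.float32)
y = common_layers.layer_norm(x, 16, name="demo_ln")  # 16 = last-dim size
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(y).shape)  # prints (2, 8, 16)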
Example 1: encoder
# Required import: from tensor2tensor.layers import common_layers
# Or: from tensor2tensor.layers.common_layers import layer_norm
def encoder(self, x):
  with tf.variable_scope("encoder"):
    hparams = self.hparams
    kernel, strides = self._get_kernel_and_strides()
    # Down-convolutions.
    for i in range(hparams.num_hidden_layers):
      x = self.make_even_size(x)
      x = tf.layers.conv2d(
          x,
          hparams.hidden_size * 2**(i + 1),
          kernel,
          strides=strides,
          padding="SAME",
          activation=common_layers.belu,
          name="conv_%d" % i)
      x = common_layers.layer_norm(x)
    return x
Example 2: encode
# Required import: from tensor2tensor.layers import common_layers
# Or: from tensor2tensor.layers.common_layers import layer_norm
def encode(self, inputs, target_space, hparams, features=None, losses=None):
  """Add layers of strided convolutions on top of encoder."""
  with tf.variable_scope("downstride"):
    hparams = self.hparams
    kernel, strides = (4, 4), (2, 2)
    x = inputs
    # Down-convolutions.
    for i in range(hparams.num_compress_steps):
      x = common_layers.make_even_size(x)
      x = tf.layers.conv2d(
          x, hparams.hidden_size, kernel, strides=strides,
          padding="SAME", activation=common_layers.belu, name="conv_%d" % i)
      x = common_layers.layer_norm(x)
  encoder_output, encoder_decoder_attention_bias = super(
      TransformerSketch, self).encode(
          x, target_space, hparams, features=features, losses=losses)
  return encoder_output, encoder_decoder_attention_bias
Example 3: residual_dilated_conv
# Required import: from tensor2tensor.layers import common_layers
# Or: from tensor2tensor.layers.common_layers import layer_norm
def residual_dilated_conv(x, repeat, padding, name, hparams):
  """A stack of convolution blocks with residual connections."""
  with tf.variable_scope(name):
    k = (hparams.kernel_height, hparams.kernel_width)
    dilations_and_kernels = [((2**i, 1), k)
                             for i in range(hparams.num_hidden_layers)]
    for i in range(repeat):
      with tf.variable_scope("repeat_%d" % i):
        y = common_layers.conv_block(
            common_layers.layer_norm(x, hparams.hidden_size, name="lnorm"),
            hparams.hidden_size,
            dilations_and_kernels,
            padding=padding,
            name="residual_conv")
        y = tf.nn.dropout(y, 1.0 - hparams.dropout)
        x += y
    return x
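As a rough usage sketch, the helper above can be driven with a hand-built HParams object. Every field value below is an illustrative assumption rather than a setting from the original model:

import tensorflow as tf

toy_hparams = tf.contrib.training.HParams(
    kernel_height=3, kernel_width=1,  # assumed toy kernel
    num_hidden_layers=3,              # dilation rates 1, 2, 4
    hidden_size=32, dropout=0.1)
inputs = tf.zeros([2, 16, 1, 32])     # [batch, length, 1, hidden_size]
outputs = residual_dilated_conv(inputs, repeat=2, padding="SAME",
                                name="demo", hparams=toy_hparams)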
Example 4: encoder
# Required import: from tensor2tensor.layers import common_layers
# Or: from tensor2tensor.layers.common_layers import layer_norm
def encoder(self, x):
  with tf.variable_scope("encoder"):
    hparams = self.hparams
    layers = []
    kernel, strides = self._get_kernel_and_strides()
    # Down-convolutions.
    for i in range(hparams.num_hidden_layers):
      x = self.make_even_size(x)
      layers.append(x)
      x = tf.layers.conv2d(
          x,
          hparams.hidden_size * 2**(i + 1),
          kernel,
          strides=strides,
          padding="SAME",
          activation=common_layers.belu,
          name="conv_%d" % i)
      x = common_layers.layer_norm(x, name="ln_%d" % i)
    return x, layers
Example 5: decoder
# Required import: from tensor2tensor.layers import common_layers
# Or: from tensor2tensor.layers.common_layers import layer_norm
def decoder(self, x, encoder_layers):
  del encoder_layers
  with tf.variable_scope("decoder"):
    hparams = self.hparams
    kernel, strides = self._get_kernel_and_strides()
    # Up-convolutions.
    for i in range(hparams.num_hidden_layers):
      j = hparams.num_hidden_layers - i - 1
      x = tf.layers.conv2d_transpose(
          x,
          hparams.hidden_size * 2**j,
          kernel,
          strides=strides,
          padding="SAME",
          activation=common_layers.belu,
          name="deconv_%d" % j)
      x = common_layers.layer_norm(x, name="ln_%d" % i)
    return x
Example 6: residual_conv
# Required import: from tensor2tensor.layers import common_layers
# Or: from tensor2tensor.layers.common_layers import layer_norm
def residual_conv(x, repeat, k, hparams, name, reuse=None):
  """A stack of convolution blocks with residual connections."""
  with tf.variable_scope(name, reuse=reuse):
    dilations_and_kernels = [((1, 1), k) for _ in range(3)]
    for i in range(repeat):
      with tf.variable_scope("repeat_%d" % i):
        y = common_layers.conv_block(
            common_layers.layer_norm(x, hparams.hidden_size, name="lnorm"),
            hparams.hidden_size,
            dilations_and_kernels,
            padding="SAME",
            name="residual_conv")
        y = tf.nn.dropout(y, 1.0 - hparams.dropout)
        x += y
    return x
Example 7: residual_fn2
# Required import: from tensor2tensor.layers import common_layers
# Or: from tensor2tensor.layers.common_layers import layer_norm
def residual_fn2(x, y, hparams):
  y = tf.nn.dropout(y, 1.0 - hparams.dropout)
  return common_layers.layer_norm(x + y)
Example 8: residual_fn3
# Required import: from tensor2tensor.layers import common_layers
# Or: from tensor2tensor.layers.common_layers import layer_norm
def residual_fn3(x, y, z, hparams):
  y = tf.nn.dropout(y, 1.0 - hparams.dropout)
  z = tf.nn.dropout(z, 1.0 - hparams.dropout)
  return common_layers.layer_norm(x + y + z)
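Examples 7 and 8 are the same post-norm residual pattern, layer_norm(x + dropout(y)); residual_fn3 merely folds two branches into one residual update. A toy call, with shapes and dropout rate assumed for illustration:

x = tf.zeros([2, 10, 64])
y = tf.zeros([2, 10, 64])
toy_hparams = tf.contrib.training.HParams(dropout=0.1)
out = residual_fn2(x, y, toy_hparams)  # layer_norm(x + dropout(y))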
Example 9: residual_block
# Required import: from tensor2tensor.layers import common_layers
# Or: from tensor2tensor.layers.common_layers import layer_norm
def residual_block(x, hparams):
  """A stack of convolution blocks with residual connection."""
  k = (hparams.kernel_height, hparams.kernel_width)
  dilations_and_kernels = [((1, 1), k) for _ in range(3)]
  y = common_layers.subseparable_conv_block(
      x,
      hparams.hidden_size,
      dilations_and_kernels,
      padding="SAME",
      separability=0,
      name="residual_block")
  x = common_layers.layer_norm(x + y, hparams.hidden_size, name="lnorm")
  return tf.nn.dropout(x, 1.0 - hparams.dropout)
Example 10: residual_block_layer
# Required import: from tensor2tensor.layers import common_layers
# Or: from tensor2tensor.layers.common_layers import layer_norm
def residual_block_layer(inputs, hparams):
  """Residual block over inputs.

  Runs a residual block consisting of
    conv: kernel_size x kernel_size
    conv: 1x1
  dropout, add and normalize according to hparams.layer_postprocess_sequence.

  Args:
    inputs: Tensor of shape [batch, height, width, hparams.hidden_size].
    hparams: tf.contrib.training.HParams.

  Returns:
    Tensor of shape [batch, height, width, hparams.hidden_size].
  """
  kernel = (hparams.res_kernel_size, hparams.res_kernel_size)
  x = inputs
  for i in range(hparams.num_res_layers):
    with tf.variable_scope("res_conv_%d" % i):
      # kernel_size x kernel_size conv block.
      y = common_layers.conv_block(
          common_layers.layer_norm(x, hparams.hidden_size, name="lnorm"),
          hparams.hidden_size, [((1, 1), kernel)],
          strides=(1, 1),
          padding="SAME",
          name="residual_conv")
      # 1x1 conv block.
      y = common_layers.conv_block(
          y,
          hparams.hidden_size, [((1, 1), (1, 1))],
          strides=(1, 1),
          padding="SAME",
          name="residual_dense")
      x = common_layers.layer_postprocess(x, y, hparams)
  return x
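Note that normalization of the residual path is delegated here to common_layers.layer_postprocess, which applies the steps named in hparams.layer_postprocess_sequence (for example, "dan" for dropout, add, layer-norm, if I read the tensor2tensor convention correctly); layer_norm itself only pre-normalizes the convolution input.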
Example 11: embed
# Required import: from tensor2tensor.layers import common_layers
# Or: from tensor2tensor.layers.common_layers import layer_norm
def embed(self, x, name="embedding"):
  """Input embedding with a non-zero bias for uniform inputs."""
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    x_shape = common_layers.shape_list(x)
    # Merge channels and depth before embedding.
    x = tf.reshape(x, x_shape[:-2] + [x_shape[-2] * x_shape[-1]])
    x = tf.layers.dense(
        x,
        self.hparams.hidden_size,
        name="embed",
        activation=common_layers.belu,
        bias_initializer=tf.random_normal_initializer(stddev=0.01))
    x = common_layers.layer_norm(x, name="ln_embed")
    return common_attention.add_timing_signal_nd(x)
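The reshape folds the last two axes together ([..., channels, depth] becomes [..., channels * depth]) so the dense layer embeds both jointly, and the random-normal bias initializer is what gives the embedding the docstring's "non-zero bias", keeping the belu activation from collapsing on uniform inputs.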
Example 12: mlp
# Required import: from tensor2tensor.layers import common_layers
# Or: from tensor2tensor.layers.common_layers import layer_norm
def mlp(feature, hparams, name="mlp"):
  """Multi layer perceptron with dropout and relu activation."""
  with tf.variable_scope(name, "mlp", values=[feature]):
    num_mlp_layers = hparams.num_mlp_layers
    mlp_size = hparams.mlp_size
    for _ in range(num_mlp_layers):
      feature = common_layers.dense(feature, mlp_size, activation=None)
      utils.collect_named_outputs("norms", "mlp_feature",
                                  tf.norm(feature, axis=-1))
      feature = common_layers.layer_norm(feature)
      feature = tf.nn.relu(feature)
      feature = tf.nn.dropout(feature, keep_prob=1. - hparams.dropout)
    return feature
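Here layer_norm sits between the dense projection and the ReLU, so each MLP layer is normalized pre-activation; the collect_named_outputs call only records the feature norm for monitoring and does not alter the computation.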
Example 13: testLayerNorm
# Required import: from tensor2tensor.layers import common_layers
# Or: from tensor2tensor.layers.common_layers import layer_norm
def testLayerNorm(self):
  x = np.random.rand(5, 7, 11)
  y = common_layers.layer_norm(tf.constant(x, dtype=tf.float32), 11)
  self.evaluate(tf.global_variables_initializer())
  res = self.evaluate(y)
  self.assertEqual(res.shape, (5, 7, 11))
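The test passes the last-dimension size (11) explicitly as the second argument. Judging from the other examples it can also be omitted, in which case the size is presumably inferred from the input shape; treat the one-liner below as a hedged sketch of that variant:

y = common_layers.layer_norm(tf.constant(x, dtype=tf.float32))  # size inferred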
Example 14: residual_block_layer
# Required import: from tensor2tensor.layers import common_layers
# Or: from tensor2tensor.layers.common_layers import layer_norm
def residual_block_layer(inputs, hparams):
  """Residual block over inputs.

  Runs a residual block consisting of
    conv: kernel_size x kernel_size
    conv: 1x1
  dropout, add and normalize according to hparams.layer_postprocess_sequence.

  Args:
    inputs: Tensor of shape [batch, height, width, hparams.hidden_size].
    hparams: HParams.

  Returns:
    Tensor of shape [batch, height, width, hparams.hidden_size].
  """
  kernel = (hparams.res_kernel_size, hparams.res_kernel_size)
  x = inputs
  for i in range(hparams.num_res_layers):
    with tf.variable_scope("res_conv_%d" % i):
      # kernel_size x kernel_size conv block.
      y = common_layers.conv_block(
          common_layers.layer_norm(x, hparams.hidden_size, name="lnorm"),
          hparams.hidden_size, [((1, 1), kernel)],
          strides=(1, 1),
          padding="SAME",
          name="residual_conv")
      # 1x1 conv block.
      y = common_layers.conv_block(
          y,
          hparams.hidden_size, [((1, 1), (1, 1))],
          strides=(1, 1),
          padding="SAME",
          name="residual_dense")
      x = common_layers.layer_postprocess(x, y, hparams)
  return x