This article collects typical usage examples of the common_layers.dense method from the Python module tensor2tensor.layers.common_layers. If you have been wondering what common_layers.dense does, how to use it, or where to find usage examples, the curated code samples below should help. You can also explore the rest of the tensor2tensor.layers.common_layers module.
Below are 13 code examples of common_layers.dense, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: update_hparams_for_vq_gating
# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import dense [as alias]
def update_hparams_for_vq_gating(hparams):
  """VQ Gating hparams."""
  hparams.add_hparam("z_size", 4)
  hparams.add_hparam("noise_dev", 0.5)
  # Bottleneck kinds supported: dense, vae, dvq.
  hparams.add_hparam("bottleneck_kind", "dvq")
  hparams.add_hparam("num_blocks", 1)
  hparams.add_hparam("num_residuals", 1)
  # Reshape method for DVQ: slice, project
  hparams.add_hparam("beta", 0.25)
  hparams.add_hparam("epsilon", 1e-5)
  hparams.add_hparam("decay", 0.999)
  hparams.add_hparam("ema", False)  # default is false until ema is implemented
  hparams.add_hparam("random_top_k", 1)
  hparams.add_hparam("soft_em", False)
  hparams.add_hparam("num_samples", 10)
  hparams.add_hparam("gating_type", "vq")
  hparams.add_hparam("use_scales", int(True))
  hparams.add_hparam("residual_centroids", int(False))
Example 2: vae
# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import dense [as alias]
def vae(x, z_size, name=None):
  """Simple variational autoencoder without discretization.

  Args:
    x: Input to the discretization bottleneck.
    z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
    name: Name for the bottleneck scope.

  Returns:
    Sampled latent z, KL loss, mu and log_sigma.
  """
  with tf.variable_scope(name, default_name="vae"):
    mu = tf.layers.dense(x, z_size, name="mu")
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    shape = common_layers.shape_list(x)
    epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss, mu, log_sigma
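A hedged usage sketch: the epsilon tensor above is built as [batch, length, 1, z_size], so the input is assumed to be 4-D, e.g. [batch, length, 1, hidden]; TF 1.x graph mode is assumed.

import tensorflow as tf

x = tf.random_normal([8, 16, 1, 64])  # [batch, length, 1, hidden]
z, kl_loss, mu, log_sigma = vae(x, z_size=16)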
Example 3: tanh_discrete_bottleneck
# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import dense [as alias]
def tanh_discrete_bottleneck(x, bottleneck_bits, bottleneck_noise,
                             discretize_warmup_steps, mode):
  """Simple discretization through tanh, flip bottleneck_noise many bits."""
  x = tf.layers.dense(x, bottleneck_bits, name="tanh_discrete_bottleneck")
  d0 = tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x))) - 1.0
  if mode == tf.estimator.ModeKeys.TRAIN:
    x += tf.truncated_normal(
        common_layers.shape_list(x), mean=0.0, stddev=0.2)
  x = tf.tanh(x)
  d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
  if mode == tf.estimator.ModeKeys.TRAIN:
    noise = tf.random_uniform(common_layers.shape_list(x))
    noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
    d *= noise
  d = common_layers.mix(d, x, discretize_warmup_steps,
                        mode == tf.estimator.ModeKeys.TRAIN)
  return d, d0
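A hedged usage sketch (values are illustrative): common_layers.mix reads the global step internally, so one is created first; TF 1.x graph mode is assumed.

import tensorflow as tf

tf.train.get_or_create_global_step()  # common_layers.mix reads the global step
x = tf.random_normal([8, 16, 64])
d, d0 = tanh_discrete_bottleneck(
    x, bottleneck_bits=32, bottleneck_noise=0.1,
    discretize_warmup_steps=16000, mode=tf.estimator.ModeKeys.TRAIN)
# d anneals from tanh(x) to its sign over the warmup; d0 is the hard sign
# of the pre-noise logits.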
Example 4: vae
# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import dense [as alias]
def vae(x, z_size, name=None):
  """Simple variational autoencoder without discretization.

  Args:
    x: Input to the discretization bottleneck.
    z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
    name: Name for the bottleneck scope.

  Returns:
    Sampled latent z, KL loss, mu and log_sigma.
  """
  with tf.variable_scope(name, default_name="vae"):
    mu = tf.layers.dense(x, z_size, name="mu")
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    shape = common_layers.shape_list(x)
    epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    # Same KL term as Example 2: tf.expm1(v) == tf.exp(v) - 1.0, written out.
    kl = 0.5 * tf.reduce_mean(
        tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss, mu, log_sigma
Example 5: step_preprocess
# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import dense [as alias]
def step_preprocess(x, step, hparams):
  """Preprocess the input at the beginning of each step.

  Args:
    x: input tensor
    step: step
    hparams: model hyper-parameters

  Returns:
    preprocessed input.
  """
  original_channel_size = common_layers.shape_list(x)[-1]

  if hparams.add_position_timing_signal:
    x = add_position_timing_signal(x, step, hparams)

  if hparams.add_step_timing_signal:
    x = add_step_timing_signal(x, step, hparams)

  if ((hparams.add_position_timing_signal or hparams.add_step_timing_signal)
      and hparams.add_or_concat_timing_signal == "concat"):
    # linear projection to the original dimension of x
    x = common_layers.dense(
        x, original_channel_size, activation=None, use_bias=False)

  if hparams.add_sru:
    x = common_layers.sru(x)

  return x
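A hedged usage sketch: in tensor2tensor this function lives in universal_transformer_util next to the add_position_timing_signal / add_step_timing_signal helpers, which are not shown above. The hparams field names below (e.g. position_start_index) are taken from that module and may vary across versions.

import tensorflow as tf

hparams = tf.contrib.training.HParams(
    add_position_timing_signal=True,
    position_start_index=None,  # helper option; None starts the signal at 0
    add_step_timing_signal=False,
    add_or_concat_timing_signal="add",
    add_sru=False)
x = tf.random_normal([8, 16, 64])
out = step_preprocess(x, step=0, hparams=hparams)  # same shape as x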
Example 6: compute_attention_component
# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import dense [as alias]
def compute_attention_component(antecedent,
                                total_depth,
                                filter_width=1,
                                padding="VALID",
                                name="c",
                                vars_3d_num_heads=0):
  """Computes attention component (query, key or value).

  Args:
    antecedent: a Tensor with shape [batch, length, channels]
    total_depth: an integer
    filter_width: An integer specifying how wide you want the attention
      component to be.
    padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
    name: a string specifying scope name.
    vars_3d_num_heads: an optional integer (if we want to use 3d variables)

  Returns:
    c : [batch, length, depth] tensor
  """
  if vars_3d_num_heads > 0:
    assert filter_width == 1
    input_depth = antecedent.get_shape().as_list()[-1]
    depth_per_head = total_depth // vars_3d_num_heads
    initializer_stddev = input_depth ** -0.5
    if "q" in name:
      initializer_stddev *= depth_per_head ** -0.5
    var = tf.get_variable(
        name, [input_depth,
               vars_3d_num_heads,
               total_depth // vars_3d_num_heads],
        initializer=tf.random_normal_initializer(stddev=initializer_stddev))
    var = tf.cast(var, antecedent.dtype)
    var = tf.reshape(var, [input_depth, total_depth])
    return tf.tensordot(antecedent, var, axes=1)
  if filter_width == 1:
    return common_layers.dense(
        antecedent, total_depth, use_bias=False, name=name)
  else:
    return common_layers.conv1d(
        antecedent, total_depth, filter_width, padding, name=name)
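A minimal usage sketch: with the default filter_width=1, each call is a bias-free common_layers.dense projection under its own variable name.

import tensorflow as tf

x = tf.random_normal([8, 16, 64])  # [batch, length, channels]
q = compute_attention_component(x, total_depth=64, name="q")
k = compute_attention_component(x, total_depth=64, name="k")
v = compute_attention_component(x, total_depth=64, name="v")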
Example 7: mlp
# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import dense [as alias]
def mlp(feature, hparams, name="mlp"):
  """Multi layer perceptron with dropout and relu activation."""
  with tf.variable_scope(name, "mlp", values=[feature]):
    num_mlp_layers = hparams.num_mlp_layers
    mlp_dim = hparams.mlp_dim
    for _ in range(num_mlp_layers):
      feature = common_layers.dense(feature, mlp_dim, activation=tf.nn.relu)
      feature = tf.nn.dropout(feature, keep_prob=1. - hparams.dropout)
    return feature
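A minimal usage sketch with illustrative hparams values (TF 1.x graph mode assumed):

import tensorflow as tf

hparams = tf.contrib.training.HParams(num_mlp_layers=2, mlp_dim=128, dropout=0.1)
feature = tf.random_normal([8, 64])
feature = mlp(feature, hparams)  # -> [8, 128]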
Example 8: mlp
# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import dense [as alias]
def mlp(feature, hparams, name="mlp"):
  """Multi layer perceptron with dropout and relu activation."""
  with tf.variable_scope(name, "mlp", values=[feature]):
    num_mlp_layers = hparams.num_mlp_layers
    mlp_size = hparams.mlp_size
    for _ in range(num_mlp_layers):
      feature = common_layers.dense(feature, mlp_size, activation=None)
      utils.collect_named_outputs("norms", "mlp_feature",
                                  tf.norm(feature, axis=-1))
      feature = common_layers.layer_norm(feature)
      feature = tf.nn.relu(feature)
      feature = tf.nn.dropout(feature, keep_prob=1. - hparams.dropout)
    return feature
Example 9: testDenseWithLayerCollection
# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import dense [as alias]
def testDenseWithLayerCollection(self):
  with tf.variable_scope("test_layer_collection"):
    x1 = tf.zeros([3, 4], tf.float32)
    layer_collection = kfac.LayerCollection()
    common_layers.dense(
        x1, units=10, layer_collection=layer_collection, name="y1")
    self.assertLen(layer_collection.get_blocks(), 1)

    # 3D inputs.
    x2 = tf.zeros([3, 4, 5], tf.float32)
    common_layers.dense(
        x2, units=10, layer_collection=layer_collection, name="y2")
    self.assertLen(layer_collection.get_blocks(), 2)
Example 10: int_to_bit_embed
# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import dense [as alias]
def int_to_bit_embed(x_int, num_bits, embedding_size, base=2):
  """Turn x_int into a bitwise (lower-endian) tensor and embed densely."""
  shape = common_layers.shape_list(x_int)
  inputs = int_to_bit(x_int, num_bits, base=base)
  # NOTE: the literal 8 here appears to assume num_bits == 8.
  inputs = tf.reshape(inputs, shape[:-1] + [shape[-1] * 8])
  inputs = 2.0 * tf.to_float(inputs) - 1.0  # Move from 0/1 to -1/1.
  return tf.layers.dense(inputs, embedding_size, name="int_to_bit_embed")
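A hedged usage sketch: int_to_bit is not shown on this page, so the sketch calls the library copy of this function from tensor2tensor.layers.discretization; num_bits=8 matches the literal 8 in the reshape above.

import tensorflow as tf
from tensor2tensor.layers import discretization

x_int = tf.constant([[3], [7]], dtype=tf.int32)  # [batch, 1]
emb = discretization.int_to_bit_embed(x_int, num_bits=8, embedding_size=64)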
Example 11: tanh_discrete_unbottleneck
# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import dense [as alias]
def tanh_discrete_unbottleneck(x, hidden_size):
  """Simple un-discretization from tanh."""
  x = tf.layers.dense(x, hidden_size, name="tanh_discrete_unbottleneck")
  return x
Example 12: isemhash_bottleneck
# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import dense [as alias]
def isemhash_bottleneck(x,
                        bottleneck_bits,
                        bottleneck_noise,
                        discretize_warmup_steps,
                        mode,
                        isemhash_noise_dev=0.5,
                        isemhash_mix_prob=0.5):
  """Improved semantic hashing bottleneck."""
  with tf.variable_scope("isemhash_bottleneck"):
    x = tf.layers.dense(x, bottleneck_bits, name="dense")
    y = common_layers.saturating_sigmoid(x)
    if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
      noise = tf.truncated_normal(
          common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev)
      y = common_layers.saturating_sigmoid(x + noise)
    d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y)
    d = 2.0 * d - 1.0  # Move from [0, 1] to [-1, 1].
    if mode == tf.estimator.ModeKeys.TRAIN:  # Flip some bits.
      noise = tf.random_uniform(common_layers.shape_list(x))
      noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
      d *= noise
    d = common_layers.mix(
        d,
        2.0 * y - 1.0,
        discretize_warmup_steps,
        mode == tf.estimator.ModeKeys.TRAIN,
        max_prob=isemhash_mix_prob)
    return d, 0.0
Example 13: isemhash_unbottleneck
# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import dense [as alias]
def isemhash_unbottleneck(x, hidden_size, isemhash_filter_size_multiplier=1.0):
  """Improved semantic hashing un-bottleneck."""
  filter_size = int(hidden_size * isemhash_filter_size_multiplier)
  x = 0.5 * (x - 1.0)  # Move from [-1, 1] to [-1, 0].
  with tf.variable_scope("isemhash_unbottleneck"):
    h1a = tf.layers.dense(x, filter_size, name="hidden1a")
    h1b = tf.layers.dense(1.0 - x, filter_size, name="hidden1b")
    h2 = tf.layers.dense(tf.nn.relu(h1a + h1b), filter_size, name="hidden2")
    return tf.layers.dense(tf.nn.relu(h2), hidden_size, name="final")
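A hedged sketch pairing Examples 12 and 13 (values are illustrative; TF 1.x graph mode assumed; common_layers.mix reads the global step):

import tensorflow as tf

tf.train.get_or_create_global_step()
x = tf.random_normal([8, 16, 64])
d, extra_loss = isemhash_bottleneck(
    x, bottleneck_bits=32, bottleneck_noise=0.1,
    discretize_warmup_steps=16000, mode=tf.estimator.ModeKeys.TRAIN)
y = isemhash_unbottleneck(d, hidden_size=64)  # back to [8, 16, 64]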