This article collects typical usage examples of the Python method tensor2tensor.layers.common_layers.shape_list. If you are unsure what common_layers.shape_list does, how to call it, or where it is useful, the curated examples below should help. You can also explore other members of its containing module, tensor2tensor.layers.common_layers.
The following shows 15 code examples of common_layers.shape_list, sorted by popularity by default.
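
For context: shape_list returns the shape of a tensor as a Python list, using static dimensions where the graph knows them and falling back to dynamic tf.shape scalars otherwise, which is why its results can be mixed freely into tf.reshape and tf.minimum calls in the examples below. A minimal sketch of the idea (TF1-style; an illustration, not the library's exact code):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def shape_list_sketch(x):
  """Shape as a list: static ints where known, dynamic scalars elsewhere."""
  x = tf.convert_to_tensor(x)
  static = x.get_shape().as_list()
  dynamic = tf.shape(x)
  return [dim if dim is not None else dynamic[i]
          for i, dim in enumerate(static)]

x = tf.placeholder(tf.float32, [None, 10, 64])
print(shape_list_sketch(x))  # [<tf.Tensor ... shape=() dtype=int32>, 10, 64]
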
Example 1: padded_accuracy_topk

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shape_list [as alias]
def padded_accuracy_topk(predictions,
                         labels,
                         k,
                         weights_fn=common_layers.weights_nonzero):
  """Percentage of times that top-k predictions match labels on non-0s."""
  with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)
    effective_k = tf.minimum(k,
                             common_layers.shape_list(padded_predictions)[-1])
    _, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
    outputs = tf.to_int32(outputs)
    padded_labels = tf.to_int32(padded_labels)
    padded_labels = tf.expand_dims(padded_labels, axis=-1)
    padded_labels += tf.zeros_like(outputs)  # Broadcast to the same shape.
    same = tf.to_float(tf.equal(outputs, padded_labels))
    same_topk = tf.reduce_sum(same, axis=-1)
    return same_topk, weights
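
The expand_dims/zeros_like pair at the end broadcasts each label against all k candidates before comparing. A quick standalone check of that trick (plain TF1; the values are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

labels = tf.constant([2, 0, 2])               # [batch]
topk = tf.constant([[2, 1], [0, 3], [3, 1]])  # [batch, k]
labels = tf.expand_dims(labels, axis=-1)      # [batch, 1]
labels += tf.zeros_like(topk)                 # broadcast to [batch, k]
same = tf.reduce_sum(tf.to_float(tf.equal(topk, labels)), axis=-1)
with tf.Session() as sess:
  print(sess.run(same))  # [1. 1. 0.] -- label found in top-k for rows 0 and 1
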
Example 2: _quantize

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shape_list [as alias]
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q
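
The floor(y + uniform) line implements stochastic rounding: the value rounds up with probability equal to its fractional part, so the quantizer is unbiased in expectation. A quick numpy check of that property:

import numpy as np

rng = np.random.RandomState(0)
y = 2.3
# floor(2.3 + u) is 3 when u >= 0.7 and 2 otherwise, so the mean tends to 2.3.
samples = np.floor(y + rng.uniform(size=100000))
print(samples.mean())  # ~2.3
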
Example 3: decode_transformer

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shape_list [as alias]
def decode_transformer(encoder_output, encoder_decoder_attention_bias, targets,
                       hparams, name):
  """Original Transformer decoder."""
  with tf.variable_scope(name):
    targets = common_layers.flatten4d3d(targets)
    decoder_input, decoder_self_bias = (
        transformer.transformer_prepare_decoder(targets, hparams))
    decoder_input = tf.nn.dropout(decoder_input,
                                  1.0 - hparams.layer_prepostprocess_dropout)
    decoder_output = transformer.transformer_decoder(
        decoder_input, encoder_output, decoder_self_bias,
        encoder_decoder_attention_bias, hparams)
    # Expand since t2t expects 4d tensors.
    decoder_output = tf.expand_dims(decoder_output, axis=2)
    decoder_output_shape = common_layers.shape_list(decoder_output)
    decoder_output = tf.reshape(
        decoder_output, [decoder_output_shape[0], -1, 1, hparams.hidden_size])
    return decoder_output
Example 4: dense_bitwise_categorical_fun

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shape_list [as alias]
def dense_bitwise_categorical_fun(action_space, config, observations):
  """Dense network with bitwise input and categorical output."""
  del config
  obs_shape = common_layers.shape_list(observations)
  x = tf.reshape(observations, [-1] + obs_shape[2:])
  with tf.variable_scope("network_parameters"):
    with tf.variable_scope("dense_bitwise"):
      x = discretization.int_to_bit_embed(x, 8, 32)
      flat_x = tf.reshape(
          x, [obs_shape[0], obs_shape[1],
              functools.reduce(operator.mul, x.shape.as_list()[1:], 1)])
      # Two stacked hidden layers on top of the flattened bit embedding.
      x = tf.contrib.layers.fully_connected(flat_x, 256, tf.nn.relu)
      x = tf.contrib.layers.fully_connected(x, 128, tf.nn.relu)
      logits = tf.contrib.layers.fully_connected(x, action_space.n,
                                                 activation_fn=None)
      value = tf.contrib.layers.fully_connected(
          x, 1, activation_fn=None)[..., 0]
      policy = tf.contrib.distributions.Categorical(logits=logits)
  return NetworkOutput(policy, value, lambda a: a)
Example 5: add_depth_embedding

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shape_list [as alias]
def add_depth_embedding(x):
  """Add n-dimensional embedding as the depth embedding (timing signal).

  Adds embeddings to represent the position of the step in the recurrent
  tower.

  Args:
    x: a tensor with shape [max_step, batch, length, depth]

  Returns:
    a Tensor the same shape as x.
  """
  x_shape = common_layers.shape_list(x)
  depth = x_shape[-1]
  num_steps = x_shape[0]
  shape = [num_steps, 1, 1, depth]
  depth_embedding = (
      tf.get_variable(
          "depth_embedding",
          shape,
          initializer=tf.random_normal_initializer(0, depth**-0.5)) *
      (depth**0.5))
  x += depth_embedding
  return x
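
Note the initializer scaling: the variable is created with stddev depth**-0.5 and then multiplied by depth**0.5, so the signal added to x has roughly unit stddev while the stored variable stays small. The same pattern in isolation (a sketch; depth=64 is illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

depth = 64
v = tf.get_variable("demo_depth_embedding", [4, 1, 1, depth],
                    initializer=tf.random_normal_initializer(0, depth**-0.5))
signal = v * depth**0.5  # roughly unit-stddev, broadcast over batch and length
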
Example 6: decode

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shape_list [as alias]
def decode(self, bottleneck):
  """Auto-decode from the bottleneck and return the result."""
  # Get the shape from bottleneck and num channels.
  shape = common_layers.shape_list(bottleneck)
  try:
    num_channels = self.hparams.problem.num_channels
  except AttributeError:
    num_channels = 1
  dummy_targets = tf.zeros(shape[:-1] + [num_channels])
  # Set the bottleneck to decode.
  if len(shape) > 4:
    bottleneck = tf.squeeze(bottleneck, axis=[1])
  bottleneck = 2 * bottleneck - 1  # Be -1/1 instead of 0/1.
  self._cur_bottleneck_tensor = bottleneck
  # Run decoding.
  res = self.infer({"targets": dummy_targets})
  self._cur_bottleneck_tensor = None
  return res
Example 7: attention_lm_prepare_decoder

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shape_list [as alias]
def attention_lm_prepare_decoder(targets, hparams):
  """Prepare one shard of the model for the decoder.

  Args:
    targets: a Tensor.
    hparams: run hyperparameters

  Returns:
    decoder_input: a Tensor, bottom of decoder stack
    decoder_self_attention_bias: a Tensor, containing large negative values
      to implement masked attention and possibly biases for diagonal alignments
  """
  if hparams.prepend_mode == "prepend_inputs_full_attention":
    decoder_self_attention_bias = (
        common_attention.attention_bias_prepend_inputs_full_attention(
            common_attention.embedding_to_padding(targets)))
  else:
    decoder_self_attention_bias = (
        common_attention.attention_bias_lower_triangle(
            common_layers.shape_list(targets)[1]))
  decoder_input = common_layers.shift_right_3d(targets)
  if hparams.pos == "timing":
    decoder_input = common_attention.add_timing_signal_1d(decoder_input)
  return (decoder_input, decoder_self_attention_bias)
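
attention_bias_lower_triangle returns a [1, 1, length, length] tensor with large negative values above the diagonal, so softmax attention cannot look at future positions. A minimal sketch of such a mask (assuming the usual band_part construction; not the library's exact code):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def lower_triangle_bias_sketch(length):
  # Ones on and below the diagonal, zeros above it.
  lower = tf.matrix_band_part(tf.ones([length, length]), -1, 0)
  # Large negative bias wherever attention must be masked.
  return tf.reshape(-1e9 * (1.0 - lower), [1, 1, length, length])
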
Example 8: shake_shake_skip_connection

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shape_list [as alias]
def shake_shake_skip_connection(x, output_filters, stride, is_training):
  """Adds a residual connection to the filter x for the shake-shake model."""
  curr_filters = common_layers.shape_list(x)[-1]
  if curr_filters == output_filters:
    return x
  stride_spec = [1, stride, stride, 1]
  # Skip path 1.
  path1 = tf.nn.avg_pool(x, [1, 1, 1, 1], stride_spec, "VALID")
  path1 = tf.layers.conv2d(
      path1, int(output_filters / 2), (1, 1), padding="SAME",
      name="path1_conv")
  # Skip path 2.
  pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]  # First pad with 0s, then crop.
  path2 = tf.pad(x, pad_arr)[:, 1:, 1:, :]
  path2 = tf.nn.avg_pool(path2, [1, 1, 1, 1], stride_spec, "VALID")
  path2 = tf.layers.conv2d(
      path2, int(output_filters / 2), (1, 1), padding="SAME",
      name="path2_conv")
  # Concat and apply BN.
  final_path = tf.concat(values=[path1, path2], axis=-1)
  final_path = tf.layers.batch_normalization(
      final_path, training=is_training, name="final_path_bn")
  return final_path
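
Skip path 2 differs from path 1 only by a one-pixel shift: padding on the bottom/right and cropping the first row/column moves the sampling grid before pooling, so the two half-width paths see different spatial grids. The shift in isolation (a numpy sketch):

import numpy as np

x = np.arange(9).reshape(1, 3, 3, 1)
padded = np.pad(x, [(0, 0), (0, 1), (0, 1), (0, 0)])
shifted = padded[:, 1:, 1:, :]  # grid moved up-left by one pixel
print(shifted[0, :, :, 0])
# [[4 5 0]
#  [7 8 0]
#  [0 0 0]]
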
Example 9: body

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shape_list [as alias]
def body(self, features):
  hparams = copy.copy(self._hparams)
  inputs = features["inputs"]
  targets = features["targets"]
  targets_shape = common_layers.shape_list(targets)
  if not (tf.get_variable_scope().reuse or
          hparams.mode == tf.contrib.learn.ModeKeys.INFER):
    tf.summary.image("targets", targets, max_outputs=1)
  decoder_input, rows, cols = cia.prepare_decoder(targets, hparams)
  # Add class label to decoder input.
  if not hparams.unconditional:
    decoder_input += tf.reshape(
        inputs, [targets_shape[0], 1, 1, hparams.hidden_size])
  decoder_output = cia.transformer_decoder_layers(
      decoder_input, None,
      hparams.num_decoder_layers,
      hparams,
      attention_type=hparams.dec_attention_type,
      name="decoder")
  output = cia.create_output(decoder_output, rows, cols, targets, hparams)
  return output
Example 10: top

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shape_list [as alias]
def top(self, body_output, _):
  # TODO(lukaszkaiser): is this a universal enough way to get channels?
  num_channels = self._model_hparams.problem.num_channels
  with tf.variable_scope("rgb_softmax"):
    body_output_shape = common_layers.shape_list(body_output)
    reshape_shape = body_output_shape[:3]
    reshape_shape.extend([num_channels, self.top_dimensionality])
    res = tf.layers.dense(body_output, self.top_dimensionality * num_channels)
    res = tf.reshape(res, reshape_shape)
    if not tf.get_variable_scope().reuse:
      res_argmax = tf.argmax(res, axis=-1)
      tf.summary.image(
          "result",
          common_layers.tpu_safe_image_summary(res_argmax),
          max_outputs=1)
    return res
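
The reshape turns the dense projection into one top_dimensionality-way softmax per color channel. The shape bookkeeping in isolation (a sketch; the sizes are illustrative, not from the source):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

body_output = tf.zeros([2, 8, 8, 512])       # [batch, height, width, hidden]
res = tf.layers.dense(body_output, 3 * 256)  # [2, 8, 8, 768]
res = tf.reshape(res, [2, 8, 8, 3, 256])     # 256-way logits per RGB channel
print(res.shape)  # (2, 8, 8, 3, 256)
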
Example 11: targets_bottom

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shape_list [as alias]
def targets_bottom(self, x, summary_prefix="targets_bottom"):  # pylint: disable=arguments-differ
  inputs = x
  with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
    common_layers.summarize_video(inputs, summary_prefix)
    inputs_shape = common_layers.shape_list(inputs)
    # We embed each of 256=self.top_dimensionality possible pixel values.
    embedding_var = tf.get_variable(
        "pixel_embedding",
        [self.top_dimensionality, self.PIXEL_EMBEDDING_SIZE])
    hot_inputs = tf.one_hot(tf.to_int32(inputs), self.top_dimensionality)
    hot_inputs = tf.reshape(hot_inputs, [-1, self.top_dimensionality])
    embedded = tf.matmul(hot_inputs, embedding_var)
    # Let's now merge all channels that were embedded into a single vector.
    merged_size = self.PIXEL_EMBEDDING_SIZE * inputs_shape[4]
    embedded = tf.reshape(embedded, inputs_shape[:4] + [merged_size])
    transposed = common_layers.time_to_channels(embedded)
    return tf.layers.dense(
        transposed,
        self._body_input_depth,
        name="merge_pixel_embedded_frames")
Example 12: add_layer_timing_signal_learned_1d

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shape_list [as alias]
def add_layer_timing_signal_learned_1d(x, layer, num_layers):
  """Add n-dimensional embedding as the layer (vertical) timing signal.

  Adds embeddings to represent the position of the layer in the tower.

  Args:
    x: a tensor with shape [batch, length, depth]
    layer: layer num
    num_layers: total number of layers

  Returns:
    a Tensor the same shape as x.
  """
  channels = common_layers.shape_list(x)[-1]
  signal = get_layer_timing_signal_learned_1d(channels, layer, num_layers)
  x += signal
  return x
Example 13: add_positional_embedding

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shape_list [as alias]
def add_positional_embedding(x, max_length, name, positions=None):
  """Add positional embedding.

  Args:
    x: a Tensor with shape [batch, length, depth]
    max_length: an integer; the static maximum size of any dimension.
    name: a name for this layer.
    positions: an optional tensor with shape [batch, length]

  Returns:
    a Tensor the same shape as x.
  """
  _, length, depth = common_layers.shape_list(x)
  var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)
  if positions is None:
    sliced = tf.cond(
        tf.less(length, max_length),
        lambda: tf.slice(var, [0, 0], [length, -1]),
        lambda: tf.pad(var, [[0, length - max_length], [0, 0]]))
    return x + tf.expand_dims(sliced, 0)
  else:
    return x + tf.gather(var, tf.to_int32(positions))
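
The tf.cond picks between slicing the embedding table (when the sequence fits) and zero-padding it (when the sequence is longer than max_length). The same choice in plain numpy (a sketch; max_length=5, depth=2 are illustrative):

import numpy as np

var = np.arange(10, dtype=np.float32).reshape(5, 2)  # [max_length, depth]
length = 3
if length < 5:
  sliced = var[:length]                              # first `length` positions
else:
  sliced = np.pad(var, [(0, length - 5), (0, 0)])    # zero rows past max_length
print(sliced.shape)  # (3, 2); added to x as x + sliced[None, :, :]
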
Example 14: _relative_position_to_absolute_position_masked

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shape_list [as alias]
def _relative_position_to_absolute_position_masked(x):
  """Helper to dot_product_self_attention_relative_v2.

  Rearranges an attention logits or weights Tensor.

  The dimensions of the input represent:
  [batch, heads, query_position, memory_position - query_position + length - 1]

  The dimensions of the output represent:
  [batch, heads, query_position, memory_position]

  Only works with masked attention.  Undefined behavior for regions of the
  input where memory_position > query_position.

  Args:
    x: a Tensor with shape [batch, heads, length, length]

  Returns:
    a Tensor with shape [batch, heads, length, length]
  """
  batch, heads, length, _ = common_layers.shape_list(x)
  x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]])
  x = tf.reshape(x, [batch, heads, 1 + length, length])
  x = tf.slice(x, [0, 0, 1, 0], [-1, -1, -1, -1])
  return x
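
The pad-reshape-slice sequence is easy to verify in plain numpy (shapes reduced to [1, 1, length, length]; entries above the diagonal come out as garbage, exactly as the docstring warns):

import numpy as np

length = 3
x = np.arange(1, 10).reshape(1, 1, length, length)
x = np.pad(x, [(0, 0), (0, 0), (0, 0), (1, 0)])  # zero column on the left
x = x.reshape(1, 1, length + 1, length)          # fold the pad into a new row
x = x[:, :, 1:, :]                               # drop the first row
print(x[0, 0])
# [[3 0 4]   row i, column m now holds the old entry for
#  [5 6 0]   relative index m - i + length - 1; only m <= i
#  [7 8 9]]  is meaningful under the causal mask
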
Example 15: gather_indices_2d

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shape_list [as alias]
def gather_indices_2d(x, block_shape, block_stride):
  """Getting gather indices."""
  # Make an identity-matrix kernel.
  kernel = tf.eye(block_shape[0] * block_shape[1])
  kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1])
  # Make indices [1, h, w, 1] to apply convs.
  x_shape = common_layers.shape_list(x)
  indices = tf.range(x_shape[2] * x_shape[3])
  indices = tf.reshape(indices, [1, x_shape[2], x_shape[3], 1])
  indices = tf.nn.conv2d(
      tf.cast(indices, tf.float32),
      kernel,
      strides=[1, block_stride[0], block_stride[1], 1],
      padding="VALID")
  # Make indices [num_blocks, dim] to gather.
  dims = common_layers.shape_list(indices)[:3]
  if all([isinstance(dim, int) for dim in dims]):
    num_blocks = functools.reduce(operator.mul, dims, 1)
  else:
    num_blocks = tf.reduce_prod(dims)
  indices = tf.reshape(indices, [num_blocks, -1])
  return tf.cast(indices, tf.int32)
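
For non-overlapping blocks, the result is simply the flat index of every position in each block, one block per row. What the gather indices look like for a 4x4 grid with 2x2 blocks (a numpy sketch of the expected output, not of the conv-based computation above):

import numpy as np

h, w, bh, bw = 4, 4, 2, 2
flat = np.arange(h * w).reshape(h, w)
blocks = [flat[i:i + bh, j:j + bw].reshape(-1)
          for i in range(0, h, bh) for j in range(0, w, bw)]
print(np.stack(blocks))
# [[ 0  1  4  5]
#  [ 2  3  6  7]
#  [ 8  9 12 13]
#  [10 11 14 15]]
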