This article collects typical usage examples of the Python method tensorflow.compat.v1.tile. If you are wondering what exactly v1.tile does, how to call it, or what real uses of it look like, the curated examples below should help. You can also explore further usage examples from the module the method belongs to, tensorflow.compat.v1.
The following presents 15 code examples of v1.tile, sorted by popularity by default.
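Before the project examples, here is what tf.tile itself does: it repeats a tensor along each axis according to a multiples vector with one entry per dimension. A minimal standalone sketch (TF1-style graph mode; shapes are illustrative only):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# A [2, 3] matrix tiled with multiples [2, 1] stacks two copies along axis 0.
x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
tiled = tf.tile(x, multiples=[2, 1])  # shape [4, 3]

with tf.Session() as sess:
  print(sess.run(tiled))
  # [[1 2 3]
  #  [4 5 6]
  #  [1 2 3]
  #  [4 5 6]]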
Example 1: _prepare_indexes
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tile [as alias]
def _prepare_indexes(self, shape, channels=None):
  shape = tf.convert_to_tensor(shape)

  if not self.built:
    if not (shape.shape.is_fully_defined() and shape.shape.ndims == 1):
      raise ValueError("`shape` must be a vector with known length.")
    ndim = shape.shape.as_list()[0] + 1
    channel_axis = self._channel_axis(ndim)
    input_shape = ndim * [None]
    input_shape[channel_axis] = channels
    self.build(input_shape)

  _, channel_axis, channels, input_slices = self._get_input_dims()

  # TODO(jonycgn, ssjhv): Investigate broadcasting.
  indexes = tf.range(channels, dtype=tf.int32)
  indexes = tf.cast(indexes, tf.int32)
  tiles = tf.concat(
      [shape[:channel_axis - 1], [1], shape[channel_axis:]], axis=0)
  indexes = tf.tile(indexes[input_slices[1:]], tiles)
  return indexes
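The heart of this method is tiling a per-channel index vector across all other dimensions. A minimal sketch of that pattern with the class bookkeeping (channel_axis, input_slices) hard-coded for a hypothetical channels-last shape:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

shape = tf.constant([2, 4, 4])  # hypothetical [H, W, C] shape, channels last
channels = 4
indexes = tf.range(channels, dtype=tf.int32)  # [0, 1, 2, 3]
# Add singleton axes in front so the tile below can repeat over H and W.
indexes = indexes[tf.newaxis, tf.newaxis, :]
tiles = tf.concat([shape[:2], [1]], axis=0)   # repeat over H and W only
indexes = tf.tile(indexes, tiles)             # shape [2, 4, 4]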
Example 2: multi_label_loss
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tile [as alias]
def multi_label_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Average loss over the labels."""
  del vocab_size  # unused arg
  logits = top_out
  num_labels = tf.shape(targets)[1]
  logits = tf.tile(logits, [1, num_labels, 1, 1, 1])

  xent, weights = common_layers.padded_cross_entropy(
      logits,
      targets,
      model_hparams.label_smoothing,
      weights_fn=weights_fn,
      reduce_sum=False,
  )
  xent = tf.squeeze(xent, [2, 3])
  weights = tf.squeeze(weights, [2, 3])
  # average loss over all labels
  loss = tf.reduce_sum(xent, axis=1)
  weights = tf.reduce_sum(weights, axis=1)
  loss /= (weights + 1e-8)
  weights = tf.to_float(tf.greater(weights, 0.))
  return tf.reduce_sum(loss * weights), tf.reduce_sum(weights)
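The tile call is what lets a single prediction be scored against every label: logits arrive with a singleton label axis and are repeated num_labels times. In isolation, with hypothetical shapes:

import tensorflow.compat.v1 as tf

logits = tf.zeros([8, 1, 1, 1, 1000])               # one prediction per example
num_labels = 3
logits = tf.tile(logits, [1, num_labels, 1, 1, 1])  # [8, 3, 1, 1, 1000]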
Example 3: project_hidden
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tile [as alias]
def project_hidden(x, projection_tensors, hidden_size, num_blocks):
  """Project encoder hidden state under num_blocks using projection tensors.

  Args:
    x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size].
    projection_tensors: Projection tensors used to project the hidden state.
    hidden_size: Dimension of the latent space.
    num_blocks: Number of blocks in DVQ.

  Returns:
    x_projected: Projected states of shape [batch_size, latent_dim, num_blocks,
      hidden_size / num_blocks].
  """
  batch_size, latent_dim, _ = common_layers.shape_list(x)
  x = tf.reshape(x, shape=[1, -1, hidden_size])
  x_tiled = tf.reshape(
      tf.tile(x, multiples=[num_blocks, 1, 1]),
      shape=[num_blocks, -1, hidden_size])
  x_projected = tf.matmul(x_tiled, projection_tensors)
  x_projected = tf.transpose(x_projected, perm=[1, 0, 2])
  x_4d = tf.reshape(x_projected, [batch_size, latent_dim, num_blocks, -1])
  return x_4d
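A standalone sketch of the same tile-then-batched-matmul trick, with randomly initialized projection tensors and the common_layers dependency removed (shapes hard-coded; this is an illustration, not the tensor2tensor API):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

batch_size, latent_dim, hidden_size, num_blocks = 2, 5, 8, 4
x = tf.random.normal([batch_size, latent_dim, hidden_size])
# One [hidden_size, hidden_size // num_blocks] projection per block.
projection_tensors = tf.random.normal(
    [num_blocks, hidden_size, hidden_size // num_blocks])

x_flat = tf.reshape(x, [1, -1, hidden_size])
# Tiling by num_blocks lets a single batched matmul apply every projection.
x_tiled = tf.tile(x_flat, multiples=[num_blocks, 1, 1])
x_projected = tf.matmul(x_tiled, projection_tensors)  # [num_blocks, B*L, h/n]
x_projected = tf.transpose(x_projected, perm=[1, 0, 2])
x_4d = tf.reshape(x_projected, [batch_size, latent_dim, num_blocks, -1])
print(x_4d.get_shape().as_list())  # [2, 5, 4, 2]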
Example 4: reset
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tile [as alias]
def reset(self, entries_to_reset):
  """Reset the entries in the memory.

  Args:
    entries_to_reset: a 1D tensor.

  Returns:
    the reset op.
  """
  num_updates = tf.size(entries_to_reset)
  update_vals = tf.scatter_update(
      self.mem_vals, entries_to_reset,
      tf.tile(tf.expand_dims(
          tf.fill([self.memory_size, self.val_depth], .0), 0),
          [num_updates, 1, 1]))
  update_logits = tf.scatter_update(
      self.mean_logits, entries_to_reset,
      tf.tile(tf.expand_dims(
          tf.fill([self.memory_size], .0), 0),
          [num_updates, 1]))
  reset_op = tf.group([update_vals, update_logits])
  return reset_op
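The same reset pattern can be tried on a toy variable; a minimal sketch with hypothetical sizes (3 memory entries, memory_size=2, val_depth=4):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

mem_vals = tf.get_variable(
    "mem_vals", shape=[3, 2, 4], initializer=tf.ones_initializer())
entries_to_reset = tf.constant([0, 2])
num_updates = tf.size(entries_to_reset)
# Tile one zero-filled entry so scatter_update can overwrite several rows.
reset_op = tf.scatter_update(
    mem_vals, entries_to_reset,
    tf.tile(tf.expand_dims(tf.fill([2, 4], 0.), 0), [num_updates, 1, 1]))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(reset_op)
  print(sess.run(mem_vals).sum(axis=(1, 2)))  # [0. 8. 0.]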
Example 5: tile_and_concat
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tile [as alias]
def tile_and_concat(image, latent, concat_latent=True):
  """Tile latent and concatenate to image across depth.

  Args:
    image: 4-D Tensor, (batch_size X height X width X channels)
    latent: 2-D Tensor, (batch_size X latent_dims)
    concat_latent: If set to False, the image is returned as is.

  Returns:
    concat_latent: 4-D Tensor, (batch_size X height X width X channels+1)
      latent tiled and concatenated to the image across the channels.
  """
  if not concat_latent:
    return image
  image_shape = common_layers.shape_list(image)
  latent_shape = common_layers.shape_list(latent)
  height, width = image_shape[1], image_shape[2]
  latent_dims = latent_shape[1]
  height_multiples = height // latent_dims
  pad = height - (height_multiples * latent_dims)
  latent = tf.reshape(latent, (-1, latent_dims, 1, 1))
  latent = tf.tile(latent, (1, height_multiples, width, 1))
  latent = tf.pad(latent, [[0, 0], [pad // 2, pad // 2], [0, 0], [0, 0]])
  return tf.concat([image, latent], axis=-1)
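A hypothetical call (requires tensor2tensor's common_layers; shapes made up for illustration):

image = tf.zeros([4, 8, 8, 3])  # batch 4, 8x8, 3 channels
latent = tf.zeros([4, 8])       # latent_dims = 8 divides height evenly
out = tile_and_concat(image, latent)
# latent -> [4, 8, 1, 1] -> tiled to [4, 8, 8, 1]; out is [4, 8, 8, 4].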
Example 6: identity_initializer
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tile [as alias]
def identity_initializer(shape, dtype=None, partition_info=None):
  """Fake weight initializer to initialize a 3x3 identity kernel."""
  del shape  # Unused.
  del dtype  # Unused.
  del partition_info  # Unused.
  # Start with a 3x3 identity kernel.
  kernel = [[0, 0, 0],
            [0, 1, 0],
            [0, 0, 0]]
  # Expand and tile the kernel to get a tensor with shape [3, 3, 5, 2].
  kernel = tf.expand_dims(kernel, axis=-1)
  kernel = tf.expand_dims(kernel, axis=-1)
  tensor = tf.tile(kernel, [1, 1, 5, 2])
  return tf.cast(tensor, dtype=tf.float32)
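Because this initializer depends only on tf, it can be exercised directly; a minimal sketch:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

kernel = identity_initializer(shape=None)
with tf.Session() as sess:
  print(sess.run(kernel).shape)        # (3, 3, 5, 2)
  # Every [3, 3] slice along the last two axes is the identity kernel.
  print(sess.run(kernel[:, :, 0, 0]))  # [[0. 0. 0.] [0. 1. 0.] [0. 0. 0.]]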
Example 7: upscale
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tile [as alias]
def upscale(images, scale):
  """Box upscaling (also called nearest neighbors) of images.

  Args:
    images: A 4D `Tensor` in NHWC format.
    scale: A positive integer scale.

  Returns:
    A 4D `Tensor` of `images` up scaled by a factor `scale`.

  Raises:
    ValueError: If `scale` is not a positive integer.
  """
  scale = _get_validated_scale(scale)
  if scale == 1:
    return images
  return tf.batch_to_space(
      tf.tile(images, [scale**2, 1, 1, 1]),
      crops=[[0, 0], [0, 0]],
      block_size=scale)
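The tile plus batch_to_space pair is what performs the nearest-neighbor upscale: scale**2 copies of each image are interleaved back into the spatial grid. A standalone sketch with the _get_validated_scale check skipped and scale hard-coded to 2:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

scale = 2
images = tf.reshape(tf.range(4, dtype=tf.float32), [1, 2, 2, 1])
up = tf.batch_to_space(
    tf.tile(images, [scale**2, 1, 1, 1]),
    crops=[[0, 0], [0, 0]],
    block_size=scale)

with tf.Session() as sess:
  print(sess.run(up[0, :, :, 0]))
  # [[0. 0. 1. 1.]
  #  [0. 0. 1. 1.]
  #  [2. 2. 3. 3.]
  #  [2. 2. 3. 3.]]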
Example 8: upscale_height
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tile [as alias]
def upscale_height(images, scale):
  """Box upscaling along the H (axis=1) dimension.

  Args:
    images: A 4D `Tensor` in NHWC format.
    scale: A positive integer scale.

  Returns:
    A 4D `Tensor` of `images` up scaled by a factor `scale`.

  Raises:
    ValueError: If `scale` is not a positive integer.
  """
  scale = _get_validated_scale(scale)
  if scale == 1:
    return images
  images = tf.batch_to_space_nd(
      tf.tile(images, [scale, 1, 1, 1]), block_shape=[scale], crops=[[0, 0]])
  return images
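The only difference from upscale above is the 1-D block_shape: batch_to_space_nd redistributes the scale-fold batch copies along axis 1 alone, so height grows by scale while width is untouched.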
Example 9: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tile [as alias]
def __init__(self, c, d=None, prune_irrelevant=True, collapse=True):
  """Builds a linear specification module."""
  super(LinearSpecification, self).__init__(name='specs', collapse=collapse)
  # c has shape [batch_size, num_specifications, num_outputs]
  # d has shape [batch_size, num_specifications]
  # Some specifications may be irrelevant (not a function of the output).
  # We automatically remove them for clarity. We expect the number of
  # irrelevant specs to be equal for all elements of a batch.
  # Shape is [batch_size, num_specifications].
  if prune_irrelevant:
    irrelevant = tf.equal(tf.reduce_sum(
        tf.cast(tf.abs(c) > 1e-6, tf.int32), axis=-1, keepdims=True), 0)
    batch_size = tf.shape(c)[0]
    num_outputs = tf.shape(c)[2]
    irrelevant = tf.tile(irrelevant, [1, 1, num_outputs])
    self._c = tf.reshape(
        tf.boolean_mask(c, tf.logical_not(irrelevant)),
        [batch_size, -1, num_outputs])
  else:
    self._c = c
  self._d = d
Example 10: tile_to_match_context
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tile [as alias]
def tile_to_match_context(net, context):
  """Tiles net along a new axis=1 to match context.

  Repeats minibatch elements of `net` tensor to match multiple corresponding
  minibatch elements from `context`.

  Args:
    net: Tensor of shape [num_batch_net, ...].
    context: Tensor of shape [num_batch_net, num_examples, context_size].

  Returns:
    Tensor of shape [num_batch_net, num_examples, ...], where each minibatch
    element of net has been tiled M times where
    M = num_batch_context / num_batch_net.
  """
  with tf.name_scope('tile_to_context'):
    num_samples = tf.shape(context)[1]
    net_examples = tf.expand_dims(net, 1)  # [batch_size, 1, ...]
    net_ndim = len(net_examples.get_shape().as_list())
    # Tile net by num_samples in axis=1.
    multiples = [1] * net_ndim
    multiples[1] = num_samples
    net_examples = tf.tile(net_examples, multiples)
  return net_examples
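A minimal sketch of a call with concrete shapes (axis=1 stays dynamic because num_samples is read from the graph at runtime):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

net = tf.zeros([4, 16])        # [num_batch_net, feature]
context = tf.zeros([4, 3, 8])  # [num_batch_net, num_examples, context_size]
out = tile_to_match_context(net, context)
print(out.get_shape().as_list())  # [4, None, 16]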
Example 11: add_context
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tile [as alias]
def add_context(net, context):
  """Merges visual perception with context using elementwise addition.

  Actions are reshaped to match net dimension depth-wise, and are added to
  the conv layers by broadcasting element-wise across H, W extent.

  Args:
    net: Tensor of shape [batch_size, H, W, C].
    context: Tensor of shape [batch_size * num_examples, C].

  Returns:
    Tensor with shape [batch_size * num_examples, H, W, C].
  """
  num_batch_net = tf.shape(net)[0]
  _, h, w, d1 = net.get_shape().as_list()
  _, d2 = context.get_shape().as_list()
  assert d1 == d2
  context = tf.reshape(context, [num_batch_net, -1, d2])
  net_examples = tile_to_match_context(net, context)
  # Flatten first two dimensions.
  net = tf.reshape(net_examples, [-1, h, w, d1])
  context = tf.reshape(context, [-1, 1, 1, d2])
  context = tf.tile(context, [1, h, w, 1])
  net = tf.add_n([net, context])
  return net
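A hypothetical call, with batch_size 2 and num_examples 3 folded into context's leading axis (uses tile_to_match_context from Example 10):

net = tf.zeros([2, 8, 8, 16])
context = tf.zeros([6, 16])      # 2 batch elements x 3 examples
out = add_context(net, context)  # shape [6, 8, 8, 16] at runtime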
Example 12: linear_transform
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tile [as alias]
def linear_transform(x, output_size, scope, bias=False, input_size=None):
  """Simple linear transform of x.

  Args:
    x: <float>[batch_size, length, input_size]
    output_size: Integer specifying output size.
    scope: String name for variable scope.
    bias: If True, adds a learned bias term.
    input_size: Explicitly specify input_size if not set as static shape.

  Returns:
    <float>[batch_size, length, output_size]
  """
  input_size = input_size or x.get_shape()[-1]
  with tf.variable_scope(scope):
    batch_size = tf.shape(x)[0]
    weights = tf.get_variable("weights", shape=(input_size, output_size))
    weights = tf.expand_dims(weights, 0)
    weights = tf.tile(weights, [batch_size, 1, 1])
    x = tf.matmul(x, weights)
    if bias:
      bias = tf.get_variable(
          "bias", shape=(output_size,), initializer=tf.zeros_initializer())
      x += bias
  return x
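A minimal usage sketch:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.zeros([2, 7, 32])
y = linear_transform(x, output_size=16, scope="proj", bias=True)
print(y.get_shape().as_list())  # [2, 7, 16]

Tiling the weight matrix across the batch lets a single tf.matmul perform the batched product; an equivalent tf.einsum("bli,io->blo", x, w) would avoid materializing the tiled copies.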
Example 13: _one_hot_tensor_3d
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tile [as alias]
def _one_hot_tensor_3d(x, index, total_length):
  """One-hot encodes a 2d Tensor in a 3d Tensor.

  This could potentially be implemented in a simpler way using tf.pad but
  this method is compatible with XLA's restriction on static shapes defined
  by constants.

  Args:
    x: A Tensor of shape [m, n].
    index: The "on" index of the inner dimension of the output Tensor.
    total_length: Total length of the output Tensor.

  Returns:
    A Tensor of shape [m, n, total_length].
  """
  m = x.get_shape()[0]
  n = x.get_shape()[1]
  x = tf.expand_dims(x, 2)
  index_tiled = tf.tile(tf.expand_dims(index, 0), [m * n])
  one_hot = tf.one_hot(index_tiled, total_length, on_value=1)
  one_hot = tf.cast(one_hot, dtype=x.dtype)
  one_hot = tf.reshape(one_hot, [m, n, total_length])
  return one_hot * x
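A small worked call (values chosen by hand):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.constant([[1., 2.], [3., 4.]])  # [m, n] = [2, 2]
one_hot_3d = _one_hot_tensor_3d(x, index=tf.constant(1), total_length=4)
with tf.Session() as sess:
  # Each [i, j, :] row carries x[i, j] at the "on" index, zeros elsewhere.
  print(sess.run(one_hot_3d)[0, 1])  # [0. 2. 0. 0.]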
Example 14: compute_thresholded_labels
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tile [as alias]
def compute_thresholded_labels(labels, null_threshold=4):
  """Computes thresholded labels.

  Args:
    labels: <int32> [batch_size, num_annotators]
    null_threshold: If number of null annotations is greater than or equal to
      this threshold, all annotations are set to null for this example.

  Returns:
    thresholded_labels: <int32> [batch_size, num_annotators]
  """
  null_labels = tf.equal(labels, 0)

  # <int32> [batch_size]
  null_count = tf.reduce_sum(tf.to_int32(null_labels), 1)
  threshold_mask = tf.less(null_count, null_threshold)

  # <bool> [batch_size, num_annotators]
  threshold_mask = tf.tile(
      tf.expand_dims(threshold_mask, -1), [1, tf.shape(labels)[1]])

  # <bool> [batch_size, num_annotators]
  thresholded_labels = tf.where(
      threshold_mask, x=labels, y=tf.zeros_like(labels))
  return thresholded_labels
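A worked example with the default null_threshold=4 (values chosen by hand):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

labels = tf.constant([[0, 0, 0, 0, 7],   # 4 null annotations -> row zeroed
                      [0, 3, 5, 2, 9]])  # 1 null annotation  -> kept
out = compute_thresholded_labels(labels, null_threshold=4)
with tf.Session() as sess:
  print(sess.run(out))
  # [[0 0 0 0 0]
  #  [0 3 5 2 9]]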
Example 15: testPixelControlLossTensorDiscount
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tile [as alias]
def testPixelControlLossTensorDiscount(self):
  """Compute loss for given observations, actions, values, tensor discount."""
  zero_discount = tf.zeros((1, self.batch_size))
  non_zero_discount = tf.tile(
      tf.reshape(self.discount, [1, 1]),
      [self.seq_length - 1, self.batch_size])
  tensor_discount = tf.concat([zero_discount, non_zero_discount], axis=0)
  loss, _ = pixel_control_ops.pixel_control_loss(
      self.observations_ph, self.actions_ph, self.action_values_ph,
      self.cell_size, tensor_discount, self.scale)
  init = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init)
    feed_dict = {
        self.observations_ph: self.observations,
        self.action_values_ph: self.action_values,
        self.actions_ph: self.actions}
    loss_np = sess.run(loss, feed_dict=feed_dict)
    self.assertNear(loss_np, self.error_term, 1e-3)