This article collects typical usage examples of the Python method tensorflow.python.ops.array_ops.tile. If you are unsure what array_ops.tile does or how to call it, the curated examples below should help. You can also explore the containing module, tensorflow.python.ops.array_ops, for related methods.
Below are 11 code examples of array_ops.tile, ordered by popularity.
Example 1: _SumGrad
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or alternatively: from tensorflow.python.ops.array_ops import tile [as alias]
def _SumGrad(op, grad):
"""Gradient for Sum."""
# Fast path for when reducing to a scalar and ndims is known: adds only
# Reshape and Tile ops (and possibly a Shape).
if (op.inputs[0].get_shape().ndims is not None and
op.inputs[1].op.type == "Const"):
rank = op.inputs[0].get_shape().ndims
axes = tensor_util.MakeNdarray(op.inputs[1].op.get_attr("value"))
if np.array_equal(axes, np.arange(rank)): # Reduce all dims.
grad = array_ops.reshape(grad, [1] * rank)
# If shape is not fully defined (but rank is), we use Shape.
if op.inputs[0].get_shape().is_fully_defined():
input_shape = op.inputs[0].get_shape().as_list()
else:
input_shape = array_ops.shape(op.inputs[0])
return [array_ops.tile(grad, input_shape), None]
input_shape = array_ops.shape(op.inputs[0])
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
return [array_ops.tile(grad, tile_scaling), None]
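As a quick illustration of the fast path above (a minimal sketch, assuming the public tf.* equivalents of the internal array_ops calls; the inputs are made up):

import tensorflow as tf

x = tf.ones([2, 3])      # input that was reduced with a full reduce_sum
grad = tf.constant(5.0)  # upstream gradient of the scalar sum
rank = x.shape.ndims
# Reshape the scalar gradient to [1, 1], then tile it out to x's shape [2, 3]:
tiled_grad = tf.tile(tf.reshape(grad, [1] * rank), tf.shape(x))
# Each element of x contributed once to the sum, so each receives gradient 5.0.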
Example 2: repeat
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or alternatively: from tensorflow.python.ops.array_ops import tile [as alias]
def repeat(x, n):
"""Repeats a 2D tensor.
  If `x` has shape `(samples, dim)` and `n` is `2`,
  the output will have shape `(samples, 2, dim)`.
Arguments:
x: Tensor or variable.
n: Python integer, number of times to repeat.
Returns:
A tensor.
"""
assert ndim(x) == 2
x = array_ops.expand_dims(x, 1)
pattern = array_ops.stack([1, n, 1])
return array_ops.tile(x, pattern)
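A hypothetical usage sketch of repeat's expand_dims-plus-tile pattern (assuming the public tf.* API; values are made up):

import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])         # shape (2, 2)
y = tf.tile(tf.expand_dims(x, 1), [1, 3, 1])  # shape (2, 3, 2)
# Each row of x now appears 3 times along the new middle axis.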
Example 3: _lengths_to_masks
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or alternatively: from tensorflow.python.ops.array_ops import tile [as alias]
def _lengths_to_masks(lengths, max_length):
"""Creates a binary matrix that can be used to mask away padding.
Args:
lengths: A vector of integers representing lengths.
max_length: An integer indicating the maximum length. All values in
lengths should be less than max_length.
Returns:
masks: Masks that can be used to get rid of padding.
"""
tiled_ranges = array_ops.tile(
array_ops.expand_dims(math_ops.range(max_length), 0),
[array_ops.shape(lengths)[0], 1])
lengths = array_ops.expand_dims(lengths, 1)
masks = math_ops.to_float(
math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))
return masks
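The same masking idea as a standalone sketch (assumed public tf.* equivalents; tf.cast stands in for the deprecated math_ops.to_float):

import tensorflow as tf

lengths = tf.constant([2, 4])
max_length = 5
# Tile one [0, max_length) range per sequence in the batch:
ranges = tf.tile(tf.expand_dims(tf.range(max_length), 0),
                 [tf.shape(lengths)[0], 1])
masks = tf.cast(ranges < tf.expand_dims(lengths, 1), tf.float32)
# masks == [[1., 1., 0., 0., 0.],
#           [1., 1., 1., 1., 0.]]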
Example 4: _tile_batch
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or alternatively: from tensorflow.python.ops.array_ops import tile [as alias]
def _tile_batch(t, multiplier):
"""Core single-tensor implementation of tile_batch."""
t = ops.convert_to_tensor(t, name="t")
shape_t = array_ops.shape(t)
if t.shape.ndims is None or t.shape.ndims < 1:
raise ValueError("t must have statically known rank")
tiling = [1] * (t.shape.ndims + 1)
tiling[1] = multiplier
tiled_static_batch_size = (
t.shape[0].value * multiplier if t.shape[0].value is not None else None)
tiled = array_ops.tile(array_ops.expand_dims(t, 1), tiling)
tiled = array_ops.reshape(
tiled, array_ops.concat(([shape_t[0] * multiplier], shape_t[1:]), 0))
tiled.set_shape(
tensor_shape.TensorShape(
[tiled_static_batch_size]).concatenate(t.shape[1:]))
return tiled
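A minimal sketch of the same batch-tiling trick, as used for beam search (assuming the public tf.* API; shapes are made up):

import tensorflow as tf

t = tf.constant([[1., 2.], [3., 4.]])  # batch of 2
beam_width = 3
# Insert a beam axis, tile it, then fold it back into the batch axis:
tiled = tf.tile(tf.expand_dims(t, 1), [1, beam_width, 1])
tiled = tf.reshape(tiled, [2 * beam_width, 2])
# Rows: [1, 2] three times, then [3, 4] three times -- each batch entry
# is repeated beam_width times, in order.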
Example 5: _ragged_substr
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or alternatively: from tensorflow.python.ops.array_ops import tile [as alias]
def _ragged_substr(text_input, begin, end):
text_input_flat = None
if ragged_tensor.is_ragged(text_input):
text_input_flat = text_input.flat_values
else:
text_input_flat = text_input
def _ragged_tile(x):
input_text, indices = x
multiple = math_ops.reduce_sum(indices.row_lengths())
return array_ops.tile([input_text], [multiple])
broadcasted_text = ragged_map_ops.map_fn(
_ragged_tile,
(text_input_flat, begin),
dtype=ragged_tensor.RaggedTensorType(dtype=dtypes.string, ragged_rank=1),
infer_shape=False,
)
size = math_ops.sub(
array_ops.squeeze(end.flat_values), array_ops.squeeze(begin.flat_values))
new_tokens = string_ops.substr_v2(broadcasted_text,
array_ops.squeeze(begin.flat_values), size)
return begin.with_flat_values(new_tokens.flat_values)
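The _ragged_tile helper above boils down to repeating one scalar string per target position; a standalone sketch of just that step (made-up input, public tf.* API assumed):

import tensorflow as tf

text = tf.constant("hello world")
multiple = 3
# Wrap the scalar in a rank-1 tensor so tile can repeat it along axis 0:
broadcasted = tf.tile([text], [multiple])
# broadcasted == [b"hello world", b"hello world", b"hello world"]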
Example 6: initialize
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or alternatively: from tensorflow.python.ops.array_ops import tile [as alias]
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return finished, self._start_inputs
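In other words, the decoder's initial finished flags come from tiling a scalar False across the batch; a minimal sketch (assumed tf.* API):

import tensorflow as tf

batch_size = 4
finished = tf.tile([False], [batch_size])
# finished == [False, False, False, False], one flag per batch entry.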
Example 7: unit_norm
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or alternatively: from tensorflow.python.ops.array_ops import tile [as alias]
def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
"""Normalizes the given input across the specified dimension to unit length.
Note that the rank of `input` must be known.
Args:
inputs: A `Tensor` of arbitrary size.
dim: The dimension along which the input is normalized.
epsilon: A small value to add to the inputs to avoid dividing by zero.
scope: Optional scope for variable_scope.
Returns:
The normalized `Tensor`.
Raises:
    ValueError: If `dim` is negative or not smaller than the rank of `inputs`.
"""
with variable_scope.variable_scope(scope, 'UnitNorm', [inputs]):
if not inputs.get_shape():
raise ValueError('The input rank must be known.')
input_rank = len(inputs.get_shape().as_list())
if dim < 0 or dim >= input_rank:
      raise ValueError('dim must be non-negative but smaller than the input rank.')
lengths = math_ops.sqrt(
epsilon + math_ops.reduce_sum(math_ops.square(inputs), dim, True))
multiples = []
if dim > 0:
multiples.append(array_ops.ones([dim], dtypes.int32))
multiples.append(
array_ops.strided_slice(array_ops.shape(inputs), [dim], [dim + 1]))
if dim < (input_rank - 1):
multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))
multiples = array_ops.concat(multiples, 0)
return math_ops.div(inputs, array_ops.tile(lengths, multiples))
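A standalone sketch of the normalization for dim=1 on a rank-3 input (assuming the public tf.* API; plain broadcasting would also work, but this mirrors the explicit tile above):

import tensorflow as tf

inputs = tf.random.normal([2, 3, 4])
dim = 1
# Norms over axis 1 with keepdims, giving shape [2, 1, 4]:
lengths = tf.sqrt(1e-7 + tf.reduce_sum(tf.square(inputs), dim, keepdims=True))
# Tile the kept axis back up to size 3 so the division is elementwise:
normalized = inputs / tf.tile(lengths, [1, inputs.shape[dim], 1])
# Every length-3 slice of normalized along axis 1 now has ~unit L2 norm.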
Example 8: _num_relevant
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or alternatively: from tensorflow.python.ops.array_ops import tile [as alias]
def _num_relevant(labels, k):
"""Computes number of relevant values for each row in labels.
For labels with shape [D1, ... DN, num_labels], this is the minimum of
`num_labels` and `k`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels].
k: Integer, k for @k metric.
Returns:
Integer `Tensor` of shape [D1, ... DN], where each value is the number of
relevant values for that row.
Raises:
ValueError: if inputs have invalid dtypes or values.
"""
if k < 1:
raise ValueError('Invalid k=%s.' % k)
with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
# For SparseTensor, calculate separate count for each row.
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
if isinstance(labels, sparse_tensor.SparseTensor):
return math_ops.minimum(sets.set_size(labels), k, name=scope)
# For dense Tensor, calculate scalar count based on last dimension, and
# tile across labels shape.
labels_shape = array_ops.shape(labels)
labels_size = labels_shape[-1]
num_relevant_scalar = math_ops.minimum(labels_size, k)
return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)
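A sketch of the dense branch (assumed tf.* API; shapes made up):

import tensorflow as tf

labels = tf.zeros([5, 8], tf.int64)  # 5 rows, 8 candidate labels each
k = 3
num_relevant = tf.fill(tf.shape(labels)[:-1],
                       tf.minimum(tf.shape(labels)[-1], k))
# shape [5]; every entry is min(8, 3) == 3.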
Example 9: grayscale_to_rgb
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or alternatively: from tensorflow.python.ops.array_ops import tile [as alias]
def grayscale_to_rgb(images, name=None):
"""Converts one or more images from Grayscale to RGB.
Outputs a tensor of the same `DType` and rank as `images`. The size of the
last dimension of the output is 3, containing the RGB value of the pixels.
Args:
images: The Grayscale tensor to convert. Last dimension must be size 1.
name: A name for the operation (optional).
Returns:
The converted grayscale image(s).
"""
with ops.name_scope(name, 'grayscale_to_rgb', [images]) as name:
images = ops.convert_to_tensor(images, name='images')
rank_1 = array_ops.expand_dims(array_ops.rank(images) - 1, 0)
shape_list = (
[array_ops.ones(rank_1,
dtype=dtypes.int32)] + [array_ops.expand_dims(3, 0)])
multiples = array_ops.concat(shape_list, 0)
rgb = array_ops.tile(images, multiples, name=name)
rgb.set_shape(images.get_shape()[:-1].concatenate([3]))
return rgb
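The multiples it builds are all ones except a 3 in the channel axis; a concrete sketch (assumed tf.* API):

import tensorflow as tf

gray = tf.zeros([2, 4, 4, 1])      # a batch of grayscale images
rgb = tf.tile(gray, [1, 1, 1, 3])  # shape [2, 4, 4, 3]
# Only the last (channel) dimension is replicated.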
Example 10: _entropy
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or alternatively: from tensorflow.python.ops.array_ops import tile [as alias]
def _entropy(self):
if not self.bijector.is_constant_jacobian:
raise NotImplementedError("entropy is not implemented")
# Suppose Y = g(X) where g is a diffeomorphism and X is a continuous rv. It
# can be shown that:
# H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)].
# If is_constant_jacobian then:
# E_X[(log o abs o det o J o g)(X)] = (log o abs o det o J o g)(c)
    # where c can be anything.
entropy = self.distribution.entropy()
if self._is_maybe_event_override:
# H[X] = sum_i H[X_i] if X_i are mutually independent.
# This means that a reduce_sum is a simple rescaling.
entropy *= math_ops.cast(math_ops.reduce_prod(self._override_event_shape),
dtype=entropy.dtype.base_dtype)
if self._is_maybe_batch_override:
new_shape = array_ops.concat([
_ones_like(self._override_batch_shape),
self.distribution.batch_shape_tensor()
], 0)
entropy = array_ops.reshape(entropy, new_shape)
multiples = array_ops.concat([
self._override_batch_shape,
_ones_like(self.distribution.batch_shape_tensor())
], 0)
entropy = array_ops.tile(entropy, multiples)
dummy = array_ops.zeros([], self.dtype)
entropy -= self.bijector.inverse_log_det_jacobian(dummy)
entropy.set_shape(self.batch_shape)
return entropy
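The batch-override branch reshapes the entropy behind a new leading axis and tiles it; a simplified sketch (assumed tf.* API, made-up shapes):

import tensorflow as tf

entropy = tf.constant([0.5, 0.7])        # base distribution's batch entropies
override_batch_shape = tf.constant([3])  # batch-shape override
# Prepend a broadcast axis, then tile across the override batch:
entropy = tf.reshape(entropy, tf.concat([[1], tf.shape(entropy)], 0))
entropy = tf.tile(entropy, tf.concat([override_batch_shape, [1]], 0))
# shape [3, 2]: the same entropies repeated for each override batch entry.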
Example 11: _BiasAddGradGrad
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or alternatively: from tensorflow.python.ops.array_ops import tile [as alias]
def _BiasAddGradGrad(op, received_grad):
"""Gradient for the BiasAddGrad op.
Args:
op: BiasAddGrad op for which we are calculating gradients.
received_grad: The gradients passed to the BiasAddGrad op.
Returns:
A single gradient Tensor for the input to BiasAddGrad (which
    is the gradient of the bias term in BiasAdd).
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
shape = array_ops.shape(op.inputs[0])
rank = array_ops.rank(op.inputs[0])
bias_shape = array_ops.shape(received_grad)
if data_format == b"NCHW":
expanded_shape = array_ops.concat([
array_ops.ones_like(shape[:-3]), bias_shape,
array_ops.ones_like(shape[-2:])
], 0)
tile_mults = array_ops.concat([shape[:-3], [1], shape[-2:]], 0)
else:
expanded_shape = array_ops.concat(
[array_ops.ones_like(shape[:-1]), bias_shape], 0)
tile_mults = array_ops.concat([shape[:-1], [1]], 0)
expanded_grad = array_ops.reshape(received_grad, expanded_shape)
return array_ops.tile(expanded_grad, tile_mults)
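A numeric sketch of the NHWC branch (assumed tf.* API; shapes made up):

import tensorflow as tf

received_grad = tf.ones([3])       # gradient w.r.t. a bias over 3 channels
shape = tf.constant([2, 4, 4, 3])  # shape of the original BiasAdd input (NHWC)
# Reshape the bias grad to [1, 1, 1, 3], then tile over batch/spatial dims:
expanded = tf.reshape(received_grad,
                      tf.concat([tf.ones_like(shape[:-1]),
                                 tf.shape(received_grad)], 0))
tiled = tf.tile(expanded, tf.concat([shape[:-1], [1]], 0))  # [2, 4, 4, 3]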