This article collects typical usage examples of the Python method tensorflow.python.framework.tensor_shape.TensorShape. If you are unsure what tensor_shape.TensorShape does, how to call it, or what working code looks like, the curated examples below should help. You can also browse further usage examples from its containing module, tensorflow.python.framework.
Below are 15 code examples of tensor_shape.TensorShape, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: _rnn_output_size
# Required import: from tensorflow.python.framework import tensor_shape [as alias]
# Or: from tensorflow.python.framework.tensor_shape import TensorShape [as alias]
def _rnn_output_size(self):
    size = self._cell.output_size
    if self._output_layer is None:
        return size
    else:
        # To use layer's compute_output_shape, we need to convert the
        # RNNCell's output_size entries into shapes with an unknown
        # batch size. We then pass this through the layer's
        # compute_output_shape and read off all but the first (batch)
        # dimensions to get the output size of the rnn with the layer
        # applied to the top.
        output_shape_with_unknown_batch = nest.map_structure(
            lambda s: tensor_shape.TensorShape([None]).concatenate(s),
            size)
        layer_output_shape = self._output_layer.compute_output_shape(  # pylint: disable=protected-access
            output_shape_with_unknown_batch)
        return nest.map_structure(lambda s: s[1:], layer_output_shape)
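The TensorShape trick here is prepending an unknown batch dimension and later stripping it off again. A minimal, self-contained sketch of that pattern, assuming a TensorFlow 1.x-style environment and a hypothetical cell output size of 128:

from tensorflow.python.framework import tensor_shape

size = 128  # hypothetical RNNCell output_size entry
shape_with_batch = tensor_shape.TensorShape([None]).concatenate(size)
# shape_with_batch is TensorShape([None, 128]): unknown batch dim plus the cell size
output_size = shape_with_batch[1:]
# output_size is TensorShape([128]): everything except the leading batch dimension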
Example 2: __init__
# Required import: from tensorflow.python.framework import tensor_shape [as alias]
# Or: from tensorflow.python.framework.tensor_shape import TensorShape [as alias]
def __init__(self, initialize_fn, sample_fn, next_inputs_fn,
             sample_ids_shape=None, sample_ids_dtype=None):
    """Initializer.

    Args:
      initialize_fn: callable that returns `(finished, next_inputs)`
        for the first iteration.
      sample_fn: callable that takes `(time, outputs, state)`
        and emits tensor `sample_ids`.
      next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
        and emits `(finished, next_inputs, next_state)`.
      sample_ids_shape: Either a list of integers, or a 1-D Tensor of type
        `int32`, the shape of each value in the `sample_ids` batch. Defaults to
        a scalar.
      sample_ids_dtype: The dtype of the `sample_ids` tensor. Defaults to int32.
    """
    self._initialize_fn = initialize_fn
    self._sample_fn = sample_fn
    self._next_inputs_fn = next_inputs_fn
    self._batch_size = None
    self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])
    self._sample_ids_dtype = sample_ids_dtype or dtypes.int32
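A small sketch of the default handling on the sample_ids_shape line above: passing None yields a scalar TensorShape, while an explicit list is converted as-is (hypothetical values):

from tensorflow.python.framework import tensor_shape

scalar_shape = tensor_shape.TensorShape(None or [])  # sample_ids_shape=None -> TensorShape([])
vector_shape = tensor_shape.TensorShape([5] or [])   # explicit shape -> TensorShape([5])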
Example 3: channel_dimension
# Required import: from tensorflow.python.framework import tensor_shape [as alias]
# Or: from tensorflow.python.framework.tensor_shape import TensorShape [as alias]
def channel_dimension(shape, data_format, min_rank=1):
    """Returns the channel dimension of shape, while checking it has min_rank.

    Args:
      shape: A `TensorShape`.
      data_format: `channels_first` or `channels_last`.
      min_rank: Integer, minimum rank of shape.

    Returns:
      The value of the channel dimension.

    Raises:
      ValueError: if inputs don't have at least min_rank dimensions, or if the
        channel dimension value is not defined.
    """
    return _get_dimension(shape, 1 if data_format == 'channels_first' else -1,
                          min_rank=min_rank)
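The helper _get_dimension is not shown in this snippet; a rough, hedged sketch of the check it is described as performing, using only public TensorShape methods and a hypothetical NHWC shape:

from tensorflow.python.framework import tensor_shape

shape = tensor_shape.TensorShape([None, 224, 224, 3])  # hypothetical NHWC input shape
shape.with_rank_at_least(1)                            # raises ValueError if rank < min_rank
channels = shape.dims[-1].value                        # 3 for data_format='channels_last'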
Example 4: _transpose_batch_time
# Required import: from tensorflow.python.framework import tensor_shape [as alias]
# Or: from tensorflow.python.framework.tensor_shape import TensorShape [as alias]
def _transpose_batch_time(x):
    """Transpose the batch and time dimensions of a Tensor.

    Retains as much of the static shape information as possible.

    Args:
      x: A tensor of rank 2 or higher.

    Returns:
      x transposed along the first two dimensions.

    Raises:
      ValueError: if `x` is rank 1 or lower.
    """
    x_static_shape = x.get_shape()
    if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
        raise ValueError(
            "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
            (x, x_static_shape))
    x_rank = array_ops.rank(x)
    x_t = array_ops.transpose(
        x, array_ops.concat(
            ([1, 0], math_ops.range(2, x_rank)), axis=0))
    x_t.set_shape(
        tensor_shape.TensorShape([
            x_static_shape[1].value, x_static_shape[0].value
        ]).concatenate(x_static_shape[2:]))
    return x_t
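A hedged usage sketch, assuming a TensorFlow 1.x graph (the .value attribute on dimensions is 1.x-style) and that the function above is in scope with its internal imports:

import tensorflow as tf

x = tf.zeros([32, 10, 64])      # [batch, time, depth]
x_t = _transpose_batch_time(x)  # static shape becomes (10, 32, 64)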
Example 5: _tile_batch
# Required import: from tensorflow.python.framework import tensor_shape [as alias]
# Or: from tensorflow.python.framework.tensor_shape import TensorShape [as alias]
def _tile_batch(t, multiplier):
    """Core single-tensor implementation of tile_batch."""
    t = ops.convert_to_tensor(t, name="t")
    shape_t = tf.shape(t)
    if t.shape.ndims is None or t.shape.ndims < 1:
        raise ValueError("t must have statically known rank")
    tiling = [1] * (t.shape.ndims + 1)
    tiling[1] = multiplier
    tiled_static_batch_size = (
        t.shape[0].value * multiplier if t.shape[0].value is not None else None)
    tiled = tf.tile(tf.expand_dims(t, 1), tiling)
    tiled = tf.reshape(
        tiled, tf.concat(([shape_t[0] * multiplier], shape_t[1:]), 0))
    tiled.set_shape(
        tensor_shape.TensorShape(
            [tiled_static_batch_size]).concatenate(t.shape[1:]))
    return tiled
Author: hirofumi0810, Project: tensorflow_end2end_speech_recognition, Lines: 19, Source file: beam_search_decoder_from_tensorflow.py
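A hedged usage sketch of _tile_batch (TF 1.x; the function above must be in scope with its internal imports):

import tensorflow as tf

t = tf.reshape(tf.range(6), [3, 2])   # batch_size = 3
tiled = _tile_batch(t, multiplier=2)  # shape (6, 2): each batch entry repeated twice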
Example 6: _rnn_output_size
# Required import: from tensorflow.python.framework import tensor_shape [as alias]
# Or: from tensorflow.python.framework.tensor_shape import TensorShape [as alias]
def _rnn_output_size(self):
    size = self._cell.output_size
    if self._output_layer is None:
        return size
    else:
        # To use layer's compute_output_shape, we need to convert the
        # RNNCell's output_size entries into shapes with an unknown
        # batch size. We then pass this through the layer's
        # compute_output_shape and read off all but the first (batch)
        # dimensions to get the output size of the rnn with the layer
        # applied to the top.
        output_shape_with_unknown_batch = nest.map_structure(
            lambda s: tensor_shape.TensorShape([None]).concatenate(s),
            size)
        layer_output_shape = self._output_layer._compute_output_shape(  # pylint: disable=protected-access
            output_shape_with_unknown_batch)
        return nest.map_structure(lambda s: s[1:], layer_output_shape)
Author: hirofumi0810, Project: tensorflow_end2end_speech_recognition, Lines: 19, Source file: beam_search_decoder_from_tensorflow.py
Example 7: _maybe_split_batch_beams
# Required import: from tensorflow.python.framework import tensor_shape [as alias]
# Or: from tensorflow.python.framework.tensor_shape import TensorShape [as alias]
def _maybe_split_batch_beams(self, t, s):
    """Maybe splits the tensor from a batch by beams into a batch of beams.

    We do this so that we can use nest and not run into problems with shapes.

    Args:
      t: Tensor of dimension [batch_size*beam_width, s]
      s: Tensor, Python int, or TensorShape.

    Returns:
      Either a reshaped version of t with dimension
      [batch_size, beam_width, s] if t's first dimension is of size
      batch_size*beam_width or t if not.

    Raises:
      TypeError: If t is an instance of TensorArray.
      ValueError: If the rank of t is not statically known.
    """
    _check_maybe(t)
    if t.shape.ndims >= 1:
        return self._split_batch_beams(t, s)
    else:
        return t
Author: hirofumi0810, Project: tensorflow_end2end_speech_recognition, Lines: 21, Source file: beam_search_decoder_from_tensorflow.py
Example 8: _maybe_merge_batch_beams
# Required import: from tensorflow.python.framework import tensor_shape [as alias]
# Or: from tensorflow.python.framework.tensor_shape import TensorShape [as alias]
def _maybe_merge_batch_beams(self, t, s):
    """Maybe merges the tensor from a batch of beams into a batch by beams.

    More exactly, t is a tensor of dimension [batch_size, beam_width, s]; we
    reshape it into [batch_size*beam_width, s].

    Args:
      t: Tensor of dimension [batch_size, beam_width, s].
      s: Tensor, Python int, or TensorShape.

    Returns:
      A reshaped version of t with dimension [batch_size*beam_width, s].

    Raises:
      TypeError: If t is an instance of TensorArray.
      ValueError: If the rank of t is not statically known.
    """
    _check_maybe(t)
    if t.shape.ndims >= 2:
        return self._merge_batch_beams(t, s)
    else:
        return t
Author: hirofumi0810, Project: tensorflow_end2end_speech_recognition, Lines: 20, Source file: beam_search_decoder_from_tensorflow.py
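Examples 7 and 8 delegate to _split_batch_beams / _merge_batch_beams, which are not shown in these snippets. A minimal sketch of the reshapes they are expected to perform, with hypothetical batch_size and beam_width values:

import tensorflow as tf

batch_size, beam_width, depth = 4, 3, 7
t = tf.zeros([batch_size * beam_width, depth])
split = tf.reshape(t, [batch_size, beam_width, depth])        # "split batch by beams"
merged = tf.reshape(split, [batch_size * beam_width, depth])  # inverse "merge"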
Example 9: __call__
# Required import: from tensorflow.python.framework import tensor_shape [as alias]
# Or: from tensorflow.python.framework.tensor_shape import TensorShape [as alias]
def __call__(self, getter, *args, **kwargs):
    size = tf.TensorShape(kwargs['shape']).num_elements()
    if size < self.small_variable_size_threshold:
        device_name = self.device_for_small_variables
    else:
        device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
        device_name = self.devices[device_index]
        self.sizes[device_index] += size
    kwargs['caching_device'] = device_name
    var = getter(*args, **kwargs)
    return var
# To be used with custom_getter on tf.get_variable. Ensures the created variable
# is in the LOCAL_VARIABLES and not the GLOBAL_VARIABLES collection.
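A small sketch of the size computation used by this custom getter; note that num_elements() returns None when the shape is not fully defined:

import tensorflow as tf

print(tf.TensorShape([64, 128]).num_elements())    # 8192
print(tf.TensorShape([None, 128]).num_elements())  # None (unknown dimension)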
Example 10: __init__
# Required import: from tensorflow.python.framework import tensor_shape [as alias]
# Or: from tensorflow.python.framework.tensor_shape import TensorShape [as alias]
def __init__(self, dtype, shape, accumulator_ref):
    """Creates a new ConditionalAccumulator.

    Args:
      dtype: Datatype of the accumulated gradients.
      shape: Shape of the accumulated gradients.
      accumulator_ref: A handle to the conditional accumulator, created by
        subclasses.
    """
    self._dtype = dtype
    if shape is not None:
        self._shape = tensor_shape.TensorShape(shape)
    else:
        self._shape = tensor_shape.unknown_shape()
    self._accumulator_ref = accumulator_ref
    self._name = self._accumulator_ref.op.name.split("/")[-1]
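A small sketch of the two branches above: a concrete shape versus an unknown one:

from tensorflow.python.framework import tensor_shape

known = tensor_shape.TensorShape([3, 4])  # fully defined shape (3, 4)
unknown = tensor_shape.unknown_shape()    # rank and dimensions unknown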
Example 11: zero_state
# Required import: from tensorflow.python.framework import tensor_shape [as alias]
# Or: from tensorflow.python.framework.tensor_shape import TensorShape [as alias]
def zero_state(self, batch_size, dtype):
    """Return zero-filled state tensor(s).

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      If `state_size` is an int or TensorShape, then the return value is a
      `N-D` tensor of shape `[batch_size x state_size]` filled with zeros.

      If `state_size` is a nested list or tuple, then the return value is
      a nested list or tuple (of the same structure) of `2-D` tensors with
      the shapes `[batch_size x s]` for each s in `state_size`.
    """
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
        state_size = self.state_size
        return _zero_state_tensors(state_size, batch_size, dtype)
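A hedged usage sketch (TF 1.x rnn_cell API), showing what zero_state returns for an LSTM-style cell:

import tensorflow as tf

cell = tf.nn.rnn_cell.BasicLSTMCell(64)
state = cell.zero_state(batch_size=32, dtype=tf.float32)
# state is an LSTMStateTuple of two zero-filled [32, 64] tensors (c and h)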
Example 12: _TileGradShape
# Required import: from tensorflow.python.framework import tensor_shape [as alias]
# Or: from tensorflow.python.framework.tensor_shape import TensorShape [as alias]
def _TileGradShape(op):
    """Shape function for the TileGrad op."""
    multiples_shape = op.inputs[1].get_shape().with_rank(1)
    input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
    # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
    # it is a vector of non-negative integers, and (ii) doing so allows
    # us to handle partially-known multiples.
    multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
        input_shape.ndims)
    if multiples.ndims is None:
        return [tensor_shape.unknown_shape()]
    else:
        output_dims = []
        for dim, multiple in zip(input_shape.dims, multiples.dims):
            output_dims.append(dim // multiple)
        return [tensor_shape.TensorShape(output_dims)]
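The Dimension arithmetic above (dim // multiple) propagates unknown values; a small sketch:

from tensorflow.python.framework import tensor_shape

known = tensor_shape.Dimension(12) // tensor_shape.Dimension(3)      # Dimension(4)
unknown = tensor_shape.Dimension(12) // tensor_shape.Dimension(None) # unknown Dimension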
Example 13: _merge_element_shape
# Required import: from tensorflow.python.framework import tensor_shape [as alias]
# Or: from tensorflow.python.framework.tensor_shape import TensorShape [as alias]
def _merge_element_shape(self, shape):
    """Changes the element shape of the array given a shape to merge with.

    Args:
      shape: A `TensorShape` object to merge with.

    Raises:
      ValueError: if the provided shape is incompatible with the current
        element shape of the `TensorArray`.
    """
    if self._element_shape:
        if not shape.is_compatible_with(self._element_shape[0]):
            raise ValueError(
                "Inconsistent shapes: saw %s but expected %s "
                "(and infer_shape=True)" % (shape, self._element_shape[0]))
        self._element_shape[0] = self._element_shape[0].merge_with(shape)
    else:
        self._element_shape.append(shape)
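A small sketch of the is_compatible_with / merge_with semantics relied on above:

from tensorflow.python.framework import tensor_shape

a = tensor_shape.TensorShape([None, 3])
b = tensor_shape.TensorShape([5, 3])
a.is_compatible_with(b)                                 # True
a.merge_with(b)                                         # (5, 3): unknown dims are filled in
a.is_compatible_with(tensor_shape.TensorShape([5, 4]))  # False -> would raise above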
Example 14: _GatherGrad
# Required import: from tensorflow.python.framework import tensor_shape [as alias]
# Or: from tensorflow.python.framework.tensor_shape import TensorShape [as alias]
def _GatherGrad(op, grad):
    """Gradient for gather op."""
    # Build appropriately shaped IndexedSlices.
    # Walk graph back until the original handle is found.
    # TODO(apassos): more robust way of getting the shape.
    handle = op.inputs[0]
    while handle.op.type != "VarHandleOp":
        handle = handle.op.inputs[0]
    params_shape = ops.convert_to_tensor(
        tensor_shape.TensorShape(handle.op.get_attr("shape")))
    indices = op.inputs[1]
    size = array_ops.expand_dims(array_ops.size(indices), 0)
    values_shape = array_ops.concat([size, params_shape[1:]], 0)
    values = array_ops.reshape(grad, values_shape)
    indices = array_ops.reshape(indices, size)
    return [ops.IndexedSlices(values, indices, params_shape), None]
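A fully defined TensorShape can be converted directly to a 1-D integer tensor, which is what the params_shape line above relies on; a small sketch:

import tensorflow as tf

shape_tensor = tf.convert_to_tensor(tf.TensorShape([10, 4]))
# shape_tensor is a constant integer tensor with value [10, 4]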
Example 15: build
# Required import: from tensorflow.python.framework import tensor_shape [as alias]
# Or: from tensorflow.python.framework.tensor_shape import TensorShape [as alias]
def build(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape)
    if input_shape[-1].value is None:
        raise ValueError('The last dimension of the inputs to `Dense` '
                         'should be defined. Found `None`.')
    self.input_spec = base.InputSpec(min_ndim=2,
                                     axes={-1: input_shape[-1].value})
    self.kernel = self.add_variable('kernel',
                                    shape=[input_shape[-1].value, self.units],
                                    initializer=self.kernel_initializer,
                                    regularizer=self.kernel_regularizer,
                                    dtype=self.dtype,
                                    trainable=True)
    if self.use_bias:
        self.bias = self.add_variable('bias',
                                      shape=[self.units],
                                      initializer=self.bias_initializer,
                                      regularizer=self.bias_regularizer,
                                      dtype=self.dtype,
                                      trainable=True)
    else:
        self.bias = None
    self.built = True
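A hedged sketch of the last-dimension check performed above (the .value attribute on dimensions is TF 1.x-style), using a hypothetical [batch, features] input shape:

from tensorflow.python.framework import tensor_shape

input_shape = tensor_shape.TensorShape([None, 784])  # hypothetical [batch, features]
last_dim = input_shape[-1].value                     # 784; None here would trigger the ValueError above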