This article collects typical usage examples of the Python method tensorflow.python.ops.array_ops.unstack. If you have been wondering exactly how array_ops.unstack is used, or what real code that calls it looks like, the hand-picked examples below should help. You can also browse further usage examples from the module that contains this method, tensorflow.python.ops.array_ops.
The following shows 15 code examples of the array_ops.unstack method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: _ImageDimensions
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import unstack [as alias]
def _ImageDimensions(images, dynamic_shape=False):
"""Returns the dimensions of an image tensor.
Args:
images: 4-D Tensor of shape [batch, height, width, channels]
dynamic_shape: Whether the input image has undetermined shape. If set to
`True`, shape information will be retrieved at run time. Defaults to
`False`.
Returns:
list of `[batch, height, width, channels]`; Python integers from the static shape by default, or scalar int32 tensors when `dynamic_shape` is `True`.
"""
# A simple abstraction to provide names for each dimension. This abstraction
# should make it simpler to switch dimensions in the future (e.g. if we ever
# want to switch height and width.)
if dynamic_shape:
return array_ops.unstack(array_ops.shape(images))
else:
return images.get_shape().as_list()
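A minimal usage sketch of the two paths above (not part of the original example; assumes TensorFlow 1.x graph mode, and the placeholder shape is illustrative):
import tensorflow as tf
from tensorflow.python.ops import array_ops

# Batch size left unknown so the static and dynamic paths differ.
images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])

static_dims = images.get_shape().as_list()                 # [None, 224, 224, 3], plain Python values
dynamic_dims = array_ops.unstack(array_ops.shape(images))  # four scalar int32 tensors, known at run time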
Example 2: _ImageDimensions
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import unstack [as alias]
def _ImageDimensions(image):
"""Returns the dimensions of an image tensor.
Args:
image: A 3-D Tensor of shape `[height, width, channels]`.
Returns:
A list of `[height, width, channels]` corresponding to the dimensions of the
input image. Dimensions that are statically known are python integers,
otherwise they are integer scalar tensors.
"""
if image.get_shape().is_fully_defined():
return image.get_shape().as_list()
else:
static_shape = image.get_shape().with_rank(3).as_list()
dynamic_shape = array_ops.unstack(array_ops.shape(image), 3)
return [s if s is not None else d
for s, d in zip(static_shape, dynamic_shape)]
Example 3: _infer_fft_length_for_irfft
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import unstack [as alias]
def _infer_fft_length_for_irfft(input_tensor, fft_rank):
"""Infers the `fft_length` argument for a `rank` IRFFT from `input_tensor`."""
# A TensorShape for the inner fft_rank dimensions.
fft_shape = input_tensor.get_shape()[-fft_rank:]
# If any dim is unknown, fall back to tensor-based math.
if not fft_shape.is_fully_defined():
fft_length = _array_ops.unstack(_array_ops.shape(input_tensor)[-fft_rank:])
fft_length[-1] = _math_ops.maximum(0, 2 * (fft_length[-1] - 1))
return _array_ops.stack(fft_length)
# Otherwise, return a constant.
fft_length = fft_shape.as_list()
if fft_length:
fft_length[-1] = max(0, 2 * (fft_length[-1] - 1))
return _ops.convert_to_tensor(fft_length, _dtypes.int32)
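To make the length arithmetic above concrete, here is a small illustrative check in plain Python (the values are made up for the example):
n = 256                        # length of the original real signal
bins = n // 2 + 1              # 129 RFFT bins for an even-length signal
assert max(0, 2 * (bins - 1)) == n   # the inferred IRFFT length recovers n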
Example 4: _ImageDimensions
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import unstack [as alias]
def _ImageDimensions(image, rank):
"""Returns the dimensions of an image tensor.
Args:
image: A rank-`rank` Tensor; for rank 3, of shape `[height, width, channels]`.
rank: The expected rank of the image.
Returns:
A list corresponding to the dimensions of the input image. Dimensions that
are statically known are Python integers, otherwise they are integer scalar
tensors.
"""
if image.get_shape().is_fully_defined():
return image.get_shape().as_list()
else:
static_shape = image.get_shape().with_rank(rank).as_list()
dynamic_shape = array_ops.unstack(array_ops.shape(image), rank)
return [s if s is not None else d
for s, d in zip(static_shape, dynamic_shape)]
Example 5: _ImageDimensions
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import unstack [as alias]
def _ImageDimensions(image):
"""Returns the dimensions of an image tensor.
Args:
image: A 3-D Tensor of shape `[height, width, channels]`.
Returns:
A list of `[height, width, channels]` corresponding to the dimensions of the
input image. Dimensions that are statically known are python integers,
otherwise they are integer scalar tensors.
"""
if image.get_shape().is_fully_defined():
return image.get_shape().as_list()
else:
static_shape = image.get_shape().with_rank(3).as_list()
dynamic_shape = array_ops.unstack(array_ops.shape(image), 3)
return [s if s is not None else d
for s, d in zip(static_shape, dynamic_shape)]
Example 6: call
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import unstack [as alias]
def call(self, inputs):
shape = inputs.get_shape().as_list()
input_dim = shape[-1]
output_shape = shape[:-1] + [self.units]
if len(output_shape) > 2:
# Reshape the input to 2D.
output_shape_tensors = array_ops.unstack(array_ops.shape(inputs))
output_shape_tensors[-1] = self.units
output_shape_tensor = array_ops.stack(output_shape_tensors)
inputs = array_ops.reshape(inputs, [-1, input_dim])
outputs = standard_ops.matmul(inputs, self.kernel)
if self.use_bias:
outputs = nn.bias_add(outputs, self.bias)
if len(output_shape) > 2:
# Reshape the output back to the original ndim of the input.
outputs = array_ops.reshape(outputs, output_shape_tensor)
outputs.set_shape(output_shape)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
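A minimal sketch of the shape bookkeeping used above, written outside the layer (assumes TensorFlow 1.x; the shapes and names are illustrative, not part of the original class):
import tensorflow as tf
from tensorflow.python.ops import array_ops

units = 32
x = tf.placeholder(tf.float32, [None, 10, 64])           # [batch, time, input_dim]

shape_tensors = array_ops.unstack(array_ops.shape(x))    # batch, time, input_dim as scalar tensors
shape_tensors[-1] = units                                 # replace the last dimension with the layer width
output_shape_tensor = array_ops.stack(shape_tensors)      # dynamic target shape, here [batch, 10, 32]
flat = tf.reshape(x, [-1, 64])                            # the 2-D view that the matmul actually sees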
Example 7: _ImageDimensions
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import unstack [as alias]
def _ImageDimensions(image, rank=3):
"""Returns the dimensions of an image tensor.
Args:
image: A rank-`rank` Tensor; for rank 3, of shape `[height, width, channels]`.
rank: The expected rank of the image.
Returns:
A list corresponding to the dimensions of the input image. Dimensions that
are statically known are Python integers, otherwise they are integer scalar
tensors.
"""
if image.get_shape().is_fully_defined():
return image.get_shape().as_list()
else:
static_shape = image.get_shape().with_rank(rank).as_list()
dynamic_shape = array_ops.unstack(array_ops.shape(image), rank)
return [s if s is not None else d
for s, d in zip(static_shape, dynamic_shape)]
Example 8: _PackGrad
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import unstack [as alias]
def _PackGrad(op, grad):
"""Gradient for pack op."""
return array_ops.unstack(grad, num=op.get_attr("N"), axis=op.get_attr("axis"))
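An illustrative check (not part of the original gradient registration) that the gradient of a stack is indeed an unstack of the upstream gradient, sketched in TensorFlow 1.x graph mode:
import tensorflow as tf

a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])
packed = tf.stack([a, b], axis=0)                   # N=2, axis=0
weights = tf.constant([[10.0, 20.0], [30.0, 40.0]])
loss = tf.reduce_sum(packed * weights)

# Each input receives the matching slice of the upstream gradient,
# i.e. the result of unstacking `weights` with num=2, axis=0.
grad_a, grad_b = tf.gradients(loss, [a, b])

with tf.Session() as sess:
  print(sess.run([grad_a, grad_b]))                 # [array([10., 20.]), array([30., 40.])]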
Example 9: ndlstm_base_unrolled
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import unstack [as alias]
def ndlstm_base_unrolled(inputs, noutput, scope=None, reverse=False):
"""Run an LSTM, either forward or backward.
This is a 1D LSTM implementation using unrolling and the TensorFlow
LSTM op.
Args:
inputs: input sequence (length, batch_size, ninput)
noutput: depth of output
scope: optional scope name
reverse: run LSTM in reverse
Returns:
Output sequence (length, batch_size, noutput)
"""
with variable_scope.variable_scope(scope, "SeqLstmUnrolled", [inputs]):
length, batch_size, _ = _shape(inputs)
lstm_cell = rnn_cell.BasicLSTMCell(noutput, state_is_tuple=False)
state = array_ops.zeros([batch_size, lstm_cell.state_size])
output_u = []
inputs_u = array_ops.unstack(inputs)
if reverse:
inputs_u = list(reversed(inputs_u))
for i in xrange(length):
if i > 0:
variable_scope.get_variable_scope().reuse_variables()
output, state = lstm_cell(inputs_u[i], state)
output_u += [output]
if reverse:
output_u = list(reversed(output_u))
outputs = array_ops.stack(output_u)
return outputs
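For reference, a minimal sketch of what the unstack over the time axis produces (illustrative shapes; assumes TensorFlow 1.x):
import tensorflow as tf
from tensorflow.python.ops import array_ops

seq = tf.zeros([5, 8, 16])               # (length, batch_size, ninput)
steps = array_ops.unstack(seq)           # Python list of 5 tensors, each of shape [8, 16]
rebuilt = array_ops.stack(steps)         # back to [5, 8, 16], mirroring how the outputs are re-stacked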
Example 10: sequence_to_final
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import unstack [as alias]
def sequence_to_final(inputs, noutput, scope=None, name=None, reverse=False):
"""Run an LSTM across all steps and returns only the final state.
Args:
inputs: (length, batch_size, depth) tensor
noutput: size of output vector
scope: optional scope name
name: optional name for output tensor
reverse: run in reverse
Returns:
Batch of size (batch_size, noutput).
"""
with variable_scope.variable_scope(scope, "SequenceToFinal", [inputs]):
length, batch_size, _ = _shape(inputs)
lstm = rnn_cell.BasicLSTMCell(noutput, state_is_tuple=False)
state = array_ops.zeros([batch_size, lstm.state_size])
inputs_u = array_ops.unstack(inputs)
if reverse:
inputs_u = list(reversed(inputs_u))
for i in xrange(length):
if i > 0:
variable_scope.get_variable_scope().reuse_variables()
output, state = lstm(inputs_u[i], state)
outputs = array_ops.reshape(output, [batch_size, noutput], name=name)
return outputs
Example 11: sequence_softmax
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import unstack [as alias]
def sequence_softmax(inputs, noutput, scope=None, name=None, linear_name=None):
"""Run a softmax layer over all the time steps of an input sequence.
Args:
inputs: (length, batch_size, depth) tensor
noutput: output depth
scope: optional scope name
name: optional name for output tensor
linear_name: name for linear (pre-softmax) output
Returns:
A tensor of size (length, batch_size, noutput).
"""
length, _, ninputs = _shape(inputs)
inputs_u = array_ops.unstack(inputs)
output_u = []
with variable_scope.variable_scope(scope, "SequenceSoftmax", [inputs]):
initial_w = random_ops.truncated_normal([0 + ninputs, noutput], stddev=0.1)
initial_b = constant_op.constant(0.1, shape=[noutput])
w = variables.model_variable("weights", initializer=initial_w)
b = variables.model_variable("biases", initializer=initial_b)
for i in xrange(length):
with variable_scope.variable_scope(scope, "SequenceSoftmaxStep",
[inputs_u[i]]):
# TODO(tmb) consider using slim.fully_connected(...,
# activation_fn=tf.nn.softmax)
linear = nn_ops.xw_plus_b(inputs_u[i], w, b, name=linear_name)
output = nn_ops.softmax(linear)
output_u += [output]
outputs = array_ops.stack(output_u, name=name)
return outputs
Example 12: seq2seq_inputs
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import unstack [as alias]
def seq2seq_inputs(x, y, input_length, output_length, sentinel=None, name=None):
"""Processes inputs for Sequence to Sequence models.
Args:
x: Input Tensor [batch_size, input_length, embed_dim].
y: Output Tensor [batch_size, output_length, embed_dim].
input_length: length of input x.
output_length: length of output y.
sentinel: Optional first input to the decoder and the final output expected.
If sentinel is not provided, zeros are used. Because y is not available at
sampling time, the shape of the sentinel is inferred from x.
name: Operation name.
Returns:
Encoder input from x, and decoder inputs and outputs from y.
"""
with ops.name_scope(name, "seq2seq_inputs", [x, y]):
in_x = array_ops.unstack(x, axis=1)
y = array_ops.unstack(y, axis=1)
if sentinel is None:
# Set to zeros of shape of y[0], using x for batch size.
sentinel_shape = array_ops.stack(
[array_ops.shape(x)[0], y[0].get_shape()[1]])
sentinel = array_ops.zeros(sentinel_shape)
sentinel.set_shape(y[0].get_shape())
in_y = [sentinel] + y
out_y = y + [sentinel]
return in_x, in_y, out_y
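The decoder shifting above can be summarized with a plain-Python sketch (the names are illustrative, not from the source):
y = ["y0", "y1", "y2"]              # y after being unstacked along the time axis
s = "sentinel"
in_y = [s] + y                      # decoder inputs, shifted right by one step
out_y = y + [s]                     # decoder targets, shifted left by one step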
Example 13: _cat_probs
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import unstack [as alias]
def _cat_probs(self, log_probs):
"""Get a list of num_components batchwise probabilities."""
which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
cat_probs = which_softmax(self.cat.logits)
cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)
return cat_probs
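A minimal sketch of the unstack along the component axis (illustrative shapes; assumes TensorFlow 1.x):
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops

logits = tf.zeros([32, 3])                                    # [batch, num_components]
cat_probs = nn_ops.softmax(logits)
per_component = array_ops.unstack(cat_probs, num=3, axis=-1)  # list of 3 tensors, each of shape [32]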
Example 14: unpack
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import unstack [as alias]
def unpack(labeled_tensor, axis_name=None, name=None):
"""Unpack the tensor.
See tf.unstack.
Args:
labeled_tensor: The input tensor.
axis_name: Optional name of axis to unpack. By default, the first axis is
used.
name: Optional op name.
Returns:
The list of unpacked LabeledTensors.
Raises:
ValueError: If `axis_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
axis_names = list(labeled_tensor.axes.keys())
if axis_name is None:
axis_name = axis_names[0]
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
axis = axis_names.index(axis_name)
unpack_ops = array_ops.unstack(labeled_tensor.tensor, axis=axis, name=scope)
axes = [a for i, a in enumerate(labeled_tensor.axes.values()) if i != axis]
return [core.LabeledTensor(t, axes) for t in unpack_ops]
Example 15: __call__
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import unstack [as alias]
def __call__(self,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
is_list = isinstance(inputs, list)
if self._use_dynamic_rnn:
if is_list:
inputs = array_ops.stack(inputs)
outputs, state = rnn.dynamic_rnn(
self._cell,
inputs,
sequence_length=sequence_length,
initial_state=initial_state,
dtype=dtype,
time_major=True,
scope=scope)
if is_list:
# Convert outputs back to list
outputs = array_ops.unstack(outputs)
else: # non-dynamic rnn
if not is_list:
inputs = array_ops.unstack(inputs)
outputs, state = contrib_rnn.static_rnn(self._cell,
inputs,
initial_state=initial_state,
dtype=dtype,
sequence_length=sequence_length,
scope=scope)
if not is_list:
# Convert outputs back to tensor
outputs = array_ops.stack(outputs)
return outputs, state