This article collects typical usage examples of the Python method tensorflow.python.util.nest.map_structure: what the method does, how to call it, and what real code that uses it looks like. You can also explore the module it belongs to, tensorflow.python.util.nest, for related utilities.
Below are 15 code examples of nest.map_structure, sorted by popularity by default.
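Before diving into the examples, here is a minimal, standalone sketch (not taken from any of the projects below) of what nest.map_structure does: it applies a function to every leaf of an arbitrarily nested structure and returns a new structure with the same layout.

from tensorflow.python.util import nest

# Leaves can be arbitrary Python objects; the nesting (dicts, tuples, lists) is preserved.
structure = {"a": (1, 2), "b": [3]}
doubled = nest.map_structure(lambda x: x * 2, structure)
print(doubled)  # {'a': (2, 4), 'b': [6]}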
Example 1: _rnn_output_size

# Required import: from tensorflow.python.util import nest [as alias]
# Or: from tensorflow.python.util.nest import map_structure [as alias]
def _rnn_output_size(self):
    size = self._cell.output_size
    if self._output_layer is None:
        return size
    else:
        # To use the layer's compute_output_shape, we need to convert the
        # RNNCell's output_size entries into shapes with an unknown
        # batch size. We then pass this through the layer's
        # compute_output_shape and read off all but the first (batch)
        # dimensions to get the output size of the rnn with the layer
        # applied to the top.
        output_shape_with_unknown_batch = nest.map_structure(
            lambda s: tensor_shape.TensorShape([None]).concatenate(s),
            size)
        layer_output_shape = self._output_layer.compute_output_shape(
            output_shape_with_unknown_batch)
        return nest.map_structure(lambda s: s[1:], layer_output_shape)
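As a hedged illustration of the shape bookkeeping in Example 1 (using plain ints as stand-ins for self._cell.output_size; the structure below is hypothetical), the first map_structure call prefixes every size entry with an unknown batch dimension, and the last one strips that dimension off again:

from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import nest

size = {"output": 64, "attention": 128}  # hypothetical output_size structure
with_batch = nest.map_structure(
    lambda s: tensor_shape.TensorShape([None]).concatenate(s), size)
# with_batch: {'output': TensorShape([None, 64]), 'attention': TensorShape([None, 128])}
without_batch = nest.map_structure(lambda s: s[1:], with_batch)
# without_batch drops the leading (batch) dimension from every entry again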
Example 2: output_dtype

# Required import: from tensorflow.python.util import nest [as alias]
# Or: from tensorflow.python.util.nest import map_structure [as alias]
def output_dtype(self):
    """Types of the output of one step."""
    # Assume the dtype of the cell is the output_size structure
    # containing the input_state's first component's dtype.
    # Return that structure and the sample_ids_dtype from the helper.
    dtype = nest.flatten(self._initial_state)[0].dtype
    return AttentionRNNDecoderOutput(
        logits=nest.map_structure(lambda _: dtype, self._rnn_output_size()),
        sample_id=self._helper.sample_ids_dtype,
        cell_output=nest.map_structure(
            lambda _: dtype, self._cell.output_size),
        attention_scores=nest.map_structure(
            lambda _: dtype, self._alignments_size()),
        attention_context=nest.map_structure(
            lambda _: dtype, self._cell.state_size.attention))
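A hedged sketch of the dtype bookkeeping in Example 2: map_structure with a constant-returning lambda replaces every leaf of a size structure with the single dtype pulled out of the initial state (the state and size structures below are hypothetical).

import tensorflow as tf
from tensorflow.python.util import nest

initial_state = (tf.zeros([2, 8]), tf.zeros([2, 8]))  # hypothetical decoder state
dtype = nest.flatten(initial_state)[0].dtype          # tf.float32
sizes = {"cell_output": 8, "attention_context": 16}   # hypothetical size structure
print(nest.map_structure(lambda _: dtype, sizes))     # every leaf becomes tf.float32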
Example 3: _rnn_output_size

# Required import: from tensorflow.python.util import nest [as alias]
# Or: from tensorflow.python.util.nest import map_structure [as alias]
def _rnn_output_size(self):
    size = self._cell.output_size
    if self._output_layer is tf.identity:
        return size
    else:
        # To use the layer's compute_output_shape, we need to convert the
        # RNNCell's output_size entries into shapes with an unknown
        # batch size. We then pass this through the layer's
        # compute_output_shape and read off all but the first (batch)
        # dimensions to get the output size of the rnn with the layer
        # applied to the top.
        output_shape_with_unknown_batch = nest.map_structure(
            lambda s: tensor_shape.TensorShape([None]).concatenate(s),
            size)
        layer_output_shape = self._output_layer.compute_output_shape(
            output_shape_with_unknown_batch)
        return nest.map_structure(lambda s: s[1:], layer_output_shape)
Example 4: _create

# Required import: from tensorflow.python.util import nest [as alias]
# Or: from tensorflow.python.util.nest import map_structure [as alias]
def _create(self):
    # Concat bridge inputs on the depth dimension
    bridge_input = nest.map_structure(
        lambda x: tf.reshape(x, [self.batch_size, _total_tensor_depth(x)]),
        self._bridge_input)
    bridge_input_flat = nest.flatten([bridge_input])
    bridge_input_concat = tf.concat(bridge_input_flat, axis=1)
    state_size_splits = nest.flatten(self.decoder_state_size)
    total_decoder_state_size = sum(state_size_splits)
    # Pass bridge inputs through a fully connected layer
    initial_state_flat = tf.contrib.layers.fully_connected(
        bridge_input_concat,
        num_outputs=total_decoder_state_size,
        activation_fn=self._activation_fn,
        weights_initializer=tf.truncated_normal_initializer(
            stddev=self.parameter_init),
        biases_initializer=tf.zeros_initializer(),
        scope=None)
    # Shape back into the required state size
    initial_state = tf.split(initial_state_flat, state_size_splits, axis=1)
    return nest.pack_sequence_as(self.decoder_state_size, initial_state)
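Example 4 leans on the flatten / split / pack_sequence_as round trip. A hedged sketch with hypothetical LSTM-style state sizes shows that shape handling in isolation (the fully connected projection is replaced by a zeros placeholder):

import tensorflow as tf
from tensorflow.python.util import nest

decoder_state_size = (128, 128)                       # hypothetical (c, h) sizes
state_size_splits = nest.flatten(decoder_state_size)  # [128, 128]
initial_state_flat = tf.zeros([4, sum(state_size_splits)])  # stand-in for the FC output
parts = tf.split(initial_state_flat, state_size_splits, axis=1)
initial_state = nest.pack_sequence_as(decoder_state_size, parts)  # back to a (c, h)-shaped tuple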
Example 5: tile_batch

# Required import: from tensorflow.python.util import nest [as alias]
# Or: from tensorflow.python.util.nest import map_structure [as alias]
def tile_batch(t, multiplier, name=None):
    """Tiles the batch dimension of a (possibly nested structure of) tensor(s) t.

    For each tensor t in a (possibly nested structure of) tensors, this function
    takes a tensor t shaped `[batch_size, s0, s1, ...]` composed of minibatch
    entries `t[0], ..., t[batch_size - 1]` and tiles it to have a shape
    `[batch_size * multiplier, s0, s1, ...]` composed of minibatch entries
    `t[0], t[0], ..., t[1], t[1], ...` where each minibatch entry is repeated
    `multiplier` times.

    Args:
        t: `Tensor` shaped `[batch_size, ...]`.
        multiplier: Python int.
        name: Name scope for any created operations.

    Returns:
        A (possibly nested structure of) `Tensor` shaped
        `[batch_size * multiplier, ...]`.

    Raises:
        ValueError: if tensor(s) `t` do not have a statically known rank or
            the rank is < 1.
    """
    flat_t = nest.flatten(t)
    with tf.name_scope(name, "tile_batch", flat_t + [multiplier]):
        return nest.map_structure(lambda t_: _tile_batch(t_, multiplier), t)
Author: hirofumi0810 | Project: tensorflow_end2end_speech_recognition | Lines: 24 | Source: beam_search_decoder_from_tensorflow.py
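Example 5 delegates the per-tensor work to a helper _tile_batch that is not shown in the snippet. A hedged sketch of what such a helper typically does (modeled on the internals of tf.contrib.seq2seq.tile_batch): insert a new axis after the batch dimension, tile along it, then fold it back into the batch.

import tensorflow as tf

def _tile_batch(t, multiplier):
    # Repeats each batch entry `multiplier` times, so [batch, ...] becomes
    # [batch * multiplier, ...] with entries t[0], t[0], ..., t[1], t[1], ...
    t = tf.convert_to_tensor(t, name="t")
    if t.shape.ndims is None or t.shape.ndims < 1:
        raise ValueError("t must have statically known rank of at least 1")
    shape_t = tf.shape(t)
    tiling = [1] * (t.shape.ndims + 1)
    tiling[1] = multiplier
    tiled = tf.tile(tf.expand_dims(t, 1), tiling)
    return tf.reshape(
        tiled, tf.concat(([shape_t[0] * multiplier], shape_t[1:]), axis=0))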
Example 6: _rnn_output_size

# Required import: from tensorflow.python.util import nest [as alias]
# Or: from tensorflow.python.util.nest import map_structure [as alias]
def _rnn_output_size(self):
    size = self._cell.output_size
    if self._output_layer is None:
        return size
    else:
        # To use the layer's compute_output_shape, we need to convert the
        # RNNCell's output_size entries into shapes with an unknown
        # batch size. We then pass this through the layer's
        # compute_output_shape and read off all but the first (batch)
        # dimensions to get the output size of the rnn with the layer
        # applied to the top.
        output_shape_with_unknown_batch = nest.map_structure(
            lambda s: tensor_shape.TensorShape([None]).concatenate(s),
            size)
        layer_output_shape = self._output_layer._compute_output_shape(  # pylint: disable=protected-access
            output_shape_with_unknown_batch)
        return nest.map_structure(lambda s: s[1:], layer_output_shape)
Author: hirofumi0810 | Project: tensorflow_end2end_speech_recognition | Lines: 19 | Source: beam_search_decoder_from_tensorflow.py
Example 7: __call__

# Required import: from tensorflow.python.util import nest [as alias]
# Or: from tensorflow.python.util.nest import map_structure [as alias]
def __call__(self, inputs, state, scope=None):
    """Run the cell and add its inputs to its outputs.

    Args:
        inputs: cell inputs.
        state: cell state.
        scope: optional cell scope.

    Returns:
        Tuple of cell outputs and new state.

    Raises:
        TypeError: If cell inputs and outputs have different structure (type).
        ValueError: If cell inputs and outputs have different structure (value).
    """
    outputs, new_state = self._cell(inputs, state, scope=scope)
    nest.assert_same_structure(inputs, outputs)

    # Ensure shapes match
    def assert_shape_match(inp, out):
        inp.get_shape().assert_is_compatible_with(out.get_shape())

    nest.map_structure(assert_shape_match, inputs, outputs)
    res_outputs = nest.map_structure(
        lambda inp, out: inp + out, inputs, outputs)
    return (res_outputs, new_state)
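A hedged usage sketch for Example 7: the __call__ shown matches the behavior of TensorFlow 1.x's tf.nn.rnn_cell.ResidualWrapper, which adds the cell inputs to the cell outputs, so the input width must equal the cell's output width (the concrete cell and sizes below are assumptions for illustration only).

import tensorflow as tf

cell = tf.nn.rnn_cell.ResidualWrapper(tf.nn.rnn_cell.GRUCell(32))
inputs = tf.zeros([4, 32])  # input size must equal the cell's output size
outputs, state = cell(inputs, cell.zero_state(4, tf.float32))  # outputs = inputs + GRU output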
Example 8: _BuildCondTensor

# Required import: from tensorflow.python.util import nest [as alias]
# Or: from tensorflow.python.util.nest import map_structure [as alias]
def _BuildCondTensor(self, v):
    if isinstance(v, ops.Operation):
        # Use pivot as the proxy for this op.
        return with_dependencies([v], self._pivot)
    elif isinstance(v, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
        values = self._ProcessOutputTensor(v.values)
        indices = self._ProcessOutputTensor(v.indices)
        if isinstance(v, ops.IndexedSlices):
            dense_shape = v.dense_shape
            if dense_shape is not None:
                dense_shape = self._ProcessOutputTensor(dense_shape)
            return ops.IndexedSlices(values, indices, dense_shape)
        else:
            dense_shape = self._ProcessOutputTensor(v.dense_shape)
            return sparse_tensor.SparseTensor(indices, values, dense_shape)
    else:
        v = nest.map_structure(_convert_tensorarray_to_flow, v)
        return self._ProcessOutputTensor(ops.convert_to_tensor(v))
Example 9: __call__

# Required import: from tensorflow.python.util import nest [as alias]
# Or: from tensorflow.python.util.nest import map_structure [as alias]
def __call__(self, inputs, state, scope=None):
    """Run the cell and apply a highway connection between its inputs and outputs.

    Args:
        inputs: cell inputs.
        state: cell state.
        scope: optional cell scope.

    Returns:
        Tuple of cell outputs and new state.

    Raises:
        TypeError: If cell inputs and outputs have different structure (type).
        ValueError: If cell inputs and outputs have different structure (value).
    """
    outputs, new_state = self._cell(inputs, state, scope=scope)
    nest.assert_same_structure(inputs, outputs)

    # Ensure shapes match
    def assert_shape_match(inp, out):
        inp.get_shape().assert_is_compatible_with(out.get_shape())

    nest.map_structure(assert_shape_match, inputs, outputs)
    res_outputs = nest.map_structure(self._highway, inputs, outputs)
    return (res_outputs, new_state)
Example 10: _rnn_output_size

# Required import: from tensorflow.python.util import nest [as alias]
# Or: from tensorflow.python.util.nest import map_structure [as alias]
def _rnn_output_size(self):
    size = self._cell.output_size
    if self._output_layer is None:
        return size
    else:
        # To use the layer's compute_output_shape, we need to convert the
        # RNNCell's output_size entries into shapes with an unknown
        # batch size. We then pass this through the layer's
        # compute_output_shape and read off all but the first (batch)
        # dimensions to get the output size of the rnn with the layer
        # applied to the top.
        output_shape_with_unknown_batch = nest.map_structure(
            lambda s: tensor_shape.TensorShape([None]).concatenate(s),
            size)
        layer_output_shape = self._output_layer._compute_output_shape(  # pylint: disable=protected-access
            output_shape_with_unknown_batch)
        return nest.map_structure(lambda s: s[1:], layer_output_shape)
Example 11: _rnn_output_size

# Required import: from tensorflow.python.util import nest [as alias]
# Or: from tensorflow.python.util.nest import map_structure [as alias]
def _rnn_output_size(self):
    size = self._cell.output_size
    if self._output_layer is None:
        return size
    else:
        # To use the layer's compute_output_shape, we need to convert the
        # RNNCell's output_size entries into shapes with an unknown
        # batch size. We then pass this through the layer's
        # compute_output_shape and read off all but the first (batch)
        # dimensions to get the output size of the rnn with the layer
        # applied to the top.
        output_shape_with_unknown_batch = nest.map_structure(
            lambda s: tensor_shape.TensorShape([None]).concatenate(s),
            size)
        layer_output_shape = self._output_layer._compute_output_shape(  # pylint: disable=protected-access
            output_shape_with_unknown_batch)
        return nest.map_structure(lambda s: s[1:], layer_output_shape)
Example 12: __call__

# Required import: from tensorflow.python.util import nest [as alias]
# Or: from tensorflow.python.util.nest import map_structure [as alias]
def __call__(self, inputs, state, scope=None):
    """Run the cell and apply a highway connection between its inputs and outputs.

    Args:
        inputs: cell inputs.
        state: cell state.
        scope: optional cell scope.

    Returns:
        Tuple of cell outputs and new state.

    Raises:
        TypeError: If cell inputs and outputs have different structure (type).
        ValueError: If cell inputs and outputs have different structure (value).
    """
    outputs, new_state = self._cell(inputs, state, scope=scope)
    nest.assert_same_structure(inputs, outputs)

    # Ensure shapes match
    def assert_shape_match(inp, out):
        inp.get_shape().assert_is_compatible_with(out.get_shape())

    nest.map_structure(assert_shape_match, inputs, outputs)
    res_outputs = nest.map_structure(self._highway, inputs, outputs)
    return (res_outputs, new_state)
Example 13: gnmt_residual_fn

# Required import: from tensorflow.python.util import nest [as alias]
# Or: from tensorflow.python.util.nest import map_structure [as alias]
def gnmt_residual_fn(inputs, outputs):
    """Residual function that handles different inner dims for inputs and outputs.

    Args:
        inputs: cell inputs; the actual inputs concatenated with the attention
            vector.
        outputs: cell outputs.

    Returns:
        outputs + actual inputs
    """
    def split_input(inp, out):
        out_dim = out.get_shape().as_list()[-1]
        inp_dim = inp.get_shape().as_list()[-1]
        return tf.split(inp, [out_dim, inp_dim - out_dim], axis=-1)

    actual_inputs, _ = nest.map_structure(split_input, inputs, outputs)

    def assert_shape_match(inp, out):
        inp.get_shape().assert_is_compatible_with(out.get_shape())

    nest.assert_same_structure(actual_inputs, outputs)
    nest.map_structure(assert_shape_match, actual_inputs, outputs)
    return nest.map_structure(lambda inp, out: inp + out, actual_inputs, outputs)
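A hedged usage sketch for Example 13 with concrete, hypothetical shapes, assuming the gnmt_residual_fn defined above and its tensorflow/nest imports are in scope: the GNMT decoder feeds the cell its embedding concatenated with the attention vector, so the input is wider than the cell output, and split_input keeps only the leading slice of the input before the residual add.

import tensorflow as tf

inputs = tf.zeros([4, 96])   # hypothetical embedding (64) concatenated with attention (32)
outputs = tf.zeros([4, 64])  # hypothetical cell output
residual = gnmt_residual_fn(inputs, outputs)  # shape [4, 64]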
Example 14: _rnn_output_size

# Required import: from tensorflow.python.util import nest [as alias]
# Or: from tensorflow.python.util.nest import map_structure [as alias]
def _rnn_output_size(self):
    size = self._cell.output_size
    if self._output_layer is None:
        return size
    else:
        # To use the layer's compute_output_shape, we need to convert the
        # RNNCell's output_size entries into shapes with an unknown
        # batch size. We then pass this through the layer's
        # compute_output_shape and read off all but the first (batch)
        # dimensions to get the output size of the rnn with the layer
        # applied to the top.
        output_shape_with_unknown_batch = nest.map_structure(
            lambda s: tensor_shape.TensorShape([None]).concatenate(s),
            size)
        layer_output_shape = self._output_layer._compute_output_shape(  # pylint: disable=protected-access
            output_shape_with_unknown_batch)
        return nest.map_structure(lambda s: s[1:], layer_output_shape)
Example 15: output_dtype

# Required import: from tensorflow.python.util import nest [as alias]
# Or: from tensorflow.python.util.nest import map_structure [as alias]
def output_dtype(self):
    # Assume the dtype of the cell is the output_size structure
    # containing the input_state's first component's dtype.
    # Return that structure and int32 (the id).
    dtype = nest.flatten(self._initial_state)[0].dtype
    return BasicDecoderOutput(
        nest.map_structure(lambda _: dtype, self._rnn_output_size()),
        dtypes.int32)
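A hedged sketch of what Example 15 returns when the cell's output size is a plain int; BasicDecoderOutput is assumed here to be the namedtuple exported by tf.contrib.seq2seq, with fields rnn_output and sample_id.

from tensorflow.contrib.seq2seq import BasicDecoderOutput
from tensorflow.python.framework import dtypes
from tensorflow.python.util import nest

dtype = dtypes.float32  # stand-in for nest.flatten(initial_state)[0].dtype
print(BasicDecoderOutput(
    nest.map_structure(lambda _: dtype, 256),  # rnn_output carries the state dtype
    dtypes.int32))                             # sample_id is int32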