

Python nest.map_structure Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.util.nest.map_structure. If you have been wondering how map_structure works, how to call it, or what real-world uses look like, the hand-picked examples below should help.


The sections below present 15 code examples of the map_structure function, sorted by popularity by default.
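As a quick orientation before the examples, here is a minimal sketch of what map_structure does, using plain Python numbers as leaves (this snippet is illustrative and not taken from any of the projects below):

from tensorflow.python.util import nest

# map_structure applies a function leaf-by-leaf across one or more nested
# structures (lists, tuples, dicts) and returns a structure of the same shape.
added = nest.map_structure(
    lambda x, y: x + y,
    {'a': 1, 'b': [2, 3]},
    {'a': 10, 'b': [20, 30]})
# added == {'a': 11, 'b': [22, 33]}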

Example 1: body

  def body(time, outputs_ta, state, inputs, finished):
    """Internal while_loop body.

    Args:
      time: scalar int32 tensor.
      outputs_ta: structure of TensorArray.
      state: (structure of) state tensors and TensorArrays.
      inputs: (structure of) input tensors.
      finished: 1-D bool tensor.

    Returns:
      `(time + 1, outputs_ta, next_state, next_inputs, next_finished)`.
    """
    (next_outputs, decoder_state, next_inputs, decoder_finished) = decoder.step(
        time, inputs, state)
    next_finished = math_ops.logical_or(decoder_finished, finished)

    nest.assert_same_structure(state, decoder_state)
    nest.assert_same_structure(outputs_ta, next_outputs)
    nest.assert_same_structure(inputs, next_inputs)

    # Zero out output values past finish
    emit = nest.map_structure(
        lambda out, zero: array_ops.where(finished, zero, out), next_outputs,
        zero_outputs)

    # Copy through states past finish
    def _maybe_copy_state(new, cur):
      return (new if isinstance(cur, tensor_array_ops.TensorArray) else
              array_ops.where(finished, cur, new))

    next_state = nest.map_structure(_maybe_copy_state, decoder_state, state)
    outputs_ta = nest.map_structure(lambda ta, out: ta.write(time, out),
                                    outputs_ta, emit)
    return (time + 1, outputs_ta, next_state, next_inputs, next_finished)
Developer: adventuroussrv, Project: tensorflow, Lines: 35, Source: decoder.py
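The "zero out past finish" idiom in Example 1 can be tried in isolation. A minimal sketch with made-up toy tensors, using the public tf.where, which behaves like array_ops.where here:

import tensorflow as tf

finished = tf.constant([False, True, False])   # one flag per batch entry
next_outputs = tf.constant([1.0, 2.0, 3.0])
zero_outputs = tf.zeros_like(next_outputs)

# Finished sequences emit zeros instead of whatever the decoder produced.
emit = tf.where(finished, zero_outputs, next_outputs)
# emit == [1.0, 0.0, 3.0]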

Example 2: generate_synthetic_data

def generate_synthetic_data(
    input_shape, input_value=0, input_dtype=None, label_shape=None,
    label_value=0, label_dtype=None):
  """Create a repeating dataset with constant values.

  Args:
    input_shape: a tf.TensorShape object or nested tf.TensorShapes. The shape of
      the input data.
    input_value: Value of each input element.
    input_dtype: Input dtype. If None, will be inferred by the input value.
    label_shape: a tf.TensorShape object or nested tf.TensorShapes. The shape of
      the label data.
    label_value: Value of each label element.
    label_dtype: Label dtype. If None, will be inferred by the label value.

  Returns:
    Dataset of tensors or tuples of tensors (if label_shape is set).
  """
  # TODO(kathywu): Replace with SyntheticDataset once it is in contrib.
  element = input_element = nest.map_structure(
      lambda s: tf.constant(input_value, input_dtype, s), input_shape)

  if label_shape:
    label_element = nest.map_structure(
        lambda s: tf.constant(label_value, label_dtype, s), label_shape)
    element = (input_element, label_element)

  return tf.data.Dataset.from_tensors(element).repeat()
Developer: Exscotticus, Project: models, Lines: 28, Source: model_helpers.py
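A hypothetical usage of the function above, assuming it is in scope and eager execution is enabled:

import tensorflow as tf

dataset = generate_synthetic_data(
    input_shape=tf.TensorShape([2, 3]), input_value=1.0,
    label_shape=tf.TensorShape([1]), label_value=0.0)
features, labels = next(iter(dataset))
# features is a [2, 3] tensor of ones; labels is a [1] tensor of zeros.
# The dataset repeats forever, so bound iteration with take() or
# steps_per_epoch when training.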

Example 3: testMemoryIsFreed

  def testMemoryIsFreed(self):
    # Note: we use `set` values for components and metadata because we need
    # to construct weakrefs to them.  Other builtin types, such as `list` and
    # `tuple`, do not support weakrefs.
    ct1 = CT(set([1, 2]), set(['no', 'leaks']))
    ct2 = CT(set([3, 4]), set(['no', 'leaks']))
    ct3 = CT(set([5, 6]), set(['other', 'metadata']))

    # Note: map_structure exercises flatten, pack_sequence_as, and
    # assert_same_structure.
    func = lambda x, y: x | y
    ct4 = nest.map_structure(func, ct1, ct2, expand_composites=True)

    # Check that the exception-raising path in assert_same_structure
    # doesn't leak any objects.
    with self.assertRaisesRegexp(ValueError,
                                 ".*don't have the same nested structure.*"):
      nest.map_structure(func, ct2, ct3, expand_composites=True)
    if hasattr(sys, 'exc_clear'):
      sys.exc_clear()  # Remove any references in exception stack traces.

    refs = []
    for ct in [ct1, ct2, ct3, ct4]:
      refs.append(weakref.ref(ct))
      refs.append(weakref.ref(ct.components))
      refs.append(weakref.ref(ct.metadata))
    del ct  # pylint: disable=undefined-loop-variable

    for ref in refs:
      self.assertIsNotNone(ref())

    del ct1, ct2, ct3, ct4
    gc.collect()
    for ref in refs:
      self.assertIsNone(ref())
Developer: aritratony, Project: tensorflow, Lines: 35, Source: composite_tensor_test.py
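The leaf function from the test also works on ordinary nested structures; a small sketch (sets are leaves for nest, so expand_composites is unnecessary here):

from tensorflow.python.util import nest

merged = nest.map_structure(lambda x, y: x | y,
                            {'k': {1, 2}}, {'k': {3, 4}})
# merged == {'k': {1, 2, 3, 4}}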

Example 4: get_next

  def get_next(self, name=None):
    """Return PerIteration with `iterations x batches_per_iteration` inputs."""
    data = []
    for _ in range(self._batches_per_iteration):
      batch = []
      for _ in range(self._iterations):
        batch.append(self._dataset_iterator.get_next(name=name))
      data.append(batch)

    # Here is an example.  Suppose each get_next returns a tuple of two tensors.
    # For 3 `iterations` and 2 `batches_per_iteration`, the `data` is:
    # [[(a,z), (b,y), (c,x)], [(A,Z), (B,Y), (C,X)]]
    #
    # After the first `map_structure` it gets transformed to:
    #  [(Batches(a, A), Batches(z, Z)),
    #   (Batches(b, B), Batches(y, Y)),
    #   (Batches(c, C), Batches(x, X))]
    #
    # After the second `map_structure` it gets transformed to a tuple of:
    # (PerIteration([Batches(a, A), Batches(b, B), Batches(c, C)]),
    #  PerIteration([Batches(z, Z), Batches(y, Y), Batches(x, X)]))

    data = nest.map_structure(Batches, *data)
    data = nest.map_structure(PerIteration, *data)

    return data
Developer: sonnyhu, Project: tensorflow, Lines: 26, Source: values.py
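The transformation described in the comments can be re-created in pure Python; a sketch in which Batches and PerIteration are simplified stand-ins for the real classes in values.py:

from tensorflow.python.util import nest

class Batches(object):
  """Stand-in wrapper that just records its positional arguments."""
  def __init__(self, *args):
    self.args = args

class PerIteration(object):
  """Stand-in wrapper that just records its positional arguments."""
  def __init__(self, *args):
    self.args = args

data = [[('a', 'z'), ('b', 'y'), ('c', 'x')],
        [('A', 'Z'), ('B', 'Y'), ('C', 'X')]]
step1 = nest.map_structure(Batches, *data)
# step1: [(Batches('a', 'A'), Batches('z', 'Z')), ...] -- a list of 3 tuples.
step2 = nest.map_structure(PerIteration, *step1)
# step2: a tuple of 2 PerIteration objects, one per position in the original
# get_next tuple, exactly as the comment describes.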

Example 5: body

    def body(time, outputs_ta, state, inputs, finished, sequence_lengths):
      """Internal while_loop body.

      Args:
        time: scalar int32 tensor.
        outputs_ta: structure of TensorArray.
        state: (structure of) state tensors and TensorArrays.
        inputs: (structure of) input tensors.
        finished: bool tensor (keeping track of what's finished).
        sequence_lengths: int32 tensor (keeping track of time of finish).

      Returns:
        `(time + 1, outputs_ta, next_state, next_inputs, next_finished,
          next_sequence_lengths)`.
      """
      (next_outputs, decoder_state, next_inputs,
       decoder_finished) = decoder.step(time, inputs, state)
      next_finished = math_ops.logical_or(decoder_finished, finished)
      if maximum_iterations is not None:
        next_finished = math_ops.logical_or(
            next_finished, time + 1 >= maximum_iterations)
      next_sequence_lengths = array_ops.where(
          math_ops.logical_and(math_ops.logical_not(finished), next_finished),
          array_ops.fill(array_ops.shape(sequence_lengths), time + 1),
          sequence_lengths)

      nest.assert_same_structure(state, decoder_state)
      nest.assert_same_structure(outputs_ta, next_outputs)
      nest.assert_same_structure(inputs, next_inputs)

      # Zero out output values past finish
      if impute_finished:
        emit = nest.map_structure(
            lambda out, zero: array_ops.where(finished, zero, out),
            next_outputs,
            zero_outputs)
      else:
        emit = next_outputs

      # Copy through states past finish
      def _maybe_copy_state(new, cur):
        # TensorArrays and scalar states get passed through.
        if isinstance(cur, tensor_array_ops.TensorArray):
          pass_through = True
        else:
          new.set_shape(cur.shape)
          pass_through = (new.shape.ndims == 0)
        return new if pass_through else array_ops.where(finished, cur, new)

      if impute_finished:
        next_state = nest.map_structure(
            _maybe_copy_state, decoder_state, state)
      else:
        next_state = decoder_state

      outputs_ta = nest.map_structure(lambda ta, out: ta.write(time, out),
                                      outputs_ta, emit)
      return (time + 1, outputs_ta, next_state, next_inputs, next_finished,
              next_sequence_lengths)
Developer: AlbertXiebnu, Project: tensorflow, Lines: 60, Source: decoder.py

Example 6: __init__

  def __init__(self, inputs, sequence_length, sampling_probability,
               time_major=False, seed=None, next_inputs_fn=None,
               auxiliary_inputs=None, name=None):
    """Initializer.

    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      sampling_probability: A 0D `float32` tensor: the probability of sampling
        from the outputs instead of reading directly from the inputs.
      time_major: Python bool.  Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      seed: The sampling seed.
      next_inputs_fn: (Optional) callable to apply to the RNN outputs to create
        the next input when sampling. If `None` (default), the RNN outputs will
        be used as the next inputs.
      auxiliary_inputs: An optional (structure of) auxiliary input tensors with
        a shape that matches `inputs` in all but (potentially) the final
        dimension. These tensors will be concatenated to the sampled output or
        the `inputs` when not sampling for use as the next input.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sampling_probability` is not a scalar or vector.
    """
    with ops.name_scope(name, "ScheduledOutputTrainingHelper",
                        [inputs, auxiliary_inputs, sampling_probability]):
      self._sampling_probability = ops.convert_to_tensor(
          sampling_probability, name="sampling_probability")
      if self._sampling_probability.get_shape().ndims not in (0, 1):
        raise ValueError(
            "sampling_probability must be either a scalar or a vector. "
            "saw shape: %s" % (self._sampling_probability.get_shape()))

      if auxiliary_inputs is None:
        maybe_concatenated_inputs = inputs
      else:
        inputs = ops.convert_to_tensor(inputs, name="inputs")
        auxiliary_inputs = ops.convert_to_tensor(
            auxiliary_inputs, name="auxiliary_inputs")
        maybe_concatenated_inputs = nest.map_structure(
            lambda x, y: array_ops.concat((x, y), -1),
            inputs, auxiliary_inputs)
        if not time_major:
          auxiliary_inputs = nest.map_structure(
              _transpose_batch_time, auxiliary_inputs)

      self._auxiliary_input_tas = (
          nest.map_structure(_unstack_ta, auxiliary_inputs)
          if auxiliary_inputs is not None else None)

      self._seed = seed

      self._next_inputs_fn = next_inputs_fn

      super(ScheduledOutputTrainingHelper, self).__init__(
          inputs=maybe_concatenated_inputs,
          sequence_length=sequence_length,
          time_major=time_major,
          name=name)
Developer: AnddyWang, Project: tensorflow, Lines: 60, Source: helper.py
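The auxiliary-input concatenation above is easy to check on toy shapes; an illustrative sketch (names and shapes are made up):

import tensorflow as tf
from tensorflow.python.util import nest

inputs = {'tokens': tf.ones([4, 7, 16])}     # [batch, time, depth]
auxiliary = {'tokens': tf.zeros([4, 7, 3])}  # matches all but the last dim
merged = nest.map_structure(
    lambda x, y: tf.concat((x, y), -1), inputs, auxiliary)
# merged['tokens'].shape == (4, 7, 19)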

Example 7: __init__

  def __init__(self, inputs, sequence_length, time_major=False, name=None):
    """Initializer.

    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      time_major: Python bool.  Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sequence_length` is not a 1D tensor.
    """
    with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
      inputs = ops.convert_to_tensor(inputs, name="inputs")
      if not time_major:
        inputs = nest.map_structure(_transpose_batch_time, inputs)

      self._input_tas = nest.map_structure(_unstack_ta, inputs)
      self._sequence_length = ops.convert_to_tensor(
          sequence_length, name="sequence_length")
      if self._sequence_length.get_shape().ndims != 1:
        raise ValueError(
            "Expected sequence_length to be a vector, but received shape: %s" %
            self._sequence_length.get_shape())

      self._zero_inputs = nest.map_structure(
          lambda inp: array_ops.zeros_like(inp[0, :]), inputs)

      self._batch_size = array_ops.size(sequence_length)
Developer: AlbertXiebnu, Project: tensorflow, Lines: 30, Source: helper.py
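The two map_structure calls above transpose to time-major layout and derive per-step zero inputs. A hedged sketch with a simplified stand-in for the internal _transpose_batch_time helper (the real helper also handles dynamic ranks):

import tensorflow as tf
from tensorflow.python.util import nest

def transpose_batch_time(x):
  # Swap the leading batch and time axes; assumes a statically known rank.
  return tf.transpose(x, [1, 0] + list(range(2, x.shape.ndims)))

inputs = tf.ones([4, 7, 16])  # [batch, time, depth]
time_major = nest.map_structure(transpose_batch_time, inputs)  # [7, 4, 16]
zero_inputs = nest.map_structure(
    lambda inp: tf.zeros_like(inp[0, :]), time_major)  # [4, 16]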

Example 8: step

    def step(self, time, inputs, state, name=None):
        """Perform a decoding step.
        Args:
            time: scalar `int32` tensor
            inputs: A (structure of) input tensors
            state: A (structure of) state tensors and TensorArrays
            name: Name scope for any created operations
        Returns:
            outputs: An instance of `BeamSearchDecoderOutput`
            next_state: A (structure of) state tensors and TensorArrays
            next_inputs: The tensor that should be used as input for the
                next step
            finished: A boolean tensor telling whether the sequence is
                complete, for each sequence in the batch
        """
        print('===== step (beam search) =====')
        decoder_state, beam_state = state

        # Call the original decoder
        decoder_output, decoder_state, _, _ = self.decoder.step(time, inputs,
                                                                decoder_state)

        # Perform a step of beam search
        beam_search_output, beam_state = beam_search_step(
            time=time,
            logits=decoder_output.logits,
            beam_state=beam_state,
            beam_width=self.beam_width,
            vocab_size=self.vocab_size,
            eos_index=self.eos_index,
            length_penalty_weight=self.length_penalty_weight,
            choose_successors_fn=self.choose_successors_fn)

        # Shuffle everything according to beam search result
        decoder_state = nest.map_structure(
            lambda x: tf.gather(x, beam_search_output.beam_parent_ids),
            decoder_state)
        decoder_output = nest.map_structure(
            lambda x: tf.gather(x, beam_search_output.beam_parent_ids),
            decoder_output)

        next_state = (decoder_state, beam_state)

        outputs = BeamSearchDecoderOutput(
            logits=tf.zeros([self.beam_width, self.vocab_size]),
            predicted_ids=beam_search_output.predicted_ids,
            log_probs=beam_state.log_probs,
            scores=beam_search_output.scores,
            beam_parent_ids=beam_search_output.beam_parent_ids,
            original_outputs=decoder_output)

        finished, next_inputs, next_state = self.decoder.helper.next_inputs(
            time=time,
            outputs=decoder_output,
            state=next_state,
            sample_ids=beam_search_output.predicted_ids)
        next_inputs.set_shape([self.batch_size, None])

        return outputs, next_state, next_inputs, finished
Developer: seasky100, Project: tensorflow_end2end_speech_recognition, Lines: 59, Source: beam_search_decoder.py
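The beam-reshuffling step (tf.gather keyed by beam_parent_ids) can be seen on a toy state; names and values below are illustrative:

import tensorflow as tf
from tensorflow.python.util import nest

state = {'h': tf.constant([[1.], [2.], [3.]]),   # one row per beam
         'c': tf.constant([[4.], [5.], [6.]])}
beam_parent_ids = tf.constant([2, 0, 0])  # each new beam's predecessor

reordered = nest.map_structure(
    lambda x: tf.gather(x, beam_parent_ids), state)
# reordered['h'] == [[3.], [1.], [1.]]: every surviving beam now carries
# the state of the parent it was expanded from.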

Example 9: _build

  def _build(self, inputs, prev_state):
    """Connects the DeepRNN module into the graph.

    If this is not the first time the module has been connected to the graph,
    the Tensors provided as `inputs` and `prev_state` must have the same final
    dimension, in order for the existing variables to be the correct size for
    their corresponding multiplications. The batch size may differ for each
    connection.

    Args:
      inputs: a nested tuple of Tensors of arbitrary dimensionality, with at
        least an initial batch dimension.
      prev_state: a tuple of `prev_state`s that corresponds to the state
        of each one of the cores of the `DeepCore`.

    Returns:
      output: a nested tuple of Tensors of arbitrary dimensionality, with at
        least an initial batch dimension.
      next_state: a tuple of `next_state`s that corresponds to the updated state
        of each one of the cores of the `DeepCore`.

    Raises:
      ValueError: if connecting the module into the graph any time after the
        first time, and the inferred size of the inputs does not match previous
        invocations. This may happen if one connects a module any time after the
        first time that does not have the configuration of skip connections as
        the first time.
    """
    current_input = inputs
    next_states = []
    outputs = []
    recurrent_idx = 0
    concatenate = lambda *args: tf.concat(args, axis=-1)
    for i, core in enumerate(self._cores):
      if self._skip_connections and i > 0:
        current_input = nest.map_structure(concatenate, inputs, current_input)

      # Determine if this core in the stack is recurrent or not and call
      # accordingly.
      if self._is_recurrent_list[i]:
        current_input, next_state = core(current_input,
                                         prev_state[recurrent_idx])
        next_states.append(next_state)
        recurrent_idx += 1
      else:
        current_input = core(current_input)

      if self._skip_connections:
        outputs.append(current_input)

    if self._skip_connections and self._concat_final_output_if_skip:
      output = nest.map_structure(concatenate, *outputs)
    else:
      output = current_input

    self._last_output_size = _get_shape_without_batch_dimension(output)
    return output, tuple(next_states)
Developer: ccchang0111, Project: sonnet, Lines: 57, Source: basic_rnn.py
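The skip-connection concat at the heart of _build is a one-liner on toy tensors; an illustrative sketch:

import tensorflow as tf
from tensorflow.python.util import nest

concatenate = lambda *args: tf.concat(args, axis=-1)
inputs = tf.ones([2, 5])
core_output = tf.zeros([2, 7])
next_input = nest.map_structure(concatenate, inputs, core_output)
# next_input.shape == (2, 12): the original inputs ride along to each core.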

Example 10: _prepare_memory

def _prepare_memory(memory, memory_sequence_length, check_inner_dims_defined):
  """Convert to tensor and possibly mask `memory`.

  Args:
    memory: `Tensor`, shaped `[batch_size, max_time, ...]`.
    memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`.
    check_inner_dims_defined: Python boolean.  If `True`, the `memory`
      argument's shape is checked to ensure all but the two outermost
      dimensions are fully defined.

  Returns:
    A (possibly masked), checked, new `memory`.

  Raises:
    ValueError: If `check_inner_dims_defined` is `True` and not
      `memory.shape[2:].is_fully_defined()`.
  """
  memory = nest.map_structure(
      lambda m: ops.convert_to_tensor(m, name="memory"), memory)
  if memory_sequence_length is not None:
    memory_sequence_length = ops.convert_to_tensor(
        memory_sequence_length, name="memory_sequence_length")
  if check_inner_dims_defined:
    def _check_dims(m):
      if not m.get_shape()[2:].is_fully_defined():
        raise ValueError("Expected memory %s to have fully defined inner dims, "
                         "but saw shape: %s" % (m.name, m.get_shape()))
    nest.map_structure(_check_dims, memory)
  if memory_sequence_length is None:
    seq_len_mask = None
  else:
    seq_len_mask = array_ops.sequence_mask(
        memory_sequence_length,
        maxlen=array_ops.shape(nest.flatten(memory)[0])[1],
        dtype=nest.flatten(memory)[0].dtype)
    seq_len_batch_size = (
        memory_sequence_length.shape[0].value
        or array_ops.shape(memory_sequence_length)[0])
  def _maybe_mask(m, seq_len_mask):
    rank = m.get_shape().ndims
    rank = rank if rank is not None else array_ops.rank(m)
    extra_ones = array_ops.ones(rank - 2, dtype=dtypes.int32)
    m_batch_size = m.shape[0].value or array_ops.shape(m)[0]
    if memory_sequence_length is not None:
      message = ("memory_sequence_length and memory tensor batch sizes do not "
                 "match.")
      with ops.control_dependencies([
          check_ops.assert_equal(
              seq_len_batch_size, m_batch_size, message=message)]):
        seq_len_mask = array_ops.reshape(
            seq_len_mask,
            array_ops.concat((array_ops.shape(seq_len_mask), extra_ones), 0))
        return m * seq_len_mask
    else:
      return m
  return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory)
Developer: ajaybhat, Project: tensorflow, Lines: 56, Source: attention_wrapper.py
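In the simple fully-defined case, the masking that _prepare_memory performs reduces to a sequence mask broadcast over the depth axis; a sketch with illustrative shapes, using the public tf.sequence_mask rather than the internal ops:

import tensorflow as tf

memory = tf.random.normal([2, 5, 3])  # [batch, max_time, depth]
lengths = tf.constant([3, 5])
mask = tf.sequence_mask(lengths, maxlen=5, dtype=memory.dtype)  # [2, 5]
masked = memory * mask[:, :, tf.newaxis]  # zeros past each sequence's end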

Example 11: testAssertions

  def testAssertions(self):
    a = tracking.Checkpointable()
    a.l = {"k": [numpy.zeros([2, 2])]}
    self.assertAllEqual(nest.flatten({"k": [numpy.zeros([2, 2])]}),
                        nest.flatten(a.l))
    self.assertAllClose({"k": [numpy.zeros([2, 2])]}, a.l)
    nest.map_structure(self.assertAllClose, a.l, {"k": [numpy.zeros([2, 2])]})
    a.tensors = {"k": [array_ops.ones([2, 2]), array_ops.zeros([3, 3])]}
    self.assertAllClose({"k": [numpy.ones([2, 2]), numpy.zeros([3, 3])]},
                        self.evaluate(a.tensors))
Developer: JonathanRaiman, Project: tensorflow, Lines: 10, Source: tracking_test.py

Example 12: test_autolambda

  def test_autolambda(self, model_fn):
    inputs, outputs = model_fn()
    model = keras.Model(inputs, outputs)
    model.compile(
        adam.Adam(0.001), 'mse', run_eagerly=testing_utils.should_run_eagerly())

    np_inputs = nest.map_structure(lambda x: np.ones((10, 10), 'float32'),
                                   inputs)
    np_outputs = nest.map_structure(lambda x: np.ones((10, 10), 'float32'),
                                    outputs)
    model.fit(np_inputs, np_outputs, batch_size=2)
Developer: Wajih-O, Project: tensorflow, Lines: 11, Source: tensorflow_op_layer_test.py

Example 13: _reset_padding

    def _reset_padding(self,
                       memory,
                       memory_sequence_length,
                       check_inner_dims_defined=True):
        """Reset the padding part for encoder inputs.
        This function comes from TensorFlow's `_prepare_memory` function.
        """
        memory = nest.map_structure(
                lambda m: ops.convert_to_tensor(m, name="memory"), memory)
        if memory_sequence_length is not None:
            memory_sequence_length = ops.convert_to_tensor(
                memory_sequence_length, name="memory_sequence_length")
        if check_inner_dims_defined:

            def _check_dims(m):
                if not m.get_shape()[2:].is_fully_defined():
                    raise ValueError(
                        "Expected memory %s to have fully defined inner dims, "
                        "but saw shape: %s" % (m.name, m.get_shape()))

            nest.map_structure(_check_dims, memory)
        if memory_sequence_length is None:
            seq_len_mask = None
        else:
            seq_len_mask = array_ops.sequence_mask(
                memory_sequence_length,
                maxlen=array_ops.shape(nest.flatten(memory)[0])[1],
                dtype=nest.flatten(memory)[0].dtype)
            seq_len_batch_size = (memory_sequence_length.shape[0].value or
                                  array_ops.shape(memory_sequence_length)[0])

        def _maybe_mask(m, seq_len_mask):
            rank = m.get_shape().ndims
            rank = rank if rank is not None else array_ops.rank(m)
            extra_ones = array_ops.ones(rank - 2, dtype=dtypes.int32)
            m_batch_size = m.shape[0].value or array_ops.shape(m)[0]
            if memory_sequence_length is not None:
                message = ("memory_sequence_length and memory tensor "
                           "batch sizes do not match.")
                with ops.control_dependencies([
                        check_ops.assert_equal(
                            seq_len_batch_size, m_batch_size, message=message)
                ]):
                    seq_len_mask = array_ops.reshape(
                        seq_len_mask,
                        array_ops.concat(
                            (array_ops.shape(seq_len_mask), extra_ones), 0))
                return m * seq_len_mask
            else:
                return m

        return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask),
                                  memory)
Developer: absorbguo, Project: Paddle, Lines: 53, Source: machine_translation.py

Example 14: step

  def step(self, time, inputs, state, name=None):
    """Perform a decoding step.

    Args:
      time: scalar `int32` tensor.
      inputs: A (structure of) input tensors.
      state: A (structure of) state tensors and TensorArrays.
      name: Name scope for any created operations.

    Returns:
      `(outputs, next_state, next_inputs, finished)`.
    """
    batch_size = self._batch_size
    beam_width = self._beam_width
    end_token = self._end_token
    length_penalty_weight = self._length_penalty_weight

    with ops.name_scope(name, "BeamSearchDecoderStep", (time, inputs, state)):
      cell_state = state.cell_state
      inputs = nest.map_structure(self._merge_batch_beams, inputs)
      cell_state = nest.map_structure(self._maybe_merge_batch_beams, cell_state)
      try:
        cell_outputs, next_cell_state = self._cell(
            inputs, cell_state, tiling_factor=beam_width)
      except TypeError as e:
        if "unexpected keyword argument 'tiling_factor'" in str(e):
          cell_outputs, next_cell_state = self._cell(inputs, cell_state)
        else:
          raise

      cell_outputs = nest.map_structure(self._split_batch_beams, cell_outputs)
      next_cell_state = nest.map_structure(self._maybe_split_batch_beams,
                                           next_cell_state)

      if self._output_layer is not None:
        cell_outputs = self._output_layer(cell_outputs)

      beam_search_output, beam_search_state = _beam_search_step(
          time=time,
          logits=cell_outputs,
          beam_state=state,
          batch_size=batch_size,
          beam_width=beam_width,
          end_token=end_token,
          length_penalty_weight=length_penalty_weight)
      finished = beam_search_state.finished
      sample_ids = beam_search_output.predicted_ids
      next_inputs = control_flow_ops.cond(
          math_ops.reduce_all(finished), lambda: self._start_inputs,
          lambda: self._embedding_fn(sample_ids))

    return (beam_search_output, beam_search_state, next_inputs, finished)
Developer: LUTAN, Project: tensorflow, Lines: 52, Source: beam_search_decoder.py
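The _merge_batch_beams/_split_batch_beams helpers used above are internal; a hedged sketch of the reshapes they perform, assuming fully static shapes:

import tensorflow as tf

def merge_batch_beams(t, batch_size, beam_width):
  # [batch, beam, ...] -> [batch * beam, ...]
  return tf.reshape(t, [batch_size * beam_width] + t.shape.as_list()[2:])

def split_batch_beams(t, batch_size, beam_width):
  # [batch * beam, ...] -> [batch, beam, ...]
  return tf.reshape(t, [batch_size, beam_width] + t.shape.as_list()[1:])

x = tf.ones([3, 4, 8])                      # [batch, beam, depth]
merged = merge_batch_beams(x, 3, 4)         # [12, 8]
restored = split_batch_beams(merged, 3, 4)  # [3, 4, 8]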

Example 15: maybe_concatenate_auxiliary_inputs

        def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
          """Concatenate outputs with auxiliary inputs, if they exist."""
          if self._auxiliary_input_tas is None:
            return outputs_

          next_time = time + 1
          auxiliary_inputs = nest.map_structure(
              lambda ta: ta.read(next_time), self._auxiliary_input_tas)
          if indices is not None:
            auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
          return nest.map_structure(
              lambda x, y: array_ops.concat((x, y), -1),
              outputs_, auxiliary_inputs)
Developer: AlbertXiebnu, Project: tensorflow, Lines: 13, Source: helper.py


Note: the tensorflow.python.util.nest.map_structure examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and use or redistribution should follow each project's license. Do not reproduce without permission.