

Python nest.pack_sequence_as Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.util.nest.pack_sequence_as. If you have been puzzling over questions such as "How exactly is pack_sequence_as used?" or "What does real-world pack_sequence_as code look like?", the hand-picked examples below may help.


Fifteen code examples of pack_sequence_as are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
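
Before diving into the examples, here is a minimal sketch of the round trip between nest.flatten and nest.pack_sequence_as (the nested structure and values below are invented purely for illustration): flatten turns an arbitrarily nested structure into a flat Python list, and pack_sequence_as rebuilds a structure of the same shape from any flat sequence of matching length.

from tensorflow.python.util import nest

# An arbitrarily nested structure of tuples (values chosen for illustration).
structure = ((1, 2), 3, (4, (5, 6)))

flat = nest.flatten(structure)
# flat == [1, 2, 3, 4, 5, 6]

# Repack a different flat sequence of the same length into the same shape.
repacked = nest.pack_sequence_as(structure, ["a", "b", "c", "d", "e", "f"])
# repacked == (("a", "b"), "c", ("d", ("e", "f")))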

Example 1: testFlattenAndPack

  def testFlattenAndPack(self):
    structure = ((3, 4), 5, (6, 7, (9, 10), 8))
    flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
    self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
    self.assertEqual(
        nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
                                                 ("d", "e", ("f", "g"), "h")))
    point = collections.namedtuple("Point", ["x", "y"])
    structure = (point(x=4, y=2), ((point(x=1, y=0),),))
    flat = [4, 2, 1, 0]
    self.assertEqual(nest.flatten(structure), flat)
    restructured_from_flat = nest.pack_sequence_as(structure, flat)
    self.assertEqual(restructured_from_flat, structure)
    self.assertEqual(restructured_from_flat[0].x, 4)
    self.assertEqual(restructured_from_flat[0].y, 2)
    self.assertEqual(restructured_from_flat[1][0][0].x, 1)
    self.assertEqual(restructured_from_flat[1][0][0].y, 0)

    self.assertEqual([5], nest.flatten(5))
    self.assertEqual([np.array([5])], nest.flatten(np.array([5])))

    self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
    self.assertEqual(
        np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))

    with self.assertRaisesRegexp(ValueError, "Structure is a scalar"):
      nest.pack_sequence_as("scalar", [4, 5])

    with self.assertRaisesRegexp(TypeError, "flat_sequence"):
      nest.pack_sequence_as([4, 5], "bad_sequence")

    with self.assertRaises(ValueError):
      nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
Developer ID: AlbertXiebnu, Project: tensorflow, Lines: 33, Source: nest_test.py

Example 2: _tpu_run

def _tpu_run(strategy, fn, args, kwargs):
  """Common implementation of TPUStrategy.experimental_run_v2."""
  if context.executing_eagerly() and not ops.inside_function():
    raise NotImplementedError(
        "Eager mode not supported in TPUStrategy outside TF functions.")

  if kwargs is None:
    kwargs = {}

  # Used to re-structure flattened output tensors from `tpu.replicate()`
  # into a structured format.
  result = [[]]

  def replicated_fn(replica_id, replica_args, replica_kwargs):
    """Wraps user function to provide replica ID and `Tensor` inputs."""
    with _TPUReplicaContext(strategy, replica_id_in_sync_group=replica_id):
      result[0] = fn(*replica_args, **replica_kwargs)
    return result[0]

  replicate_inputs = []  # By replica.
  for i in range(strategy.num_replicas_in_sync):
    replicate_inputs.append(
        [constant_op.constant(i, dtype=dtypes.int32),
         values.select_replica(i, args),
         values.select_replica(i, kwargs)])

  # Construct and pass `maximum_shapes` so that we could support dynamic
  # shapes using dynamic padder.
  if replicate_inputs:
    maximum_shapes = []
    flattened_list = nest.flatten(replicate_inputs[0])
    for input_tensor in flattened_list:
      maximum_shapes.append(input_tensor.get_shape())
    maximum_shapes = nest.pack_sequence_as(replicate_inputs[0],
                                           maximum_shapes)
  else:
    maximum_shapes = None

  with strategy.scope():
    replicate_outputs = tpu.replicate(replicated_fn, replicate_inputs,
                                      maximum_shapes=maximum_shapes)

  # Remove all no-ops that may have been added during `tpu.replicate()`.
  if isinstance(result[0], list):
    result[0] = [
        output for output in result[0] if tensor_util.is_tensor(output)
    ]

  # Workaround for `tpu.replicate` behaviour when a single `Tensor` is returned.
  replicate_outputs = [
      nest.pack_sequence_as(result[0], nest.flatten(replica_output))
      for replica_output in replicate_outputs
  ]

  device_map = strategy.extended._device_map  # pylint: disable=protected-access
  return values.regroup(device_map, replicate_outputs)
Developer ID: aritratony, Project: tensorflow, Lines: 56, Source: tpu_strategy.py

Example 3: testPackDictOrder

  def testPackDictOrder(self):
    """Packing orders dicts by key, including OrderedDicts."""
    ordered = collections.OrderedDict([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
    plain = {"d": 0, "b": 0, "a": 0, "c": 0}
    seq = [0, 1, 2, 3]
    ordered_reconstruction = nest.pack_sequence_as(ordered, seq)
    plain_reconstruction = nest.pack_sequence_as(plain, seq)
    self.assertEqual(
        collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
        ordered_reconstruction)
    self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
Developer ID: Huoxubeiyin, Project: tensorflow, Lines: 11, Source: nest_test.py

Example 4: testPackDictOrder

  def testPackDictOrder(self, mapping_type):
    """Packing orders dicts by key, including OrderedDicts."""
    custom = mapping_type([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
    plain = {"d": 0, "b": 0, "a": 0, "c": 0}
    seq = [0, 1, 2, 3]
    custom_reconstruction = nest.pack_sequence_as(custom, seq)
    plain_reconstruction = nest.pack_sequence_as(plain, seq)
    self.assertIsInstance(custom_reconstruction, mapping_type)
    self.assertIsInstance(plain_reconstruction, dict)
    self.assertEqual(
        mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
        custom_reconstruction)
    self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
Developer ID: ThunderQi, Project: tensorflow, Lines: 13, Source: nest_test.py
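
As Examples 3 and 4 show, nest.flatten and nest.pack_sequence_as both traverse dict values in sorted-key order rather than insertion order, which is what keeps the two operations consistent with each other. A minimal sketch of this behavior, with invented values:

from tensorflow.python.util import nest

d = {"b": 0, "a": 0}

nest.flatten(d)
# [0, 0] -- values visited in sorted-key order: "a", then "b"

nest.pack_sequence_as(d, [1, 2])
# {"a": 1, "b": 2} -- flat values assigned back in the same sorted-key order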

Example 5: _PostProcessOutput

def _PostProcessOutput(extended_acc_state, extended_final_state, func_cell,
                       total_time, inputs_lengths, is_reversed):
  """Post-process output of recurrent.

  This function takes the accumulated extended state and extracts the requested
  state and output.

  When `inputs_lengths` has been set, it extracts the output from the
  accumulated state. It also sets the outputs past `inputs_lengths` to zero.

  When `is_reversed` is true, the output will be reversed in this function.

  It also sets the static shape information.

  Args:
    extended_acc_state: A structure containing the accumulated state at each
      time. It may contain the output at each time as well.
    extended_final_state: A structure containing the final state. It may
      contain the output at the final time.
    func_cell: The functional wrapper around the cell.
    total_time: A scalar integer tensor.
    inputs_lengths: An integer tensor with one entry per input.
    is_reversed: A boolean to indicate if the sequence is reversed.

  Returns:
    A tuple with the outputs at each time, and the final state.
  """
  if inputs_lengths is None or is_reversed:
    flat_final_state = func_cell.MaybeRemoveOutputFromState(
        nest.flatten(extended_final_state))
    tf_state = nest.pack_sequence_as(func_cell.state_template, flat_final_state)
  else:
    # The accumulated state is over the entire sequence, so we pick it
    # out from the acc_state sequence.
    flat_acc_state = func_cell.MaybeRemoveOutputFromState(
        nest.flatten(extended_acc_state))
    acc_state = nest.pack_sequence_as(
        func_cell.state_template, flat_acc_state)
    tf_state = _PickFinalStateFromHistory(acc_state, inputs_lengths)

  output_from_state = func_cell.GetOutputFromState(extended_acc_state)
  if is_reversed:
    output_from_state = array_ops.reverse(output_from_state, [0])
  tf_output = array_ops.transpose(output_from_state, [1, 0, 2])
  tf_output.set_shape(
      [func_cell.output_shape[0], total_time, func_cell.output_shape[1]])
  if inputs_lengths is not None:
    # Need to set the outputs past `inputs_lengths` to zero.
    tf_output = _ApplyLengthsToBatch(inputs_lengths, tf_output)
  _SetShapeFromTemplate(tf_state, func_cell.state_template)
  return tf_output, tf_state
Developer ID: Ajaycs99, Project: tensorflow, Lines: 51, Source: functional_rnn.py

Example 6: get_next

  def get_next(self, name=None):
    """See `tf.data.Iterator.get_next`."""
    self._get_next_call_count += 1
    if self._get_next_call_count > iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD:
      warnings.warn(iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE)

    flat_result = []
    # TODO(priyag): This will fail if the input size (typically number of
    # batches) is not divisible by number of devices.
    # How do we handle that more gracefully / let the user know?
    for buffer_resource in self._buffering_resources:
      flat_ret = gen_dataset_ops.function_buffering_resource_get_next(
          buffer_resource,
          output_types=data_nest.flatten(sparse.as_dense_types(
              self.output_types, self.output_classes)), name=name)

      ret = sparse.deserialize_sparse_tensors(
          data_nest.pack_sequence_as(self.output_types, flat_ret),
          self.output_types, self.output_shapes, self.output_classes)

      for tensor, shape in zip(
          data_nest.flatten(ret), data_nest.flatten(self.output_shapes)):
        if isinstance(tensor, ops.Tensor):
          tensor.set_shape(shape)
      flat_result.append(ret)

    return nest.pack_sequence_as(self._devices, flat_result)
Developer ID: AnishShah, Project: tensorflow, Lines: 27, Source: prefetching_ops_v2.py

Example 7: loop_fn

  def loop_fn(i):
    sequence_length_i = array_ops.gather(sequence_length, i)

    def body_fn(t, state, ta):
      inputs_t = array_ops.expand_dims(
          array_ops.gather(inputs_ta.read(t), i), 0)
      output, new_state = cell(inputs_t, state)
      output = array_ops.reshape(output, [-1])
      # TODO(agarwal): one optimization that dynamic_rnn uses is to avoid the
      # array_ops.where when t < min(sequence_length). Doing that requires
      # supporting tf.cond pfor conversion.
      done = t >= sequence_length_i
      output = array_ops.where(done, zeros, output)
      ta = ta.write(t, output)
      new_state = [array_ops.where(done, s, ns) for s, ns in
                   zip(nest.flatten(state), nest.flatten(new_state))]
      new_state = nest.pack_sequence_as(state, new_state)
      return t + 1, new_state, ta

    def condition_fn(t, _, unused):
      del unused
      return t < max_steps

    initial_state = cell.zero_state(1, dtypes.float32)
    _, state, ta = control_flow_ops.while_loop(condition_fn, body_fn, [
        0, initial_state,
        tensor_array_ops.TensorArray(dtypes.float32, max_steps)
    ])

    new_state = [array_ops.reshape(x, [-1]) for x in nest.flatten(state)]
    new_state = nest.pack_sequence_as(initial_state, new_state)
    return ta.stack(), new_state
Developer ID: aritratony, Project: tensorflow, Lines: 32, Source: control_flow_ops_test.py

Example 8: _deep_copy_state

def _deep_copy_state(complex_state_tuple, name):
    flat_state = nest.flatten(complex_state_tuple)
    flat_state_copy = [tf.identity(s, name="{}_{}".format(name, i))
                       for i, s in enumerate(flat_state)]
    state_copy = nest.pack_sequence_as(complex_state_tuple,
                                       flat_state_copy)
    return state_copy
Developer ID: gokhanettin, Project: driverless-car, Lines: 7, Source: model.py

Example 9: _time_step

  def _time_step(time, next_input, output_ta_t, state):
    for input_, shape in zip(next_input, inputs_got_shape):
      input_.set_shape(shape)

    input_t = nest.pack_sequence_as(structure=input, flat_sequence=next_input)
    # import tensorflow as tf
    # input_t = tf.Print(input_t, [input_t, time], "input_t, time")

    call_cell = lambda: cell(input_t, state)

    if sequence_length is not None:
      (output, new_state) = _rnn_step(
          time=time,
          sequence_length=sequence_length,
          min_sequence_length=min_sequence_length,
          max_sequence_length=max_sequence_length,
          zero_output=zero_output,
          state=state,
          call_cell=call_cell,
          state_size=state_size,
          skip_conditionals=True)
    else:
      (output, new_state) = call_cell()

    # Pack state if using state tuples
    output = nest.flatten(output)

    output_ta_t = tuple(
        ta.write(time, out) for ta, out in zip(output_ta_t, output))

    return (time + 1, output, output_ta_t, new_state)
Developer ID: alexeyche, Project: alexeyche-junk, Lines: 32, Source: gen_rnn.py

Example 10: testRegularizers

  def testRegularizers(self, trainable, state_size):
    batch_size = 6

    # Set the attribute on the class, since we can't set properties of
    # abstract classes.
    snt.RNNCore.state_size = state_size
    flat_state_size = nest.flatten(state_size)
    core = snt.RNNCore(name="dummy_core")
    flat_regularizer = ([tf.contrib.layers.l1_regularizer(scale=0.5)] *
                        len(flat_state_size))
    trainable_regularizers = nest.pack_sequence_as(
        structure=state_size, flat_sequence=flat_regularizer)

    core.initial_state(batch_size, dtype=tf.float32, trainable=trainable,
                       trainable_regularizers=trainable_regularizers)

    graph_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    if not trainable:
      self.assertFalse(graph_regularizers)
    else:
      self.assertEqual(len(graph_regularizers), len(flat_state_size))
      if not tf.executing_eagerly():
        for i in range(len(flat_state_size)):
          self.assertRegexpMatches(
              graph_regularizers[i].name, ".*l1_regularizer.*")
Developer ID: ccchang0111, Project: sonnet, Lines: 25, Source: rnn_core_test.py

Example 11: restore

  def restore(self, file_prefix):
    """Restore the saveable objects from a checkpoint with `file_prefix`.

    Args:
      file_prefix: A string or scalar string Tensor containing the prefix for
        files to read from.

    Returns:
      A dictionary mapping from SaveableObject names to restore operations.
    """
    restore_specs = []
    tensor_structure = []
    for saveable in self._saveable_objects:
      saveable_tensor_structure = []
      tensor_structure.append(saveable_tensor_structure)
      for spec in saveable.specs:
        saveable_tensor_structure.append(spec.name)
        restore_specs.append((spec.name, spec.slice_spec, spec.dtype))
    tensor_names, tensor_slices, tensor_dtypes = zip(*restore_specs)
    with ops.device("cpu:0"):
      restored_tensors = io_ops.restore_v2(
          file_prefix, tensor_names, tensor_slices, tensor_dtypes)
    structured_restored_tensors = nest.pack_sequence_as(
        tensor_structure, restored_tensors)
    restore_ops = {}
    for saveable, restored_tensors in zip(self._saveable_objects,
                                          structured_restored_tensors):
      restore_ops[saveable.name] = saveable.restore(
          restored_tensors, restored_shapes=None)
    return restore_ops
Developer ID: adit-chandra, Project: tensorflow, Lines: 30, Source: functional_saver.py

Example 12: _pack_dict

def _pack_dict(dct, lst):
  d, keys = Struct(), sorted(dct)
  for k in keys:
    n = len(nest.flatten(dct[k]))
    d[k] = nest.pack_sequence_as(dct[k], lst[:n])
    lst = lst[n:]
  return d
Developer ID: codealphago, Project: pixelsnail-public, Lines: 7, Source: tf_utils.py

Example 13: decorated

  def decorated(*args, **kwds):
    """Computes the value and gradient of the decorated function."""
    parameter_positions = _get_arg_spec(f, params, args)
    assert not kwds, "The gradient function can't take keyword arguments."
    this_tape = tape.push_new_tape(persistent=persistent)
    try:
      sources = []
      args = [
          ops.convert_to_tensor(args[i])
          if i in parameter_positions else args[i]
          for i in range(len(args))
      ]
      args = _ensure_unique_tensor_objects(parameter_positions, args)
      for i in parameter_positions:
        sources.append(args[i])
        tape.watch(this_tape, args[i])
      result = f(*args)
      if result is None:
        raise ValueError("Cannot differentiate a function that returns None; "
                         "did you forget to return a value from {}?".format(
                             f.__name__))
      flat_result = nest.flatten(result)
      flat_result = [gen_array_ops.identity(x) for x in flat_result]
      result = nest.pack_sequence_as(result, flat_result)
    finally:
      tape.pop_tape(this_tape)
    def vjp(dy=None):
      if dy is not None:
        dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]
      return imperative_grad.imperative_grad(
          this_tape, nest.flatten(result), sources, output_gradients=dy)

    return result, vjp
Developer ID: adit-chandra, Project: tensorflow, Lines: 33, Source: backprop.py

Example 14: _create

    def _create(self, encoder_output, decoder_state_size, **kwargs):
        """ Creates decoder's initial RNN states according to
        `decoder_state_size`.

        Creates a tf variable and passes it to the decoder.
        Args:
            encoder_output: An instance of `collections.namedtuple`
              from `Encoder.encode()`.
            decoder_state_size: RNN decoder state size.
            **kwargs:

        Returns: The decoder states with the structure determined
          by `decoder_state_size`.
        """
        batch_size = tf.shape(encoder_output.attention_length)[0]
        name = kwargs["name"] if "name" in kwargs else None
        state_size_splits = nest.flatten(decoder_state_size)
        total_decoder_state_size = sum(state_size_splits)
        with tf.variable_scope(name or "init_state"):
            init_state_total = tf.get_variable(
                name="init_states", shape=(total_decoder_state_size,),
                dtype=tf.float32, initializer=tf.zeros_initializer)
        init_state_total = tf.tile([init_state_total], [batch_size, 1])
        init_state = nest.pack_sequence_as(
            decoder_state_size,
            tf.split(init_state_total, state_size_splits, axis=1))
        return init_state
Developer ID: KIngpon, Project: NJUNMT-tf, Lines: 27, Source: bridges.py

Example 15: _concrete_function_callable_with

def _concrete_function_callable_with(function, inputs, allow_conversion):
  """Returns whether concrete `function` can be called with `inputs`."""
  expected_structure = function.graph.structured_input_signature
  try:
    flatten_inputs = nest.flatten_up_to(expected_structure, inputs)
  except (TypeError, ValueError):
    return False
  try:
    # Verify that no input elements were dropped during flattening.
    repacked = nest.pack_sequence_as(expected_structure, flatten_inputs)
    # TODO(b/129422719): Namedtuple subclasses re-created through
    # saved_model.load don't compare equal in type to the original in
    # assert_same_structure. Fix that and we can take out check_types=False
    # here.
    nest.assert_same_structure(inputs, repacked, check_types=False)
  except (TypeError, ValueError):
    return False

  for arg, expected in zip(flatten_inputs, nest.flatten(expected_structure)):
    if isinstance(expected, tensor_spec.TensorSpec):
      if allow_conversion:
        arg = _try_convert_to_tensor_spec(arg, dtype_hint=expected.dtype)
      if not _is_tensor(arg) and not isinstance(arg, tensor_spec.TensorSpec):
        return False
      if arg.dtype != expected.dtype:
        return False
      if not expected.shape.is_compatible_with(arg.shape):
        return False
    else:
      if arg != expected:
        return False
  return True
Developer ID: adit-chandra, Project: tensorflow, Lines: 32, Source: function_deserialization.py


Note: The tensorflow.python.util.nest.pack_sequence_as examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors, and any distribution or use should follow the corresponding project's license. Please do not repost without permission.