

Python array_ops.pack Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.ops.array_ops.pack. If you are wondering exactly how the Python pack function is used, how to call it, or where to find examples of it in practice, the hand-picked code samples below may help.


Fifteen code examples of the pack function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
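
As background for the examples below: array_ops.pack (exposed publicly as tf.pack) packs a list of rank-R tensors into a single rank-(R+1) tensor by stacking them along a new first dimension; it was later renamed stack (tf.stack), which is why some examples use both names. A minimal sketch, assuming a pre-1.0 TensorFlow installation where tf.pack still exists:

import tensorflow as tf

# Two rank-1 tensors of shape [2].
a = tf.constant([1, 2])
b = tf.constant([3, 4])

# pack stacks them along a new first dimension -> shape [2, 2].
c = tf.pack([a, b])          # renamed tf.stack in later versions

with tf.Session() as sess:
    print(sess.run(c))       # [[1 2] [3 4]]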

Example 1: zero_state

  def zero_state(self, batch_size, dtype):
    """Return zero-filled state tensor(s).

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      If `state_size` is an int or TensorShape, then the return value is an
      `N-D` tensor of shape `[batch_size x state_size]` filled with zeros.

      If `state_size` is a nested list or tuple, then the return value is
      a nested list or tuple (of the same structure) of `2-D` tensors with
      the shapes `[batch_size x s]` for each s in `state_size`.
    """
    state_size = self.state_size
    if nest.is_sequence(state_size):
      state_size_flat = nest.flatten(state_size)
      zeros_flat = [
          array_ops.zeros(
              array_ops.pack(_state_size_with_prefix(s, prefix=[batch_size])),
              dtype=dtype)
          for s in state_size_flat]
      for s, z in zip(state_size_flat, zeros_flat):
        z.set_shape(_state_size_with_prefix(s, prefix=[None]))
      zeros = nest.pack_sequence_as(structure=state_size,
                                    flat_sequence=zeros_flat)
    else:
      zeros_size = _state_size_with_prefix(state_size, prefix=[batch_size])
      zeros = array_ops.zeros(array_ops.pack(zeros_size), dtype=dtype)
      zeros.set_shape(_state_size_with_prefix(state_size, prefix=[None]))

    return zeros
Developer: 821760408-sp, Project: tensorflow, Lines: 33, Source: rnn_cell.py
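
The array_ops.pack(...) call above is what lets zero_state work when batch_size is only known at run time: a plain Python list such as [batch_size, s] cannot be converted to a shape once batch_size is a Tensor, so the shape is itself assembled as a 1-D tensor. A hypothetical usage sketch (the cell class and placeholder shapes are assumptions, not taken from the example):

import tensorflow as tf

# Dynamic batch size read off a placeholder.
inputs = tf.placeholder(tf.float32, shape=[None, 32])
batch_size = tf.shape(inputs)[0]

# zero_state builds zeros of shape [batch_size, s] for every state part,
# relying on pack internally because batch_size is a scalar Tensor here.
cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=64)
initial_state = cell.zero_state(batch_size, tf.float32)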

Example 2: testConst

  def testConst(self):
    np.random.seed(7)
    with self.test_session(use_gpu=True):
      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
        data = np.random.randn(*shape).astype(np.float32)
        # Pack back into a single tensorflow tensor directly using np array
        c = array_ops.pack(data)
        # This is implemented via a Const:
        self.assertEqual(c.op.type, "Const")
        self.assertAllEqual(c.eval(), data)

        # Python lists also work for 1-D case:
        if len(shape) == 1:
          data_list = list(data)
          cl = array_ops.pack(data_list)
          self.assertEqual(cl.op.type, "Const")
          self.assertAllEqual(cl.eval(), data)

          cl = array_ops.stack(data_list)
          self.assertEqual(cl.op.type, "Const")
          self.assertAllEqual(cl.eval(), data)

      # Verify that shape induction works with shapes produced via const pack
      a = constant_op.constant([1, 2, 3, 4, 5, 6])
      b = array_ops.reshape(a, array_ops.pack([2, 3]))
      self.assertAllEqual(b.get_shape(), [2, 3])

      b = array_ops.reshape(a, array_ops.stack([2, 3]))
      self.assertAllEqual(b.get_shape(), [2, 3])
Developer: kdavis-mozilla, Project: tensorflow, Lines: 29, Source: pack_op_test.py
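
The test above relies on the fact that packing raw numpy data (or a Python list of numbers) is folded into a single Const node, whereas packing existing graph tensors produces an actual Pack op. A small sketch of that distinction, assuming pre-1.0 TensorFlow:

import numpy as np
import tensorflow as tf

data = np.random.randn(3, 2).astype(np.float32)

# Packing a numpy array goes through convert_to_tensor -> one Const node.
c = tf.pack(data)
print(c.op.type)    # "Const"

# Packing existing Tensors creates a real Pack op in the graph.
p = tf.pack([tf.constant([1.0, 2.0]), tf.constant([3.0, 4.0])])
print(p.op.type)    # "Pack"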

Example 3: __call__

  def __call__(self,
               inputs,
               initial_state=None,
               dtype=None,
               sequence_length=None,
               scope=None):
    is_list = isinstance(inputs, list)
    if self._use_dynamic_rnn:
      if is_list:
        inputs = array_ops.pack(inputs)
      outputs, state = rnn.dynamic_rnn(
          self._cell,
          inputs,
          sequence_length=sequence_length,
          initial_state=initial_state,
          dtype=dtype,
          time_major=True,
          scope=scope)
      if is_list:
        # Convert outputs back to list
        outputs = array_ops.unpack(outputs)
    else:  # non-dynamic rnn
      if not is_list:
        inputs = array_ops.unpack(inputs)
      outputs, state = rnn.rnn(self._cell,
                               inputs,
                               initial_state=initial_state,
                               dtype=dtype,
                               sequence_length=sequence_length,
                               scope=scope)
      if not is_list:
        # Convert outputs back to tensor
        outputs = array_ops.pack(outputs)

    return outputs, state
Developer: MostafaGazar, Project: tensorflow, Lines: 35, Source: fused_rnn_cell.py
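
The wrapper above uses pack/unpack purely as adapters between the two RNN interfaces: the static rnn.rnn expects a Python list of [batch, depth] tensors, while rnn.dynamic_rnn(time_major=True) expects a single [time, batch, depth] tensor. A minimal sketch of that round trip (the shapes and time-step count are assumptions):

import tensorflow as tf

# A list of 5 per-time-step inputs, each of shape [batch, 16].
step_inputs = [tf.placeholder(tf.float32, [None, 16]) for _ in range(5)]

# pack the list into one time-major tensor of shape [5, batch, 16],
# the layout dynamic_rnn(time_major=True) consumes.
time_major = tf.pack(step_inputs)

# unpack reverses the conversion back into a list of [batch, 16] tensors.
step_list_again = tf.unpack(time_major)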

Example 4: zero_state

  def zero_state(self, batch_size, dtype):
    """Return zero-filled state tensor(s).

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      If `state_size` is an int, then the return value is a `2-D` tensor of
      shape `[batch_size x state_size]` filled with zeros.

      If `state_size` is a list or tuple of ints, then the return value is
      a tuple of `2-D` tensors with shape
      `[batch_size x s] for s in state_size`.
    """
    state_size = self.state_size
    if isinstance(state_size, (list, tuple)):
      zeros = tuple(
          array_ops.zeros(array_ops.pack([batch_size, s]), dtype=dtype)
          for s in state_size)
      for s, z in zip(state_size, zeros):
        z.set_shape([None, s])
    else:
      zeros = array_ops.zeros(
          array_ops.pack([batch_size, state_size]), dtype=dtype)
      zeros.set_shape([None, state_size])

    return zeros
Developer: 0-T-0, Project: tensorflow, Lines: 28, Source: rnn_cell.py
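
Here pack is used only to build a shape that mixes a run-time value (batch_size) with static ints: tf.zeros cannot take a Python list containing a Tensor, but it does accept a 1-D shape tensor. A small sketch of the pattern in isolation (the sizes are assumptions):

import tensorflow as tf

batch_size = tf.placeholder(tf.int32, shape=[])   # scalar, fed at run time
state_size = 128

# Assemble the dynamic shape [batch_size, 128] as a 1-D tensor with pack.
zeros = tf.zeros(tf.pack([batch_size, state_size]), dtype=tf.float32)

# Recover the statically known part of the shape for later shape inference.
zeros.set_shape([None, state_size])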

Example 5: zero_state

  def zero_state(self, batch_size, dtype):
    """Return zero-filled state tensor(s).

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      If `state_size` is an int, then the return value is a `2-D` tensor of
      shape `[batch_size x state_size]` filled with zeros.

      If `state_size` is a nested list or tuple, then the return value is
      a nested list or tuple (of the same structure) of `2-D` tensors with
      the shapes `[batch_size x s]` for each s in `state_size`.
    """
    state_size = self.state_size
    if _is_sequence(state_size):
      state_size_flat = _unpacked_state(state_size)
      zeros_flat = [
          array_ops.zeros(array_ops.pack([batch_size, s]), dtype=dtype)
          for s in state_size_flat]
      for s, z in zip(state_size_flat, zeros_flat):
        z.set_shape([None, s])
      zeros = _packed_state(structure=state_size, state=zeros_flat)
    else:
      zeros = array_ops.zeros(
          array_ops.pack([batch_size, state_size]), dtype=dtype)
      zeros.set_shape([None, state_size])

    return zeros
Developer: MISingularity, Project: tensorflow, Lines: 30, Source: rnn_cell.py

Example 6: crop_to_1d_bounding_box

def crop_to_1d_bounding_box(image, offset_height, target_height,
                         dynamic_shape=False):
  """Crops an image to a specified bounding box.

  This op cuts a rectangular part out of `image`. The top-left corner of the
  returned image is at `offset_height, offset_width` in `image`, and its
  lower-right corner is at
  `offset_height + target_height, offset_width + target_width`.

  Args:
    image: 3-D tensor with shape `[height, width, channels]`
    offset_height: Vertical coordinate of the top-left corner of the result in
                   the input.
    target_height: Height of the result.
    dynamic_shape: Whether the input image has an undetermined shape. If set
      to `True`, shape information will be retrieved at run time. Defaults to
      `False`.

  Returns:
    3-D tensor of image with shape `[target_height, target_width, channels]`

  Raises:
    ValueError: If the shape of `image` is incompatible with the `offset_*` or
    `target_*` arguments, and `dynamic_shape` is set to `False`.
  """
  image = tf.convert_to_tensor(image, name='image')
  height, _ = _ImageDimensions(image, dynamic_shape=dynamic_shape)

  cropped = array_ops.slice(image,
                            array_ops.pack([offset_height, 0]),
                            array_ops.pack([target_height, -1]))

  return cropped
Developer: 255BITS, Project: hyperchamber-gan, Lines: 33, Source: resize_audio_patch.py
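
The crop is expressed as array_ops.slice with begin and size vectors assembled by pack, where a size of -1 means "keep everything remaining in that dimension". A minimal sketch of the same pattern on a 3-D image (the shapes and offsets are assumptions, not taken from the project above):

import tensorflow as tf

image = tf.placeholder(tf.float32, [None, None, 3])  # [height, width, channels]
offset_height = tf.constant(10)
target_height = tf.constant(100)

# begin/size are built as 1-D tensors; -1 keeps the full width and depth.
cropped = tf.slice(image,
                   tf.pack([offset_height, 0, 0]),
                   tf.pack([target_height, -1, -1]))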

Example 7: to_weighted_sum

  def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    """Returns a Tensor as linear predictions and a list of created Variable."""
    dimension = self.source_column.dimension
    batch_size = array_ops.shape(input_tensor)[0]

    if dimension > 1:
      i1 = array_ops.reshape(array_ops.tile(array_ops.expand_dims(
          math_ops.range(0, batch_size), 1), [1, dimension]), [-1])
      i2 = array_ops.tile(math_ops.range(0, dimension), [batch_size])
      # Flatten the bucket indices and unique them across dimensions
      # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
      # TODO(chapelle): move that logic to insert_transformed_feature to ensure
      #   unique buckets across dimensions after crossing.
      bucket_indices = array_ops.reshape(input_tensor, [-1]) + self.length * i2
    else:
      # Simpler indices when dimension=1
      i1 = math_ops.range(0, batch_size)
      i2 = array_ops.zeros([batch_size], dtype=dtypes.int32)
      bucket_indices = array_ops.reshape(input_tensor, [-1])

    indices = math_ops.to_int64(array_ops.transpose(array_ops.pack((i1, i2))))
    shape = math_ops.to_int64(array_ops.pack([batch_size, 1]))
    sparse_id_values = ops.SparseTensor(indices, bucket_indices, shape)
    vocab_size = self.length * self.source_column.dimension

    return _create_embedding_lookup(
        sparse_id_values, vocab_size, num_outputs,
        _add_variable_collection(weight_collections), 0., "sum",
        trainable, self.name + "_weights")
Developer: YanLongDong, Project: tensorflow, Lines: 33, Source: feature_column.py
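
The index arithmetic above flattens a [batch_size, dimension] tensor of bucket ids and shifts each column's ids into its own range of `length` values, so buckets stay unique across dimensions before pack builds the sparse indices. A toy trace with made-up numbers (batch_size=2, dimension=3, length=4 and the input values are assumptions chosen for readability):

import tensorflow as tf

batch_size, dimension, length = 2, 3, 4

# i1 repeats every row index `dimension` times: [0 0 0 1 1 1]
i1 = tf.reshape(tf.tile(tf.expand_dims(tf.range(0, batch_size), 1),
                        [1, dimension]), [-1])
# i2 cycles through the column index for every row: [0 1 2 0 1 2]
i2 = tf.tile(tf.range(0, dimension), [batch_size])

# pack + transpose pairs them up into [n, 2] sparse indices.
indices = tf.transpose(tf.pack((i1, i2)))   # [[0 0] [0 1] [0 2] [1 0] ...]

# Column d's bucket ids are shifted into [d*length, (d+1)*length).
input_tensor = tf.constant([[1, 2, 3], [0, 1, 2]])
bucket_indices = tf.reshape(input_tensor, [-1]) + length * i2

with tf.Session() as sess:
    print(sess.run(bucket_indices))   # [ 1  6 11  0  5 10]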

Example 8: confusion_matrix

def confusion_matrix(predictions, labels, num_classes=None,
                     dtype=dtypes.int32, name=None):
  """Computes the confusion matrix from predictions and labels.

  Calculate the Confusion Matrix for a pair of prediction and
  label 1-D int arrays.

  Considering a prediction array such as: `[1, 2, 3]`
  And a label array such as: `[2, 2, 3]`

  The returned confusion matrix would be the following one:
      [[0, 0, 0, 0]
       [0, 0, 1, 0]
       [0, 0, 1, 0]
       [0, 0, 0, 1]]

  Where the matrix rows represent the prediction labels and the columns
  represent the real labels. The confusion matrix is always a 2-D array
  of shape [n, n], where n is the number of valid labels for a given
  classification task. Both prediction and labels must be 1-D arrays of
  the same shape in order for this function to work.

  Args:
    predictions: A 1-D array representing the predictions for a given
                 classification.
    labels: A 1-D array representing the real labels for the classification
            task.
    num_classes: The possible number of labels the classification task can
                 have. If this value is not provided, it will be calculated
                 using both predictions and labels array.
    dtype: Data type of the confusion matrix.
    name: Scope name.

  Returns:
    A k x k matrix representing the confusion matrix, where k is the number of
    possible labels in the classification task.

  Raises:
    ValueError: If `predictions` and `labels` are not 1-D vectors of the
                same size.
  """
  with ops.name_scope(name, 'confusion_matrix',
                      [predictions, labels, num_classes]) as name:
    predictions, labels = metric_ops_util.remove_squeezable_dimensions(
        ops.convert_to_tensor(
            predictions, name='predictions', dtype=dtypes.int64),
        ops.convert_to_tensor(labels, name='labels', dtype=dtypes.int64))

    if num_classes is None:
      num_classes = math_ops.maximum(math_ops.reduce_max(predictions),
                                     math_ops.reduce_max(labels)) + 1

    shape = array_ops.pack([num_classes, num_classes])
    indices = array_ops.transpose(array_ops.pack([predictions, labels]))
    values = array_ops.ones_like(predictions, dtype)
    cm_sparse = ops.SparseTensor(
        indices=indices, values=values, shape=shape)
    zero_matrix = array_ops.zeros(math_ops.to_int32(shape), dtype)

    return sparse_ops.sparse_add(zero_matrix, cm_sparse)
Developer: JamesFysh, Project: tensorflow, Lines: 59, Source: confusion_matrix_ops.py
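
A self-contained toy version of the same construction, using the arrays from the docstring; sparse_to_dense stands in here for the SparseTensor + sparse_add pair because these toy indices contain no duplicates (an assumption the real implementation does not make):

import tensorflow as tf

predictions = tf.constant([1, 2, 3])
labels = tf.constant([2, 2, 3])
num_classes = 4

# Each (prediction, label) pair addresses one matrix cell; transposing the
# packed pair turns two length-n vectors into an [n, 2] index matrix.
indices = tf.transpose(tf.pack([predictions, labels]))
values = tf.ones_like(predictions)
shape = tf.pack([num_classes, num_classes])

cm = tf.sparse_to_dense(indices, shape, values)

with tf.Session() as sess:
    print(sess.run(cm))
    # [[0 0 0 0]
    #  [0 0 1 0]
    #  [0 0 1 0]
    #  [0 0 0 1]]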

Example 9: build_memory

    def build_memory(self, M_prev, read_w_prev, write_w_prev, last_output):
        with tf.variable_scope("memory"):
            # 3.1 Reading
            if self.read_head_size == 1:
                read_w, read = self.build_read_head(M_prev, tf.reshape(read_w_prev, [-1, 1]), last_output, 0)
            else:
                read_w_list = []
                read_list = []

                for idx in xrange(self.read_head_size):
                    read_w_prev_idx = tf.reshape(tf.gather(read_w_prev, idx), [-1, 1])

                    read_w_idx, read_idx = self.build_read_head(M_prev, read_w_prev_idx, last_output, idx)

                    read_w_list.append(tf.transpose(read_w_idx))
                    read_list.append(tf.reshape(read_idx, [1, self.mem_size, self.mem_dim]))

                read_w = array_ops.pack(read_w_list)
                read = array_ops.pack(read_list)

            # 3.2 Writing
            if self.write_head_size == 1:
                write_w, write, erase = self.build_write_head(M_prev, tf.reshape(write_w_prev, [-1, 1]),
                                                              last_output, 0)

                M_erase = tf.ones([self.mem_size, self.mem_dim]) - OuterProd(write_w, erase)
                M_write = OuterProd(write_w, write)
            else:
                write_w_list = []
                write_list = []
                erase_list = []

                M_erases = []
                M_writes = []

                for idx in xrange(self.write_head_size):
                    write_w_prev_idx = tf.reshape(tf.gather(write_w_prev, idx), [-1, 1])

                    write_w_idx, write_idx, erase_idx = self.build_write_head(M_prev, write_w_prev_idx,
                                                                              last_output, idx)

                    write_w_list.append(tf.transpose(write_w_idx))
                    write_list.append(tf.reshape(write_idx, [1, self.mem_size, self.mem_dim]))
                    erase_list.append(tf.reshape(erase_idx, [1, 1, self.mem_dim]))

                    M_erases.append(tf.ones([self.mem_size, self.mem_dim]) * OuterProd(write_w_idx, erase_idx))
                    M_writes.append(OuterProd(write_w_idx, write_idx))

                write_w = array_ops.pack(write_w_list)
                write = array_ops.pack(write_list)
                erase = array_ops.pack(erase_list)

                M_erase = reduce(lambda x, y: x*y, M_erases)
                M_write = tf.add_n(M_writes)

            M = M_prev * M_erase + M_write

            return M, read_w, write_w, read
Developer: ramtej, Project: NTM-tensorflow, Lines: 58, Source: model.py

Example 10: crop_to_bounding_box

def crop_to_bounding_box(image, offset_height, offset_width, target_height,
                         target_width):
  """Crops an image to a specified bounding box.

  This op cuts a rectangular part out of `image`. The top-left corner of the
  returned image is at `offset_height, offset_width` in `image`, and its
  lower-right corner is at
  `offset_height + target_height, offset_width + target_width`.

  Args:
    image: 3-D tensor with shape `[height, width, channels]`
    offset_height: Vertical coordinate of the top-left corner of the result in
                   the input.
    offset_width: Horizontal coordinate of the top-left corner of the result in
                  the input.
    target_height: Height of the result.
    target_width: Width of the result.

  Returns:
    3-D tensor of image with shape `[target_height, target_width, channels]`

  Raises:
    ValueError: If the shape of `image` is incompatible with the `offset_*` or
      `target_*` arguments, or either `offset_height` or `offset_width` is
      negative, or either `target_height` or `target_width` is not positive.
  """
  image = ops.convert_to_tensor(image, name='image')

  assert_ops = []
  assert_ops += _Check3DImage(image, require_static=False)

  height, width, depth = _ImageDimensions(image, static_only=False)

  assert_ops += _assert(offset_width >= 0, ValueError,
                        'offset_width must be >= 0.')
  assert_ops += _assert(offset_height >= 0, ValueError,
                        'offset_height must be >= 0.')
  assert_ops += _assert(target_width > 0, ValueError,
                        'target_width must be > 0.')
  assert_ops += _assert(target_height > 0, ValueError,
                        'target_height must be > 0.')
  assert_ops += _assert(width >= (target_width + offset_width), ValueError,
                        'width must be >= target + offset.')
  assert_ops += _assert(height >= (target_height + offset_height), ValueError,
                        'height must be >= target + offset.')
  image = control_flow_ops.with_dependencies(assert_ops, image)

  cropped = array_ops.slice(
    image,
    array_ops.pack([offset_height, offset_width, 0]),
    array_ops.pack([target_height, target_width, -1]))

  cropped_shape = [None if is_tensor(i) else i
                   for i in [target_height, target_width, depth]]
  cropped.set_shape(cropped_shape)

  return cropped
Developer: 31H0B1eV, Project: tensorflow, Lines: 57, Source: image_ops.py

Example 11: testOpsBetweenUnreachable

  def testOpsBetweenUnreachable(self):
    with ops.Graph().as_default() as g:
      t1 = constant(1.0)
      t2 = constant(2.0)
      _ = array_ops.pack([t1, t2])
      t4 = constant(1.0)
      t5 = constant(2.0)
      t6 = array_ops.pack([t4, t5])
    # Elements of to_ops are always listed.
    self._assertOpListEqual([t6.op], _OpsBetween(g, [t6.op], [t1.op]))
Developer: Ambier, Project: tensorflow, Lines: 10, Source: gradients_test.py

Example 12: testIndexedSlicesToTensorList

  def testIndexedSlicesToTensorList(self):
    with self.test_session():
      numpy_list = []
      dense_list = []
      sparse_list = []
      for _ in range(3):
        np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
        c = constant_op.constant(np_val)
        c_sparse = math_ops._as_indexed_slices(c)
        numpy_list.append(np_val)
        dense_list.append(c)
        sparse_list.append(c_sparse)
      packed_dense = array_ops.pack(dense_list)
      packed_sparse = array_ops.pack(sparse_list)
      self.assertAllClose(packed_dense.eval(), packed_sparse.eval())
Developer: Ambier, Project: tensorflow, Lines: 15, Source: gradients_test.py

Example 13: sample

  def sample(self, n, seed=None, name=None):
    """Sample `n` observations from the Multivariate Normal Distributions.

    Args:
      n: `Scalar`, type int32, the number of observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by broadcasting the hyperparameters.
    """
    with ops.op_scope(
        [self._mu, self._sigma_chol, n], name, "MultivariateNormalSample"):
      # TODO(ebrevdo): Is there a better way to get broadcast_shape?
      broadcast_shape = self.mu.get_shape()
      n = ops.convert_to_tensor(n)
      sigma_shape_left = array_ops.slice(
          array_ops.shape(self._sigma_chol),
          [0], array_ops.pack([array_ops.rank(self._sigma_chol) - 2]))

      k_n = array_ops.pack([self._k, n])
      shape = array_ops.concat(0, [sigma_shape_left, k_n])
      white_samples = random_ops.random_normal(
          shape=shape, mean=0, stddev=1, dtype=self._mu.dtype, seed=seed)

      correlated_samples = math_ops.batch_matmul(
          self._sigma_chol, white_samples)

      # Move the last dimension to the front
      perm = array_ops.concat(
          0,
          (array_ops.pack([array_ops.rank(correlated_samples) - 1]),
           math_ops.range(0, array_ops.rank(correlated_samples) - 1)))

      # TODO(ebrevdo): Once we get a proper tensor contraction op,
      # perform the inner product using that instead of batch_matmul
      # and this slow transpose can go away!
      correlated_samples = array_ops.transpose(correlated_samples, perm)

      samples = correlated_samples + self.mu

      # Provide some hints to shape inference
      n_val = tensor_util.constant_value(n)
      final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
      samples.set_shape(final_shape)

      return samples
Developer: 0-T-0, Project: tensorflow, Lines: 48, Source: mvn.py
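
The shape juggling above concatenates the batch shape of sigma_chol (everything except its last two dimensions) with [k, n] to get the shape of the white-noise tensor. A toy illustration of just that shape construction (the concrete sizes are assumptions):

import tensorflow as tf

# A batch of 3 distributions of dimension k = 2; request n = 5 samples.
sigma_chol = tf.zeros([3, 2, 2])   # stand-in for a [batch, k, k] Cholesky factor
k = tf.constant(2)
n = tf.constant(5)

# Batch shape = all but the last two dims of sigma_chol's shape: [3].
sigma_shape_left = tf.slice(tf.shape(sigma_chol),
                            [0], tf.pack([tf.rank(sigma_chol) - 2]))

# Final noise shape: batch shape + [k, n] -> [3, 2, 5].
shape = tf.concat(0, [sigma_shape_left, tf.pack([k, n])])

with tf.Session() as sess:
    print(sess.run(shape))   # [3 2 5]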

Example 14: seq2seq_inputs

def seq2seq_inputs(x, y, input_length, output_length, sentinel=None, name=None):
  """Processes inputs for Sequence to Sequence models.

  Args:
    x: Input Tensor [batch_size, input_length, embed_dim].
    y: Output Tensor [batch_size, output_length, embed_dim].
    input_length: length of input x.
    output_length: length of output y.
    sentinel: optional first input to decoder and final output expected.
      If sentinel is not provided, zeros are used. Because y is not available
      at sampling time, the shape of sentinel is inferred from x.
    name: Operation name.

  Returns:
    Encoder input from x, and decoder inputs and outputs from y.
  """
  with ops.op_scope([x, y], name, "seq2seq_inputs"):
    in_x = array_ops.split_squeeze(1, input_length, x)
    y = array_ops.split_squeeze(1, output_length, y)
    if not sentinel:
      # Set to zeros of shape of y[0], using x for batch size.
      sentinel_shape = array_ops_.pack(
          [array_ops_.shape(x)[0], y[0].get_shape()[1]])
      sentinel = array_ops_.zeros(sentinel_shape)
      sentinel.set_shape(y[0].get_shape())
    in_y = [sentinel] + y
    out_y = y + [sentinel]
    return in_x, in_y, out_y
Developer: 0ruben, Project: tensorflow, Lines: 28, Source: seq2seq_ops.py

Example 15: inference_graph

  def inference_graph(self, input_data, data_spec=None, **inference_args):
    """Constructs a TF graph for evaluating a random forest.

    Args:
      input_data: A tensor or SparseTensor or placeholder for input data.
      data_spec: A list of tf.dtype values specifying the original types of
        each column.
      **inference_args: Keyword arguments to pass through to each tree.

    Returns:
      The last op in the random forest inference graph.
    """
    data_spec = [constants.DATA_FLOAT] if data_spec is None else data_spec
    probabilities = []
    for i in range(self.params.num_trees):
      with ops.device(self.device_assigner.get_device(i)):
        tree_data = input_data
        if self.params.bagged_features:
          tree_data = self._bag_features(i, input_data)
        probabilities.append(self.trees[i].inference_graph(
            tree_data, data_spec, **inference_args))
    with ops.device(self.device_assigner.get_device(0)):
      all_predict = array_ops.pack(probabilities)
      return math_ops.div(
          math_ops.reduce_sum(all_predict, 0), self.params.num_trees,
          name='probabilities')
Developer: JamesFysh, Project: tensorflow, Lines: 26, Source: tensor_forest.py
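
The forest's prediction is just the per-tree probability tensors packed into a [num_trees, batch, num_classes] tensor and then averaged over the first axis. A toy sketch of that averaging step (the probabilities are made-up assumptions):

import tensorflow as tf

# Three "trees", each emitting class probabilities of shape [batch, num_classes].
tree_probs = [tf.constant([[0.2, 0.8]]),
              tf.constant([[0.6, 0.4]]),
              tf.constant([[0.4, 0.6]])]

# pack -> shape [3, 1, 2]; summing over axis 0 and dividing by the
# number of trees gives the forest's averaged prediction.
all_predict = tf.pack(tree_probs)
probabilities = tf.div(tf.reduce_sum(all_predict, 0), 3.0)

with tf.Session() as sess:
    print(sess.run(probabilities))   # [[0.4 0.6]]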


Note: The tensorflow.python.ops.array_ops.pack examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project licenses. Do not reproduce without permission.