

Python tensor_shape.as_shape Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.framework.tensor_shape.as_shape. If you have been wondering what exactly as_shape does, how to call it, or what it looks like in real code, the hand-picked examples below should help.


The following presents 15 code examples of the as_shape function, sorted by popularity by default.
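Before walking through the examples, here is a minimal self-contained sketch (written for this article, not taken from any of the projects below) of what as_shape itself does: it converts a Python list, tuple, TensorShape, or TensorShapeProto into a TensorShape, and returns TensorShape arguments unchanged. The values are illustrative only.

from tensorflow.python.framework import tensor_shape

# Lists or tuples of ints (None marks an unknown dimension) become TensorShape objects.
shape = tensor_shape.as_shape([32, None, 128])
assert shape.as_list() == [32, None, 128]

# An existing TensorShape is returned as-is rather than copied.
existing = tensor_shape.TensorShape([32, None, 128])
assert tensor_shape.as_shape(existing) is existing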

Example 1: _unshard_shape

  def _unshard_shape(self, shape):
    """Return the unsharded shape that would generate a given sharded shape.

    Args:
      shape: the sharded shape to unshard

    Returns:
      The unsharded shape.

    Raises:
      ValueError: if shape is unknown or does not contain
        self.shard_dimension
      TypeError: if shape is not convertible to a TensorShape
    """
    shape = tensor_shape.as_shape(shape)
    if self._number_of_shards == 1:
      # Don't do anything when there's only one shard.
      return shape
    ndims = shape.ndims
    if ndims is None:
      raise ValueError("shape must be a specified shape not Unknown")
    if ndims <= self._shard_dimension:
      raise ValueError("shape %s does not contain shard_dimension %d" %
                       (shape.as_list(), self._shard_dimension))
    dims = shape.as_list()
    dims[self._shard_dimension] *= self._number_of_shards
    return tensor_shape.as_shape(dims)
Developer ID: Ajaycs99, Project: tensorflow, Lines of code: 27, Source file: tpu_sharding.py

Example 2: compute_output_shape

  def compute_output_shape(self, input_shape):
    if isinstance(input_shape, list):
      input_shape = input_shape[0]

    if _is_multiple_state(self.cell.state_size):
      state_size = self.cell.state_size
    else:
      state_size = [self.cell.state_size]

    if getattr(self.cell, 'output_size', None) is not None:
      output_dim = tensor_shape.as_shape(self.cell.output_size).as_list()
    else:
      # Note that state_size[0] could be a tensor_shape or int.
      output_dim = tensor_shape.as_shape(state_size[0]).as_list()

    if self.return_sequences:
      output_shape = tuple([input_shape[0], input_shape[1]] + output_dim)
    else:
      output_shape = tuple([input_shape[0]] + output_dim)

    if self.return_state:
      state_shape = [
          tuple([input_shape[0]] + tensor_shape.as_shape(dim).as_list())
          for dim in state_size
      ]
      return [output_shape] + state_shape
    else:
      return output_shape
Developer ID: bunbutter, Project: tensorflow, Lines of code: 28, Source file: unified_rnn_test.py

Example 3: _concat

def _concat(prefix, suffix, static=False):
  """Concat that enables int, Tensor, or TensorShape values.

  This function takes a size specification, which can be an integer, a
  TensorShape, or a Tensor, and converts it into a concatenated Tensor
  (if static = False) or a list of integers (if static = True).

  Args:
    prefix: The prefix; usually the batch size (and/or time step size).
      (TensorShape, int, or Tensor.)
    suffix: TensorShape, int, or Tensor.
    static: If `True`, return a python list with possibly unknown dimensions.
      Otherwise return a `Tensor`.

  Returns:
    shape: the concatenation of prefix and suffix.

  Raises:
    ValueError: if `suffix` is not a scalar or vector (or TensorShape).
    ValueError: if `prefix` or `suffix` was `None` when a dynamic `Tensor`
      was requested (i.e. `static` is False).
  """
  if isinstance(prefix, ops.Tensor):
    p = prefix
    p_static = tensor_util.constant_value(prefix)
    if p.shape.ndims == 0:
      p = array_ops.expand_dims(p, 0)
    elif p.shape.ndims != 1:
      raise ValueError("prefix tensor must be either a scalar or vector, "
                       "but saw tensor: %s" % p)
  else:
    p = tensor_shape.as_shape(prefix)
    p_static = p.as_list() if p.ndims is not None else None
    p = (constant_op.constant(p.as_list(), dtype=dtypes.int32)
         if p.is_fully_defined() else None)
  if isinstance(suffix, ops.Tensor):
    s = suffix
    s_static = tensor_util.constant_value(suffix)
    if s.shape.ndims == 0:
      s = array_ops.expand_dims(s, 0)
    elif s.shape.ndims != 1:
      raise ValueError("suffix tensor must be either a scalar or vector, "
                       "but saw tensor: %s" % s)
  else:
    s = tensor_shape.as_shape(suffix)
    s_static = s.as_list() if s.ndims is not None else None
    s = (constant_op.constant(s.as_list(), dtype=dtypes.int32)
         if s.is_fully_defined() else None)

  if static:
    shape = tensor_shape.as_shape(p_static).concatenate(s_static)
    shape = shape.as_list() if shape.ndims is not None else None
  else:
    if p is None or s is None:
      raise ValueError("Provided a prefix or suffix of None: %s and %s"
                       % (prefix, suffix))
    shape = array_ops.concat((p, s), 0)
  return shape
Developer ID: AbhinavJain13, Project: tensorflow, Lines of code: 58, Source file: rnn_cell_impl.py
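As a quick illustration of the static path of _concat, the following sketch (written for this article, assuming the function above and the tensor_shape module are in scope) combines an integer batch size with a per-cell state shape into a plain Python list:

# Illustrative values only: prefix is a batch size, suffix is a state shape.
batch_size = 8
state_size = tensor_shape.TensorShape([3, 4])
static_shape = _concat(batch_size, state_size, static=True)
# as_shape(8) yields the rank-1 shape [8], so the concatenated result is [8, 3, 4].
assert static_shape == [8, 3, 4]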

Example 4: make_attr

def make_attr(attr_type, value):
  if attr_type == pywrap_tensorflow.TF_ATTR_TYPE:
    return dtypes.as_dtype(value)
  elif attr_type == [pywrap_tensorflow.TF_ATTR_TYPE]:
    return [dtypes.as_dtype(v) for v in value]
  elif attr_type == pywrap_tensorflow.TF_ATTR_SHAPE:
    return tensor_shape.as_shape(value).as_proto()
  elif attr_type == [pywrap_tensorflow.TF_ATTR_SHAPE]:
    return [tensor_shape.as_shape(v).as_proto() for v in value]
  return value
Developer ID: andrewharp, Project: tensorflow, Lines of code: 10, Source file: backprop.py

Example 5: _duplicated_test

 def _duplicated_test(self,
                      init,
                      shape=None,
                      dtype=dtypes.float32):
   if shape is None:
     shape = [100]
   t1 = self.evaluate(init(shape, dtype))
   t2 = self.evaluate(init(shape, dtype))
   self.assertEqual(tensor_shape.as_shape(shape), t1.shape)
   self.assertEqual(tensor_shape.as_shape(shape), t2.shape)
   self.assertFalse(np.allclose(t1, t2, rtol=1e-15, atol=1e-15))
Developer ID: Wajih-O, Project: tensorflow, Lines of code: 11, Source file: init_ops_v2_test.py

Example 6: get_sharded_shape

  def get_sharded_shape(self, shape, shard_index=None):
    """Returns the shape of a shard of a full Tensor.

    When given the shape of a 'full-size' Tensor, returns the shape of
    the sub-Tensor after it has been sharded. Freezes the policy if it
    has not yet been frozen.

    Args:
      shape: The shape of the full-size Tensor to be sharded.
      shard_index: The index of the shard whose shape should be returned.
        shard_index can be None for sharding policies that use the same
        shape for every shard.

    Returns:
      The shape of the sharded version of the Tensor.

    Raises:
      ValueError: If shard_index is None when shards are of different
        shapes; or shard_index is not None and
        !(0<=shard_index<number_of_shards); or shape does not have at
        least self.shard_dimension+1 dimensions; or the value of
        shape's shard dimension is not a multiple of
        self.number_of_shards
    """
    if self._shard_dimension is None or self._number_of_shards is None:
      # Don't raise an error if the config is unset.
      return None
    if shard_index is not None:
      if shard_index < 0 or shard_index >= self.number_of_shards:
        raise ValueError("shard_index %d, but must be in [0,%d)." %
                         (shard_index, self._number_of_shards))
    shape = tensor_shape.as_shape(shape)
    if self._number_of_shards == 1:
      # Don't do anything when there's only one shard.
      return shape
    ndims = shape.ndims
    if ndims is None:
      raise ValueError("shape must be a specified shape not Unknown")
    if ndims <= self._shard_dimension:
      raise ValueError("shape %s does not contain shard_dimension %d" %
                       (shape.as_list(), self._shard_dimension))
    dims = shape.as_list()
    if dims[self._shard_dimension] is None:
      raise ValueError("shape %s must have a fixed size for dimension %d "
                       "that is known at graph construction time." %
                       (shape.as_list(), self._shard_dimension))
    if (dims[self._shard_dimension] % self._number_of_shards) != 0:
      raise ValueError("shape %s cannot be sharded %d ways along dimension %d" %
                       (shape.as_list(), self._number_of_shards,
                        self._shard_dimension))
    dims[self._shard_dimension] //= self._number_of_shards  # floor division keeps the dimension an int
    return tensor_shape.as_shape(dims)
Developer ID: Ajaycs99, Project: tensorflow, Lines of code: 53, Source file: tpu_sharding.py
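The sharding arithmetic in get_sharded_shape (and its inverse in Example 1) boils down to dividing or multiplying a single dimension. The standalone sketch below, written for this article and omitting the policy's freezing and error checks, shows the round trip using tensor_shape alone:

from tensorflow.python.framework import tensor_shape

number_of_shards = 4
shard_dimension = 0

full = tensor_shape.as_shape([8, 10])
dims = full.as_list()
dims[shard_dimension] //= number_of_shards   # shard: [8, 10] -> [2, 10]
sharded = tensor_shape.as_shape(dims)

dims = sharded.as_list()
dims[shard_dimension] *= number_of_shards    # unshard: [2, 10] -> [8, 10]
assert tensor_shape.as_shape(dims) == full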

Example 7: testConvertFromProto

  def testConvertFromProto(self):
    proto = tensor_util.MakeTensorShapeProto([])
    self.assertEqual(tensor_shape.TensorShape([]),
                     tensor_shape.TensorShape(proto))
    self.assertEqual(tensor_shape.TensorShape([]),
                     tensor_shape.as_shape(proto))

    proto = tensor_util.MakeTensorShapeProto([1, 37, 42])
    self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),
                     tensor_shape.TensorShape(proto))
    self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),
                     tensor_shape.as_shape(proto))
Developer ID: DapengLan, Project: tensorflow, Lines of code: 12, Source file: tensor_shape_test.py

Example 8: _identical_test

 def _identical_test(self,
                     init1,
                     init2,
                     assertion,
                     shape=None,
                     dtype=dtypes.float32):
   if shape is None:
     shape = [100]
   t1 = self.evaluate(init1(shape, dtype))
   t2 = self.evaluate(init2(shape, dtype))
   self.assertEqual(tensor_shape.as_shape(shape), t1.shape)
   self.assertEqual(tensor_shape.as_shape(shape), t2.shape)
   self.assertEqual(assertion, np.allclose(t1, t2, rtol=1e-15, atol=1e-15))
Developer ID: Wajih-O, Project: tensorflow, Lines of code: 13, Source file: init_ops_v2_test.py

Example 9: __init__

  def __init__(self,
               initial_value=None,
               name=None,
               trainable=True,
               collections=None,
               dtype=None,
               shape=None):
    """Creates a variable.

    Args:
      initial_value: A `Tensor` or Python object convertible to a `Tensor`
        representing the initial value of this variable.
      name: The name of this variable. Automatically uniquified.
      trainable: Whether the global read of this variable will be used for
        training.
      collections: Additional collections to which the `read` operation for
        this variable is to be added. Defaults to [].
      dtype: The type of this variable. Can be omitted if it can be deduced
        from the initial_value. If different from the type of the initial
        value it will be cast to this type.
      shape: The shape of this variable. Only specify if there is no initial
        value but shape inference is desired.
    """
    if initial_value is not None:
      initial_value = ops.convert_to_tensor(initial_value)
    if dtype is None:
      assert initial_value is not None, ("Trying to create a resource variable "
                                         "with no dtype or initial value. At"
                                         " least one of these must be set.")
      dtype = initial_value.dtype
    elif initial_value is not None:
      initial_value = math_ops.cast(initial_value, dtype)
    if shape is None:
      if initial_value is not None:
        shape = initial_value.get_shape().as_proto()
      else:
        shape = tensor_shape.unknown_shape()
    else:
      shape = tensor_shape.as_shape(shape)

    self._dtype = dtype
    with ops.name_scope(name, "Variable", [initial_value]) as name:
      self._handle = var_handle_op(shared_name=name,
                                   name=name,
                                   dtype=dtype,
                                   shape=shape)

      with ops.name_scope("IsInitialized"):
        self._is_initialized_op = var_is_initialized_op(self._handle)
      if initial_value is not None:
        with ops.name_scope("Create"):
          self._initialize_op = create_variable_op(self._handle, initial_value)
        resources.register_resource(self._handle,
                                    self._initialize_op,
                                    self._is_initialized_op)

      with ops.name_scope("Read"):
        self._value = read_variable_op(self._handle, dtype=self._dtype)
      _register_dense_variable_read(
          self._value, trainable=trainable, collections=collections)
Developer ID: brchiu, Project: tensorflow, Lines of code: 60, Source file: resource_variable_ops.py

Example 10: _as_shape_list

def _as_shape_list(shapes, dtypes, unknown_dim_allowed=False,
                   unknown_rank_allowed=False):
  """Convert shapes to a list of tuples of int (or None)."""
  if unknown_dim_allowed:
    if (not isinstance(shapes, collections.Sequence)
        or not shapes
        or any(shape is None or isinstance(shape, int) for shape in shapes)):
      raise ValueError(
          "When providing partial shapes, a list of shapes must be provided.")
  if shapes is None: return None
  if isinstance(shapes, tensor_shape.TensorShape):
    shapes = [shapes]
  if not isinstance(shapes, (tuple, list)):
    raise TypeError(
        "shapes must be a TensorShape or a list or tuple of TensorShapes.")
  if all(shape is None or isinstance(shape, int) for shape in shapes):
    # We have a single shape.
    shapes = [shapes]
  shapes = [tensor_shape.as_shape(shape) for shape in shapes]
  if not unknown_dim_allowed:
    if any([not shape.is_fully_defined() for shape in shapes]):
      raise ValueError("All shapes must be fully defined: %s" % shapes)
  if not unknown_rank_allowed:
    if any([shape.dims is None for shape in shapes]):
      raise ValueError("All shapes must have a defined rank: %s" % shapes)

  return shapes
Developer ID: 0-T-0, Project: tensorflow, Lines of code: 27, Source file: data_flow_ops.py

Example 11: partial_shape_to_tensor

def partial_shape_to_tensor(shape_like):
  """Returns a `tf.Tensor` that represents the given shape.

  Args:
    shape_like: A value that can be converted to a `tf.TensorShape` or a
      `tf.Tensor`.

  Returns:
    A 1-D `tf.Tensor` of `tf.int64` elements representing the given shape, where
    `-1` is substituted for any unknown dimensions.
  """
  try:
    # First attempt to convert the input to a shape, and return the
    # "canonical" tensor representation, which uses `-1` in place of
    # `None`.
    shape_like = tensor_shape.as_shape(shape_like)
    return ops.convert_to_tensor(
        [dim if dim is not None else -1 for dim in shape_like.as_list()],
        dtype=dtypes.int64)
  except (TypeError, ValueError):
    # The argument was not trivially convertible to a
    # `tf.TensorShape`, so fall back on the conversion to tensor
    # machinery.
    ret = ops.convert_to_tensor(shape_like, preferred_dtype=dtypes.int64)
    if ret.shape.dims is not None and len(ret.shape.dims) != 1:
      raise ValueError("The given shape %s must be a 1-D tensor of tf.int64 "
                       "values, but the shape was %s."
                       % (shape_like, ret.shape))
    if ret.dtype != dtypes.int64:
      raise TypeError("The given shape %s must be a 1-D tensor of tf.int64 "
                      "values, but the element type was %s."
                      % (shape_like, ret.dtype.name))

    return ret
Developer ID: AnishShah, Project: tensorflow, Lines of code: 34, Source file: convert.py
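For reference, a short sketch of how partial_shape_to_tensor behaves on a partially known shape. This is written for this article and assumes eager execution plus the imports used in the snippet above; the numbers are illustrative:

# None dimensions are replaced by -1 in the canonical tensor form.
shape_tensor = partial_shape_to_tensor(tensor_shape.TensorShape([None, 3]))
# shape_tensor is a 1-D tensor of dtype tf.int64 holding the values [-1, 3].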

Example 12: testUnknownInputChannels

  def testUnknownInputChannels(self):
    images = random_ops.random_uniform((5, 7, 9, 4))
    images._shape = tensor_shape.as_shape((5, 7, 9, None))
    layer = conv_layers.Conv2D(32, [3, 3], activation=nn_ops.relu)
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(images)

    images = random_ops.random_uniform((5, 4, 7, 9))
    images._shape = tensor_shape.as_shape((5, None, 7, 9))
    layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(images)
Developer ID: Dr4KK, Project: tensorflow, Lines of code: 16, Source file: convolutional_test.py

Example 13: testUnknownInputChannelsConv1D

  def testUnknownInputChannelsConv1D(self):
    data = random_ops.random_uniform((5, 4, 7))
    data._shape = tensor_shape.as_shape((5, 4, None))
    layer = conv_layers.Conv1D(32, 3, activation=nn_ops.relu)
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(data)

    data = random_ops.random_uniform((5, 7, 4))
    data._shape = tensor_shape.as_shape((5, None, 4))
    layer = conv_layers.Conv1D(32, 3, data_format='channels_first')
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(data)
Developer ID: Dr4KK, Project: tensorflow, Lines of code: 16, Source file: convolutional_test.py

Example 14: _default_getter

def _default_getter(name, shape, dtype, initializer=None,
                    partition_info=None, **kwargs):
  """A pared-down version of get_variable which does not reuse variables."""
  dtype = dtypes.as_dtype(dtype)
  shape_object = tensor_shape.as_shape(shape)
  with ops.init_scope():
    if initializer is None:
      initializer, initializing_from_value = (
          variable_scope._get_default_variable_store()._get_default_initializer(  # pylint: disable=protected-access
              name=name, shape=shape_object, dtype=dtype))
    else:
      initializing_from_value = not callable(initializer)
    # Same logic as get_variable
    variable_dtype = dtype.base_dtype
    if initializing_from_value:
      if shape is not None:
        raise ValueError("If initializer is a constant, do not specify shape.")
      initial_value = initializer
    else:
      # Instantiate initializer if provided initializer is a type object.
      if isinstance(initializer, type(init_ops.Initializer)):
        initializer = initializer(dtype=dtype)
      def initial_value():
        return initializer(
            shape_object.as_list(), dtype=dtype, partition_info=partition_info)
    return resource_variable_ops.ResourceVariable(
        initial_value=initial_value,
        name=name,
        dtype=variable_dtype,
        **kwargs
    )
Developer ID: Jackiefan, Project: tensorflow, Lines of code: 31, Source file: checkpointable_utils.py

Example 15: _merge_batch_beams

 def _merge_batch_beams(self, t, s=None):
     """Merges the tensor from a batch of beams into a batch by beams.
     More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
     reshape this into [batch_size*beam_width, s]
     Args:
       t: Tensor of dimension [batch_size, beam_width, s]
       s: (Possibly known) depth shape.
     Returns:
       A reshaped version of t with dimension [batch_size * beam_width, s].
     """
     if isinstance(s, ops.Tensor):
         s = tensor_shape.as_shape(tensor_util.constant_value(s))
     else:
         s = tensor_shape.TensorShape(s)
     t_shape = tf.shape(t)
     static_batch_size = tensor_util.constant_value(self._batch_size)
     batch_size_beam_width = (
         None if static_batch_size is None
         else static_batch_size * self._beam_width)
     reshaped_t = tf.reshape(
         t, tf.concat(
             ([self._batch_size * self._beam_width], t_shape[2:]), 0))
     reshaped_t.set_shape(
         (tensor_shape.TensorShape([batch_size_beam_width]).concatenate(s)))
     return reshaped_t
Developer ID: seasky100, Project: tensorflow_end2end_speech_recognition, Lines of code: 25, Source file: beam_search_decoder_from_tensorflow.py


Note: The tensorflow.python.framework.tensor_shape.as_shape examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's license before redistributing or using the code, and do not reproduce this article without permission.