

Python common_shapes.broadcast_shape Function Code Examples

This article compiles typical usage examples of the Python function tensorflow.python.framework.common_shapes.broadcast_shape. If you are wondering what broadcast_shape does, how to call it, or what it looks like in real code, the curated examples below should help.


The following presents 15 code examples of the broadcast_shape function, ordered by popularity by default.
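
Before the examples, a minimal sketch of the function itself may help: broadcast_shape takes two tf.TensorShape values and returns their NumPy-style broadcast shape, raising ValueError when the shapes are incompatible. The shapes below are arbitrary illustrations, and the snippet assumes a TF 1.x-era environment in which this internal module is importable.

```python
# Minimal sketch (assumes a TF 1.x-era install where this internal
# module is importable); the shapes are arbitrary illustrations.
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import tensor_shape

shape_a = tensor_shape.TensorShape([2, 1, 5])
shape_b = tensor_shape.TensorShape([3, 1])

# Dimensions are aligned from the right; a dimension of 1 broadcasts
# against any other size, exactly as in NumPy.
print(common_shapes.broadcast_shape(shape_a, shape_b))  # (2, 3, 5)

# Incompatible shapes raise ValueError.
try:
    common_shapes.broadcast_shape(
        tensor_shape.TensorShape([2, 3]), tensor_shape.TensorShape([4]))
except ValueError as err:
    print("incompatible:", err)
```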

Example 1: _assert_incompatible_broadcast

 def _assert_incompatible_broadcast(self, shape1, shape2):
   if shape1.dims is not None and shape2.dims is not None:
     zeros1 = np.zeros(shape1.as_list())
     zeros2 = np.zeros(shape2.as_list())
     with self.assertRaises(ValueError):
       np.broadcast(zeros1, zeros2)
     with self.assertRaises(ValueError):
       np.broadcast(zeros2, zeros1)
   with self.assertRaises(ValueError):
     common_shapes.broadcast_shape(shape1, shape2)
   with self.assertRaises(ValueError):
     common_shapes.broadcast_shape(shape2, shape1)
Developer: AlbertXiebnu, Project: tensorflow, Lines: 12, Source: common_shapes_test.py

Example 2: _assert_broadcast

 def _assert_broadcast(self, expected, shape1, shape2):
   if shape1.dims is not None and shape2.dims is not None:
     expected_np = expected.as_list()
     zeros1 = np.zeros(shape1.as_list())
     zeros2 = np.zeros(shape2.as_list())
     self.assertAllEqual(expected_np, np.broadcast(zeros1, zeros2).shape)
     self.assertAllEqual(expected_np, np.broadcast(zeros2, zeros1).shape)
     self.assertEqual(
         expected, common_shapes.broadcast_shape(shape1, shape2))
     self.assertEqual(
         expected, common_shapes.broadcast_shape(shape2, shape1))
   else:
     self.assertEqual(expected, common_shapes.broadcast_shape(shape1, shape2))
     self.assertEqual(expected, common_shapes.broadcast_shape(shape2, shape1))
Developer: AlbertXiebnu, Project: tensorflow, Lines: 14, Source: common_shapes_test.py

Example 3: __init__

  def __init__(self,
               df,
               mu,
               sigma,
               validate_args=True,
               allow_nan_stats=False,
               name="StudentT"):
    """Construct Student's t distributions.

    The distributions have degrees of freedom `df`, mean `mu`, and scale `sigma`.

    The parameters `df`, `mu`, and `sigma` must be shaped in a way that supports
    broadcasting (e.g. `df + mu + sigma` is a valid operation).

    Args:
      df: Floating point tensor, the degrees of freedom of the
        distribution(s). `df` must contain only positive values.
      mu: Floating point tensor, the means of the distribution(s).
      sigma: Floating point tensor, the scaling factor for the
        distribution(s). `sigma` must contain only positive values.
        Note that `sigma` is not the standard deviation of this distribution.
      validate_args: Whether to assert that `df > 0, sigma > 0`. If
        `validate_args` is `False` and inputs are invalid, correct behavior is
        not guaranteed.
      allow_nan_stats:  Boolean, default `False`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if mu and sigma are different dtypes.
    """
    self._allow_nan_stats = allow_nan_stats
    self._validate_args = validate_args
    with ops.name_scope(name, values=[df, mu, sigma]) as scope:
      with ops.control_dependencies([check_ops.assert_positive(
          df), check_ops.assert_positive(sigma)] if validate_args else []):
        self._df = ops.convert_to_tensor(df, name="df")
        self._mu = ops.convert_to_tensor(mu, name="mu")
        self._sigma = ops.convert_to_tensor(sigma, name="sigma")
        contrib_tensor_util.assert_same_float_dtype(
            (self._df, self._mu, self._sigma))
      self._name = scope
      self._get_batch_shape = common_shapes.broadcast_shape(
          self._sigma.get_shape(), common_shapes.broadcast_shape(
              self._df.get_shape(), self._mu.get_shape()))
      self._get_event_shape = tensor_shape.TensorShape([])
Developer: alephman, Project: Tensorflow, Lines: 48, Source: student_t.py

Example 4: __init__

  def __init__(self,
               a=0.0,
               b=1.0,
               validate_args=True,
               allow_nan_stats=False,
               name="Uniform"):
    """Construct Uniform distributions with `a` and `b`.

    The parameters `a` and `b` must be shaped in a way that supports
    broadcasting (e.g. `b - a` is a valid operation).

    Here are examples without broadcasting:

    ```python
    # Without broadcasting
    u1 = Uniform(3.0, 4.0)  # a single uniform distribution [3, 4]
    u2 = Uniform([1.0, 2.0], [3.0, 4.0])  # 2 distributions [1, 3], [2, 4]
    u3 = Uniform([[1.0, 2.0],
                  [3.0, 4.0]],
                 [[1.5, 2.5],
                  [3.5, 4.5]])  # 4 distributions
    ```

    And with broadcasting:

    ```python
    u1 = Uniform(3.0, [5.0, 6.0, 7.0])  # 3 distributions
    ```

    Args:
      a: Floating point tensor, the minimum endpoint.
      b: Floating point tensor, the maximum endpoint. Must be > `a`.
      validate_args: Whether to assert that `a < b`. If `validate_args` is
        `False` and inputs are invalid, correct behavior is not guaranteed.
      allow_nan_stats:  Boolean, default `False`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to prefix Ops created by this distribution class.

    Raises:
      InvalidArgumentError: if `a >= b` and `validate_args=True`.
    """
    self._allow_nan_stats = allow_nan_stats
    self._validate_args = validate_args
    with ops.name_scope(name, values=[a, b]):
      with ops.control_dependencies([check_ops.assert_less(
          a, b, message="uniform not defined when a > b.")] if validate_args
                                    else []):
        a = array_ops.identity(a, name="a")
        b = array_ops.identity(b, name="b")

    self._a = a
    self._b = b
    self._name = name
    self._batch_shape = common_shapes.broadcast_shape(
        self._a.get_shape(), self._b.get_shape())
    self._event_shape = tensor_shape.TensorShape([])

    contrib_tensor_util.assert_same_float_dtype((a, b))
Developer: alephman, Project: Tensorflow, Lines: 60, Source: uniform.py

Example 5: sample_n

  def sample_n(self, n, seed=None, name="sample_n"):
    """Sample `n` observations from the Normal Distributions.

    Args:
      n: `Scalar`, type int32, the number of observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by broadcasting the hyperparameters.
    """
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[self._mu, self._sigma, n]):
        broadcast_shape = common_shapes.broadcast_shape(
            self._mu.get_shape(), self._sigma.get_shape())
        n = ops.convert_to_tensor(n)
        shape = array_ops.concat(0, ([n], array_ops.shape(self.mean())))
        sampled = random_ops.random_normal(
            shape=shape, mean=0, stddev=1, dtype=self._mu.dtype, seed=seed)

        # Provide some hints to shape inference
        n_val = tensor_util.constant_value(n)
        final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
        sampled.set_shape(final_shape)

        return sampled * self._sigma + self._mu
Developer: alephman, Project: Tensorflow, Lines: 27, Source: normal.py

Example 6: __init__

  def __init__(self, shape, dtype, minimum, maximum, name=None):
    """Initializes a new `BoundedTensorSpec`.

    Args:
      shape: Value convertible to `tf.TensorShape`. The shape of the tensor.
      dtype: Value convertible to `tf.DType`. The type of the tensor values.
      minimum: Number or sequence specifying the minimum element bounds
        (inclusive). Must be broadcastable to `shape`.
      maximum: Number or sequence specifying the maximum element bounds
        (inclusive). Must be broadcastable to `shape`.
      name: Optional string containing a semantic name for the corresponding
        array. Defaults to `None`.

    Raises:
      ValueError: If `minimum` or `maximum` are not provided or not
        broadcastable to `shape`.
      TypeError: If the shape is not an iterable or if the `dtype` is an invalid
        numpy dtype.
    """
    super(BoundedTensorSpec, self).__init__(shape, dtype, name)

    if minimum is None or maximum is None:
      raise ValueError("minimum and maximum must be provided; but saw "
                       "'%s' and '%s'" % (minimum, maximum))

    try:
      minimum_shape = np.shape(minimum)
      common_shapes.broadcast_shape(
          tensor_shape.TensorShape(minimum_shape), self.shape)
    except ValueError as exception:
      raise ValueError("minimum is not compatible with shape. "
                       "Message: {!r}.".format(exception))

    try:
      maximum_shape = np.shape(maximum)
      common_shapes.broadcast_shape(
          tensor_shape.TensorShape(maximum_shape), self.shape)
    except ValueError as exception:
      raise ValueError("maximum is not compatible with shape. "
                       "Message: {!r}.".format(exception))

    self._minimum = np.array(minimum, dtype=self.dtype.as_numpy_dtype())
    self._minimum.setflags(write=False)

    self._maximum = np.array(maximum, dtype=self.dtype.as_numpy_dtype())
    self._maximum.setflags(write=False)
Developer: PuchatekwSzortach, Project: tensorflow, Lines: 46, Source: tensor_spec.py
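
The bound checks in this constructor rely on broadcast_shape to verify that `minimum` and `maximum` can broadcast to `shape`. A brief, hypothetical usage sketch follows; the module path tensorflow.python.framework.tensor_spec and the concrete values are assumptions for illustration, not taken from the example above.

```python
# Hypothetical usage of BoundedTensorSpec (module path and values are
# illustrative assumptions).
import numpy as np
from tensorflow.python.framework import tensor_spec

spec = tensor_spec.BoundedTensorSpec(
    shape=(2, 3), dtype=np.float32,
    minimum=0.0,              # scalar: broadcasts to (2, 3)
    maximum=[1.0, 2.0, 3.0])  # shape (3,): broadcasts to (2, 3)

# A (2,)-shaped bound cannot broadcast against (2, 3), so the
# constructor raises ValueError.
try:
    tensor_spec.BoundedTensorSpec(
        shape=(2, 3), dtype=np.float32, minimum=[0.0, 0.0], maximum=1.0)
except ValueError as err:
    print(err)
```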

Example 7: _assert_broadcast_with_unknown_dims

  def _assert_broadcast_with_unknown_dims(self, expected, shape1, shape2):
    actual_dims = common_shapes.broadcast_shape(shape1, shape2).dims
    reflexive_actual_dims = common_shapes.broadcast_shape(shape2, shape1).dims

    if actual_dims is None:
      self.assertIsNone(reflexive_actual_dims)
    elif reflexive_actual_dims is None:
      self.assertIsNone(actual_dims)
    else:
      self.assertEqual(len(actual_dims), len(reflexive_actual_dims))
      for actual_dim, reflexive_actual_dim in zip(
          actual_dims, reflexive_actual_dims):
        self.assertEqual(actual_dim.value, reflexive_actual_dim.value)

    expected_dims = expected.dims
    if expected_dims is None:
      self.assertIsNone(actual_dims)
    elif actual_dims is None:
      self.assertIsNone(expected_dims)
    else:
      self.assertEqual(len(expected_dims), len(actual_dims))
      for expected_dim, actual_dim in zip(expected_dims, actual_dims):
        self.assertEqual(expected_dim.value, actual_dim.value)
Developer: AlbertXiebnu, Project: tensorflow, Lines: 23, Source: common_shapes_test.py

Example 8: __init__

  def __init__(self,
               alpha,
               beta,
               validate_args=True,
               allow_nan_stats=False,
               name="Gamma"):
    """Construct Gamma distributions with parameters `alpha` and `beta`.

    The parameters `alpha` and `beta` must be shaped in a way that supports
    broadcasting (e.g. `alpha + beta` is a valid operation).

    Args:
      alpha: Floating point tensor, the shape params of the
        distribution(s).
        alpha must contain only positive values.
      beta: Floating point tensor, the inverse scale params of the
        distribution(s).
        beta must contain only positive values.
      validate_args: Whether to assert that `alpha > 0, beta > 0`, and that `x > 0` in
        the methods `prob(x)` and `log_prob(x)`.  If `validate_args` is `False`
        and the inputs are invalid, correct behavior is not guaranteed.
      allow_nan_stats:  Boolean, default `False`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to prepend to all ops created by this distribution.

    Raises:
      TypeError: if `alpha` and `beta` are different dtypes.
    """
    self._allow_nan_stats = allow_nan_stats
    self._validate_args = validate_args
    with ops.name_scope(name, values=[alpha, beta]) as scope:
      self._name = scope
      with ops.control_dependencies([check_ops.assert_positive(
          alpha), check_ops.assert_positive(beta)] if validate_args else []):
        alpha = array_ops.identity(alpha, name="alpha")
        beta = array_ops.identity(beta, name="beta")

    self._get_batch_shape = common_shapes.broadcast_shape(
        alpha.get_shape(), beta.get_shape())
    self._get_event_shape = tensor_shape.TensorShape([])

    self._alpha = alpha
    self._beta = beta
Developer: alephman, Project: Tensorflow, Lines: 45, Source: gamma.py

Example 9: _shape

  def _shape(self):
    # Get final matrix shape.
    domain_dimension = self.operators[0].domain_dimension
    range_dimension = self.operators[0].range_dimension
    for operator in self.operators[1:]:
      domain_dimension += operator.domain_dimension
      range_dimension += operator.range_dimension

    matrix_shape = tensor_shape.TensorShape([domain_dimension, range_dimension])

    # Get broadcast batch shape.
    # broadcast_shape checks for compatibility.
    batch_shape = self.operators[0].batch_shape
    for operator in self.operators[1:]:
      batch_shape = common_shapes.broadcast_shape(
          batch_shape, operator.batch_shape)

    return batch_shape.concatenate(matrix_shape)
Developer: aritratony, Project: tensorflow, Lines: 18, Source: linear_operator_block_diag.py

Example 10: __init__

  def __init__(self,
               loc,
               scale,
               validate_args=True,
               allow_nan_stats=False,
               name="Laplace"):
    """Construct Laplace distribution with parameters `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g., `loc / scale` is a valid operation).

    Args:
      loc: Floating point tensor which characterizes the location (center)
        of the distribution.
      scale: Positive floating point tensor which characterizes the spread of
        the distribution.
      validate_args: Whether to validate input with asserts.  If `validate_args`
        is `False`, and the inputs are invalid, correct behavior is not
        guaranteed.
      allow_nan_stats:  Boolean, default `False`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if `loc` and `scale` are of different dtype.
    """
    self._allow_nan_stats = allow_nan_stats
    self._validate_args = validate_args
    with ops.name_scope(name, values=[loc, scale]):
      loc = ops.convert_to_tensor(loc)
      scale = ops.convert_to_tensor(scale)
      with ops.control_dependencies([check_ops.assert_positive(scale)] if
                                    validate_args else []):
        self._name = name
        self._loc = array_ops.identity(loc, name="loc")
        self._scale = array_ops.identity(scale, name="scale")
        self._batch_shape = common_shapes.broadcast_shape(
            self._loc.get_shape(), self._scale.get_shape())
        self._event_shape = tensor_shape.TensorShape([])

    contrib_tensor_util.assert_same_float_dtype((loc, scale))
Developer: alephman, Project: Tensorflow, Lines: 43, Source: laplace.py

Example 11: __init__

  def __init__(self,
               mu,
               sigma,
               validate_args=True,
               allow_nan_stats=False,
               name="Normal"):
    """Construct Normal distributions with mean and stddev `mu` and `sigma`.

    The parameters `mu` and `sigma` must be shaped in a way that supports
    broadcasting (e.g. `mu + sigma` is a valid operation).

    Args:
      mu: Floating point tensor, the means of the distribution(s).
      sigma: Floating point tensor, the stddevs of the distribution(s).
        sigma must contain only positive values.
      validate_args: Whether to assert that `sigma > 0`. If `validate_args` is
        `False`, correct output is not guaranteed when input is invalid.
      allow_nan_stats:  Boolean, default `False`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if mu and sigma are different dtypes.
    """
    self._allow_nan_stats = allow_nan_stats
    self._validate_args = validate_args
    with ops.name_scope(name, values=[mu, sigma]):
      mu = ops.convert_to_tensor(mu)
      sigma = ops.convert_to_tensor(sigma)
      with ops.control_dependencies([check_ops.assert_positive(sigma)] if
                                    validate_args else []):
        self._name = name
        self._mu = array_ops.identity(mu, name="mu")
        self._sigma = array_ops.identity(sigma, name="sigma")
        self._batch_shape = common_shapes.broadcast_shape(
            self._mu.get_shape(), self._sigma.get_shape())
        self._event_shape = tensor_shape.TensorShape([])

    contrib_tensor_util.assert_same_float_dtype((mu, sigma))
Developer: alephman, Project: Tensorflow, Lines: 41, Source: normal.py
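
The broadcast batch shape computed in the constructor above is what lets a single `Normal` object represent many distributions at once. Here is a hedged sketch of that computation; the tensor values are arbitrary and a TF 1.x-era environment is assumed.

```python
# Sketch of the batch-shape logic inside the Normal constructor above:
# the batch shape is the broadcast of mu's and sigma's static shapes.
import tensorflow as tf
from tensorflow.python.framework import common_shapes

mu = tf.constant([[1.0], [2.0]])      # shape (2, 1)
sigma = tf.constant([0.5, 1.0, 2.0])  # shape (3,)

batch_shape = common_shapes.broadcast_shape(mu.get_shape(), sigma.get_shape())
print(batch_shape)  # (2, 3) -- one Normal distribution per broadcast element
```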

Example 12: _get_batch_shape

 def _get_batch_shape(self):
   return common_shapes.broadcast_shape(self.loc.get_shape(),
                                        self.scale.get_shape())
Developer: KalraA, Project: tensorflow, Lines: 3, Source: laplace.py

Example 13: _get_batch_shape

 def _get_batch_shape(self):
   return common_shapes.broadcast_shape(self.n.get_shape(),
                                        self.p.get_shape())
Developer: bsantanas, Project: tensorflow, Lines: 3, Source: binomial.py

Example 14: _get_batch_shape

 def _get_batch_shape(self):
   return common_shapes.broadcast_shape(self.alpha.get_shape(),
                                        self.beta.get_shape())
Developer: Nishant23, Project: tensorflow, Lines: 3, Source: gamma.py

Example 15: _get_batch_shape

 def _get_batch_shape(self):
   return common_shapes.broadcast_shape(
       self._mu.get_shape(), self.sigma.get_shape())
Developer: curtiszimmerman, Project: tensorflow, Lines: 3, Source: normal.py


Note: The tensorflow.python.framework.common_shapes.broadcast_shape examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with those authors; consult each project's license before distributing or reusing the code. Do not reproduce this article without permission.