当前位置: 首页>>代码示例>>Python>>正文


Python array_ops.broadcast_dynamic_shape函数代码示例

本文整理汇总了Python中tensorflow.python.ops.array_ops.broadcast_dynamic_shape函数的典型用法代码示例。如果您正苦于以下问题:Python broadcast_dynamic_shape函数的具体用法?Python broadcast_dynamic_shape怎么用?Python broadcast_dynamic_shape使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。


在下文中一共展示了broadcast_dynamic_shape函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: _itemwise_error_rate

def _itemwise_error_rate(
    total_error_rate, param_tensors, sample_tensor=None, name=None):
  """Split a total error rate uniformly across all broadcast items.

  Broadcasts the shapes of every tensor in `param_tensors` (and, if
  supplied, the shape of `sample_tensor` with its leading sample
  dimension dropped) against each other, counts the items in the
  resulting shape, and divides `total_error_rate` evenly among them.
  """
  with ops.name_scope(
      name, "itemwise_error_rate",
      [total_error_rate, param_tensors, sample_tensor]):
    # Seed with a scalar-compatible shape, then fold in each parameter.
    broadcast_shape = [1]
    for param in param_tensors:
      broadcast_shape = array_ops.broadcast_dynamic_shape(
          array_ops.shape(param), broadcast_shape)
    if sample_tensor is not None:
      # Only the batch portion of the sample shape counts; drop axis 0.
      broadcast_shape = array_ops.broadcast_dynamic_shape(
          array_ops.shape(sample_tensor)[1:], broadcast_shape)
    item_count = math_ops.reduce_prod(broadcast_shape)
    return total_error_rate / math_ops.cast(
        item_count, dtype=total_error_rate.dtype)
开发者ID:AndrewTwinz,项目名称:tensorflow,代码行数:15,代码来源:statistical_testing.py

示例2: check

 def check(t):
   """Build assertions that `t`'s shape broadcasts to the batch shape.

   NOTE(review): the original compared `array_ops.rank(target)` with
   `array_ops.rank(t)`, but `target` is a 1-D shape vector whose rank is
   always 1, so the guard degenerated to `1 > rank(t)`.  The intended
   comparison is the *number of batch dimensions* vs. the rank of `t`,
   as in the sibling implementation that uses `array_ops.size`.
   """
   # `tensor` is a closure variable from the enclosing scope.
   target = array_ops.shape(tensor)[1:]
   result = array_ops.broadcast_dynamic_shape(target, array_ops.shape(t))
   # This rank check ensures that I don't get a wrong answer from the
   # _shapes_ broadcasting against each other.  `size(target)` is the
   # batch ndims; `t` may not have more dims than that.
   gt = check_ops.assert_greater_equal(
       array_ops.size(target), array_ops.rank(t))
   eq = check_ops.assert_equal(target, result)
   return gt, eq
开发者ID:AndrewTwinz,项目名称:tensorflow,代码行数:8,代码来源:statistical_testing.py

示例3: _broadcast_shape

def _broadcast_shape(shape1, shape2):
  """Convenience function which statically broadcasts shape when possible."""
  static1 = tensor_util.constant_value(shape1)
  static2 = tensor_util.constant_value(shape2)
  if static1 is None or static2 is None:
    # At least one shape is only known at graph-execution time.
    return array_ops.broadcast_dynamic_shape(shape1, shape2)
  return array_ops.broadcast_static_shape(
      tensor_shape.TensorShape(static1),
      tensor_shape.TensorShape(static2))
开发者ID:jzuern,项目名称:tensorflow,代码行数:8,代码来源:mvn_linear_operator.py

示例4: _cdf

 def _cdf(self, x):
   """Cumulative distribution function, elementwise over the joint shape."""
   joint_shape = array_ops.broadcast_dynamic_shape(
       array_ops.shape(x), self.batch_shape_tensor())
   all_zeros = array_ops.zeros(joint_shape, dtype=self.dtype)
   all_ones = array_ops.ones(joint_shape, dtype=self.dtype)
   # Multiplying by ones broadcasts `x` up to the joint shape.
   x_full = x * all_ones
   # Below `low` the CDF is 0; between `low` and `high` it is linear.
   below_high = array_ops.where(
       x < self.low, all_zeros, (x_full - self.low) / self.range())
   # At or above `high` the CDF saturates at 1.
   return array_ops.where(x >= self.high, all_ones, below_high)
开发者ID:LUTAN,项目名称:tensorflow,代码行数:9,代码来源:uniform.py

示例5: check

 def check(t):
   """Build assertions that `t` broadcasts to the samples' batch shape."""
   # `samples` is a closure variable; axis 0 is the sample dimension.
   batch_shape = array_ops.shape(samples)[1:]
   merged_shape = array_ops.broadcast_dynamic_shape(
       batch_shape, array_ops.shape(t))
   # Compare ndims as well, so that the two _shape_ vectors broadcasting
   # against each other cannot mask a mismatch.
   batch_ndims = array_ops.size(batch_shape)
   ge = check_ops.assert_greater_equal(batch_ndims, array_ops.rank(t))
   eq = check_ops.assert_equal(batch_shape, merged_shape)
   return ge, eq
开发者ID:ahmedsaiduk,项目名称:tensorflow,代码行数:11,代码来源:statistical_testing.py

示例6: determine_batch_event_shapes

def determine_batch_event_shapes(grid, endpoint_affine):
  """Helper to infer batch_shape and event_shape."""
  with ops.name_scope(name="determine_batch_event_shapes"):
    # grid:            shape [B, k, q]
    # endpoint_affine: len=k, each with shape [B, d, d]
    batch_shape = grid.shape[:-2]
    batch_shape_tensor = array_ops.shape(grid)[:-2]
    event_shape = None
    event_shape_tensor = None

    def _merge_event_shape(shape, shape_tensor):
      # First contribution seeds the event shape; later ones broadcast
      # against the closure variables `event_shape`/`event_shape_tensor`.
      if event_shape is None:
        return shape, shape_tensor
      return (array_ops.broadcast_static_shape(event_shape, shape),
              array_ops.broadcast_dynamic_shape(
                  event_shape_tensor, shape_tensor))

    for affine in endpoint_affine:
      if affine.shift is not None:
        # The shift's leading dims contribute to the batch shape,
        # its last dim to the event shape.
        batch_shape = array_ops.broadcast_static_shape(
            batch_shape, affine.shift.shape[:-1])
        batch_shape_tensor = array_ops.broadcast_dynamic_shape(
            batch_shape_tensor, array_ops.shape(affine.shift)[:-1])
        event_shape, event_shape_tensor = _merge_event_shape(
            affine.shift.shape[-1:], array_ops.shape(affine.shift)[-1:])

      if affine.scale is not None:
        # The scale operator contributes its batch shape and its range
        # dimension (as a length-1 event shape).
        batch_shape = array_ops.broadcast_static_shape(
            batch_shape, affine.scale.batch_shape)
        batch_shape_tensor = array_ops.broadcast_dynamic_shape(
            batch_shape_tensor, affine.scale.batch_shape_tensor())
        event_shape, event_shape_tensor = _merge_event_shape(
            tensor_shape.TensorShape([affine.scale.range_dimension]),
            affine.scale.range_dimension_tensor()[array_ops.newaxis])

    return batch_shape, batch_shape_tensor, event_shape, event_shape_tensor
开发者ID:bikong2,项目名称:tensorflow,代码行数:36,代码来源:vector_diffeomixture.py

示例7: _shape_tensor

  def _shape_tensor(self):
    """Dynamic shape of the Kronecker-product operator."""
    head, tail = self.operators[0], self.operators[1:]

    # A Kronecker product multiplies the factors' domain/range dimensions.
    domain_dim = head.domain_dimension_tensor()
    range_dim = head.range_dimension_tensor()
    for op in tail:
      domain_dim *= op.domain_dimension_tensor()
      range_dim *= op.range_dimension_tensor()

    # Batch shapes broadcast; broadcast_dynamic_shape also checks
    # compatibility.
    batch_shape = head.batch_shape_tensor()
    for op in tail:
      batch_shape = array_ops.broadcast_dynamic_shape(
          batch_shape, op.batch_shape_tensor())

    return array_ops.concat((batch_shape, [range_dim, domain_dim]), 0)
开发者ID:aritratony,项目名称:tensorflow,代码行数:19,代码来源:linear_operator_kronecker.py

示例8: prefer_static_broadcast_shape

def prefer_static_broadcast_shape(
    shape1, shape2, name="prefer_static_broadcast_shape"):
  """Convenience function which statically broadcasts shape when possible.

  Args:
    shape1:  `1-D` integer `Tensor`.  Already converted to tensor!
    shape2:  `1-D` integer `Tensor`.  Already converted to tensor!
    name:  A string name to prepend to created ops.

  Returns:
    The broadcast shape, either as `TensorShape` (if broadcast can be done
      statically), or as a `Tensor`.
  """
  with ops.name_scope(name, values=[shape1, shape2]):
    static1 = tensor_util.constant_value(shape1)
    static2 = tensor_util.constant_value(shape2)
    if static1 is not None and static2 is not None:
      # Both shapes are known at graph-construction time.
      return array_ops.broadcast_static_shape(
          tensor_shape.TensorShape(static1),
          tensor_shape.TensorShape(static2))
    return array_ops.broadcast_dynamic_shape(shape1, shape2)
开发者ID:AlbertXiebnu,项目名称:tensorflow,代码行数:20,代码来源:distribution_util.py

示例9: prefer_static_broadcast_shape

def prefer_static_broadcast_shape(
    shape1, shape2, name="prefer_static_broadcast_shape"):
  """Convenience function which statically broadcasts shape when possible.

  Args:
    shape1:  `1-D` integer `Tensor`.  Already converted to tensor!
    shape2:  `1-D` integer `Tensor`.  Already converted to tensor!
    name:  A string name to prepend to created ops.

  Returns:
    The broadcast shape, either as `TensorShape` (if broadcast can be done
      statically), or as a `Tensor`.
  """
  with ops.name_scope(name, values=[shape1, shape2]):
    def _as_tensor(x):
      return ops.convert_to_tensor(x, name="shape", dtype=dtypes.int32)

    def _static_shape(s):
      # Return `s` as a TensorShape when it is statically known, else None.
      if isinstance(s, tensor_shape.TensorShape):
        return s
      static = tensor_util.constant_value(_as_tensor(s))
      return None if static is None else tensor_shape.TensorShape(static)

    def _dynamic_shape(s):
      # Return `s` as a shape Tensor; a partial TensorShape cannot be
      # converted and is an error.
      if not isinstance(s, tensor_shape.TensorShape):
        return _as_tensor(s)
      if s.is_fully_defined():
        return _as_tensor(s.as_list())
      raise ValueError("Cannot broadcast from partially "
                       "defined `TensorShape`.")

    static1 = _static_shape(shape1)
    static2 = _static_shape(shape2)
    if static1 is not None and static2 is not None:
      return array_ops.broadcast_static_shape(static1, static2)

    return array_ops.broadcast_dynamic_shape(
        _dynamic_shape(shape1), _dynamic_shape(shape2))
开发者ID:Crazyonxh,项目名称:tensorflow,代码行数:41,代码来源:distribution_util.py

示例10: get_broadcast_shape

def get_broadcast_shape(*tensors):
  """Get broadcast shape as a Python list of integers (preferred) or `Tensor`.

  Args:
    *tensors:  One or more `Tensor` objects (already converted!).

  Returns:
    broadcast shape:  Python list (if shapes determined statically), otherwise
      an `int32` `Tensor`.
  """
  head, rest = tensors[0], tensors[1:]

  # Prefer the static answer when every shape is fully known.
  static = head.shape
  for t in rest:
    static = array_ops.broadcast_static_shape(static, t.shape)
  if static.is_fully_defined():
    return static.as_list()

  # Otherwise fall back to a graph-time computation.
  dynamic = array_ops.shape(head)
  for t in rest:
    dynamic = array_ops.broadcast_dynamic_shape(dynamic, array_ops.shape(t))
  return dynamic
开发者ID:AndrewTwinz,项目名称:tensorflow,代码行数:22,代码来源:distribution_util.py

示例11: _set_event_shape

 def _set_event_shape(shape, shape_tensor):
   """Seed the event shape on first call; broadcast on later calls."""
   # `event_shape` / `event_shape_tensor` are closure variables from the
   # enclosing scope.
   if event_shape is None:
     return shape, shape_tensor
   merged_static = array_ops.broadcast_static_shape(event_shape, shape)
   merged_dynamic = array_ops.broadcast_dynamic_shape(
       event_shape_tensor, shape_tensor)
   return merged_static, merged_dynamic
开发者ID:bikong2,项目名称:tensorflow,代码行数:6,代码来源:vector_diffeomixture.py

示例12: _batch_shape_tensor

 def _batch_shape_tensor(self):
   """Dynamic batch shape: the broadcast of `loc` and `scale` shapes."""
   loc_shape = array_ops.shape(self.loc)
   scale_shape = array_ops.shape(self.scale)
   return array_ops.broadcast_dynamic_shape(loc_shape, scale_shape)
开发者ID:ChengYuXiang,项目名称:tensorflow,代码行数:4,代码来源:normal.py

示例13: _batch_shape_tensor

 def _batch_shape_tensor(self):
   """Dynamic batch shape: broadcast of `total_count` and `probs` shapes."""
   count_shape = array_ops.shape(self.total_count)
   probs_shape = array_ops.shape(self.probs)
   return array_ops.broadcast_dynamic_shape(count_shape, probs_shape)
开发者ID:arnonhongklay,项目名称:tensorflow,代码行数:4,代码来源:negative_binomial.py

示例14: _shape_tensor

 def _shape_tensor(self):
   """Dynamic shape: broadcast batch dims + the base operator's matrix dims."""
   # `u` has shape [..., M, R]; everything but the last two dims is batch.
   u_batch_shape = array_ops.shape(self.u)[:-2]
   batch_shape = array_ops.broadcast_dynamic_shape(
       self.base_operator.batch_shape_tensor(), u_batch_shape)
   matrix_shape = self.base_operator.shape_tensor()[-2:]
   return array_ops.concat([batch_shape, matrix_shape], axis=0)
开发者ID:LugarkPirog,项目名称:tensorflow,代码行数:6,代码来源:linear_operator_udvh_update.py

示例15: _batch_shape

 def _batch_shape(self):
   """Dynamic batch shape: the broadcast of `n` and `p` shapes."""
   n_shape = array_ops.shape(self.n)
   p_shape = array_ops.shape(self.p)
   return array_ops.broadcast_dynamic_shape(n_shape, p_shape)
开发者ID:ivankreso,项目名称:tensorflow,代码行数:3,代码来源:binomial.py


注:本文中的tensorflow.python.ops.array_ops.broadcast_dynamic_shape函数示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。