

Python math_ops.reduce_min Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.math_ops.reduce_min. If you are unsure what math_ops.reduce_min does or how to call it, the curated code examples below should help. You can also explore other usage examples from the same module, tensorflow.python.ops.math_ops.


A total of 15 code examples of math_ops.reduce_min are shown below, ordered by popularity by default.
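
Before diving into the examples, here is a minimal sketch of the basic behaviour of math_ops.reduce_min. It is not taken from any of the projects below; the tensor values and variable names are purely illustrative, and tf.reduce_min is the public alias of the same op.

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([[3.0, 1.0, 2.0],
                 [4.0, 0.5, 6.0]])

global_min = math_ops.reduce_min(x)                            # scalar: 0.5
row_min = math_ops.reduce_min(x, axis=1)                       # [1.0, 0.5]
row_min_kept = math_ops.reduce_min(x, axis=1, keepdims=True)   # shape [2, 1]
# Older TensorFlow releases spell the last argument keep_dims, as some of the
# examples below do; in graph mode the results must be evaluated in a session.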

Example 1: _MatrixSetDiagGrad

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_min [as alias]
def _MatrixSetDiagGrad(op, grad):
  """Gradient for MatrixSetDiag."""
  input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())
  diag_shape = op.inputs[1].get_shape()
  batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])
  matrix_shape = input_shape[-2:]
  if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():
    diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]
  else:
    with ops.colocate_with(grad):
      grad_shape = array_ops.shape(grad)
      grad_rank = array_ops.rank(grad)
      batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
      matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
      min_dim = math_ops.reduce_min(matrix_shape)
      diag_shape = array_ops.concat([batch_shape, [min_dim]], 0)
  grad_input = array_ops.matrix_set_diag(
      grad, array_ops.zeros(
          diag_shape, dtype=grad.dtype))
  grad_diag = array_ops.matrix_diag_part(grad)
  return (grad_input, grad_diag) 
Developer: ryfeus, Project: lambda-packs, Lines: 23, Source: array_grad.py

Example 2: min

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_min [as alias]
def min(x, axis=None, keepdims=False):
  """Minimum value in a tensor.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to find minimum values.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with minimum values of `x`.
  """
  axis = _normalize_axis(axis, ndim(x))
  return math_ops.reduce_min(x, reduction_indices=axis, keep_dims=keepdims) 
Developer: ryfeus, Project: lambda-packs, Lines: 18, Source: backend.py

Example 3: _assert_non_singular

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_min [as alias]
def _assert_non_singular(self):
    """Private default implementation of _assert_non_singular."""
    logging.warn(
        "Using (possibly slow) default implementation of assert_non_singular."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    if self._can_use_cholesky():
      return self.assert_positive_definite()
    else:
      singular_values = linalg_ops.svd(
          self._get_cached_dense_matrix(), compute_uv=False)
      # TODO(langmore) Add .eig and .cond as methods.
      cond = (math_ops.reduce_max(singular_values, axis=-1) /
              math_ops.reduce_min(singular_values, axis=-1))
      return check_ops.assert_less(
          cond,
          self._max_condition_number_to_be_non_singular(),
          message="Singular matrix up to precision epsilon.")
    raise NotImplementedError("assert_non_singular is not implemented.") 
Developer: ryfeus, Project: lambda-packs, Lines: 20, Source: linear_operator.py
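
To make the condition-number test above concrete, here is a toy sketch (not part of the original source) that computes cond = sigma_max / sigma_min for an invented near-singular matrix; a large ratio is exactly what the assertion guards against.

import tensorflow as tf
from tensorflow.python.ops import linalg_ops, math_ops

matrix = tf.constant([[2.0, 0.0],
                      [0.0, 1e-6]])
singular_values = linalg_ops.svd(matrix, compute_uv=False)
cond = (math_ops.reduce_max(singular_values, axis=-1) /
        math_ops.reduce_min(singular_values, axis=-1))
# cond is roughly 2e6 here, so a check like assert_less(cond, threshold)
# would flag the matrix as singular up to precision epsilon.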

Example 4: _MatrixSetDiagGrad

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_min [as alias]
def _MatrixSetDiagGrad(op, grad):
  input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())
  diag_shape = op.inputs[1].get_shape()
  batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])
  matrix_shape = input_shape[-2:]
  if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():
    diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]
  else:
    with ops.colocate_with(grad):
      grad_shape = array_ops.shape(grad)
      grad_rank = array_ops.rank(grad)
      batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
      matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
      min_dim = math_ops.reduce_min(matrix_shape)
      diag_shape = array_ops.concat([batch_shape, [min_dim]], 0)
  grad_input = array_ops.matrix_set_diag(
      grad, array_ops.zeros(
          diag_shape, dtype=grad.dtype))
  grad_diag = array_ops.matrix_diag_part(grad)
  return (grad_input, grad_diag) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 22, Source: array_grad.py

Example 5: masked_maximum

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_min [as alias]
def masked_maximum(data, mask, dim=1):
  """Computes the axis wise maximum over chosen elements.

  Args:
    data: 2-D float `Tensor` of size [n, m].
    mask: 2-D Boolean `Tensor` of size [n, m].
    dim: The dimension over which to compute the maximum.

  Returns:
    masked_maximums: N-D `Tensor`.
      The maximized dimension is of size 1 after the operation.
  """
  axis_minimums = math_ops.reduce_min(data, dim, keepdims=True)
  masked_maximums = math_ops.reduce_max(
      math_ops.multiply(data - axis_minimums, mask), dim,
      keepdims=True) + axis_minimums
  return masked_maximums 
Developer: google-research, Project: tf-slim, Lines: 19, Source: metric_learning.py

Example 6: masked_minimum

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_min [as alias]
def masked_minimum(data, mask, dim=1):
  """Computes the axis wise minimum over chosen elements.

  Args:
    data: 2-D float `Tensor` of size [n, m].
    mask: 2-D Boolean `Tensor` of size [n, m].
    dim: The dimension over which to compute the minimum.

  Returns:
    masked_minimums: N-D `Tensor`.
      The minimized dimension is of size 1 after the operation.
  """
  axis_maximums = math_ops.reduce_max(data, dim, keepdims=True)
  masked_minimums = math_ops.reduce_min(
      math_ops.multiply(data - axis_maximums, mask), dim,
      keepdims=True) + axis_maximums
  return masked_minimums 
Developer: google-research, Project: tf-slim, Lines: 19, Source: metric_learning.py
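
The shift trick used by masked_maximum and masked_minimum above is easier to follow on concrete numbers. The sketch below is illustrative only (the data and mask values are invented, and the mask is assumed to be a float 0/1 tensor, as in the metric-learning code that calls these helpers): subtracting the row maximum makes every entry non-positive, so masked-out entries, which become exactly zero after the multiplication, can never undercut an in-mask value.

import tensorflow as tf
from tensorflow.python.ops import math_ops

data = tf.constant([[5.0, 2.0, 9.0]])
mask = tf.constant([[1.0, 0.0, 1.0]])   # only columns 0 and 2 are eligible

axis_maximums = math_ops.reduce_max(data, 1, keepdims=True)      # [[9.0]]
shifted = math_ops.multiply(data - axis_maximums, mask)          # [[-4., 0., 0.]]
masked_min = math_ops.reduce_min(shifted, 1, keepdims=True) + axis_maximums
# masked_min == [[5.0]]: the smallest value among the unmasked entries.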

Example 7: masked_maximum

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_min [as alias]
def masked_maximum(data, mask, dim=1):
  """Computes the axis wise maximum over chosen elements.

  Args:
    data: 2-D float `Tensor` of size [n, m].
    mask: 2-D Boolean `Tensor` of size [n, m].
    dim: The dimension over which to compute the maximum.

  Returns:
    masked_maximums: N-D `Tensor`.
      The maximized dimension is of size 1 after the operation.
  """
  axis_minimums = math_ops.reduce_min(data, dim, keep_dims=True)
  masked_maximums = math_ops.reduce_max(
      math_ops.multiply(
          data - axis_minimums, mask), dim, keep_dims=True) + axis_minimums
  return masked_maximums 
Developer: CongWeilin, Project: cluster-loss-tensorflow, Lines: 19, Source: metric_loss_ops.py

Example 8: masked_minimum

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_min [as alias]
def masked_minimum(data, mask, dim=1):
  """Computes the axis wise minimum over chosen elements.

  Args:
    data: 2-D float `Tensor` of size [n, m].
    mask: 2-D Boolean `Tensor` of size [n, m].
    dim: The dimension over which to compute the minimum.

  Returns:
    masked_minimums: N-D `Tensor`.
      The minimized dimension is of size 1 after the operation.
  """
  axis_maximums = math_ops.reduce_max(data, dim, keep_dims=True)
  masked_minimums = math_ops.reduce_min(
      math_ops.multiply(
          data - axis_maximums, mask), dim, keep_dims=True) + axis_maximums
  return masked_minimums 
Developer: CongWeilin, Project: cluster-loss-tensorflow, Lines: 19, Source: metric_loss_ops.py

Example 9: _MatrixSetDiagGrad

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_min [as alias]
def _MatrixSetDiagGrad(op, grad):
  input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())
  diag_shape = op.inputs[1].get_shape()
  batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])
  matrix_shape = input_shape[-2:]
  if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():
    diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]
  else:
    with ops.colocate_with(grad):
      grad_shape = array_ops.shape(grad)
      grad_rank = array_ops.rank(grad)
      batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
      matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
      min_dim = math_ops.reduce_min(matrix_shape)
      diag_shape = array_ops.concat(0, [batch_shape, [min_dim]])
  grad_input = array_ops.matrix_set_diag(
      grad, array_ops.zeros(
          diag_shape, dtype=grad.dtype))
  grad_diag = array_ops.matrix_diag_part(grad)
  return (grad_input, grad_diag) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 22, Source: array_grad.py

Example 10: _ones_diag

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_min [as alias]
def _ones_diag(self):
    """Returns the diagonal of this operator as all ones."""
    if self.shape.is_fully_defined():
      d_shape = self.batch_shape.concatenate(
          [min(self.domain_dimension.value, self.range_dimension.value)])
    else:
      d_shape = array_ops.concat(
          [self.batch_shape_tensor(),
           [math_ops.reduce_min(self.shape_tensor()[-2:])]], axis=0)

    return array_ops.ones(shape=d_shape, dtype=self.dtype) 
Developer: ryfeus, Project: lambda-packs, Lines: 13, Source: linear_operator_identity.py

Example 11: get_best

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_min [as alias]
def get_best(self, n):
    """Return the indices and values of the n highest scores in the TopN."""

    def refresh_shortlist():
      """Update the shortlist with the highest scores in id_to_score."""
      new_scores, new_ids = nn_ops.top_k(self.id_to_score, self.shortlist_size)
      smallest_new_score = math_ops.reduce_min(new_scores)
      new_length = math_ops.reduce_sum(
          math_ops.to_int32(math_ops.greater(new_scores, dtypes.float32.min)))
      u1 = self.sl_ids.assign(
          math_ops.to_int64(array_ops.concat([[new_length], new_ids], 0)))
      u2 = self.sl_scores.assign(
          array_ops.concat([[smallest_new_score], new_scores], 0))
      self.last_ops = [u1, u2]
      return control_flow_ops.group(u1, u2)

    # We only need to refresh the shortlist if n is greater than the
    # current shortlist size (which is stored in sl_ids[0]).
    with ops.control_dependencies(self.last_ops):
      cond_op = control_flow_ops.cond(n > self.sl_ids[0], refresh_shortlist,
                                      control_flow_ops.no_op)
      with ops.control_dependencies([cond_op]):
        topk_values, topk_indices = nn_ops.top_k(
            self.sl_scores,
            math_ops.minimum(n, math_ops.to_int32(self.sl_ids[0])))
        # topk_indices are the indices into the shortlist, we want to return
        # the indices into id_to_score
        gathered_indices = array_ops.gather(self.sl_ids, topk_indices)
        return gathered_indices, topk_values 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 31, Source: topn.py

Example 12: test_name

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_min [as alias]
def test_name(self):
    result_lt = ops.reduce_min(self.original_lt, {'channel'})
    self.assertIn('lt_reduce_min', result_lt.name) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 5, Source: ops_test.py

Example 13: test

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_min [as alias]
def test(self):
    result_lt = ops.reduce_min(self.original_lt, {'channel'})
    golden_lt = core.LabeledTensor(
        math_ops.reduce_min(self.original_lt.tensor, 1),
        [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(result_lt, golden_lt) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 8, Source: ops_test.py

Example 14: compute_facility_energy

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_min [as alias]
def compute_facility_energy(pairwise_distances, centroid_ids):
  """Compute the average travel distance to the assigned centroid.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    centroid_ids: 1-D Tensor of indices.

  Returns:
    facility_energy: dtypes.float32 scalar.
  """
  return -1.0 * math_ops.reduce_sum(
      math_ops.reduce_min(
          array_ops.gather(pairwise_distances, centroid_ids), axis=0)) 
Developer: google-research, Project: tf-slim, Lines: 15, Source: metric_learning.py
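
A small hand-worked sketch of compute_facility_energy may help: gathering the centroid rows and taking the column-wise reduce_min gives, for each point, the distance to its nearest centroid, and the negated sum of those distances is the facility energy. The distance matrix and centroid indices below are invented for illustration.

import tensorflow as tf
from tensorflow.python.ops import array_ops, math_ops

pairwise_distances = tf.constant([[0.0, 2.0, 5.0],
                                  [2.0, 0.0, 3.0],
                                  [5.0, 3.0, 0.0]])
centroid_ids = tf.constant([0, 2])

rows = array_ops.gather(pairwise_distances, centroid_ids)   # [[0., 2., 5.], [5., 3., 0.]]
nearest = math_ops.reduce_min(rows, axis=0)                 # [0., 2., 0.]
facility_energy = -1.0 * math_ops.reduce_sum(nearest)       # -2.0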

Example 15: compute_gt_cluster_score

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_min [as alias]
def compute_gt_cluster_score(pairwise_distances, labels):
  """Compute ground truth facility location score.

  Loop over each unique classes and compute average travel distances.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    labels: 1-D Tensor of ground truth cluster assignment.

  Returns:
    gt_cluster_score: dtypes.float32 score.
  """
  unique_class_ids = array_ops.unique(labels)[0]
  num_classes = array_ops.size(unique_class_ids)
  iteration = array_ops.constant(0)
  gt_cluster_score = array_ops.constant(0.0, dtype=dtypes.float32)

  def func_cond(iteration, gt_cluster_score):
    del gt_cluster_score  # Unused argument.
    return iteration < num_classes

  def func_body(iteration, gt_cluster_score):
    """Per each cluster, compute the average travel distance."""
    mask = math_ops.equal(labels, unique_class_ids[iteration])
    this_cluster_ids = array_ops.where(mask)
    pairwise_distances_subset = array_ops.transpose(
        array_ops.gather(
            array_ops.transpose(
                array_ops.gather(pairwise_distances, this_cluster_ids)),
            this_cluster_ids))
    this_cluster_score = -1.0 * math_ops.reduce_min(
        math_ops.reduce_sum(
            pairwise_distances_subset, axis=0))
    return iteration + 1, gt_cluster_score + this_cluster_score

  _, gt_cluster_score = control_flow_ops.while_loop(
      func_cond, func_body, [iteration, gt_cluster_score])
  return gt_cluster_score 
Developer: google-research, Project: tf-slim, Lines: 40, Source: metric_learning.py


Note: The tensorflow.python.ops.math_ops.reduce_min examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright of the source code. Refer to the corresponding project's license before using or redistributing the code, and do not reproduce this article without permission.