

Python v1.reduce_min Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.reduce_min method in Python. If you are wondering what v1.reduce_min does in practice, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.compat.v1.


Below are 15 code examples of the v1.reduce_min method, ordered by popularity by default.
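Before the project examples, here is a minimal, self-contained sketch of the basic reduce_min call signature. It was written for this article (not taken from any of the projects below), and the input values are made up for illustration:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant([[3., 1., 4.],
                 [1., 5., 9.]])
global_min = tf.reduce_min(x)                           # scalar: 1.0
row_min = tf.reduce_min(x, axis=1)                      # shape [2]: [1., 1.]
row_min_kept = tf.reduce_min(x, axis=1, keepdims=True)  # shape [2, 1]: [[1.], [1.]]

with tf.Session() as sess:
  print(sess.run([global_min, row_min, row_min_kept]))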

Example 1: top_k_softmax

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_min [as alias]
def top_k_softmax(x, k):
  """Calculate softmax(x), select top-k and rescale to sum to 1.

  Args:
    x: Input to softmax over.
    k: Number of top-k to select.

  Returns:
    softmax(x) and maximum item.
  """
  x = tf.nn.softmax(x)
  top_x, _ = tf.nn.top_k(x, k=k + 1)
  min_top = tf.reduce_min(top_x, axis=-1, keep_dims=True)
  x = tf.nn.relu((x - min_top) + 1e-12)
  x /= tf.reduce_sum(x, axis=-1, keep_dims=True)
  return x, tf.reduce_max(top_x, axis=-1) 
Developer: tensorflow | Project: tensor2tensor | Lines: 18 | Source: discretization.py
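A hypothetical usage sketch of the function above; the logits tensor and its shape are made up for illustration and are not part of tensor2tensor:

logits = tf.random.normal([8, 100])                  # batch of 8, 100 classes
sparse_probs, top_prob = top_k_softmax(logits, k=5)
# sparse_probs: shape [8, 100]; only (roughly) the top-5 softmax values per row
# survive and are renormalized to sum to 1.
# top_prob: shape [8], the largest softmax probability in each row.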

Example 2: apply_piecewise_monotonic_fn

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_min [as alias]
def apply_piecewise_monotonic_fn(self, wrapper, fn, boundaries, *args):
    valid_values = []
    for a in [self] + list(args):
      vs = []
      vs.append(a.lower)
      vs.append(a.upper)
      for b in boundaries:
        vs.append(
            tf.maximum(a.lower, tf.minimum(a.upper, b * tf.ones_like(a.lower))))
      valid_values.append(vs)
    outputs = []
    for inputs in itertools.product(*valid_values):
      outputs.append(fn(*inputs))
    outputs = tf.stack(outputs, axis=-1)
    return IntervalBounds(tf.reduce_min(outputs, axis=-1),
                          tf.reduce_max(outputs, axis=-1)) 
Developer: deepmind | Project: interval-bound-propagation | Lines: 18 | Source: bounds.py
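The idea behind this method is to evaluate a piecewise monotonic function at the interval endpoints and at its breakpoints, then take the element-wise minimum and maximum. The standalone sketch below illustrates that trick for tf.abs (whose only breakpoint is 0); it is a simplified illustration written for this article, not code from interval-bound-propagation:

lower = tf.constant([-2., -1., 0.5])
upper = tf.constant([-1.,  3., 2.0])
breakpoint = tf.zeros_like(lower)
candidates = tf.stack(
    [tf.abs(lower),
     tf.abs(upper),
     tf.abs(tf.maximum(lower, tf.minimum(upper, breakpoint)))],
    axis=-1)
abs_lower = tf.reduce_min(candidates, axis=-1)  # [1., 0., 0.5]
abs_upper = tf.reduce_max(candidates, axis=-1)  # [2., 3., 2.0]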

Example 3: _simplex_bounds

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_min [as alias]
def _simplex_bounds(mapped_vertices, mapped_centres, r, axis):
  """Calculates naive bounds on the given layer-mapped vertices.

  Args:
    mapped_vertices: Tensor of shape (num_vertices, *output_shape)
      or of shape (batch_size, num_vertices, *output_shape)
      containing the vertices in the layer's output space.
    mapped_centres: Tensor of shape (batch_size, *output_shape)
      containing the layer's nominal outputs.
    r: Scalar in [0, 1) specifying the radius (in vocab space) of the simplex.
    axis: Index of the `num_vertices` dimension of `mapped_vertices`.

  Returns:
    lb_out: Tensor of shape (batch_size, *output_shape) with lower bounds
      on the outputs of the affine layer.
    ub_out: Tensor of shape (batch_size, *output_shape) with upper bounds
      on the outputs of the affine layer.
  """
  # Use the negative of r, instead of the complement of r, as
  # we're shifting the input domain to be centred at the origin.
  lb_out = -r * mapped_centres + r * tf.reduce_min(mapped_vertices, axis=axis)
  ub_out = -r * mapped_centres + r * tf.reduce_max(mapped_vertices, axis=axis)
  return lb_out, ub_out 
Developer: deepmind | Project: interval-bound-propagation | Lines: 25 | Source: simplex_bounds.py
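A hypothetical call with made-up shapes (12 vertices, batch size 4, 16 output units):

mapped_vertices = tf.random.normal([4, 12, 16])
mapped_centres = tf.random.normal([4, 16])
lb, ub = _simplex_bounds(mapped_vertices, mapped_centres, r=0.1, axis=1)
# lb and ub both have shape [4, 16].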

Example 4: assert_box_normalized

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_min [as alias]
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
  """Asserts the input box tensor is normalized.

  Args:
    boxes: a tensor of shape [N, 4] where N is the number of boxes.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.1.

  Returns:
    a tf.Assert op which fails when the input box tensor is not normalized.

  Raises:
    ValueError: When the input box tensor is not normalized.
  """
  box_minimum = tf.reduce_min(boxes)
  box_maximum = tf.reduce_max(boxes)
  return tf.Assert(
      tf.logical_and(
          tf.less_equal(box_maximum, maximum_normalized_coordinate),
          tf.greater_equal(box_minimum, 0)),
      [boxes]) 
Developer: tensorflow | Project: models | Lines: 23 | Source: shape_utils.py
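A hypothetical usage sketch, gating downstream ops on the assertion in TF1 graph mode (the box values are made up):

boxes = tf.constant([[0.1, 0.2, 0.9, 0.8],
                     [0.0, 0.0, 1.0, 1.0]])
assert_op = assert_box_normalized(boxes)
with tf.control_dependencies([assert_op]):
  # Only computed if the assertion passes.
  areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])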

Example 5: _curvature_range

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_min [as alias]
def _curvature_range(self):
    """Curvature range.

    Returns:
      h_max_t, h_min_t ops
    """
    self._curv_win = tf.get_variable("curv_win",
                                     dtype=tf.float32,
                                     trainable=False,
                                     shape=[self.curvature_window_width,],
                                     initializer=tf.zeros_initializer)
    # We use log smoothing for curvature range
    self._curv_win = tf.scatter_update(self._curv_win,
                                       self._step % self.curvature_window_width,
                                       tf.log(self._grad_norm_squared))
    # Note here the iterations start from iteration 0
    valid_window = tf.slice(self._curv_win,
                            tf.constant([0,]),
                            tf.expand_dims(
                                tf.minimum(
                                    tf.constant(self.curvature_window_width),
                                    self._step + 1), dim=0))
    self._h_min_t = tf.reduce_min(valid_window)
    self._h_max_t = tf.reduce_max(valid_window)

    curv_range_ops = []
    with tf.control_dependencies([self._h_min_t, self._h_max_t]):
      avg_op = self._moving_averager.apply([self._h_min_t, self._h_max_t])
      with tf.control_dependencies([avg_op]):
        self._h_min = tf.exp(
            tf.identity(self._moving_averager.average(self._h_min_t)))
        self._h_max = tf.exp(
            tf.identity(self._moving_averager.average(self._h_max_t)))
        if self._sparsity_debias:
          self._h_min *= self._sparsity_avg
          self._h_max *= self._sparsity_avg
    curv_range_ops.append(avg_op)
    return curv_range_ops  # h_max_t, h_min_t 
Developer: tensorflow | Project: tensor2tensor | Lines: 40 | Source: yellowfin.py

Example 6: top_k_softmax

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_min [as alias]
def top_k_softmax(x, k):
  """Calculate softmax(x), select top-k and rescale to sum to 1."""
  x = tf.nn.softmax(x)
  top_x, _ = tf.nn.top_k(x, k=k+1)
  min_top = tf.reduce_min(top_x, axis=-1, keepdims=True)
  x = tf.nn.relu((x - min_top) + 1e-12)
  x /= tf.reduce_sum(x, axis=-1, keepdims=True)
  return x, tf.reduce_max(top_x, axis=-1) 
Developer: tensorflow | Project: tensor2tensor | Lines: 10 | Source: transformer_vae.py

Example 7: clip_logits

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_min [as alias]
def clip_logits(logits, config):
  logits_clip = getattr(config, "logits_clip", 0.)
  if logits_clip > 0:
    min_logit = tf.reduce_min(logits)
    return tf.minimum(logits - min_logit, logits_clip)
  else:
    return logits 
Developer: tensorflow | Project: tensor2tensor | Lines: 9 | Source: rl.py
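A hypothetical usage sketch; the ad-hoc _Config class below merely stands in for the hparams/config object the function expects, and only its logits_clip attribute is read:

class _Config(object):
  logits_clip = 10.0

logits = tf.random.normal([4, 6])
clipped = clip_logits(logits, _Config())
# Logits are shifted so their minimum is 0, then capped at logits_clip.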

Example 8: _compute_bbx_loss

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_min [as alias]
def _compute_bbx_loss(self, trans, pts, gt):
    """Compute bounding box loss."""
    oo = 1e5
    inside = tf.expand_dims(tf.cast(gt > 0.5, tf.float32), axis=1)
    trans = tf.expand_dims(trans, axis=2)
    pts = tf.expand_dims(pts, axis=1)
    distances = tf.reduce_sum(tf.square(trans - pts), axis=-1, keepdims=True)
    distances = inside * distances + (1 - inside) * oo
    min_dis = tf.reduce_min(distances, axis=2)
    return tf.reduce_mean(min_dis) 
Developer: tensorflow | Project: graphics | Lines: 12 | Source: models.py
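The masking idiom in this method (replace invalid entries with a large constant before reduce_min) is useful on its own. The minimal illustration below uses made-up values and is not part of the graphics project:

distances = tf.constant([[4., 2., 7.],
                         [1., 3., 5.]])
valid = tf.constant([[1., 0., 1.],
                     [0., 1., 1.]])
oo = 1e5
masked_min = tf.reduce_min(valid * distances + (1. - valid) * oo, axis=1)
# masked_min == [4., 3.]: the minimum taken over valid entries only.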

Example 9: debugprint

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_min [as alias]
def debugprint(x, name=''):
  """Small wrapper for tf.Print which prints summary statistics."""
  name += '\t' + x.name
  return tf.Print(x,
                  [tf.reduce_min(x), tf.reduce_mean(x), tf.reduce_max(x)],
                  name) 
Developer: magenta | Project: magenta | Lines: 8 | Source: layers.py
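A hypothetical usage sketch; in TF1 graph mode the min/mean/max statistics are printed whenever the returned tensor is evaluated:

x = tf.random.normal([32, 10], name='activations')
x = debugprint(x, name='post_dense')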

Example 10: _topk_greater

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_min [as alias]
def _topk_greater(bounds, k=5):
  # Bounds has shape [batch_size, num_specs].
  b = tf.nn.top_k(bounds, k=k, sorted=False).values
  return tf.reduce_min(b, axis=-1) > 0. 
Developer: deepmind | Project: interval-bound-propagation | Lines: 6 | Source: attacks.py
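A hypothetical usage sketch with made-up shapes:

bounds = tf.random.normal([16, 20])             # [batch_size, num_specs]
all_top5_positive = _topk_greater(bounds, k=5)  # boolean tensor of shape [16]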

Example 11: variable_summaries

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_min [as alias]
def variable_summaries(var, scope=""):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope(scope):
    with tf.name_scope("summaries"):
      mean = tf.reduce_mean(var)
      tf.summary.scalar("mean", mean)
      with tf.name_scope("stddev"):
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
      tf.summary.scalar("stddev", stddev)
      tf.summary.scalar("max", tf.reduce_max(var))
      tf.summary.scalar("min", tf.reduce_min(var))
      tf.summary.histogram("histogram", var) 
Developer: google-research | Project: language | Lines: 14 | Source: model_utils.py
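A hypothetical usage sketch inside TF1 model-building code (the variable name and shape are made up):

weights = tf.get_variable('dense_weights', shape=[128, 64])
variable_summaries(weights, scope='dense_layer')
merged_summaries = tf.summary.merge_all()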

Example 12: linear_interpolation

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_min [as alias]
def linear_interpolation(t, minimum, maximum):
  t_min = tf.reduce_min(t)
  t_max = tf.reduce_max(t)
  return minimum + (t - t_min) * (maximum - minimum) / (t_max - t_min) 
Developer: google-research | Project: language | Lines: 6 | Source: tensor_utils.py
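A small worked example with made-up values, rescaling a tensor into the range [0, 1]:

t = tf.constant([2., 4., 6., 10.])
scaled = linear_interpolation(t, 0., 1.)
# scaled == [0., 0.25, 0.5, 1.]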

Example 13: select_slate_greedy

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_min [as alias]
def select_slate_greedy(slate_size, s_no_click, s, q):
  """Selects the slate using the adaptive greedy algorithm.

  This algorithm corresponds to the method "GS" in
  Ie et al. https://arxiv.org/abs/1905.12767.

  Args:
    slate_size: int, the size of the recommendation slate.
    s_no_click: float tensor, the score for not clicking any document.
    s: [num_of_documents] tensor, the scores for clicking documents.
    q: [num_of_documents] tensor, the predicted q values for documents.

  Returns:
    [slate_size] tensor, the selected slate.
  """

  def argmax(v, mask):
    return tf.argmax(
        input=(v - tf.reduce_min(input_tensor=v) + 1) * mask, axis=0)

  numerator = tf.constant(0.)
  denominator = tf.constant(0.) + s_no_click
  mask = tf.ones(tf.shape(input=q)[0])

  def set_element(v, i, x):
    mask = tf.one_hot(i, tf.shape(input=v)[0])
    v_new = tf.ones_like(v) * x
    return tf.where(tf.equal(mask, 1), v_new, v)

  for _ in range(slate_size):
    k = argmax((numerator + s * q) / (denominator + s), mask)
    mask = set_element(mask, k, 0)
    numerator = numerator + tf.gather(s * q, k)
    denominator = denominator + tf.gather(s, k)

  output_slate = tf.where(tf.equal(mask, 0))
  return output_slate 
Developer: google-research | Project: recsim | Lines: 39 | Source: slate_decomp_q_agent.py
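A hypothetical usage sketch with made-up click scores and Q-values for 6 candidate documents:

s_no_click = tf.constant(1.0)
s = tf.constant([0.3, 0.8, 0.5, 0.1, 0.9, 0.4])   # click scores
q = tf.constant([1.2, 0.7, 2.0, 0.5, 1.5, 0.9])   # predicted Q-values
slate = select_slate_greedy(slate_size=3, s_no_click=s_no_click, s=s, q=q)
# slate holds the indices of the 3 greedily selected documents (shape [3, 1]).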

Example 14: variable_summaries

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_min [as alias]
def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var) 
Developer: iamvishnuks | Project: AudioNet | Lines: 13 | Source: retrain.py

Example 15: get_minimal_coverage_box

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_min [as alias]
def get_minimal_coverage_box(boxlist,
                             default_box=None,
                             scope=None):
  """Creates a single bounding box which covers all boxes in the boxlist.

  Args:
    boxlist: A Boxlist.
    default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`,
      this default box will be returned. If None, will use a default box of
      [[0., 0., 1., 1.]].
    scope: Name scope.

  Returns:
    A [1, 4] float32 tensor with a bounding box that tightly covers all the
    boxes in the box list. If the boxlist does not contain any boxes, the
    default box is returned.
  """
  with tf.name_scope(scope, 'CreateCoverageBox'):
    num_boxes = boxlist.num_boxes()

    def coverage_box(bboxes):
      y_min, x_min, y_max, x_max = tf.split(
          value=bboxes, num_or_size_splits=4, axis=1)
      y_min_coverage = tf.reduce_min(y_min, axis=0)
      x_min_coverage = tf.reduce_min(x_min, axis=0)
      y_max_coverage = tf.reduce_max(y_max, axis=0)
      x_max_coverage = tf.reduce_max(x_max, axis=0)
      return tf.stack(
          [y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage],
          axis=1)

    default_box = default_box or tf.constant([[0., 0., 1., 1.]])
    return tf.cond(
        tf.greater_equal(num_boxes, 1),
        true_fn=lambda: coverage_box(boxlist.get()),
        false_fn=lambda: default_box) 
Developer: tensorflow | Project: models | Lines: 38 | Source: box_list_ops.py
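A hypothetical usage sketch; BoxList is assumed to come from the TensorFlow Object Detection API (object_detection.core.box_list), and the box coordinates are made up:

# Assumes the TF Object Detection API is installed.
from object_detection.core import box_list

boxes = tf.constant([[0.1, 0.1, 0.4, 0.4],
                     [0.3, 0.2, 0.9, 0.8]])
coverage = get_minimal_coverage_box(box_list.BoxList(boxes))
# coverage is a [1, 4] tensor: [[0.1, 0.1, 0.9, 0.8]].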


Note: the tensorflow.compat.v1.reduce_min examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.