

Python v1.minimum Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.minimum method in Python. If you are wondering how exactly to use Python's v1.minimum, how to call it, or what working example code looks like, the curated examples below may help. You can also explore further usage examples of the containing module, tensorflow.compat.v1.


Fifteen code examples of the v1.minimum method are shown below, sorted by popularity by default.
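
Before diving into the examples, a minimal standalone sketch of tf.compat.v1.minimum itself (a hypothetical snippet, assuming TF 2.x with eager execution enabled): it returns the elementwise minimum of two tensors, broadcasting shapes as needed.

import tensorflow.compat.v1 as tf

x = tf.constant([1.0, 5.0, 3.0])
y = tf.constant(2.0)                # a scalar broadcasts against x
print(tf.minimum(x, y).numpy())     # [1. 2. 2.]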

Example 1: _make_update

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import minimum [as alias]
def _make_update(self):
    mss = []
    gsum = 0.0
    count = 0
    for sum_squared_grads in self._sum_squared_grads:
      ms = tf.sqrt(sum_squared_grads / self._num_squared_grads)
      gsum += tf.reduce_sum(ms)
      count += tf.reduce_sum(tf.ones_like(ms))
      mss.append(ms)
    gsum = gsum / count

    assignments = []
    for grad, var, save, sum_squared_grads, ms in zip(
        self._grads, self._vars, self._saves, self._sum_squared_grads, mss):
      decay_rate = tf.minimum(1.0, self._decay_rate*(ms/gsum))
      delta = (-self._learning_rate*grad / (ms + self._epsilon) +
               decay_rate*(save-var))
      assignments.append(var.assign_add(delta))
    return tf.group(assignments) 
Developer: deepmind, Project: lamb, Lines: 21, Source: dyneval.py

Example 2: padded_accuracy_topk

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import minimum [as alias]
def padded_accuracy_topk(predictions,
                         labels,
                         k,
                         weights_fn=common_layers.weights_nonzero):
  """Percentage of times that top-k predictions matches labels on non-0s."""
  with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)
    effective_k = tf.minimum(k,
                             common_layers.shape_list(padded_predictions)[-1])
    _, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
    outputs = tf.to_int32(outputs)
    padded_labels = tf.to_int32(padded_labels)
    padded_labels = tf.expand_dims(padded_labels, axis=-1)
    padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
    same = tf.to_float(tf.equal(outputs, padded_labels))
    same_topk = tf.reduce_sum(same, axis=-1)
    return same_topk, weights 
Developer: tensorflow, Project: tensor2tensor, Lines: 21, Source: metrics.py

Example 3: _quantize

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import minimum [as alias]
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q 
Developer: tensorflow, Project: tensor2tensor, Lines: 18, Source: diet.py

Example 4: min_total_num_images

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import minimum [as alias]
def min_total_num_images(stable_stage_num_images, transition_stage_num_images,
                         num_blocks):
  """Returns the minimum total number of images.

  Computes the minimum total number of images required to reach the desired
  `resolution`.

  Args:
    stable_stage_num_images: Number of images in the stable stage.
    transition_stage_num_images: Number of images in the transition stage.
    num_blocks: Number of network blocks.

  Returns:
    An integer of the minimum total number of images.
  """
  return (num_blocks * stable_stage_num_images +
          (num_blocks - 1) * transition_stage_num_images) 
Developer: magenta, Project: magenta, Lines: 19, Source: networks.py
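
As a hypothetical worked example (the numbers are illustrative, not taken from the project): with num_blocks=4 and 800,000 images in both the stable and transition stages, the function returns 4*800000 + 3*800000 = 5,600,000 images.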

Example 5: _generator_alpha

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import minimum [as alias]
def _generator_alpha(block_id, progress):
  """Returns the block output parameter for the generator network.

  The generator has N blocks with `block_id` = 1,2,...,N. Each block
  block_id outputs a fake data output(block_id). The generator output is a
  linear combination of all block outputs, i.e.
  SUM_block_id(output(block_id) * alpha(block_id, progress)) where
  alpha(block_id, progress) = _generator_alpha(block_id, progress). Note it
  guarantees that SUM_block_id(alpha(block_id, progress)) = 1 for any progress.

  With a fixed block_id, the plot of alpha(block_id, progress) against progress
  is a 'triangle' with its peak at (block_id - 1, 1).

  Args:
    block_id: An integer of generator block id.
    progress: A scalar float `Tensor` of training progress.

  Returns:
    A scalar float `Tensor` of block output parameter.
  """
  return tf.maximum(0.0,
                    tf.minimum(progress - (block_id - 2), block_id - progress)) 
Developer: magenta, Project: magenta, Lines: 24, Source: networks.py
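
A hypothetical quick check of the triangle shape (assuming TF 2.x eager execution and the _generator_alpha function above):

import tensorflow.compat.v1 as tf

# With block_id=2, alpha peaks at progress=1.0 and falls back to 0 at progress=0.0 and 2.0.
for progress in [0.0, 0.5, 1.0, 1.5, 2.0]:
    print(progress, float(_generator_alpha(2, tf.constant(progress))))
# prints alpha values 0.0, 0.5, 1.0, 0.5, 0.0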

Example 6: center_crop_resize_image

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import minimum [as alias]
def center_crop_resize_image(image, image_size):
  """Center-crop into a square and resize to image_size.

  Args:
    image: A 3-D image `Tensor`.
    image_size: int, Desired size. Crops the image to a square and resizes it
      to the requested size.

  Returns:
    A 4-D tensor of shape [1, image_size, image_size, 3] and dtype float32,
    with values in [0, 1].
  """
  shape = tf.shape(image)
  small_side = tf.minimum(shape[0], shape[1])
  image = tf.image.resize_image_with_crop_or_pad(image, small_side, small_side)
  image = tf.to_float(image) / 255.0

  image = tf.image.resize_images(image, tf.constant([image_size, image_size]))

  return tf.expand_dims(image, 0) 
Developer: magenta, Project: magenta, Lines: 22, Source: image_utils.py
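
A hypothetical usage sketch (assuming TF 2.x eager execution and the center_crop_resize_image function above; the input image is random and purely illustrative):

import tensorflow.compat.v1 as tf

image = tf.random.uniform([480, 640, 3], maxval=255.0)   # fake 480x640 RGB image
out = center_crop_resize_image(image, image_size=224)
print(out.shape)  # (1, 224, 224, 3), values scaled into [0, 1]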

Example 7: intersection

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import minimum [as alias]
def intersection(boxlist1, boxlist2, scope=None):
  """Compute pairwise intersection areas between boxes.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.

  Returns:
    a tensor with shape [N, M] representing pairwise intersections
  """
  with tf.name_scope(scope, 'Intersection'):
    y_min1, x_min1, y_max1, x_max1 = tf.split(
        value=boxlist1.get(), num_or_size_splits=4, axis=1)
    y_min2, x_min2, y_max2, x_max2 = tf.split(
        value=boxlist2.get(), num_or_size_splits=4, axis=1)
    all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
    all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
    intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
    all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
    all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
    intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
    return intersect_heights * intersect_widths 
Developer: JunweiLiang, Project: Object_Detection_Tracking, Lines: 25, Source: region_similarity_calculator.py
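
Since the example depends on the project's BoxList wrapper, here is a standalone sketch of the same tf.minimum / tf.maximum pattern on raw box tensors (hypothetical data, assuming TF 2.x eager execution):

import tensorflow.compat.v1 as tf

boxes1 = tf.constant([[0.0, 0.0, 2.0, 2.0]])   # [N, 4] as (y_min, x_min, y_max, x_max)
boxes2 = tf.constant([[1.0, 1.0, 3.0, 3.0]])   # [M, 4]
y_min1, x_min1, y_max1, x_max1 = tf.split(boxes1, 4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(boxes2, 4, axis=1)
heights = tf.maximum(0.0, tf.minimum(y_max1, tf.transpose(y_max2)) -
                     tf.maximum(y_min1, tf.transpose(y_min2)))
widths = tf.maximum(0.0, tf.minimum(x_max1, tf.transpose(x_max2)) -
                    tf.maximum(x_min1, tf.transpose(x_min2)))
print((heights * widths).numpy())  # [[1.]], the boxes overlap in a 1x1 square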

Example 8: apply_linear

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import minimum [as alias]
def apply_linear(self, wrapper, w, b):
    """Propagates the bounds through a linear layer.

    Args:
      wrapper: Contains prior bounds from a previous iteration.
      w: 2D tensor of shape (input_size, output_size) containing
        weights for the linear layer.
      b: 1D tensor of shape (output_size) containing biases for the linear
        layer, or `None` if no bias.

    Returns:
      Output bounds.
    """
    w_pos = tf.maximum(w, 0)
    w_neg = tf.minimum(w, 0)
    lb = (tf.matmul(self.lower_offset, w_pos) +
          tf.matmul(self.upper_offset, w_neg))
    ub = (tf.matmul(self.upper_offset, w_pos) +
          tf.matmul(self.lower_offset, w_neg))

    nominal_out = tf.matmul(self.nominal, w)
    if b is not None:
      nominal_out += b

    return RelativeIntervalBounds(lb, ub, nominal_out) 
Developer: deepmind, Project: interval-bound-propagation, Lines: 27, Source: relative_bounds.py
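
The tf.maximum(w, 0) / tf.minimum(w, 0) split is the standard way to push interval bounds through a linear map. A minimal standalone sketch with concrete numbers (hypothetical values, assuming TF 2.x eager execution; this uses plain interval bounds rather than the RelativeIntervalBounds offsets of the example above):

import tensorflow.compat.v1 as tf

w = tf.constant([[1.0, -2.0], [3.0, 0.5]])   # (input_size=2, output_size=2)
lower = tf.constant([[-1.0, 0.0]])           # elementwise lower bound on the input
upper = tf.constant([[1.0, 2.0]])            # elementwise upper bound on the input
w_pos, w_neg = tf.maximum(w, 0), tf.minimum(w, 0)
lb = tf.matmul(lower, w_pos) + tf.matmul(upper, w_neg)
ub = tf.matmul(upper, w_pos) + tf.matmul(lower, w_neg)
print(lb.numpy(), ub.numpy())  # [[-1. -2.]] [[7. 3.]]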

Example 9: concretize

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import minimum [as alias]
def concretize(self):
    """Returns lower and upper interval bounds."""
    lb = ub = None
    if self.lower is not None:
      lb = (
          tf.einsum('nsi,ni->ns',
                    self._reshape_to_rank(tf.maximum(self.lower.w, 0), 3),
                    self._reshape_to_rank(self.lower.lower, 2)) +
          tf.einsum('nsi,ni->ns',
                    self._reshape_to_rank(tf.minimum(self.lower.w, 0), 3),
                    self._reshape_to_rank(self.lower.upper, 2)))
      lb += self.lower.b
    if self.upper is not None:
      ub = (
          tf.einsum('nsi,ni->ns',
                    self._reshape_to_rank(tf.maximum(self.upper.w, 0), 3),
                    self._reshape_to_rank(self.upper.upper, 2)) +
          tf.einsum('nsi,ni->ns',
                    self._reshape_to_rank(tf.minimum(self.upper.w, 0), 3),
                    self._reshape_to_rank(self.upper.lower, 2)))
      ub += self.upper.b
    return bounds.IntervalBounds(lb, ub) 
Developer: deepmind, Project: interval-bound-propagation, Lines: 24, Source: crown.py

Example 10: apply_increasing_monotonic_fn

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import minimum [as alias]
def apply_increasing_monotonic_fn(self, wrapper, fn, *args):
    """Propagate CROWN bounds backward through a increasing monotonic fn."""
    # Function _get_monotonic_fn_bound returns matrix and bias term for linear
    # relaxation.
    (ub_scaling_matrix, lb_scaling_matrix,
     ub_bias, lb_bias) = self._get_monotonic_fn_bound(wrapper, fn)
    def _propagate_monotonic_fn(bound, ub_mult, lb_mult):
      # Matrix multiplication by a diagonal matrix.
      new_bound_w = ub_mult * ub_scaling_matrix + lb_mult * lb_scaling_matrix
      # Matrix vector product for the bias term. ub_bias or lb_bias might be 0
      # or a constant, or need broadcast. They will be handled optimally.
      b = self._matvec(ub_mult, ub_bias) + self._matvec(lb_mult, lb_bias)
      return fastlin.LinearExpression(w=new_bound_w, b=bound.b + b,
                                      lower=wrapper.input_bounds.lower,
                                      upper=wrapper.input_bounds.upper)
    # Multiplies w to upper or lower scaling terms according to its sign.
    ub_expr = _propagate_monotonic_fn(
        self.upper, tf.maximum(self.upper.w, 0),
        tf.minimum(self.upper.w, 0)) if self.upper else None
    lb_expr = _propagate_monotonic_fn(
        self.lower, tf.minimum(self.lower.w, 0),
        tf.maximum(self.lower.w, 0)) if self.lower else None
    return BackwardBounds(lb_expr, ub_expr) 
Developer: deepmind, Project: interval-bound-propagation, Lines: 25, Source: crown.py

Example 11: _concretize_bounds

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import minimum [as alias]
def _concretize_bounds(lower, upper):
    """Returns lower and upper interval bounds."""
    if len(lower.b.shape) == 2:
      equation = 'ijk,ij->ik'
    elif len(lower.b.shape) == 3:
      equation = 'ijnc,ij->inc'
    elif len(lower.b.shape) == 4:
      equation = 'ijhwc,ij->ihwc'
    else:
      raise NotImplementedError('Shape unsupported: {}'.format(lower.b.shape))

    lb = (tf.einsum(equation, tf.maximum(lower.w, 0), lower.lower) +
          tf.einsum(equation, tf.minimum(lower.w, 0), lower.upper) +
          lower.b)
    ub = (tf.einsum(equation, tf.maximum(upper.w, 0), upper.upper) +
          tf.einsum(equation, tf.minimum(upper.w, 0), upper.lower) +
          upper.b)
    return lb, ub 
Developer: deepmind, Project: interval-bound-propagation, Lines: 20, Source: fastlin.py

Example 12: linear_decay

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import minimum [as alias]
def linear_decay(step,
                 total_train_steps,
                 steps_or_fraction=0.1):
  """Linearly decay the learning rate to 0.

  If steps_or_fraction > 1, it is the absolute number of final steps
  over which to decay.  If it is <=1, then it is a fraction of the total number
  of training steps.

  Args:
    step: a tf.scalar representing the step we want the learning rate for.
    total_train_steps: a number, the total number of training steps.
    steps_or_fraction: a number


  Returns:
    a tf.Scalar, the learning rate for the step.
  """
  decay_steps = steps_or_fraction
  if steps_or_fraction <= 1:
    decay_steps *= total_train_steps
  step = tf.cast(step, tf.float32)
  return tf.minimum(1.0, (total_train_steps - step) / decay_steps) 
Developer: tensorflow, Project: mesh, Lines: 25, Source: learning_rate_schedules.py

Example 13: linear_warmup

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import minimum [as alias]
def linear_warmup(step,
                  total_train_steps,
                  steps_or_fraction=10000):
  """Linearly warm up the learning rate from 0.

  If steps_or_fraction > 1, it is the absolute number of initial steps over
  which to warm up.  If it is <=1, then it is a fraction of the total number of
  training steps.

  Args:
    step: a tf.scalar representing the step we want the learning rate for.
    total_train_steps: a number, the total number of training steps.
    steps_or_fraction: a number


  Returns:
    a tf.Scalar, the learning rate for the step.
  """
  warmup_steps = steps_or_fraction
  if steps_or_fraction <= 1:
    warmup_steps *= total_train_steps
  step = tf.cast(step, tf.float32)
  return tf.minimum(1.0, step / warmup_steps) 
Developer: tensorflow, Project: mesh, Lines: 25, Source: learning_rate_schedules.py
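
A hypothetical quick check of both schedules (assuming TF 2.x eager execution, the two functions above, and their default steps_or_fraction values):

for step in [0, 5000, 10000, 95000, 100000]:
    print(step,
          float(linear_warmup(step, total_train_steps=100000)),  # 0.0, 0.5, 1.0, 1.0, 1.0
          float(linear_decay(step, total_train_steps=100000)))   # 1.0, 1.0, 1.0, 0.5, 0.0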

Example 14: random_prefix_noise_mask

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import minimum [as alias]
def random_prefix_noise_mask(length, noise_density):
  """First part of the sequence is noise (for prefix_lm).

  The length of the prefix is chosen uniformly between [1, length)
  noise_density must be 0.5
  TODO(noam): figure out some distribution to use if noise_density != 0.5

  Args:
    length: an int32 scalar
    noise_density: a float - must equal 0.5

  Returns:
    a boolean tensor with shape [length]
  """
  if noise_density != 0.5:
    raise NotImplementedError(
        'noise density must equal 0.5 for random_prefix_noise_mask')
  max_input_tokens = length - 1
  min_input_tokens = tf.minimum(max_input_tokens, 1)
  num_input_tokens = tf.random.uniform(
      [], minval=min_input_tokens, maxval=max_input_tokens + 1, dtype=tf.int32)
  return tf.range(length, dtype=tf.int32) < num_input_tokens 
Developer: google-research, Project: text-to-text-transfer-transformer, Lines: 24, Source: preprocessors.py
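
A hypothetical usage sketch (assuming TF 2.x eager execution and the function above; the prefix length is sampled randomly, so the printed mask will vary from run to run):

mask = random_prefix_noise_mask(length=8, noise_density=0.5)
print(mask.numpy())  # e.g. [ True  True  True False False False False False]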

Example 15: clip_boxes_graph

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import minimum [as alias]
def clip_boxes_graph(boxes, window):
    """
    boxes: [N, (y1, x1, y2, x2)]
    window: [4] in the form y1, x1, y2, x2
    """
    # Split
    wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
    # Clip
    y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
    x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
    y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
    x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
    clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
    clipped.set_shape((clipped.shape[0], 4))
    return clipped 
Developer: OCR-D, Project: ocrd_anybaseocr, Lines: 18, Source: model.py
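
A hypothetical usage sketch (assuming TF 2.x eager execution and the clip_boxes_graph function above), clipping a box that extends past a unit window:

import tensorflow.compat.v1 as tf

boxes = tf.constant([[-0.1, 0.2, 1.3, 0.8]])   # [N, (y1, x1, y2, x2)]
window = tf.constant([0.0, 0.0, 1.0, 1.0])     # (y1, x1, y2, x2)
print(clip_boxes_graph(boxes, window).numpy()) # [[0.  0.2 1.  0.8]]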


Note: the tensorflow.compat.v1.minimum method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.