

Python v1.maximum method code examples

This article collects typical usage examples of the tensorflow.compat.v1.maximum method in Python. If you are wondering what v1.maximum does, how to call it, or what working examples look like, the curated code examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module.


The following presents 15 code examples of the v1.maximum method, sorted by popularity by default.
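
Before the examples, a quick standalone sketch of what tf.compat.v1.maximum itself does: it computes the element-wise maximum of two tensors with NumPy-style broadcasting (the tensors below are illustrative only).

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant([[-1.0, 2.0], [3.0, -4.0]])
relu_like = tf.maximum(x, 0.0)                      # clamp negatives to zero
row_floor = tf.maximum(x, tf.constant([0.5, 1.5]))  # broadcasts across rows

with tf.Session() as sess:
  print(sess.run(relu_like))  # [[0. 2.] [3. 0.]]
  print(sess.run(row_floor))  # [[0.5 2. ] [3.  1.5]]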

Example 1: average_sharded_losses

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import maximum [as an alias]
def average_sharded_losses(sharded_losses):
  """Average losses across datashards.

  Args:
    sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
      can be a single Tensor or a 2-tuple (numerator and denominator).

  Returns:
    losses: dict<str loss_name, Tensor avg_loss>
  """
  losses = {}
  for loss_name in sorted(sharded_losses[0]):
    all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
    if isinstance(all_shards[0], tuple):
      sharded_num, sharded_den = zip(*all_shards)
      mean_loss = (
          tf.add_n(sharded_num) / tf.maximum(
              tf.cast(1.0, sharded_den[0].dtype), tf.add_n(sharded_den)))
    else:
      mean_loss = tf.reduce_mean(all_shards)

    losses[loss_name] = mean_loss
  return losses 
Author: tensorflow, Project: tensor2tensor, Lines of code: 25, Source file: t2t_model.py
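
A minimal usage sketch (not from tensor2tensor; the shard values are made up), assuming average_sharded_losses above and its tensorflow.compat.v1 import are in scope. Two shards each report a "training" loss as a (numerator, denominator) pair:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

sharded_losses = [
    {"training": (tf.constant(6.0), tf.constant(2.0))},  # shard 0
    {"training": (tf.constant(4.0), tf.constant(2.0))},  # shard 1
]
losses = average_sharded_losses(sharded_losses)

with tf.Session() as sess:
  # (6 + 4) / max(1, 2 + 2) = 2.5
  print(sess.run(losses["training"]))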

Example 2: bottleneck

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import maximum [as an alias]
def bottleneck(self, x):
    hparams = self.hparams
    z_size = hparams.bottleneck_bits
    x_shape = common_layers.shape_list(x)
    with tf.variable_scope("vae"):
      mu = tf.layers.dense(x, z_size, name="mu")
      if hparams.mode != tf.estimator.ModeKeys.TRAIN:
        return mu, 0.0  # No sampling or kl loss on eval.
      log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
      epsilon = tf.random_normal(x_shape[:-1] + [z_size])
      z = mu + tf.exp(log_sigma / 2) * epsilon
      kl = 0.5 * tf.reduce_mean(
          tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
      free_bits = z_size // 4
      kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss * hparams.kl_beta 
Author: tensorflow, Project: tensor2tensor, Lines of code: 18, Source file: autoencoders.py
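
The tf.maximum call above implements the "free bits" hinge: per-example KL below the free_bits budget contributes no penalty. A minimal sketch with illustrative KL values:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

kl = tf.constant([0.5, 3.0, 7.0])  # hypothetical per-example KL terms
free_bits = 2.0
kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))

with tf.Session() as sess:
  print(sess.run(kl_loss))  # mean([0.0, 1.0, 5.0]) = 2.0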

Example 3: rank_loss

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import maximum [as an alias]
def rank_loss(sentence_emb, image_emb, margin=0.2):
  """Experimental rank loss, thanks to kkurach@ for the code."""
  with tf.name_scope("rank_loss"):
    # Normalize first as this is assumed in cosine similarity later.
    sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
    image_emb = tf.nn.l2_normalize(image_emb, 1)
    # Both sentence_emb and image_emb have size [batch, depth].
    scores = tf.matmul(image_emb, tf.transpose(sentence_emb))  # [batch, batch]
    diagonal = tf.diag_part(scores)  # [batch]
    cost_s = tf.maximum(0.0, margin - diagonal + scores)  # [batch, batch]
    cost_im = tf.maximum(
        0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)  # [batch, batch]
    # Clear diagonals.
    batch_size = tf.shape(sentence_emb)[0]
    empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
    cost_s *= empty_diagonal_mat
    cost_im *= empty_diagonal_mat
    return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im) 
Author: tensorflow, Project: tensor2tensor, Lines of code: 20, Source file: slicenet.py
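
A minimal usage sketch with random embeddings (shapes are illustrative), assuming rank_loss above and its tensorflow.compat.v1 import are in scope:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

sentence_emb = tf.random_normal([8, 128])
image_emb = tf.random_normal([8, 128])
loss = rank_loss(sentence_emb, image_emb, margin=0.2)

with tf.Session() as sess:
  print(sess.run(loss))  # a non-negative scalar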

Example 4: lengths_to_area_mask

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import maximum [as an alias]
def lengths_to_area_mask(feature_length, length, max_area_size):
  """Generates a non-padding mask for areas based on lengths.

  Args:
    feature_length: a tensor of [batch_size]
    length: the length of the batch
    max_area_size: the maximum area size considered
  Returns:
    mask: a tensor in shape of [batch_size, num_areas]
  """

  paddings = tf.cast(tf.expand_dims(
      tf.logical_not(
          tf.sequence_mask(feature_length, maxlen=length)), 2), tf.float32)
  _, _, area_sum, _, _ = compute_area_features(paddings,
                                               max_area_width=max_area_size)
  mask = tf.squeeze(tf.logical_not(tf.cast(area_sum, tf.bool)), [2])
  return mask 
Author: tensorflow, Project: tensor2tensor, Lines of code: 20, Source file: area_attention.py

Example 5: get_max_num_classes

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import maximum [as an alias]
def get_max_num_classes(self):
    """Compute the maximum number of classes any subtask has.

    This is useful for modifying the size of the softmax to include the output
    labels for the classification tasks. Currently, labels from different tasks
    are overloaded.

    Returns:
      num: Highest number of output classes in any text classification sub-task
        within this MultiProblem.
    """
    num = 0
    for task in self.task_list:
      if hasattr(task, "num_classes"):
        if num < task.num_classes:
          num = task.num_classes

    return num 
Author: tensorflow, Project: tensor2tensor, Lines of code: 20, Source file: multi_problem.py

Example 6: __init__

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import maximum [as an alias]
def __init__(self, regularizers_to_group):
    """Creates an instance.

    Args:
      regularizers_to_group: A list of generic_regularizers.OpRegularizer
        objects. Their regularization_vectors (and alive_vectors) are expected
        to be of the same length.

    Raises:
      ValueError: If regularizers_to_group has fewer than 2 elements.
    """
    if len(regularizers_to_group) < 2:
      raise ValueError('Groups must be of at least size 2.')

    first = regularizers_to_group[0]
    regularization_vector = first.regularization_vector
    alive_vector = first.alive_vector
    for index in range(1, len(regularizers_to_group)):
      regularizer = regularizers_to_group[index]
      regularization_vector = tf.maximum(regularization_vector,
                                         regularizer.regularization_vector)
      alive_vector = tf.logical_or(alive_vector, regularizer.alive_vector)
    self._regularization_vector = regularization_vector
    self._alive_vector = alive_vector 
Author: google-research, Project: morph-net, Lines of code: 26, Source file: grouping_regularizers.py
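
The grouping itself is just an element-wise tf.maximum over regularization vectors and a tf.logical_or over alive vectors. A minimal sketch with plain tensors instead of morph-net OpRegularizer objects (values are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

reg_a = tf.constant([0.1, 0.8, 0.0])
reg_b = tf.constant([0.5, 0.2, 0.3])
alive_a = tf.constant([True, True, False])
alive_b = tf.constant([True, False, True])

grouped_reg = tf.maximum(reg_a, reg_b)           # [0.5, 0.8, 0.3]
grouped_alive = tf.logical_or(alive_a, alive_b)  # [True, True, True]

with tf.Session() as sess:
  print(sess.run([grouped_reg, grouped_alive]))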

Example 7: _lower_bound

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import maximum [as an alias]
def _lower_bound(inputs, bound, name=None):
    """Same as tf.maximum, but with helpful gradient for inputs < bound.

    The gradient is overwritten so that it is passed through if the input is not
    hitting the bound. If it is, only gradients that push `inputs` higher than
    the bound are passed through. No gradients are passed through to the bound.

    Args:
      inputs: input tensor
      bound: lower bound for the input tensor
      name: name for this op

    Returns:
      tf.maximum(inputs, bound)
    """
    with ops.name_scope(name, 'GDNLowerBound', [inputs, bound]) as scope:
      inputs = ops.convert_to_tensor(inputs, name='inputs')
      bound = ops.convert_to_tensor(bound, name='bound')
      with ops.get_default_graph().gradient_override_map(
          {'Maximum': 'GDNLowerBound'}):
        return math_ops.maximum(inputs, bound, name=scope) 
Author: google-research, Project: tf-slim, Lines of code: 23, Source file: layers.py
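
The gradient_override_map above assumes a gradient named 'GDNLowerBound' is registered elsewhere in the same file. A sketch of what such a registration could look like; the exact pass-through condition is an assumption modeled on GDN layers in other TensorFlow codebases, not taken from tf-slim:

from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops

@ops.RegisterGradient('GDNLowerBound')
def _lower_bound_grad(op, grad):
  # Pass the gradient through when the input is above the bound, or when the
  # gradient would push the clipped input upward; block it otherwise.
  inputs, bound = op.inputs[0], op.inputs[1]
  pass_through = math_ops.logical_or(inputs >= bound, grad < 0)
  return [math_ops.cast(pass_through, grad.dtype) * grad, None]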

Example 8: intersection

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import maximum [as an alias]
def intersection(boxlist1, boxlist2, scope=None):
  """Compute pairwise intersection areas between boxes.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.

  Returns:
    a tensor with shape [N, M] representing pairwise intersections
  """
  with tf.name_scope(scope, 'Intersection'):
    y_min1, x_min1, y_max1, x_max1 = tf.split(
        value=boxlist1.get(), num_or_size_splits=4, axis=1)
    y_min2, x_min2, y_max2, x_max2 = tf.split(
        value=boxlist2.get(), num_or_size_splits=4, axis=1)
    all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
    all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
    intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
    all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
    all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
    intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
    return intersect_heights * intersect_widths 
Author: JunweiLiang, Project: Object_Detection_Tracking, Lines of code: 25, Source file: region_similarity_calculator.py
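
A minimal usage sketch. The real BoxList class comes from the object-detection codebase; the tiny stand-in below, exposing only a get() method, is purely for illustration (boxes are [ymin, xmin, ymax, xmax]):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

class _FakeBoxList(object):  # hypothetical stand-in, not the real BoxList
  def __init__(self, boxes):
    self._boxes = boxes

  def get(self):
    return self._boxes

boxlist1 = _FakeBoxList(tf.constant([[0.0, 0.0, 2.0, 2.0]]))
boxlist2 = _FakeBoxList(tf.constant([[1.0, 1.0, 3.0, 3.0],
                                     [5.0, 5.0, 6.0, 6.0]]))
areas = intersection(boxlist1, boxlist2)

with tf.Session() as sess:
  print(sess.run(areas))  # [[1.0, 0.0]]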

Example 9: apply_linear

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import maximum [as an alias]
def apply_linear(self, wrapper, w, b):
    """Propagates the bounds through a linear layer.

    Args:
      wrapper: Contains prior bounds from a previous iteration.
      w: 2D tensor of shape (input_size, output_size) containing
        weights for the linear layer.
      b: 1D tensor of shape (output_size) containing biases for the linear
        layer, or `None` if no bias.

    Returns:
      Output bounds.
    """
    w_pos = tf.maximum(w, 0)
    w_neg = tf.minimum(w, 0)
    lb = (tf.matmul(self.lower_offset, w_pos) +
          tf.matmul(self.upper_offset, w_neg))
    ub = (tf.matmul(self.upper_offset, w_pos) +
          tf.matmul(self.lower_offset, w_neg))

    nominal_out = tf.matmul(self.nominal, w)
    if b is not None:
      nominal_out += b

    return RelativeIntervalBounds(lb, ub, nominal_out) 
Author: deepmind, Project: interval-bound-propagation, Lines of code: 27, Source file: relative_bounds.py
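
The w_pos/w_neg split is standard interval arithmetic: positive weights couple lower input offsets to the lower output bound, and negative weights couple upper offsets to it (symmetrically for the upper bound). A minimal sketch with plain tensors, outside the interval-bound-propagation classes (values are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

w = tf.constant([[1.0, -2.0], [3.0, 0.5]])  # (input_size=2, output_size=2)
lower_offset = tf.constant([[-1.0, 0.0]])   # per-input lower offsets
upper_offset = tf.constant([[1.0, 2.0]])    # per-input upper offsets

w_pos = tf.maximum(w, 0)
w_neg = tf.minimum(w, 0)
lb = tf.matmul(lower_offset, w_pos) + tf.matmul(upper_offset, w_neg)
ub = tf.matmul(upper_offset, w_pos) + tf.matmul(lower_offset, w_neg)

with tf.Session() as sess:
  print(sess.run([lb, ub]))  # [[-1., -2.]], [[7., 3.]]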

Example 10: concretize

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import maximum [as an alias]
def concretize(self):
    """Returns lower and upper interval bounds."""
    lb = ub = None
    if self.lower is not None:
      lb = (
          tf.einsum('nsi,ni->ns',
                    self._reshape_to_rank(tf.maximum(self.lower.w, 0), 3),
                    self._reshape_to_rank(self.lower.lower, 2)) +
          tf.einsum('nsi,ni->ns',
                    self._reshape_to_rank(tf.minimum(self.lower.w, 0), 3),
                    self._reshape_to_rank(self.lower.upper, 2)))
      lb += self.lower.b
    if self.upper is not None:
      ub = (
          tf.einsum('nsi,ni->ns',
                    self._reshape_to_rank(tf.maximum(self.upper.w, 0), 3),
                    self._reshape_to_rank(self.upper.upper, 2)) +
          tf.einsum('nsi,ni->ns',
                    self._reshape_to_rank(tf.minimum(self.upper.w, 0), 3),
                    self._reshape_to_rank(self.upper.lower, 2)))
      ub += self.upper.b
    return bounds.IntervalBounds(lb, ub) 
Author: deepmind, Project: interval-bound-propagation, Lines of code: 24, Source file: crown.py

Example 11: apply_increasing_monotonic_fn

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import maximum [as an alias]
def apply_increasing_monotonic_fn(self, wrapper, fn, *args):
    """Propagate CROWN bounds backward through a increasing monotonic fn."""
    # Function _get_monotonic_fn_bound returns matrix and bias term for linear
    # relaxation.
    (ub_scaling_matrix, lb_scaling_matrix,
     ub_bias, lb_bias) = self._get_monotonic_fn_bound(wrapper, fn)
    def _propagate_monotonic_fn(bound, ub_mult, lb_mult):
      # Matrix multiplication by a diagonal matrix.
      new_bound_w = ub_mult * ub_scaling_matrix + lb_mult * lb_scaling_matrix
      # Matrix vector product for the bias term. ub_bias or lb_bias might be 0
      # or a constant, or need broadcast. They will be handled optimally.
      b = self._matvec(ub_mult, ub_bias) + self._matvec(lb_mult, lb_bias)
      return fastlin.LinearExpression(w=new_bound_w, b=bound.b + b,
                                      lower=wrapper.input_bounds.lower,
                                      upper=wrapper.input_bounds.upper)
    # Multiplies w to upper or lower scaling terms according to its sign.
    ub_expr = _propagate_monotonic_fn(
        self.upper, tf.maximum(self.upper.w, 0),
        tf.minimum(self.upper.w, 0)) if self.upper else None
    lb_expr = _propagate_monotonic_fn(
        self.lower, tf.minimum(self.lower.w, 0),
        tf.maximum(self.lower.w, 0)) if self.lower else None
    return BackwardBounds(lb_expr, ub_expr) 
Author: deepmind, Project: interval-bound-propagation, Lines of code: 25, Source file: crown.py

Example 12: _concretize_bounds

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import maximum [as an alias]
def _concretize_bounds(lower, upper):
    """Returns lower and upper interval bounds."""
    if len(lower.b.shape) == 2:
      equation = 'ijk,ij->ik'
    elif len(lower.b.shape) == 3:
      equation = 'ijnc,ij->inc'
    elif len(lower.b.shape) == 4:
      equation = 'ijhwc,ij->ihwc'
    else:
      raise NotImplementedError('Shape unsupported: {}'.format(lower.b.shape))

    lb = (tf.einsum(equation, tf.maximum(lower.w, 0), lower.lower) +
          tf.einsum(equation, tf.minimum(lower.w, 0), lower.upper) +
          lower.b)
    ub = (tf.einsum(equation, tf.maximum(upper.w, 0), upper.upper) +
          tf.einsum(equation, tf.minimum(upper.w, 0), upper.lower) +
          upper.b)
    return lb, ub 
Author: deepmind, Project: interval-bound-propagation, Lines of code: 20, Source file: fastlin.py

Example 13: get_noised_result

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import maximum [as an alias]
def get_noised_result(self, sample_state, global_state):
    """See base class."""
    noised_vectors, sum_state = self._sum_query.get_noised_result(
        sample_state.sum_state, global_state.sum_state)
    del sum_state  # To be set explicitly later when we know the new clip.

    new_l2_norm_clip, new_quantile_estimator_state = (
        self._quantile_estimator_query.get_noised_result(
            sample_state.quantile_estimator_state,
            global_state.quantile_estimator_state))

    new_l2_norm_clip = tf.maximum(new_l2_norm_clip, 0.0)
    new_sum_stddev = new_l2_norm_clip * global_state.noise_multiplier
    new_sum_query_state = self._sum_query.make_global_state(
        l2_norm_clip=new_l2_norm_clip,
        stddev=new_sum_stddev)

    new_global_state = self._GlobalState(
        global_state.noise_multiplier,
        new_sum_query_state,
        new_quantile_estimator_state)

    return noised_vectors, new_global_state 
Author: tensorflow, Project: privacy, Lines of code: 25, Source file: quantile_adaptive_clip_sum_query.py

Example 14: truncated_rsqrt

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import maximum [as an alias]
def truncated_rsqrt(step,
                    total_train_steps,
                    warmup_steps=10000):
  """Noam's favorite learning-rate schedule.

  rsqrt(max(step_num, warmup_steps))

  Args:
    step: a tf.scalar representing the step we want the learning rate for.
    total_train_steps: a number, the total number of training steps.
    warmup_steps: a number

  Returns:
    a tf.Scalar, the learning rate for the step.
  """
  del total_train_steps
  step_num = tf.cast(step, tf.float32)
  return tf.math.rsqrt(tf.maximum(step_num, warmup_steps)) 
Author: tensorflow, Project: mesh, Lines of code: 20, Source file: learning_rate_schedules.py
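
A minimal usage sketch, assuming truncated_rsqrt above and its tensorflow.compat.v1 import are in scope. The schedule stays flat at rsqrt(warmup_steps) during warmup, then decays as rsqrt(step):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

lrs = [truncated_rsqrt(step, total_train_steps=100000, warmup_steps=10000)
       for step in (100, 10000, 40000)]

with tf.Session() as sess:
  print(sess.run(lrs))  # [0.01, 0.01, 0.005]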

Example 15: calc_iou_tensor

# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import maximum [as an alias]
def calc_iou_tensor(boxes1, boxes2):
  """Calculation of IoU based on two boxes tensor.

  Reference to https://github.com/kuangliu/pytorch-ssd

  Args:
    boxes1: shape (N, 4), four coordinates of N boxes
    boxes2: shape (M, 4), four coordinates of M boxes
  Returns:
    IoU: shape (N, M), IoU of the i-th box in `boxes1` and j-th box in `boxes2`
  """
  b1_left, b1_top, b1_right, b1_bottom = tf.split(boxes1, 4, axis=1)
  b2_left, b2_top, b2_right, b2_bottom = tf.split(boxes2, 4, axis=1)

  # Shape of intersect_* (N, M)
  intersect_left = tf.maximum(b1_left, tf.transpose(b2_left))
  intersect_top = tf.maximum(b1_top, tf.transpose(b2_top))
  intersect_right = tf.minimum(b1_right, tf.transpose(b2_right))
  intersect_bottom = tf.minimum(b1_bottom, tf.transpose(b2_bottom))

  boxes1_area = (b1_right - b1_left) * (b1_bottom - b1_top)
  boxes2_area = (b2_right - b2_left) * (b2_bottom - b2_top)

  intersect = tf.multiply(tf.maximum((intersect_right - intersect_left), 0),
                          tf.maximum((intersect_bottom - intersect_top), 0))
  union = boxes1_area + tf.transpose(boxes2_area) - intersect
  iou = intersect / union

  return iou 
Author: tensorflow, Project: benchmarks, Lines of code: 31, Source file: ssd_dataloader.py
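
A minimal usage sketch with toy boxes, assuming calc_iou_tensor above and its tensorflow.compat.v1 import are in scope (coordinates follow the [left, top, right, bottom] split order used in the function):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

boxes1 = tf.constant([[0.0, 0.0, 2.0, 2.0]])
boxes2 = tf.constant([[1.0, 1.0, 3.0, 3.0],
                      [0.0, 0.0, 2.0, 2.0]])
iou = calc_iou_tensor(boxes1, boxes2)

with tf.Session() as sess:
  print(sess.run(iou))  # approx [[0.143, 1.0]] (1/7 and exact overlap)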


Note: The tensorflow.compat.v1.maximum method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please follow each project's license when distributing or using the code; do not reproduce without permission.