

Python clip_ops.clip_by_value Method Code Examples

This article collects typical usage examples of the tensorflow.python.ops.clip_ops.clip_by_value method in Python. If you are wondering how to use clip_ops.clip_by_value, or looking for examples of it in practice, the curated code samples below may help. You can also explore other usage examples from its containing module, tensorflow.python.ops.clip_ops.


The sections below present 15 code examples of the clip_ops.clip_by_value method, sorted by popularity by default.

Example 1: clip

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_value [as alias]
def clip(x, min_value, max_value):
  """Element-wise value clipping.

  Arguments:
      x: Tensor or variable.
      min_value: Python float or integer.
      max_value: Python float or integer.

  Returns:
      A tensor.
  """
  if max_value is not None and max_value < min_value:
    max_value = min_value
  if max_value is None:
    max_value = np.inf
  min_value = _to_tensor(min_value, x.dtype.base_dtype)
  max_value = _to_tensor(max_value, x.dtype.base_dtype)
  return clip_ops.clip_by_value(x, min_value, max_value) 
Developer: ryfeus, Project: lambda-packs, Lines: 20, Source: backend.py
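A minimal usage sketch (assuming TensorFlow 1.x with a Session; tf.keras.backend.clip is the public counterpart of the clip shown above):

import tensorflow as tf

x = tf.constant([-2.0, 0.5, 3.0])
clipped = tf.keras.backend.clip(x, min_value=0.0, max_value=1.0)  # element-wise clamp to [0, 1]
with tf.Session() as sess:
    print(sess.run(clipped))  # [0.  0.5 1. ]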

Example 2: relu

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_value [as alias]
def relu(x, alpha=0., max_value=None):
  """Rectified linear unit.

  With default values, it returns element-wise `max(x, 0)`.

  Arguments:
      x: A tensor or variable.
      alpha: A scalar, slope of negative section (default=`0.`).
      max_value: Saturation threshold.

  Returns:
      A tensor.
  """
  if alpha != 0.:
    negative_part = nn.relu(-x)
  x = nn.relu(x)
  if max_value is not None:
    max_value = _to_tensor(max_value, x.dtype.base_dtype)
    zero = _to_tensor(0., x.dtype.base_dtype)
    x = clip_ops.clip_by_value(x, zero, max_value)
  if alpha != 0.:
    alpha = _to_tensor(alpha, x.dtype.base_dtype)
    x -= alpha * negative_part
  return x 
Developer: ryfeus, Project: lambda-packs, Lines: 26, Source: backend.py
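A minimal usage sketch (assuming TensorFlow 1.x; tf.keras.backend.relu is the public counterpart). With max_value set, the clip turns the activation into a saturating, ReLU6-style unit:

import tensorflow as tf

x = tf.constant([-1.0, 2.0, 10.0])
y = tf.keras.backend.relu(x, alpha=0.1, max_value=6.0)  # leaky below 0, clipped at 6
with tf.Session() as sess:
    print(sess.run(y))  # [-0.1  2.   6. ]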

Example 3: binary_crossentropy

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_value [as alias]
def binary_crossentropy(output, target, from_logits=False):
  """Binary crossentropy between an output tensor and a target tensor.

  Arguments:
      output: A tensor.
      target: A tensor with the same shape as `output`.
      from_logits: Whether `output` is expected to be a logits tensor.
          By default, we consider that `output`
          encodes a probability distribution.

  Returns:
      A tensor.
  """
  # Note: nn.sigmoid_cross_entropy_with_logits
  # expects logits, Keras expects probabilities.
  if not from_logits:
    # transform back to logits
    epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)
    output = clip_ops.clip_by_value(output, epsilon, 1 - epsilon)
    output = math_ops.log(output / (1 - output))
  return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output) 
Developer: ryfeus, Project: lambda-packs, Lines: 23, Source: backend.py
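A sketch of the probability-to-logit inversion above (assuming TensorFlow 1.x): clipping to [epsilon, 1 - epsilon] keeps log(p / (1 - p)) finite even when a predicted probability is exactly 0 or 1:

import tensorflow as tf

p = tf.constant([0.0, 0.5, 1.0])
eps = 1e-7
p_safe = tf.clip_by_value(p, eps, 1.0 - eps)
logits = tf.log(p_safe / (1.0 - p_safe))  # finite at both endpoints
with tf.Session() as sess:
    print(sess.run(logits))  # roughly [-16.1  0.  16.1]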

Example 4: hard_sigmoid

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_value [as alias]
def hard_sigmoid(x):
  """Segment-wise linear approximation of sigmoid.

  Faster than sigmoid.
  Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
  In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  x = (0.2 * x) + 0.5
  zero = _to_tensor(0., x.dtype.base_dtype)
  one = _to_tensor(1., x.dtype.base_dtype)
  x = clip_ops.clip_by_value(x, zero, one)
  return x 
Developer: ryfeus, Project: lambda-packs, Lines: 20, Source: backend.py
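A sketch comparing the approximation with the exact sigmoid (assuming TensorFlow 1.x; tf.keras.backend.hard_sigmoid is the public counterpart):

import tensorflow as tf

x = tf.constant([-3.0, 0.0, 3.0])
hard = tf.keras.backend.hard_sigmoid(x)  # 0.2 * x + 0.5, clipped to [0, 1]
soft = tf.sigmoid(x)
with tf.Session() as sess:
    print(sess.run(hard))  # [0.    0.5   1.   ]
    print(sess.run(soft))  # [0.047 0.5   0.953]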

Example 5: safe_cumprod

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_value [as alias]
def safe_cumprod(x, *args, **kwargs):
    """Computes cumprod of x in logspace using cumsum to avoid underflow.

    The cumprod function and its gradient can result in numerical instabilities
    when its argument has very small and/or zero values.  As long as the argument
    is all positive, we can instead compute the cumulative product as
    exp(cumsum(log(x))).  This function can be called identically to tf.cumprod.

    Args:
      x: Tensor to take the cumulative product of.
      *args: Passed on to cumsum; these are identical to those in cumprod.
      **kwargs: Passed on to cumsum; these are identical to those in cumprod.
    Returns:
      Cumulative product of x.
    """
    with ops.name_scope(None, "SafeCumprod", [x]):
        x = ops.convert_to_tensor(x, name="x")
        tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
        return math_ops.exp(math_ops.cumsum(
            math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs)) 
Developer: HareeshBahuleyan, Project: tf-var-attention, Lines: 22, Source: attention_wrapper.py
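A NumPy analogue of the log-space trick (not the TF op itself): with an exact 0 in x, log(x) would be -inf, so each value is first clipped up to the smallest positive float and the running product stays finite and strictly positive, whereas a plain np.cumprod would collapse to exact zeros:

import numpy as np

x = np.array([0.9, 0.0, 0.8], dtype=np.float32)
tiny = np.finfo(np.float32).tiny
safe = np.exp(np.cumsum(np.log(np.clip(x, tiny, 1.0))))
print(safe)  # [0.9, ~1e-38, ~8e-39] -- no NaN or -inf anywhere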

Example 6: focal_loss

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_value [as alias]
def focal_loss(labels, logits, gamma=2.0):
    r"""
    Multi-class focal loss implementation: https://arxiv.org/abs/1708.02002
    :param labels: [batch_size, ] - Tensor of the correct class ids
    :param logits: [batch_size, num_classes] - Unscaled logits
    :param gamma: focal loss weight
    :return: [batch_size, ] - Tensor of average costs for each batch element
    """

    num_classes = array_ops.shape(logits)[1]
    onehot_labels = array_ops.one_hot(labels, num_classes, dtype=logits.dtype)

    p = nn_ops.softmax(logits)
    p = clip_ops.clip_by_value(p, 1e-7, 1.0 - 1e-7)

    f_loss = - onehot_labels * math_ops.pow(1.0 - p, gamma) * math_ops.log(p) \
             - (1 - onehot_labels) * math_ops.pow(p, gamma) * math_ops.log(1.0 - p)

    cost = math_ops.reduce_sum(f_loss, axis=1)
    return cost 
Developer: georgesterpu, Project: avsr-tf1, Lines: 22, Source: devel.py
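A minimal usage sketch (assuming TensorFlow 1.x, with the focal_loss definition above and its imports in scope): with gamma > 0, confident correct predictions contribute far less than uncertain or wrong ones:

import tensorflow as tf

labels = tf.constant([0, 1])
logits = tf.constant([[4.0, -4.0],    # confident and correct
                      [0.1, -0.1]])   # nearly uniform, slightly wrong
loss = focal_loss(labels, logits, gamma=2.0)
with tf.Session() as sess:
    print(sess.run(loss))  # first entry is orders of magnitude smaller than the second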

Example 7: mc_loss

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_value [as alias]
def mc_loss(labels, logits):
    r"""
    A multi-class cross-entropy loss
    :param labels: [batch_size, ] - Tensor of the correct class ids
    :param logits: [batch_size, num_classes] - Unscaled logits
    :return: [batch_size, ] - Tensor of average costs for each batch element
    """

    num_classes = array_ops.shape(logits)[1]
    onehot_labels = array_ops.one_hot(labels, num_classes, dtype=logits.dtype)

    p = nn_ops.softmax(logits)
    p = clip_ops.clip_by_value(p, 1e-7, 1.0 - 1e-7)

    ce_loss = - onehot_labels * math_ops.log(p) - (1 - onehot_labels) * math_ops.log(1.0-p)

    cost = math_ops.reduce_sum(ce_loss, axis=1)
    return cost 
Developer: georgesterpu, Project: avsr-tf1, Lines: 20, Source: devel.py
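A sketch relating this loss to the standard one (assuming TensorFlow 1.x, with mc_loss and its imports in scope): the first term alone is ordinary softmax cross-entropy, and the extra -(1 - onehot) * log(1 - p) term adds a penalty on every wrong class, so mc_loss is always at least as large:

import tensorflow as tf

labels = tf.constant([2])
logits = tf.constant([[0.5, 1.0, 3.0]])
ref = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
with tf.Session() as sess:
    print(sess.run(ref), sess.run(mc_loss(labels, logits)))  # mc_loss >= ref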

Example 8: safe_cumprod

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_value [as alias]
def safe_cumprod(x, *args, **kwargs):
  """Computes cumprod of x in logspace using cumsum to avoid underflow.

  The cumprod function and its gradient can result in numerical instabilities
  when its argument has very small and/or zero values.  As long as the argument
  is all positive, we can instead compute the cumulative product as
  exp(cumsum(log(x))).  This function can be called identically to tf.cumprod.

  Args:
    x: Tensor to take the cumulative product of.
    *args: Passed on to cumsum; these are identical to those in cumprod.
    **kwargs: Passed on to cumsum; these are identical to those in cumprod.
  Returns:
    Cumulative product of x.
  """
  with ops.name_scope(None, "SafeCumprod", [x]):
    x = ops.convert_to_tensor(x, name="x")
    tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
    return math_ops.exp(math_ops.cumsum(
        math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs)) 
Developer: yanghoonkim, Project: NQG_ASs2s, Lines: 22, Source: attention_wrapper_mod.py

Example 9: safe_cumprod

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_value [as alias]
def safe_cumprod(x, *args, **kwargs):
  """Computes cumprod of x in logspace using cumsum to avoid underflow.

  The cumprod function and its gradient can result in numerical instabilities
  when its argument has very small and/or zero values.  As long as the argument
  is all positive, we can instead compute the cumulative product as
  exp(cumsum(log(x))).  This function can be called identically to tf.cumprod.

  Args:
    x: Tensor to take the cumulative product of.
    *args: Passed on to cumsum; these are identical to those in cumprod.
    **kwargs: Passed on to cumsum; these are identical to those in cumprod.
  Returns:
    Cumulative product of x.
  """
  with ops.name_scope(None, "SafeCumprod", [x]):
    x = ops.convert_to_tensor(x, name="x")
    tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
    return math_ops.exp(
        math_ops.cumsum(
            math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs
        )
    ) 
Developer: NVIDIA, Project: OpenSeq2Seq, Lines: 25, Source: attention_wrapper.py

Example 10: sqrt

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_value [as alias]
def sqrt(x):
  """Element-wise square root.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  zero = _to_tensor(0., x.dtype.base_dtype)
  inf = _to_tensor(np.inf, x.dtype.base_dtype)
  x = clip_ops.clip_by_value(x, zero, inf)
  return math_ops.sqrt(x) 
Developer: ryfeus, Project: lambda-packs, Lines: 15, Source: backend.py
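A minimal sketch of why the clip matters (assuming TensorFlow 1.x; tf.keras.backend.sqrt is the public counterpart): tiny negative inputs, e.g. from floating-point rounding, become 0 instead of NaN:

import tensorflow as tf

x = tf.constant([-1e-7, 4.0])
with tf.Session() as sess:
    print(sess.run(tf.keras.backend.sqrt(x)))  # [0. 2.]
    print(sess.run(tf.sqrt(x)))                # [nan 2.]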

Example 11: categorical_crossentropy

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_value [as alias]
def categorical_crossentropy(output, target, from_logits=False):
  """Categorical crossentropy between an output tensor and a target tensor.

  Arguments:
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      target: A tensor of the same shape as `output`.
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.

  Returns:
      Output tensor.
  """
  # Note: nn.softmax_cross_entropy_with_logits
  # expects logits, Keras expects probabilities.
  if not from_logits:
    # scale preds so that the class probas of each sample sum to 1
    output /= math_ops.reduce_sum(
        output, reduction_indices=len(output.get_shape()) - 1, keep_dims=True)
    # manual computation of crossentropy
    epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)
    output = clip_ops.clip_by_value(output, epsilon, 1. - epsilon)
    return -math_ops.reduce_sum(
        target * math_ops.log(output),
        reduction_indices=len(output.get_shape()) - 1)
  else:
    return nn.softmax_cross_entropy_with_logits(labels=target, logits=output) 
Developer: ryfeus, Project: lambda-packs, Lines: 30, Source: backend.py
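A minimal usage sketch (assuming TensorFlow 1.x). Note this older copy takes (output, target), while the public tf.keras.backend.categorical_crossentropy takes (target, output); either way, the epsilon clip keeps log(output) finite even for a degenerate one-hot prediction:

import tensorflow as tf

target = tf.constant([[0.0, 1.0, 0.0]])
output = tf.constant([[0.0, 1.0, 0.0]])  # probabilities containing exact zeros
loss = tf.keras.backend.categorical_crossentropy(target, output)
with tf.Session() as sess:
    print(sess.run(loss))  # close to 0, not NaN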

Example 12: categorical_crossentropy

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_value [as alias]
def categorical_crossentropy(target, output, from_logits=False):
  """Categorical crossentropy between an output tensor and a target tensor.

  Arguments:
      target: A tensor of the same shape as `output`.
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.

  Returns:
      Output tensor.
  """
  # Note: nn.softmax_cross_entropy_with_logits
  # expects logits, Keras expects probabilities.
  if not from_logits:
    # scale preds so that the class probas of each sample sum to 1
    output /= math_ops.reduce_sum(
        output, axis=len(output.get_shape()) - 1, keep_dims=True)
    # manual computation of crossentropy
    epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
    return -math_ops.reduce_sum(
        target * math_ops.log(output),
        axis=len(output.get_shape()) - 1)
  else:
    return nn.softmax_cross_entropy_with_logits(labels=target, logits=output) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 30, Source: backend.py
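Same computation as Example 11, but with the arguments swapped to (target, output), matching the public Keras backend. A quick check (assuming TensorFlow 1.x):

import tensorflow as tf

t = tf.constant([[0.0, 1.0]])
o = tf.constant([[0.3, 0.7]])
with tf.Session() as sess:
    print(sess.run(tf.keras.backend.categorical_crossentropy(t, o)))  # [0.357] == -log(0.7)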

Example 13: sparse_categorical_crossentropy

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_value [as alias]
def sparse_categorical_crossentropy(target, output, from_logits=False):
  """Categorical crossentropy with integer targets.

  Arguments:
      target: An integer tensor.
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.

  Returns:
      Output tensor.
  """
  # Note: nn.sparse_softmax_cross_entropy_with_logits
  # expects logits, Keras expects probabilities.
  if not from_logits:
    epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
    output = math_ops.log(output)

  output_shape = output.get_shape()
  targets = cast(flatten(target), 'int64')
  logits = array_ops.reshape(output, [-1, int(output_shape[-1])])
  res = nn.sparse_softmax_cross_entropy_with_logits(
      labels=targets, logits=logits)
  if len(output_shape) == 3:
    # if our output includes timesteps we need to reshape
    return array_ops.reshape(res, array_ops.shape(output)[:-1])
  else:
    return res 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 33, Source: backend.py
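A minimal usage sketch (assuming TensorFlow 1.x; tf.keras.backend.sparse_categorical_crossentropy is the public counterpart): targets are plain class ids, no one-hot encoding needed:

import tensorflow as tf

target = tf.constant([1])
output = tf.constant([[0.3, 0.7]])  # softmax probabilities
loss = tf.keras.backend.sparse_categorical_crossentropy(target, output)
with tf.Session() as sess:
    print(sess.run(loss))  # [0.357] == -log(0.7)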

Example 14: _apply

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_value [as alias]
def _apply(self, grad, var):
        graph = None if context.executing_eagerly() else ops.get_default_graph()
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        base_lr_t = math_ops.cast(self._base_lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)

        lr_t = lr_t * tf.sqrt(1 - beta2_t) / (1 - beta1_t)

        lower_bound = lr_t * self._lower_bound
        upper_bound = lr_t * self._upper_bound

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(m, beta1_t * m + m_scaled_g_values, use_locking=self._use_locking)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad * grad) * (1 - beta2_t)
        v_t = state_ops.assign(v, beta2_t * v + v_scaled_g_values, use_locking=self._use_locking)

        # amsgrad
        vhat = self.get_slot(var, "vhat")
        if self._amsbound:
            vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
            v_sqrt = math_ops.sqrt(vhat_t)
        else:
            vhat_t = state_ops.assign(vhat, vhat)
            v_sqrt = math_ops.sqrt(v_t)


        # Compute the bounds
        step_size_bound = lr_t / (v_sqrt + epsilon_t)
        if isinstance(self.config.lower_bound, int) and self.config.lower_bound < 0:
            bounded_lr = m_t * step_size_bound
        else:
            bounded_lr = m_t * clip_by_value(step_size_bound, lower_bound, upper_bound)

        if self._arad:
            bounded_lr *= (self.config.arad_lambda or 1.0) * tf.abs(m_t)

        var_update = state_ops.assign_sub(var, bounded_lr, use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t]) 
Developer: HyperGAN, Project: HyperGAN, Lines: 47, Source: AdaBound.py
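A sketch of AdaBound's core clipping step in isolation (hypothetical values, assuming TensorFlow 1.x): the per-parameter Adam step size lr / (sqrt(v) + eps) is clamped into [lower_bound, upper_bound], so as the bounds tighten the update interpolates from Adam toward SGD:

import tensorflow as tf

lr, eps = 0.001, 1e-8
v_sqrt = tf.constant([1e-4, 1.0, 10.0])          # sqrt of the second-moment estimate
step = lr / (v_sqrt + eps)                       # unbounded Adam step sizes
bounded = tf.clip_by_value(step, 0.0005, 0.005)  # hypothetical bounds
with tf.Session() as sess:
    print(sess.run(bounded))  # [0.005  0.001  0.0005]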

Example 15: _apply_dense

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_value [as alias]
def _apply_dense(self, grad, var):
    lr_scale = self.get_slot(var, "lr_scale")
    momentum = self.get_slot(var, "momentum")
    gbar = self.get_slot(var, "gbar")
    gain = self.get_slot(var, "gain")
    counter = self.get_slot(var, "counter")
    counter_updated = state_ops.assign(counter, counter + 1)

    # lr_scale update uses normalized grad and momentum to be independent of dim
    normalized_grad = grad / (linalg_ops.norm(grad) + 1e-10)
    normalized_momentum = momentum / (linalg_ops.norm(momentum) + 1e-10)
    # Apply EG updates on lr_scale:
    # grad_lr_scale = -inner_product(current_grad, old_momentum)
    # lr_scale <- lr_scale * exp(-scale_learning_rate * grad_lr_scale)
    lr_scale_unnormalized_updated = clip_ops.clip_by_value(
        lr_scale * math_ops.exp(
            self._scale_learning_rate * math_ops.reduce_sum(grad * momentum)),
        self._min_scale, self._max_scale)
    lr_scale_normalized_updated = clip_ops.clip_by_value(
        lr_scale * math_ops.exp(self._scale_learning_rate * math_ops.reduce_sum(
            normalized_grad * normalized_momentum)), self._min_scale,
        self._max_scale)
    lr_scale_updated = state_ops.assign(
        lr_scale,
        array_ops.where(self._use_directions, lr_scale_normalized_updated,
                        lr_scale_unnormalized_updated))
    # remove the bias of zero initialization in gbar
    corrected_gbar = gbar / (
        1.0 - self._beta**math_ops.maximum(counter_updated - 1, 1))
    # Apply EG updates on gain:
    # grad_gain = - current_grad * old_gbar
    # gain <- gain * exp(-gain_learning_rate * grad_gain)
    gain_unnormalized_updated = clip_ops.clip_by_value(
        gain * math_ops.exp(self._gain_learning_rate * grad * corrected_gbar),
        self._min_gain, self._max_gain)
    # Normalized update uses sign(grad) * sign(gbar) as a proxy for grad_gain.
    gain_normalized_updated = clip_ops.clip_by_value(
        gain * math_ops.exp(self._gain_learning_rate * math_ops.sign(grad) *
                            math_ops.sign(gbar)), self._min_gain,
        self._max_gain)
    gain_updated = state_ops.assign(
        gain,
        array_ops.where(self._use_signs, gain_normalized_updated,
                        gain_unnormalized_updated))
    scaled_g = self._learning_rate_tensor * gain_updated * grad
    with ops.control_dependencies([lr_scale_updated, scaled_g]):
      momentum_updated = state_ops.assign(
          momentum, self._momentum_tensor * momentum + scaled_g)
      gbar_updated = state_ops.assign(
          gbar, self._beta * gbar + (1.0 - self._beta) * grad)
    with ops.control_dependencies([gbar_updated]):
      return state_ops.assign_sub(var, lr_scale_updated * momentum_updated) 
Developer: tensorflow, Project: lingvo, Lines: 54, Source: egdd.py
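A NumPy sketch of the exponentiated-gradient (EG) update pattern used above, with hypothetical values: a multiplicative exp step followed by a clip so the gain can neither explode nor vanish:

import numpy as np

gain, rate = 1.0, 0.1
min_gain, max_gain = 0.5, 2.0
grad, gbar = 3.0, 2.5  # current gradient agrees with its running average
gain = np.clip(gain * np.exp(rate * grad * gbar), min_gain, max_gain)
print(gain)  # 2.0 -- the raw multiplier exp(0.75) ~= 2.12 is clipped at max_gain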


Note: The tensorflow.python.ops.clip_ops.clip_by_value examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and distribution and use must follow each project's license. Do not reproduce without permission.