

Python clip_ops.clip_by_norm Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.clip_ops.clip_by_norm. If you are unsure what clip_ops.clip_by_norm does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also browse further usage examples from the containing module, tensorflow.python.ops.clip_ops.


The 8 code examples of the clip_by_norm method below are ordered by popularity.
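Before the examples, a quick illustration of what clip_by_norm itself does: it rescales a tensor so that its L2 norm does not exceed a given maximum, and leaves it unchanged otherwise. The following is a minimal sketch using the public tf.clip_by_norm alias in a TF 1.x session (the tensor values are made up for illustration):

import tensorflow as tf  # assumes TensorFlow 1.x

t = tf.constant([[3.0, 4.0]])         # L2 norm = 5.0
clipped = tf.clip_by_norm(t, 1.0)     # rescaled down to norm 1.0
unchanged = tf.clip_by_norm(t, 10.0)  # norm already below 10.0

with tf.Session() as sess:
    print(sess.run(clipped))    # [[0.6 0.8]]
    print(sess.run(unchanged))  # [[3. 4.]]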

Example 1: clip_gradient_norms

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
# Also needed here: from tensorflow.python.framework import ops
def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars 
Developer ID: yuantailing, Project: ctw-baseline, Lines of code: 22, Source file: learning.py
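As a hedged usage sketch (not from the original project), clip_gradient_norms is meant to sit between compute_gradients and apply_gradients in a TF 1.x training loop; the loss tensor and learning rate below are illustrative:

# Illustrative only; assumes TF 1.x and an existing scalar `loss` tensor.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
grads_and_vars = optimizer.compute_gradients(loss)

# Clip each gradient's L2 norm to at most 5.0 before applying.
clipped = clip_gradient_norms(grads_and_vars, max_norm=5.0)
train_op = optimizer.apply_gradients(clipped)

Note that this helper clips each gradient tensor independently; tf.clip_by_global_norm, by contrast, rescales all gradients jointly by their combined norm.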

Example 2: _clip_sparse

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
# Also needed here: ops (tensorflow.python.framework), array_ops, and tf_logging as logging
def _clip_sparse(self, grad, var):
    assert isinstance(grad, ops.IndexedSlices)
    clip_dims = self._vars_to_clip_dims[var]
    if 0 in clip_dims:
      logging.warning("Clipping norm across dims %s for %s is inefficient "
                      "when including sparse dimension 0.", clip_dims,
                      var.op.name)
      return self._clip_dense(var)

    with ops.colocate_with(var):
      var_subset = array_ops.gather(var, grad.indices)
    with self._maybe_colocate_with(var):
      normalized_var_subset = clip_ops.clip_by_norm(
          var_subset, self._max_norm, clip_dims)
      delta = ops.IndexedSlices(
          var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
    with ops.colocate_with(var):
      return var.scatter_sub(delta, use_locking=self._use_locking) 
Developer ID: ryfeus, Project: lambda-packs, Lines of code: 20, Source file: variable_clipping_optimizer.py
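To make the per-row clipping concrete, here is a minimal standalone sketch of the operation _clip_sparse performs on the gathered variable subset (the embedding shape and indices are made up):

import tensorflow as tf  # assumes TensorFlow 1.x

embeddings = tf.Variable(tf.random_normal([100, 8]))
indices = tf.constant([3, 17, 42])

# Gather only the rows touched by a sparse gradient, then clip each
# row's L2 norm (computed over axis 1) to the maximum.
subset = tf.gather(embeddings, indices)
clipped_subset = tf.clip_by_norm(subset, clip_norm=1.0, axes=[1])

Gathering first is the point of the sparse path: only the updated rows are renormalized, rather than the full variable.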

Example 3: clip_gradient_norms

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
# Also needed here: from tensorflow.python.framework import ops
def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.
  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.
  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars 
Developer ID: autoai-org, Project: CVTron, Lines of code: 20, Source file: learning.py

Example 4: _clip_sparse

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
# Also needed here: ops (tensorflow.python.framework), array_ops, and tf_logging as logging
def _clip_sparse(self, grad, var):
    assert isinstance(grad, ops.IndexedSlices)
    clip_dims = self._vars_to_clip_dims[var]
    if 0 in clip_dims:
      logging.warning("Clipping norm across dims %s for %s is inefficient "
                      "when including sparse dimension 0.", clip_dims,
                      var.op.name)
      return self._clip_dense(var)

    with ops.colocate_with(var):
      var_subset = array_ops.gather(var.ref(), grad.indices)
    with self._maybe_colocate_with(var):
      normalized_var_subset = clip_ops.clip_by_norm(
          var_subset, self._max_norm, clip_dims)
      delta = ops.IndexedSlices(
          var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
    with ops.colocate_with(var):
      return var.scatter_sub(delta, use_locking=self._use_locking) 
Developer ID: tobegit3hub, Project: deep_image_model, Lines of code: 20, Source file: variable_clipping_optimizer.py

Example 5: __init__

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
def __init__(self,
               opt,
               vars_to_clip_dims,
               max_norm,
               use_locking=False,
               colocate_clip_ops_with_vars=False,
               name="VariableClipping"):
    """Construct a new clip-norm optimizer.

    Args:
      opt: The actual optimizer that will be used to compute and apply the
        gradients. Must be one of the Optimizer classes.
      vars_to_clip_dims: A dict with keys as Variables and values as lists
        of dimensions along which to compute the L2-norm.  See
        `tf.clip_by_norm` for more details.
      max_norm: The L2-norm to clip to, for all variables specified.
      use_locking: If `True` use locks for clip update operations.
      colocate_clip_ops_with_vars: If `True`, try colocating the clip norm
        ops with the corresponding variable.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "VariableClipping".
    """
    super(VariableClippingOptimizer, self).__init__(use_locking, name)
    self._opt = opt
    # Defensive copy of input dict
    self._vars_to_clip_dims = {
        var: clip_dims[:] for var, clip_dims in vars_to_clip_dims.items()}
    self._max_norm = max_norm
    self._colocate_clip_ops_with_vars = colocate_clip_ops_with_vars 
Developer ID: ryfeus, Project: lambda-packs, Lines of code: 31, Source file: variable_clipping_optimizer.py
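A hedged construction sketch: in TF 1.x this class was exposed as tf.contrib.opt.VariableClippingOptimizer, wrapping a base optimizer so that selected variables are renormalized after every update. The embedding variable and the clipped dimension below are illustrative:

import tensorflow as tf  # assumes TensorFlow 1.x with tf.contrib available

embeddings = tf.Variable(tf.random_normal([1000, 64]), name="embeddings")

base_opt = tf.train.AdagradOptimizer(learning_rate=0.1)
# After each update, clip each embedding row (norm over dim 1) to 2.0.
opt = tf.contrib.opt.VariableClippingOptimizer(
    base_opt,
    vars_to_clip_dims={embeddings: [1]},
    max_norm=2.0)
train_op = opt.minimize(loss)  # `loss` is assumed to exist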

Example 6: _clip_dense

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
# Also needed here: from tensorflow.python.framework import ops
def _clip_dense(self, var):
    with self._maybe_colocate_with(var):
      updated_var_value = var._ref()  # pylint: disable=protected-access
      normalized_var = clip_ops.clip_by_norm(
          updated_var_value, self._max_norm, self._vars_to_clip_dims[var])
      delta = updated_var_value - normalized_var
    with ops.colocate_with(var):
      return var.assign_sub(delta, use_locking=self._use_locking) 
Developer ID: ryfeus, Project: lambda-packs, Lines of code: 10, Source file: variable_clipping_optimizer.py
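The dense path uses a clip-then-subtract-the-difference pattern: compute the normalized value, then remove the excess with an in-place assign_sub. A standalone sketch of the same idea on an ordinary variable (values illustrative):

import tensorflow as tf  # assumes TensorFlow 1.x

v = tf.Variable([[6.0, 8.0]])           # L2 norm 10.0
normalized = tf.clip_by_norm(v, 2.0)    # what v should become
clip_op = v.assign_sub(v - normalized)  # subtract the excess in place

Expressing the clip as a delta applied with assign_sub mirrors what _clip_dense does above.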

Example 7: _clip_dense

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
# Also needed here: ops (tensorflow.python.framework) and array_ops
def _clip_dense(self, var):
    with self._maybe_colocate_with(var):
      updated_var_value = array_ops.identity(var.ref())
      normalized_var = clip_ops.clip_by_norm(
          updated_var_value, self._max_norm, self._vars_to_clip_dims[var])
      delta = updated_var_value - normalized_var
    with ops.colocate_with(var):
      return var.assign_sub(delta, use_locking=self._use_locking) 
Developer ID: tobegit3hub, Project: deep_image_model, Lines of code: 10, Source file: variable_clipping_optimizer.py

Example 8: _clip

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
# Also needed here: ops (tensorflow.python.framework), array_ops, and math_ops
def _clip(params, ids, max_norm):
  """Helper function for _embedding_lookup_and_transform.

  This function optionally clips embeddings to an l2-norm of max_norm.

  Args:
    params: A `Tensor` of embeddings retrieved by `_gather`.
    ids: The `ids` argument that was passed to `_gather`.
    max_norm: If provided, the embeddings are l2-normalized to the value of
      max_norm.

  Returns:
    A `Tensor` with the same type as `params`.
  """

  def _rank(x):
    """Helper function to retrieve the rank of a tensor.

    Args:
      x: Something convertible to `Tensor`.

    Returns:
      Either a pair `(rank, True)` where `rank` is an integer or a pair
      `(rank, False)` where `rank` is an integer `Tensor`. In either case,
      `rank` is the rank of `x`.
    """
    rank = ops.convert_to_tensor(x).get_shape().ndims
    if rank:
      return rank, True
    else:
      return array_ops.rank(x), False

  if max_norm is None:
    return params
  ids_rank, ids_static = _rank(ids)
  params_rank, params_static = _rank(params)
  return clip_ops.clip_by_norm(
      params,
      max_norm,
      axes=(list(range(ids_rank, params_rank))
            if ids_static and params_static
            else math_ops.range(ids_rank, params_rank))) 
Developer ID: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 44, Source file: embedding_ops.py
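This private helper is what backs the max_norm argument of the public tf.nn.embedding_lookup in TF 1.x. A minimal sketch of the user-facing behavior (the table and ids are made up):

import tensorflow as tf  # assumes TensorFlow 1.x

params = tf.constant([[3.0, 4.0],   # row norm 5.0 -> will be clipped
                      [0.3, 0.4]])  # row norm 0.5 -> left unchanged
ids = tf.constant([0, 1])

# Each looked-up row is clipped to an L2 norm of at most 1.0.
embedded = tf.nn.embedding_lookup(params, ids, max_norm=1.0)

with tf.Session() as sess:
    print(sess.run(embedded))  # [[0.6 0.8] [0.3 0.4]]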


Note: The tensorflow.python.ops.clip_ops.clip_by_norm examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects, and copyright remains with their original authors; consult each project's License before redistributing or using the code. Do not republish without permission.