

Python clip_ops.clip_by_norm Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.clip_ops.clip_by_norm. If you are unsure what clip_ops.clip_by_norm does or how to call it, the curated code examples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.python.ops.clip_ops.


The sections below present 8 code examples of the clip_ops.clip_by_norm method, ordered by popularity.
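
Before diving in, a minimal sketch of the op itself may help (assuming TensorFlow 1.x; in TensorFlow 2.x the same function is exposed as tf.clip_by_norm):

import tensorflow as tf
from tensorflow.python.ops import clip_ops

t = tf.constant([3.0, 4.0])                        # L2 norm is 5.0
clipped = clip_ops.clip_by_norm(t, clip_norm=1.0)
# clipped == t * (1.0 / 5.0) == [0.6, 0.8]; a tensor whose norm is already
# <= clip_norm is returned unchanged.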

Example 1: clip_gradient_norms

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars 
Developer: yuantailing, Project: ctw-baseline, Lines: 22, Source: learning.py
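
A hypothetical usage sketch for clip_gradient_norms (TF 1.x style; the optimizer choice and `loss` are assumptions). Note that this helper clips each gradient tensor to max_norm independently, unlike tf.clip_by_global_norm, which rescales all gradients by their combined norm:

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
grads_and_vars = optimizer.compute_gradients(loss)  # loss assumed defined
clipped_grads_and_vars = clip_gradient_norms(grads_and_vars, max_norm=5.0)
train_op = optimizer.apply_gradients(clipped_grads_and_vars)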

Example 2: _clip_sparse

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
def _clip_sparse(self, grad, var):
    assert isinstance(grad, ops.IndexedSlices)
    clip_dims = self._vars_to_clip_dims[var]
    if 0 in clip_dims:
      logging.warning("Clipping norm across dims %s for %s is inefficient "
                      "when including sparse dimension 0.", clip_dims,
                      var.op.name)
      return self._clip_dense(var)

    with ops.colocate_with(var):
      var_subset = array_ops.gather(var, grad.indices)
    with self._maybe_colocate_with(var):
      normalized_var_subset = clip_ops.clip_by_norm(
          var_subset, self._max_norm, clip_dims)
      delta = ops.IndexedSlices(
          var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
    with ops.colocate_with(var):
      return var.scatter_sub(delta, use_locking=self._use_locking) 
Developer: ryfeus, Project: lambda-packs, Lines: 20, Source: variable_clipping_optimizer.py
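
The per-dimension clipping that _clip_sparse applies to the gathered rows can be illustrated in isolation (a minimal sketch, assuming TensorFlow 1.x):

import tensorflow as tf
from tensorflow.python.ops import clip_ops

rows = tf.constant([[3.0, 4.0],
                    [6.0, 8.0]])
# With axes=[1] the L2 norm is computed per row, so each row is clipped
# independently: [3, 4] (norm 5) and [6, 8] (norm 10) both become [0.6, 0.8].
per_row = clip_ops.clip_by_norm(rows, clip_norm=1.0, axes=[1])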

Example 3: clip_gradient_norms

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.
  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.
  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars 
Developer: autoai-org, Project: CVTron, Lines: 20, Source: learning.py

Example 4: _clip_sparse

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
def _clip_sparse(self, grad, var):
    assert isinstance(grad, ops.IndexedSlices)
    clip_dims = self._vars_to_clip_dims[var]
    if 0 in clip_dims:
      logging.warning("Clipping norm across dims %s for %s is inefficient "
                      "when including sparse dimension 0.", clip_dims,
                      var.op.name)
      return self._clip_dense(var)

    with ops.colocate_with(var):
      var_subset = array_ops.gather(var.ref(), grad.indices)
    with self._maybe_colocate_with(var):
      normalized_var_subset = clip_ops.clip_by_norm(
          var_subset, self._max_norm, clip_dims)
      delta = ops.IndexedSlices(
          var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
    with ops.colocate_with(var):
      return var.scatter_sub(delta, use_locking=self._use_locking) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 20, Source: variable_clipping_optimizer.py

Example 5: __init__

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
def __init__(self,
               opt,
               vars_to_clip_dims,
               max_norm,
               use_locking=False,
               colocate_clip_ops_with_vars=False,
               name="VariableClipping"):
    """Construct a new clip-norm optimizer.

    Args:
      opt: The actual optimizer that will be used to compute and apply the
        gradients. Must be one of the Optimizer classes.
      vars_to_clip_dims: A dict with keys as Variables and values as lists
        of dimensions along which to compute the L2-norm.  See
        `tf.clip_by_norm` for more details.
      max_norm: The L2-norm to clip to, for all variables specified.
      use_locking: If `True` use locks for clip update operations.
      colocate_clip_ops_with_vars: If `True`, try colocating the clip norm
        ops with the corresponding variable.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "VariableClipping".
    """
    super(VariableClippingOptimizer, self).__init__(use_locking, name)
    self._opt = opt
    # Defensive copy of input dict
    self._vars_to_clip_dims = {
        var: clip_dims[:] for var, clip_dims in vars_to_clip_dims.items()}
    self._max_norm = max_norm
    self._colocate_clip_ops_with_vars = colocate_clip_ops_with_vars 
Developer: ryfeus, Project: lambda-packs, Lines: 31, Source: variable_clipping_optimizer.py
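
A hypothetical construction sketch (assuming TensorFlow 1.x, where this class was exported as tf.contrib.opt.VariableClippingOptimizer; `loss` is an assumption):

import tensorflow as tf

embeddings = tf.get_variable("embeddings", shape=[1000, 64])
base_opt = tf.train.AdagradOptimizer(learning_rate=0.1)
clip_opt = tf.contrib.opt.VariableClippingOptimizer(
    base_opt,
    vars_to_clip_dims={embeddings: [1]},  # clip each row's L2 norm
    max_norm=1.0)
train_op = clip_opt.minimize(loss)        # loss assumed defined elsewhere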

Example 6: _clip_dense

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
def _clip_dense(self, var):
    with self._maybe_colocate_with(var):
      updated_var_value = var._ref()  # pylint: disable=protected-access
      normalized_var = clip_ops.clip_by_norm(
          updated_var_value, self._max_norm, self._vars_to_clip_dims[var])
      delta = updated_var_value - normalized_var
    with ops.colocate_with(var):
      return var.assign_sub(delta, use_locking=self._use_locking) 
Developer: ryfeus, Project: lambda-packs, Lines: 10, Source: variable_clipping_optimizer.py
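
Why subtract a delta instead of assigning the clipped value directly? Arithmetically, var.assign_sub(var - clip(var)) produces the same result as var.assign(clip(var)), but the delta formulation generalizes to the sparse case in the examples above, where only the gathered rows are updated via scatter_sub.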

Example 7: _clip_dense

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
def _clip_dense(self, var):
    with self._maybe_colocate_with(var):
      updated_var_value = array_ops.identity(var.ref())
      normalized_var = clip_ops.clip_by_norm(
          updated_var_value, self._max_norm, self._vars_to_clip_dims[var])
      delta = updated_var_value - normalized_var
    with ops.colocate_with(var):
      return var.assign_sub(delta, use_locking=self._use_locking) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 10, Source: variable_clipping_optimizer.py

Example 8: _clip

# Required import: from tensorflow.python.ops import clip_ops [as alias]
# Or: from tensorflow.python.ops.clip_ops import clip_by_norm [as alias]
def _clip(params, ids, max_norm):
  """Helper function for _embedding_lookup_and_transform.

  This function optionally clips embeddings to an l2-norm of max_norm.

  Args:
    params: A `Tensor` of embeddings retrieved by `_gather`.
    ids: The `ids` argument that was passed to `_gather`.
    max_norm: If provided, the embeddings are l2-normalized to the value of
      max_norm.

  Returns:
    A `Tensor` with the same type as `params`.
  """

  def _rank(x):
    """Helper function to retrieve the rank of a tensor.

    Args:
      x: Something convertible to `Tensor`.

    Returns:
      Either a pair `(rank, True)` where `rank` is an integer or a pair
      `(rank, False)` where `rank` is an integer `Tensor`. In either case,
      `rank` is the rank of `x`.
    """
    rank = ops.convert_to_tensor(x).get_shape().ndims
    if rank:
      return rank, True
    else:
      return array_ops.rank(x), False

  if max_norm is None:
    return params
  ids_rank, ids_static = _rank(ids)
  params_rank, params_static = _rank(params)
  return clip_ops.clip_by_norm(
      params,
      max_norm,
      axes=(list(range(ids_rank, params_rank))
            if ids_static and params_static
            else math_ops.range(ids_rank, params_rank))) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 44, Source: embedding_ops.py
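
The public entry point that reaches _clip is the max_norm argument of tf.nn.embedding_lookup (a minimal sketch, assuming TensorFlow 1.x):

import tensorflow as tf

params = tf.constant([[3.0, 4.0],
                      [0.3, 0.4]])
ids = tf.constant([0, 1])
# Row 0 (norm 5.0) is scaled down to [0.6, 0.8]; row 1 (norm 0.5) is
# already within max_norm and is returned unchanged.
embedded = tf.nn.embedding_lookup(params, ids, max_norm=1.0)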


Note: the tensorflow.python.ops.clip_ops.clip_by_norm examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright in the source code remains with those authors; consult the corresponding project's license before distributing or reusing the code. Do not reproduce this article without permission.