

Python clip_ops.clip_by_global_norm Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.ops.clip_ops.clip_by_global_norm. If you have been wondering what clip_by_global_norm does, how to call it, or what real-world usage looks like, the curated examples below should help.


The following presents 15 code examples of the clip_by_global_norm function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
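Before the examples, a minimal sketch of the function's semantics may help (an illustrative snippet assuming a TensorFlow 1.x environment, matching the era of these examples; it is not taken from any of the projects below). tf.clip_by_global_norm is the public alias of clip_ops.clip_by_global_norm: it scales every tensor in the list by clip_norm / max(global_norm, clip_norm), so the combined L2 norm of the outputs never exceeds clip_norm, and it also returns the pre-clipping global norm.

import tensorflow as tf

# Two tensors whose combined L2 norm is sqrt(3^2 + 4^2) = 5.
t0 = tf.constant([3.0, 0.0])
t1 = tf.constant([0.0, 4.0])
clipped, global_norm = tf.clip_by_global_norm([t0, t1], clip_norm=2.5)

with tf.Session() as sess:
    print(sess.run(global_norm))  # 5.0 -- the norm before clipping
    print(sess.run(clipped))      # each tensor scaled by 2.5 / 5 = 0.5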

Example 1: _get_train_ops

    def _get_train_ops(self, features, targets):
        """See base class."""
        global_step = contrib_variables.get_global_step()
        assert global_step
        logits = self._logits(features, is_training=True)
        if self._enable_centered_bias:
            centered_bias_step = [self._centered_bias_step(targets, features)]
        else:
            centered_bias_step = []
        with ops.control_dependencies(centered_bias_step):
            loss = self._loss(logits, targets, features)
        logging_ops.scalar_summary("loss", loss)

        linear_vars = self._get_linear_vars()
        dnn_vars = self._get_dnn_vars()
        grads = gradients.gradients(loss, dnn_vars + linear_vars)
        if self._gradient_clip_norm:
            grads, _ = clip_ops.clip_by_global_norm(grads, self._gradient_clip_norm)

        dnn_grads = grads[0 : len(dnn_vars)]
        linear_grads = grads[len(dnn_vars) :]

        train_ops = self._get_linear_training_ops(linear_grads, linear_vars) + self._get_dnn_training_ops(
            dnn_grads, dnn_vars
        )

        train_step = control_flow_ops.group(*train_ops, name="combined_training_op")
        with ops.control_dependencies([train_step]):
            with ops.get_default_graph().colocate_with(global_step):
                return state_ops.assign_add(global_step, 1).op, loss
Author: 285219011, Project: liuwenfeng, Lines: 30, Source: dnn_linear_combined.py

Example 2: testThatBackpropRuns

  def testThatBackpropRuns(self):
    """Run optimization to ensure that gradients can be computed."""

    batch_size = 1
    image_height = 9
    image_width = 12
    image = variables.Variable(
        np.float32(
            np.random.uniform(size=[batch_size, image_height, image_width, 3])))
    control_point_locations = [[3., 3.]]
    control_point_locations = constant_op.constant(
        np.float32(np.expand_dims(control_point_locations, 0)))
    control_point_displacements = [[0.25, -0.5]]
    control_point_displacements = constant_op.constant(
        np.float32(np.expand_dims(control_point_displacements, 0)))
    warped_image, _ = sparse_image_warp.sparse_image_warp(
        image,
        control_point_locations,
        control_point_locations + control_point_displacements,
        num_boundary_points=3)

    loss = math_ops.reduce_mean(math_ops.abs(warped_image - image))
    optimizer = momentum.MomentumOptimizer(0.001, 0.9)
    grad = gradients.gradients(loss, [image])
    grad, _ = clip_ops.clip_by_global_norm(grad, 1.0)
    opt_func = optimizer.apply_gradients(zip(grad, [image]))
    init_op = variables.global_variables_initializer()

    with self.test_session() as sess:
      sess.run(init_op)
      for _ in range(5):
        sess.run([loss, opt_func])
Author: AndrewTwinz, Project: tensorflow, Lines: 32, Source: sparse_image_warp_test.py

Example 3: test_interpolation_gradient

  def test_interpolation_gradient(self):
    """Make sure that backprop can run. Correctness of gradients is assumed.

    Here, we create a small 'training' set and a more densely-sampled
    set of query points, for which we know the true value in advance. The goal
    is to choose x locations for the training data such that interpolating using
    this training data yields the best reconstruction for the function
    values at the query points. The training data locations are optimized
    iteratively using gradient descent.
    """
    tp = _QuadraticPlusSinProblemND()
    (query_points, query_values, train_points,
     train_values) = tp.get_problem(optimizable=True)

    regularization = 0.001
    for interpolation_order in (1, 2, 3, 4):
      interpolator = interpolate_spline.interpolate_spline(
          train_points, train_values, query_points, interpolation_order,
          regularization)

      loss = math_ops.reduce_mean(math_ops.square(query_values - interpolator))

      optimizer = momentum.MomentumOptimizer(0.001, 0.9)
      grad = gradients.gradients(loss, [train_points])
      grad, _ = clip_ops.clip_by_global_norm(grad, 1.0)
      opt_func = optimizer.apply_gradients(zip(grad, [train_points]))
      init_op = variables.global_variables_initializer()

      with self.cached_session() as sess:
        sess.run(init_op)
        for _ in range(100):
          sess.run([loss, opt_func])
Author: Ajaycs99, Project: tensorflow, Lines: 32, Source: interpolate_spline_test.py

Example 4: clip_gradients_by_global_norm

def clip_gradients_by_global_norm(gradients_variables, clip_norm=20.):
  """Clips gradients of a multitask loss by their global norm.
  Ignores all-zero tensors when computing the global norm.

  Args:
  gradients_variables: a list of pairs (gradient, variable).
  clip_norm: a float Tensor, the global norm to clip on. Default is 20.0.

  Returns:
  list: A list of pairs of the same type as gradients_variables,.
  fixed_global_norm: A 0-D (scalar) Tensor representing the global norm.
  """
  gradients, variables = six.moves.zip(*gradients_variables)
  def _replace_nonexisting_grad(grad):
    if grad is None:
      return grad
    all_zeros = _is_all_zeros(grad)
    return control_flow_ops.cond(all_zeros,
                                 lambda: array_ops.zeros(
                                     [], dtype=dtypes.as_dtype(grad.dtype)),
                                 lambda: grad)
  # Replace all-zero gradients with scalar zeros before measuring the global
  # norm, so all-zero tensors are effectively ignored in the norm computation.
  nonzero_gradients = [_replace_nonexisting_grad(g) for g in gradients]
  fixed_global_norm = clip_ops.global_norm(nonzero_gradients)
  gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_norm,
                                              use_norm=fixed_global_norm)
  return list(six.moves.zip(gradients, variables)), fixed_global_norm
Author: SylChan, Project: tensorflow, Lines: 26, Source: multitask_optimizer_wrapper.py
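Example 4 wraps the clipping step of the usual compute-clip-apply gradient pipeline. For context, here is a hedged, self-contained sketch of that pipeline (TensorFlow 1.x assumed; the variable names and constants are illustrative, not taken from the project above):

import tensorflow as tf

# Gradient of sum(x^2) is 2*x = [6, 8], so its global norm is 10.
x = tf.Variable([3.0, 4.0])
loss = tf.reduce_sum(tf.square(x))
opt = tf.train.GradientDescentOptimizer(0.1)
grads_and_vars = opt.compute_gradients(loss)
grads, variables = zip(*grads_and_vars)
# Clip the gradients as a group, then apply them.
clipped, norm = tf.clip_by_global_norm(grads, clip_norm=5.0)
train_op = opt.apply_gradients(zip(clipped, variables))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, n = sess.run([train_op, norm])
    print(n)  # 10.0: the pre-clipping norm; the applied gradients were halved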

Example 5: _process_gradients

 def _process_gradients(self, gradients_vars):
   """Process gradients (e.g. clipping) before applying them to weights."""
   with ops.name_scope('process_gradients'):
     gradients, variables = zip(*gradients_vars)
     if self._gradient_clipping_norm is not None:
       gradients, _ = clip_ops.clip_by_global_norm(
           gradients, self._gradient_clipping_norm)
     return zip(gradients, variables)
Author: 821760408-sp, Project: tensorflow, Lines: 8, Source: dynamic_rnn_estimator.py

Example 6: _train_op_fn

 def _train_op_fn(loss):
   global_step = training_util.get_global_step()
   my_vars = ops.get_collection(parent_scope)
   grads = gradients.gradients(loss, my_vars)
   if gradient_clip_norm:
     grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
   return (_get_optimizer(optimizer).apply_gradients(
       zip(grads, my_vars), global_step=global_step))
Author: AbhinavJain13, Project: tensorflow, Lines: 8, Source: linear.py

Example 7: _train_op_fn

 def _train_op_fn(loss):
   global_step = contrib_variables.get_global_step()
   my_vars = ops.get_collection("linear")
   grads = gradients.gradients(loss, my_vars)
   if gradient_clip_norm:
     grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
   return (_get_optimizer(optimizer).apply_gradients(
       zip(grads, my_vars), global_step=global_step))
Author: AliMiraftab, Project: tensorflow, Lines: 8, Source: linear.py

Example 8: testClipByGlobalNormPreservesDenseShape

 def testClipByGlobalNormPreservesDenseShape(self):
   dense_shape = (1,)
   slices = ops.IndexedSlices(
       constant_op.constant([1.0]),
       constant_op.constant([0]),
       dense_shape=dense_shape)
   ans, _ = clip_ops.clip_by_global_norm([slices], 1.0)
   modified_slices = ans[0]
   self.assertEqual(dense_shape, slices.dense_shape)
   self.assertEqual(dense_shape, modified_slices.dense_shape)
Author: moses-sun, Project: tensorflow, Lines: 10, Source: clip_ops_test.py

Example 9: testClipByGlobalNormInf

  def testClipByGlobalNormInf(self):
    with self.session(use_gpu=True):
      x0 = constant_op.constant([-2.0, 0.0, np.inf, 4.0, 0.0, 0.0],
                                shape=[2, 3])
      x1 = constant_op.constant([1.0, -2.0])
      clip_norm = 6.0

      ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
      with self.assertRaisesRegexp(errors.InvalidArgumentError, "global norm"):
        self.evaluate(norm)
      with self.assertRaisesRegexp(errors.InvalidArgumentError, "global norm"):
        ans[0].eval()
      with self.assertRaisesRegexp(errors.InvalidArgumentError, "global norm"):
        ans[1].eval()
Author: JonathanRaiman, Project: tensorflow, Lines: 14, Source: clip_ops_test.py

Example 10: __init__

    def __init__(self, loss, global_step, optimizer,
                 learning_rate, clip_gradients=5.0):
        """Build a trainer part of graph.

        Args:
          loss: Tensor that evaluates to model's loss.
          global_step: Tensor with global step of the model.
          optimizer: Name of the optimizer class (SGD, Adam, Adagrad) or class.
          learning_rate: If this is a constant float value, no decay function
                         is used. Instead, a customized decay function can be
                         passed that accepts global_step as a parameter and
                         returns a Tensor, e.g. an exponential decay function:
                         def exp_decay(global_step):
                            return tf.train.exponential_decay(
                                learning_rate=0.1, global_step=global_step,
                                decay_steps=2, decay_rate=0.001)
          clip_gradients: float, gradients are clipped to this global norm
                          before being applied; a value of 0.0 or less disables
                          clipping. Defaults to 5.0.
        Raises:
            ValueError: if learning_rate is not a float or a callable.
        """
        self.loss = loss
        self.global_step = global_step
        # pylint: disable=redefined-variable-type
        if isinstance(learning_rate, float):
            self._learning_rate = vs.get_variable(
                "learning_rate",
                [],
                initializer=init_ops.constant_initializer(learning_rate))
        elif callable(learning_rate):
            self._learning_rate = learning_rate(self.global_step)
        else:
            raise ValueError("learning_rate should be a float or a callable function.")
        params = variables.trainable_variables()
        self.gradients = gradients.gradients(loss, params)
        if clip_gradients > 0.0:
            self.gradients, self.gradients_norm = clip_ops.clip_by_global_norm(
                self.gradients, clip_gradients)
        grads_and_vars = zip(self.gradients, params)
        if isinstance(optimizer, str):
            self._optimizer = OPTIMIZER_CLS_NAMES[
                optimizer](self._learning_rate)
        else:
            self._optimizer = optimizer(self._learning_rate)
        self.trainer = self._optimizer.apply_gradients(grads_and_vars,
                                                       global_step=global_step,
                                                       name="train")
        # Update ops during training, e.g. batch_norm_ops
        self.trainer = control_flow_ops.group(self.trainer, *ops.get_collection('update_ops'))
        # Get all initializers for all trainable variables.
        self._initializers = variables.initialize_all_variables()
Author: Demo-yang, Project: tensorflow, Lines: 49, Source: trainer.py

Example 11: testClipByGlobalNormInf

  def testClipByGlobalNormInf(self):
    # Expect all NaNs when global norm is inf.
    with self.session(use_gpu=True):
      x0 = constant_op.constant([-2.0, 0.0, np.inf, 4.0, 0.0, 0.0],
                                shape=[2, 3])
      x1 = constant_op.constant([1.0, -2.0])
      clip_norm = 6.0

      ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
      tf_ans_1 = ans[0].eval()
      tf_ans_2 = ans[1].eval()
      tf_norm = self.evaluate(norm)
      self.assertAllEqual(tf_norm, float('inf'))
      self.assertAllEqual(tf_ans_1, np.full([2, 3], float('nan')))
      self.assertAllEqual(tf_ans_2, np.full([2], float('nan')))
Author: adit-chandra, Project: tensorflow, Lines: 15, Source: clip_ops_test.py

Example 12: get_train_step

  def get_train_step(self, loss):
    """Returns the ops to run to perform a training step on this estimator.

    Args:
      loss: The loss to use when calculating gradients.

    Returns:
      The ops to run to perform a training step.
    """
    my_vars = self._get_vars()
    if not (self._get_feature_columns() or my_vars):
      return []

    grads = gradients.gradients(loss, my_vars)
    if self._gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, self._gradient_clip_norm)
    return [self._get_optimizer().apply_gradients(zip(grads, my_vars))]
Author: 31H0B1eV, Project: tensorflow, Lines: 17, Source: composable_model.py

Example 13: testClipByGlobalNormZero

  def testClipByGlobalNormZero(self):
    # No norm clipping when norm = 0
    with self.test_session(use_gpu=True):
      x0 = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
      x1 = constant_op.constant([0.0, 0.0])
      # Norm = 0, no changes
      np_ans_0 = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
      np_ans_1 = [0.0, 0.0]
      clip_norm = 6.0

      ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
      tf_ans_1 = ans[0].eval()
      tf_ans_2 = ans[1].eval()
      tf_norm = norm.eval()

    self.assertAllClose(tf_norm, 0.0)
    self.assertAllClose(np_ans_0, tf_ans_1)
    self.assertAllClose(np_ans_1, tf_ans_2)
Author: moses-sun, Project: tensorflow, Lines: 18, Source: clip_ops_test.py

Example 14: testClipByGlobalNormNotClipped

  def testClipByGlobalNormNotClipped(self):
    # No norm clipping when clip_norm >= 5
    with self.test_session(use_gpu=True):
      x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
      x1 = constant_op.constant([1.0, -2.0])
      # Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
      np_ans_0 = [[-2.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
      np_ans_1 = [1.0, -2.0]
      clip_norm = 6.0

      ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
      tf_ans_1 = ans[0].eval()
      tf_ans_2 = ans[1].eval()
      tf_norm = norm.eval()

    self.assertAllClose(tf_norm, 5.0)
    self.assertAllClose(np_ans_0, tf_ans_1)
    self.assertAllClose(np_ans_1, tf_ans_2)
Author: moses-sun, Project: tensorflow, Lines: 18, Source: clip_ops_test.py

Example 15: testClipByGlobalNormClippedTensor

  def testClipByGlobalNormClippedTensor(self):
    # Norm clipping when clip_norm < 5
    with self.test_session(use_gpu=True):
      x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
      x1 = constant_op.constant([1.0, -2.0])
      # Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
      clip_norm = constant_op.constant(4.0)

      # Answers are the original tensors scaled by 4.0/5.0
      np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
      np_ans_1 = [0.8, -1.6]

      ans, norm = clip_ops.clip_by_global_norm((x0, x1), clip_norm)
      tf_ans_1 = ans[0].eval()
      tf_ans_2 = ans[1].eval()
      tf_norm = norm.eval()

    self.assertAllClose(tf_norm, 5.0)
    self.assertAllClose(np_ans_0, tf_ans_1)
    self.assertAllClose(np_ans_1, tf_ans_2)
Author: moses-sun, Project: tensorflow, Lines: 20, Source: clip_ops_test.py
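As a quick sanity check of the arithmetic in the last three tests, the expected values can be reproduced with plain NumPy (this snippet is not part of the original test file):

import numpy as np

# Global norm of x0 and x1 is sqrt(4 + 16 + 1 + 4) = 5; with clip_norm = 4,
# every element is scaled by 4/5.
x0 = np.array([[-2.0, 0.0, 0.0], [4.0, 0.0, 0.0]])
x1 = np.array([1.0, -2.0])
global_norm = np.sqrt(np.sum(x0 ** 2) + np.sum(x1 ** 2))
print(global_norm)             # 5.0
print(x0 * 4.0 / global_norm)  # [[-1.6  0.   0. ]  [ 3.2  0.   0. ]]
print(x1 * 4.0 / global_norm)  # [ 0.8 -1.6]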


Note: The tensorflow.python.ops.clip_ops.clip_by_global_norm examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.