

Python v1.gradients Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.gradients method in Python. If you have been wondering what exactly v1.gradients does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples from the containing module, tensorflow.compat.v1.


The following presents 15 code examples of the v1.gradients method, ordered by popularity.
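
Before the project-specific examples, here is a minimal, self-contained sketch of the basic call pattern (the variable names are illustrative, not taken from any project below). tf.gradients(ys, xs) builds symbolic derivatives of ys with respect to each tensor in xs:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.placeholder(tf.float32, shape=[None])
y = tf.reduce_sum(x ** 2)        # y = sum_i x_i^2
dy_dx, = tf.gradients(y, [x])    # symbolic gradient: dy/dx = 2x

with tf.Session() as sess:
  print(sess.run(dy_dx, {x: [1.0, 2.0, 3.0]}))  # [2. 4. 6.]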

Example 1: recompute_grad

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import gradients [as alias]
def recompute_grad(fn):
  """Decorator that recomputes the function on the backwards pass.

  Args:
    fn: a function that takes Tensors (all as positional arguments) and returns
      a tuple of Tensors.

  Returns:
    A wrapped fn that is identical to fn when called, but its activations will
    be discarded and recomputed on the backwards pass (i.e. on a call to
    tf.gradients).
  """

  @functools.wraps(fn)
  def wrapped(*args):
    return _recompute_grad(fn, args)

  return wrapped 
Developer: tensorflow, Project: tensor2tensor, Lines: 20, Source: common_layers.py
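
As a usage note, here is a minimal sketch (not taken from tensor2tensor; the weight variables and shapes are illustrative, and the underlying _recompute_grad helper is not shown in the snippet):

w1 = tf.get_variable("w1", [128, 128])
w2 = tf.get_variable("w2", [128, 128])

@recompute_grad
def block(x):
  # Intermediate activations inside `block` are discarded after the
  # forward pass and recomputed when tf.gradients walks the backward graph.
  h = tf.nn.relu(tf.matmul(x, w1))
  return tf.matmul(h, w2)

x = tf.random_uniform((8, 128))
y = block(x)
dw1, = tf.gradients(tf.reduce_sum(y), [w1])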

Example 2: test_loss_gradient_conv2

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import gradients [as alias]
def test_loss_gradient_conv2(self):
    loss = self.gamma_activation_reg.get_regularization_term(
        [self.get_conv('conv2')])
    expected_grad = np.array([-1.0] * 11 + [1.0] * 12)
    gammas = [
        self.name_to_var['conv%d/BatchNorm/gamma' % i] for i in range(1, 5)
    ]

    # Although the loss associated with conv2 depends on the gammas of conv2,
    # conv3 and conv4, only gamma2 should receive gradients. The other gammas
    # are compared to the threshold to see if they are alive or not, but this
    # should not send gradients to them.
    grads = tf.gradients(loss, gammas)
    self.assertEqual(None, grads[0])
    self.assertEqual(None, grads[2])
    self.assertEqual(None, grads[3])

    # Regarding gamma2, it receives a -1 or a +1 gradient, depending on whether
    # the gamma is negative or positive, since the loss is |gamma|. This is
    # multiplied by expected_coeff.
    with self.cached_session():
      self.assertAllClose(expected_grad, grads[1].eval()) 
Developer: google-research, Project: morph-net, Lines: 24, Source: activation_regularizer_test.py

Example 3: test_adam

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import gradients [as alias]
def test_adam(self):
    with self.test_session() as sess:
      w = tf.get_variable(
          "w",
          shape=[3],
          initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
      x = tf.constant([0.4, 0.2, -0.5])
      loss = tf.reduce_mean(tf.square(x - w))
      tvars = tf.trainable_variables()
      grads = tf.gradients(loss, tvars)
      global_step = tf.train.get_or_create_global_step()
      optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
      train_op = optimizer.apply_gradients(list(zip(grads, tvars)), global_step)
      init_op = tf.group(tf.global_variables_initializer(),
                         tf.local_variables_initializer())
      sess.run(init_op)
      for _ in range(100):
        sess.run(train_op)
      w_np = sess.run(w)
      self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2) 
Developer: google-research, Project: albert, Lines: 22, Source: optimization_test.py

Example 4: minimize

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import gradients [as alias]
def minimize(self, loss, x, optim_state):
    """Compute a new value of `x` to minimize `loss`.

    Args:
      loss: A scalar Tensor, the value to be minimized. `loss` should be a
        continuous function of `x` which supports gradients, `loss = f(x)`.
      x: A list of Tensors, the values to be updated. This is analogous to the
        `var_list` argument in standard TF Optimizer.
      optim_state: A (possibly nested) dict, containing any state info needed
        for the optimizer.

    Returns:
      new_x: A list of Tensors, the same length as `x`, which are updated
      new_optim_state: A new state, with the same structure as `optim_state`,
        which has been updated.
    """ 
Developer: deepmind, Project: interval-bound-propagation, Lines: 18, Source: attacks.py
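
The body is intentionally empty in the source; this is an abstract interface that concrete optimizers implement. As an illustration only (not deepmind's code; the fixed learning rate is a hypothetical constant), the simplest conforming implementation is one step of gradient descent with no optimizer state:

def minimize(self, loss, x, optim_state):
  lr = 0.1  # hypothetical fixed learning rate
  grads = tf.gradients(loss, x)
  new_x = [xi - lr * gi for xi, gi in zip(x, grads)]
  return new_x, optim_state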

Example 5: _apply_gradients

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import gradients [as alias]
def _apply_gradients(self, grads, x, optim_state):
    """Applies gradients."""
    lr = self._lr_fn(optim_state.t)
    new_optim_state = self._State(
        t=optim_state.t + 1,
        m=[None] * len(x),
        u=[None] * len(x))
    t = tf.cast(new_optim_state.t, tf.float32)
    new_x = [None] * len(x)
    for i in range(len(x)):
      g = grads[i]
      m_old = optim_state.m[i]
      u_old = optim_state.u[i]
      new_optim_state.m[i] = self._beta1 * m_old + (1. - self._beta1) * g
      new_optim_state.u[i] = self._beta2 * u_old + (1. - self._beta2) * g * g
      m_hat = new_optim_state.m[i] / (1. - tf.pow(self._beta1, t))
      u_hat = new_optim_state.u[i] / (1. - tf.pow(self._beta2, t))
      new_x[i] = x[i] - lr * m_hat / (tf.sqrt(u_hat) + self._epsilon)
    return new_x, new_optim_state 
Developer: deepmind, Project: interval-bound-propagation, Lines: 21, Source: attacks.py
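
For context, this is the standard Adam update with bias correction. In the code's notation (m for the first-moment estimate, u for the second, t starting at 1), each step computes:

$$\hat m_t = \frac{m_t}{1-\beta_1^{t}},\qquad \hat u_t = \frac{u_t}{1-\beta_2^{t}},\qquad x_t = x_{t-1} - \mathit{lr}\,\frac{\hat m_t}{\sqrt{\hat u_t}+\epsilon}$$

Dividing by 1 - beta^t corrects the bias of the zero-initialized moment accumulators toward zero; without it, the effective step size would be sharply reduced during the first iterations.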

Example 6: _test_upper_bound

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import gradients [as alias]
def _test_upper_bound(self, gradient):
    inputs = tf.placeholder(dtype=tf.float32)
    outputs = math_ops.upper_bound(inputs, 0, gradient=gradient)
    pgrads, = tf.gradients([outputs], [inputs], [tf.ones_like(inputs)])
    ngrads, = tf.gradients([outputs], [inputs], [-tf.ones_like(inputs)])

    inputs_feed = [-1, 1]
    outputs_expected = [-1, 0]
    if gradient == "disconnected":
      pgrads_expected = [1, 0]
      ngrads_expected = [-1, 0]
    elif gradient == "identity":
      pgrads_expected = [1, 1]
      ngrads_expected = [-1, -1]
    else:
      pgrads_expected = [1, 1]
      ngrads_expected = [-1, 0]

    with self.cached_session() as sess:
      outputs, pgrads, ngrads = sess.run(
          [outputs, pgrads, ngrads], {inputs: inputs_feed})
      self.assertAllEqual(outputs, outputs_expected)
      self.assertAllEqual(pgrads, pgrads_expected)
      self.assertAllEqual(ngrads, ngrads_expected) 
Developer: tensorflow, Project: compression, Lines: 26, Source: math_ops_test.py
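
The three gradient modes differ only in how gradients pass through the clipped region. As a hedged sketch (not the actual tensorflow/compression implementation of math_ops.upper_bound), the "identity" mode can be reproduced with tf.custom_gradient:

def upper_bound_identity(inputs, bound):
  """Like tf.minimum(inputs, bound), but gradients pass straight through."""
  @tf.custom_gradient
  def _op(x):
    def grad(dy):
      return dy  # identity mode: the clip is invisible to the backward pass
    return tf.minimum(x, bound), grad
  return _op(inputs)

The default mode (the final else branch of the test) instead blocks a gradient only where a descent step would push an already-clipped value further past the bound, while the "disconnected" mode zeroes gradients for all clipped inputs, exactly as tf.minimum itself would.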

Example 7: _test_lower_bound

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import gradients [as alias]
def _test_lower_bound(self, gradient):
    inputs = tf.placeholder(dtype=tf.float32)
    outputs = math_ops.lower_bound(inputs, 0, gradient=gradient)
    pgrads, = tf.gradients([outputs], [inputs], [tf.ones_like(inputs)])
    ngrads, = tf.gradients([outputs], [inputs], [-tf.ones_like(inputs)])

    inputs_feed = [-1, 1]
    outputs_expected = [0, 1]
    if gradient == "disconnected":
      pgrads_expected = [0, 1]
      ngrads_expected = [0, -1]
    elif gradient == "identity":
      pgrads_expected = [1, 1]
      ngrads_expected = [-1, -1]
    else:
      pgrads_expected = [0, 1]
      ngrads_expected = [-1, -1]

    with self.cached_session() as sess:
      outputs, pgrads, ngrads = sess.run(
          [outputs, pgrads, ngrads], {inputs: inputs_feed})
      self.assertAllEqual(outputs, outputs_expected)
      self.assertAllEqual(pgrads, pgrads_expected)
      self.assertAllEqual(ngrads, ngrads_expected) 
Developer: tensorflow, Project: compression, Lines: 26, Source: math_ops_test.py

Example 8: testGradients

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import gradients [as alias]
def testGradients(self, is_multi_actions):
    self._setUpLoss(is_multi_actions)
    with self.test_session() as sess:
      total_loss = tf.reduce_sum(self._loss)
      gradients = tf.gradients(
          [total_loss], nest.flatten(self._policy_logits_nest))
      grad_policy_logits_nest = sess.run(gradients)
      for grad_policy_logits in grad_policy_logits_nest:
        self.assertAllClose(grad_policy_logits,
                            [[[0, 0], [-0.731, 0.731]],
                             [[1, -1], [0, 0]]], atol=1e-4)
      dead_grads = tf.gradients(
          [total_loss],
          nest.flatten(self._actions_nest) + [self._action_values])
      for grad in dead_grads:
        self.assertIsNone(grad) 
Developer: deepmind, Project: trfl, Lines: 18, Source: discrete_policy_gradient_ops_test.py

Example 9: testEntropyGradients

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import gradients [as alias]
def testEntropyGradients(self, is_multi_actions):
    if is_multi_actions:
      loss = self.multi_op.extra.entropy_loss
      policy_logits_nest = self.multi_policy_logits
    else:
      loss = self.op.extra.entropy_loss
      policy_logits_nest = self.policy_logits

    grad_policy_list = [
        tf.gradients(loss, policy_logits)[0] * self.num_actions
        for policy_logits in nest.flatten(policy_logits_nest)]

    for grad_policy in grad_policy_list:
      self.assertEqual(grad_policy.get_shape(), tf.TensorShape([2, 1, 3]))

    self.assertAllEqual(tf.gradients(loss, self.baseline_values), [None])
    self.assertAllEqual(tf.gradients(loss, self.invalid_grad_inputs),
                        self.invalid_grad_outputs) 
Developer: deepmind, Project: trfl, Lines: 20, Source: discrete_policy_gradient_ops_test.py

Example 10: testGradient

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import gradients [as alias]
def testGradient(self):
    with self.test_session() as sess:
      x = tf.placeholder(tf.float64)
      quadratic_linear_boundary = 3
      loss = clipping_ops.huber_loss(x, quadratic_linear_boundary)
      xs = np.array([-5, -4, -3.1, -3, -2.9, -2, -1, 0, 1, 2, 2.9, 3, 3.1, 4, 5])
      grads = sess.run(tf.gradients([loss], [x]), feed_dict={x: xs})[0]

    self.assertTrue(np.all(np.abs(grads) <= quadratic_linear_boundary))

    # Everything <= -3 should have gradient -3.
    grads_lo = grads[xs <= -quadratic_linear_boundary]
    self.assertAllEqual(grads_lo,
                        [-quadratic_linear_boundary] * grads_lo.shape[0])

    # Everything >= 3 should have gradient 3.
    grads_hi = grads[xs >= quadratic_linear_boundary]
    self.assertAllEqual(grads_hi,
                        [quadratic_linear_boundary] * grads_hi.shape[0])

    # x in (-3, 3) should have gradient x.
    grads_mid = grads[np.abs(xs) <= quadratic_linear_boundary]
    xs_mid = xs[np.abs(xs) <= quadratic_linear_boundary]
    self.assertAllEqual(grads_mid, xs_mid) 
Developer: deepmind, Project: trfl, Lines: 26, Source: clipping_ops_test.py
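
For reference, here is a minimal sketch of a Huber loss with the gradient profile this test checks (the actual trfl clipping_ops implementation may differ in details):

def huber_loss(x, d):
  # Quadratic for |x| <= d, linear beyond, so the gradient is
  # x inside [-d, d] and d * sign(x) outside, i.e. clipped to [-d, d].
  abs_x = tf.abs(x)
  quadratic = tf.minimum(abs_x, d)
  linear = abs_x - quadratic
  return 0.5 * tf.square(quadratic) + d * linear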

Example 11: gradient_histogram_summary

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import gradients [as alias]
def gradient_histogram_summary(self, avg_grads):
    """Create histogram of log values of all non-zero gradients."""
    with tf.name_scope('log_gradients_summary'):
      all_grads = []
      for grad, _ in avg_grads:
        all_grads.append(tf.reshape(grad, [-1]))
      grads = tf.abs(tf.concat(all_grads, 0))
      # exclude grads with zero values.
      indices_for_non_zero_grads = tf.where(tf.not_equal(grads, 0))
      log_grads = tf.reshape(
          tf.log(tf.gather(grads, indices_for_non_zero_grads)), [-1])
      tf.summary.histogram('log_gradients', log_grads) 
Developer: tensorflow, Project: benchmarks, Lines: 14, Source: benchmark_cnn.py

Example 12: _make_training_step

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import gradients [as alias]
def _make_training_step(self, loss: tf.Tensor) -> tf.Tensor:
        """
        Constructs a training step from the loss parameter and hyperparameters.
        """
        optimizer_name = self.hyperparameters["optimizer"].lower()
        if optimizer_name == "sgd":
            optimizer = tf.train.GradientDescentOptimizer(
                learning_rate=self.hyperparameters["learning_rate"]
            )
        elif optimizer_name == "rmsprop":
            optimizer = tf.train.RMSPropOptimizer(
                learning_rate=self.hyperparameters["learning_rate"],
                decay=self.hyperparameters["learning_rate_decay"],
                momentum=self.hyperparameters["momentum"],
            )
        elif optimizer_name == "adam":
            optimizer = tf.train.AdamOptimizer(
                learning_rate=self.hyperparameters["learning_rate"]
            )
        else:
            raise Exception(
                'Unknown optimizer "%s".' % (self.hyperparameters["optimizer"])
            )

        # Calculate and clip gradients
        trainable_vars = self._sess.graph.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES
        )
        gradients = tf.gradients(loss, trainable_vars)
        clipped_gradients, _ = tf.clip_by_global_norm(
            gradients, self.hyperparameters["gradient_clip_value"]
        )
        pruned_clipped_gradients = []
        for (gradient, trainable_var) in zip(clipped_gradients, trainable_vars):
            if gradient is None:
                continue
            pruned_clipped_gradients.append((gradient, trainable_var))
        return optimizer.apply_gradients(pruned_clipped_gradients) 
Developer: microsoft, Project: machine-learning-for-programming-samples, Lines: 40, Source: model_tf1.py
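
A note on the clipping step: tf.clip_by_global_norm rescales the whole gradient list jointly, rather than each tensor independently, whenever the combined L2 norm exceeds the threshold:

g1 = tf.constant([3.0, 4.0])  # L2 norm 5.0
g2 = tf.constant([0.0])
clipped, global_norm = tf.clip_by_global_norm([g1, g2], clip_norm=2.5)
# global_norm evaluates to 5.0; both tensors are scaled by 2.5 / 5.0,
# so clipped[0] evaluates to [1.5, 2.0].

The None-pruning loop afterwards matters because tf.gradients returns None for variables the loss does not depend on, and apply_gradients rejects None entries.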

Example 13: compute_gradients

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import gradients [as alias]
def compute_gradients(self,
                        loss,
                        var_list,
                        global_step=None,
                        gate_gradients=GATE_OP,
                        aggregation_method=None,
                        colocate_gradients_with_ops=False,
                        name=None,
                        grad_loss=None):
    """Compute gradients through momentum optimizer.

    Args:
      loss: A Tensor containing the value to minimize.
      var_list: Optional list or tuple of tf.Variable to update
        to minimize loss. Defaults to the list of variables collected
        in the graph under the key GraphKeys.TRAINABLE_VARIABLES.
      global_step: Optional Variable to increment by one after the
        variables have been updated.
      gate_gradients: How to gate the computation of gradients.
        Can be GATE_NONE, GATE_OP, or GATE_GRAPH.
      aggregation_method: Specifies the method used to combine
        gradient terms. Valid values are defined in the class AggregationMethod.
      colocate_gradients_with_ops: If True, try collocating gradients with
        the corresponding op.
      name: Optional name for the returned operation. Default to the name
        passed to the Optimizer constructor.
      grad_loss: Optional. A Tensor holding the gradient computed for loss.

    Returns:
      A list of (gradient, variable) pairs. Variable is always present,
        but gradient can be None.
    """
    del global_step, name  # Unused for now.
    return self._momentum_optimizer.compute_gradients(
        loss,
        var_list=var_list,
        gate_gradients=gate_gradients,
        aggregation_method=aggregation_method,
        colocate_gradients_with_ops=colocate_gradients_with_ops,
        grad_loss=grad_loss) 
Developer: tensorflow, Project: tensor2tensor, Lines: 42, Source: yellowfin.py
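
A hypothetical usage of this delegating wrapper (the enclosing YellowFinOptimizer class and its constructor arguments are assumptions, since they are not shown in the snippet):

opt = YellowFinOptimizer(learning_rate=1.0, momentum=0.0)  # hypothetical signature
grads_and_vars = opt.compute_gradients(loss, var_list=tf.trainable_variables())
train_op = opt.apply_gradients(grads_and_vars)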

Example 14: testDiet

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import gradients [as alias]
def testDiet(self):

    params = diet.diet_adam_optimizer_params()

    @diet.fn_with_diet_vars(params)
    def model_fn(x):
      y = tf.layers.dense(x, 10, use_bias=False)
      return y

    @diet.fn_with_diet_vars(params)
    def model_fn2(x):
      y = tf.layers.dense(x, 10, use_bias=False)
      return y

    x = tf.random_uniform((10, 10))
    y = model_fn(x) + 10.
    y = model_fn2(y) + 10.
    grads = tf.gradients(y, [x])
    with tf.control_dependencies(grads):
      incr_step = tf.assign_add(tf.train.get_or_create_global_step(), 1)

    train_op = tf.group(incr_step, *grads)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      orig_vals = sess.run(tf.global_variables())
      for _ in range(10):
        sess.run(train_op)
      new_vals = sess.run(tf.global_variables())

      different = []
      for old, new in zip(orig_vals, new_vals):
        try:
          self.assertAllClose(old, new)
        except AssertionError:
          different.append(True)
      self.assertEqual(len(different), len(tf.global_variables())) 
Developer: tensorflow, Project: tensor2tensor, Lines: 38, Source: diet_test.py

Example 15: shakeshake2_grad

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import gradients [as alias]
def shakeshake2_grad(x1, x2, dy):
  """Overriding gradient for shake-shake of 2 tensors."""
  y = shakeshake2_py(x1, x2)
  dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
  return dx 
Developer: tensorflow, Project: tensor2tensor, Lines: 7, Source: common_layers.py
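
In tensor2tensor this override is registered through graph-mode Defun machinery that is not shown here. As an illustration only (assuming shakeshake2_py is the forward shake-shake combination of the two tensors), the same idea can be expressed with tf.custom_gradient, which reuses tf.gradients on a fresh forward pass to build the backward computation:

@tf.custom_gradient
def shakeshake2(x1, x2):
  y = shakeshake2_py(x1, x2)
  def grad(dy):
    # Delegate to the override above: differentiate a fresh forward pass,
    # so the backward pass uses newly drawn shake-shake coefficients.
    return shakeshake2_grad(x1, x2, dy)
  return y, grad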


Note: The tensorflow.compat.v1.gradients method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.