

Python gradients_impl.gradients Method Code Examples

This article compiles typical usage examples of the Python method tensorflow.python.ops.gradients_impl.gradients. If you are unsure how gradients_impl.gradients is used in practice, the curated examples below should help. You can also explore further usage examples for the containing module, tensorflow.python.ops.gradients_impl.


The following presents 15 code examples of the gradients_impl.gradients method, sorted by popularity by default.
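
As a quick orientation before the collected examples, here is a minimal sketch (not taken from any of the projects below) of the basic call pattern. It assumes a TensorFlow 1.x graph-mode environment in which these internal modules are importable; the tensor names are purely illustrative.

from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops

x = constant_op.constant(3.0)
y = math_ops.square(x)                    # y = x**2
dy_dx = gradients_impl.gradients(y, x)    # returns a list of gradient tensors, here [dy/dx]

with session.Session() as sess:
  print(sess.run(dy_dx))                  # [6.0], since dy/dx = 2*x = 6 at x = 3

Both arguments accept a single tensor or a list of tensors, and the result is always returned as a list, which is why the examples below index into it or unpack it.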

Example 1: testUnbatchGrad

# Required import: from tensorflow.python.ops import gradients_impl [as alias]
# Or: from tensorflow.python.ops.gradients_impl import gradients [as alias]
def testUnbatchGrad(self):
    """Tests that batch and unbatch are differentiable."""
    with self.test_session() as sess:
      inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      batched, index, id_t = batch_ops.batch(
          [inp], num_batch_threads=1, max_batch_size=2,
          batch_timeout_micros=36000000, grad_timeout_micros=1000000,
          batching_queue="")
      computation = batched[0] * batched[0]
      result = batch_ops.unbatch(computation, index, id_t,
                                 timeout_micros=1000000, shared_name="unbatch")
      grad = gradients_impl.gradients(result, inp)
      thread_results = []

      def worker():
        thread_results.extend(sess.run([grad], feed_dict={inp: [1]}))

      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([grad], feed_dict={inp: [2]})
      worker_thread.join()
      self.assertEqual(thread_results[0], [2])
      self.assertEqual(main_results[0], [4]) 
Developer: ryfeus, Project: lambda-packs, Lines: 25, Source: batch_ops_test.py

Example 2: testSequenceToSequenceGradient

# Required import: from tensorflow.python.ops import gradients_impl [as alias]
# Or: from tensorflow.python.ops.gradients_impl import gradients [as alias]
def testSequenceToSequenceGradient(self):
    with self.test_session():
      size = (17, 1, 15)
      output_size = (17, 1, 8)
      inputs = constant_op.constant(_rand(*size))
      outputs = lstm1d.ndlstm_base(inputs, 8, dynamic=False)
      variables.global_variables_initializer().run()
      gradients = gradients_impl.gradients(outputs, inputs)
      if 1:  # pylint: disable=using-constant-test
        gradients = gradients_impl.gradients(outputs, inputs)[0].eval()
        self.assertEqual(gradients.shape, size)
      else:
        # TODO(tmb) tf.test.compute_gradient error is currently broken
        # with dynamic_rnn. Enable this test case eventually.
        err = gradient_checker.compute_gradient_error(
            inputs, size, outputs, output_size, delta=1e-4)
        self.assert_(not np.isnan(err))
        self.assert_(err < 0.1) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 20, Source: lstm1d_test.py

Example 3: testSequenceToSequenceGradientReverse

# Required import: from tensorflow.python.ops import gradients_impl [as alias]
# Or: from tensorflow.python.ops.gradients_impl import gradients [as alias]
def testSequenceToSequenceGradientReverse(self):
    with self.test_session():
      size = (17, 1, 15)
      output_size = (17, 1, 8)
      inputs = constant_op.constant(_rand(*size))
      outputs = lstm1d.ndlstm_base(inputs, 8, reverse=1, dynamic=False)
      variables.global_variables_initializer().run()
      if 1:  # pylint: disable=using-constant-test
        gradients = gradients_impl.gradients(outputs, inputs)[0].eval()
        self.assertEqual(gradients.shape, size)
      else:
        # TODO(tmb) tf.test.compute_gradient error is currently broken
        # with dynamic_rnn. Enable this test case eventually.
        err = gradient_checker.compute_gradient_error(
            inputs, size, outputs, output_size, delta=1e-4)
        self.assert_(not np.isnan(err))
        self.assert_(err < 0.1) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 19, Source: lstm1d_test.py

Example 4: testDoubleCallInSameScopeFails

# Required import: from tensorflow.python.ops import gradients_impl [as alias]
# Or: from tensorflow.python.ops.gradients_impl import gradients [as alias]
def testDoubleCallInSameScopeFails(self):

    @rev_block_lib.recompute_grad
    def layer_with_recompute(inputs):
      return core_layers.dense(inputs, 2)

    with variable_scope.variable_scope("layer", use_resource=True):
      inputs = array_ops.ones((2, 4), dtypes.float32)
      out1 = layer_with_recompute(inputs)
      out2 = layer_with_recompute(inputs) + out1
      out = math_ops.reduce_sum(out2)

    tvars = variables.trainable_variables()
    assert len(tvars) == 4
    with self.assertRaisesWithPredicateMatch(
        ValueError, "called twice in the same enclosing scope"):
      gradients_impl.gradients(out, [inputs] + tvars) 
Developer: google-research, Project: tf-slim, Lines: 19, Source: rev_block_lib_test.py

Example 5: testDoubleCallInUniqueScope

# Required import: from tensorflow.python.ops import gradients_impl [as alias]
# Or: from tensorflow.python.ops.gradients_impl import gradients [as alias]
def testDoubleCallInUniqueScope(self):

    @rev_block_lib.recompute_grad
    def layer_with_recompute(inputs):
      with variable_scope.variable_scope("inner", use_resource=True):
        return core_layers.dense(inputs, 2)

    with variable_scope.variable_scope("layer", use_resource=True):
      inputs = array_ops.ones((2, 4), dtypes.float32)

      with variable_scope.variable_scope("layer1", use_resource=True):
        out1 = layer_with_recompute(inputs)
      with variable_scope.variable_scope("layer2", use_resource=True):
        out2 = layer_with_recompute(inputs) + out1
      out = math_ops.reduce_sum(out2)

    tvars = variables.trainable_variables()
    assert len(tvars) == 4
    grads = gradients_impl.gradients(out, [inputs] + tvars)
    for grad in grads:
      self.assertIsNotNone(grad) 
Developer: google-research, Project: tf-slim, Lines: 23, Source: rev_block_lib_test.py

Example 6: _Conv2DBackpropInputGrad

# Required import: from tensorflow.python.ops import gradients_impl [as alias]
# Or: from tensorflow.python.ops.gradients_impl import gradients [as alias]
def _Conv2DBackpropInputGrad(op, grad):
  """The derivatives for deconvolution.

  Args:
    op: the Deconvolution op.
    grad: the tensor representing the gradient w.r.t. the output

  Returns:
    the gradients w.r.t. the input and the filter
  """
  return [None,
          nn_ops.conv2d_backprop_filter(grad, array_ops.shape(op.inputs[1]),
                                        op.inputs[2], op.get_attr("strides"),
                                        op.get_attr("padding"),
                                        op.get_attr("use_cudnn_on_gpu"),
                                        op.get_attr("data_format")),
          nn_ops.conv2d(grad, op.inputs[1], op.get_attr("strides"),
                        op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
                        op.get_attr("data_format"))] 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 21, Source: nn_grad.py

Example 7: _BiasAddGrad

# Required import: from tensorflow.python.ops import gradients_impl [as alias]
# Or: from tensorflow.python.ops.gradients_impl import gradients [as alias]
def _BiasAddGrad(op, received_grad):
  """Return the gradients for the 2 inputs of bias_op.

  The first input of the op is the tensor t, and its gradient is just the
  gradient that the op received.

  The second input of the op is the bias vector, which has one fewer
  dimension than "received_grad" (the batch dimension). Its gradient is the
  received gradient summed over the batch dimension, which is the first dimension.

  Args:
    op: The BiasOp for which we need to generate gradients.
    received_grad: Tensor.  The gradients passed to the BiasOp.

  Returns:
    Two tensors, the first one for the "tensor" input of the BiasOp,
    the second one for the "bias" input of the BiasOp.
  """
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    data_format = None
  return (received_grad, gen_nn_ops.bias_add_grad(out_backprop=received_grad,
                                                  data_format=data_format)) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 26, Source: nn_grad.py

Example 8: _BiasAddGradV1

# Required import: from tensorflow.python.ops import gradients_impl [as alias]
# Or: from tensorflow.python.ops.gradients_impl import gradients [as alias]
def _BiasAddGradV1(unused_bias_op, received_grad):
  """Return the gradients for the 2 inputs of bias_op.

  The first input of unused_bias_op is the tensor t, and its gradient is
  just the gradient the unused_bias_op received.

  The second input of unused_bias_op is the bias vector which has one fewer
  dimension than "received_grad" (the batch dimension.)  Its gradient is the
  received gradient Summed on the batch dimension, which is the first dimension.

  Args:
    unused_bias_op: The BiasOp for which we need to generate gradients.
    received_grad: Tensor.  The gradients passed to the BiasOp.

  Returns:
    Two tensors, the first one for the "tensor" input of the BiasOp,
    the second one for the "bias" input of the BiasOp.
  """
  reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
  return (received_grad, math_ops.reduce_sum(received_grad,
                                             reduction_dim_tensor)) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 23, Source: nn_grad.py

Example 9: _FractionalMaxPoolGrad

# Required import: from tensorflow.python.ops import gradients_impl [as alias]
# Or: from tensorflow.python.ops.gradients_impl import gradients [as alias]
def _FractionalMaxPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
  """Returns gradient for FractionalMaxPool.

  Since FractionalMaxPool has three outputs, there are three gradients passed in
  for each of the outputs. Only the first one is useful, the other two gradients
  are empty.

  Args:
    op: The FractionalMaxPoolOp.
    grad_0: Gradient with respect to op.outputs[0]
    unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
    unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.

  Returns:
    Input backprop for FractionalMaxPool op.
  """
  # pylint: disable=protected-access
  return gen_nn_ops._fractional_max_pool_grad(op.inputs[0], op.outputs[0],
                                              grad_0, op.outputs[1],
                                              op.outputs[2],
                                              op.get_attr("overlapping")) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 23, Source: nn_grad.py

Example 10: _FractionalAvgPoolGrad

# Required import: from tensorflow.python.ops import gradients_impl [as alias]
# Or: from tensorflow.python.ops.gradients_impl import gradients [as alias]
def _FractionalAvgPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
  """Returns gradient for FractionalAvgPool.

  Since FractionalAvgPool has three outputs, there are three gradients passed in
  for each of the outputs. Only the first one is useful, the other two gradients
  are empty.

  Args:
    op: The FractionalAvgPoolOp.
    grad_0: Gradient with respect to op.outputs[0]
    unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
    unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.

  Returns:
    Input backprop for FractionalAvgPool op.
  """
  # pylint: disable=protected-access
  return gen_nn_ops._fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0,
                                              op.outputs[1], op.outputs[2],
                                              op.get_attr("overlapping")) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 22, Source: nn_grad.py

Example 11: _BatchNormWithGlobalNormalizationGrad

# Required import: from tensorflow.python.ops import gradients_impl [as alias]
# Or: from tensorflow.python.ops.gradients_impl import gradients [as alias]
def _BatchNormWithGlobalNormalizationGrad(op, grad):
  """Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.

  We do not backprop anything for the mean and var intentionally as they are
  not being trained with backprop in the operation.

  Args:
    op: The BatchNormOp for which we need to generate gradients.
    grad: Tensor.  The gradients passed to the BatchNormOp.

  Returns:
    dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
    dm: Backprop for mean, which is
        sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
    dv: Backprop for variance, which is
        sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
    db: Backprop for beta, which is grad reduced in all except the
        last dimension.
    dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
  """
  dx, dm, dv, db, dg = gen_nn_ops._batch_norm_with_global_normalization_grad(
      op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
      op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
  return dx, dm, dv, db, dg 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 26, Source: nn_grad.py

Example 12: _acc_grads

# Required import: from tensorflow.python.ops import gradients_impl [as alias]
# Or: from tensorflow.python.ops.gradients_impl import gradients [as alias]
def _acc_grads(*lists_of_grads):
  """Accumulates lists of gradients."""
  acc_grads = []
  for grads in zip(*lists_of_grads):
    grads = [g for g in grads if g is not None]
    if grads:
      acc_grads.append(math_ops.add_n(grads))
    else:
      acc_grads.append(None)
  return acc_grads 
Developer: taehoonlee, Project: tensornets, Lines: 12, Source: rev_block_lib.py

Example 13: _testGradient

# Required import: from tensorflow.python.ops import gradients_impl [as alias]
# Or: from tensorflow.python.ops.gradients_impl import gradients [as alias]
def _testGradient(self, np_input, bias, dtype, data_format, use_gpu):
    with self.cached_session(use_gpu=use_gpu):
      if data_format == "NCHW":
        np_input = self._NHWCToNCHW(np_input)
      jacob_a, jacob_n = self._computeGradient(np_input, bias, dtype,
                                               data_format)
      input_jacob_a, bias_jacob_a, grad_jacob_a = jacob_a
      input_jacob_n, bias_jacob_n, grad_jacob_n = jacob_n

      if dtype == np.float16:
        # Compare fp16 analytical gradients to fp32 numerical gradients,
        # since fp16 numerical gradients are too imprecise unless great
        # care is taken with choosing the inputs and the delta. This is
        # a weaker, but pragmatic, check (in particular, it does not test
        # the op itself, only its gradient).
        _, jacob_n = self._computeGradient(np_input, bias, np.float32,
                                           data_format)
        input_jacob_n, bias_jacob_n, grad_jacob_n = jacob_n

      if dtype == dtypes.float64:
        threshold = 1e-10
      elif np_input.size >= 512:
        # The 5e-3 threshold seems to have been marginal in these cases, and
        # small changes in the test were pushing it over the limit.
        threshold = 5e-2
      else:
        threshold = 5e-3
      self.assertAllClose(input_jacob_a, input_jacob_n, threshold, threshold)
      self.assertAllClose(bias_jacob_a, bias_jacob_n, threshold, threshold)
      self.assertAllClose(grad_jacob_a, grad_jacob_n, threshold, threshold) 
Developer: NVIDIA, Project: framework-determinism, Lines: 32, Source: test_patch_bias_add.py

Example 14: testClipGrads

# Required import: from tensorflow.python.ops import gradients_impl [as alias]
# Or: from tensorflow.python.ops.gradients_impl import gradients [as alias]
def testClipGrads(self):
    xs = variables_lib2.Variable(0.0)
    ys = xs * 4.0
    grads = gradients_impl.gradients([ys], [xs])
    gradients_to_variables = list(zip(grads, [xs]))
    clipped_gradients_to_variables = training.clip_gradient_norms(
        gradients_to_variables, 3.0)

    with self.cached_session() as session:
      session.run(variables_lib2.global_variables_initializer())
      self.assertAlmostEqual(4.0, gradients_to_variables[0][0].eval())
      self.assertAlmostEqual(3.0, clipped_gradients_to_variables[0][0].eval()) 
Developer: google-research, Project: tf-slim, Lines: 14, Source: training_test.py

Example 15: testClipGradsFn

# Required import: from tensorflow.python.ops import gradients_impl [as alias]
# Or: from tensorflow.python.ops.gradients_impl import gradients [as alias]
def testClipGradsFn(self):
    xs = variables_lib2.Variable(0.0)
    ys = xs * 4.0
    grads = gradients_impl.gradients([ys], [xs])
    gradients_to_variables = list(zip(grads, [xs]))
    clipped_gradients_to_variables = training.clip_gradient_norms_fn(3.0)(
        gradients_to_variables)

    with self.cached_session() as session:
      session.run(variables_lib2.global_variables_initializer())
      self.assertAlmostEqual(4.0, gradients_to_variables[0][0].eval())
      self.assertAlmostEqual(3.0, clipped_gradients_to_variables[0][0].eval()) 
Developer: google-research, Project: tf-slim, Lines: 14, Source: training_test.py


Note: The tensorflow.python.ops.gradients_impl.gradients examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; please consult the corresponding project's License before distributing or using the code. Do not reproduce this article without permission.