

Python gradient_checker.compute_gradient Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.gradient_checker.compute_gradient. If you are unsure how to use gradient_checker.compute_gradient, or are looking for concrete examples of it in practice, the hand-picked code samples below may help. You can also browse further usage examples from its containing module, tensorflow.python.ops.gradient_checker.


The following presents 5 code examples of the gradient_checker.compute_gradient method, sorted by popularity by default.
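Before the examples, a minimal sketch of the v1 API may be useful (assuming TensorFlow 1.x graph mode; the tensors and shapes are illustrative only). compute_gradient evaluates two Jacobians of the output with respect to the input, one derived analytically through backpropagation and one estimated numerically by finite differences; a gradient implementation is considered correct when the two agree within tolerance.

import tensorflow as tf
from tensorflow.python.ops import gradient_checker

# compute_gradient must run inside a (default) session in graph mode.
with tf.Session():
    x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    y = tf.square(x)
    # Returns two 2-D numpy arrays of shape (x_size, y_size): the
    # analytical Jacobian (jacob_t) and the numerical one (jacob_n).
    jacob_t, jacob_n = gradient_checker.compute_gradient(x, [2, 2], y, [2, 2])
    print(abs(jacob_t - jacob_n).max())  # near zero for a correct gradient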

Example 1: _warp_test

# Required import: from tensorflow.python.ops import gradient_checker
# Alternatively: from tensorflow.python.ops.gradient_checker import compute_gradient
def _warp_test(self, first, second, flow, debug=False):
    with self.test_session(use_gpu=True) as sess:
        num_batch, height, width, channels = second.shape
        flow_ = tf.placeholder(tf.float32, shape=flow.shape, name='flow')
        inv_warped_second = ops.backward_warp(second, flow_)

        pred = sess.run(inv_warped_second, feed_dict={flow_: flow})
        if debug:
            print('-- result channels')
            for c in range(channels):
                print(np.reshape(pred[0, :, :, c], [height, width]))
        self.assertAllClose(first, pred)

        # Compare the analytical (jacob_t) and numerical (jacob_n) Jacobians
        # of the warped output with respect to the flow input.
        jacob_t, jacob_n = gradient_checker.compute_gradient(flow_, flow.shape,
                                                             inv_warped_second, pred.shape)
        self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
Author: simonmeister | Project: UnFlow | Source: backward_warp.py
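As an alternative to comparing the two Jacobians explicitly, the same module also provides compute_gradient_error, which performs both computations internally and reduces the comparison to a single scalar. A sketch of the equivalent check inside the test above:

        # compute_gradient_error returns the maximum elementwise
        # difference between the analytical and numerical Jacobians.
        max_err = gradient_checker.compute_gradient_error(
            flow_, flow.shape, inv_warped_second, pred.shape)
        self.assertLess(max_err, 1e-3)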

Example 2: _test_correlation

# Required import: from tensorflow.python.ops import gradient_checker
# Alternatively: from tensorflow.python.ops.gradient_checker import compute_gradient
def _test_correlation(self, in0, in1, out=None, **kwargs):
    with self.test_session(use_gpu=True) as sess:
        in0_op = tf.constant(in0, tf.float32)
        in1_op = tf.constant(in1, tf.float32)
        result_op = ops.correlation(in0_op, in1_op, **kwargs)
        result = sess.run(result_op)

        if out is not None:
            self.assertAllClose(out, result)

        # With a list of inputs, compute_gradient returns one
        # (analytical, numerical) Jacobian pair per input tensor;
        # check each pair separately.
        jacobians = gradient_checker.compute_gradient([in0_op, in1_op],
                                                      [in0.shape, in1.shape],
                                                      result_op, result.shape)
        for jacob_t, jacob_n in jacobians:
            self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
Author: simonmeister | Project: UnFlow | Source: correlation.py

Example 3: test_high_dim_filter_grad

# Required import: from tensorflow.python.ops import gradient_checker
# Alternatively: from tensorflow.python.ops.gradient_checker import compute_gradient
def test_high_dim_filter_grad(self):
    x_shape = [5, 10, 10]

    # Test inputs: unaries and RGB values
    unary_np = np.random.randn(*x_shape).astype(np.float32)
    rgb_np = np.random.randint(low=0, high=256, size=x_shape).astype(np.float32)

    with self.test_session():
        unary_tf = constant_op.constant(unary_np)
        rgb_tf = constant_op.constant(rgb_np)
        y_tf = custom_module.high_dim_filter(unary_tf, rgb_tf,
                                             bilateral=True,
                                             theta_alpha=1000.,
                                             theta_beta=1000.,
                                             theta_gamma=1000.)

        out = gradient_checker.compute_gradient([unary_tf, rgb_tf], [x_shape, x_shape],
                                                y_tf, x_shape)

        # We only need to compare gradients w.r.t. unaries
        computed = out[0][0].flatten()
        estimated = out[0][1].flatten()

        mask = (computed != 0)
        computed = computed[mask]
        estimated = estimated[mask]
        difference = computed - estimated

        measure1 = np.mean(difference) / np.mean(computed)
        measure2 = np.max(difference) / np.max(computed)

        print('Gradient check: measure1 = {:.6f}, measure2 = {:.6f}'.format(measure1, measure2))
        self.assertLess(measure1, 1e-3, 'Errors found in the gradient computation.')
        self.assertLess(measure2, 2e-2, 'Errors found in the gradient computation.')
        print('Gradient check: success!')
Author: sadeepj | Project: crfasrnn_keras | Source: test_gradients.py
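This example replaces the usual assertAllClose check with hand-rolled relative-error measures, masking out zero entries of the analytical Jacobian. For reference, the direct-comparison idiom used by the other examples would read as follows (a sketch relying on the structure of out above, where out[0] is the Jacobian pair for unary_tf and out[1] the pair for rgb_tf):

        # out[0] holds the (analytical, numerical) Jacobians w.r.t. unary_tf.
        unary_jacob_t, unary_jacob_n = out[0]
        self.assertAllClose(unary_jacob_t, unary_jacob_n, rtol=1e-3, atol=1e-3)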

Example 4: test_grad

# Required import: from tensorflow.python.ops import gradient_checker
# Alternatively: from tensorflow.python.ops.gradient_checker import compute_gradient
def test_grad(self):
    with self.test_session(use_gpu=True):
        flow_shape = [1, 10, 10, 2]
        warped_shape = [1, 10, 10, 1]

        flow_ = tf.placeholder(tf.float32, shape=flow_shape, name='flow')
        warped_ = ops.forward_warp(flow_)

        jacob_t, jacob_n = gradient_checker.compute_gradient(flow_, flow_shape,
                                                             warped_, warped_shape)
        self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
Author: simonmeister | Project: UnFlow | Source: forward_warp.py
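Note that the flow_ placeholder is never fed explicitly: when no initial value is supplied, compute_gradient generates random input data for the placeholder internally. To make such a check deterministic, the x_init_value parameter pins the input; a sketch in the same test context:

        # Supply fixed input data instead of letting compute_gradient
        # draw random values; x_init_value must match the placeholder's
        # shape and dtype.
        flow_init = np.ones(flow_shape, dtype=np.float32)
        jacob_t, jacob_n = gradient_checker.compute_gradient(
            flow_, flow_shape, warped_, warped_shape, x_init_value=flow_init)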

Example 5: _computeGradient

# Required import: from tensorflow.python.ops import gradient_checker
# Alternatively: from tensorflow.python.ops.gradient_checker import compute_gradient
def _computeGradient(self, np_input, bias, dtype, data_format):
    input_shape = output_shape = np_input.shape
    bias_shape = bias.shape
    input_tensor = constant_op.constant(
        np_input, shape=input_shape, dtype=dtype)
    bias_tensor = constant_op.constant(bias, shape=bias_shape, dtype=dtype)

    if context.executing_eagerly():

      def bias_add(input_tensor, bias_tensor):
        return nn_ops.bias_add(
            input_tensor, bias_tensor, data_format=data_format)

      # The following is a work-around for TF issue 33660. Instead of
      # calculating the analytical and numerical gradients for both
      # inputs in a single call to compute_gradient, compute_gradient
      # is called for each input separately.
      def bias_add_1(input_tensor):
        return bias_add(input_tensor, bias_tensor)

      def bias_add_2(bias_tensor):
        return bias_add(input_tensor, bias_tensor)

      input_jacob_a, input_jacob_n = gradient_checker_v2.compute_gradient(
          bias_add_1, [input_tensor])
      bias_jacob_a, bias_jacob_n = gradient_checker_v2.compute_gradient(
          bias_add_2, [bias_tensor])

      # Test gradient of BiasAddGrad
      def bias_add_grad_function(upstream_gradients):
        with backprop.GradientTape() as tape:
          tape.watch(bias_tensor)
          bias_add_output = bias_add(input_tensor, bias_tensor)
          gradient_injector_output = bias_add_output * upstream_gradients
          return tape.gradient(gradient_injector_output, bias_tensor)

      upstream_tensor = self._random_tensor(output_shape, dtype)
      grad_jacob_a, grad_jacob_n = gradient_checker_v2.compute_gradient(
          bias_add_grad_function, [upstream_tensor])
    else:
      output_tensor = nn_ops.bias_add(
          input_tensor, bias_tensor, data_format=data_format)
      jacobians = gradient_checker.compute_gradient(
          [input_tensor, bias_tensor], [input_shape, bias_shape],
          output_tensor, output_shape)
      (input_jacob_a, input_jacob_n), (bias_jacob_a, bias_jacob_n) = jacobians
      # Test gradient of BiasAddGrad
      bias_add_grad = gradients_impl.gradients(
          nn_ops.l2_loss(output_tensor), bias_tensor)[0]
      grad_jacob_a, grad_jacob_n = gradient_checker.compute_gradient(
          output_tensor, output_shape, bias_add_grad, bias_shape)

    return ((input_jacob_a, bias_jacob_a, grad_jacob_a),
            (input_jacob_n, bias_jacob_n, grad_jacob_n)) 
Author: NVIDIA | Project: framework-determinism | Source: test_patch_bias_add.py
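Example 5 mixes the graph-mode checker with its eager-mode counterpart, gradient_checker_v2, whose compute_gradient takes a function and a list of input tensors rather than graph endpoints and shapes. A minimal sketch of the v2 idiom (assuming TensorFlow 2.x eager execution; the tensors are illustrative only):

import tensorflow as tf
from tensorflow.python.ops import gradient_checker_v2

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
# Returns a tuple of analytical Jacobians and a tuple of numerical
# Jacobians, one entry per input tensor.
theoretical, numerical = gradient_checker_v2.compute_gradient(tf.square, [x])
# max_error condenses the whole comparison into a single scalar.
print(gradient_checker_v2.max_error(theoretical, numerical))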


Note: The tensorflow.python.ops.gradient_checker.compute_gradient examples in this article were compiled by 纯净天空 from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's License before distributing or using the code. Do not reproduce without permission.