This article collects typical usage examples of the Python method tensorflow.python.ops.gradient_checker.compute_gradient_error. If you are wondering how gradient_checker.compute_gradient_error is used in practice, the curated code examples below may help. You can also explore further usage examples from the module that contains the method, tensorflow.python.ops.gradient_checker.
Eight code examples of the gradient_checker.compute_gradient_error method are shown below, sorted by popularity by default.
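Before the examples, here is a minimal sketch of how the checker is typically invoked (assuming TF 1.x graph mode, i.e. tf.compat.v1 on TF 2.x; tf.tanh and the tolerance are placeholders chosen for illustration, not taken from the examples below). compute_gradient_error builds the analytic Jacobian of y with respect to x via backprop and a finite-difference estimate obtained by perturbing x by delta, and returns the largest absolute difference between the two, so a small value means the registered gradient matches the numeric one.

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gradient_checker

x_shape = [3, 4]
x_np = np.random.random_sample(x_shape).astype(np.float64)

with tf.Graph().as_default(), tf.Session():
  x = tf.constant(x_np, name='x')
  y = tf.tanh(x)  # stand-in for whichever op's gradient you want to verify
  # Maximum |numeric - analytic| over all entries of the Jacobian dy/dx.
  err = gradient_checker.compute_gradient_error(
      x, x_shape, y, x_shape, x_init_value=x_np, delta=1e-3)
  assert err < 1e-4

The shape arguments must match the static shapes of x and y, x_init_value fixes the point at which the Jacobian is evaluated, and delta is the finite-difference step; the examples below follow the same pattern, in one case passing lists of tensors and shapes to check several inputs at once.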
Example 1: testSequenceToSequenceGradient
# Required import: from tensorflow.python.ops import gradient_checker [as alias]
# Or: from tensorflow.python.ops.gradient_checker import compute_gradient_error [as alias]
def testSequenceToSequenceGradient(self):
  with self.test_session():
    size = (17, 1, 15)
    output_size = (17, 1, 8)
    inputs = constant_op.constant(_rand(*size))
    outputs = lstm1d.ndlstm_base(inputs, 8, dynamic=False)
    variables.global_variables_initializer().run()
    gradients = gradients_impl.gradients(outputs, inputs)
    if 1:  # pylint: disable=using-constant-test
      gradients = gradients_impl.gradients(outputs, inputs)[0].eval()
      self.assertEqual(gradients.shape, size)
    else:
      # TODO(tmb) tf.test.compute_gradient error is currently broken
      # with dynamic_rnn. Enable this test case eventually.
      err = gradient_checker.compute_gradient_error(
          inputs, size, outputs, output_size, delta=1e-4)
      self.assert_(not np.isnan(err))
      self.assert_(err < 0.1)
Example 2: testSequenceToSequenceGradientReverse
# Required import: from tensorflow.python.ops import gradient_checker [as alias]
# Or: from tensorflow.python.ops.gradient_checker import compute_gradient_error [as alias]
def testSequenceToSequenceGradientReverse(self):
  with self.test_session():
    size = (17, 1, 15)
    output_size = (17, 1, 8)
    inputs = constant_op.constant(_rand(*size))
    outputs = lstm1d.ndlstm_base(inputs, 8, reverse=1, dynamic=False)
    variables.global_variables_initializer().run()
    if 1:  # pylint: disable=using-constant-test
      gradients = gradients_impl.gradients(outputs, inputs)[0].eval()
      self.assertEqual(gradients.shape, size)
    else:
      # TODO(tmb) tf.test.compute_gradient error is currently broken
      # with dynamic_rnn. Enable this test case eventually.
      err = gradient_checker.compute_gradient_error(
          inputs, size, outputs, output_size, delta=1e-4)
      self.assert_(not np.isnan(err))
      self.assert_(err < 0.1)
Example 3: testPoincareNormalizeGradient
# Required import: from tensorflow.python.ops import gradient_checker [as alias]
# Or: from tensorflow.python.ops.gradient_checker import compute_gradient_error [as alias]
def testPoincareNormalizeGradient(self):
  x_shape = [20, 7, 3]
  np.random.seed(1)
  x_np = np.random.random_sample(x_shape).astype(np.float64)
  for dim in range(len(x_shape)):
    with self.cached_session():
      x_tf = constant_op.constant(x_np, name='x')
      y_tf = _layers.poincare_normalize(x_tf, dim)
      err = gradient_checker.compute_gradient_error(x_tf, x_shape,
                                                    y_tf, x_shape)
    print('PoinCareNormalize gradient err = %g ' % err)
    self.assertLess(err, 1e-4)

# TODO(b/28426988): Add separate tests for non-legacy versions.
Example 4: _test_gradient
# Required import: from tensorflow.python.ops import gradient_checker [as alias]
# Or: from tensorflow.python.ops.gradient_checker import compute_gradient_error [as alias]
def _test_gradient(device):
    if device == "gpu" and visible_gpu():
        pytest.xfail("no gpu is visible")

    with NumpySeed(100):
        with tf.device('/{}:0'.format(device)):
            sprites, scales, offsets, backgrounds = get_data(
                random_alpha=True, squash=0.99)

            sprites_tf = constant_op.constant(sprites)
            scales_tf = constant_op.constant(scales)
            offsets_tf = constant_op.constant(offsets)
            backgrounds_tf = constant_op.constant(backgrounds)

            images = render_sprites.render_sprites(
                sprites_tf, scales_tf, offsets_tf, backgrounds_tf)

            sess = get_session()
            with sess.as_default():
                with tf.device(device):
                    err = gradient_checker.compute_gradient_error(
                        [sprites_tf, scales_tf, offsets_tf, backgrounds_tf],
                        [sprites.shape, scales.shape, offsets.shape, backgrounds.shape],
                        images,
                        backgrounds.shape,
                        [sprites, scales, offsets, backgrounds],
                        delta=0.002)

                    print("Jacobian error: {}".format(err))
                    threshold = 2e-4
                    assert err < threshold, \
                        "Jacobian error ({}) exceeded threshold ({})".format(err, threshold)
Example 5: test_basics
# Required import: from tensorflow.python.ops import gradient_checker [as alias]
# Or: from tensorflow.python.ops.gradient_checker import compute_gradient_error [as alias]
def test_basics(self):
  tf.set_random_seed(1234)
  x_shape = [4, 4, 4, 5]
  rnd = np.random.RandomState(0)
  x_np = rnd.uniform(-1.0, 1.0, x_shape).astype(np.float32)

  # test op max_pool_grad
  with tf.Graph().as_default(), tf.Session() as sess:
    x_tf = tf.constant(x_np, name="x")
    y_tf1, _ = tf.nn.max_pool_with_argmax(
        x_tf,
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding='SAME',
        name="y1")
    y_tf2 = tf.nn.max_pool(
        x_tf,
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding='SAME',
        name="y2")
    z_tf1 = tf.reduce_sum(tf.square(y_tf1))
    z_tf2 = tf.reduce_sum(tf.square(y_tf2))
    dx1 = tf.gradients(z_tf1, x_tf, name='dx1')[0]
    dx2 = tf.gradients(z_tf2, x_tf, name='dx2')[0]
    err = gradient_checker.compute_gradient_error(
        x_tf, x_shape, dx1, x_shape, delta=1e-3, x_init_value=x_np)
    self.assertTrue(err < 1e-3)
    err = gradient_checker.compute_gradient_error(
        x_tf, x_shape, dx2, x_shape, delta=1e-3, x_init_value=x_np)
    self.assertTrue(err < 1e-3)
Example 6: test_grad_transform
# Required import: from tensorflow.python.ops import gradient_checker [as alias]
# Or: from tensorflow.python.ops.gradient_checker import compute_gradient_error [as alias]
def test_grad_transform(self):
  with self.test_session():
    cloud_source = self.small_cloud
    cloud_target = cloud_source + [0.05, 0, 0]
    ego_motion = self.identity_transform
    transform, unused_residual = self._run_icp(cloud_source, ego_motion,
                                               cloud_target)
    err = gradient_checker.compute_gradient_error(
        ego_motion, ego_motion.shape.as_list(),
        transform, transform.shape.as_list())
    # Since our gradient is an approximation, it doesn't pass a numerical
    # check. Nonetheless, this test verifies that icp_grad computes a
    # gradient.
    self.assertGreater(err, 1e-3)
Example 7: test_grad_transform_same_ego_motion
# Required import: from tensorflow.python.ops import gradient_checker [as alias]
# Or: from tensorflow.python.ops.gradient_checker import compute_gradient_error [as alias]
def test_grad_transform_same_ego_motion(self):
  with self.test_session():
    cloud_source = self.small_cloud
    cloud_target = cloud_source + [0.1, 0, 0]
    ego_motion = tf.constant([[0.1, 0.0, 0.0, 0.0, 0.0, 0.0]],
                             dtype=tf.float32)
    transform, unused_residual = self._run_icp(cloud_source, ego_motion,
                                               cloud_target)
    err = gradient_checker.compute_gradient_error(
        ego_motion, ego_motion.shape.as_list(),
        transform, transform.shape.as_list())
    # Since our gradient is an approximation, it doesn't pass a numerical
    # check. Nonetheless, this test verifies that icp_grad computes a
    # gradient.
    self.assertGreater(err, 1e-3)
Example 8: test_grad_residual
# Required import: from tensorflow.python.ops import gradient_checker [as alias]
# Or: from tensorflow.python.ops.gradient_checker import compute_gradient_error [as alias]
def test_grad_residual(self):
  with self.test_session():
    cloud_source = self.small_cloud
    cloud_target = cloud_source + [0.05, 0, 0]
    ego_motion = self.identity_transform
    unused_transform, residual = self._run_icp(cloud_source, ego_motion,
                                               cloud_target)
    err = gradient_checker.compute_gradient_error(
        cloud_source, cloud_source.shape.as_list(),
        residual, residual.shape.as_list())
    # Since our gradient is an approximation, it doesn't pass a numerical
    # check. Nonetheless, this test verifies that icp_grad computes a
    # gradient.
    self.assertGreater(err, 1e-3)