This article collects typical usage examples of the Python function tensorflow.python.ops.gradient_checker.compute_gradient_error. If you are wondering what compute_gradient_error does, how to call it, or what real-world usage looks like, the curated code examples here may help.
Fifteen code examples of the compute_gradient_error function are shown below, sorted by popularity by default.
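
As background for the examples: compute_gradient_error builds the analytic Jacobian of an output with respect to an input (using TensorFlow's registered gradients) as well as a numerical Jacobian estimated by finite differences, and returns the maximum absolute difference between the two, so a small return value indicates a correctly implemented gradient. Below is a minimal self-contained sketch of a typical call, assuming TensorFlow 1.x graph mode; the choice of tf.tanh and the 1e-3 tolerance are illustrative, not taken from the examples that follow.

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gradient_checker

with tf.Graph().as_default(), tf.Session():
  x_shape = [3, 4]
  x = tf.constant(np.random.randn(*x_shape), dtype=tf.float64)
  y = tf.tanh(x)
  # Maximum absolute difference between the analytic Jacobian dy/dx and a
  # finite-difference estimate; requires a default session to evaluate.
  err = gradient_checker.compute_gradient_error(x, x_shape, y, x_shape,
                                                delta=1e-3)
  assert err < 1e-3  # illustrative tolerance; a correct gradient is far below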
Example 1: testGradient
def testGradient(self):
  if np.__version__ == "1.13.0":
    self.skipTest("numpy 1.13.0 bug")

  np.random.seed(8161)
  test_dims = [(11, 1, 5, 7, 1), (2, 2)]
  with self.test_session(use_gpu=False):
    for dims in test_dims:
      sp_t, nnz = _sparsify(np.random.randn(*dims))
      # reduce random axes from 1D to N-D
      for d in range(1, len(dims) + 1):
        axes = np.random.choice(len(dims), size=d, replace=False).tolist()
        reduced = sparse_ops.sparse_reduce_sum(sp_t, axes)
        err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
                                                      reduced,
                                                      reduced.eval().shape)
        self.assertLess(err, 1e-3)

      # Tests for negative axes.
      reduced = sparse_ops.sparse_reduce_sum(sp_t, -1)
      err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
                                                    reduced,
                                                    reduced.eval().shape)
      self.assertLess(err, 1e-3)
Example 2: testFillFloat
def testFillFloat(self):
  with self.test_session(use_gpu=False) as sess:
    values = constant_op.constant(
        [0.0, 10.0, 13.0, 14.0, 32.0, 33.0], dtype=dtypes.float64)
    default_value = constant_op.constant(-1.0, dtype=dtypes.float64)
    sp_input = sparse_tensor.SparseTensorValue(
        indices=np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]),
        values=values,
        dense_shape=np.array([5, 6]))
    sp_output, empty_row_indicator = (sparse_ops.sparse_fill_empty_rows(
        sp_input, default_value))
    output, empty_row_indicator_out = sess.run(
        [sp_output, empty_row_indicator])
    self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4],
                                         [2, 0], [3, 2], [3, 3], [4, 0]])
    self.assertAllClose(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
    self.assertAllEqual(output.dense_shape, [5, 6])
    self.assertAllEqual(empty_row_indicator_out,
                        np.array([0, 0, 1, 0, 1]).astype(np.bool))

    values_grad_err = gradient_checker.compute_gradient_error(
        values, values.shape.as_list(), sp_output.values, [8], delta=1e-8)
    self.assertGreater(values_grad_err, 0)
    self.assertLess(values_grad_err, 1e-8)

    default_value_grad_err = gradient_checker.compute_gradient_error(
        default_value,
        default_value.shape.as_list(),
        sp_output.values, [8],
        delta=1e-8)
    self.assertGreater(default_value_grad_err, 0)
    self.assertLess(default_value_grad_err, 1e-8)
Example 3: doOutputTest
def doOutputTest(self, input_shape, moments_axes, tol=1e-4,
                 check_gradients=False):
  for mu in [0.0, 1.0, 1e3]:
    for sigma in [1.0, 0.1]:
      for keep_dims in [True, False]:
        input_values = np.random.rand(*input_shape) * sigma + mu
        expected_mean = np.mean(
            input_values, axis=moments_axes, keepdims=keep_dims)
        expected_var = np.var(
            input_values, axis=moments_axes, keepdims=keep_dims)
        with ops.Graph().as_default() as g:
          with self.test_session(graph=g) as sess:
            inputs = constant_op.constant(
                input_values, shape=input_shape, dtype=dtypes.float32)
            mean, variance = nn_impl.moments(
                inputs, moments_axes, keep_dims=keep_dims)
            if check_gradients:
              err = gradient_checker.compute_gradient_error(
                  inputs, input_shape, mean, mean.shape.as_list())
              self.assertLess(err, 1e-3)
              err = gradient_checker.compute_gradient_error(
                  inputs, input_shape, variance, variance.shape.as_list())
              self.assertLess(err, 1e-3)
            # Evaluate.
            [mean, variance] = sess.run([mean, variance])
            # Make sure that there are no NaNs.
            self.assertFalse(np.isnan(mean).any())
            self.assertFalse(np.isnan(variance).any())
            self.assertAllClose(mean, expected_mean, rtol=tol, atol=tol)
            self.assertAllClose(variance, expected_var, rtol=tol, atol=tol)
Example 4: testGradients
def testGradients(self):
  np.random.seed(1618)
  sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
  dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
  with self.test_session(use_gpu=False):
    for dtype in [np.float32, np.float64]:
      for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
        sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
        dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
        sp_t, nnz = _sparsify(sp_vals_np, thresh=1.5)
        dense_t = constant_op.constant(dense_vals_np)

        cmul = sp_t * dense_t
        err = gradient_checker.compute_gradient_error([sp_t.values, dense_t],
                                                      [(nnz,), dense_shape],
                                                      cmul.values, (nnz,))
        self.assertLess(err, 1e-4)

        cdiv = sp_t / dense_t
        err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
                                                      cdiv.values, (nnz,))
        self.assertLess(err, 1e-4)
        err = gradient_checker.compute_gradient_error(
            dense_t,
            dense_shape,
            cdiv.values, (nnz,),
            x_init_value=dense_vals_np)
        self.assertLess(err, 2e-4)
Example 5: testGradient
def testGradient(self):
  np.random.seed(1)  # Make it reproducible.
  x_shape = [5, 10]
  x_np = np.random.randn(*x_shape).astype(np.float32)
  alpha_np = np.float32(np.random.rand(1, x_shape[1]) + 0.01)
  clip_np = np.float32(np.random.rand(x_shape[0], 1) * 5.)
  with self.test_session(use_gpu=True):
    x_tf = constant_op.constant(x_np)
    alpha_tf = constant_op.constant(alpha_np)
    clip_tf = constant_op.constant(clip_np)
    y_tf = scaled_softplus(x_tf, alpha_tf)
    z_tf = scaled_softplus(x_tf, alpha_tf, clip_tf * 0.1)
    err = gradient_checker.compute_gradient_error([x_tf, alpha_tf],
                                                  [x_shape, alpha_np.shape],
                                                  y_tf, x_shape,
                                                  [x_np, alpha_np],
                                                  delta=0.002)
    err_clip = gradient_checker.compute_gradient_error(
        [x_tf, alpha_tf, clip_tf],
        [x_shape, alpha_np.shape, clip_np.shape],
        z_tf, x_shape,
        [x_np, alpha_np, clip_np],
        delta=0.002)
    eps = 2e-4
    self.assertLess(err, eps)
    self.assertLess(err_clip, eps)
Example 6: _testGradients
def _testGradients(self, tr_a, tr_b, sp_a, sp_b, a_dtype, b_dtype, delta,
                   name):
  with self.test_session():
    a = constant_op.constant(
        RandMatrix(3, 2, tr_a, round_bfloat=True), dtype=dtypes.float32)
    b = constant_op.constant(
        RandMatrix(2, 4, tr_b, round_bfloat=True), dtype=dtypes.float32)
    tf_a = math_ops.cast(a, a_dtype) if a_dtype != dtypes.float32 else a
    tf_b = math_ops.cast(b, b_dtype) if b_dtype != dtypes.float32 else b
    m = math_ops.matmul(
        tf_a,
        tf_b,
        name=name,
        transpose_a=tr_a,
        transpose_b=tr_b,
        a_is_sparse=sp_a,
        b_is_sparse=sp_b)
    err = (gradient_checker.compute_gradient_error(
        a, [2, 3] if tr_a else [3, 2],
        m, [3, 4],
        x_init_value=a.eval(),
        delta=delta) + gradient_checker.compute_gradient_error(
            b, [4, 2] if tr_b else [2, 4],
            m, [3, 4],
            x_init_value=b.eval(),
            delta=delta))
  self.assertLessEqual(err, delta / 2.)
Example 7: _test_gradient
def _test_gradient(self,
                   x_shape,
                   scale_shape,
                   use_gpu=True,
                   data_format='NHWC'):
  np.random.seed(1)
  x_val = np.random.random_sample(x_shape).astype(np.float32)
  scale_val = np.random.random_sample(scale_shape).astype(np.float32)
  offset_val = np.random.random_sample(scale_shape).astype(np.float32)
  with self.test_session(use_gpu=use_gpu):
    x = constant_op.constant(x_val, name='x')
    scale = constant_op.constant(scale_val, name='scale')
    offset = constant_op.constant(offset_val, name='offset')
    y, _, _ = nn_impl.fused_batch_norm(
        x, scale, offset, data_format=data_format)
    err_x = gradient_checker.compute_gradient_error(x, x_shape, y, x_shape)
    err_scale = gradient_checker.compute_gradient_error(scale, scale_shape,
                                                        y, x_shape)
    err_offset = gradient_checker.compute_gradient_error(offset, scale_shape,
                                                         y, x_shape)
  err_tolerance = 1e-3
  self.assertLess(err_x, err_tolerance)
  self.assertLess(err_scale, err_tolerance)
  self.assertLess(err_offset, err_tolerance)
Example 8: _test_gradient
def _test_gradient(self,
                   x_shape,
                   x_dtype,
                   scale_shape,
                   scale_dtype,
                   use_gpu=True,
                   data_format='NHWC',
                   is_training=True):
  np.random.seed(1)
  x_val = np.random.random_sample(x_shape).astype(x_dtype)
  scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
  offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
  with self.test_session(use_gpu=use_gpu):
    x = constant_op.constant(x_val, name='x')
    scale = constant_op.constant(scale_val, name='scale')
    offset = constant_op.constant(offset_val, name='offset')
    if is_training:
      pop_mean = None
      pop_var = None
    else:
      pop_mean = np.random.random_sample(scale_shape).astype(scale_dtype)
      pop_var = np.random.random_sample(scale_shape).astype(scale_dtype)
    y, _, _ = nn_impl.fused_batch_norm(
        x,
        scale,
        offset,
        mean=pop_mean,
        variance=pop_var,
        data_format=data_format,
        is_training=is_training)
    if x_dtype != np.float16:
      err_x = gradient_checker.compute_gradient_error(x, x_shape, y, x_shape)
      err_scale = gradient_checker.compute_gradient_error(
          scale, scale_shape, y, x_shape)
      err_offset = gradient_checker.compute_gradient_error(
          offset, scale_shape, y, x_shape)
    else:
      x32 = constant_op.constant(x_val, name='x32', dtype=dtypes.float32)
      y32, _, _ = nn_impl.fused_batch_norm(
          x32,
          scale,
          offset,
          mean=pop_mean,
          variance=pop_var,
          data_format=data_format,
          is_training=is_training)
      err_x = self._compute_gradient_error_float16(x, x32, x_shape, y, y32,
                                                   x_shape)
      err_scale = self._compute_gradient_error_float16(
          scale, scale, scale_shape, y, y32, x_shape)
      err_offset = self._compute_gradient_error_float16(
          offset, offset, scale_shape, y, y32, x_shape)
  x_err_tolerance = 2e-3 if x_dtype == np.float16 else 1e-3
  scale_err_tolerance = 1e-3
  self.assertLess(err_x, x_err_tolerance)
  self.assertLess(err_scale, scale_err_tolerance)
  self.assertLess(err_offset, scale_err_tolerance)
Example 9: testEmptyFails
def testEmptyFails(self):
  with ops.Graph().as_default() as g:
    with self.session(graph=g):
      x = array_ops.placeholder(dtypes.float32)
      with g.gradient_override_map({"Identity": "BadGrad"}):
        y = array_ops.identity(x)
      bad = r"Empty gradient has wrong shape: expected \(0, 3\), got \(3, 0\)"
      with self.assertRaisesRegexp(ValueError, bad):
        gradient_checker.compute_gradient(x, (0, 3), y, (0, 3))
      with self.assertRaisesRegexp(ValueError, bad):
        gradient_checker.compute_gradient_error(x, (0, 3), y, (0, 3))
Example 10: testClipByValueGradient
def testClipByValueGradient(self):
  inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
  outputs_1 = clip_ops.clip_by_value(inputs, 0.5, 3.5)
  min_val = constant_op.constant([0.5, 0.5, 0.5, 0.5], dtype=dtypes.float32)
  max_val = constant_op.constant([3.5, 3.5, 3.5, 3.5], dtype=dtypes.float32)
  outputs_2 = clip_ops.clip_by_value(inputs, min_val, max_val)
  with self.test_session():
    error_1 = gradient_checker.compute_gradient_error(inputs, [4],
                                                      outputs_1, [4])
    self.assertLess(error_1, 1e-4)
    error_2 = gradient_checker.compute_gradient_error(inputs, [4],
                                                      outputs_2, [4])
    self.assertLess(error_2, 1e-4)
Example 11: testGradientsAxis0
def testGradientsAxis0(self):
  np.random.seed(7)
  for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
    data = np.random.randn(*shape)
    shapes = [shape[1:]] * shape[0]
    with self.test_session(use_gpu=True):
      # TODO(irving): Remove list() once we handle maps correctly
      xs = list(map(constant_op.constant, data))
      c = array_ops.pack(xs)
      err = gradient_checker.compute_gradient_error(xs, shapes, c, shape)
      self.assertLess(err, 1e-6)
      c = array_ops.stack(xs)
      err = gradient_checker.compute_gradient_error(xs, shapes, c, shape)
      self.assertLess(err, 1e-6)
Example 12: testDifferentTensorShapesThroughGradientError
def testDifferentTensorShapesThroughGradientError(self):
  pseudo_random = True
  overlapping = True
  pooling_ratio = [1, math.sqrt(3), math.sqrt(2), 1]
  for num_batches in [1, 2]:
    for num_rows in [5, 13]:
      for num_cols in [5, 11]:
        for num_channels in [1, 3]:
          input_shape = (num_batches, num_rows, num_cols, num_channels)
          input_data = self._GenerateUniqueRandomInputTensor(input_shape)
          # Add some randomness to make input_data not so 'integer'
          input_data += self._PRNG.random_sample(input_shape)
          with self.cached_session() as _:
            input_tensor = constant_op.constant(input_data, shape=input_shape)
            output_tensor, unused_a, unused_b = nn_ops.fractional_max_pool_v2(
                input_tensor,
                pooling_ratio,
                pseudo_random=pseudo_random,
                overlapping=overlapping,
                seed=self._SEED)
            output_data = self.evaluate(output_tensor)
            output_shape = output_data.shape
            # error_margin and delta setting is similar to max_pool_grad.
            error_margin = 1e-3
            gradient_error = gradient_checker.compute_gradient_error(
                input_tensor,
                input_shape,
                output_tensor,
                output_shape,
                x_init_value=input_data.reshape(input_shape),
                delta=1e-2)
            self.assertLess(gradient_error, error_margin)
Example 13: testAllInputOptionsThroughGradientError
def testAllInputOptionsThroughGradientError(self):
  input_shape = (1, 7, 13, 1)
  input_data = self._GenerateUniqueRandomInputTensor(input_shape)
  # Add some randomness to make input_data not so 'integer'
  input_data += self._PRNG.random_sample(input_shape)
  pooling_ratio = [1, math.sqrt(2), math.sqrt(3), 1]
  for pseudo_random in True, False:
    for overlapping in True, False:
      with self.cached_session() as _:
        input_tensor = constant_op.constant(input_data, shape=input_shape)
        output_tensor, unused_a, unused_b = nn_ops.fractional_max_pool_v2(
            input_tensor,
            pooling_ratio,
            pseudo_random=pseudo_random,
            overlapping=overlapping,
            seed=self._SEED)
        output_data = self.evaluate(output_tensor)
        output_shape = output_data.shape
        # error_margin and delta setting is similar to max_pool_grad.
        error_margin = 1e-3
        gradient_error = gradient_checker.compute_gradient_error(
            input_tensor,
            input_shape,
            output_tensor,
            output_shape,
            x_init_value=input_data.reshape(input_shape),
            delta=1e-2)
        self.assertLess(gradient_error, error_margin)
Example 14: testSecondGradient
def testSecondGradient(self):
  with self.test_session() as sess:
    l = constant_op.constant(
        [
            0.0, 0.0, 1.0 / 3, 0.0, 1.0 / 3, 0.0, 0.0, 0.0, 0.0, 0.5 / 3, 0.0,
            0.5 / 3
        ],
        shape=[12],
        dtype=dtypes.float64,
        name="l")
    f = constant_op.constant(
        [0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
        shape=[12],
        dtype=dtypes.float64,
        name="f")
    x = nn_ops.softmax_cross_entropy_with_logits(
        labels=l, logits=f, name="xent")
    loss = math_ops.reduce_sum(x)
    gradients = gradients_impl.gradients(loss, [f])[0]
    err = gradient_checker.compute_gradient_error(f, [12], gradients, [12])
    # Check that the second derivative is actually computed: because of how
    # the xentropy gradient is implemented, it shows up as a `BatchMatMul`
    # op in the graph.
    op_names = [
        op.op_def.name for op in sess.graph.get_operations() if op.op_def
    ]
    self.assertIn("BatchMatMul", op_names)
  print("cross entropy hessian err = ", err)
  self.assertLess(err, 5e-8)
Example 15: testDifferentTensorShapesThroughGradientError
def testDifferentTensorShapesThroughGradientError(self):
  pseudo_random = True
  overlapping = True
  pooling_ratio = [1, math.sqrt(3), math.sqrt(2), 1]
  for num_batches in [1, 2]:
    for num_rows in [5, 13]:
      for num_cols in [5, 11]:
        for num_channels in [1, 3]:
          input_shape = (num_batches, num_rows, num_cols, num_channels)
          input_data = self._GenerateRandomInputTensor(input_shape)
          with self.cached_session() as _:
            input_tensor = constant_op.constant(input_data, shape=input_shape)
            output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool(
                input_tensor,
                pooling_ratio,
                pseudo_random=pseudo_random,
                overlapping=overlapping,
                deterministic=True,
                seed=self._SEED,
                seed2=self._SEED2)
            output_data = output_tensor.eval()
            output_shape = output_data.shape
            # error_margin and delta setting is similar to avg_pool_grad.
            error_margin = 1e-4
            gradient_error = gradient_checker.compute_gradient_error(
                input_tensor,
                input_shape,
                output_tensor,
                output_shape,
                x_init_value=input_data.reshape(input_shape),
                delta=1e-2)
            self.assertLess(gradient_error, error_margin)