This article collects typical usage examples of the clip_by_norm function from tensorflow.python.ops.clip_ops in Python. If you are unsure what clip_by_norm does, how to call it, or what it looks like in real code, the curated examples below should help. 15 code examples of the clip_by_norm function are shown, sorted by popularity by default.
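Before the examples, a quick orientation: clip_by_norm(t, clip_norm, axes) rescales t so that its L2 norm over the given axes does not exceed clip_norm, and returns t unchanged otherwise. The following NumPy reference is a minimal sketch of that documented behavior; the name clip_by_norm_reference and the keepdims trick are this article's own, not TensorFlow code:

import numpy as np

def clip_by_norm_reference(t, clip_norm, axes=None):
  # L2 norm over `axes` (all axes if None), keeping dims for broadcasting.
  l2norm = np.sqrt(np.sum(np.square(t), axis=axes, keepdims=True))
  # Entries are rescaled only where the norm exceeds clip_norm; the
  # max(l2norm, clip_norm) denominator also keeps zero inputs at zero.
  return t * clip_norm / np.maximum(l2norm, clip_norm)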
Example 1: testClipByNormClipped

def testClipByNormClipped(self):
  # Norm clipping when clip_norm < 5
  with self.session(use_gpu=True):
    x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
    # Norm of x = sqrt(3^2 + 4^2) = 5
    np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 0.0]]
    clip_norm = 4.0
    ans = clip_ops.clip_by_norm(x, clip_norm)
    tf_ans = self.evaluate(ans)
    # Repeat with the clip norm passed as a 0-D tensor instead of a float.
    clip_tensor = constant_op.constant(4.0)
    ans = clip_ops.clip_by_norm(x, clip_tensor)
    tf_ans_tensor = self.evaluate(ans)
    self.assertAllClose(np_ans, tf_ans)
    self.assertAllClose(np_ans, tf_ans_tensor)
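Worked check: the global norm of x is 5, which exceeds clip_norm = 4, so every entry is scaled by 4/5 = 0.8: -3.0 becomes -2.4 and 4.0 becomes 3.2, matching np_ans.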
Example 2: testClipByNormBadShape

def testClipByNormBadShape(self):
  with self.test_session(use_gpu=True):
    x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3, 1])
    # Use a nonsensical shape.
    clip = constant_op.constant([1.0, 2.0])
    with self.assertRaises(ValueError):
      _ = clip_ops.clip_by_norm(x, clip)
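The failure is intentional: clip_norm is documented as a 0-D (scalar) tensor, and the length-2 vector used here would silently broadcast x to a new shape, which clip_by_norm rejects with a ValueError.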
Example 3: get_gradients

def get_gradients(self, loss, params):
  """Returns gradients of `loss` with respect to `params`.

  Arguments:
    loss: Loss tensor.
    params: List of variables.

  Returns:
    List of gradient tensors.

  Raises:
    ValueError: In case any gradient cannot be computed (e.g. if gradient
      function not implemented).
  """
  params = nest.flatten(params)
  with backend.get_graph().as_default():
    grads = gradients.gradients(loss, params)
    for grad, param in zip(grads, params):
      if grad is None:
        raise ValueError("Variable {} has `None` for gradient. "
                         "Please make sure that all of your ops have a "
                         "gradient defined (i.e. are differentiable). "
                         "Common ops without gradient: "
                         "K.argmax, K.round, K.eval.".format(param))
  if hasattr(self, "clipnorm"):
    grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
  if hasattr(self, "clipvalue"):
    grads = [
        clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
        for g in grads
    ]
  return grads
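To see the clipnorm branch in isolation, here is a minimal sketch with hypothetical toy gradients, using the public tf.clip_by_norm alias of clip_ops.clip_by_norm:

import tensorflow as tf

grads = [tf.constant([3.0, 4.0]), tf.constant([0.5, 0.5])]
clipnorm = 1.0
clipped = [tf.clip_by_norm(g, clipnorm) for g in grads]
# The first gradient has norm 5.0 and is rescaled to [0.6, 0.8] (norm 1.0);
# the second has norm ~0.71 <= 1.0 and passes through unchanged.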
Example 4: get_gradients

def get_gradients(self, loss, params):
  """Returns gradients of `loss` with respect to `params`.

  Arguments:
    loss: Loss tensor.
    params: List of variables.

  Returns:
    List of gradient tensors.

  Raises:
    ValueError: In case any gradient cannot be computed (e.g. if gradient
      function not implemented).
  """
  loss = self._scale_loss(loss)
  grads = gradients.gradients(loss, params)
  if None in grads:
    raise ValueError("An operation has `None` for gradient. "
                     "Please make sure that all of your ops have a "
                     "gradient defined (i.e. are differentiable). "
                     "Common ops without gradient: "
                     "K.argmax, K.round, K.eval.")
  if hasattr(self, "clipnorm"):
    grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
  if hasattr(self, "clipvalue"):
    grads = [
        clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
        for g in grads
    ]
  return grads
Example 5: _testClipByNorm

def _testClipByNorm(self, inputs, max_norm, expected):
  with self.test_session() as sess:
    input_op = constant_op.constant(inputs)
    clipped = clip_ops.clip_by_norm(input_op, max_norm)
    check_op = numerics.add_check_numerics_ops()
    result, _ = sess.run([clipped, check_op])
    self.assertAllClose(result, expected)
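A hypothetical invocation of this helper (values chosen so the global norm is 5, which a max_norm of 4 scales by 0.8):

self._testClipByNorm(
    inputs=[[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]],
    max_norm=4.0,
    expected=[[-2.4, 0.0, 0.0], [3.2, 0.0, 0.0]])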
Example 6: _clip_dense

def _clip_dense(self, var):
  with self._maybe_colocate_with(var):
    updated_var_value = array_ops.identity(var.ref())
    normalized_var = clip_ops.clip_by_norm(
        updated_var_value, self._max_norm, self._vars_to_clip_dims[var])
    delta = updated_var_value - normalized_var
  with ops.colocate_with(var):
    return var.assign_sub(delta, use_locking=self._use_locking)
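The assign_sub dance above (subtracting var - clipped rather than assigning the clipped tensor directly) leaves the variable holding exactly the clipped value. A minimal eager-mode sketch with hypothetical values:

import tensorflow as tf

var = tf.Variable([[3.0, 4.0]])
normalized = tf.clip_by_norm(var, 1.0)   # [[0.6, 0.8]]
var.assign_sub(var - normalized)         # var now equals the clipped value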
Example 7: maybe_normalize

def maybe_normalize(x):
  if max_norm is not None:
    if x.get_shape().ndims is not None:
      ndims = x.get_shape().ndims
    else:
      ndims = array_ops.size(array_ops.shape(x))
    return clip_ops.clip_by_norm(x, max_norm, axes=list(range(1, ndims)))
  return x
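This closure clips each looked-up embedding over every axis except the leading (batch) axis. A standalone sketch with hypothetical values:

import tensorflow as tf

x = tf.constant([[3.0, 4.0], [0.3, 0.4]])   # two embedding vectors
max_norm = 1.0
clipped = tf.clip_by_norm(x, max_norm, axes=list(range(1, x.shape.ndims)))
# Row norms are 5.0 and 0.5: the first row is rescaled to [0.6, 0.8],
# the second is within the limit and passes through unchanged.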
Example 8: _testClipIndexedSlicesByNorm

def _testClipIndexedSlicesByNorm(self, values, indices, shape, max_norm,
                                 axes):
  with self.cached_session() as sess:
    values = constant_op.constant(values)
    indices = constant_op.constant(indices)
    shape = constant_op.constant(shape)
    # IndexedSlices mode
    indexed_slices = ops.IndexedSlices(values, indices, shape)
    clipped = clip_ops.clip_by_norm(indexed_slices, max_norm, axes)
    # clipped should be IndexedSlices
    self.assertIsInstance(clipped, ops.IndexedSlices)
    clipped = ops.convert_to_tensor(clipped)
    # Tensor mode
    dense_tensor = ops.convert_to_tensor(indexed_slices)
    dense_clipped = clip_ops.clip_by_norm(dense_tensor, max_norm, axes)
    result, expected = sess.run([clipped, dense_clipped])
    self.assertAllClose(result, expected)
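A hypothetical invocation: three occupied rows of a [4, 2] slice, clipped per row, so the sparse and densified paths must agree:

self._testClipIndexedSlicesByNorm(
    values=[[3.0, 4.0], [0.0, 1.0], [5.0, 0.0]],
    indices=[0, 1, 3], shape=[4, 2], max_norm=2.0, axes=[1])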
Example 9: testClipByNormZero

def testClipByNormZero(self):
  # No norm clipping when norm = 0
  with self.test_session(use_gpu=True):
    x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
    # Norm = 0, no changes
    np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
    clip_norm = 6.0
    ans = clip_ops.clip_by_norm(x, clip_norm)
    tf_ans = ans.eval()
    self.assertAllClose(np_ans, tf_ans)
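The all-zero input is the interesting edge case: a naive implementation would divide by the zero norm, but clip_by_norm divides by max(l2norm, clip_norm), as in the reference sketch at the top, so zeros map to zeros instead of NaN.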
Example 10: testClipByNormClippedWithDim0

def testClipByNormClippedWithDim0(self):
  # Norm clipping when clip_norm < 5
  with self.test_session(use_gpu=True):
    x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
    # Norm of x[:, 0] = sqrt(3^2 + 4^2) = 5, x[:, 2] = 3
    np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 3.0]]
    clip_norm = 4.0
    ans = clip_ops.clip_by_norm(x, clip_norm, [0])
    tf_ans = ans.eval()
    self.assertAllClose(np_ans, tf_ans)
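Worked check: with axes=[0] each column is clipped independently. Column 0 has norm 5 > 4 and is scaled by 0.8 (-3.0 -> -2.4, 4.0 -> 3.2); column 2 has norm 3 <= 4 and is left untouched.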
Example 11: testClipByNormNotClipped

def testClipByNormNotClipped(self):
  # No norm clipping when clip_norm >= 5
  with self.test_session():
    x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
    # Norm of x = sqrt(3^2 + 4^2) = 5
    np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
    clip_norm = 6.0
    ans = clip_ops.clip_by_norm(x, clip_norm)
    tf_ans = ans.eval()
    self.assertAllClose(np_ans, tf_ans)
Example 12: testClipByNormNotClippedWithAxes

def testClipByNormNotClippedWithAxes(self):
  # No norm clipping when clip_norm >= 5
  with self.test_session(use_gpu=True):
    x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
    # Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
    np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 3.0]]
    clip_norm = 6.0
    ans = clip_ops.clip_by_norm(x, clip_norm, [1])
    tf_ans = ans.eval()
    self.assertAllClose(np_ans, tf_ans)
Example 13: testClipByNormClippedWithDim1

def testClipByNormClippedWithDim1(self):
  # Norm clipping when clip_norm < 5
  with self.session(use_gpu=True):
    x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
    # Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
    np_ans = [[-3.0, 0.0, 0.0], [3.2, 0.0, 2.4]]
    clip_norm = 4.0
    ans = clip_ops.clip_by_norm(x, clip_norm, [1])
    tf_ans = self.evaluate(ans)
    self.assertAllClose(np_ans, tf_ans)
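Worked check: with axes=[1] each row is clipped independently. Row 0 has norm 3 <= 4 and passes through; row 1 has norm 5 > 4 and is scaled by 0.8 (4.0 -> 3.2, 3.0 -> 2.4).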
Example 14: clip_gradient_norms

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given max norm value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
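A hypothetical TF1-style training setup around this helper; opt and loss are assumed to exist and are not part of the example above:

grads_and_vars = opt.compute_gradients(loss)            # [(grad, var), ...]
grads_and_vars = clip_gradient_norms(grads_and_vars, max_norm=5.0)
train_op = opt.apply_gradients(grads_and_vars)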
Example 15: _compute_gradients

def _compute_gradients(self, loss, var_list, grad_loss=None):
  """Compute gradients of `loss` for the variables in `var_list`.

  This is the first part of `minimize()`. It returns a list
  of (gradient, variable) pairs where "gradient" is the gradient
  for "variable". Note that "gradient" can be a `Tensor`, an
  `IndexedSlices`, or `None` if there is no gradient for the
  given variable.

  Args:
    loss: A callable taking no arguments which returns the value to minimize.
    var_list: list or tuple of `Variable` objects to update to minimize
      `loss`, or a callable returning the list or tuple of `Variable`
      objects. Use a callable when the variable list would otherwise be
      incomplete before `minimize` is called and the variables are created
      the first time `loss` is called.
    grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.

  Returns:
    A list of (gradient, variable) pairs. Variable is always present, but
    gradient can be `None`.

  Raises:
    TypeError: If `var_list` contains anything other than `Variable` objects.
    ValueError: If some arguments are invalid, or var_list is None.
  """
  # TODO(josh11b): Test that we handle weight decay in a reasonable way.
  with backprop.GradientTape() as tape:
    if not callable(var_list):
      tape.watch(var_list)
    loss_value = loss()
  if callable(var_list):
    var_list = var_list()
  var_list = nest.flatten(var_list)
  grads = tape.gradient(loss_value, var_list, grad_loss)
  if hasattr(self, "clipnorm"):
    grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
  if hasattr(self, "clipvalue"):
    grads = [
        clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
        for g in grads
    ]
  grads_and_vars = list(zip(grads, var_list))
  self._assert_valid_dtypes([
      v for g, v in grads_and_vars
      if g is not None and v.dtype != dtypes.resource
  ])
  return grads_and_vars
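_compute_gradients is private to OptimizerV2 and is normally reached through minimize(); purely for illustration, here is a hypothetical direct call in eager mode, assuming a TF 2.x version in which tf.keras.optimizers.SGD still exposes this method and accepts the clipnorm constructor argument:

import tensorflow as tf

opt = tf.keras.optimizers.SGD(learning_rate=0.1, clipnorm=1.0)
var = tf.Variable([3.0, 4.0])
loss = lambda: tf.reduce_sum(var ** 2)   # loss must be a zero-argument callable
grads_and_vars = opt._compute_gradients(loss, var_list=[var])
# The raw gradient 2 * var = [6.0, 8.0] has norm 10.0 and is clipped to norm 1.0.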