This article collects typical usage examples of the clip_by_value function from Python's tensorflow.python.ops.clip_ops module. If you have been wondering what clip_by_value does, how to use it, and what real code that calls it looks like, the curated examples below should help.
Fifteen code examples of clip_by_value are shown, sorted by popularity by default.
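
Before the collected examples, here is a minimal sketch of the op itself, assuming a TensorFlow 1.x runtime (tf.clip_by_value is the public alias for clip_ops.clip_by_value): every element of the input tensor is clamped into the closed interval [clip_value_min, clip_value_max].

import tensorflow as tf

t = tf.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0])
# Elements below -4.0 become -4.0, elements above 4.0 become 4.0.
clipped = tf.clip_by_value(t, clip_value_min=-4.0, clip_value_max=4.0)
with tf.Session() as sess:
  print(sess.run(clipped))  # [-4.  2.  3.  4.  4.  4.]
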
Example 1: testClipByValueEmptyTensor
def testClipByValueEmptyTensor(self):
  # Test case for GitHub issue 19337
  zero = array_ops.placeholder(dtype=dtypes.float32, shape=None)
  x = clip_ops.clip_by_value(zero, zero, zero)
  y = clip_ops.clip_by_value(zero, 1.0, 1.0)
  z = clip_ops.clip_by_value(zero, zero, 1.0)
  w = clip_ops.clip_by_value(zero, 1.0, zero)
  with self.session(use_gpu=True) as sess:
    sess.run([x, y, z, w], feed_dict={zero: np.zeros((7, 0))})
Example 2: testClipByValueBadShape
def testClipByValueBadShape(self):
  with self.session(use_gpu=True):
    x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3, 1])
    # Use a nonsensical shape.
    clip = constant_op.constant([1.0, 2.0])
    with self.assertRaises(ValueError):
      _ = clip_ops.clip_by_value(x, -clip, clip)
    with self.assertRaises(ValueError):
      _ = clip_ops.clip_by_value(x, 1.0, clip)
Example 3: _get_coordinatewise_learning_rate
def _get_coordinatewise_learning_rate(self, grad, var):
  # Compute the learning rate using a moving average for the diagonal of BB^T
  avg_first = self.get_slot(var, 'first_moment')
  avg_second = self.get_slot(var, 'second_moment')
  decay_tensor = math_ops.cast(self._decay_tensor, var.dtype)
  batch_size = math_ops.cast(self._batch_size_tensor, var.dtype)

  # Create an estimator for the moving average of gradient mean and variance
  # via Welford's algorithm
  if isinstance(grad, ops.Tensor):
    delta = grad - avg_first
    first_moment_update = avg_first.assign_add(
        array_ops.where(self._counter < 1, math_ops.cast(1, var.dtype),
                        1. - decay_tensor) * delta)

    with ops.control_dependencies([first_moment_update]):
      second_moment_update = avg_second.assign_add(
          math_ops.cast(self._counter < 1, var.dtype) *
          -(1. - decay_tensor) * (
              avg_second - decay_tensor * math_ops.square(delta)))
    diag_preconditioner = control_flow_ops.with_dependencies(
        [second_moment_update],
        clip_ops.clip_by_value(avg_second, 1e-12, 1e12))
  elif isinstance(grad, ops.IndexedSlices):
    delta = grad.values - array_ops.gather_nd(avg_first, grad.indices)
    first_moment_update = state_ops.scatter_add(
        avg_first,
        grad.indices,
        array_ops.where(self._counter < 1,
                        math_ops.cast(1., var.dtype),
                        1. - decay_tensor) * delta)

    with ops.control_dependencies([first_moment_update]):
      avg_second = state_ops.scatter_add(
          avg_second,
          grad.indices,
          math_ops.cast(self._counter < 1, var.dtype) *
          -(1. - decay_tensor) * (
              array_ops.gather_nd(avg_second, grad.indices) - decay_tensor *
              math_ops.square(delta)))
      avg_second = array_ops.gather_nd(avg_second, grad.indices)
      # TODO(b/70783772)
      diag_preconditioner = clip_ops.clip_by_value(avg_second, 1e-12, 1e12)
  else:
    raise errors.InvalidArgumentError(
        None, None, 'grad must be of type Tensor or IndexedSlices')

  diag_preconditioner *= batch_size

  if self._use_single_learning_rate:
    diag_preconditioner = math_ops.reduce_mean(diag_preconditioner)

  # From Theorem 2 Corollary 1 of Mandt et al. 2017
  return 2. * batch_size / (
      math_ops.cast(self._total_num_examples, var.dtype.base_dtype) *
      diag_preconditioner)
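
To see why the second-moment estimate is clamped to [1e-12, 1e12] before it lands in the denominator, here is a standalone numpy sketch (not the optimizer itself; the variable names and constants are illustrative): without the lower bound, a variance estimate that underflows to zero would make the returned learning rate blow up.

import numpy as np

avg_second = np.array([0.0, 1e-20, 0.5, 4.0])  # raw second-moment estimates
batch_size, total_num_examples = 32, 50000
# Clipping bounds the preconditioner away from zero (and from overflow).
diag_preconditioner = np.clip(avg_second, 1e-12, 1e12) * batch_size
learning_rate = 2. * batch_size / (total_num_examples * diag_preconditioner)
print(learning_rate)  # finite everywhere, even where avg_second is ~0
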
Example 4: testClipByValueBadShape
def testClipByValueBadShape(self):
  with self.test_session(use_gpu=True):
    x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3, 1])
    # Use a nonsensical shape.
    clip = constant_op.constant([1.0, 2.0])
    with self.assertRaises(errors_impl.InvalidArgumentError):
      ans = clip_ops.clip_by_value(x, -clip, clip)
      tf_ans = ans.eval()
    with self.assertRaises(errors_impl.InvalidArgumentError):
      ans = clip_ops.clip_by_value(x, 1.0, clip)
      tf_ans = ans.eval()
Example 5: testClipByValueGradient
def testClipByValueGradient(self):
  inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
  outputs_1 = clip_ops.clip_by_value(inputs, 0.5, 3.5)
  min_val = constant_op.constant([0.5, 0.5, 0.5, 0.5], dtype=dtypes.float32)
  max_val = constant_op.constant([3.5, 3.5, 3.5, 3.5], dtype=dtypes.float32)
  outputs_2 = clip_ops.clip_by_value(inputs, min_val, max_val)
  with self.test_session():
    error_1 = gradient_checker.compute_gradient_error(inputs, [4],
                                                      outputs_1, [4])
    self.assertLess(error_1, 1e-4)
    error_2 = gradient_checker.compute_gradient_error(inputs, [4],
                                                      outputs_2, [4])
    self.assertLess(error_2, 1e-4)
Example 6: get_gradients
def get_gradients(self, loss, params):
  """Returns gradients of `loss` with respect to `params`.

  Arguments:
    loss: Loss tensor.
    params: List of variables.

  Returns:
    List of gradient tensors.

  Raises:
    ValueError: In case any gradient cannot be computed (e.g. if gradient
      function not implemented).
  """
  loss = self._scale_loss(loss)
  grads = gradients.gradients(loss, params)
  if None in grads:
    raise ValueError("An operation has `None` for gradient. "
                     "Please make sure that all of your ops have a "
                     "gradient defined (i.e. are differentiable). "
                     "Common ops without gradient: "
                     "K.argmax, K.round, K.eval.")
  if hasattr(self, "clipnorm"):
    grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
  if hasattr(self, "clipvalue"):
    grads = [
        clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
        for g in grads
    ]
  return grads
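
Outside the optimizer class, the clipvalue branch amounts to the following hedged sketch: each gradient tensor is clamped elementwise to [-clipvalue, clipvalue] (the clipnorm branch would use clip_by_norm instead).

import tensorflow as tf

clipvalue = 1.0
grads = [tf.constant([-3.0, 0.5]), tf.constant([2.0, -0.1])]
# Elementwise clamp of every gradient tensor, as in the clipvalue branch.
clipped_grads = [tf.clip_by_value(g, -clipvalue, clipvalue) for g in grads]
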
Example 7: adjust_brightness
def adjust_brightness(image, delta):
  """Adjust the brightness of RGB or Grayscale images.

  This is a convenience method that converts an RGB image to float
  representation, adjusts its brightness, and then converts it back to the
  original data type. If several adjustments are chained it is advisable to
  minimize the number of redundant conversions.

  The value `delta` is added to all components of the tensor `image`. Both
  `image` and `delta` are converted to `float` before adding (and `image` is
  scaled appropriately if it is in fixed-point representation). For regular
  images, `delta` should be in the range `[0,1)`, as it is added to the image
  in floating point representation, where pixel values are in the `[0,1)`
  range.

  Args:
    image: A tensor.
    delta: A scalar. Amount to add to the pixel values.

  Returns:
    A brightness-adjusted tensor of the same shape and type as `image`.
  """
  with ops.name_scope(None, 'adjust_brightness', [image, delta]) as name:
    image = ops.convert_to_tensor(image, name='image')
    # Remember original dtype so we can convert back if needed
    orig_dtype = image.dtype
    flt_image = convert_image_dtype(image, dtypes.float32)
    adjusted = math_ops.add(flt_image,
                            math_ops.cast(delta, dtypes.float32),
                            name=name)
    adjusted = clip_ops.clip_by_value(adjusted, 0.0, 1.0)
    return convert_image_dtype(adjusted, orig_dtype, saturate=True)
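
For completeness, a usage sketch through the public wrapper, assuming tf.image.adjust_brightness follows the same convert-to-float, add-delta, clip pipeline as the version shown above (newer releases may omit the clip):

import tensorflow as tf

image = tf.zeros([2, 2, 3], dtype=tf.uint8)  # a black RGB image
# delta is interpreted in float space, so 0.3 corresponds to ~77/255 here.
brighter = tf.image.adjust_brightness(image, delta=0.3)
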
Example 8: saturate_cast
def saturate_cast(image, dtype):
  """Performs a safe cast of image data to `dtype`.

  This function casts the data in image to `dtype`, without applying any
  scaling. If there is a danger that image data would over or underflow in the
  cast, this op applies the appropriate clamping before the cast.

  Args:
    image: An image to cast to a different data type.
    dtype: A `DType` to cast `image` to.

  Returns:
    `image`, safely cast to `dtype`.
  """
  clamped = image

  # When casting to a type with smaller representable range, clamp.
  # Note that this covers casting to unsigned types as well.
  if image.dtype.min < dtype.min and image.dtype.max > dtype.max:
    clamped = clip_ops.clip_by_value(clamped,
                                     math_ops.cast(dtype.min, image.dtype),
                                     math_ops.cast(dtype.max, image.dtype))
  elif image.dtype.min < dtype.min:
    clamped = math_ops.maximum(clamped, math_ops.cast(dtype.min, image.dtype))
  elif image.dtype.max > dtype.max:
    clamped = math_ops.minimum(clamped, math_ops.cast(dtype.max, image.dtype))
  return math_ops.cast(clamped, dtype)
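
A quick check of the clamp-before-cast behavior, assuming the public tf.saturate_cast alias: out-of-range values are clamped to the target dtype's limits rather than wrapping around as a plain tf.cast would.

import tensorflow as tf

x = tf.constant([-10.0, 0.5, 300.0])
y = tf.saturate_cast(x, tf.uint8)  # -> [0, 0, 255]: clamped, no wraparound
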
Example 9: get_gradients
def get_gradients(self, loss, params):
  """Returns gradients of `loss` with respect to `params`.

  Arguments:
    loss: Loss tensor.
    params: List of variables.

  Returns:
    List of gradient tensors.

  Raises:
    ValueError: In case any gradient cannot be computed (e.g. if gradient
      function not implemented).
  """
  params = nest.flatten(params)
  with backend.get_graph().as_default():
    grads = gradients.gradients(loss, params)
  for grad, param in zip(grads, params):
    if grad is None:
      raise ValueError("Variable {} has `None` for gradient. "
                       "Please make sure that all of your ops have a "
                       "gradient defined (i.e. are differentiable). "
                       "Common ops without gradient: "
                       "K.argmax, K.round, K.eval.".format(param))
  if hasattr(self, "clipnorm"):
    grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
  if hasattr(self, "clipvalue"):
    grads = [
        clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
        for g in grads
    ]
  return grads
Example 10: _do_maximum_mean
def _do_maximum_mean(samples, envelope, high, name=None):
  """Common code between maximum_mean and minimum_mean."""
  with ops.name_scope(name, "do_maximum_mean", [samples, envelope, high]):
    n = array_ops.rank(samples)
    # Move the batch dimension of `samples` to the rightmost position,
    # where the _batch_sort_vector function wants it.
    perm = array_ops.concat([math_ops.range(1, n), [0]], axis=0)
    samples = array_ops.transpose(samples, perm)

    samples = _batch_sort_vector(samples)

    # The maximum mean is given by taking `envelope`-worth of
    # probability from the smallest samples and moving it to the
    # maximum value. This amounts to:
    # - ignoring the smallest k samples, where `k/n < envelope`
    # - taking a `1/n - (envelope - k/n)` part of the index k sample
    # - taking all the other samples
    # - and adding `envelope * high` at the end.
    # The following is a vectorized and batched way of computing this.
    # `max_mean_contrib` is a mask implementing the previous.
    batch_size = array_ops.shape(samples)[-1]
    batch_size = math_ops.cast(batch_size, dtype=samples.dtype.base_dtype)
    step = 1. / batch_size
    cum_steps = step * math_ops.range(
        1, batch_size + 1, dtype=samples.dtype.base_dtype)
    max_mean_contrib = clip_ops.clip_by_value(
        cum_steps - envelope[..., array_ops.newaxis],
        clip_value_min=0.,
        clip_value_max=step)
    return math_ops.reduce_sum(
        samples * max_mean_contrib, axis=-1) + envelope * high
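
The mask construction is easier to follow in a small numpy sketch (one unbatched case with illustrative numbers): each sorted sample contributes at most step = 1/n of probability mass, and the clip removes envelope-worth of mass from the smallest samples before envelope * high is added back at the top.

import numpy as np

samples = np.sort(np.array([0.1, 0.4, 0.7, 0.9]))
envelope, high = 0.3, 1.0
step = 1. / samples.size                       # 0.25 of mass per sample
cum_steps = step * np.arange(1, samples.size + 1)
max_mean_contrib = np.clip(cum_steps - envelope, 0., step)
print(max_mean_contrib.sum())                  # 0.7 == 1 - envelope
max_mean = (samples * max_mean_contrib).sum() + envelope * high
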
Example 11: LSTMCell
def LSTMCell(cls, x, mprev, cprev, weights):
  xm = array_ops.concat([x, mprev], 1)
  i_i, i_g, f_g, o_g = array_ops.split(
      value=math_ops.matmul(xm, weights), num_or_size_splits=4, axis=1)
  new_c = math_ops.sigmoid(f_g) * cprev + math_ops.sigmoid(
      i_g) * math_ops.tanh(i_i)
  new_c = clip_ops.clip_by_value(new_c, -50.0, 50.0)
  new_m = math_ops.sigmoid(o_g) * math_ops.tanh(new_c)
  return new_m, new_c
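
The clip on new_c is a numerical-stability guard: the recurrence accumulates into the cell state, so without the clamp a saturated forget gate can let it grow without bound across timesteps. A minimal isolated sketch, with hypothetical values:

import tensorflow as tf

cprev = tf.constant([120.0, -0.5])            # a runaway cell state
new_c = tf.clip_by_value(cprev, -50.0, 50.0)  # clamp before the tanh
new_m = tf.sigmoid(tf.constant([0.2, 0.8])) * tf.tanh(new_c)
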
Example 12: testClipByValue
def testClipByValue(self):
  with self.test_session(use_gpu=True):
    x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
    np_ans = [[-4.4, 2.0, 3.0], [4.0, 4.4, 4.4]]
    clip_value = 4.4
    ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
    tf_ans = ans.eval()
    self.assertAllClose(np_ans, tf_ans)
Example 13: testClipByValueNonFinite
def testClipByValueNonFinite(self):
  with self.test_session(use_gpu=True):
    x = constant_op.constant([float('NaN'), float('Inf'), -float('Inf')])
    np_ans = [float('NaN'), 4.0, -4.0]
    clip_value = 4.0
    ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
    tf_ans = ans.eval()
    self.assertAllClose(np_ans, tf_ans)
Example 14: compute_cdf
def compute_cdf(values, value_range, **kwargs):
  """Returns the normalized cumulative distribution of the given values tensor.

  Uses tf.while_loop to directly compute the cdf of the values. Number of bins
  for histogram is fixed at _NBINS=255.

  Args:
    values: Numeric `Tensor`.
    value_range: Shape [2] `Tensor` of same `dtype` as `values`.
    **kwargs: keyword arguments: name

  Returns:
    A 1-D `Tensor` holding normalized cdf of values.
  """
  nbins = _NBINS
  name = kwargs.get('name', None)
  with ops.name_scope(name, 'cdf', [values, value_range, nbins]):
    values = ops.convert_to_tensor(values, name='values')
    value_range = ops.convert_to_tensor(value_range, name='value_range')
    nbins_float = np.float32(nbins)

    # Map tensor values that fall within value_range to [0, 1].
    scaled_values = math_ops.truediv(
        values - value_range[0],
        value_range[1] - value_range[0],
        name='scaled_values')

    # Map tensor values within the open interval value_range to {0,..., nbins-1};
    # values outside the open interval will be zero or less, or nbins or more.
    indices = math_ops.floor(nbins_float * scaled_values, name='indices')

    # Clip edge cases (e.g. value = value_range[1]) or "outliers."
    indices = math_ops.cast(
        clip_ops.clip_by_value(indices, 0, nbins_float - 1), dtypes.int32)

    cdf = array_ops.zeros(nbins)
    i = constant_op.constant(0)

    def loop_cond(loop_count, _):
      return math_ops.less(loop_count, nbins)

    def loop_body(loop_count, cdf):
      temp = math_ops.reduce_sum(
          math_ops.cast(
              math_ops.less_equal(indices, loop_count), dtypes.float32))
      cdf = math_ops.add(
          cdf,
          array_ops.one_hot(
              loop_count, depth=_NBINS, on_value=temp, off_value=0.0))
      return [loop_count + 1, cdf]

    _, cdf = control_flow_ops.while_loop(
        loop_cond, loop_body, [i, cdf], maximum_iterations=nbins)

    return math_ops.div(cdf, math_ops.reduce_max(cdf))
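
The while_loop above computes, for each bin b, the count of indices <= b, i.e. an unnormalized cdf. A numpy sketch of the same computation (assuming _NBINS = 255, as the docstring states):

import numpy as np

nbins = 255                                    # assumed value of _NBINS
values = np.random.uniform(0., 1., 1000).astype(np.float32)
lo, hi = 0.0, 1.0                              # value_range
scaled = (values - lo) / (hi - lo)
indices = np.clip(np.floor(nbins * scaled), 0, nbins - 1).astype(np.int32)
counts = np.bincount(indices, minlength=nbins)
cdf = np.cumsum(counts).astype(np.float32)     # count of indices <= b per bin
cdf /= cdf.max()                               # normalize, as math_ops.div does
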
Example 15: testClipByValueNonFinite
def testClipByValueNonFinite(self):
  # TODO(b/78016351): Enable test on GPU once the bug is fixed.
  with self.cached_session():
    x = constant_op.constant([float('NaN'), float('Inf'), -float('Inf')])
    np_ans = [float('NaN'), 4.0, -4.0]
    clip_value = 4.0
    ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
    tf_ans = self.evaluate(ans)
    self.assertAllClose(np_ans, tf_ans)