This article collects typical usage examples of the Python method tensorflow.compat.v1.clip_by_value. If you are wondering what v1.clip_by_value does, how to call it, or what real uses look like, the curated example code below may help. You can also explore the broader usage of its containing module, tensorflow.compat.v1.
The following presents 15 code examples of v1.clip_by_value, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
Example 1: _discriminator_alpha
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_value [as alias]
def _discriminator_alpha(block_id, progress):
"""Returns the block input parameter for discriminator network.
The discriminator has N blocks with `block_id` = 1,2,...,N. Each block
block_id accepts an
- input(block_id) transformed from the real data and
- the output of block block_id + 1, i.e. output(block_id + 1)
The final input is a linear combination of them,
i.e. alpha * input(block_id) + (1 - alpha) * output(block_id + 1)
where alpha = _discriminator_alpha(block_id, progress).
With a fixed block_id, alpha(block_id, progress) stays to be 1
when progress <= block_id - 1, then linear decays to 0 when
block_id - 1 < progress <= block_id, and finally stays at 0
when progress > block_id.
Args:
block_id: An integer of generator block id.
progress: A scalar float `Tensor` of training progress.
Returns:
A scalar float `Tensor` of block input parameter.
"""
return tf.clip_by_value(block_id - progress, 0.0, 1.0)
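As a quick sanity check of the schedule described in the docstring, here is a minimal sketch (assuming `import tensorflow.compat.v1 as tf` on TF 2.x; the progress values are hypothetical) that evaluates alpha for block_id = 2:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

block_id = 2
progress = tf.constant([0.5, 1.0, 1.5, 2.0, 2.5])
# Same expression as _discriminator_alpha above.
alpha = tf.clip_by_value(block_id - progress, 0.0, 1.0)
with tf.Session() as sess:
    print(sess.run(alpha))  # [1.0, 1.0, 0.5, 0.0, 0.0]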
Example 2: _get_projection
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_value [as alias]
def _get_projection(p):
"""Returns a projection function."""
if p == np.inf:
def _projection(perturbation, epsilon, input_image, image_bounds):
clipped_perturbation = tf.clip_by_value(perturbation, -epsilon, epsilon)
new_image = tf.clip_by_value(input_image + clipped_perturbation,
image_bounds[0], image_bounds[1])
return new_image - input_image
return _projection
elif p == 2:
def _projection(perturbation, epsilon, input_image, image_bounds):
axes = list(range(1, len(perturbation.get_shape())))
clipped_perturbation = tf.clip_by_norm(perturbation, epsilon, axes=axes)
new_image = tf.clip_by_value(input_image + clipped_perturbation,
image_bounds[0], image_bounds[1])
return new_image - input_image
return _projection
else:
raise ValueError('p must be np.inf or 2.')
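A minimal usage sketch (hypothetical names; assumes `_get_projection` above is in scope) of one L-infinity projection of the kind used inside a PGD attack loop:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

project = _get_projection(np.inf)
images = tf.zeros([4, 32, 32, 3])  # clean inputs in [0, 1]
perturbation = tf.random.uniform([4, 32, 32, 3], -0.1, 0.1)
projected = project(perturbation, epsilon=0.03,
                    input_image=images, image_bounds=(0.0, 1.0))
# `projected` has entries in [-0.03, 0.03], and its sum with `images`
# stays inside the valid image bounds.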
Example 3: _rand_noise
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_value [as alias]
def _rand_noise(noise_mean, noise_dev, scale, shape):
"""Generate random noise given a particular scale and shape."""
noise_shape = [x // scale for x in shape]
noise_shape = [1 if x == 0 else x for x in noise_shape]
noise = tf.random.normal(
shape=noise_shape, mean=noise_mean, stddev=noise_dev)
noise = tf.clip_by_value(
noise, noise_mean - 2.0 * noise_dev, noise_mean + 2.0 * noise_dev)
if scale != 1:
noise = tf.image.resize_images(
noise, [shape[0], shape[1]])
noise = tf.transpose(noise, [0, 2, 1])
noise = tf.image.resize_images(
noise, [shape[0], shape[2]])
noise = tf.transpose(noise, [0, 2, 1])
return noise
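A small sketch of calling it (assuming `_rand_noise` above is in scope): the noise is drawn at a coarser grid, clipped to two standard deviations, and the two resize/transpose passes upsample it back to the requested shape:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

noise = _rand_noise(noise_mean=0.0, noise_dev=1.0, scale=2, shape=[64, 64, 64])
with tf.Session() as sess:
    print(sess.run(noise).shape)  # (64, 64, 64), upsampled from (32, 32, 32)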
Example 4: ensure_dataset_eos
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_value [as alias]
def ensure_dataset_eos(dataset, feature_keys=None):
"""Replaces the final token of features with EOS=1 if it is not PAD=0.
Args:
dataset: a tf.data.Dataset
feature_keys: (optional) list of strings, the feature names to ensure end
with EOS or padding. Defaults to all features.
Returns:
a tf.data.Dataset where all specified features end with PAD=0 or EOS=1.
"""
feature_keys = feature_keys or tf.data.get_output_shapes(dataset).keys()
def _ensure_eos(k, v):
if k not in feature_keys:
return v
return tf.concat([v[0:-1], tf.clip_by_value(v[-1:], 0, 1)], axis=0)
return dataset.map(
lambda ex: {k: _ensure_eos(k, v) for k, v in ex.items()},
num_parallel_calls=tf.data.experimental.AUTOTUNE)
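A toy run (assuming `ensure_dataset_eos` above is in scope): clipping the final token to [0, 1] maps any non-PAD token to EOS=1 while leaving padding untouched:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

ds = tf.data.Dataset.from_tensors({"inputs": tf.constant([5, 7, 9], tf.int64)})
ds = ensure_dataset_eos(ds)
# The last token 9 becomes EOS=1, so the mapped example is [5, 7, 1];
# a sequence ending in PAD=0 would be left unchanged.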
Example 5: safe_cumprod
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_value [as alias]
def safe_cumprod(x, *args, **kwargs):
"""Computes cumprod of x in logspace using cumsum to avoid underflow.
The cumprod function and its gradient can result in numerical instabilities
when its argument has very small and/or zero values. As long as the argument
is all positive, we can instead compute the cumulative product as
exp(cumsum(log(x))). This function can be called identically to tf.cumprod.
Args:
x: Tensor to take the cumulative product of.
*args: Passed on to cumsum; these are identical to those in cumprod.
**kwargs: Passed on to cumsum; these are identical to those in cumprod.
Returns:
Cumulative product of x.
"""
with tf.name_scope(None, "SafeCumprod", [x]):
x = tf.convert_to_tensor(x, name="x")
tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
return tf.exp(
tf.cumsum(tf.log(tf.clip_by_value(x, tiny, 1)), *args, **kwargs))
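A minimal sketch of the numeric benefit (hypothetical values; assumes `safe_cumprod` and `import numpy as np` are in scope): with an exact zero in the input, the clip to `tiny` keeps log(x) finite, which is what protects the gradient:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant([0.9, 0.0, 0.5])
y = safe_cumprod(x)  # the zero is clipped to float32 tiny before the log
with tf.Session() as sess:
    # roughly [0.9, ~1e-38, ~5e-39] rather than [0.9, 0.0, 0.0]
    print(sess.run(y))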
Example 6: ramp
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_value [as alias]
def ramp(x=None, v_min=0, v_max=1, name=None):
"""The ramp activation function.
Parameters
----------
x : a tensor input
input(s)
v_min : float
if input(s) smaller than v_min, change inputs to v_min
v_max : float
if input(s) greater than v_max, change inputs to v_max
name : a string or None
An optional name to attach to this activation function.
Returns
--------
A `Tensor` with the same type as `x`.
"""
return tf.clip_by_value(x, clip_value_min=v_min, clip_value_max=v_max, name=name)
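A minimal usage sketch (assuming `ramp` above is in scope) with the default bounds v_min=0, v_max=1:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant([-1.5, 0.3, 2.0])
with tf.Session() as sess:
    print(sess.run(ramp(x)))  # [0.0, 0.3, 1.0]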
Example 7: loss_fn
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_value [as alias]
def loss_fn(self):
adv = tf.placeholder(tf.float32, [None], name="advantages")
returns = tf.placeholder(tf.float32, [None], name="returns")
logli_old = tf.placeholder(tf.float32, [None], name="logli_old")
value_old = tf.placeholder(tf.float32, [None], name="value_old")
ratio = tf.exp(self.policy.logli - logli_old)
clipped_ratio = tf.clip_by_value(ratio, 1-self.clip_ratio, 1+self.clip_ratio)
value_err = (self.value - returns)**2
if self.clip_value > 0.0:
clipped_value = tf.clip_by_value(self.value, value_old-self.clip_value, value_old+self.clip_value)
clipped_value_err = (clipped_value - returns)**2
value_err = tf.maximum(value_err, clipped_value_err)
policy_loss = -tf.reduce_mean(tf.minimum(adv * ratio, adv * clipped_ratio))
value_loss = tf.reduce_mean(value_err) * self.value_coef
entropy_loss = tf.reduce_mean(self.policy.entropy) * self.entropy_coef
    # We want to minimize the policy and value errors and maximize entropy;
    # since the optimizer minimizes the total, entropy enters with a minus sign.
full_loss = policy_loss + value_loss - entropy_loss
return full_loss, [policy_loss, value_loss, entropy_loss], [adv, returns, logli_old, value_old]
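The clip here implements the PPO clipped surrogate objective. A standalone numeric sketch of just the policy term, with hypothetical advantages and probability ratios (independent of the class above):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

clip_ratio = 0.2
adv = tf.constant([1.0, -1.0])   # advantages
ratio = tf.constant([1.5, 0.5])  # exp(logli - logli_old)
clipped = tf.clip_by_value(ratio, 1 - clip_ratio, 1 + clip_ratio)
policy_loss = -tf.reduce_mean(tf.minimum(adv * ratio, adv * clipped))
with tf.Session() as sess:
    # min(1.5, 1.2) = 1.2 and min(-0.5, -0.8) = -0.8, so loss = -(1.2 - 0.8) / 2
    print(sess.run(policy_loss))  # -0.2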
Example 8: block35
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_value [as alias]
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 35x35 resnet block."""
with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
scaled_up = up * scale
if activation_fn == tf.nn.relu6:
# Use clip_by_value to simulate bandpass activation.
scaled_up = tf.clip_by_value(scaled_up, -6.0, 6.0)
net += scaled_up
if activation_fn:
net = activation_fn(net)
return net
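When `activation_fn` is tf.nn.relu6, the residual branch is clamped to [-6, 6] before the skip addition, so the block stays compatible with the bounded activation end to end; the same pattern recurs in block17 and block8 below. A tiny sketch of the clamp in isolation:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

scaled_up = tf.constant([-8.0, -2.0, 7.0])
with tf.Session() as sess:
    print(sess.run(tf.clip_by_value(scaled_up, -6.0, 6.0)))  # [-6.0, -2.0, 6.0]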
Example 9: block17
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_value [as alias]
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 17x17 resnet block."""
with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
scope='Conv2d_0b_1x7')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
scope='Conv2d_0c_7x1')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
scaled_up = up * scale
if activation_fn == tf.nn.relu6:
# Use clip_by_value to simulate bandpass activation.
scaled_up = tf.clip_by_value(scaled_up, -6.0, 6.0)
net += scaled_up
if activation_fn:
net = activation_fn(net)
return net
Example 10: block8
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_value [as alias]
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 8x8 resnet block."""
with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
scope='Conv2d_0b_1x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
scope='Conv2d_0c_3x1')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
scaled_up = up * scale
if activation_fn == tf.nn.relu6:
# Use clip_by_value to simulate bandpass activation.
scaled_up = tf.clip_by_value(scaled_up, -6.0, 6.0)
net += scaled_up
if activation_fn:
net = activation_fn(net)
return net
Example 11: _clip_bbox
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_value [as alias]
def _clip_bbox(min_y, min_x, max_y, max_x):
"""Clip bounding box coordinates between 0 and 1.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
Returns:
Clipped coordinate values between 0 and 1.
"""
min_y = tf.clip_by_value(min_y, 0.0, 1.0)
min_x = tf.clip_by_value(min_x, 0.0, 1.0)
max_y = tf.clip_by_value(max_y, 0.0, 1.0)
max_x = tf.clip_by_value(max_x, 0.0, 1.0)
return min_y, min_x, max_y, max_x
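A quick sketch (assuming `_clip_bbox` above is in scope) with a box that spills outside the unit square:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

min_y, min_x, max_y, max_x = _clip_bbox(-0.1, 0.2, 0.9, 1.3)
with tf.Session() as sess:
    print(sess.run([min_y, min_x, max_y, max_x]))  # [0.0, 0.2, 0.9, 1.0]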
Example 12: feed_forward_gaussian_fun
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_value [as alias]
def feed_forward_gaussian_fun(action_space, config, observations):
"""Feed-forward Gaussian."""
if not isinstance(action_space, gym.spaces.box.Box):
raise ValueError("Expecting continuous action space.")
mean_weights_initializer = tf.initializers.variance_scaling(
scale=config.init_mean_factor)
logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)
flat_observations = tf.reshape(observations, [
tf.shape(observations)[0], tf.shape(observations)[1],
functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
with tf.variable_scope("network_parameters"):
with tf.variable_scope("policy"):
x = flat_observations
for size in config.policy_layers:
x = tf.layers.dense(x, size, activation=tf.nn.relu)
mean = tf.layers.dense(
x, action_space.shape[0], activation=tf.tanh,
kernel_initializer=mean_weights_initializer)
logstd = tf.get_variable(
"logstd", mean.shape[2:], tf.float32, logstd_initializer)
logstd = tf.tile(
logstd[None, None],
[tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
with tf.variable_scope("value"):
x = flat_observations
for size in config.value_layers:
x = tf.layers.dense(x, size, activation=tf.nn.relu)
value = tf.layers.dense(x, 1)[..., 0]
mean = tf.check_numerics(mean, "mean")
logstd = tf.check_numerics(logstd, "logstd")
value = tf.check_numerics(value, "value")
policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd))
  return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2.))
Example 13: postprocess
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_value [as alias]
def postprocess(x, n_bits_x=8):
"""Converts x from [-0.5, 0.5], to [0, 255].
Args:
x: 3-D or 4-D Tensor normalized between [-0.5, 0.5]
n_bits_x: Number of bits representing each pixel of the output.
Defaults to 8, to default to 256 possible values.
Returns:
x: 3-D or 4-D Tensor representing images or videos.
"""
x = tf.where(tf.is_finite(x), x, tf.ones_like(x))
x = tf.clip_by_value(x, -0.5, 0.5)
x += 0.5
x = x * 2**n_bits_x
return tf.cast(tf.clip_by_value(x, 0, 255), dtype=tf.uint8)
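A worked example (assuming `postprocess` above is in scope; shape relaxed to 2-D for brevity): clip to [-0.5, 0.5], shift by 0.5, scale by 2**8 = 256, then clip into the uint8 range:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant([[-0.7, -0.25, 0.0, 0.6]])
with tf.Session() as sess:
    print(sess.run(postprocess(x)))  # [[  0  64 128 255]]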
Example 14: visualize_predictions
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_value [as alias]
def visualize_predictions(self, real_frames, gen_frames, actions=None):
def concat_on_y_axis(x):
x = tf.unstack(x, axis=1)
x = tf.concat(x, axis=1)
return x
frames_gd = common_video.swap_time_and_batch_axes(real_frames)
frames_pd = common_video.swap_time_and_batch_axes(gen_frames)
if actions is not None:
actions = common_video.swap_time_and_batch_axes(actions)
if self.is_per_pixel_softmax:
frames_pd_shape = common_layers.shape_list(frames_pd)
frames_pd = tf.reshape(frames_pd, [-1, 256])
frames_pd = tf.to_float(tf.argmax(frames_pd, axis=-1))
frames_pd = tf.reshape(frames_pd, frames_pd_shape[:-1] + [3])
frames_gd = concat_on_y_axis(frames_gd)
frames_pd = concat_on_y_axis(frames_pd)
if actions is not None:
actions = tf.clip_by_value(actions, 0, 1)
summary("action_vid", tf.cast(actions * 255, tf.uint8))
actions = concat_on_y_axis(actions)
side_by_side_video = tf.concat([frames_gd, frames_pd, actions], axis=2)
else:
side_by_side_video = tf.concat([frames_gd, frames_pd], axis=2)
tf.summary.image("full_video", side_by_side_video)
Example 15: vqa_v2_preprocess_image
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_value [as alias]
def vqa_v2_preprocess_image(
image,
height,
width,
mode,
resize_side=512,
distort=True,
image_model_fn="resnet_v1_152",
):
"""vqa v2 preprocess image."""
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
assert resize_side > 0
if resize_side:
image = _aspect_preserving_resize(image, resize_side)
if mode == tf.estimator.ModeKeys.TRAIN:
image = tf.random_crop(image, [height, width, 3])
else:
# Central crop, assuming resize_height > height, resize_width > width.
image = tf.image.resize_image_with_crop_or_pad(image, height, width)
image = tf.clip_by_value(image, 0.0, 1.0)
if mode == tf.estimator.ModeKeys.TRAIN and distort:
image = _flip(image)
num_distort_cases = 4
# pylint: disable=unnecessary-lambda
image = _apply_with_random_selector(
image, lambda x, ordering: _distort_color(x, ordering),
num_cases=num_distort_cases)
if image_model_fn.startswith("resnet_v1"):
# resnet_v1 uses vgg preprocessing
image = image * 255.
image = _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
elif image_model_fn.startswith("resnet_v2"):
# resnet v2 uses inception preprocessing
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
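A usage sketch (hypothetical input; assumes the private helpers referenced above, such as _aspect_preserving_resize, _flip, _distort_color, and _mean_image_subtraction with the mean constants, are in scope):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

raw = tf.zeros([600, 800, 3], dtype=tf.uint8)  # a dummy decoded image
processed = vqa_v2_preprocess_image(
    raw, height=448, width=448, mode=tf.estimator.ModeKeys.EVAL)
# EVAL path: aspect-preserving resize to side 512, central crop to 448x448,
# clip to [0, 1], then VGG-style mean subtraction for the resnet_v1 default.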