This article collects typical usage examples of tensorflow.clip_by_value in Python. If you have been wondering exactly how to use tensorflow.clip_by_value, or are looking for working examples of it, the curated code samples below may help. You can also explore further usage examples from the enclosing module, tensorflow.
The following presents 15 code examples of tensorflow.clip_by_value, sorted by popularity by default.
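Before diving into the examples, here is a minimal sketch of the basic call, written against TensorFlow 1.x to match the examples below: tf.clip_by_value takes a tensor and clamps every element into the closed interval [clip_value_min, clip_value_max].

import tensorflow as tf

# Clamp every element of a tensor into [0, 1].
x = tf.constant([-0.5, 0.3, 1.7])
clipped = tf.clip_by_value(x, clip_value_min=0.0, clip_value_max=1.0)

with tf.Session() as sess:
    print(sess.run(clipped))  # the values come out as [0.0, 0.3, 1.0]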
Example 1: random_adjust_hue
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import clip_by_value [as alias]
def random_adjust_hue(image, max_delta=0.02):
  """Randomly adjusts hue.

  Makes sure the output image is still between 0 and 1.

  Args:
    image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
           with pixel values varying between [0, 1].
    max_delta: change hue randomly with a value between 0 and max_delta.

  Returns:
    image: image which is the same shape as input image.
  """
  with tf.name_scope('RandomAdjustHue', values=[image]):
    image = tf.image.random_hue(image, max_delta)
    image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
    return image
Example 2: visit_count_fc
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import clip_by_value [as alias]
def visit_count_fc(visit_count, last_visit, embed_neurons, wt_decay, fc_dropout,
                   is_training=True):
  # Note: is_training is consumed by tf_utils.fc_network below; it is added to
  # the signature here so the snippet is self-contained.
  with tf.variable_scope('embed_visit_count'):
    visit_count = tf.reshape(visit_count, shape=[-1])
    last_visit = tf.reshape(last_visit, shape=[-1])
    visit_count = tf.clip_by_value(visit_count, clip_value_min=-1,
                                   clip_value_max=15)
    last_visit = tf.clip_by_value(last_visit, clip_value_min=-1,
                                  clip_value_max=15)
    visit_count = tf.one_hot(visit_count, depth=16, axis=1, dtype=tf.float32,
                             on_value=10., off_value=0.)
    last_visit = tf.one_hot(last_visit, depth=16, axis=1, dtype=tf.float32,
                            on_value=10., off_value=0.)
    f = tf.concat([visit_count, last_visit], 1)
    x, _ = tf_utils.fc_network(
        f, neurons=embed_neurons, wt_decay=wt_decay, name='visit_count_embed',
        offset=0, batch_norm_param=None, dropout_ratio=fc_dropout,
        is_training=is_training)
  return x
Example 3: random_adjust_brightness
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import clip_by_value [as alias]
def random_adjust_brightness(image, max_delta=0.2):
  """Randomly adjusts brightness.

  Makes sure the output image is still between 0 and 1.

  Args:
    image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
           with pixel values varying between [0, 1].
    max_delta: how much to change the brightness. A value between [0, 1).

  Returns:
    image: image which is the same shape as input image.
  """
  with tf.name_scope('RandomAdjustBrightness', values=[image]):
    image = tf.image.random_brightness(image, max_delta)
    image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
    return image
Example 4: random_adjust_contrast
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import clip_by_value [as alias]
def random_adjust_contrast(image, min_delta=0.8, max_delta=1.25):
  """Randomly adjusts contrast.

  Makes sure the output image is still between 0 and 1.

  Args:
    image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
           with pixel values varying between [0, 1].
    min_delta: see max_delta.
    max_delta: how much to change the contrast. Contrast will change with a
               value between min_delta and max_delta. This value will be
               multiplied to the current contrast of the image.

  Returns:
    image: image which is the same shape as input image.
  """
  with tf.name_scope('RandomAdjustContrast', values=[image]):
    image = tf.image.random_contrast(image, min_delta, max_delta)
    image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
    return image
Example 5: random_adjust_saturation
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import clip_by_value [as alias]
def random_adjust_saturation(image, min_delta=0.8, max_delta=1.25):
  """Randomly adjusts saturation.

  Makes sure the output image is still between 0 and 1.

  Args:
    image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
           with pixel values varying between [0, 1].
    min_delta: see max_delta.
    max_delta: how much to change the saturation. Saturation will change with a
               value between min_delta and max_delta. This value will be
               multiplied to the current saturation of the image.

  Returns:
    image: image which is the same shape as input image.
  """
  with tf.name_scope('RandomAdjustSaturation', values=[image]):
    image = tf.image.random_saturation(image, min_delta, max_delta)
    image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
    return image
Example 6: call
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import clip_by_value [as alias]
def call(self, inputs):
    mean_and_log_std = self.model(inputs)
    mean, log_std = tf.split(mean_and_log_std, num_or_size_splits=2, axis=1)
    log_std = tf.clip_by_value(log_std, -20., 2.)

    distribution = tfp.distributions.MultivariateNormalDiag(
        loc=mean,
        scale_diag=tf.exp(log_std)
    )

    raw_actions = distribution.sample()
    if not self._reparameterize:
        ### Problem 1.3.A
        ### YOUR CODE HERE
        raw_actions = tf.stop_gradient(raw_actions)
    log_probs = distribution.log_prob(raw_actions)
    log_probs -= self._squash_correction(raw_actions)

    ### Problem 2.A
    ### YOUR CODE HERE
    self.actions = tf.tanh(raw_actions)

    return self.actions, log_probs
Example 7: setup_critic_optimizer
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import clip_by_value [as alias]
def setup_critic_optimizer(self):
    logger.info('setting up critic optimizer')
    normalized_critic_target_tf = tf.clip_by_value(normalize(self.critic_target, self.ret_rms), self.return_range[0], self.return_range[1])
    self.critic_loss = tf.reduce_mean(tf.square(self.normalized_critic_tf - normalized_critic_target_tf))
    if self.critic_l2_reg > 0.:
        critic_reg_vars = [var for var in self.critic.trainable_vars if 'kernel' in var.name and 'output' not in var.name]
        for var in critic_reg_vars:
            logger.info('  regularizing: {}'.format(var.name))
        logger.info('  applying l2 regularization with {}'.format(self.critic_l2_reg))
        critic_reg = tc.layers.apply_regularization(
            tc.layers.l2_regularizer(self.critic_l2_reg),
            weights_list=critic_reg_vars
        )
        self.critic_loss += critic_reg
    critic_shapes = [var.get_shape().as_list() for var in self.critic.trainable_vars]
    critic_nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in critic_shapes])
    logger.info('  critic shapes: {}'.format(critic_shapes))
    logger.info('  critic params: {}'.format(critic_nb_params))
    self.critic_grads = U.flatgrad(self.critic_loss, self.critic.trainable_vars, clip_norm=self.clip_norm)
    self.critic_optimizer = MpiAdam(var_list=self.critic.trainable_vars,
                                    beta1=0.9, beta2=0.999, epsilon=1e-08)
Example 8: block35
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import clip_by_value [as alias]
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 35x35 resnet block."""
  with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
    with tf.variable_scope('Branch_2'):
      tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
      tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
    mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2])
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    scaled_up = up * scale
    if activation_fn == tf.nn.relu6:
      # Use clip_by_value to simulate bandpass activation.
      scaled_up = tf.clip_by_value(scaled_up, -6.0, 6.0)

    net += scaled_up
    if activation_fn:
      net = activation_fn(net)
  return net
Example 9: block17
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import clip_by_value [as alias]
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 17x17 resnet block."""
  with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
                                  scope='Conv2d_0b_1x7')
      tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
                                  scope='Conv2d_0c_7x1')
    mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    scaled_up = up * scale
    if activation_fn == tf.nn.relu6:
      # Use clip_by_value to simulate bandpass activation.
      scaled_up = tf.clip_by_value(scaled_up, -6.0, 6.0)

    net += scaled_up
    if activation_fn:
      net = activation_fn(net)
  return net
Example 10: block8
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import clip_by_value [as alias]
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 8x8 resnet block."""
  with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
                                  scope='Conv2d_0b_1x3')
      tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
                                  scope='Conv2d_0c_3x1')
    mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    scaled_up = up * scale
    if activation_fn == tf.nn.relu6:
      # Use clip_by_value to simulate bandpass activation.
      scaled_up = tf.clip_by_value(scaled_up, -6.0, 6.0)

    net += scaled_up
    if activation_fn:
      net = activation_fn(net)
  return net
Example 11: _clip_if_not_None
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import clip_by_value [as alias]
def _clip_if_not_None(self, g, v, low, high):
    """Clip not-None gradients to (low, high).

    The gradient of T is None if T is not connected to the objective.
    """
    if g is not None:
        return (tf.clip_by_value(g, low, high), v)
    else:
        return (g, v)
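For context, here is a hedged sketch of how a helper like _clip_if_not_None is typically applied to the (gradient, variable) pairs returned by TF1's compute_gradients; the standalone function name, the toy loss, and the clipping range below are illustrative assumptions, not part of the original example.

import tensorflow as tf

# Hypothetical standalone variant of the helper above (class context omitted).
def clip_if_not_none(g, v, low, high):
    # Gradients disconnected from the objective come back as None; pass them through.
    return (tf.clip_by_value(g, low, high), v) if g is not None else (g, v)

w = tf.Variable([1.0, 2.0])
loss = tf.reduce_sum(tf.square(w))
optimizer = tf.train.GradientDescentOptimizer(0.1)
grads_and_vars = optimizer.compute_gradients(loss)
clipped = [clip_if_not_none(g, v, -1.0, 1.0) for g, v in grads_and_vars]
train_op = optimizer.apply_gradients(clipped)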
Example 12: lerp_clip
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import clip_by_value [as alias]
def lerp_clip(a, b, t):
    with tf.name_scope('LerpClip'):
        return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
Example 13: lerp_clip
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import clip_by_value [as alias]
def lerp_clip(a, b, t): return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
Example 14: build_pgd_attack
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import clip_by_value [as alias]
def build_pgd_attack(self, eps):
    victim_embeddings = tf.constant(self.victim_embeddings, dtype=tf.float32)

    def one_step_attack(image, grad):
        """
        Core components of this attack are:
        (a) PGD adversarial attack (https://arxiv.org/pdf/1706.06083.pdf)
        (b) momentum (https://arxiv.org/pdf/1710.06081.pdf)
        (c) input diversity (https://arxiv.org/pdf/1803.06978.pdf)
        """
        orig_image = image
        image = self.structure(image)
        image = (image - 127.5) / 128.0
        image = image + tf.random_uniform(tf.shape(image), minval=-1e-2, maxval=1e-2)
        prelogits, _ = self.network.inference(image, 1.0, False, bottleneck_layer_size=512)
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        embeddings = tf.reshape(embeddings[0], [512, 1])
        objective = tf.reduce_mean(tf.matmul(victim_embeddings, embeddings))  # to be maximized

        noise, = tf.gradients(objective, orig_image)
        noise = noise / tf.reduce_mean(tf.abs(noise), [1, 2, 3], keep_dims=True)
        noise = 0.9 * grad + noise

        adv = tf.clip_by_value(orig_image + tf.sign(noise) * 1.0, lower_bound, upper_bound)
        return adv, noise

    input = tf.to_float(self.image_batch)
    lower_bound = tf.clip_by_value(input - eps, 0, 255.)
    upper_bound = tf.clip_by_value(input + eps, 0, 255.)

    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        adv, _ = tf.while_loop(
            lambda _, __: True, one_step_attack,
            (input, tf.zeros_like(input)),
            back_prop=False,
            maximum_iterations=100,
            parallel_iterations=1)
    self.adv_image = adv
    return adv
Example 15: attack_single_step
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import clip_by_value [as alias]
def attack_single_step(self, x, eta, y):
    """
    Given the original image and the perturbation computed so far, computes
    a new perturbation.

    :param x: A tensor with the original input.
    :param eta: A tensor the same shape as x that holds the perturbation.
    :param y: A tensor with the target labels or ground-truth labels.
    """
    import tensorflow as tf
    from cleverhans.utils_tf import clip_eta
    from cleverhans.loss import attack_softmax_cross_entropy

    adv_x = x + eta
    logits = self.model.get_logits(adv_x)
    loss = attack_softmax_cross_entropy(y, logits)
    if self.targeted:
        loss = -loss
    grad, = tf.gradients(loss, adv_x)
    scaled_signed_grad = self.eps_iter * tf.sign(grad)
    adv_x = adv_x + scaled_signed_grad
    if self.clip_min is not None and self.clip_max is not None:
        adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
    eta = adv_x - x
    eta = clip_eta(eta, self.ord, self.eps)
    return eta