This page collects typical usage examples of the Python method tensorflow.compat.v1.logical_not. If you are unsure what v1.logical_not does, how to call it, or what real-world usage looks like, the curated code samples below should help. You can also explore further usage examples of the containing module, tensorflow.compat.v1.
Fifteen code examples of v1.logical_not are shown below, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better Python code samples.
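Before the examples, here is a minimal, self-contained sketch of what tf.compat.v1.logical_not computes: element-wise boolean negation. The tensor values are invented for illustration, and the snippet runs in TF1-style graph mode:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # TF1-style graph mode

mask = tf.constant([True, False, True])
inverted = tf.logical_not(mask)  # element-wise boolean NOT

with tf.Session() as sess:
    print(sess.run(inverted))  # [False  True False]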
Example 1: lengths_to_area_mask
# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import logical_not [as alias]
def lengths_to_area_mask(feature_length, length, max_area_size):
  """Generates a non-padding mask for areas based on lengths.

  Args:
    feature_length: a tensor of [batch_size]
    length: the length of the batch
    max_area_size: the maximum area size considered

  Returns:
    mask: a tensor in shape of [batch_size, num_areas]
  """
  paddings = tf.cast(tf.expand_dims(
      tf.logical_not(
          tf.sequence_mask(feature_length, maxlen=length)), 2), tf.float32)
  _, _, area_sum, _, _ = compute_area_features(paddings,
                                               max_area_width=max_area_size)
  mask = tf.squeeze(tf.logical_not(tf.cast(area_sum, tf.bool)), [2])
  return mask
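The helper compute_area_features above comes from tensor2tensor and is not defined on this page, but the logical_not idiom stands on its own: inverting tf.sequence_mask turns a "valid token" mask into a padding indicator. A minimal sketch with made-up lengths:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

feature_length = tf.constant([2, 4])  # hypothetical per-example lengths
valid = tf.sequence_mask(feature_length, maxlen=4)     # True on real tokens
paddings = tf.cast(tf.logical_not(valid), tf.float32)  # 1.0 on padded slots

with tf.Session() as sess:
    print(sess.run(paddings))
    # [[0. 0. 1. 1.]
    #  [0. 0. 0. 0.]]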
Example 2: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import logical_not [as alias]
def __init__(self, c, d=None, prune_irrelevant=True, collapse=True):
  """Builds a linear specification module."""
  super(LinearSpecification, self).__init__(name='specs', collapse=collapse)
  # c has shape [batch_size, num_specifications, num_outputs]
  # d has shape [batch_size, num_specifications]
  # Some specifications may be irrelevant (not a function of the output).
  # We automatically remove them for clarity. We expect the number of
  # irrelevant specs to be equal for all elements of a batch.
  # Shape is [batch_size, num_specifications]
  if prune_irrelevant:
    irrelevant = tf.equal(tf.reduce_sum(
        tf.cast(tf.abs(c) > 1e-6, tf.int32), axis=-1, keepdims=True), 0)
    batch_size = tf.shape(c)[0]
    num_outputs = tf.shape(c)[2]
    irrelevant = tf.tile(irrelevant, [1, 1, num_outputs])
    self._c = tf.reshape(
        tf.boolean_mask(c, tf.logical_not(irrelevant)),
        [batch_size, -1, num_outputs])
  else:
    self._c = c
  self._d = d
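The pruning pattern above can be tried in isolation: rows of c whose coefficients are all (near) zero are flagged, and tf.boolean_mask on the negated flag keeps the rest. A minimal sketch with invented coefficients (one batch element, three specifications, two outputs):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

c = tf.constant([[[1.0, 0.0], [0.0, 0.0], [0.5, -0.5]]])  # row 1 is all zeros
irrelevant = tf.equal(tf.reduce_sum(
    tf.cast(tf.abs(c) > 1e-6, tf.int32), axis=-1, keepdims=True), 0)
irrelevant = tf.tile(irrelevant, [1, 1, 2])
pruned = tf.reshape(tf.boolean_mask(c, tf.logical_not(irrelevant)), [1, -1, 2])

with tf.Session() as sess:
    print(sess.run(pruned))  # [[[ 1.   0. ]  [ 0.5 -0.5]]] -- zero row removed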
Example 3: _build
# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import logical_not [as alias]
def _build(self, inputs, labels):

  def cond(i, unused_attack, success):
    # If we are already successful, we break.
    return tf.logical_and(i < self._num_restarts,
                          tf.logical_not(tf.reduce_all(success)))

  def body(i, attack, success):
    new_attack = self._inner_attack(inputs, labels)
    new_success = self._inner_attack.success
    # The first iteration always sets the attack.
    use_new_values = tf.logical_or(tf.equal(i, 0), new_success)
    return (i + 1,
            tf.where(use_new_values, new_attack, attack),
            tf.logical_or(success, new_success))

  _, self._attack, self._success = tf.while_loop(
      cond, body, back_prop=False, parallel_iterations=1,
      loop_vars=[
          tf.constant(0, dtype=tf.int32),
          inputs,
          tf.zeros([tf.shape(inputs)[0]], dtype=tf.bool),
      ])
  self._logits = self._eval_fn(self._attack, mode='final')
  return self._attack
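The restart loop exits early because tf.logical_not(tf.reduce_all(success)) flips to False as soon as every element succeeds. A stripped-down sketch of that stopping condition, with a plain counter standing in for the attack state:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

def cond(i, success):
    # Keep looping while i < 10 and at least one element is still False.
    return tf.logical_and(i < 10, tf.logical_not(tf.reduce_all(success)))

def body(i, success):
    # Pretend element k "succeeds" once i reaches k.
    return i + 1, tf.logical_or(success, tf.range(3) <= i)

i, _ = tf.while_loop(cond, body,
                     [tf.constant(0), tf.zeros([3], dtype=tf.bool)])

with tf.Session() as sess:
    print(sess.run(i))  # 3 -- the loop stopped well before the cap of 10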
Example 4: _sequence_correct
# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import logical_not [as alias]
def _sequence_correct(labels, predictions):
  """Computes a per-example sequence accuracy."""
  target_decode_steps = decode_utils.decode_steps_from_labels(
      labels, trim_start_symbol=True)
  predicted_decode_steps = decode_utils.decode_steps_from_predictions(
      predictions)

  decode_utils.assert_shapes_match(target_decode_steps, predicted_decode_steps)
  equal_tokens = decode_utils.compare_decode_steps(target_decode_steps,
                                                   predicted_decode_steps)

  target_len = labels["target_len"] - 1
  loss_mask = tf.sequence_mask(
      lengths=tf.to_int32(target_len),
      maxlen=tf.to_int32(tf.shape(equal_tokens)[1]))
  equal_tokens = tf.logical_or(equal_tokens, tf.logical_not(loss_mask))
  all_equal = tf.cast(tf.reduce_all(equal_tokens, 1), tf.float32)
  return all_equal
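decode_utils is project-specific, but the masking idiom is general: OR-ing token matches with the negated length mask forces padded positions to count as matches, so tf.reduce_all only judges real tokens. A minimal sketch with made-up match flags:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

equal_tokens = tf.constant([[True, True, False],   # mismatch on a padded slot
                            [True, False, True]])  # real mismatch at step 1
lengths = tf.constant([2, 3])
loss_mask = tf.sequence_mask(lengths, maxlen=3)

# Padded positions (mask False) are forced to True before reduce_all.
forgiving = tf.logical_or(equal_tokens, tf.logical_not(loss_mask))
seq_correct = tf.cast(tf.reduce_all(forgiving, 1), tf.float32)

with tf.Session() as sess:
    print(sess.run(seq_correct))  # [1. 0.]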
Example 5: attention_bias_lower_triangle
# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import logical_not [as alias]
def attention_bias_lower_triangle(length):
  """Create a bias tensor to be added to attention logits.

  Masked elements will have an effective negative-infinity value.

  Args:
    length: Integer specifying maximum sequence length in batch.

  Returns:
    <float>[length, length]
  """
  # First, create a sequence mask, e.g.:
  # [1, 0, ..., 0]
  # ...
  # [1, 1, ..., 1]
  sequence_mask = tf.sequence_mask(tf.range(1, length + 1), length)

  # Invert to transform to attention biases.
  attention_bias = tf.to_float(tf.logical_not(sequence_mask)) * -1e9
  return attention_bias
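Evaluating this for length=3 (assuming the definition above is in scope and tf is tensorflow.compat.v1 in graph mode) makes the causal pattern concrete: positions above the diagonal get a large negative bias, so softmax drives their attention weights to roughly zero.

bias = attention_bias_lower_triangle(3)
with tf.Session() as sess:
    print(sess.run(bias))
    # approximately:
    # [[ 0.e+00 -1.e+09 -1.e+09]
    #  [ 0.e+00  0.e+00 -1.e+09]
    #  [ 0.e+00  0.e+00  0.e+00]]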
Example 6: attention_bias_ignore_padding
# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import logical_not [as alias]
def attention_bias_ignore_padding(source_len, max_length):
  """Create a bias tensor to be added to attention logits.

  Out-of-range elements will have an effective negative-infinity value.

  Args:
    source_len: <int>[batch_size]
    max_length: Integer specifying maximum sequence length in batch.

  Returns:
    <float>[batch_size, 1, 1, max_length]
  """
  memory_padding = tf.to_float(
      tf.logical_not(tf.sequence_mask(source_len, maxlen=max_length)))
  ret = memory_padding * -1e9
  # Expand so tensor can be broadcast across heads and query length.
  return tf.expand_dims(tf.expand_dims(ret, axis=1), axis=1)
Example 7: noise_span_to_sentinel
# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import logical_not [as alias]
def noise_span_to_sentinel(tokens, noise_mask, vocabulary):
  """Replace each run of consecutive noise tokens with a single sentinel.

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    vocabulary: a vocabulary.Vocabulary

  Returns:
    a Tensor with the same dtype as tokens, shorter than tokens whenever a
    noise run of length > 1 is collapsed to a single sentinel
  """
  tokens = tf.where_v2(noise_mask,
                       tf.cast(sentinel_id(vocabulary), tokens.dtype),
                       tokens)
  prev_token_is_noise = tf.pad(noise_mask[:-1], [[1, 0]])
  subsequent_noise_tokens = tf.logical_and(noise_mask, prev_token_is_noise)
  return tf.boolean_mask(tokens, tf.logical_not(subsequent_noise_tokens))
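sentinel_id is a t5/mesh-tensorflow helper not shown here; the run-collapsing logic alone can be demonstrated with a hard-coded stand-in (999 below is purely illustrative):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

tokens = tf.constant([10, 11, 12, 13, 14])
noise_mask = tf.constant([False, True, True, False, True])
sentinel = tf.constant(999, dtype=tokens.dtype)  # stand-in for sentinel_id()

replaced = tf.where_v2(noise_mask, sentinel, tokens)
prev_is_noise = tf.pad(noise_mask[:-1], [[1, 0]])
# True only for the 2nd, 3rd, ... token of each noise run.
subsequent = tf.logical_and(noise_mask, prev_is_noise)
collapsed = tf.boolean_mask(replaced, tf.logical_not(subsequent))

with tf.Session() as sess:
    print(sess.run(collapsed))  # [ 10 999  13 999]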
Example 8: filter_groundtruth_with_crowd_boxes
# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import logical_not [as alias]
def filter_groundtruth_with_crowd_boxes(tensor_dict):
  """Filters out groundtruth with boxes corresponding to crowd.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_confidences
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that is not
    annotated as crowd.
  """
  if fields.InputDataFields.groundtruth_is_crowd in tensor_dict:
    is_crowd = tensor_dict[fields.InputDataFields.groundtruth_is_crowd]
    is_not_crowd = tf.logical_not(is_crowd)
    is_not_crowd_indices = tf.where(is_not_crowd)
    tensor_dict = retain_groundtruth(tensor_dict, is_not_crowd_indices)
  return tensor_dict
Example 9: filter_groundtruth_with_nan_box_coordinates
# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import logical_not [as alias]
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_confidences
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.cast(
      tf.is_nan(groundtruth_boxes), dtype=tf.int32), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)
  return retain_groundtruth(tensor_dict, valid_indices)
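retain_groundtruth and fields belong to the TensorFlow Object Detection API and are not reproduced here. The core row filter (is_nan, then logical_not, then keep valid rows) works standalone; this sketch swaps retain_groundtruth for tf.boolean_mask and uses an invented box tensor:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

boxes = tf.constant([[0.1, 0.1, 0.5, 0.5],
                     [np.nan, 0.2, 0.6, 0.6],
                     [0.3, 0.3, 0.9, 0.9]])
has_nan = tf.greater(
    tf.reduce_sum(tf.cast(tf.is_nan(boxes), tf.int32), axis=1), 0)
valid_boxes = tf.boolean_mask(boxes, tf.logical_not(has_nan))

with tf.Session() as sess:
    print(sess.run(valid_boxes))  # only rows 0 and 2 survive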
Example 10: aggregate_single_gradient_using_copy
# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import logical_not [as alias]
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
                                         check_inf_nan):
  """Calculate the average gradient for a shared variable across all towers.

  Note that this function provides a synchronization point across all towers.

  Args:
    grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
      (gradient, variable) pair within the outer list represents the gradient
      of the variable calculated for a single tower, and the number of pairs
      equals the number of towers.
    use_mean: if True, mean is taken, else sum of gradients is taken.
    check_inf_nan: check grads for nans and infs.

  Returns:
    The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
    gradient has been averaged across all towers. The variable is chosen from
    the first tower. has_nan_or_inf indicates whether any gradient contains a
    NaN or Inf.
  """
  grads = [g for g, _ in grad_and_vars]
  if any(isinstance(g, tf.IndexedSlices) for g in grads):
    # TODO(reedwm): All-reduce IndexedSlices more effectively.
    grad = aggregate_indexed_slices_gradients(grads)
  else:
    grad = tf.add_n(grads)

  if use_mean and len(grads) > 1:
    grad = tf.scalar_mul(1.0 / len(grads), grad)

  v = grad_and_vars[0][1]
  if check_inf_nan:
    with tf.name_scope('check_for_inf_and_nan'):
      has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
    return (grad, v), has_nan_or_inf
  else:
    return (grad, v), None
# This class is copied from
# https://github.com/tensorflow/tensorflow/blob/590d6eef7e91a6a7392c8ffffb7b58f2e0c8bc6b/tensorflow/contrib/training/python/training/device_setter.py#L56.
# We copy it since contrib has been removed from TensorFlow.
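The tf.logical_not(tf.reduce_all(tf.is_finite(...))) idiom used above (and in the next two examples) is worth isolating: it collapses a tensor of gradients into one boolean that is True as soon as any value is NaN or Inf. A minimal sketch with invented values:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

grads = tf.constant([[1.0, 2.0], [np.inf, 3.0]])
has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))

with tf.Session() as sess:
    print(sess.run(has_nan_or_inf))  # True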
Example 11: get_gradients_to_apply
# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import logical_not [as alias]
def get_gradients_to_apply(self, device_num, gradient_state):
  device_grads = gradient_state
  tower_grad = device_grads[device_num]

  if self.benchmark_cnn.enable_auto_loss_scale and device_num == 0:
    # Since we don't aggregate variables in --independent mode, we cannot tell
    # if there are NaNs on all GPUs. So we arbitrarily choose to only check
    # NaNs on the first GPU.
    has_inf_nan_list = []
    for grad, _ in tower_grad:
      # Each entry is True iff the corresponding gradient is fully finite.
      has_inf_nan_list.append(tf.reduce_all(tf.is_finite(grad)))
    self.grad_has_inf_nan = tf.logical_not(tf.reduce_all(has_inf_nan_list))
  return tower_grad
Example 12: preprocess_device_grads
# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import logical_not [as alias]
def preprocess_device_grads(self, device_grads):
  compact_grads = (self.benchmark_cnn.params.use_fp16 and
                   self.benchmark_cnn.params.compact_gradient_transfer)
  defer_grads = (self.benchmark_cnn.params.variable_consistency == 'relaxed')

  grads_to_reduce = [[g for g, _ in grad_vars] for grad_vars in device_grads]
  algorithm = batch_allreduce.algorithm_from_params(self.benchmark_cnn.params)
  reduced_grads, self._warmup_ops = algorithm.batch_all_reduce(
      grads_to_reduce, self.benchmark_cnn.params.gradient_repacking,
      compact_grads, defer_grads, self.benchmark_cnn.params.xla_compile)
  if self.benchmark_cnn.enable_auto_loss_scale:
    # Check for infs or nans
    is_finite_list = []
    with tf.name_scope('check_for_inf_and_nan'):
      for tower_grads in reduced_grads:
        with tf.colocate_with(tower_grads[0]):
          # TODO(tanmingxing): Create fused op that takes in a list of tensors
          # as input and returns scalar boolean True if there are any
          # infs/nans.
          is_finite_list.append(tf.reduce_all(
              [tf.reduce_all(tf.is_finite(g)) for g in tower_grads]))
      self.grad_has_inf_nan = tf.logical_not(tf.reduce_all(is_finite_list))
  reduced_device_grads = [[
      (g, v) for g, (_, v) in zip(grads, grad_vars)
  ] for grads, grad_vars in zip(reduced_grads, device_grads)]
  return self.benchmark_cnn.devices, reduced_device_grads
Example 13: _get_cubic_root
# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import logical_not [as alias]
def _get_cubic_root(self):
  """Get the cubic root."""
  # We have the equation x^2 D^2 + (1 - x)^4 * C / h_min^2
  # where x = sqrt(mu).
  # We substitute x, which is sqrt(mu), with x = y + 1.
  # It gives y^3 + py = q
  # where p = (D^2 h_min^2) / (2 * C) and q = -p.
  # We use Vieta's substitution to compute the root.
  # There is only one real solution y (which is in [0, 1]).
  # http://mathworld.wolfram.com/VietasSubstitution.html
  assert_array = [
      tf.Assert(
          tf.logical_not(tf.is_nan(self._dist_to_opt_avg)),
          [self._dist_to_opt_avg,]),
      tf.Assert(
          tf.logical_not(tf.is_nan(self._h_min)),
          [self._h_min,]),
      tf.Assert(
          tf.logical_not(tf.is_nan(self._grad_var)),
          [self._grad_var,]),
      tf.Assert(
          tf.logical_not(tf.is_inf(self._dist_to_opt_avg)),
          [self._dist_to_opt_avg,]),
      tf.Assert(
          tf.logical_not(tf.is_inf(self._h_min)),
          [self._h_min,]),
      tf.Assert(
          tf.logical_not(tf.is_inf(self._grad_var)),
          [self._grad_var,])
  ]
  with tf.control_dependencies(assert_array):
    p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
    w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
    w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0 / 3.0)
    y = w - p / 3.0 / w
    x = y + 1
  return x
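As a quick sanity check on the algebra (not part of the original optimizer), the closed form can be verified numerically: since q = -p, the computed y should satisfy y^3 + p*y + p = 0 for any p > 0. A plain NumPy sketch:

import numpy as np

p = 0.5  # arbitrary positive value of (D^2 h_min^2) / (2 C)
w3 = (-np.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
w = np.sign(w3) * np.abs(w3) ** (1.0 / 3.0)
y = w - p / 3.0 / w

print(y**3 + p * y + p)  # ~0, confirming y solves y^3 + p*y = -p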
Example 14: get_gan_loss
# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import logical_not [as alias]
def get_gan_loss(self, true_frames, gen_frames, name):
  """Get the discriminator + generator loss at every step.

  This performs a 1:1 update of the discriminator and generator at every
  step.

  Args:
    true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
      Assumed to be ground truth.
    gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
      Assumed to be fake.
    name: discriminator scope.

  Returns:
    loss: 0-D Tensor, with d_loss + g_loss
  """
  # D - STEP
  with tf.variable_scope("%s_discriminator" % name, reuse=tf.AUTO_REUSE):
    gan_d_loss, _, fake_logits_stop = self.d_step(
        true_frames, gen_frames)

  # G - STEP
  with tf.variable_scope("%s_discriminator" % name, reuse=True):
    gan_g_loss_pos_d, gan_g_loss_neg_d = self.g_step(
        gen_frames, fake_logits_stop)
  gan_g_loss = gan_g_loss_pos_d + gan_g_loss_neg_d
  tf.summary.scalar("gan_loss_%s" % name, gan_g_loss_pos_d + gan_d_loss)

  if self.hparams.gan_optimization == "joint":
    gan_loss = gan_g_loss + gan_d_loss
  else:
    curr_step = self.get_iteration_num()
    # Alternate: generator loss on odd steps, discriminator loss on even ones.
    # tf.equal is used for the tensor comparison so the predicate is a scalar
    # boolean tensor on every TF version.
    gan_loss = tf.cond(
        tf.logical_not(tf.equal(curr_step % 2, 0)), lambda: gan_g_loss,
        lambda: gan_d_loss)
  return gan_loss
Example 15: get_scheduled_sample_inputs
# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import logical_not [as alias]
def get_scheduled_sample_inputs(self,
                                done_warm_start,
                                groundtruth_items,
                                generated_items,
                                scheduled_sampling_func):
  """Scheduled sampling.

  Args:
    done_warm_start: whether we are done with warm start or not.
    groundtruth_items: list of ground truth items.
    generated_items: list of generated items.
    scheduled_sampling_func: scheduled sampling function to choose between
      groundtruth items and generated items.

  Returns:
    A mixed list of ground truth and generated items.
  """
  def sample():
    """Calculate the scheduled sampling params based on iteration number."""
    with tf.variable_scope("scheduled_sampling", reuse=tf.AUTO_REUSE):
      return [
          scheduled_sampling_func(item_gt, item_gen)
          for item_gt, item_gen in zip(groundtruth_items, generated_items)]

  cases = [
      (tf.logical_not(done_warm_start), lambda: groundtruth_items),
      (tf.logical_not(self.is_training), lambda: generated_items),
  ]
  output_items = tf.case(cases, default=sample, strict=True)
  return output_items
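To close, the tf.case + logical_not pattern from this last example in isolation: each predicate is a scalar boolean tensor, and negating flags such as done_warm_start or is_training selects the matching branch. A self-contained sketch with hypothetical placeholder flags:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

done_warm_start = tf.placeholder(tf.bool, shape=[])
is_training = tf.placeholder(tf.bool, shape=[])

cases = [
    (tf.logical_not(done_warm_start), lambda: tf.constant("groundtruth")),
    (tf.logical_not(is_training), lambda: tf.constant("generated")),
]
choice = tf.case(cases, default=lambda: tf.constant("sampled"), strict=True)

with tf.Session() as sess:
    feed = {done_warm_start: True, is_training: True}
    print(sess.run(choice, feed_dict=feed))  # b'sampled'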