This article collects typical usage examples of the Python method tensorflow.compat.v1.not_equal. If you are wondering what v1.not_equal does, how it is used, or what it looks like in practice, the curated method samples below may help. You can also explore further usages of its containing module, tensorflow.compat.v1.

Below are 15 code examples of v1.not_equal, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
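Before the examples, here is a minimal self-contained sketch of what tf.not_equal does: it compares two tensors element-wise (with broadcasting) and returns a boolean tensor. Casting that mask to float is the padding-weight idiom most of the examples below rely on; the values here are invented for illustration.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

x = tf.constant([1, 0, 2, 0])
mask = tf.not_equal(x, 0)    # [True, False, True, False]
weights = tf.to_float(mask)  # [1., 0., 1., 0.]
with tf.Session() as sess:
  print(sess.run(weights))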
Example 1: summarize_features

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import not_equal [as alias]
def summarize_features(features, num_shards=1):
  """Generate summaries for features."""
  if not common_layers.should_generate_summaries():
    return
  with tf.name_scope("input_stats"):
    for (k, v) in sorted(six.iteritems(features)):
      if (isinstance(v, tf.Tensor) and (v.get_shape().ndims > 1) and
          (v.dtype != tf.string)):
        tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards)
        tf.summary.scalar("%s_length" % k, tf.shape(v)[1])
        nonpadding = tf.to_float(tf.not_equal(v, 0))
        nonpadding_tokens = tf.reduce_sum(nonpadding)
        tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens)
        tf.summary.scalar("%s_nonpadding_fraction" % k,
                          tf.reduce_mean(nonpadding))
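As a quick sanity check of the statistics computed above, here is a hedged toy run; the batch values are invented, and id 0 is assumed to be padding.

v = tf.constant([[4, 7, 0, 0],
                 [6, 0, 0, 0]])                   # two padded sequences
nonpadding = tf.to_float(tf.not_equal(v, 0))
nonpadding_tokens = tf.reduce_sum(nonpadding)     # 3.0 real tokens
nonpadding_fraction = tf.reduce_mean(nonpadding)  # 3 / 8 = 0.375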
Example 2: _symbol_bottom_simple

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import not_equal [as alias]
def _symbol_bottom_simple(x, model_hparams, vocab_size, name, reuse):
  """Bottom transformation for symbols."""
  with tf.variable_scope(name, reuse=reuse):
    # Ensure the inputs are 3-D.
    if len(x.get_shape()) == 4:
      x = tf.squeeze(x, axis=3)
    while len(x.get_shape()) < 3:
      x = tf.expand_dims(x, axis=-1)

    var = get_weights(model_hparams, vocab_size)
    x = common_layers.dropout_no_scaling(
        x, 1.0 - model_hparams.symbol_dropout)
    ret = common_layers.gather(var, x)
    if model_hparams.multiply_embedding_mode == "sqrt_depth":
      ret *= model_hparams.hidden_size**0.5
    ret *= tf.expand_dims(
        common_layers.cast_like(tf.not_equal(x, 0), ret), -1)
    return ret
Example 3: weights_multi_problem

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import not_equal [as alias]
def weights_multi_problem(labels, taskid=-1):
  """Assign weight 1.0 to only the "targets" portion of the labels.

  Weight 1.0 is assigned to all labels past the taskid.

  Args:
    labels: A Tensor of int32s.
    taskid: an int32 representing the task id for a problem.

  Returns:
    A Tensor of floats.

  Raises:
    ValueError: The task id must be valid.
  """
  taskid = check_nonnegative(taskid)
  past_taskid = tf.cumsum(to_float(tf.equal(labels, taskid)), axis=1)
  # Additionally zero out the task id location.
  past_taskid *= to_float(tf.not_equal(labels, taskid))
  non_taskid = to_float(labels)
  return to_float(tf.not_equal(past_taskid * non_taskid, 0))
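A worked toy run may help; the label ids and the task id below are invented. Only positions strictly after the task-id token, and that hold a nonzero label, receive weight 1.0.

labels = tf.constant([[3, 5, 9, 7, 4, 0]])  # assume the task id is 9
# tf.equal(labels, 9)       -> [0, 0, 1, 0, 0, 0]; cumsum -> [0, 0, 1, 1, 1, 1]
# zeroing the task id slot  -> past_taskid = [0, 0, 0, 1, 1, 1]
# past_taskid * labels != 0 -> weights = [0., 0., 0., 1., 1., 0.]
weights = weights_multi_problem(labels, taskid=9)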
Example 4: _select_top_k

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import not_equal [as alias]
def _select_top_k(logits, top_k):
  """Replaces logits, except the top k highest values, with a small number (-1e6).

  If k is -1, don't replace anything.

  Args:
    logits: A `Tensor` of shape [batch_size, ..., vocab_size].
    top_k: A vector of length batch_size.

  Returns:
    A `Tensor` with the same shape as logits.
  """
  vocab_size = logits.shape[-1]
  top_k = tf.where(
      tf.not_equal(top_k, -1), top_k,
      tf.ones_like(top_k) * vocab_size)
  return tf.where(
      tf.argsort(logits) < tf.reshape(top_k, [-1] + [1] *
                                      (len(logits.shape) - 1)), logits,
      tf.ones_like(logits) * -1e6)
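The first tf.where above is where not_equal earns its keep: batch entries whose top_k is -1 are rewritten to the full vocab_size so nothing is masked for them. A small sketch with invented values:

top_k = tf.constant([2, -1])
vocab_size = 5
effective_k = tf.where(tf.not_equal(top_k, -1), top_k,
                       tf.ones_like(top_k) * vocab_size)
# effective_k == [2, 5]: the second batch entry keeps all of its logits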
Example 5: filter_correct_class

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import not_equal [as alias]
def filter_correct_class(verifiable_obj, num_classes, labels, margin):
  """Filters out the objective when the target class contains the true label.

  Args:
    verifiable_obj: 2D tensor of shape (num_classes, batch_size) containing
      verifiable objectives.
    num_classes: number of target classes.
    labels: 1D tensor of shape (batch_size) containing the labels for each
      example in the batch.
    margin: Verifiable objective values for the correct class will be forced to
      `-margin`, thus disregarding large negative bounds when maximising.

  Returns:
    2D tensor of shape (num_classes, batch_size) containing the corrected
    verifiable objective values for each (class, example).
  """
  targets_to_filter = tf.expand_dims(
      tf.range(num_classes, dtype=labels.dtype), axis=1)
  neq = tf.not_equal(targets_to_filter, labels)
  verifiable_obj = tf.where(neq, verifiable_obj,
                            -margin * tf.ones_like(verifiable_obj))
  return verifiable_obj
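The comparison relies on broadcasting: targets_to_filter has shape (num_classes, 1) and labels has shape (batch_size,), so tf.not_equal produces the full (num_classes, batch_size) mask in one call. A toy sketch with invented sizes:

targets_to_filter = tf.expand_dims(tf.range(3), axis=1)  # shape (3, 1)
labels = tf.constant([1, 2])                             # shape (2,)
neq = tf.not_equal(targets_to_filter, labels)            # shape (3, 2)
# [[True,  True ],
#  [False, True ],    # class 1 is the true label of example 0
#  [True,  False]]    # class 2 is the true label of example 1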
Example 6: compare_generating_steps

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import not_equal [as alias]
def compare_generating_steps(target_decode_steps, predicted_decode_steps):
  """Compares generating steps only, ignoring target copying steps.

  Args:
    target_decode_steps: Target DecodeSteps. Each tensor is expected to have
      shape [batch_size, output_length].
    predicted_decode_steps: Predicted DecodeSteps. Each tensor is expected to
      have shape [batch_size, output_length].

  Returns:
    A tensor of bools indicating whether generating steps are equal.
    Copy steps will have value True.
  """
  # Set all copying steps to True, since we only care about generating steps.
  return tf.logical_or(
      tf.not_equal(target_decode_steps.action_types, constants.GENERATE_ACTION),
      tf.logical_and(
          tf.equal(target_decode_steps.action_types,
                   predicted_decode_steps.action_types),
          tf.equal(target_decode_steps.action_ids,
                   predicted_decode_steps.action_ids)))
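To see the logical_or pattern in isolation, here is a hedged toy run; the action-type encoding (GENERATE_ACTION == 1, copy == 0) is an assumption made only for this illustration.

GENERATE_ACTION = 1  # assumed value of constants.GENERATE_ACTION
target_types = tf.constant([[1, 0, 1]])
pred_types = tf.constant([[1, 0, 0]])
target_ids = tf.constant([[5, 2, 7]])
pred_ids = tf.constant([[5, 2, 7]])
correct = tf.logical_or(
    tf.not_equal(target_types, GENERATE_ACTION),  # copy steps pass trivially
    tf.logical_and(tf.equal(target_types, pred_types),
                   tf.equal(target_ids, pred_ids)))
# correct == [[True, True, False]]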
Example 7: weights_nonzero

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import not_equal [as alias]
def weights_nonzero(labels):
  """Assign weight 1.0 to all labels except for padding (id=0)."""
  return to_float(tf.not_equal(labels, 0))
Example 8: weights_prepend_inputs_to_targets

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import not_equal [as alias]
def weights_prepend_inputs_to_targets(labels):
  """Assign weight 1.0 to only the "targets" portion of the labels.

  Weight 1.0 is assigned to all nonzero labels past the first zero.
  See prepend_mode in common_hparams.py.

  Args:
    labels: A Tensor of int32s.

  Returns:
    A Tensor of floats.
  """
  past_first_zero = tf.cumsum(to_float(tf.equal(labels, 0)), axis=1)
  nonzero = to_float(labels)
  return to_float(tf.not_equal(past_first_zero * nonzero, 0))
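A toy run with invented labels shows the cumsum trick:

labels = tf.constant([[4, 6, 0, 8, 3, 0]])
# tf.equal(labels, 0)      -> [0, 0, 1, 0, 0, 1]; cumsum -> [0, 0, 1, 1, 1, 2]
# past_first_zero * labels -> [0, 0, 0, 8, 3, 0]
weights = weights_prepend_inputs_to_targets(labels)
# weights == [[0., 0., 0., 1., 1., 0.]]: nonzero labels after the first zero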
Example 9: bottom_simple

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import not_equal [as alias]
def bottom_simple(x, model_hparams, vocab_size, name, reuse):
  """Internal bottom transformation."""
  with tf.variable_scope(name, reuse=reuse):
    var = _get_weights(model_hparams, vocab_size)
    x = common_layers.dropout_no_scaling(
        x, 1.0 - model_hparams.symbol_dropout)
    # Add together the embeddings for each tuple position.
    ret = tf.add_n([
        tf.gather(var, x[:, :, :, i] + sum(vocab_size[:i])) *
        tf.expand_dims(tf.to_float(tf.not_equal(x[:, :, :, i], 0)), -1)
        for i in range(len(vocab_size))
    ])
    if model_hparams.multiply_embedding_mode == 'sqrt_depth':
      ret *= model_hparams.hidden_size**0.5
    return ret
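A minimal sketch of the offset gather inside the add_n, assuming a 2-tuple vocabulary; the sizes and ids are invented. Each tuple position i indexes its own slice of the shared table via sum(vocab_size[:i]), and tf.not_equal(..., 0) zeroes out the embedding wherever that position holds the padding id.

vocab_size = [4, 3]                            # two sub-vocabularies, one table
var = tf.random_uniform([sum(vocab_size), 8])  # 7 rows, depth 8
x = tf.constant([[[[2, 1]]]])                  # shape (1, 1, 1, 2)
ret = tf.add_n([
    tf.gather(var, x[:, :, :, i] + sum(vocab_size[:i])) *
    tf.expand_dims(tf.to_float(tf.not_equal(x[:, :, :, i], 0)), -1)
    for i in range(2)
])  # position 0 reads row 2; position 1 reads row 4 + 1 = 5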
Example 10: maybe_rot180

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import not_equal [as alias]
def maybe_rot180(image, label, static_axis, rot180_k=None):
  """Randomly rotate the image 180 degrees."""
  if rot180_k is None:
    rot180_k = 2 * tf.random_uniform(
        shape=[], minval=0, maxval=2, dtype=tf.int32)
  rot_or_not = tf.not_equal(rot180_k, 0)

  def _maybe_rot180(data):
    """Rotate or not according to rot_or_not."""
    data = tf.cond(tf.logical_and(rot_or_not, tf.equal(static_axis, 0)),
                   lambda: tf.transpose(data, [2, 1, 0]),
                   lambda: data)
    data = tf.cond(tf.logical_and(rot_or_not, tf.equal(static_axis, 1)),
                   lambda: tf.transpose(data, [0, 2, 1]),
                   lambda: data)
    data = tf.cond(rot_or_not,
                   lambda: tf.image.rot90(data, k=rot180_k),
                   lambda: data)
    data = tf.cond(tf.logical_and(rot_or_not, tf.equal(static_axis, 0)),
                   lambda: tf.transpose(data, [2, 1, 0]),
                   lambda: data)
    data = tf.cond(tf.logical_and(rot_or_not, tf.equal(static_axis, 1)),
                   lambda: tf.transpose(data, [0, 2, 1]),
                   lambda: data)
    return data

  return _maybe_rot180(image), _maybe_rot180(label)
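A hedged usage sketch; the shapes and the forced rotation are invented for illustration.

image = tf.random_uniform([8, 8, 3])
label = tf.random_uniform([8, 8, 3])
# Passing rot180_k=2 forces the rotation; rot_or_not is then always True.
rot_image, rot_label = maybe_rot180(
    image, label, static_axis=tf.constant(2), rot180_k=tf.constant(2))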
Example 11: _ignore_pad

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import not_equal [as alias]
def _ignore_pad(embeddings_table, ids, use_one_hot_embeddings=False):
  """Use mean of symbol embeddings as overall embedding but ignore PAD."""
  source_embeddings = common_layers.embedding_lookup(embeddings_table, ids,
                                                     use_one_hot_embeddings)
  # Set weights to ignore padding.
  embedded_weights = tf.to_float(tf.not_equal(ids, constants.PAD_SYMBOL_ID))
  embedded_weights = tf.expand_dims(embedded_weights, -1)
  return source_embeddings * embedded_weights
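The same masking idea in a self-contained sketch, with tf.nn.embedding_lookup standing in for common_layers.embedding_lookup and PAD assumed to be id 0:

ids = tf.constant([[5, 3, 0, 0]])              # two trailing PAD positions
table = tf.random_uniform([10, 4])             # vocab 10, embedding dim 4
embedded = tf.nn.embedding_lookup(table, ids)  # shape (1, 4, 4)
weights = tf.expand_dims(tf.to_float(tf.not_equal(ids, 0)), -1)  # (1, 4, 1)
masked = embedded * weights                    # PAD rows become all zeros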
Example 12: _bert_embeddings

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import not_equal [as alias]
def _bert_embeddings(wordpiece_embedding_size, bert_config, features,
                     is_training, use_one_hot_embeddings, scope,
                     use_segment_ids):
  """Get embeddings from BERT."""
  token_type_ids = None
  if use_segment_ids:
    token_type_ids = features[constants.SEGMENT_ID_KEY]
  max_seq_len = tf.shape(features[constants.SOURCE_WORDPIECES_KEY])[1]
  input_mask = bert_utils.get_input_mask(max_seq_len,
                                         features[constants.SOURCE_LEN_KEY])
  input_ids = features[constants.SOURCE_WORDPIECES_KEY]
  source_embeddings = bert_utils.get_bert_embeddings(
      input_ids,
      bert_config,
      input_mask,
      token_type_ids=token_type_ids,
      is_training=is_training,
      use_one_hot_embeddings=use_one_hot_embeddings,
      scope=scope)
  source_embeddings = common_layers.linear_transform(source_embeddings,
                                                     wordpiece_embedding_size,
                                                     "bert_transform")
  # Set weights to ignore padding.
  embedded_weights = tf.to_float(
      tf.not_equal(input_ids, constants.PAD_SYMBOL_ID))
  embedded_weights = tf.expand_dims(embedded_weights, -1)
  return source_embeddings * embedded_weights
Example 13: select_random_chunk

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import not_equal [as alias]
def select_random_chunk(dataset,
                        max_length=gin.REQUIRED,
                        feature_key='targets',
                        **unused_kwargs):
  """Token-preprocessor to extract one span of at most `max_length` tokens.

  If the token sequence is longer than `max_length`, then we return a random
  subsequence. Otherwise, we return the full sequence.

  This is generally followed by split_tokens.

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the key
      feature_key.
    max_length: an integer
    feature_key: a string

  Returns:
    a dataset
  """
  def _my_fn(x):
    """Select a random chunk of tokens.

    Args:
      x: a 1d Tensor

    Returns:
      a 1d Tensor
    """
    tokens = x[feature_key]
    n_tokens = tf.size(tokens)
    num_segments = tf.cast(
        tf.ceil(tf.cast(n_tokens, tf.float32)
                / tf.cast(max_length, tf.float32)),
        tf.int32)
    start = max_length * tf.random_uniform(
        [], maxval=num_segments, dtype=tf.int32)
    end = tf.minimum(start + max_length, n_tokens)
    return {feature_key: tokens[start:end]}

  # Filter empty examples.
  dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))
  return dataset.map(_my_fn, num_parallel_calls=num_parallel_calls())
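The chunk-start arithmetic in isolation, with invented sizes: a 10-token sequence and max_length=4 give 3 segments, so start is drawn uniformly from {0, 4, 8} and the final chunk is clipped.

n_tokens = tf.constant(10)
max_length = 4
num_segments = tf.cast(
    tf.ceil(tf.cast(n_tokens, tf.float32) / tf.cast(max_length, tf.float32)),
    tf.int32)                                   # 3
start = max_length * tf.random_uniform([], maxval=num_segments, dtype=tf.int32)
end = tf.minimum(start + max_length, n_tokens)  # 4, 8, or 10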
Example 14: normalize_example

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import not_equal [as alias]
def normalize_example(self, example, hparams):
  """Assumes that example contains both inputs and targets."""
  length = self.max_length(hparams)

  def _to_constant_shape(tensor):
    tensor = tensor[:length]
    tensor = tf.pad(tensor, [(0, length - tf.shape(tensor)[0])])
    return tf.reshape(tensor, [length])

  if self.has_inputs:
    example['inputs'] = _to_constant_shape(example['inputs'])
    example['targets'] = _to_constant_shape(example['targets'])
  elif 'inputs' in example:
    if self.packed_length:
      raise ValueError('cannot concatenate packed examples on the fly.')
    inputs = example.pop('inputs')[:-1]  # Remove EOS token.
    targets = tf.concat([inputs, example['targets']], 0)
    example['targets'] = _to_constant_shape(targets)
  else:
    example['targets'] = _to_constant_shape(example['targets'])
  if self.packed_length:
    if self.has_inputs:
      if 'inputs_segmentation' in example:
        example['inputs_segmentation'] = _to_constant_shape(
            example['inputs_segmentation'])
        example['inputs_position'] = _to_constant_shape(
            example['inputs_position'])
      else:
        example['inputs_segmentation'] = tf.to_int64(
            tf.not_equal(example['inputs'], 0))
        example['inputs_position'] = (
            example['inputs_segmentation'] * tf.range(length, dtype=tf.int64))
    if 'targets_segmentation' in example:
      example['targets_segmentation'] = _to_constant_shape(
          example['targets_segmentation'])
      example['targets_position'] = _to_constant_shape(
          example['targets_position'])
    else:
      example['targets_segmentation'] = tf.to_int64(
          tf.not_equal(example['targets'], 0))
      example['targets_position'] = (
          example['targets_segmentation'] * tf.range(length, dtype=tf.int64))
  return example
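A hedged sketch of the fallback branch that derives segmentation and position features with not_equal; the target ids are invented.

targets = tf.constant([9, 8, 7, 0, 0], dtype=tf.int64)
targets_segmentation = tf.to_int64(tf.not_equal(targets, 0))  # [1, 1, 1, 0, 0]
targets_position = (
    targets_segmentation * tf.range(5, dtype=tf.int64))       # [0, 1, 2, 0, 0]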
Example 15: sample_mask_indices

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import not_equal [as alias]
def sample_mask_indices(tokens, mask_rate, mask_blacklist, max_num_to_mask):
  """Samples indices to mask.

  Args:
    tokens (Tensor): 1-D string Tensor.
    mask_rate (float): percentage of tokens to mask.
    mask_blacklist (Tensor): 1-D string Tensor of tokens to NEVER mask.
    max_num_to_mask (int): max # of masks.

  Returns:
    mask_indices (Tensor): 1-D int32 Tensor of indices to mask.
  """
  if mask_rate < 0 or mask_rate > 1:
    raise ValueError("mask_rate must be within [0, 1].")

  # Compute how many tokens to mask.
  num_tokens = tf.size(tokens)
  num_to_mask = tf.to_int32(tf.ceil(mask_rate * tf.to_float(num_tokens)))

  if mask_rate > 0:
    # If masking is enabled, then mask at least one, no matter what.
    # The original BERT code does this too.
    num_to_mask = tf.maximum(num_to_mask, 1)

  num_to_mask = tf.minimum(num_to_mask, max_num_to_mask)

  # If there are any [CLS] or [SEP], we count these as part of num_tokens.
  # Note that the original implementation of BERT does this as well.
  all_indices = tf.range(num_tokens)

  # Filter out indices containing CLS and SEP.
  allow_masking = tf.reduce_all(
      tf.not_equal(tokens, mask_blacklist[:, None]), axis=0)
  filtered_indices = tf.boolean_mask(all_indices, allow_masking)

  # Randomly select indices without replacement.
  shuffled_indices = tf.random.shuffle(filtered_indices)
  mask_indices = shuffled_indices[:num_to_mask]
  return mask_indices
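The blacklist test above broadcasts a (num_blacklist, 1) string tensor against the (num_tokens,) tokens and then reduces over the blacklist axis, so a token survives only if it differs from every blacklisted token. A toy run with made-up tokens:

tokens = tf.constant(["[CLS]", "the", "cat", "[SEP]"])
mask_blacklist = tf.constant(["[CLS]", "[SEP]"])
allow_masking = tf.reduce_all(
    tf.not_equal(tokens, mask_blacklist[:, None]), axis=0)
# allow_masking == [False, True, True, False]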