This article collects typical usage examples of the Python method tensorflow.logical_not. If you are wondering how tensorflow.logical_not is used in practice, or are looking for concrete examples of it, the curated code samples below should help. You can also explore further usage examples from the tensorflow module.
Below are 15 code examples of tensorflow.logical_not, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
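Before the examples, here is a minimal, self-contained sketch (not taken from the listing below) of what tf.logical_not does: it returns the element-wise logical negation of a boolean tensor and is typically combined with comparison or masking ops. The values are made up for illustration, assuming TensorFlow 2.x eager execution.

import tensorflow as tf

# Element-wise negation of a boolean tensor.
x = tf.constant([True, False, True])
not_x = tf.logical_not(x)  # [False, True, False]

# A pattern that recurs in the examples below: negate a NaN indicator to keep valid rows.
values = tf.constant([[1.0, 2.0], [float("nan"), 3.0]])
row_has_nan = tf.reduce_any(tf.math.is_nan(values), axis=1)
valid_rows = tf.logical_not(row_has_nan)  # [True, False]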
Example 1: filter_groundtruth_with_nan_box_coordinates
# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_not [as alias]
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
      tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)
  return retain_groundtruth(tensor_dict, valid_indices)
Example 2: filter_groundtruth_with_crowd_boxes
# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_not [as alias]
def filter_groundtruth_with_crowd_boxes(tensor_dict):
  """Filters out groundtruth with boxes corresponding to crowd.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  if fields.InputDataFields.groundtruth_is_crowd in tensor_dict:
    is_crowd = tensor_dict[fields.InputDataFields.groundtruth_is_crowd]
    is_not_crowd = tf.logical_not(is_crowd)
    is_not_crowd_indices = tf.where(is_not_crowd)
    tensor_dict = retain_groundtruth(tensor_dict, is_not_crowd_indices)
  return tensor_dict
Example 3: filter_groundtruth_with_nan_box_coordinates
# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_not [as alias]
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
      tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)
  return retain_groundtruth(tensor_dict, valid_indices)
Example 4: compute_loss_and_error
# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_not [as alias]
def compute_loss_and_error(logits, label, label_smoothing):
    loss = sparse_softmax_cross_entropy(
        logits=logits, labels=label,
        label_smoothing=label_smoothing,
        weights=1.0)
    loss = tf.reduce_mean(loss, name='xentropy-loss')

    def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'):
        with tf.name_scope('prediction_incorrect'):
            x = tf.logical_not(tf.nn.in_top_k(logits, label, topk))
        return tf.cast(x, tf.float32, name=name)

    if label.shape.ndims > 1:
        label = tf.cast(tf.argmax(label, axis=1), tf.int32)

    wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
    add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1'))

    wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
    add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5'))
    return loss
Example 5: init
# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_not [as alias]
def init(self, data: Tensor) -> None:
    tau = self.__tauInit
    dtype = self.__dtype
    properties = self.__properties
    noiseDistribution = CenNormal(tau=tf.constant([tau], dtype=dtype),
                                  properties=properties)
    self.__noiseDistribution = noiseDistribution

    observedMask = tf.logical_not(tf.is_nan(data))
    trainMask = tf.logical_not(self.cv.mask(X=data))
    trainMask = tf.get_variable("trainMask",
                                dtype=trainMask.dtype,
                                initializer=trainMask)
    trainMask = tf.logical_and(trainMask, observedMask)
    testMask = tf.logical_and(observedMask,
                              tf.logical_not(trainMask))
    self.__observedMask = observedMask
    self.__trainMask = trainMask
    self.__testMask = testMask
Example 6: updateK
# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_not [as alias]
def updateK(self, k, prepVars, U):
    f = self.__f
    UfShape = U[f].get_shape()

    lhUfk = self.__likelihood.lhUfk(U[f], prepVars, f, k)
    postfk = lhUfk * self.prior[k].cond()
    Ufk = postfk.draw()
    Ufk = tf.expand_dims(Ufk, 0)

    normUfk = tf.norm(Ufk)
    notNanNorm = tf.logical_not(tf.is_nan(normUfk))
    finiteNorm = tf.is_finite(normUfk)
    positiveNorm = normUfk > 0.
    isValid = tf.logical_and(notNanNorm,
                             tf.logical_and(finiteNorm,
                                            positiveNorm))
    Uf = tf.cond(isValid, lambda: self.updateUf(U[f], Ufk, k),
                 lambda: U[f])

    # TODO: if valid -> self.__likelihood.lhU()[f].updateUfk(U[f][k], k)
    Uf.set_shape(UfShape)
    U[f] = Uf
    return U
Example 7: compute_loss_and_error
# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_not [as alias]
def compute_loss_and_error(logits, label, label_smoothing=0.):
    if label_smoothing != 0.:
        nclass = logits.shape[-1]
        label = tf.one_hot(label, nclass) if label.shape.ndims == 1 else label

    if label.shape.ndims == 1:
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
    else:
        loss = tf.losses.softmax_cross_entropy(
            label, logits, label_smoothing=label_smoothing,
            reduction=tf.losses.Reduction.NONE)
    loss = tf.reduce_mean(loss, name='xentropy-loss')

    def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'):
        with tf.name_scope('prediction_incorrect'):
            x = tf.logical_not(tf.nn.in_top_k(logits, label, topk))
        return tf.cast(x, tf.float32, name=name)

    wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
    add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1'))

    wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
    add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5'))
    return loss
Example 8: filter_groundtruth_with_crowd_boxes
# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_not [as alias]
def filter_groundtruth_with_crowd_boxes(tensor_dict):
  """Filters out groundtruth with boxes corresponding to crowd.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  if fields.InputDataFields.groundtruth_is_crowd in tensor_dict:
    is_crowd = tensor_dict[fields.InputDataFields.groundtruth_is_crowd]
    is_not_crowd = tf.logical_not(is_crowd)
    is_not_crowd_indices = tf.where(is_not_crowd)
    tensor_dict = retain_groundtruth(tensor_dict, is_not_crowd_indices)
  return tensor_dict
Example 9: filter_groundtruth_with_nan_box_coordinates
# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_not [as alias]
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
      tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)
  return retain_groundtruth(tensor_dict, valid_indices)
Example 10: _get_anchor_positive_triplet_mask
# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_not [as alias]
def _get_anchor_positive_triplet_mask(labels):
    """Return a 2D mask where mask[a, p] is True iff a and p are distinct and have same label.

    Args:
        labels: tf.int32 `Tensor` with shape [batch_size]

    Returns:
        mask: tf.bool `Tensor` with shape [batch_size, batch_size]
    """
    # Check that i and j are distinct
    indices_equal = tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool)
    indices_not_equal = tf.logical_not(indices_equal)

    # Check if labels[i] == labels[j]
    # Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
    labels_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))

    # Combine the two masks
    mask = tf.logical_and(indices_not_equal, labels_equal)

    return mask
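A minimal usage sketch for the mask above, with made-up label values and assuming TensorFlow 2.x eager execution:

labels = tf.constant([0, 0, 1, 1], dtype=tf.int32)
mask = _get_anchor_positive_triplet_mask(labels)
# mask[a, p] is True only when a != p and labels[a] == labels[p]:
# [[False,  True, False, False],
#  [ True, False, False, False],
#  [False, False, False,  True],
#  [False, False,  True, False]]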
Example 11: lengths_to_area_mask
# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_not [as alias]
def lengths_to_area_mask(feature_length, length, max_area_size):
  """Generates a non-padding mask for areas based on lengths.

  Args:
    feature_length: a tensor of [batch_size]
    length: the length of the batch
    max_area_size: the maximum area size considered

  Returns:
    mask: a tensor in shape of [batch_size, num_areas]
  """
  paddings = tf.cast(tf.expand_dims(
      tf.logical_not(
          tf.sequence_mask(feature_length, maxlen=length)), 2), tf.float32)
  _, _, area_sum, _, _ = compute_area_features(paddings,
                                               max_area_width=max_area_size)
  mask = tf.squeeze(tf.logical_not(tf.cast(area_sum, tf.bool)), [2])
  return mask
Example 12: compute_loss_and_error
# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_not [as alias]
def compute_loss_and_error(logits, label):
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
    loss = tf.reduce_mean(loss, name='xentropy-loss')

    def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'):
        with tf.name_scope('prediction_incorrect'):
            x = tf.logical_not(tf.nn.in_top_k(logits, label, topk))
        return tf.cast(x, tf.float32, name=name)

    res_scores, res_top5 = tf.nn.top_k(logits, k=5)
    res_scores = tf.identity(logits, name="logits")
    res_top = tf.identity(res_top5, name="res-top5")

    wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
    add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1'))

    wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
    add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5'))
    return loss
Example 13: tp_tn_fp_fn_for_each
# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_not [as alias]
def tp_tn_fp_fn_for_each(output, labels, threshold=0.5):
    """Calculate True Positive, True Negative, False Positive, False Negative.

    Args:
        output: network output sigmoided tensor. shape is [batch_size, num_class]
        labels: multi label encoded bool tensor. shape is [batch_size, num_class]
        threshold: python float

    Returns:
        shape is [4(tp, tn, fp, fn), num_class]
    """
    predicted = tf.greater_equal(output, threshold)

    gt_positive = tf.reduce_sum(tf.cast(labels, tf.int32), axis=0, keepdims=True)
    gt_negative = tf.reduce_sum(tf.cast(tf.logical_not(labels), tf.int32), axis=0, keepdims=True)

    true_positive = tf.math.logical_and(predicted, labels)
    true_positive = tf.reduce_sum(tf.cast(true_positive, tf.int32), axis=0, keepdims=True)

    true_negative = tf.math.logical_and(tf.logical_not(predicted), tf.math.logical_not(labels))
    true_negative = tf.reduce_sum(tf.cast(true_negative, tf.int32), axis=0, keepdims=True)

    false_negative = gt_positive - true_positive
    false_positive = gt_negative - true_negative

    return tf.concat(axis=0, values=[true_positive, true_negative, false_positive, false_negative])
Example 14: tp_tn_fp_fn
# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_not [as alias]
def tp_tn_fp_fn(output, labels, threshold=0.5):
    """Calculate True Positive, True Negative, False Positive, False Negative.

    Args:
        output: network output sigmoided tensor. shape is [batch_size, num_class]
        labels: multi label encoded bool tensor. shape is [batch_size, num_class]
        threshold: python float
    """
    predicted = tf.greater_equal(output, threshold)

    gt_positive = tf.reduce_sum(tf.cast(labels, tf.int32))
    gt_negative = tf.reduce_sum(tf.cast(tf.logical_not(labels), tf.int32))

    true_positive = tf.math.logical_and(predicted, labels)
    true_positive = tf.reduce_sum(tf.cast(true_positive, tf.int32))

    true_negative = tf.math.logical_and(tf.logical_not(predicted), tf.math.logical_not(labels))
    true_negative = tf.reduce_sum(tf.cast(true_negative, tf.int32))

    false_negative = gt_positive - true_positive
    false_positive = gt_negative - true_negative

    return true_positive, true_negative, false_positive, false_negative
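A quick numeric check of the counting logic above, with made-up predictions and assuming TensorFlow 2.x eager execution:

output = tf.constant([[0.9, 0.2], [0.6, 0.7]])
labels = tf.constant([[True, False], [False, True]])
tp, tn, fp, fn = tp_tn_fp_fn(output, labels, threshold=0.5)
# predicted = [[True, False], [True, True]]
# tp = 2, tn = 1, fp = 1, fn = 0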
Example 15: filter_infinity
# Required module: import tensorflow [as alias]
# Or: from tensorflow import logical_not [as alias]
def filter_infinity(self, sample):
    """Filter infinity sample."""
    return tf.logical_not(
        tf.math.is_inf(
            sample[self._min_spectrogram_key]))