This article collects typical usage examples of the tensorflow.not_equal function in Python. If you are wondering what exactly not_equal does, how to call it, and where it is useful, the curated examples below should help.
The following presents 15 code examples of not_equal, ordered by popularity.
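Before the examples, a quick orientation: tf.not_equal compares two tensors element-wise (with broadcasting) and returns a boolean tensor. The minimal sketch below uses the TF1-style session API that the examples in this article are written against.

import tensorflow as tf

a = tf.constant([1, 2, 0, 0])
b = tf.constant(0)
# Element-wise comparison with broadcasting: True wherever a != b
mask = tf.not_equal(a, b)

with tf.Session() as sess:
    print(sess.run(mask))  # [ True  True False False]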
Example 1: rpn_cls_loss
def rpn_cls_loss(rpn_cls_score,rpn_labels):
'''
Calculate the Region Proposal Network classifier loss. Measures how well
the RPN is able to propose regions by the performance of its "objectness"
classifier.
Standard cross-entropy loss on logits
'''
with tf.variable_scope('rpn_cls_loss'):
# input shape dimensions
shape = tf.shape(rpn_cls_score)
# Stack all classification scores into 2D matrix
rpn_cls_score = tf.transpose(rpn_cls_score,[0,3,1,2])
rpn_cls_score = tf.reshape(rpn_cls_score,[shape[0],2,shape[3]//2*shape[1],shape[2]])
rpn_cls_score = tf.transpose(rpn_cls_score,[0,2,3,1])
rpn_cls_score = tf.reshape(rpn_cls_score,[-1,2])
# Stack labels
rpn_labels = tf.reshape(rpn_labels,[-1])
# Ignore label=-1 (Neither object nor background: IoU between 0.3 and 0.7)
rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score,tf.where(tf.not_equal(rpn_labels,-1))),[-1,2])
rpn_labels = tf.reshape(tf.gather(rpn_labels,tf.where(tf.not_equal(rpn_labels,-1))),[-1])
# Cross entropy error
rpn_cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_labels))
return rpn_cross_entropy
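The key not_equal pattern in this example is filtering out the ignored anchors: tf.where(tf.not_equal(rpn_labels, -1)) yields the indices of the labeled entries, and tf.gather keeps only those rows. A minimal, self-contained sketch of that filtering step, with made-up labels and scores:

import tensorflow as tf

labels = tf.constant([1, -1, 0, -1, 1])
scores = tf.constant([[0.2, 0.8],
                      [0.5, 0.5],
                      [0.9, 0.1],
                      [0.4, 0.6],
                      [0.3, 0.7]])

keep = tf.where(tf.not_equal(labels, -1))           # indices of labeled entries
kept_scores = tf.reshape(tf.gather(scores, keep), [-1, 2])
kept_labels = tf.reshape(tf.gather(labels, keep), [-1])

with tf.Session() as sess:
    print(sess.run(kept_labels))  # [1 0 1]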
Example 2: print_mask_parameter_counts
def print_mask_parameter_counts():
print("# Mask Parameter Counts")
print(" - Mask1: {0}".format(
sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix1, tf.zeros_like(indicator_matrix1)))))))
print(" - Mask2: {0}".format(
sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix2, tf.zeros_like(indicator_matrix2)))))))
print(" - Mask3: {0}".format(
sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix3, tf.zeros_like(indicator_matrix3)))))))
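Here tf.not_equal against tf.zeros_like simply counts the nonzero entries of each mask. As a side note, the same count can be written more directly with tf.count_nonzero; the sketch below uses a hypothetical indicator matrix standing in for indicator_matrix1:

import tensorflow as tf

# Hypothetical indicator matrix standing in for indicator_matrix1
indicator = tf.constant([[1.0, 0.0], [0.0, 3.0]])

count_a = tf.reduce_sum(tf.to_float(tf.not_equal(indicator, tf.zeros_like(indicator))))
count_b = tf.count_nonzero(indicator, dtype=tf.float32)  # equivalent, more direct

with tf.Session() as sess:
    print(sess.run([count_a, count_b]))  # [2.0, 2.0]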
Example 3: retrieve_seq_length_op3
def retrieve_seq_length_op3(data, pad_val=0):
"""An op to compute the length of a sequence, the data shape can be [batch_size, n_step(max)] or
[batch_size, n_step(max), n_features].
If the data has type of tf.string and pad_val is assigned as empty string (''), this op will compute the
length of the string sequence.
Parameters:
-----------
data : tensor
[batch_size, n_step(max)] or [batch_size, n_step(max), n_features] with zero padding on the right hand side.
pad_val:
By default 0. If the data is tf.string, please assign this as empty string ('')
Examples
-----------
>>> data = [[[1],[2],[0],[0],[0]],
>>> [[1],[2],[3],[0],[0]],
>>> [[1],[2],[6],[1],[0]]]
>>> data = tf.convert_to_tensor(data, dtype=tf.float32)
>>> length = tl.layers.retrieve_seq_length_op3(data)
[2, 3, 4]
>>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]],
>>> [[2,3],[2,4],[3,2],[0,0],[0,0]],
>>> [[3,3],[2,2],[5,3],[1,2],[0,0]]]
>>> data = tf.convert_to_tensor(data, dtype=tf.float32)
>>> length = tl.layers.retrieve_seq_length_op3(data)
[4, 3, 4]
>>> data = [[1,2,0,0,0],
>>> [1,2,3,0,0],
>>> [1,2,6,1,0]]
>>> data = tf.convert_to_tensor(data, dtype=tf.float32)
>>> length = tl.layers.retrieve_seq_length_op3(data)
[2, 3, 4]
>>> data = [['hello','world','','',''],
>>> ['hello','world','tensorlayer','',''],
>>> ['hello','world','tensorlayer','2.0','']]
>>> data = tf.convert_to_tensor(data, dtype=tf.string)
>>> length = tl.layers.retrieve_seq_length_op3(data, pad_val='')
[2, 3, 4]
"""
data_shape_size = data.get_shape().ndims
if data_shape_size == 3:
return tf.reduce_sum(
input_tensor=tf.cast(tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2), dtype=tf.int32),
axis=1
)
elif data_shape_size == 2:
return tf.reduce_sum(input_tensor=tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), axis=1)
    elif data_shape_size == 1:
        raise ValueError("retrieve_seq_length_op3: data has wrong shape %s" % (data.get_shape().as_list()))
    else:
        raise ValueError(
            "retrieve_seq_length_op3: handling data with %s dims has not been implemented" % (data_shape_size)
        )
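For the 3-D case, the op first collapses the feature axis: tf.reduce_any(tf.not_equal(data, pad_val), axis=2) marks a timestep as valid if any feature differs from the pad value, and the sum over axis 1 then counts the valid steps. A sketch of that intermediate mask for the second docstring example:

import tensorflow as tf

data = tf.constant([[[1, 2], [2, 2], [1, 2], [1, 2], [0, 0]],
                    [[2, 3], [2, 4], [3, 2], [0, 0], [0, 0]],
                    [[3, 3], [2, 2], [5, 3], [1, 2], [0, 0]]], dtype=tf.float32)

step_is_valid = tf.reduce_any(tf.not_equal(data, 0), axis=2)      # [batch, n_step] bool
length = tf.reduce_sum(tf.cast(step_is_valid, tf.int32), axis=1)  # [batch]

with tf.Session() as sess:
    print(sess.run(length))  # [4 3 4]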
Example 4: padded_sequence_accuracy
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
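The trick here: tf.not_equal(outputs, padded_labels) flags every mismatch, the weights zero out padding positions, and a sequence counts as correct only when its mismatch sum is exactly zero. A tiny numeric sketch of that final step, with made-up tensors:

import tensorflow as tf

outputs = tf.constant([[3, 1, 0], [2, 2, 0]])
labels  = tf.constant([[3, 1, 0], [2, 4, 0]])
weights = tf.to_float(tf.not_equal(labels, 0))  # ignore label==0 padding

not_correct = tf.to_float(tf.not_equal(outputs, labels)) * weights
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=1))

with tf.Session() as sess:
    print(sess.run(correct_seq))  # [1. 0.]: first sequence fully correct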
Example 5: target_mask_op
def target_mask_op(data, pad_val=0):
    """Return a mask tensor marking non-pad positions; if the input is ``tf.string``, assign ``pad_val`` as the empty string ('')."""
data_shape_size = data.get_shape().ndims
if data_shape_size == 3:
return tf.cast(tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2), dtype=tf.int32)
elif data_shape_size == 2:
return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32)
elif data_shape_size == 1:
raise ValueError("target_mask_op: data has wrong shape!")
else:
raise ValueError("target_mask_op: handling data_shape_size %s hasn't been implemented!" % (data_shape_size))
Example 6: compute_error
def compute_error(self):
    # Sets mask variables and performs batch processing
self.batch_gold_select = self.batch_print_answer > 0.0
self.full_column_mask = tf.concat(
axis=1, values=[self.batch_number_column_mask, self.batch_word_column_mask])
self.full_processed_column = tf.concat(
axis=1,
values=[self.batch_processed_number_column, self.batch_processed_word_column])
self.full_processed_sorted_index_column = tf.concat(axis=1, values=[
self.batch_processed_sorted_index_number_column,
self.batch_processed_sorted_index_word_column
])
self.select_bad_number_mask = tf.cast(
tf.logical_and(
tf.not_equal(self.full_processed_column,
self.utility.FLAGS.pad_int),
tf.not_equal(self.full_processed_column,
self.utility.FLAGS.bad_number_pre_process)),
self.data_type)
self.select_mask = tf.cast(
tf.logical_not(
tf.equal(self.batch_number_column, self.utility.FLAGS.pad_int)),
self.data_type)
self.select_word_mask = tf.cast(
tf.logical_not(
tf.equal(self.batch_word_column_entry_mask,
self.utility.dummy_token_id)), self.data_type)
self.select_full_mask = tf.concat(
axis=1, values=[self.select_mask, self.select_word_mask])
self.select_whole_mask = tf.maximum(
tf.reshape(
tf.slice(self.select_mask, [0, 0, 0],
[self.batch_size, 1, self.max_elements]),
[self.batch_size, self.max_elements]),
tf.reshape(
tf.slice(self.select_word_mask, [0, 0, 0],
[self.batch_size, 1, self.max_elements]),
[self.batch_size, self.max_elements]))
self.invert_select_full_mask = tf.cast(
tf.concat(axis=1, values=[
tf.equal(self.batch_number_column, self.utility.FLAGS.pad_int),
tf.equal(self.batch_word_column_entry_mask,
self.utility.dummy_token_id)
]), self.data_type)
self.batch_lookup_answer = tf.zeros(tf.shape(self.batch_gold_select))
self.reset_select = self.select_whole_mask
self.rows = tf.reduce_sum(self.select_whole_mask, 1)
self.num_entries = tf.reshape(
tf.reduce_sum(tf.reduce_sum(self.select_full_mask, 1), 1),
[self.batch_size])
self.final_error, self.final_correct = self.batch_process()
return self.final_error
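The select_bad_number_mask combines two tf.not_equal tests with tf.logical_and: a column entry is usable only if it is neither the pad value nor the bad-number sentinel. A self-contained sketch of that pattern, with hypothetical sentinel values standing in for utility.FLAGS.pad_int and utility.FLAGS.bad_number_pre_process:

import tensorflow as tf

PAD = -1        # hypothetical stand-in for utility.FLAGS.pad_int
BAD = -99999    # hypothetical stand-in for utility.FLAGS.bad_number_pre_process

column = tf.constant([[3.0, float(PAD), float(BAD), 7.0]])
usable = tf.cast(
    tf.logical_and(tf.not_equal(column, PAD), tf.not_equal(column, BAD)),
    tf.float32)

with tf.Session() as sess:
    print(sess.run(usable))  # [[1. 0. 0. 1.]]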
Example 7: add_embedding
def add_embedding(self):
    # embed = np.load('glove{0}_uniform.npy'.format(self.emb_dim))
    with tf.variable_scope("Embed", regularizer=None):
        embedding = tf.get_variable('embedding', [self.num_emb, self.emb_dim],
                                    initializer=tf.random_uniform_initializer(-0.05, 0.05),
                                    trainable=True, regularizer=None)
        # Remap the -1 padding ids to the safe index 0 before the lookup
        ix = tf.to_int32(tf.not_equal(self.input, -1)) * self.input
        emb_tree = tf.nn.embedding_lookup(embedding, ix)
        # Zero out the looked-up embedding vectors at padded positions
        emb_tree = emb_tree * tf.expand_dims(tf.to_float(tf.not_equal(self.input, -1)), 2)
        return emb_tree
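tf.not_equal does double duty in this example: the 0/1 integer mask multiplied into the ids remaps every -1 padding id to the safe index 0 before the lookup, and the same mask, cast to float and expanded along the embedding axis, then zeroes out the looked-up vectors at those positions. A minimal sketch with made-up ids:

import tensorflow as tf

ids = tf.constant([[4, 2, -1, -1]])
embedding = tf.get_variable('emb_demo', [10, 3],
                            initializer=tf.random_uniform_initializer(-0.05, 0.05))

valid = tf.not_equal(ids, -1)
safe_ids = tf.to_int32(valid) * ids                        # -1 -> 0, valid ids unchanged
vectors = tf.nn.embedding_lookup(embedding, safe_ids)
vectors = vectors * tf.expand_dims(tf.to_float(valid), 2)  # zero the padded rows

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(vectors).shape)  # (1, 4, 3); the last two rows are all zeros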
Example 8: add_placeholders
def add_placeholders(self):
dim2=self.config.maxnodesize
dim1=self.config.batch_size
self.input = tf.placeholder(tf.int32,[dim1,dim2],name='input')
self.treestr = tf.placeholder(tf.int32,[dim1,dim2,2],name='tree')
self.labels = tf.placeholder(tf.int32,[dim1,dim2],name='labels')
self.dropout = tf.placeholder(tf.float32,name='dropout')
    self.n_inodes = tf.reduce_sum(tf.to_int32(tf.not_equal(self.treestr, -1)), [1, 2])
    self.n_inodes = self.n_inodes // 2  # integer division: treestr stores two child ids per internal node
    self.num_leaves = tf.reduce_sum(tf.to_int32(tf.not_equal(self.input, -1)), [1])
self.batch_len = tf.placeholder(tf.int32,name="batch_len")
Example 9: add_embedding
def add_embedding(self):
#embed=np.load('glove{0}_uniform.npy'.format(self.emb_dim))
with tf.device('/cpu:0'):
with tf.variable_scope("Embed"):
            embedding = tf.get_variable('embedding', [self.num_emb, self.emb_dim],
                                        initializer=tf.random_uniform_initializer(-0.05, 0.05),
                                        trainable=True,
                                        regularizer=tf.contrib.layers.l2_regularizer(0.0))
            ix = tf.to_int32(tf.not_equal(self.input, -1)) * self.input
            emb = tf.nn.embedding_lookup(embedding, ix)
            emb = emb * tf.to_float(tf.not_equal(tf.expand_dims(self.input, 2), -1))
            return emb
Example 10: get_mask
def get_mask(gt, num_classes, ignore_label):
less_equal_class = tf.less_equal(gt, num_classes-1)
not_equal_ignore = tf.not_equal(gt, ignore_label)
mask = tf.logical_and(less_equal_class, not_equal_ignore)
indices = tf.squeeze(tf.where(mask), 1)
return indices
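This is the usual semantic-segmentation filter: keep only the pixels whose ground-truth class is in range and not the ignore label. A runnable sketch, assuming an ignore_label of 255:

import tensorflow as tf

gt = tf.constant([0, 3, 255, 1, 255])   # flattened ground-truth labels
num_classes, ignore_label = 4, 255

mask = tf.logical_and(tf.less_equal(gt, num_classes - 1),
                      tf.not_equal(gt, ignore_label))
indices = tf.squeeze(tf.where(mask), 1)

with tf.Session() as sess:
    print(sess.run(indices))  # [0 1 3]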
Example 11: classification_costs
def classification_costs(logits, labels, name=None):
"""Compute classification cost mean and classification cost per sample
Assume unlabeled examples have label == -1. For unlabeled examples, cost == 0.
Compute the mean over all examples.
Note that unlabeled examples are treated differently in error calculation.
"""
with tf.name_scope(name, "classification_costs") as scope:
applicable = tf.not_equal(labels, -1)
# Change -1s to zeros to make cross-entropy computable
labels = tf.where(applicable, labels, tf.zeros_like(labels))
# This will now have incorrect values for unlabeled examples
per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
# Retain costs only for labeled
per_sample = tf.where(applicable, per_sample, tf.zeros_like(per_sample))
# Take mean over all examples, not just labeled examples.
labeled_sum = tf.reduce_sum(per_sample)
total_count = tf.to_float(tf.shape(per_sample)[0])
mean = tf.div(labeled_sum, total_count, name=scope)
return mean, per_sample
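A tiny numeric check of the masking logic, assuming a batch where the third example is unlabeled (-1): its cross-entropy contribution is zeroed out, while the mean still divides by the full batch size:

import tensorflow as tf

logits = tf.constant([[2.0, 0.0], [0.0, 2.0], [1.0, 1.0]])
labels = tf.constant([0, 1, -1])

applicable = tf.not_equal(labels, -1)
safe_labels = tf.where(applicable, labels, tf.zeros_like(labels))
per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=safe_labels)
per_sample = tf.where(applicable, per_sample, tf.zeros_like(per_sample))
mean = tf.reduce_sum(per_sample) / tf.to_float(tf.shape(per_sample)[0])

with tf.Session() as sess:
    print(sess.run(per_sample))  # third entry is exactly 0.0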
Example 12: _add_rpn_losses
def _add_rpn_losses(self, sigma_rpn=3.0):
with tf.variable_scope('loss_' + self._tag) as scope:
# RPN, class loss
rpn_cls_score = tf.reshape(self._predictions['rpn_cls_score_reshape'], [-1, 2])
rpn_label = tf.reshape(self._anchor_targets['rpn_labels'], [-1])
rpn_select = tf.where(tf.not_equal(rpn_label, -1))
rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])
rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])
rpn_cross_entropy = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))
# RPN, bbox loss
rpn_bbox_pred = self._predictions['rpn_bbox_pred']
rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets']
rpn_bbox_inside_weights = self._anchor_targets['rpn_bbox_inside_weights']
rpn_bbox_outside_weights = self._anchor_targets['rpn_bbox_outside_weights']
rpn_loss_box = self._smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,
rpn_bbox_outside_weights, sigma=sigma_rpn, dim=[1, 2, 3])
self._losses['rpn_cross_entropy'] = rpn_cross_entropy
self._losses['rpn_loss_box'] = rpn_loss_box
self._losses['rpn_loss'] = rpn_loss_box + rpn_cross_entropy
self._event_summaries.update(self._losses)
return self._losses['rpn_loss']
Example 13: measure
def measure():
E = tf.reduce_mean(energy(layers))
C = tf.reduce_mean(cost(layers))
y_prediction = tf.argmax(layers[-1], 1)
error = tf.reduce_mean(tf.cast(tf.not_equal(y_prediction, tf.cast(y, tf.int64)), tf.float32))
return E, C, error
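The error rate is simply the mean of a not_equal comparison between predicted and true classes, cast to float. A sketch with made-up predictions:

import tensorflow as tf

y_pred = tf.constant([2, 0, 1, 1], dtype=tf.int64)
y_true = tf.constant([2, 1, 1, 0], dtype=tf.int64)

error = tf.reduce_mean(tf.cast(tf.not_equal(y_pred, y_true), tf.float32))

with tf.Session() as sess:
    print(sess.run(error))  # 0.5: two of four predictions are wrong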
Example 14: padded_sequence_accuracy
def padded_sequence_accuracy(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
# If the last dimension is 1 then we're using L1/L2 loss.
if common_layers.shape_list(predictions)[-1] == 1:
return rounding_sequence_accuracy(
predictions, labels, weights_fn=weights_fn)
with tf.variable_scope(
"padded_sequence_accuracy", values=[predictions, labels]):
padded_predictions, padded_labels = common_layers.pad_with_zeros(
predictions, labels)
weights = weights_fn(padded_labels)
# Flatten, keeping batch dim (and num_classes dim for predictions)
# TPU argmax can only deal with a limited number of dimensions
predictions_shape = common_layers.shape_list(padded_predictions)
batch_size = predictions_shape[0]
num_classes = predictions_shape[-1]
flat_size = common_layers.list_product(
common_layers.shape_list(padded_labels)[1:])
padded_predictions = tf.reshape(
padded_predictions,
[batch_size, common_layers.list_product(predictions_shape[1:-1]),
num_classes])
padded_labels = tf.reshape(padded_labels, [batch_size, flat_size])
weights = tf.reshape(weights, [batch_size, flat_size])
outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1))
padded_labels = tf.to_int32(padded_labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
Example 15: compute_loss
def compute_loss(self,emb_batch,curr_batch_size=None):
outloss=[]
prediction=[]
for idx_batch in range(self.config.batch_size):
tree_states=self.compute_states(emb_batch,idx_batch)
logits = self.create_output(tree_states)
labels1=tf.gather(self.labels,idx_batch)
labels2=tf.reduce_sum(tf.to_int32(tf.not_equal(labels1,-1)))
labels=tf.gather(labels1,tf.range(labels2))
loss = self.calc_loss(logits,labels)
pred = tf.nn.softmax(logits)
pred_root=tf.gather(pred,labels2-1)
prediction.append(pred_root)
outloss.append(loss)
    batch_loss = tf.stack(outloss)  # tf.pack was renamed tf.stack in TF 1.0
    self.pred = tf.stack(prediction)
return batch_loss