This article collects typical usage examples of the tensorflow.count_nonzero method in Python. If you are wondering what tensorflow.count_nonzero does or how to use it, the curated code examples below may help. You can also browse further usage examples for the tensorflow module as a whole.
Fifteen code examples of tensorflow.count_nonzero are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
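Before the examples, here is a minimal sketch of the API itself (TF 1.x style; in TF 2.x the same op lives at tf.math.count_nonzero). It reduces a tensor to the number of non-zero entries, optionally along an axis, and returns int64 unless a dtype is given. The values in the comments are what a tf.Session run would return:

import tensorflow as tf

x = tf.constant([[0, 1, 0],
                 [2, 3, 0]])
total = tf.count_nonzero(x)                            # 3 (scalar, int64 by default)
per_col = tf.count_nonzero(x, axis=0)                  # [1, 2, 0]
per_row = tf.count_nonzero(x, axis=1, keep_dims=True)  # [[1], [2]]
as_float = tf.count_nonzero(x, dtype=tf.float32)       # 3.0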
Example 1: micro_f1
# Required module: import tensorflow [as alias]
# Or: from tensorflow import count_nonzero [as alias]
def micro_f1(logits, labels, mask):
    """Micro-averaged F1 score with masking."""
    predicted = tf.round(tf.nn.sigmoid(logits))
    # Use integers to avoid any nasty FP behaviour
    predicted = tf.cast(predicted, dtype=tf.int32)
    labels = tf.cast(labels, dtype=tf.int32)
    mask = tf.cast(mask, dtype=tf.int32)
    # Expand the mask so that broadcasting works ([nb_nodes, 1])
    mask = tf.expand_dims(mask, -1)
    # Count true positives, true negatives, false positives and false negatives.
    tp = tf.count_nonzero(predicted * labels * mask)
    tn = tf.count_nonzero((predicted - 1) * (labels - 1) * mask)
    fp = tf.count_nonzero(predicted * (labels - 1) * mask)
    fn = tf.count_nonzero((predicted - 1) * labels * mask)
    # Calculate precision, recall and the F1 score.
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    fmeasure = (2 * precision * recall) / (precision + recall)
    fmeasure = tf.cast(fmeasure, tf.float32)
    return fmeasure
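The four count_nonzero calls above rely on a sign trick: subtracting 1 maps {0, 1} to {-1, 0}, so each product is non-zero exactly where the corresponding confusion-matrix condition holds. A quick NumPy check of the same identities (NumPy used here purely for the arithmetic):

import numpy as np

predicted = np.array([1, 0, 1, 0])
labels    = np.array([1, 1, 0, 0])

tp = np.count_nonzero(predicted * labels)              # both 1        -> 1
tn = np.count_nonzero((predicted - 1) * (labels - 1))  # both 0        -> 1
fp = np.count_nonzero(predicted * (labels - 1))        # pred 1, lab 0 -> 1
fn = np.count_nonzero((predicted - 1) * labels)        # pred 0, lab 1 -> 1
print(tp, tn, fp, fn)  # 1 1 1 1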
Example 2: _grad_sparsity
# Required module: import tensorflow [as alias]
# Or: from tensorflow import count_nonzero [as alias]
def _grad_sparsity(self):
    """Gradient sparsity."""
    # If the sparse minibatch gradient has 10 percent of its entries
    # non-zero, its sparsity is 0.1.
    # The norm of the dense gradient averaged over the full dataset
    # is roughly the sparse minibatch gradient norm * sqrt(sparsity).
    # A possible extension is to correct only the sparse blob.
    non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
    all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
    self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
    self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
    avg_op = self._moving_averager.apply([self._sparsity])
    with tf.control_dependencies([avg_op]):
        self._sparsity_avg = self._moving_averager.average(self._sparsity)
    return avg_op
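As a sanity check, the same non-zero fraction can be computed on toy gradients. This standalone sketch (not part of the original optimizer class) mirrors the two add_n reductions above:

import tensorflow as tf

grads = [tf.constant([0., 1., 0., 2.]), tf.constant([0., 0., 3.])]
non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in grads])             # -> 3
all_entry_cnt = tf.add_n([tf.cast(tf.size(g), tf.int64) for g in grads])  # -> 7
sparsity = tf.cast(non_zero_cnt, tf.float32) / tf.cast(all_entry_cnt, tf.float32)  # -> ~0.43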
Example 3: calculate_model_precision
# Required module: import tensorflow [as alias]
# Or: from tensorflow import count_nonzero [as alias]
def calculate_model_precision(input_tensor, label_tensor):
    """
    Calculate accuracy: acc = correct_nums / ground_truth_nums.
    :param input_tensor: binary segmentation logits
    :param label_tensor: binary segmentation label
    :return: accuracy tensor
    """
    logits = tf.nn.softmax(logits=input_tensor)
    final_output = tf.expand_dims(tf.argmax(logits, axis=-1), axis=-1)
    # Indices of pixels predicted as foreground (class 1).
    idx = tf.where(tf.equal(final_output, 1))
    pix_cls_ret = tf.gather_nd(label_tensor, idx)
    # Correctly predicted pixels divided by the number of ground-truth foreground pixels.
    accuracy = tf.count_nonzero(pix_cls_ret)
    accuracy = tf.divide(
        accuracy,
        tf.cast(tf.shape(tf.gather_nd(label_tensor, tf.where(tf.equal(label_tensor, 1))))[0], tf.int64))
    return accuracy
Example 4: calculate_model_fp
# Required module: import tensorflow [as alias]
# Or: from tensorflow import count_nonzero [as alias]
def calculate_model_fp(input_tensor, label_tensor):
    """
    Calculate the false positive rate.
    :param input_tensor: binary segmentation logits
    :param label_tensor: binary segmentation label
    :return: false positive rate tensor
    """
    logits = tf.nn.softmax(logits=input_tensor)
    final_output = tf.expand_dims(tf.argmax(logits, axis=-1), axis=-1)
    idx = tf.where(tf.equal(final_output, 1))
    pix_cls_ret = tf.gather_nd(final_output, idx)
    # Predicted-positive pixels whose label is not 1 are false positives.
    false_pred = tf.cast(tf.shape(pix_cls_ret)[0], tf.int64) - tf.count_nonzero(
        tf.gather_nd(label_tensor, idx)
    )
    return tf.divide(false_pred, tf.cast(tf.shape(pix_cls_ret)[0], tf.int64))
Example 5: calculate_model_fn
# Required module: import tensorflow [as alias]
# Or: from tensorflow import count_nonzero [as alias]
def calculate_model_fn(input_tensor, label_tensor):
    """
    Calculate the false negative rate.
    :param input_tensor: binary segmentation logits
    :param label_tensor: binary segmentation label
    :return: false negative rate tensor
    """
    logits = tf.nn.softmax(logits=input_tensor)
    final_output = tf.expand_dims(tf.argmax(logits, axis=-1), axis=-1)
    idx = tf.where(tf.equal(label_tensor, 1))
    pix_cls_ret = tf.gather_nd(final_output, idx)
    label_cls_ret = tf.gather_nd(label_tensor, tf.where(tf.equal(label_tensor, 1)))
    # Ground-truth positive pixels not predicted as 1 are false negatives.
    mis_pred = tf.cast(tf.shape(label_cls_ret)[0], tf.int64) - tf.count_nonzero(pix_cls_ret)
    return tf.divide(mis_pred, tf.cast(tf.shape(label_cls_ret)[0], tf.int64))
Example 6: log_coral_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import count_nonzero [as alias]
def log_coral_loss(self, h_src, h_trg, gamma=1e-3):
    # Regularized covariances can result in inf or nan.
    # First: subtract the mean from the data matrix.
    batch_size = tf.to_float(tf.shape(h_src)[0])
    h_src = h_src - tf.reduce_mean(h_src, axis=0)
    h_trg = h_trg - tf.reduce_mean(h_trg, axis=0)
    cov_source = (1. / (batch_size - 1)) * tf.matmul(h_src, h_src, transpose_a=True)  # + gamma * tf.eye(self.hidden_repr_size)
    cov_target = (1. / (batch_size - 1)) * tf.matmul(h_trg, h_trg, transpose_a=True)  # + gamma * tf.eye(self.hidden_repr_size)
    # Eigendecomposition of the covariance matrices.
    eig_source = tf.self_adjoint_eig(cov_source)
    eig_target = tf.self_adjoint_eig(cov_target)
    # Matrix logarithm: log(C) = V diag(log(eigvals)) V^T.
    log_cov_source = tf.matmul(eig_source[1], tf.matmul(tf.diag(tf.log(eig_source[0])), eig_source[1], transpose_b=True))
    log_cov_target = tf.matmul(eig_target[1], tf.matmul(tf.diag(tf.log(eig_target[0])), eig_target[1], transpose_b=True))
    # Returns the mean squared (Frobenius-style) difference of the matrix logs.
    return tf.reduce_mean(tf.square(tf.subtract(log_cov_source, log_cov_target)))
    #~ return tf.reduce_mean(tf.reduce_max(eig_target[0]))
    #~ return tf.to_float(tf.equal(tf.count_nonzero(h_src), tf.count_nonzero(h_src)))
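For intuition, the log-Euclidean distance built above (matrix log via eigendecomposition, then a mean squared difference) can be cross-checked offline in NumPy. This is an illustrative sketch, not part of the original class; np.linalg.eigh plays the role of tf.self_adjoint_eig:

import numpy as np

def logm_spd(cov):
    # Matrix logarithm of a symmetric positive-definite matrix:
    # V diag(log(w)) V^T, mirroring the TF code above.
    w, v = np.linalg.eigh(cov)
    return v @ np.diag(np.log(w)) @ v.T

rng = np.random.RandomState(0)
h_src, h_trg = rng.randn(64, 8), rng.randn(64, 8)
cov_src = np.cov(h_src, rowvar=False)  # centered, 1/(N-1)-normalized, as above
cov_trg = np.cov(h_trg, rowvar=False)
loss = np.mean((logm_spd(cov_src) - logm_spd(cov_trg)) ** 2)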
Example 7: count_nonzero_wrapper
# Required module: import tensorflow [as alias]
# Or: from tensorflow import count_nonzero [as alias]
def count_nonzero_wrapper(X, optype):
    """Wrapper for handling sparse and dense versions of `tf.count_nonzero`.

    Parameters
    ----------
    X : tf.Tensor (N, K)
    optype : str, {'dense', 'sparse'}

    Returns
    -------
    tf.Tensor (1, K)
    """
    with tf.name_scope('count_nonzero_wrapper') as scope:
        if optype == 'dense':
            return tf.count_nonzero(X, axis=0, keep_dims=True)
        elif optype == 'sparse':
            # Replace the stored values with ones and sum the indicator:
            # this counts the stored (non-zero) entries per column.
            indicator_X = tf.SparseTensor(X.indices, tf.ones_like(X.values), X.dense_shape)
            return tf.sparse_reduce_sum(indicator_X, axis=0, keep_dims=True)
        else:
            raise ValueError('Unknown input type in count_nonzero_wrapper')
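A hypothetical usage sketch, assuming the wrapper above is in scope; both calls count two non-zero entries per column, though the dense branch returns int64 counts while the sparse branch returns the dtype of the indicator values:

import tensorflow as tf

dense = tf.constant([[0., 2.],
                     [3., 0.],
                     [5., 6.]])
sparse = tf.SparseTensor(indices=[[0, 1], [1, 0], [2, 0], [2, 1]],
                         values=[2., 3., 5., 6.],
                         dense_shape=[3, 2])

dense_counts = count_nonzero_wrapper(dense, 'dense')     # -> [[2, 2]] (int64)
sparse_counts = count_nonzero_wrapper(sparse, 'sparse')  # -> [[2., 2.]] (float32)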
Example 8: statistics
# Required module: import tensorflow [as alias]
# Or: from tensorflow import count_nonzero [as alias]
def statistics(prediction, annotation):
    with tf.name_scope('statistics'):
        predict_values = tf.cast(prediction, tf.float32)
        annotation_values = tf.cast(annotation, tf.float32)
        true_positive = tf.count_nonzero(predict_values * annotation_values, dtype=tf.float32)
        true_negative = tf.count_nonzero((predict_values - 1) * (annotation_values - 1), dtype=tf.float32)
        false_positive = tf.count_nonzero(predict_values * (annotation_values - 1), dtype=tf.float32)
        false_negative = tf.count_nonzero((predict_values - 1) * annotation_values, dtype=tf.float32)
        precision = true_positive / (true_positive + false_positive)
        recall = true_positive / (true_positive + false_negative)
        f_score = 2 * precision * recall / (precision + recall)
        mcc = (true_positive * true_negative - false_positive * false_negative) / tf.sqrt(
            (true_positive + false_positive) * (true_positive + false_negative) *
            (true_negative + false_positive) * (true_negative + false_negative))
    return precision, recall, f_score, mcc
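These statistics can be sanity-checked against scikit-learn on a small example (an illustrative sketch assuming scikit-learn is installed; the original code does not depend on it):

import numpy as np
from sklearn.metrics import matthews_corrcoef, precision_score, recall_score

y_pred = np.array([1, 0, 1, 1, 0])
y_true = np.array([1, 0, 0, 1, 1])
# tp=2, tn=1, fp=1, fn=1
print(precision_score(y_true, y_pred))    # 2/3
print(recall_score(y_true, y_pred))       # 2/3
print(matthews_corrcoef(y_true, y_pred))  # (2*1 - 1*1) / sqrt(3*3*2*2) = 1/6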
Example 9: concat
# Required module: import tensorflow [as alias]
# Or: from tensorflow import count_nonzero [as alias]
def concat(layers, embed_keep_prob=1., drop_func=dropout, reuse=True):
    """Concatenate embedding layers along the last axis, optionally applying dropout."""
    layer = tf.concat(layers, 2)
    if embed_keep_prob < 1:
        layer = drop_func(layer, embed_keep_prob)
    ## Upscale everything
    #if embed_keep_prob < 1 and drop_func == dropout:
    #  output_size = layer.get_shape().as_list()[-1]
    #  n_nonzeros = tf.count_nonzero(layer, axis=2, keep_dims=True)
    #  scale_factor = output_size / (n_nonzeros+1e-12)
    #  layer *= scale_factor
    return layer
#===============================================================
Example 10: create_model
# Required module: import tensorflow [as alias]
# Or: from tensorflow import count_nonzero [as alias]
def create_model(self):
    """Creates a TF model and returns ops necessary to run training/eval."""
    features = tf.compat.v1.placeholder(tf.float32, [None, self.input_dim])
    labels = tf.compat.v1.placeholder(tf.float32, [None, self.num_classes])
    w = tf.Variable(tf.random.normal(shape=[self.input_dim, self.num_classes]))
    b = tf.Variable(tf.random.normal(shape=[self.num_classes]))
    pred = tf.nn.softmax(tf.matmul(features, w) + b)
    loss = tf.reduce_mean(-tf.reduce_sum(labels * tf.math.log(pred), axis=1))
    train_op = self.optimizer.minimize(
        loss=loss, global_step=tf.train.get_or_create_global_step())
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(labels, 1))
    # Number of correct predictions in the batch.
    eval_metric_op = tf.count_nonzero(correct_pred)
    return features, labels, train_op, loss, eval_metric_op
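A sketch of how the returned ops might be driven in a TF1 session; the `model` instance, batch size of 32, and random data are illustrative assumptions, not part of the original source:

import numpy as np
import tensorflow as tf

features_ph, labels_ph, train_op, loss, eval_metric_op = model.create_model()
x = np.random.rand(32, model.input_dim).astype(np.float32)
y = np.eye(model.num_classes)[np.random.randint(model.num_classes, size=32)].astype(np.float32)

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    _, batch_loss, n_correct = sess.run(
        [train_op, loss, eval_metric_op],
        feed_dict={features_ph: x, labels_ph: y})
    print(batch_loss, n_correct)  # n_correct counts matching argmax predictions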
Example 11: get_predict_nodes
# Required module: import tensorflow [as alias]
# Or: from tensorflow import count_nonzero [as alias]
def get_predict_nodes(self, x, y=None, config=None):
    """Return a dictionary of graph nodes for prediction."""
    LOGGER.info("Building prediction nodes.")
    nodes = {}
    with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
        # Generator output and predictions
        nodes['fake_y'] = self.gen(x, training=False)
        nodes['predictions'] = tf.expand_dims(
            tf.argmax(nodes['fake_y'], 1, output_type=tf.int32), -1)
        if y is not None:
            # Compute the accuracy
            nodes['real_y'] = tf.one_hot(y, config['n_classes'])
            nodes['correct_predictions_counts'] = tf.count_nonzero(
                tf.equal(y, nodes['predictions']), dtype=tf.int32)
    return nodes
Example 12: triplet_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import count_nonzero [as alias]
def triplet_loss(self, anchor, positive, negative, gamma):
    """
    Triplet loss calculation.
    Args:
        anchor: anchor feature matrix (NxM)
        positive: positive feature matrix (NxM)
        negative: negative feature matrix (NxM)
        gamma: margin parameter
    Returns:
        loss: total triplet loss
        error: percentage of triplets with positive loss
    """
    with tf.name_scope('triplet_loss'):
        pos_dist = self.euclidean_distance(anchor, positive)
        neg_dist = self.euclidean_distance(anchor, negative)
        loss = tf.maximum(0., pos_dist - neg_dist + gamma)
        # Fraction of triplets that violate the margin, as a percentage.
        error = tf.count_nonzero(loss, dtype=tf.float32) / \
            tf.cast(tf.shape(anchor)[0], tf.float32) * tf.constant(100.0)
        loss = tf.reduce_mean(loss)
        tf.summary.scalar('loss', loss)
        tf.summary.scalar('error', error)
    return loss, error
Example 13: _compare
# Required module: import tensorflow [as alias]
# Or: from tensorflow import count_nonzero [as alias]
def _compare(self,
             x,
             reduction_axes,
             keep_dims,
             use_gpu=False,
             feed_dict=None):
    np_ans = (x != 0).astype(np.int32)
    if reduction_axes is None:
        np_ans = np.sum(np_ans, keepdims=keep_dims)
    else:
        reduction_axes = np.array(reduction_axes).astype(np.int32)
        for ra in reduction_axes.ravel()[::-1]:
            np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
    with self.test_session(use_gpu=use_gpu) as sess:
        tf_ans = tf.count_nonzero(x, reduction_axes, keep_dims)
        out = sess.run(tf_ans, feed_dict)
    self.assertAllClose(np_ans, out)
    self.assertShapeEqual(np_ans, tf_ans)
Example 14: MaskedCrossEntropyLoss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import count_nonzero [as alias]
def MaskedCrossEntropyLoss(outputs, targets, lengths=None, mask=None, max_len=None):
    if lengths is None and mask is None:
        raise RuntimeError('Please provide either lengths or mask')

    # [batch_size, time_length]
    if mask is None:
        mask = sequence_mask(lengths, max_len, False)

    # One-hot encode targets (outputs.shape[-1] = hparams.quantize_channels)
    targets_ = tf.one_hot(targets, depth=tf.shape(outputs)[-1])

    with tf.control_dependencies([tf.assert_equal(tf.shape(outputs), tf.shape(targets_))]):
        losses = tf.nn.softmax_cross_entropy_with_logits_v2(logits=outputs, labels=targets_)

    with tf.control_dependencies([tf.assert_equal(tf.shape(mask), tf.shape(losses))]):
        masked_loss = losses * mask

    return tf.reduce_sum(masked_loss) / tf.count_nonzero(masked_loss, dtype=tf.float32)
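Note the normalization: dividing by tf.count_nonzero(masked_loss) averages over positions that survive the mask and have non-zero loss, rather than over every padded timestep (a caveat: an in-mask position whose loss is exactly 0.0 is also excluded). A small sketch of the difference against a naive reduce_mean:

import tensorflow as tf

losses = tf.constant([[0.5, 0.2, 0.0],
                      [0.3, 0.0, 0.0]])
mask = tf.constant([[1., 1., 0.],
                    [1., 0., 0.]])
masked = losses * mask
naive_mean = tf.reduce_mean(masked)  # 1.0 / 6, diluted by padding
masked_mean = tf.reduce_sum(masked) / tf.count_nonzero(masked, dtype=tf.float32)  # 1.0 / 3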
Example 15: MaskedSigmoidCrossEntropy
# Required module: import tensorflow [as alias]
# Or: from tensorflow import count_nonzero [as alias]
def MaskedSigmoidCrossEntropy(targets, outputs, targets_lengths, hparams, mask=None):
    '''Computes a masked sigmoid cross entropy with logits.'''
    # [batch_size, time_dimension]
    # example:
    # sequence_mask([1, 3, 2], 5) = [[1., 0., 0., 0., 0.],
    #                                [1., 1., 1., 0., 0.],
    #                                [1., 1., 0., 0., 0.]]
    # Note the maxlen argument, which ensures the mask shape is compatible with r > 1.
    # This will by default mask the extra paddings caused by r > 1.
    if mask is None:
        mask = sequence_mask(targets_lengths, hparams.outputs_per_step, False)

    with tf.control_dependencies([tf.assert_equal(tf.shape(targets), tf.shape(mask))]):
        # Use a weighted sigmoid cross entropy to measure the <stop_token> loss.
        # Setting hparams.cross_entropy_pos_weight to 1 has the same effect as
        # vanilla tf.nn.sigmoid_cross_entropy_with_logits.
        losses = tf.nn.weighted_cross_entropy_with_logits(targets=targets, logits=outputs, pos_weight=hparams.cross_entropy_pos_weight)

    with tf.control_dependencies([tf.assert_equal(tf.shape(mask), tf.shape(losses))]):
        masked_loss = losses * mask

    return tf.reduce_sum(masked_loss) / tf.count_nonzero(masked_loss, dtype=tf.float32)