This article collects typical usage examples of the tensorflow.count_nonzero function in Python. If you are wondering how count_nonzero is used in practice, or what real code that calls it looks like, the curated examples below should help.
Fifteen code examples of count_nonzero are shown below, ordered roughly by popularity.
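All snippets are excerpted from larger projects; they assume TensorFlow 1.x with `import tensorflow as tf`, plus whatever project-specific helpers they reference. As a quick orientation, here is a minimal self-contained sketch of the op itself, using made-up values rather than code from any of the examples:

import tensorflow as tf

x = tf.constant([[0, 1, 0],
                 [2, 3, 0]])
total = tf.count_nonzero(x)                             # scalar: 3 non-zero entries
per_row = tf.count_nonzero(x, axis=1)                   # [1, 2]
as_int32 = tf.count_nonzero(x, axis=1, dtype=tf.int32)  # output dtype defaults to int64

with tf.Session() as sess:
    print(sess.run([total, per_row, as_int32]))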
Example 1: polyphonic_rate
def polyphonic_rate(tensor, threshold=2):
    """Return the ratio of the number of time steps where the number of pitches
    being played is larger than `threshold` to the total number of time steps."""
    if tensor.get_shape().ndims != 5:
        raise ValueError("Input tensor must have 5 dimensions.")
    n_poly = tf.count_nonzero((tf.count_nonzero(tensor, 3) > threshold), 2)
    return tf.reduce_mean((n_poly / tensor.get_shape()[2]), [0, 1])
Example 2: init_training_graph
def init_training_graph(self):
    with tf.name_scope('Evaluation'):
        logits = self.last
        prob_b = tf.squeeze(logits, squeeze_dims=[1, 2])
        self.predictions = tf.argmax(prob_b, axis=1)

        with tf.name_scope('Loss'):
            self.loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=prob_b,
                    labels=tf.cast(self.train_labels_node, tf.int32),
                    name="entropy"))
            tf.summary.scalar("entropy", self.loss)

        with tf.name_scope('Accuracy'):
            LabelInt = tf.cast(self.train_labels_node, tf.int64)
            CorrectPrediction = tf.equal(self.predictions, LabelInt)
            self.accuracy = tf.reduce_mean(tf.cast(CorrectPrediction, tf.float32))
            tf.summary.scalar("accuracy", self.accuracy)

        with tf.name_scope('Prediction'):
            self.TP = tf.count_nonzero(self.predictions * LabelInt)
            self.TN = tf.count_nonzero((self.predictions - 1) * (LabelInt - 1))
            self.FP = tf.count_nonzero(self.predictions * (LabelInt - 1))
            self.FN = tf.count_nonzero((self.predictions - 1) * LabelInt)

        with tf.name_scope('Precision'):
            self.precision = tf.divide(self.TP, tf.add(self.TP, self.FP))
            tf.summary.scalar('Precision', self.precision)

        with tf.name_scope('Recall'):
            self.recall = tf.divide(self.TP, tf.add(self.TP, self.FN))
            tf.summary.scalar('Recall', self.recall)

        with tf.name_scope('F1'):
            num = tf.multiply(self.precision, self.recall)
            dem = tf.add(self.precision, self.recall)
            self.F1 = tf.scalar_mul(2, tf.divide(num, dem))
            tf.summary.scalar('F1', self.F1)

        with tf.name_scope('MeanAccuracy'):
            Nprecision = tf.divide(self.TN, tf.add(self.TN, self.FN))
            self.MeanAcc = tf.divide(tf.add(self.precision, Nprecision), 2)

    # self.batch = tf.Variable(0, name="batch_iterator")
    self.train_prediction = tf.nn.softmax(logits)
    self.test_prediction = tf.nn.softmax(logits)
    tf.global_variables_initializer().run()
    print('Computational graph initialised')
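The confusion-matrix counts in Example 2 rest on a small arithmetic trick: for predictions and labels in {0, 1}, `pred * label` is non-zero only for true positives, `(pred - 1) * (label - 1)` only for true negatives, and the mixed products pick out false positives and false negatives. A toy sketch with made-up values (not part of the original project) makes the identities concrete:

import tensorflow as tf

pred = tf.constant([1, 0, 1, 1, 0], dtype=tf.int64)
label = tf.constant([1, 0, 0, 1, 1], dtype=tf.int64)

tp = tf.count_nonzero(pred * label)              # both are 1          -> 2
tn = tf.count_nonzero((pred - 1) * (label - 1))  # both are 0          -> 1
fp = tf.count_nonzero(pred * (label - 1))        # pred 1, label 0     -> 1
fn = tf.count_nonzero((pred - 1) * label)        # pred 0, label 1     -> 1

with tf.Session() as sess:
    print(sess.run([tp, tn, fp, fn]))  # [2, 1, 1, 1]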
Example 3: rpn_losses
def rpn_losses(anchor_labels, anchor_boxes, label_logits, box_logits):
    """
    Args:
        anchor_labels: fHxfWxNA
        anchor_boxes: fHxfWxNAx4, encoded
        label_logits: fHxfWxNA
        box_logits: fHxfWxNAx4
    Returns:
        label_loss, box_loss
    """
    with tf.device('/cpu:0'):
        valid_mask = tf.stop_gradient(tf.not_equal(anchor_labels, -1))
        pos_mask = tf.stop_gradient(tf.equal(anchor_labels, 1))
        nr_valid = tf.stop_gradient(tf.count_nonzero(valid_mask, dtype=tf.int32), name='num_valid_anchor')
        nr_pos = tf.count_nonzero(pos_mask, dtype=tf.int32, name='num_pos_anchor')

    valid_anchor_labels = tf.boolean_mask(anchor_labels, valid_mask)
    valid_label_logits = tf.boolean_mask(label_logits, valid_mask)

    with tf.name_scope('label_metrics'):
        valid_label_prob = tf.nn.sigmoid(valid_label_logits)
        summaries = []
        with tf.device('/cpu:0'):
            for th in [0.5, 0.2, 0.1]:
                valid_prediction = tf.cast(valid_label_prob > th, tf.int32)
                nr_pos_prediction = tf.reduce_sum(valid_prediction, name='num_pos_prediction')
                pos_prediction_corr = tf.count_nonzero(
                    tf.logical_and(
                        valid_label_prob > th,
                        tf.equal(valid_prediction, valid_anchor_labels)),
                    dtype=tf.int32)
                summaries.append(tf.truediv(
                    pos_prediction_corr,
                    nr_pos, name='recall_th{}'.format(th)))
                precision = tf.to_float(tf.truediv(pos_prediction_corr, nr_pos_prediction))
                precision = tf.where(tf.equal(nr_pos_prediction, 0), 0.0, precision, name='precision_th{}'.format(th))
                summaries.append(precision)
        add_moving_summary(*summaries)

    label_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.to_float(valid_anchor_labels), logits=valid_label_logits)
    label_loss = tf.reduce_mean(label_loss, name='label_loss')

    pos_anchor_boxes = tf.boolean_mask(anchor_boxes, pos_mask)
    pos_box_logits = tf.boolean_mask(box_logits, pos_mask)
    delta = 1.0 / 9
    box_loss = tf.losses.huber_loss(
        pos_anchor_boxes, pos_box_logits, delta=delta,
        reduction=tf.losses.Reduction.SUM) / delta
    box_loss = tf.div(
        box_loss,
        tf.cast(nr_valid, tf.float32), name='box_loss')

    add_moving_summary(label_loss, box_loss, nr_valid, nr_pos)
    return label_loss, box_loss
Example 4: build_graph
def build_graph(self):
    file_pattern = os.path.join(self.params['data_dir'],
                                self.params['file_pattern'])
    self.batched_dataset = _read_and_batch_from_files(
        file_pattern=file_pattern,
        batch_size=self.params['batch_size'],
        max_length=self.params['max_length'],
        num_cpu_cores=self.params.get('num_cpu_cores', 2),
        shuffle=self.params['shuffle'],
        repeat=self.params['repeat'],
        num_workers=self._num_workers,
        worker_id=self._worker_id)

    self._iterator = self.batched_dataset.make_initializable_iterator()
    x, y = self.iterator.get_next()

    if self.params.get('m_padding', False):
        # MAGIC PADDING
        x = tf.cond(tf.equal(tf.shape(x)[1] % 8, 0),
                    true_fn=lambda: x,
                    false_fn=lambda: tf.pad(x,
                                            paddings=[[0, 0],
                                                      [0, 8 - tf.shape(x)[1] % 8]]))
        y = tf.cond(tf.equal(tf.shape(y)[1] % 8, 0),
                    true_fn=lambda: y,
                    false_fn=lambda: tf.pad(y,
                                            paddings=[[0, 0],
                                                      [0, 8 - tf.shape(y)[1] % 8]]))
        x = tf.cond(tf.equal(tf.shape(x)[0] % 8, 0),
                    true_fn=lambda: x,
                    false_fn=lambda: tf.pad(x,
                                            paddings=[[0, 8 - tf.shape(x)[0] % 8],
                                                      [0, 0]]))
        y = tf.cond(tf.equal(tf.shape(y)[0] % 8, 0),
                    true_fn=lambda: y,
                    false_fn=lambda: tf.pad(y,
                                            paddings=[[0, 8 - tf.shape(y)[0] % 8],
                                                      [0, 0]]))
        # ENDOF MAGIC PADDING

    len_x = tf.count_nonzero(x, axis=1, dtype=tf.int32)
    len_y = tf.count_nonzero(y, axis=1, dtype=tf.int32)

    if self.params['mode'] == 'train' or self.params['mode'] == 'eval':
        self._input_tensors['source_tensors'] = [x, len_x]
        self._input_tensors['target_tensors'] = [y, len_y]
    else:
        self._input_tensors['source_tensors'] = [x, len_x]
Example 5: hard_negative_mining
def hard_negative_mining():
    bboxes_per_batch = tf.unstack(bboxes)
    classification_loss_per_batch = tf.unstack(classification_loss)
    num_positives_per_batch = tf.unstack(tf.reduce_sum(positives, axis=-1))
    neg_class_loss_per_batch = tf.unstack(neg_class_loss_all)

    neg_class_losses = []
    total_negatives = []

    for bboxes_per_image, classification_loss_per_image, num_positives_per_image, neg_class_loss_per_image in \
            zip(bboxes_per_batch, classification_loss_per_batch, num_positives_per_batch, neg_class_loss_per_batch):
        min_negatives_keep = tf.maximum(self.neg_pos_ratio * num_positives_per_image, 3)
        num_negatives_keep = tf.minimum(min_negatives_keep,
                                        tf.count_nonzero(neg_class_loss_per_image, dtype=tf.float32))
        indices = tf.image.non_max_suppression(bboxes_per_image, classification_loss_per_image,
                                               tf.to_int32(num_negatives_keep), iou_threshold=0.99)
        num_negatives = tf.size(indices)
        total_negatives.append(num_negatives)

        expanded_indexes = tf.expand_dims(indices, axis=1)  # shape: (num_negatives, 1)
        negatives_keep = tf.scatter_nd(expanded_indexes, updates=tf.ones_like(indices, dtype=tf.int32),
                                       shape=tf.shape(classification_loss_per_image))  # shape: (num_priors,)
        negatives_keep = tf.to_float(tf.reshape(negatives_keep, [num_priors]))  # shape: (num_priors,)
        neg_class_losses.append(tf.reduce_sum(classification_loss_per_image * negatives_keep, axis=-1))  # shape: (1,)

    return tf.stack(neg_class_losses), tf.reduce_sum(tf.stack(total_negatives))
Example 6: testSparseConstraint
def testSparseConstraint(self):
    expected = [float(round(N * WEIGHT_SPARSITY))] * BATCH_SIZE
    constraint = htm.constraints.Sparse(sparsity=WEIGHT_SPARSITY)
    with self.test_session(config=CONFIG):
        actual = constraint(tf.ones([BATCH_SIZE, N]))
        tf.global_variables_initializer().run()
        self.assertAllEqual(tf.count_nonzero(actual, axis=1).eval(), expected)
Example 7: _get_testing
def _get_testing(rnn_logits, sequence_length, label, label_length):
    """Create ops for testing (all scalars):
       loss: CTC loss function value,
       label_error: Batch-normalized edit distance on beam search max
       sequence_error: Batch-normalized sequence error rate
    """
    with tf.name_scope("train"):
        loss = model.ctc_loss_layer(rnn_logits, label, sequence_length)
    with tf.name_scope("test"):
        predictions, _ = tf.nn.ctc_beam_search_decoder(rnn_logits,
                                                       sequence_length,
                                                       beam_width=128,
                                                       top_paths=1,
                                                       merge_repeated=True)
        hypothesis = tf.cast(predictions[0], tf.int32)  # for edit_distance
        label_errors = tf.edit_distance(hypothesis, label, normalize=False)
        sequence_errors = tf.count_nonzero(label_errors, axis=0)
        total_label_error = tf.reduce_sum(label_errors)
        total_labels = tf.reduce_sum(label_length)
        label_error = tf.truediv(total_label_error,
                                 tf.cast(total_labels, tf.float32),
                                 name='label_error')
        sequence_error = tf.truediv(tf.cast(sequence_errors, tf.int32),
                                    tf.shape(label_length)[0],
                                    name='sequence_error')
        tf.summary.scalar('loss', loss)
        tf.summary.scalar('label_error', label_error)
        tf.summary.scalar('sequence_error', sequence_error)

    return loss, label_error, sequence_error
Example 8: _decode_and_resize
def _decode_and_resize(image_tensor):
    """Decodes jpeg string, resizes it and returns a uint8 tensor."""
    # These constants are set by Inception v3's expectations.
    height = 299
    width = 299
    channels = 3

    image_tensor = tf.where(tf.equal(image_tensor, ''), IMAGE_DEFAULT_STRING, image_tensor)

    # Fork by whether image_tensor value is a file path, or a base64 encoded string.
    slash_positions = tf.equal(tf.string_split([image_tensor], delimiter="").values, '/')
    is_file_path = tf.cast(tf.count_nonzero(slash_positions), tf.bool)

    # The following two functions are required for tf.cond. Note that we can not replace them
    # with lambda. According to TF docs, if using inline lambda, both branches of condition
    # will be executed. The workaround is to use a function call.
    def _read_file():
        return tf.read_file(image_tensor)

    def _decode_base64():
        return tf.decode_base64(image_tensor)

    image = tf.cond(is_file_path, lambda: _read_file(), lambda: _decode_base64())
    image = tf.image.decode_jpeg(image, channels=channels)
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
    image = tf.squeeze(image, squeeze_dims=[0])
    image = tf.cast(image, dtype=tf.uint8)
    return image
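Example 8 uses count_nonzero as a boolean test: the input string is split into characters, the '/' occurrences are counted, and a non-zero count cast to tf.bool means the string is treated as a file path rather than base64 data. A standalone sketch of just that check, with a made-up input string:

import tensorflow as tf

image_str = tf.constant("/tmp/cat.jpg")  # hypothetical path, not from the original pipeline
slash_positions = tf.equal(tf.string_split([image_str], delimiter="").values, '/')
is_file_path = tf.cast(tf.count_nonzero(slash_positions), tf.bool)

with tf.Session() as sess:
    print(sess.run(is_file_path))  # True, because the string contains at least one '/'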
Example 9: buildGraph
def buildGraph(self):
    self.graph = tf.Graph()
    with self.graph.as_default():
        # train_input: [batch_size * embed_size]; one batch holds multiple rows
        self.train_input = tf.placeholder(tf.float32, shape=[self.batch_size, self.embed_size], name='train_input')
        self.train_label = tf.placeholder(tf.int32, shape=[self.batch_size], name='train_label')
        label_float = tf.cast(self.train_label, tf.float32)
        # label_matrix = tf.Variable(tf.diag(tf.ones(self.label_size)), trainable=False)
        label_matrix = tf.diag(tf.ones(self.label_size))
        embed_label = tf.nn.embedding_lookup(label_matrix, self.train_label)
        hidden_unit = 50
        self.weight = tf.Variable(tf.truncated_normal(shape=[hidden_unit, self.embed_size], stddev=1.0 / math.sqrt(self.embed_size)))
        self.biase = tf.Variable(tf.zeros([hidden_unit]))
        y1 = tf.matmul(self.train_input, self.weight, transpose_b=True) + self.biase
        g1 = tf.nn.sigmoid(y1)  # batch_size * hidden_unit
        weight2 = tf.Variable(tf.truncated_normal(shape=[self.label_size, hidden_unit], stddev=1.0 / math.sqrt(hidden_unit)))
        biase2 = tf.Variable(tf.zeros([self.label_size]))
        y2 = tf.matmul(g1, weight2, transpose_b=True) + biase2
        g2 = tf.nn.sigmoid(y2)
        self.predict = tf.cast(tf.argmax(g2, axis=1), tf.float32)
        self.error_num = tf.count_nonzero(label_float - self.predict)
        self.loss = tf.reduce_mean(-tf.reduce_sum(embed_label * tf.log(g2 + 0.0001) + (1 - embed_label) * tf.log(1 + 0.0001 - g2), axis=1))
        # self.train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(self.loss)
        self.train_op = tf.train.AdagradOptimizer(learning_rate=1).minimize(self.loss)
        self.init_op = tf.global_variables_initializer()
Example 10: calculate_reshape
def calculate_reshape(original_shape, new_shape, validate=False, name=None):
    """Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
    batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
    if batch_shape_static.is_fully_defined():
        return np.int32(batch_shape_static.as_list()), batch_shape_static, []
    with tf.name_scope(name, "calculate_reshape", [original_shape, new_shape]):
        original_size = tf.reduce_prod(original_shape)
        implicit_dim = tf.equal(new_shape, -1)
        size_implicit_dim = (
            original_size // tf.maximum(1, -tf.reduce_prod(new_shape)))
        new_ndims = tf.shape(new_shape)
        expanded_new_shape = tf.where(  # Assumes exactly one `-1`.
            implicit_dim, tf.fill(new_ndims, size_implicit_dim), new_shape)
        validations = [] if not validate else [
            tf.assert_rank(
                original_shape, 1, message="Original shape must be a vector."),
            tf.assert_rank(new_shape, 1, message="New shape must be a vector."),
            tf.assert_less_equal(
                tf.count_nonzero(implicit_dim, dtype=tf.int32),
                1,
                message="At most one dimension can be unknown."),
            tf.assert_positive(
                expanded_new_shape, message="Shape elements must be >=-1."),
            tf.assert_equal(
                tf.reduce_prod(expanded_new_shape),
                original_size,
                message="Shape sizes do not match."),
        ]
        return expanded_new_shape, batch_shape_static, validations
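In Example 10 count_nonzero serves validation only: it counts how many entries of the requested shape are -1, and the assertion rejects more than one wildcard. A small isolated sketch of the same check, using a toy shape rather than the library's inputs:

import tensorflow as tf

new_shape = tf.constant([4, -1, 3])
implicit_dim = tf.equal(new_shape, -1)
num_implicit = tf.count_nonzero(implicit_dim, dtype=tf.int32)
check = tf.assert_less_equal(num_implicit, 1,
                             message="At most one dimension can be -1.")

with tf.control_dependencies([check]):
    num_implicit = tf.identity(num_implicit)

with tf.Session() as sess:
    print(sess.run(num_implicit))  # 1, so the assertion passes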
Example 11: testDegenerate
def testDegenerate(self):
    for use_gpu in False, True:
        with self.test_session(use_gpu=use_gpu):
            for dtype in (tf.bool,):
                # A large number is needed to get Eigen to die
                x = tf.zeros((0, 9938), dtype=dtype)
                y = tf.count_nonzero(x, [0])
                self.assertAllEqual(y.eval(), np.zeros(9938))
Example 12: buildGraph
def buildGraph(self):
    self.graph = tf.Graph()
    with self.graph.as_default():
        self.inputs = tf.placeholder(tf.float32, [self.batch_size, self.max_depth, self.embed_size])  # inputs: num_step * embed_size
        self.seq_len = tf.placeholder(tf.int32)
        self.label = tf.placeholder(tf.int32, [1])
        label_float = tf.cast(self.label, tf.float32)
        label_matrix = tf.diag(tf.ones(self.label_size))
        embed_label = tf.nn.embedding_lookup(label_matrix, self.label)
        print('pin2.1')
        # input_list = list(tf.split(0, self.max_depth, expand_inputs))
        input_list = tf.unpack(self.inputs, axis=1)  # [[1,embed_size], ..., [1,embed_size]]
        print('pin2.2')
        # BasicRNNCell: [num_units, input_size, ...]
        # self.rnn_cell = tf.nn.rnn_cell.BasicRNNCell(self.hidden_size, self.embed_size)
        # self.rnn_cell = tf.nn.rnn_cell.LSTMCell(self.hidden_size, self.embed_size, state_is_tuple=True)
        self.rnn_cell = LTMCell(self.hidden_size, self.embed_size, state_is_tuple=True)
        self.rnn_cell = tf.nn.rnn_cell.DropoutWrapper(self.rnn_cell, output_keep_prob=0.9)
        print('pin2.3')
        init_stat = self.rnn_cell.zero_state(1, tf.float32)
        output_embedding, states = tf.nn.rnn(self.rnn_cell, input_list,
                                             initial_state=init_stat,
                                             sequence_length=self.seq_len)
        # state = init_stat
        # states = []
        # with tf.variable_scope('RNN'):
        #     for time_step in range(max_depth):
        #         if tf.equal(time_step, self.seq_len):
        #             break
        #         if time_step > 0:
        #             tf.get_variable_scope().reuse_variables()
        #         m, state = self.rnn_cell(input_list[time_step, :], state)
        #         states.append(state)
        # final_output = states[-1][0]
        print('pin2.4')
        final_output = states[-1]  # final_output: [1, hidden_size]
        print(final_output.get_shape())
        weight = tf.Variable(tf.truncated_normal([self.label_size, self.hidden_size],
                                                 stddev=1.0 / math.sqrt(self.hidden_size)))
        biase = tf.Variable(tf.zeros([self.label_size]))
        tmp_y = tf.matmul(final_output, weight, transpose_b=True) + biase
        tmp_g = tf.sigmoid(tmp_y)
        self.predict = tf.cast(tf.argmax(tmp_g, axis=1), tf.float32)
        self.error_num = tf.count_nonzero(label_float - self.predict)
        tiny_v = 0.0001
        self.loss = -tf.reduce_mean(embed_label * tf.log(tmp_g + tiny_v) + (1 - embed_label) * tf.log(1 + tiny_v - tmp_g))
        self.train_op = tf.train.AdagradOptimizer(learning_rate=1).minimize(self.loss)
        self.init_op = tf.global_variables_initializer()
Example 13: contrastive_loss
def contrastive_loss(left, right, y, margin, extra=False, scope="constrastive_loss"):
    r"""Loss for Siamese networks as described in the paper:
    `Learning a Similarity Metric Discriminatively, with Application to Face
    Verification <http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf>`_ by Chopra et al.

    .. math::
        \frac{1}{2} [y \cdot d^2 + (1-y) \cdot \max(0, m - d)^2], d = \Vert l - r \Vert_2

    Args:
        left (tf.Tensor): left feature vectors of shape [Batch, N].
        right (tf.Tensor): right feature vectors of shape [Batch, N].
        y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar.
        margin (float): horizon for negative examples (y==0).
        extra (bool): also return distances for pos and neg.

    Returns:
        tf.Tensor: constrastive_loss (averaged over the batch), (and optionally average_pos_dist, average_neg_dist)
    """
    with tf.name_scope(scope):
        y = tf.cast(y, tf.float32)

        delta = tf.reduce_sum(tf.square(left - right), 1)
        delta_sqrt = tf.sqrt(delta + 1e-10)

        match_loss = delta
        missmatch_loss = tf.square(tf.nn.relu(margin - delta_sqrt))

        loss = tf.reduce_mean(0.5 * (y * match_loss + (1 - y) * missmatch_loss))

        if extra:
            num_pos = tf.count_nonzero(y)
            num_neg = tf.count_nonzero(1 - y)
            pos_dist = tf.where(tf.equal(num_pos, 0), 0.,
                                tf.reduce_sum(y * delta_sqrt) / tf.cast(num_pos, tf.float32),
                                name="pos-dist")
            neg_dist = tf.where(tf.equal(num_neg, 0), 0.,
                                tf.reduce_sum((1 - y) * delta_sqrt) / tf.cast(num_neg, tf.float32),
                                name="neg-dist")
            return loss, pos_dist, neg_dist
        else:
            return loss
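In Example 13, count_nonzero(y) and count_nonzero(1 - y) give the number of similar and dissimilar pairs, and tf.where guards the averages against division by zero when one class is absent from the batch. A minimal sketch of that guard with made-up labels and distances:

import tensorflow as tf

y = tf.constant([1., 1., 0., 1.])          # three positive pairs, one negative
dist = tf.constant([0.2, 0.4, 1.5, 0.6])   # hypothetical pairwise distances
num_pos = tf.count_nonzero(y)
pos_dist = tf.where(tf.equal(num_pos, 0), 0.,
                    tf.reduce_sum(y * dist) / tf.cast(num_pos, tf.float32))

with tf.Session() as sess:
    print(sess.run(pos_dist))  # 0.4 = (0.2 + 0.4 + 0.6) / 3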
Example 14: _grad_sparsity
def _grad_sparsity(self):
    """Gradient sparsity."""
    # If the sparse minibatch gradient has 10 percent of its entries
    # non-zero, its sparsity is 0.1.
    # The norm of the dense gradient averaged over the full dataset is
    # roughly estimated as the minibatch sparse gradient norm * sqrt(sparsity).
    # An extension might correct only the sparse blob.
    non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
    all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
    self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
    self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
    avg_op = self._moving_averager.apply([self._sparsity,])
    with tf.control_dependencies([avg_op]):
        self._sparsity_avg = self._moving_averager.average(self._sparsity)
    return avg_op
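The sparsity in Example 14 is simply the number of non-zero gradient entries divided by the total number of entries, aggregated over all gradient tensors. A standalone toy computation (hypothetical tensors, not the optimizer's real state):

import tensorflow as tf

grads = [tf.constant([[0., 1.], [0., 0.]]), tf.constant([2., 0., 3.])]
non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in grads])              # 3
all_entry_cnt = tf.add_n([tf.size(g, out_type=tf.int64) for g in grads])   # 7
sparsity = tf.cast(non_zero_cnt, tf.float32) / tf.cast(all_entry_cnt, tf.float32)

with tf.Session() as sess:
    print(sess.run(sparsity))  # ~0.4286, i.e. 3 non-zero entries out of 7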
Example 15: __init__
def __init__(self, input_dim, lab_dim, learning_rate):
    self.input_feature = tf.placeholder(tf.float32, [None, input_dim])
    self.input_labels = tf.placeholder(tf.float32, [None, lab_dim])
    self.w = tf.Variable(tf.random_normal([input_dim, lab_dim]), name="weight")
    self.b = tf.Variable(tf.zeros([lab_dim]), name="bias")
    self.output = tf.matmul(self.input_feature, self.w) + self.b
    self.a1 = tf.argmax(tf.nn.softmax(self.output), axis=1)
    self.b1 = tf.argmax(self.input_labels, axis=1)
    self.err = tf.count_nonzero(self.a1 - self.b1)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_labels, logits=self.output)
    self.loss = tf.reduce_mean(cross_entropy)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    self.train = optimizer.minimize(self.loss)