This article collects typical usage examples of Python's numpy.count_nonzero function. If you are wondering what exactly count_nonzero does, how to call it, or want to see it in real code, the hand-picked examples below should help.
Fifteen code examples of count_nonzero are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
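Before the examples, here is the behavior in a nutshell (a minimal sketch): count_nonzero counts the elements of an array that are not zero; since True is nonzero, it also counts True values in a boolean array, and the axis argument (NumPy >= 1.12) gives per-row or per-column counts.

import numpy as np

a = np.array([[0, 1, 7, 0],
              [3, 0, 2, 19]])
print(np.count_nonzero(a))          # 5 nonzero elements in total
print(np.count_nonzero(a, axis=0))  # per-column counts: [1 1 2 1]
print(np.count_nonzero(a > 2))      # 3: comparisons yield booleans, True counts as 1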
Example 1: verify
def verify(self, mask, exp):
maxDiffRatio = 0.02
expArea = np.count_nonzero(exp)
nonIntersectArea = np.count_nonzero(mask != exp)
curRatio = float(nonIntersectArea) / expArea
return curRatio < maxDiffRatio
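To see the threshold in action outside the original project, here is a standalone toy run (a sketch; the shapes and values are made up):

import numpy as np

exp = np.zeros((10, 10), dtype=np.uint8)
exp[2:8, 2:8] = 255                 # expected mask: a 6x6 block
mask = exp.copy()
mask[2, 2] = 0                      # one pixel of disagreement
ratio = np.count_nonzero(mask != exp) / float(np.count_nonzero(exp))
print(ratio)                        # 1/36 ~ 0.028, just over the 0.02 threshold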
Example 2: __init__
def __init__(self, image, skin_mask, labeled_image, label_number,
rectangle_slices):
"""Creates a new skin region.
image: The entire image in YCrCb mode.
skin_mask: The entire image skin mask.
labeled_image: A matrix of the size of the image with the region
label in each position. See scipy.ndimage.measurements.label.
label_number: The label number of this skin region.
rectangle_slices: The slices to get the rectangle of the image in
which the region fits as returned by
scipy.ndimage.measurements.find_objects.
"""
self.region_skin_pixels = np.count_nonzero(
labeled_image[rectangle_slices] == label_number
)
    self.bounding_rectangle_size = (
        rectangle_slices[0].stop - rectangle_slices[0].start
    ) * (
        rectangle_slices[1].stop - rectangle_slices[1].start
    )  # rows * columns of the bounding rectangle
self.bounding_rectangle_skin_pixels = np.count_nonzero(
skin_mask[rectangle_slices]
)
    self.bounding_rectangle_avarage_pixel_intensity = np.average(
        image[rectangle_slices].take([0], axis=2)  # the Y (luma) channel of the YCrCb image
    )
Example 3: despike
def despike(self, n=3, recursive=False, verbose=False):
"""
Replace spikes with np.NaN.
Removing spikes that are >= n * std.
default n = 3.
"""
result = self.values.copy()
outliers = (np.abs(self.values - nanmean(self.values)) >= n *
nanstd(self.values))
removed = np.count_nonzero(outliers)
result[outliers] = np.NaN
if verbose and not recursive:
print("Removing from %s\n # removed: %s" % (self.name, removed))
counter = 0
if recursive:
while outliers.any():
result[outliers] = np.NaN
outliers = np.abs(result - nanmean(result)) >= n * nanstd(result)
counter += 1
removed += np.count_nonzero(outliers)
if verbose:
print("Removing from %s\nNumber of iterations: %s # removed: %s" %
(self.name, counter, removed))
return Series(result, index=self.index, name=self.name)
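As a standalone illustration of the same idea (a sketch assuming numpy and pandas; np.nanmean/np.nanstd stand in for the helpers used above):

import numpy as np
import pandas as pd

s = pd.Series([1.0] * 20 + [100.0])   # one obvious spike at the end
outliers = np.abs(s - np.nanmean(s)) >= 3 * np.nanstd(s)
print(np.count_nonzero(outliers))     # 1: only the spike is flagged
s[outliers] = np.nan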
Example 4: cost_logit
def cost_logit(X, A, R, lam, n, k):
    '''
    The logistic cost function.
    n is the number of examples, k is the feature dimension.
    R is a 0/1 matrix indicating which entries of A are known.
    A sigmoid() helper is assumed to be defined elsewhere.
    '''
# get the matrices
# U, V, beta, alpha
U = X[:n*k]
U = np.reshape(U, (n,k))
V = X[n*k:2*n*k]
V = np.reshape(V, (n,k))
beta = X[2*n*k:2*n*k+n]
beta = np.reshape(beta, (n,1))
alpha = X[-1]
num_knowns = np.count_nonzero(R)
num_edges = np.count_nonzero(np.multiply(A, R))
num_nonedges = num_knowns - num_edges
h = alpha + np.dot(U, np.transpose(V))
# add beta to every row, column
for i in range(h.shape[0]):
for j in range(h.shape[1]):
h[i,j] += beta[i]+beta[j]
sigH = sigmoid(h)
J = ((-A/(2*num_edges))*np.log(sigH)) - (((1-A)/(2*num_nonedges))*np.log(1-sigH))
J = J*R
# regularizer
for i in range(J.shape[0]):
for j in range(J.shape[1]):
J[i,j] += lam*( np.abs(beta[i])**2 + np.abs(beta[j])**2 + np.linalg.norm(U[i,:])**2 + np.linalg.norm(V[j,:])**2 )
    # sum over known values
    cost = np.sum(J)
    return cost
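A side note on the two nested loops that add beta: assuming beta has shape (n, 1), broadcasting expresses the same update without Python-level loops (a sketch, not part of the original):

h = alpha + np.dot(U, V.T)
h += beta + beta.T   # (n,1) + (1,n) broadcasts to (n,n), adding beta[i] + beta[j] to h[i,j]

The regularizer loop can be vectorized the same way with np.linalg.norm(U, axis=1) and its column-wise counterpart for V.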
Example 5: print_results
def print_results(labels, predictions):
total = len(labels)
num_correct = total - np.count_nonzero(np.subtract(predictions,labels))
    print("\n***** ACCURACY *****")
    print("Overall Accuracy: %.3f percent\n" % ((float(num_correct) / float(total)) * 100.0))
results = pd.DataFrame()
results['real'] = labels
results['predicted'] = predictions
for label in np.unique(labels):
data = results[results['real'] == label]
num_correct = len(data) - np.count_nonzero(data['real'].sub(data['predicted']))
acc = ((float(num_correct)/float(len(data))) * 100.0)
        print("Total class label '%s' accuracy: %f percent" % (label, acc))
    print("")
# Distribution graphs
utils.print_distribution_graph(labels, 'Actual Distribution of Classes')
utils.print_distribution_graph(predictions, 'Distribution of Predictions')
# Distribution graphs for each class label
for label in np.unique(labels):
data = results[results['predicted'] == label]['real'].tolist()
title = "When class label '%s' was predicted, the actual class was:" % label
utils.print_distribution_graph(data, title)
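Two implementation notes on the accuracy line above: subtracting predictions from labels only works for numeric labels, and counting zero differences is just counting exact matches, so it is equivalent to (a sketch):

accuracy = np.count_nonzero(np.asarray(predictions) == np.asarray(labels)) / float(len(labels))

which also works for string labels.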
Example 6: norm_mean_cent
def norm_mean_cent(movies_np):
mean_movie= []
count_movie = []
for row in movies_np:
row_sum = np.sum(row)
count = np.count_nonzero(row)
count_movie.append(count)
mean_movie.append(row_sum/count)
count_user = []
mean_user = []
for row in movies_np.T:
row_sum = np.sum(row)
count = np.count_nonzero(row)
count_user.append(count)
mean_user.append(row_sum/count)
    movies_np = movies_np.astype(float)  # np.nan requires a float dtype
    movies_np[movies_np == 0] = np.nan   # treat missing (zero) ratings as NaN
mean_cent = []
i = 0
for row in movies_np:
mean_cent.append(row - mean_movie[i])
i += 1
mean_cent = np.array(mean_cent)
mean_cent = np.nan_to_num(mean_cent)
return mean_cent
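Assuming movies_np is a 2-D ratings matrix, the per-row loops above can be collapsed with count_nonzero's axis argument (NumPy >= 1.12); a sketch:

counts = np.count_nonzero(movies_np, axis=1)            # rated entries per movie
means = movies_np.sum(axis=1) / np.maximum(counts, 1)   # guard against all-zero rows

The np.maximum guard avoids the division-by-zero the loop version is exposed to when a movie has no ratings at all.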
Example 7: analyze_param
def analyze_param(net, layers):
# plt.figure()
    print('\n=============analyze_param start===============')
total_nonzero = 0
total_allparam = 0
percentage_list = []
for i, layer in enumerate(layers):
i += 1
W = net.params[layer][0].data
b = net.params[layer][1].data
# plt.subplot(3, 1, i);
# numBins = 2 ^ 8
# plt.hist(W.flatten(), numBins, color='blue', alpha=0.8)
# plt.show()
        print('W(%d) range = [%f, %f]' % (i, min(W.flatten()), max(W.flatten())))
        print('W(%d) mean = %f, std = %f' % (i, np.mean(W.flatten()), np.std(W.flatten())))
non_zero = (np.count_nonzero(W.flatten()) + np.count_nonzero(b.flatten()))
all_param = (np.prod(W.shape) + np.prod(b.shape))
this_layer_percentage = non_zero / float(all_param)
total_nonzero += non_zero
total_allparam += all_param
        print('non-zero W and b cnt = %d' % non_zero)
        print('total W and b cnt = %d' % all_param)
        print('percentage = %f\n' % this_layer_percentage)
percentage_list.append(this_layer_percentage)
    print('=====> summary:')
    print('non-zero W and b cnt = %d' % total_nonzero)
    print('total W and b cnt = %d' % total_allparam)
    print('percentage = %f' % (total_nonzero / float(total_allparam)))
    print('=============analyze_param ends ===============')
return (total_nonzero / float(total_allparam), percentage_list)
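The per-layer density reported here is just count_nonzero over total size; for a single array it reduces to (a minimal sketch):

W = np.array([[0.0, 0.5], [0.0, -1.2]])
density = np.count_nonzero(W) / float(W.size)   # 0.5
sparsity = 1.0 - density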
Example 8: test_that_build_pyramid_relaxes_mask
def test_that_build_pyramid_relaxes_mask():
from _stbt.match import _build_pyramid
mask = numpy.ones((20, 20, 3), dtype=numpy.uint8) * 255
mask[3:9, 3:9] = 0 # first 0 is an even row/col, last 0 is an odd row/col
n = mask.size - numpy.count_nonzero(mask)
assert n == 6 * 6 * 3
cv2.imwrite("/tmp/dave1.png", mask)
mask_pyramid = _build_pyramid(mask, 2, is_mask=True)
assert numpy.all(mask_pyramid[0] == mask)
downsampled = mask_pyramid[1]
cv2.imwrite("/tmp/dave2.png", downsampled)
assert downsampled.shape == (10, 10, 3)
    print(downsampled[:, :, 0])  # pylint:disable=unsubscriptable-object
n = downsampled.size - numpy.count_nonzero(downsampled)
assert 3 * 3 * 3 <= n <= 6 * 6 * 3
expected = [
# pylint:disable=bad-whitespace
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
[255, 0, 0, 0, 0, 0, 255, 255, 255, 255],
[255, 0, 0, 0, 0, 0, 255, 255, 255, 255],
[255, 0, 0, 0, 0, 0, 255, 255, 255, 255],
[255, 0, 0, 0, 0, 0, 255, 255, 255, 255],
[255, 0, 0, 0, 0, 0, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255]]
assert numpy.all(downsampled[:, :, 0] == expected) # pylint:disable=unsubscriptable-object
Example 9: __precision
def __precision(self, y_test, Y_vote):
""" precision extended to multi-class classification """
# predicted classes
y_hat = np.argmax(Y_vote, axis=1)
    if self.mode == "one-vs-one":
# need confusion matrix
conf = self.__confusion(y_test, Y_vote)
# consider each class separately
prec = np.zeros(self.numClasses)
        for c in range(self.numClasses):
# true positives: label is c, classifier predicted c
tp = conf[c,c]
# false positives: label is c, classifier predicted not c
fp = np.sum(conf[:,c]) - conf[c,c]
# precision
prec[c] = tp*1./(tp+fp)
elif self.mode == "one-vs-rest":
# consider each class separately
prec = np.zeros(self.numClasses)
        for c in range(self.numClasses):
# true positives: label is c, classifier predicted c
tp = np.count_nonzero((y_test==c) * (y_hat==c))
# false positives: label is c, classifier predicted not c
fp = np.count_nonzero((y_test==c) * (y_hat!=c))
prec[c] = tp*1./(tp+fp)
return prec
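For reference, both metrics can be read off a confusion matrix with the conf[true, predicted] convention used here (a sketch with made-up numbers):

conf = np.array([[5, 1],
                 [2, 4]])                                   # conf[true, predicted]
precision = np.diag(conf) / conf.sum(axis=0).astype(float)  # TP / predicted-as-c
recall = np.diag(conf) / conf.sum(axis=1).astype(float)     # TP / truly-c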
Example 10: testFeatureGenWithOnePoint
def testFeatureGenWithOnePoint(self):
# ensure that the start and end datetimes are the same, since the average calculation uses
# the total distance and the total duration
ts = esta.TimeSeries.get_time_series(self.testUUID)
trackpoint1 = ecwlo.Location({u'coordinates': [0,0], 'type': 'Point'})
ts.insert_data(self.testUUID, "analysis/recreated_location", trackpoint1)
testSeg = ecws.Section({"start_loc": trackpoint1,
"end_loc": trackpoint1,
"distance": 500,
"sensed_mode": 1,
"duration": 150,
"start_ts": arrow.now().timestamp,
"end_ts": arrow.now().timestamp,
"_id": 2,
"speeds":[],
"distances":[],
})
testSegEntry = ecwe.Entry.create_entry(self.testUUID, "analysis/cleaned_section", testSeg)
d = testSegEntry.data
m = testSegEntry.metadata
enufc.expand_start_end_data_times(d, m)
testSegEntry["data"] = d
testSegEntry["metadata"] = m
inserted_id = ts.insert(testSegEntry)
featureMatrix = np.zeros([1, len(self.pipeline.featureLabels)])
resultVector = np.zeros(1)
self.pipeline.updateFeatureMatrixRowWithSection(featureMatrix, 0, testSegEntry)
logging.debug("featureMatrix = %s" % featureMatrix)
self.assertEqual(np.count_nonzero(featureMatrix[0][5:16]), 0)
self.assertEqual(np.count_nonzero(featureMatrix[0][19:21]), 0)
Example 11: corners
def corners(self, bandNames=None):
"Return the corners of the tilted rectangle of valid image data as (x, y) pixel coordinates."
alpha = self.mask(bandNames)
alphaT = numpy.transpose(alpha)
ysize, xsize = alpha.shape
output = []
    for i in range(ysize):
if numpy.count_nonzero(alpha[i]) > 0:
break
output.append((numpy.argwhere(alpha[i]).mean(), i))
    for i in range(xsize):
if numpy.count_nonzero(alphaT[i]) > 0:
break
output.append((i, numpy.argwhere(alphaT[i]).mean()))
    for i in range(ysize - 1, -1, -1):  # include row 0 in the upward scan
if numpy.count_nonzero(alpha[i]) > 0:
break
output.append((numpy.argwhere(alpha[i]).mean(), i))
    for i in range(xsize - 1, -1, -1):  # include column 0 in the leftward scan
if numpy.count_nonzero(alphaT[i]) > 0:
break
output.append((i, numpy.argwhere(alphaT[i]).mean()))
return output
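A small design note: numpy.count_nonzero(alpha[i]) > 0 is simply a non-emptiness test, so alpha[i].any() would express the same check more directly; count_nonzero works just as well when only the "is there anything here" answer is needed.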
Example 12: __init__
def __init__(self,
op_type,
op_name,
output_index,
num_outputs,
value):
"""Constructor of InfOrNanError.
Args:
op_type: Type name of the op that generated the tensor that generated the
`inf`(s) or `nan`(s) (e.g., `Div`).
op_name: Name of the op that generated the tensor with `inf`(s) or
`nan`(s). This name is set by client and can be `None` if it is unset.
output_index: The 0-based output index of the tensor that contains
`inf`(s) or `nan`(s).
num_outputs: Total number of outputs of the operation.
value: The tensor value that contains `inf`(s) or `nan`(s).
"""
self._op_type = op_type
self._op_name = op_name
self._output_index = output_index
self._num_outputs = num_outputs
self._value = value
self._total_count = np.size(value)
self._inf_count = np.count_nonzero(np.isinf(value))
self._nan_count = np.count_nonzero(np.isnan(value))
super(InfOrNanError, self).__init__(self._get_error_message())
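The two counting lines generalize to any tensor; a minimal demonstration:

import numpy as np

value = np.array([1.0, np.inf, -np.inf, np.nan, 0.0])
print(np.count_nonzero(np.isinf(value)))   # 2
print(np.count_nonzero(np.isnan(value)))   # 1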
Example 13: go
def go(sltree, score, X_train, Y_train, X_test, Y_test):
t_train_begin = time()
sltree.train(X_train, Y_train)
t_train_end = time()
t_test_begin = time()
Y_predict_train, AP_train, complexity_train, depths_train = sltree.test(X_train, Y_train, return_complexity=True, return_depth=True)
Y_predict_test, AP_test, complexity_test, depths_test = sltree.test(X_test, Y_test, return_complexity=True, return_depth=True)
t_test_end = time()
n_acc_train = np.count_nonzero(Y_predict_train == Y_train)
n_acc_test = np.count_nonzero(Y_predict_test == Y_test)
score.update({'acc_train':float(n_acc_train)/Y_predict_train.shape[0],
'n_acc_train':n_acc_train,
'AP_train':AP_train,
'mAP_train':np.mean(AP_train),
'complexity_train':complexity_train,
'avg_complexity_train':np.mean(complexity_train),
'depths_train':depths_train,
'avg_depth_train':np.mean(depths_train),
'acc_test':float(n_acc_test)/Y_predict_test.shape[0],
'n_acc_test':n_acc_test,
'AP_test':AP_test,
'mAP_test':np.mean(AP_test),
'complexity_test':complexity_test,
'avg_complexity_test':np.mean(complexity_test),
'depths_test':depths_test,
'avg_depth_test':np.mean(depths_test),
'time_test':t_test_end-t_test_begin})
Example 14: get_stats
def get_stats(self):
    # number of trades (each trade occupies two rows: entry and exit)
    num_of_trades = self.record.shape[0] // 2
    # exit-reason counts: count_nonzero on a boolean mask counts the True entries
    num_of_profitlock = np.count_nonzero(self.record[:, 2] == "profit_lock_out")
    num_of_stopout = np.count_nonzero(self.record[:, 2] == "trailing_stop_out")
    num_of_stopout += np.count_nonzero(self.record[:, 2] == "hard_stop_out")
    num_of_reversed_out = np.count_nonzero(self.record[:, 2] == "reversed_out")
    num_of_time_out = np.count_nonzero(self.record[:, 2] == "time_out")
    # PNL: rows alternate entry (even index) / exit (odd index)
    for i in range(1, num_of_trades * 2, 2):
        if self.record[i, 3] == "long":
            self.pnl = np.append(self.pnl, float(self.record[i, 4]) - float(self.record[i - 1, 4]))
        elif self.record[i, 3] == "short":
            self.pnl = np.append(self.pnl, float(self.record[i - 1, 4]) - float(self.record[i, 4]))
    self.pnl = self.pnl[1:]  # drop the initial placeholder element
    # output statistical results (tickBase is assumed to be defined at module scope)
    print("# trades", num_of_trades, "# profit_lock", num_of_profitlock,
          "# stopout", num_of_stopout, "# reversed_out",
          num_of_reversed_out, "# time_out", num_of_time_out)
    print("P&L Summary Stats:", len(self.pnl), self.pnl.mean() / tickBase,
          self.pnl.std() / tickBase, self.pnl.min() / tickBase, self.pnl.max() / tickBase)
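Assuming the rows really do alternate entry/exit, the PNL loop could also be written with a sign vector (a sketch, not the original author's code):

entries = self.record[0::2, 4].astype(float)
exits = self.record[1::2, 4].astype(float)
signs = np.where(self.record[1::2, 3] == "long", 1.0, -1.0)
self.pnl = signs * (exits - entries)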
Example 15: relearn
def relearn(self, test_size=0):
    samples, weights, targets = self.learning_component.get_training_set(const_weight=True)
    # train_test_split comes from sklearn.model_selection
    train_samples, test_samples, train_targets, test_targets = train_test_split(
        samples, targets, test_size=test_size, random_state=np.random.RandomState(0))
count_positives = 1.0*np.count_nonzero(train_targets)
count_negatives = 1.0*(len(train_targets) - count_positives)
positive_weight = count_negatives/len(train_targets)
negative_weight = count_positives/len(train_targets)
weights = np.array([positive_weight if target == 1 else negative_weight for target in train_targets])
self.classifier.fit(train_samples, train_targets, sample_weight=weights)
self.learning_component.new_samples_count = 0
if len(test_samples) > 0:
test_result = [self.classifier.predict(sample) for sample in test_samples]
        true_positives = 0.0
        count_test_positives = 1.0 * np.count_nonzero(test_targets)    # actual positives
        count_result_positives = 1.0 * np.count_nonzero(test_result)   # predicted positives
        for i in range(len(test_targets)):
            if test_targets[i] == test_result[i] and test_result[i] == 1:
                true_positives += 1
        precision = true_positives / count_result_positives   # TP / predicted positives
        recall = true_positives / count_test_positives        # TP / actual positives
        print("Precision:", precision)
        print("Recall:", recall)
        if precision + recall != 0:
            print("F-score:", 2 * precision * recall / (precision + recall))
        else:
            print("F-score:", 0)
self.positive_class_index = 0
for elem in self.classifier.classes_:
if elem != 1.0:
self.positive_class_index += 1
else:
break
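For comparison, scikit-learn computes the same quantities directly; a sketch assuming test_targets and test_result are flat arrays of binary labels with 1 as the positive class:

from sklearn.metrics import precision_score, recall_score, f1_score

precision = precision_score(test_targets, test_result)
recall = recall_score(test_targets, test_result)
f1 = f1_score(test_targets, test_result)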