This article collects typical usage examples of the cv2.countNonZero method in Python. If you are unsure what cv2.countNonZero does, how to call it, or what working code that uses it looks like, the curated examples below may help. You can also explore further usage examples of the cv2 module, in which this method is defined.
The following shows 15 code examples of cv2.countNonZero, sorted by popularity by default.
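Before the examples, a minimal sketch of the call itself: cv2.countNonZero takes a single-channel array and returns the number of non-zero elements, so binary masks are its typical input. The file name mask.png below is only a placeholder.
import cv2

mask = cv2.imread("mask.png", cv2.IMREAD_GRAYSCALE)   # single-channel input is required
_, binary = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)
white_pixels = cv2.countNonZero(binary)                # number of non-zero (white) pixels
coverage = white_pixels / float(binary.size)           # fraction of the mask that is "on"
print(white_pixels, coverage)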
Example 1: checkDifference
# Required import: import cv2 [as alias]
# Or: from cv2 import countNonZero [as alias]
def checkDifference(self, roi, n):
    # 6 is the numeric code for cv2.COLOR_BGR2GRAY; the named constant is clearer
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    roi = cv2.GaussianBlur(roi, (7, 7), 0)
    # feed the blurred ROI to the n-th background subtractor with its adaptation rate
    result = self.bsmog[n].apply(roi, None, self.bgAdapt[n])
    if self.debug:
        if n == 0:
            cv2.imshow(self.debugWindow0, result)
        if n == 1:
            cv2.imshow(self.debugWindow1, result)
        if n == 2:
            cv2.imshow(self.debugWindow2, result)
        if n == 3:
            cv2.imshow(self.debugWindow3, result)
    # count foreground pixels; the ROI is "active" when the count exceeds the threshold
    number = cv2.countNonZero(result)
    if number > Constants.ACTIVE_THRESHOLD:
        return True
    return False
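Example 1 depends on instance state (self.bsmog, self.bgAdapt, the debug windows) and a Constants.ACTIVE_THRESHOLD that are not shown. A standalone sketch of the same idea, with hypothetical names and an arbitrary threshold, might look like this:
import cv2

ACTIVE_THRESHOLD = 500  # hypothetical value; tune for the ROI size
subtractor = cv2.createBackgroundSubtractorMOG2()

def roi_is_active(roi, learning_rate=0.01):
    # blur to suppress noise, then count the foreground pixels reported by the subtractor
    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (7, 7), 0)
    foreground = subtractor.apply(gray, learningRate=learning_rate)
    return cv2.countNonZero(foreground) > ACTIVE_THRESHOLD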
Example 2: skeletonize
# Required import: import cv2 [as alias]
# Or: from cv2 import countNonZero [as alias]
# Required import: import numpy as np
def skeletonize(image_in):
    '''Takes a grayscale image and returns a binary skeleton image'''
    size = np.size(image_in)
    skel = np.zeros(image_in.shape, np.uint8)
    ret, image_edit = cv2.threshold(image_in, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    done = False
    while not done:
        # keep the pixels removed by this erosion/dilation step
        eroded = cv2.erode(image_edit, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(image_edit, temp)
        skel = cv2.bitwise_or(skel, temp)
        image_edit = eroded.copy()
        # stop once the eroded image contains no foreground pixels
        zeros = size - cv2.countNonZero(image_edit)
        if zeros == size:
            done = True
    return skel
Example 3: verifyCharSizes
# Required import: import cv2 [as alias]
# Or: from cv2 import countNonZero [as alias]
def verifyCharSizes(self, r):
    aspect = 0.5
    charAspect = r.shape[1] / r.shape[0]
    error = 0.7
    minH = 10
    maxH = 35
    minAspect = 0.05  # for number 1
    maxAspect = aspect + aspect * error
    area = cv2.countNonZero(r)
    bbArea = r.shape[0] * r.shape[1]
    percPixels = area / bbArea
    if percPixels <= 1 and minAspect < charAspect < maxAspect and minH <= r.shape[0] < maxH:
        return True
    else:
        return False
Example 4: skeletize
# Required import: import cv2 [as alias]
# Or: from cv2 import countNonZero [as alias]
# Required import: import numpy as np
def skeletize(img):
    size = np.size(img)
    skel = np.zeros(img.shape, np.uint8)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    done = False
    while not done:
        eroded = cv2.erode(img, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(img, temp)
        skel = cv2.bitwise_or(skel, temp)
        img = eroded.copy()
        zeroes = size - cv2.countNonZero(img)
        if zeroes == size:
            done = True
    return skel
Example 5: skeletonize
# Required import: import cv2 [as alias]
# Or: from cv2 import countNonZero [as alias]
def skeletonize(img):
    """OpenCV function to return a skeletonized version of img, a Mat object"""
    # hat tip to http://felix.abecassis.me/2011/09/opencv-morphological-skeleton/
    img = img.copy()  # don't clobber original
    skel = img.copy()
    skel[:, :] = 0
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    while True:
        eroded = cv2.morphologyEx(img, cv2.MORPH_ERODE, kernel)
        temp = cv2.morphologyEx(eroded, cv2.MORPH_DILATE, kernel)
        temp = cv2.subtract(img, temp)
        skel = cv2.bitwise_or(skel, temp)
        img[:, :] = eroded[:, :]
        if cv2.countNonZero(img) == 0:
            break
    return skel
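A possible way to drive the function above (a sketch; shape.png is a placeholder file name): the input should already be a single-channel image, and thresholding it first keeps the erosion loop well-behaved.
import cv2

gray = cv2.imread("shape.png", cv2.IMREAD_GRAYSCALE)
_, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
skeleton = skeletonize(binary)
cv2.imwrite("skeleton.png", skeleton)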
Example 6: _thread
# Required import: import cv2 [as alias]
# Or: from cv2 import countNonZero [as alias]
def _thread(self, args):
    image = args
    # convert image from BGR to HSV
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # only get colours in range
    mask = cv2.inRange(hsv, self.lower_colour, self.upper_colour)
    # obtain colour count
    colour_count = cv2.countNonZero(mask)
    # check whether to stop thread
    if self.is_stop:
        return
    # respond to colour count
    if colour_count < self.lower_threshold:
        self._text_to_speech("I just feel sad")
        self._display_emotion(SAD)
    elif colour_count > self.upper_threshold:
        self._text_to_speech("I'm so happy!")
        self._display_emotion(HAPPY)
Example 7: test_write_image_to_disk
# Required import: import cv2 [as alias]
# Or: from cv2 import countNonZero [as alias]
def test_write_image_to_disk():
    """Test for write_image_to_disk"""
    print("testing write_image_to_disk")
    # load the image from disk
    bgr_image = load_image("images/logo.png")
    # write image to disk
    write_image_to_disk("images/temp.png", bgr_image)
    # load the temp image from disk
    temp = load_image("images/temp.png")
    # now we check that the two images are equal
    assert bgr_image.shape == temp.shape
    difference = cv2.subtract(bgr_image, temp)
    # cv2.countNonZero only accepts single-channel arrays, so check each channel of the difference
    b, g, r = cv2.split(difference)
    assert cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0
Example 8: motionDetected
# Required import: import cv2 as cv [alias used below]
# Or: from cv2 import countNonZero [as alias]
# Required import: import numpy as np
def motionDetected(self, new_frame):
    frame = self.preprocessInputFrame(new_frame)
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    gray = cv.GaussianBlur(gray, (21, 21), 0)
    # the first frame only initializes the reference frame
    if self.prevFrame is None:
        self.prevFrame = gray
        return False
    frameDiff = cv.absdiff(gray, self.prevFrame)
    # kernel = np.ones((5, 5), np.uint8)
    opening = cv.morphologyEx(frameDiff, cv.MORPH_OPEN, None)  # noqa: computed but unused
    closing = cv.morphologyEx(frameDiff, cv.MORPH_CLOSE, None)  # noqa: computed but unused
    ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)
    height = np.size(th1, 0)
    width = np.size(th1, 1)
    nb = cv.countNonZero(th1)
    # percentage of pixels that changed between the two frames
    avg = (nb * 100) / (height * width)
    self.prevFrame = gray
    # cv.DrawContours(currentframe, self.currentcontours, (0, 0, 255), (0, 255, 0), 1, 2, cv.CV_FILLED)
    # cv.imshow("frame", current_frame)
    ret = avg > self.threshold  # trigger the alarm when the change exceeds the threshold
    if ret:
        self.updateMotionDetectionDts()
    return ret
Example 9: motion_detection
# Required import: import cv2 [as alias]
# Or: from cv2 import countNonZero [as alias]
def motion_detection(t_minus, t_now, t_plus):
    delta_view = delta_images(t_minus, t_now, t_plus)
    # 3 is the numeric code for cv2.THRESH_TOZERO
    retval, delta_view = cv2.threshold(delta_view, 16, 255, cv2.THRESH_TOZERO)
    cv2.normalize(delta_view, delta_view, 0, 255, cv2.NORM_MINMAX)
    img_count_view = cv2.cvtColor(delta_view, cv2.COLOR_RGB2GRAY)
    delta_count = cv2.countNonZero(img_count_view)
    # overlay the difference on the global `screen` image (dst is not used further here)
    dst = cv2.addWeighted(screen, 1.0, delta_view, 0.6, 0)
    delta_count_last = delta_count
    return delta_count
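Example 9 calls a helper delta_images() that is not included in this listing. A common three-frame differencing implementation (an assumption, not taken from the original project) keeps only the pixels that changed in both consecutive steps:
import cv2

def delta_images(t_minus, t_now, t_plus):
    # hypothetical helper: intersect the two frame-to-frame differences
    d1 = cv2.absdiff(t_plus, t_now)
    d2 = cv2.absdiff(t_now, t_minus)
    return cv2.bitwise_and(d1, d2)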
Example 10: plateColorJudge
# Required import: import cv2 [as alias]
# Or: from cv2 import countNonZero [as alias]
def plateColorJudge(src, r, adaptive_minsv):
    thresh = 0.45
    src_gray = colorMatch(src, r, adaptive_minsv)
    percent = cv2.countNonZero(src_gray) / (src_gray.shape[0] * src_gray.shape[1])
    if percent > thresh:
        return percent, True
    else:
        return percent, False
Example 11: skeletonize
# Required import: import cv2 [as alias]
# Or: from cv2 import countNonZero [as alias]
# Required import: import numpy as np
def skeletonize(image, size, structuring=cv2.MORPH_RECT):
    # determine the area (i.e. total number of pixels in the image),
    # initialize the output skeletonized image, and construct the
    # morphological structuring element
    area = image.shape[0] * image.shape[1]
    skeleton = np.zeros(image.shape, dtype="uint8")
    elem = cv2.getStructuringElement(structuring, size)
    # keep looping until the erosions remove all pixels from the image
    while True:
        # erode and dilate the image using the structuring element
        eroded = cv2.erode(image, elem)
        temp = cv2.dilate(eroded, elem)
        # subtract the temporary image from the original, eroded
        # image, then take the bitwise 'or' between the skeleton
        # and the temporary image
        temp = cv2.subtract(image, temp)
        skeleton = cv2.bitwise_or(skeleton, temp)
        image = eroded.copy()
        # if there are no more 'white' pixels in the image, then
        # break from the loop
        if area == area - cv2.countNonZero(image):
            break
    # return the skeletonized image
    return skeleton
Example 12: checkEyeStatus
# Required import: import cv2 [as alias]
# Or: from cv2 import countNonZero [as alias]
# Required import: import numpy as np
def checkEyeStatus(landmarks):
    mask = np.zeros(frame.shape[:2], dtype=np.float32)
    hullLeftEye = []
    for i in range(0, len(leftEyeIndex)):
        hullLeftEye.append((landmarks[leftEyeIndex[i]][0], landmarks[leftEyeIndex[i]][1]))
    cv2.fillConvexPoly(mask, np.int32(hullLeftEye), 255)
    hullRightEye = []
    for i in range(0, len(rightEyeIndex)):
        hullRightEye.append((landmarks[rightEyeIndex[i]][0], landmarks[rightEyeIndex[i]][1]))
    cv2.fillConvexPoly(mask, np.int32(hullRightEye), 255)
    # lenLeftEyeX = landmarks[leftEyeIndex[3]][0] - landmarks[leftEyeIndex[0]][0]
    # lenLeftEyeY = landmarks[leftEyeIndex[3]][1] - landmarks[leftEyeIndex[0]][1]
    # lenLeftEyeSquared = (lenLeftEyeX ** 2) + (lenLeftEyeY ** 2)
    # eyeRegionCount = cv2.countNonZero(mask)
    # normalizedCount = eyeRegionCount / np.float32(lenLeftEyeSquared)
    #############################################################################
    leftEAR = eye_aspect_ratio(hullLeftEye)
    rightEAR = eye_aspect_ratio(hullRightEye)
    ear = (leftEAR + rightEAR) / 2.0
    #############################################################################
    eyeStatus = 1  # 1 -> open, 0 -> closed
    if ear < thresh:
        eyeStatus = 0
    return eyeStatus
Developer: jaisayush | Project: Fatigue-Detection-System-Based-On-Behavioural-Characteristics-Of-Driver | Lines of code: 38 | Source file: blinkDetect.py
Example 13: get_active_cell
# Required import: import cv2 [as alias]
# Or: from cv2 import countNonZero [as alias]
# Required import: import numpy as np
def get_active_cell(self, image):
    # obtain motion between previous and current image
    current_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    delta = cv2.absdiff(self.previous_gray, current_gray)
    threshold_image = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
    # set cell height and width (integer division, so the values can be used as slice indices)
    height, width = threshold_image.shape[:2]
    cell_height = height // 2
    cell_width = width // 3
    # store motion level for each cell
    cells = np.array([0, 0, 0])
    cells[0] = cv2.countNonZero(threshold_image[cell_height:height, 0:cell_width])
    cells[1] = cv2.countNonZero(threshold_image[cell_height:height, cell_width:cell_width * 2])
    cells[2] = cv2.countNonZero(threshold_image[cell_height:height, cell_width * 2:width])
    # obtain the most active cell
    top_cell = np.argmax(cells)
    # return the most active cell, if threshold met
    if cells[top_cell] >= self.THRESHOLD:
        return top_cell
    else:
        return None
Example 14: detect_textarea
# Required import: import cv2 [as alias]
# Or: from cv2 import countNonZero [as alias]
# Required import: import numpy as np
def detect_textarea(self, arg):
    textarea = []
    small = cv2.cvtColor(arg, cv2.COLOR_RGB2GRAY)
    height, width, _ = arg.shape
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    grad = cv2.morphologyEx(small, cv2.MORPH_GRADIENT, kernel)
    _, bw = cv2.threshold(
        grad, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    kernel = cv2.getStructuringElement(
        cv2.MORPH_RECT, (10, 1))  # for historical docs
    connected = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
    contours, _ = cv2.findContours(
        connected.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    mask = np.zeros(bw.shape, dtype=np.uint8)
    for idx in range(len(contours)):
        x, y, w, h = cv2.boundingRect(contours[idx])
        # print x, y, w, h
        mask[y:y + h, x:x + w] = 0
        cv2.drawContours(mask, contours, idx, (255, 255, 255), -1)
        # ratio of filled (text) pixels inside the bounding box
        r = float(cv2.countNonZero(mask[y:y + h, x:x + w])) / (w * h)
        if r > 0.45 and (width * 0.9) > w > 15 and (height * 0.5) > h > 15:
            textarea.append([x, y, x + w - 1, y + h - 1])
            cv2.rectangle(arg, (x, y), (x + w - 1, y + h - 1), (0, 0, 255), 2)
    if len(textarea) > 1:
        textarea = self.filter_noisebox(textarea, height, width)
    return textarea, arg, height, width
Example 15: tail_length
# Required import: import cv2 [as alias]
# Or: from cv2 import countNonZero [as alias]
# Required import: import numpy as np
def tail_length(mask):
    checker = np.zeros((80), dtype=int)
    start = 800
    width_threshold = 200
    # scan the mask upwards in 10-pixel strips and mark strips containing enough white pixels
    for i in range(80):
        density = mask[start - 10:start, 0:500]
        white = cv2.countNonZero(density)
        # print(" ", white)
        # start += 10
        if white > width_threshold:
            checker[i] = 1
        else:
            checker[i] = 0
        start -= 10
    # the tail ends at the first run of `length_threshold` consecutive empty strips
    tail = 80
    length_threshold = 30
    for i in range(80 - length_threshold):
        over = 1
        for j in range(i, i + length_threshold):
            if checker[j] == 1:
                over = 0
                break
        if over == 1:
            tail = i
            break
    # print(checker)
    # print(tail)
    if tail < 5:
        tail = 0
    return tail
Developer: Ujwal2910 | Project: Smart-Traffic-Signals-in-India-using-Deep-Reinforcement-Learning-and-Advanced-Computer-Vision | Lines of code: 40 | Source file: cross_read_sequential.py