本文整理匯總了Python中cv2.UMat方法的典型用法代碼示例。如果您正苦於以下問題:Python cv2.UMat方法的具體用法?Python cv2.UMat怎麽用?Python cv2.UMat使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類cv2
的用法示例。
在下文中一共展示了cv2.UMat方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: _findContours
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import UMat [as 別名]
def _findContours(self):
    """Extract polygon contours from every binary mask in ``self.masks``.

    Each mask is wrapped in a ``cv2.UMat`` before calling
    ``cv2.findContours``; only external, non-hierarchical contours are
    accepted.  Returns one list per mask, each containing the flattened
    coordinate list of a detected contour.
    """
    all_polygons = []
    for raw_mask in self.masks.detach().numpy():
        wrapped = cv2.UMat(raw_mask)
        found, _hierarchy = cv2.findContours(
            wrapped, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )
        flattened = []
        for segment in found:
            assert len(segment.shape) == 3
            assert segment.shape[1] == 1, "Hierarchical contours are not allowed"
            flattened.append(segment.reshape(-1).tolist())
        all_polygons.append(flattened)
    return all_polygons
示例2: find_address
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import UMat [as 別名]
def find_address(crop_gray, crop_org):
    """Locate the address field via template matching and crop it out.

    Matches the pre-scaled address template against the grayscale image
    ``crop_gray`` and crops the region to the right of the best match
    from the original image ``crop_org``.  Also draws the match rectangle
    on ``crop_gray``.  Relies on module globals ``pixel_x`` and ``x``.
    Returns the cropped region as a ``cv2.UMat``.
    """
    tmpl = cv2.UMat(cv2.imread('address_mask_%s.jpg' % pixel_x, 0))
    tmpl_w, tmpl_h = cv2.UMat.get(tmpl).shape[::-1]
    scores = cv2.matchTemplate(crop_gray, tmpl, cv2.TM_CCOEFF_NORMED)
    _lo_v, _hi_v, _lo_at, hi_at = cv2.minMaxLoc(scores)
    # The address text sits just right of the matched label.
    tl = (hi_at[0] + tmpl_w, hi_at[1] - int(20 * x))
    br = (tl[0] + int(1700 * x), tl[1] + int(550 * x))
    region = cv2.UMat.get(crop_org)[tl[1] - 10:br[1], tl[0] - 10:br[0]]
    cv2.rectangle(crop_gray, tl, br, 255, 2)
    return cv2.UMat(region)
示例3: _findContours
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import UMat [as 別名]
def _findContours(self):
    """Extract polygon contours from every binary mask in ``self.masks``.

    Uses the ``cv2_util.findContours`` wrapper with external,
    non-hierarchical contours only.  Returns one list per mask, each
    containing the flattened coordinate list of a detected contour.
    """
    per_mask_polys = []
    for np_mask in self.masks.detach().numpy():
        wrapped = cv2.UMat(np_mask)
        found, _hier = cv2_util.findContours(
            wrapped, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )
        polys = []
        for seg in found:
            assert len(seg.shape) == 3
            assert seg.shape[1] == 1, "Hierarchical contours are not allowed"
            polys.append(seg.reshape(-1).tolist())
        per_mask_polys.append(polys)
    return per_mask_polys
示例4: __init__
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import UMat [as 別名]
def __init__(self, path, queueSize=128):
    """Open the video file at ``path`` and pre-allocate UMat frame buffers.

    Parameters
    ----------
    path : str
        Path handed to ``cv2.VideoCapture``.
    queueSize : int
        Maximum number of frames held in the internal queue (and the
        number of UMat buffers pre-allocated).
    """
    # Initialize the file video stream along with the boolean used to
    # indicate whether the reader thread should be stopped.
    self.stream = cv2.VideoCapture(path)
    self.stopped = False
    self.count = 0
    # Queue used to hand frames read from the video file to consumers.
    self.Q = Queue(maxsize=queueSize)
    # Frame geometry is needed up front to size the UMat buffers. See:
    # https://docs.opencv.org/4.1.0/d4/d15/group__videoio__flags__base.html#gaeb8dd9c89c10a5c63c139bf7c4f5704d
    self.width = int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
    self.height = int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # This version stores images as UMat, so all buffers are allocated
    # beforehand (comprehension instead of a placeholder list that is
    # then overwritten index by index).
    self.frames = [
        cv2.UMat(self.height, self.width, cv2.CV_8UC3)
        for _ in range(queueSize)
    ]
示例5: get_name
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import UMat [as 別名]
def get_name(img):
    """OCR the name field from a BGR ID-card crop.

    Works on the red channel (best contrast for the printed text): it is
    histogram-equalized, adaptively thresholded (mean, block 151), and
    resized before being handed to the variable-length OCR helper.
    Relies on module-level helpers ``hist_equal``, ``img_resize`` and
    ``get_result_vary_length``.
    """
    print('name')
    # cv2.split hands back plain Mats even for UMat input, so re-wrap.
    _, _, channel = cv2.split(img)
    channel = cv2.UMat(channel)
    channel = hist_equal(channel)
    channel = cv2.adaptiveThreshold(
        channel, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 151, 50
    )
    channel = img_resize(channel, 150)
    img = img_resize(img, 150)
    return get_result_vary_length(channel, 'chi_sim', img, '--psm 7')
示例6: binaryMask
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import UMat [as 別名]
def binaryMask(frame, x0, y0, width, height, framecount, plot ):
    """Threshold the ROI at (x0, y0, width, height) into a binary mask.

    Draws the ROI rectangle on ``frame``, then blurs, adaptively
    thresholds and Otsu-binarizes the ROI.  Depending on the module
    globals, the result is saved, queued for gesture prediction on a
    background thread, or used for layer visualization.  Returns the
    binary ROI image.
    """
    global guessGesture, visualize, mod, saveImg
    cv2.rectangle(frame, (x0, y0), (x0 + width, y0 + height), (0, 255, 0), 1)
    region = frame[y0:y0 + height, x0:x0 + width]
    grayscale = cv2.cvtColor(region, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(grayscale, (5, 5), 2)
    adaptive = cv2.adaptiveThreshold(
        blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY_INV, 11, 2
    )
    ret, res = cv2.threshold(
        adaptive, minValue, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
    )
    if saveImg == True:
        saveROIImg(res)
    elif guessGesture == True and (framecount % 5) == 4:
        # Run prediction every 5th frame, off the main thread.
        worker = threading.Thread(target=myNN.guessGesture, args=[mod, res])
        worker.start()
    elif visualize == True:
        layer = int(input("Enter which layer to visualize "))
        cv2.waitKey(1)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False
    return res
#%%
# This is the new mask mode. It simply tries to remove the background content by taking a image of the
# background and subtracts it from the new frame contents of the ROI window.
# So in order to use it correctly, keep the contents of ROI window stable and without your hand in it
# and then press 'x' key. If you can see the contents of ROI window all blank then it means you are
# good to go for gesture prediction
示例7: generate_mask
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import UMat [as 別名]
def generate_mask(x):
    """Resize the nine field-mask templates and save the scaled copies.

    For each ID-card field, reads ``<field>_mask.jpg``, resizes it with
    ``img_resize_x`` and writes it back as ``<field>_mask_<pixel_x>.jpg``.
    Relies on module globals ``pixel_x`` and ``img_resize_x``; the ``x``
    parameter is kept for interface compatibility.

    The original nine-fold copy-paste (read/resize/write per field) is
    collapsed into a single data-driven loop producing identical files.
    """
    field_names = (
        'name', 'sex', 'nation', 'birth', 'year',
        'month', 'day', 'address', 'idnum',
    )
    for field in field_names:
        mask_pic = cv2.UMat(cv2.imread('%s_mask.jpg' % field))
        mask_pic = img_resize_x(mask_pic)
        cv2.imwrite('%s_mask_%s.jpg' % (field, pixel_x), mask_pic)
#用於生成模板
示例8: img_resize_gray
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import UMat [as 別名]
def img_resize_gray(imgorg):
    """Resize ``imgorg`` to a width of ``3840*x`` px (keeping aspect ratio).

    Returns a tuple ``(equalized_gray, resized_color)`` — the histogram-
    equalized grayscale version and the resized color image.  Relies on
    module globals ``x`` and ``hist_equal``.
    """
    resized = imgorg
    shape = cv2.UMat.get(resized).shape
    src_h = shape[0]
    src_w = shape[1]
    # Scaling constants were tuned against a 3840-pixel-wide reference.
    new_h = int(src_h * 3840 * x / src_w)
    resized = cv2.resize(
        src=resized,
        dsize=(int(3840 * x), new_h),
        interpolation=cv2.INTER_CUBIC,
    )
    return hist_equal(cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)), resized
示例9: find_name
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import UMat [as 別名]
def find_name(crop_gray, crop_org):
    """Locate the name field via template matching and crop it out.

    Matches the pre-scaled name template against ``crop_gray`` and crops
    the region to the right of the best match from ``crop_org``.  Also
    draws the match rectangle on ``crop_gray``.  Relies on module globals
    ``pixel_x`` and ``x``.  Returns the crop as a ``cv2.UMat``.
    """
    tmpl = cv2.UMat(cv2.imread('name_mask_%s.jpg' % pixel_x, 0))
    tmpl_w, tmpl_h = cv2.UMat.get(tmpl).shape[::-1]
    scores = cv2.matchTemplate(crop_gray, tmpl, cv2.TM_CCOEFF_NORMED)
    _lo_v, _hi_v, _lo_at, hi_at = cv2.minMaxLoc(scores)
    # The name text sits just right of the matched label.
    tl = (hi_at[0] + tmpl_w, hi_at[1] - int(20 * x))
    br = (tl[0] + int(700 * x), tl[1] + int(300 * x))
    region = cv2.UMat.get(crop_org)[tl[1] - 10:br[1], tl[0] - 10:br[0]]
    cv2.rectangle(crop_gray, tl, br, 255, 2)
    return cv2.UMat(region)
示例10: find_sex
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import UMat [as 別名]
def find_sex(crop_gray, crop_org):
    """Locate the sex field via template matching and crop it out.

    Matches the pre-scaled sex template against ``crop_gray`` and crops
    the region to the right of the best match from ``crop_org``.  Also
    draws the match rectangle on ``crop_gray``.  Relies on module globals
    ``pixel_x`` and ``x``.  Returns the crop as a ``cv2.UMat``.
    """
    tmpl = cv2.UMat(cv2.imread('sex_mask_%s.jpg' % pixel_x, 0))
    tmpl_w, tmpl_h = cv2.UMat.get(tmpl).shape[::-1]
    scores = cv2.matchTemplate(crop_gray, tmpl, cv2.TM_CCOEFF_NORMED)
    _lo_v, _hi_v, _lo_at, hi_at = cv2.minMaxLoc(scores)
    # The sex character sits just right of the matched label.
    tl = (hi_at[0] + tmpl_w, hi_at[1] - int(20 * x))
    br = (tl[0] + int(300 * x), tl[1] + int(300 * x))
    region = cv2.UMat.get(crop_org)[tl[1] - 10:br[1], tl[0] - 10:br[0]]
    cv2.rectangle(crop_gray, tl, br, 255, 2)
    return cv2.UMat(region)
示例11: find_idnum
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import UMat [as 別名]
def find_idnum(crop_gray, crop_org):
    """Locate the ID-number field via template matching and crop it out.

    Matches the pre-scaled idnum template against ``crop_gray`` and
    crops the region to the right of the best match from ``crop_org``.
    Also draws the match rectangle on ``crop_gray``.  Relies on module
    globals ``pixel_x`` and ``x``.  Returns the crop as a ``cv2.UMat``.
    """
    tmpl = cv2.UMat(cv2.imread('idnum_mask_%s.jpg' % pixel_x, 0))
    tmpl_w, tmpl_h = cv2.UMat.get(tmpl).shape[::-1]
    scores = cv2.matchTemplate(crop_gray, tmpl, cv2.TM_CCOEFF_NORMED)
    _lo_v, _hi_v, _lo_at, hi_at = cv2.minMaxLoc(scores)
    # The 18-digit number sits right of the matched label; it is wide,
    # hence the 2300*x crop width.
    tl = (hi_at[0] + tmpl_w, hi_at[1] - int(20 * x))
    br = (tl[0] + int(2300 * x), tl[1] + int(300 * x))
    region = cv2.UMat.get(crop_org)[tl[1] - 10:br[1], tl[0] - 10:br[0]]
    cv2.rectangle(crop_gray, tl, br, 255, 2)
    return cv2.UMat(region)
示例12: get_sex
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import UMat [as 別名]
def get_sex(img):
    """OCR the single-character sex field from a BGR crop.

    Works on the red channel: histogram-equalize, adaptive-threshold
    (Gaussian, block 151), resize, then run fixed-length OCR for exactly
    one chi_sim character.  Relies on module-level helpers ``hist_equal``,
    ``img_resize`` and ``get_result_fix_length``.
    """
    # cv2.split hands back plain Mats even for UMat input, so re-wrap.
    _, _, channel = cv2.split(img)
    print('sex')
    channel = cv2.UMat(channel)
    channel = hist_equal(channel)
    channel = cv2.adaptiveThreshold(
        channel, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY, 151, 50
    )
    channel = img_resize(channel, 150)
    return get_result_fix_length(channel, 1, 'chi_sim', '--psm 10')
示例13: get_idnum_and_birth
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import UMat [as 別名]
def get_idnum_and_birth(img):
    """OCR the 18-character ID number and derive the birth-date digits.

    Works on the red channel: histogram-equalize, adaptive-threshold
    (Gaussian, block 151), resize, then run variable-length OCR with the
    English model.  Returns ``(idnum, idnum[6:14])`` — characters 6..13
    of the recognized string (the YYYYMMDD birth-date portion of a
    Chinese ID number).
    """
    # cv2.split hands back plain Mats even for UMat input, so re-wrap.
    _, _, channel = cv2.split(img)
    print('idnum')
    channel = cv2.UMat(channel)
    channel = hist_equal(channel)
    channel = cv2.adaptiveThreshold(
        channel, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY, 151, 50
    )
    channel = img_resize(channel, 150)
    pil_img = Image.fromarray(cv2.UMat.get(channel).astype('uint8'))
    idnum = get_result_vary_length(channel, 'eng', pil_img, '--psm 8 ')
    return idnum, idnum[6:14]
示例14: get_spots
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import UMat [as 別名]
def get_spots(self, thermal_image):
    """Interactively collect spot markers on ``thermal_image``.

    Opens an 'Image' window and lets the user click spot locations via
    the ``CFlir.draw_spots`` mouse callback; pressing Enter (key code 13,
    or 141 on some numeric keypads) finishes the session.  The clicked
    spots are drawn as filled circles, then recovered as contours by
    thresholding, and stored in ``self.spots``.  Toggles the class-level
    ``CFlir.drawing`` flag while the window is open.
    """
    CFlir.drawing = True
    image_copy = thermal_image.copy()
    original_copy = image_copy.copy()
    if len(original_copy.shape) < 3:
        # Single-channel input: keep a colormapped copy to draw on.
        cmap_copy = cv.applyColorMap(original_copy, cv.COLORMAP_JET)
    point = []        # current click position, filled by the mouse callback
    spot_points = []  # committed spot centers
    flag = [True]     # callback sets flag[0] = False to commit a point
    cv.namedWindow('Image')
    cv.setMouseCallback('Image', CFlir.draw_spots, (point, flag) )
    while(1):
        # Redraw from a fresh copy every frame so the preview circle
        # follows the mouse instead of smearing.
        image_copy = original_copy.copy()
        for i in range(0,len(spot_points)):
            cv.circle(image_copy, spot_points[i] , 5, 0, -1)
            try:
                # cmap_copy only exists for single-channel input.
                cv.circle(cmap_copy, spot_points[i] , 5, 0, -1)
            except:
                cv.circle(original_copy, spot_points[i] , 5, 0, -1)
        if len(point) > 0:
            # Preview circle at the most recent mouse position.
            cv.circle(image_copy, tuple(point) , 5, 0, -1)
        if flag[0] == False:
            # The callback signalled a click: commit this point.
            spot_points.append(tuple(point))
            flag[0] = True
        cv.imshow('Image', image_copy)
        k = cv.waitKey(1) & 0xff
        if k == 13 or k == 141 :
            break
    CFlir.drawing = False
    cv.destroyAllWindows()
    # origi_copy = cv.UMat(origi_copy)
    # Recover the drawn circles (pixel value 0) as contours by
    # thresholding a grayscale version of the annotated image.
    if len(original_copy.shape) == 3:
        gray = cv.cvtColor(original_copy, cv.COLOR_BGR2GRAY)
    else:
        gray = cv.cvtColor(cmap_copy, cv.COLOR_BGR2GRAY)
    ret,thresh = cv.threshold(gray, 10, 255, cv.THRESH_BINARY_INV)
    contours, hierarchy = cv.findContours(thresh,cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE )
    self.spots = contours
示例15: skinMask
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import UMat [as 別名]
def skinMask(frame, x0, y0, width, height, framecount, plot):
    """Segment the hand in the ROI using an HSV skin-color mask.

    Draws the ROI rectangle on ``frame``, keeps only skin-colored pixels
    in the ROI, cleans the mask up (erode/dilate/blur), and returns the
    masked ROI converted to grayscale.  Depending on the module globals
    the result is saved, queued for gesture prediction on a background
    thread, or used for layer visualization.
    """
    global guessGesture, visualize, mod, saveImg
    # HSV range covering typical skin tones.
    skin_lo = np.array([0, 50, 80])
    skin_hi = np.array([30, 200, 255])
    cv2.rectangle(frame, (x0, y0), (x0 + width, y0 + height), (0, 255, 0), 1)
    region = frame[y0:y0 + height, x0:x0 + width]
    hsv_region = cv2.cvtColor(region, cv2.COLOR_BGR2HSV)
    # Mask skin-colored pixels, then clean up noise and soften edges.
    skin = cv2.inRange(hsv_region, skin_lo, skin_hi)
    skin = cv2.erode(skin, skinkernel, iterations=1)
    skin = cv2.dilate(skin, skinkernel, iterations=1)
    skin = cv2.GaussianBlur(skin, (15, 15), 1)
    # Keep only the skin pixels of the original ROI, then go grayscale.
    res = cv2.bitwise_and(region, region, mask=skin)
    res = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    if saveImg == True:
        saveROIImg(res)
    elif guessGesture == True and (framecount % 5) == 4:
        # Run prediction every 5th frame, off the main thread.
        worker = threading.Thread(target=myNN.guessGesture, args=[mod, res])
        worker.start()
    elif visualize == True:
        layer = int(input("Enter which layer to visualize "))
        cv2.waitKey(0)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False
    return res
#%%