This article collects and summarizes typical usage examples of the cv2.TM_SQDIFF_NORMED attribute in Python. If you are wondering what cv2.TM_SQDIFF_NORMED is for, how to use it, or what real-world code that uses it looks like, the curated examples below may help. You can also explore further usage examples of cv2, the module this attribute belongs to.
The following 15 code examples of cv2.TM_SQDIFF_NORMED are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
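Before the examples, a quick orientation: cv2.TM_SQDIFF_NORMED is one of the comparison methods accepted by cv2.matchTemplate. It computes a normalized squared difference, so scores lie in [0, 1] and lower values mean better matches, which is why the best location is taken from min_loc rather than max_loc. A minimal sketch, where 'scene.png' and 'patch.png' are placeholder file names rather than files from the examples below:

import cv2

# Placeholder inputs: a screenshot/scene and a smaller template to locate in it.
scene = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)
patch = cv2.imread('patch.png', cv2.IMREAD_GRAYSCALE)

# TM_SQDIFF_NORMED: normalized squared difference in [0, 1]; lower is better.
result = cv2.matchTemplate(scene, patch, cv2.TM_SQDIFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)

# For the *_SQDIFF methods the best match is at the minimum.
top_left = min_loc
h, w = patch.shape
bottom_right = (top_left[0] + w, top_left[1] + h)
print(top_left, bottom_right, min_val)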
Example 1: findAllMatches
# Required import: import cv2 [as alias]
# Or: from cv2 import TM_SQDIFF_NORMED [as alias]
def findAllMatches(self, needle, similarity):
""" Find all matches for ``needle`` with confidence better than or equal to ``similarity``.
Returns an array of tuples ``(position, confidence)`` if match(es) is/are found,
or an empty array otherwise.
"""
positions = []
method = cv2.TM_CCOEFF_NORMED
match = cv2.matchTemplate(self.haystack, self.needle, method)
indices = (-match).argpartition(100, axis=None)[:100] # Review the 100 top matches
unraveled_indices = numpy.array(numpy.unravel_index(indices, match.shape)).T
for location in unraveled_indices:
y, x = location
confidence = match[y][x]
if method == cv2.TM_SQDIFF_NORMED or method == cv2.TM_SQDIFF:
if confidence <= 1-similarity:
positions.append(((x, y), confidence))
else:
if confidence >= similarity:
positions.append(((x, y), confidence))
positions.sort(key=lambda x: (x[0][1], x[0][0]))
return positions
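The TM_SQDIFF / TM_SQDIFF_NORMED branch in Example 1 inverts the similarity threshold because those methods report dissimilarity rather than similarity. The same thresholding can be written standalone; the helper below is a hypothetical sketch (not part of the original class) and assumes ``match`` is the result of cv2.matchTemplate called with cv2.TM_SQDIFF_NORMED:

import numpy as np

def filter_matches_sqdiff_normed(match, similarity=0.9):
    """Return ((x, y), score) pairs whose TM_SQDIFF_NORMED score passes ``similarity``."""
    # For TM_SQDIFF_NORMED, 0.0 is a perfect match, so a similarity of 0.9
    # corresponds to a score of at most 1 - 0.9 = 0.1.
    ys, xs = np.where(match <= 1 - similarity)
    return [((int(x), int(y)), float(match[y, x])) for y, x in zip(ys, xs)]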
Example 2: locate_img
# Required import: import cv2 [as alias]
# Or: from cv2 import TM_SQDIFF_NORMED [as alias]
def locate_img(image, template):
img = image.copy()
# Note: ``method`` is assumed to be defined at module level in the original script
# (one of the cv2.TM_* constants, e.g. cv2.TM_SQDIFF_NORMED).
res = cv2.matchTemplate(img, template, method)
print(res)
print(res.shape)
cv2.imwrite('image/shape.png', res)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
print(cv2.minMaxLoc(res))
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
h, w = template.shape
bottom_right = (top_left[0] + w, top_left[1]+h)
cv2.rectangle(img, top_left, bottom_right, 255, 2)
cv2.imwrite('image/tt.jpg', img)
Example 3: find_watermark_from_gray
# Required import: import cv2 [as alias]
# Or: from cv2 import TM_SQDIFF_NORMED [as alias]
def find_watermark_from_gray(self, gray_img, watermark_template_gray_img):
"""
從原圖的灰度圖中尋找水印位置
:param gray_img: 原圖的灰度圖
:param watermark_template_gray_img: 水印模板的灰度圖
:return: x1, y1, x2, y2
"""
# Load the images in gray scale
method = cv2.TM_CCOEFF
# Apply template Matching
res = cv2.matchTemplate(gray_img, watermark_template_gray_img, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
x, y = min_loc
else:
x, y = max_loc
return x, y, x + self.watermark_template_w, y + self.watermark_template_h
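Example 3 expects grayscale inputs and relies on the watermark template size being stored on the instance. Below is a minimal sketch of how those inputs might be prepared; the file names are placeholders and the attribute setup is an assumption about the surrounding class:

import cv2

# Placeholder files: the photo to inspect and the watermark template.
original = cv2.imread('photo_with_watermark.jpg')
watermark_template = cv2.imread('watermark_template.png')

gray_img = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
watermark_template_gray_img = cv2.cvtColor(watermark_template, cv2.COLOR_BGR2GRAY)

# The method above adds the stored template size to the matched corner;
# for a grayscale image, shape is (height, width).
watermark_template_h, watermark_template_w = watermark_template_gray_img.shape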
Example 4: match_template1
# Required import: import cv2 [as alias]
# Or: from cv2 import TM_SQDIFF_NORMED [as alias]
def match_template1(template, img, plot=False, method=cv2.TM_SQDIFF_NORMED):
img = cv2.imread(img, 0).copy()
template = cv2.imread(template, 0)
w, h = template.shape[::-1]
# ``lib`` and ``OPENCV`` are module-level settings in the original source that choose
# between OpenCV and a skimage-style ``match_template``; ``np`` and ``plt`` are the
# usual numpy / matplotlib.pyplot imports.
if lib == OPENCV:
res = cv2.matchTemplate(img, template, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
else:
result = match_template(img, template)
ij = np.unravel_index(np.argmax(result), result.shape)
top_left = ij[::-1]
bottom_right = (top_left[0] + w, top_left[1] + h)
if plot:
cv2.rectangle(img, top_left, bottom_right, 255, 5)
plt.subplot(121)
plt.imshow(img)
plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
plt.subplot(122)
plt.imshow(template)
plt.show()
return top_left, bottom_right
Example 5: MatchingMethod
# Required import: import cv2 [as alias]
# Or: from cv2 import TM_SQDIFF_NORMED [as alias]
def MatchingMethod(param):
global match_method
match_method = param
## [copy_source]
img_display = img.copy()
## [copy_source]
## [match_template]
method_accepts_mask = (cv2.TM_SQDIFF == match_method or match_method == cv2.TM_CCORR_NORMED)
if (use_mask and method_accepts_mask):
result = cv2.matchTemplate(img, templ, match_method, None, mask)
else:
result = cv2.matchTemplate(img, templ, match_method)
## [match_template]
## [normalize]
cv2.normalize( result, result, 0, 1, cv2.NORM_MINMAX, -1 )
## [normalize]
## [best_match]
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result, None)
## [best_match]
## [match_loc]
if (match_method == cv2.TM_SQDIFF or match_method == cv2.TM_SQDIFF_NORMED):
matchLoc = minLoc
else:
matchLoc = maxLoc
## [match_loc]
## [imshow]
# Note: image shape is (rows, cols), so the template width is templ.shape[1] and its height is templ.shape[0].
cv2.rectangle(img_display, matchLoc, (matchLoc[0] + templ.shape[1], matchLoc[1] + templ.shape[0]), (0,0,0), 2, 8, 0 )
cv2.rectangle(result, matchLoc, (matchLoc[0] + templ.shape[1], matchLoc[1] + templ.shape[0]), (0,0,0), 2, 8, 0 )
cv2.imshow(image_window, img_display)
cv2.imshow(result_window, result)
## [imshow]
pass
Example 6: findBestMatch
# Required import: import cv2 [as alias]
# Or: from cv2 import TM_SQDIFF_NORMED [as alias]
def findBestMatch(self, needle, similarity):
""" Find the best match for ``needle`` that has a similarity better than or equal to ``similarity``.
Returns a tuple of ``(position, confidence)`` if a match is found, or ``None`` otherwise.
*Developer's Note - Despite the name, this method actually returns the **first** result
with enough similarity, not the **best** result.*
"""
method = cv2.TM_CCOEFF_NORMED
position = None
match = cv2.matchTemplate(self.haystack, needle, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
if method == cv2.TM_SQDIFF_NORMED or method == cv2.TM_SQDIFF:
confidence = min_val
if min_val <= 1-similarity:
# Confidence checks out
position = min_loc
else:
confidence = max_val
if max_val >= similarity:
# Confidence checks out
position = max_loc
if not position:
return None
return (position, confidence)
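In the TM_SQDIFF_NORMED branch of findBestMatch, the confidence that is returned is a dissimilarity. Callers that prefer a similarity-style score in [0, 1] usually flip it; the helper below is a hypothetical standalone sketch of that convention, not part of the original class:

import cv2

def best_match_similarity(haystack, needle):
    """Return ((x, y), similarity) for the best TM_SQDIFF_NORMED match."""
    match = cv2.matchTemplate(haystack, needle, cv2.TM_SQDIFF_NORMED)
    min_val, _max_val, min_loc, _max_loc = cv2.minMaxLoc(match)
    # min_val is a dissimilarity in [0, 1]; 1 - min_val plays the role of the
    # ``similarity`` threshold used by findBestMatch above.
    return min_loc, 1.0 - min_val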
Example 7: getRefCoordinate
# Required import: import cv2 [as alias]
# Or: from cv2 import TM_SQDIFF_NORMED [as alias]
def getRefCoordinate(self, image, template):
# method = cv2.TM_SQDIFF #2
method = cv2.TM_SQDIFF_NORMED #1
# method = cv2.TM_CCORR_NORMED #3
method = cv2.TM_CCOEFF_NORMED #4 (this last assignment wins, so TM_CCOEFF_NORMED is what is actually used)
res = cv2.matchTemplate(image, template, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
# bottom_right = (top_left[0] + w, top_left[1] + h)
return top_left
Example 8: compare
# Required import: import cv2 [as alias]
# Or: from cv2 import TM_SQDIFF_NORMED [as alias]
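# compare: crop the cached screenshot self.im to ``rect``, match ``img`` with TM_SQDIFF_NORMED,
# and treat a minimum score below ``delta`` as a hit, in which case ``fuse.reset()`` is called
# and its result returned; otherwise the expression evaluates to False.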
def compare(self,img,rect=(0,0,1920,1080),delta=.05):return cv2.minMaxLoc(cv2.matchTemplate(self.im[rect[1]:rect[3],rect[0]:rect[2]],img,cv2.TM_SQDIFF_NORMED))[0]<delta and fuse.reset()
Example 9: select
# Required import: import cv2 [as alias]
# Or: from cv2 import TM_SQDIFF_NORMED [as alias]
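# select: match every template in the ``img`` list against the ``rect`` region of self.im and
# return the index of the template with the lowest (best) TM_SQDIFF_NORMED score.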
def select(self,img,rect=(0,0,1920,1080)):return(lambda x:x.index(min(x)))([cv2.minMaxLoc(cv2.matchTemplate(self.im[rect[1]:rect[3],rect[0]:rect[2]],i,cv2.TM_SQDIFF_NORMED))[0]for i in img])
Example 10: tapOnCmp
# Required import: import cv2 [as alias]
# Or: from cv2 import TM_SQDIFF_NORMED [as alias]
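# tapOnCmp: if the best TM_SQDIFF_NORMED score inside ``rect`` is below ``delta``, tap the centre
# of the matched region via ``base.touch`` and reset ``fuse``; otherwise return False.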
def tapOnCmp(self,img,rect=(0,0,1920,1080),delta=.05):return(lambda loc:loc[0]<delta and(base.touch((rect[0]+loc[2][0]+(img.shape[1]>>1),rect[1]+loc[2][1]+(img.shape[0]>>1))),fuse.reset())[1])(cv2.minMaxLoc(cv2.matchTemplate(self.im[rect[1]:rect[3],rect[0]:rect[2]],img,cv2.TM_SQDIFF_NORMED)))
Example 11: oneBattle
# Required import: import cv2 [as alias]
# Or: from cv2 import TM_SQDIFF_NORMED [as alias]
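# oneBattle: automates a single battle loop. Each turn it reads the stage, skill readiness and
# servant portraits from screenshots (a TM_SQDIFF_NORMED portrait difference >= .03 is taken to
# mean the servant in that slot was replaced), fires servant and master skills according to the
# skillInfo / masterSkill tables via simulated key presses (doit), then selects command cards,
# returning True when the battle finishes and False if tapping fails.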
def oneBattle():
turn,stage,stageTurn,servant=0,0,0,[0,1,2]
while True:
if Check(.1).isTurnBegin():
turn+=1
stage,stageTurn,skill,newPortrait=(lambda chk:(lambda x:[x,stageTurn+1if stage==x else 1])(chk.getStage())+[chk.isSkillReady(),chk.getPortrait()])(Check(.2))
if turn==1:stageTotal=check.getStageTotal()
else:servant=(lambda m,p:[m+p.index(i)+1if i in p else servant[i]for i in range(3)])(max(servant),[i for i in range(3)if servant[i]<6and cv2.matchTemplate(newPortrait[i],portrait[i],cv2.TM_SQDIFF_NORMED)[0][0]>=.03])
if stageTurn==1:doit('\x69\x68\x67\x66\x65\x64'[dangerPos[stage-1]]+'P',(250,500))
portrait=newPortrait
logger.info(f'{turn} {stage} {stageTurn} {servant}')
for i,j in((i,j)for i in range(3)if servant[i]<6for j in range(3)if skill[i][j]and skillInfo[servant[i]][j][0]and stage<<4|stageTurn>=min(skillInfo[servant[i]][j][0],stageTotal)<<4|skillInfo[servant[i]][j][1]):
doit(('ASD','FGH','JKL')[i][j],(300,))
if skillInfo[servant[i]][j][2]:doit(chr(skillInfo[servant[i]][j][2]+49),(300,))
sleep(1.7)
while not Check(.1).isTurnBegin():pass
sleep(.16)
for i in(i for i in range(3)if stage==min(masterSkill[i][0],stageTotal)and stageTurn==masterSkill[i][1]):
doit('Q'+'WER'[i],(300,300))
if masterSkill[i][2]:doit(chr(masterSkill[i][2]+49),(300,))
sleep(1.7)
while not Check(.1).isTurnBegin():pass
sleep(.16)
doit(' ',(2250,))
doit((lambda chk:(lambda c,h:([chr(i+54)for i in sorted((i for i in range(3)if h[i]),key=lambda x:-houguInfo[servant[x]][1])]if any(h)else[chr(j+49)for i in range(3)if c.count(i)>=3for j in range(5)if c[j]==i])+[chr(i+49)for i in sorted(range(5),key=lambda x:(c[x]&2)>>1|(c[x]&1)<<1)])(chk.getABQ(),(lambda h:[servant[i]<6and h[i]and houguInfo[servant[i]][0]and stage>=min(houguInfo[servant[i]][0],stageTotal)for i in range(3)])(chk.isHouguReady())))(Check())[:3],(350,350,10000))
elif check.isBattleFinished():
logger.info('Battle Finished')
return True
elif check.tapFailed():
logger.warning('Battle Failed')
return False
Example 12: count_occurrence
# Required import: import cv2 [as alias]
# Or: from cv2 import TM_SQDIFF_NORMED [as alias]
def count_occurrence(this):
Screenshot.shot()
this = this + '.png'
img_rgb = cv2.imread(this)
template = cv2.imread('playing.png')
res = cv2.matchTemplate(img_rgb, template, cv2.TM_SQDIFF_NORMED)
# TM_SQDIFF_NORMED scores are dissimilarities (0 is a perfect match), so occurrences
# are the locations where the score falls *below* the threshold, not above it.
threshold = 0.8
loc = np.where(res <= threshold)
cv2.imwrite('result.png', img_rgb)
return loc
Example 13: is_on_screen
# Required import: import cv2 [as alias]
# Or: from cv2 import TM_SQDIFF_NORMED [as alias]
def is_on_screen(this, accuracy=0.14):
this = this + '.png'
Screenshot.shot()
small_image = cv2.imread(this)
h, w, c = small_image.shape
large_image = cv2.imread('playing.png')
# cv2.matchTemplate expects (image, template): search for the small template inside
# the full screenshot.
result = cv2.matchTemplate(large_image, small_image, cv2.TM_SQDIFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
#print('ImageCoordinate::is_on_screen => ' + this + ' ' + str(min_val))
mn, _, mn_loc, mx_loc = cv2.minMaxLoc(result)
mp_x, mp_y = mn_loc
m_x, m_y = mx_loc
# print(mn_loc)
# top_left = mn_loc
# mx_right = mx_loc
bt_rt = (mn_loc[0], mn_loc[1])
bt_rtw = (mn_loc[0]+w, mn_loc[1]+h)
# cv2.rectangle(large_image,top_left,bt_rt,255,2)
# bt_rt =(mx_right[0]+h,mx_right[1]+w)
# cv2.rectangle(large_image,mx_right,bt_rt,255,2)
# cv2.imwrite('result_'+this.replace('images/',''), large_image)
# print('saved')
#pyautogui.moveTo(mp_x, mp_y)
print(min_val)
if min_val > accuracy:
return False
else:
mn, _, mn_loc, _ = cv2.minMaxLoc(result)
mp_x, mp_y = mn_loc
ordinal = random.randrange(1, 15)
a = random.randrange(-ordinal, ordinal)
b = random.randrange(-ordinal, ordinal)
location = [mp_x + w / 2+a, mp_y + h / 2+b, bt_rt, bt_rtw, min_val]
return location
Example 14: coords
# Required import: import cv2 [as alias]
# Or: from cv2 import TM_SQDIFF_NORMED [as alias]
def coords(this, shot=True):
this = this + '.png'
if shot:
Screenshot.shot()
else:
print('No screenshot')
small_image = cv2.imread(this)
h, w, c = small_image.shape
large_image = cv2.imread('playing.png')
# cv2.matchTemplate expects (image, template): search for the small template inside
# the full screenshot.
result = cv2.matchTemplate(large_image, small_image, cv2.TM_SQDIFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
# print('ImageCoordinate::coords => ' + this + ' ' + str(min_val))
mn, _, mn_loc, mx_loc = cv2.minMaxLoc(result)
mp_x, mp_y = mn_loc
# #pyautogui.moveTo(mp_x, mp_y)
# top_left = mn_loc
# mx_right = mx_loc
# bt_rt =(top_left[0]+h,top_left[1]+w)
# cv2.rectangle(large_image,top_left,bt_rt,255,2)
# bt_rt =(mx_right[0]+h,mx_right[1]+w)
# cv2.rectangle(large_image,mx_right,bt_rt,255,2)
# cv2.imwrite('result_'+this.replace('images/',''), large_image)
# print('saved'+str(min_val))
if min_val > 0.2:
return [0, 0, min_val]
mn, _, mn_loc, _ = cv2.minMaxLoc(result)
mp_x, mp_y = mn_loc
ordinal = random.randrange(1, 15)
a = random.randrange(-ordinal, ordinal)
b = random.randrange(-ordinal, ordinal)
location = [mp_x + w / 2+a, mp_y + h / 2+b, min_val]
return location
Example 15: find
# Required import: import cv2 [as alias]
# Or: from cv2 import TM_SQDIFF_NORMED [as alias]
def find(search_file, image_file, threshold=0.7):
'''
Locate an image position with cv2.matchTemplate.
Uses pixel-level template matching to find pictures.
Args:
search_file(string): filename of the template to search for
image_file(string): filename of the image to search in
threshold: optional; intended minimum match rate (note: not checked in the body below)
Returns:
A tuple like (x, y), or None if nothing is found
Raises:
IOError: when a file cannot be read
'''
search = _cv2open(search_file)
image = _cv2open(image_file)
w, h = search.shape[::-1]
method = cv2.TM_CCORR_NORMED  # note: the cv2 module exposes TM_CCORR_NORMED, not a CV_-prefixed name
res = cv2.matchTemplate(image, search, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
middle_point = (top_left[0] + w // 2, top_left[1] + h // 2)
print(top_left, bottom_right)
return middle_point
# if len(region_center):
# x = int(maxloc[0]+region_center[0]-source_width/2)
# y = int(maxloc[1]+region_center[1]-source_height/2)
# else:
# [x,y] = maxloc
# return max_val, [x,y]
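A possible way to call the ``find`` helper above, assuming placeholder file names and that ``_cv2open`` loads images the way cv2.imread does:

# Hypothetical usage; 'button.png' and 'screen.png' are placeholder file names.
center = find('button.png', 'screen.png', threshold=0.7)
if center is not None:
    print('template centre found at', center)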