This article collects typical usage examples of the cv2.minMaxLoc method in Python. If you are wondering how cv2.minMaxLoc is used in practice, how to call it, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples from the cv2 module.
Below are 15 code examples of the cv2.minMaxLoc method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
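Before the examples, here is a minimal, self-contained sketch of what cv2.minMaxLoc returns (the array values are chosen purely for illustration): it takes a single-channel array and returns the minimum value, the maximum value, and their locations.
import cv2
import numpy as np

arr = np.array([[1, 5],
                [9, 3]], dtype=np.float32)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(arr)
# min_val = 1.0 at min_loc = (0, 0); max_val = 9.0 at max_loc = (0, 1)
# Locations are returned as (x, y), i.e. (column, row).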
Example 1: match_img
# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def match_img(image, template, value):
    """
    :param image: input image
    :param template: template image
    :param value: matching threshold
    :return: watermark coordinates
    Description: obtain the position of the template inside the image; used to calibrate element position information.
    """
    res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    threshold = value
    min_v, max_v, min_pt, max_pt = cv2.minMaxLoc(res)
    if max_v < threshold:
        return False
    if max_pt[0] not in range(10, 40) or max_pt[1] > 20:
        return False
    return max_pt
Author: Mingtzge, project: 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement, lines: 18, source file: split_img_generate_data.py
Example 2: get_match_confidence
# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def get_match_confidence(img1, img2, mask=None):
    if img1.shape != img2.shape:
        return False
    ## first try, using absdiff
    # diff = cv2.absdiff(img1, img2)
    # h, w, d = diff.shape
    # total = h*w*d
    # num = (diff<20).sum()
    # print('is_match', total, num)
    # return num > total*0.90
    if mask is not None:
        img1 = img1.copy()
        img1[mask != 0] = 0
        img2 = img2.copy()
        img2[mask != 0] = 0
    ## using match
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    # print(confidence)
    return confidence
Example 3: probability
# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def probability(self, im: str) -> float:
    """
    Return the probability of the existence of given image.
    :param im: the name of the image.
    :return: the probability (confidence).
    """
    assert self.screen is not None
    try:
        template = self.images[im]
    except KeyError:
        logger.error('Unexpected image name {}'.format(im))
        return 0.0
    res = cv.matchTemplate(self.screen, template, TM_METHOD)
    _, max_val, _, max_loc = cv.minMaxLoc(res)
    logger.debug('max_val = {}, max_loc = {}'.format(max_val, max_loc))
    return max_val
Example 4: find
# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def find(self, im: str, threshold: float = None) -> Tuple[int, int]:
    """
    Find the template image on screen and return its top-left coords.
    :param im: the name of the image
    :param threshold: the matching threshold. If not given, the default threshold is used.
    :return: the top-left coords of the match. Return (-1, -1) if the best matching value is below `threshold`.
    """
    threshold = threshold or self.threshold
    assert self.screen is not None
    try:
        template = self.images[im]
    except KeyError:
        logger.error('Unexpected image name {}'.format(im))
        return -1, -1
    res = cv.matchTemplate(self.screen, template, TM_METHOD)
    _, max_val, _, max_loc = cv.minMaxLoc(res)
    logger.debug('max_val = {}, max_loc = {}'.format(max_val, max_loc))
    return max_loc if max_val >= threshold else (-1, -1)
Example 5: cal_rgb_confidence
# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def cal_rgb_confidence(img_src_rgb, img_sch_rgb):
    """Compute the similarity of two color images of the same size."""
    # Perceptual (BT.601 luma) weights for the B, G, R channels:
    weight = (0.114, 0.587, 0.299)
    src_bgr, sch_bgr = cv2.split(img_src_rgb), cv2.split(img_sch_rgb)
    # Compute the confidence for each of the B, G, R channels and store it in bgr_confidence:
    bgr_confidence = [0, 0, 0]
    for i in range(3):
        res_temp = cv2.matchTemplate(src_bgr[i], sch_bgr[i], cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res_temp)
        bgr_confidence[i] = max_val
    # Weighted confidence
    weighted_confidence = (bgr_confidence[0] * weight[0] + bgr_confidence[1] * weight[1]
                           + bgr_confidence[2] * weight[2])
    return weighted_confidence
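A hypothetical call site for cal_rgb_confidence (the file names below are placeholders, not from the original project): both inputs must already be color images with identical shapes, for example a screenshot crop and the template it should match.
src = cv2.imread('crop.png')         # placeholder file name, for illustration only
sch = cv2.imread('template.png')     # must have the same shape as src
print(cal_rgb_confidence(src, sch))  # close to 1.0 for a near-identical pair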
Example 6: find_template
# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def find_template(im_source, im_search, threshold=0.8, rgb=False):
    """Find the best template-matching result."""
    # Step 1: validate the input images
    check_source_larger_than_search(im_source, im_search)
    # Step 2: compute the template-matching result matrix res
    res = _get_template_result_matrix(im_source, im_search)
    # Step 3: extract the matching results
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    h, w = im_search.shape[:2]
    # Compute the confidence:
    confidence = _get_confidence_from_matrix(im_source, im_search, max_loc, max_val, w, h, rgb)
    # Compute the match location: target center + target rectangle:
    middle_point, rectangle = _get_target_rectangle(max_loc, w, h)
    best_match = generate_result(middle_point, rectangle, confidence)
    LOGGING.debug("threshold=%s, result=%s" % (threshold, best_match))
    return best_match if confidence >= threshold else None
Example 7: match_dmg_templates
# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def match_dmg_templates(self, frame):
    match_mat, max_val, tl = [None]*10, [0]*10, [(0, 0)]*10
    for i in range(0, 10):
        match_mat[i] = cv2.matchTemplate(frame, self.num_img[0],
                                         cv2.TM_CCORR_NORMED, mask=self.num_mask[0])
        _, max_val[i], _, tl[i] = cv2.minMaxLoc(match_mat[i])
    # print(max_val[0])
    br = (tl[0][0] + self.num_w, tl[0][1] + self.num_h)
    frame = cv2.rectangle(frame, tl[0], br, (255, 255, 255), 2)
    # Multi-template result searching
    # _, max_val_1, _, tl_1 = cv2.minMaxLoc(np.array(match_mat))
    # print(tl_1)

# A number of methods corresponding to the various trackbars available.
Example 8: main
# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def main():
    src = cv2.imread('src.jpg', cv2.IMREAD_GRAYSCALE)
    tpl = cv2.imread('tpl.jpg', cv2.IMREAD_GRAYSCALE)
    result = cv2.matchTemplate(src, tpl, cv2.TM_CCOEFF_NORMED)
    result = cv2.normalize(result, dst=None, alpha=0, beta=1,
                           norm_type=cv2.NORM_MINMAX, dtype=-1)
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
    matchLoc = maxLoc
    draw1 = cv2.rectangle(
        src, matchLoc, (matchLoc[0] + tpl.shape[1], matchLoc[1] + tpl.shape[0]), 0, 2, 8, 0)
    draw2 = cv2.rectangle(
        result, matchLoc, (matchLoc[0] + tpl.shape[1], matchLoc[1] + tpl.shape[0]), 0, 2, 8, 0)
    cv2.imshow('draw1', draw1)
    cv2.imshow('draw2', draw2)
    cv2.waitKey(0)
    print(src.shape)
    print(tpl.shape)
    print(result.shape)
    print(matchLoc)
Example 9: detect
# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def detect(self, z, x):
    k = self.gaussianCorrelation(x, z)
    # Compute the response map
    res = real(fftd(complexMultiplication(self._alphaf, fftd(k)), True))
    # pv: maximum response value; pi: location (index) of the maximum response
    _, pv, _, pi = cv2.minMaxLoc(res)
    # Location of the peak as floats
    p = [float(pi[0]), float(pi[1])]
    # Use the neighboring response values to refine the peak location to subpixel accuracy
    if 0 < pi[0] < res.shape[1] - 1:
        p[0] += self.subPixelPeak(res[pi[1], pi[0] - 1], pv, res[pi[1], pi[0] + 1])
    if 0 < pi[1] < res.shape[0] - 1:
        p[1] += self.subPixelPeak(res[pi[1] - 1, pi[0]], pv, res[pi[1] + 1, pi[0]])
    # Compute the displacement relative to the sampling center
    p[0] -= res.shape[1] / 2.
    p[1] -= res.shape[0] / 2.
    # Return the displacement from the sampling center and the peak value
    return p, pv

# Update the target position based on the current frame
Example 10: detect_scale
# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def detect_scale(self, image):
    xsf = self.get_scale_sample(image)
    # Compute AZ in the paper
    add_temp = cv2.reduce(complexMultiplication(self.sf_num, xsf), 0, cv2.REDUCE_SUM)
    # Compute the final y
    scale_response = cv2.idft(complexDivisionReal(add_temp, (self.sf_den + self.scale_lambda)),
                              None, cv2.DFT_REAL_OUTPUT)
    # Take the location of the maximum response as the final scale
    # pv: maximum response value; pi: location (index) of the maximum response
    _, pv, _, pi = cv2.minMaxLoc(scale_response)
    return pi

# Update the scale
Example 11: imagesearcharea
# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def imagesearcharea(image, x1, y1, x2, y2, precision=0.8, im=None):
    if im is None:
        im = region_grabber(region=(x1, y1, x2, y2))
        if is_retina:
            im.thumbnail((round(im.size[0] * 0.5), round(im.size[1] * 0.5)))
        # im.save('testarea.png')  # useful for debugging: saves the captured region as "testarea.png"
    img_rgb = np.array(im)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread(image, 0)
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if max_val < precision:
        return [-1, -1]
    return max_loc
Example 12: locate_img
# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def locate_img(image, template):
    img = image.copy()
    res = cv2.matchTemplate(img, template, method)
    print(res)
    print(res.shape)
    cv2.imwrite('image/shape.png', res)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print(cv2.minMaxLoc(res))
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    h, w = template.shape
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    cv2.imwrite('image/tt.jpg', img)
Example 13: getKeypoints
# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def getKeypoints(probMap, threshold=0.1):
    mapSmooth = cv2.GaussianBlur(probMap, (3, 3), 0, 0)
    mapMask = np.uint8(mapSmooth > threshold)
    keypoints = []
    contours = None
    try:
        # OpenCV 4.x
        contours, _ = cv2.findContours(mapMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    except:
        # OpenCV 3.x
        _, contours, _ = cv2.findContours(mapMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        blobMask = np.zeros(mapMask.shape)
        blobMask = cv2.fillConvexPoly(blobMask, cnt, 1)
        maskedProbMap = mapSmooth * blobMask
        _, maxVal, _, maxLoc = cv2.minMaxLoc(maskedProbMap)
        keypoints.append(maxLoc + (probMap[maxLoc[1], maxLoc[0]],))
    return keypoints
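A minimal standalone sketch of how getKeypoints might be exercised (the synthetic probability map below is invented for illustration; in the original context probMap would typically come from a pose-estimation network):
prob_map = np.zeros((64, 64), dtype=np.float32)
cv2.circle(prob_map, (40, 20), 5, 1.0, -1)    # one confident blob at (x=40, y=20)
print(getKeypoints(prob_map, threshold=0.1))  # roughly [(40, 20, 1.0)]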
Example 14: _locate_target
# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def _locate_target(self, score):
    def subpixel_peak(left, center, right):
        divisor = 2 * center - left - right
        if abs(divisor) < 1e-3:
            return 0
        return 0.5 * (right - left) / divisor

    _, _, _, max_loc = cv2.minMaxLoc(score)
    loc = np.float32(max_loc)
    if max_loc[0] in range(1, score.shape[1] - 1):
        loc[0] += subpixel_peak(
            score[max_loc[1], max_loc[0] - 1],
            score[max_loc[1], max_loc[0]],
            score[max_loc[1], max_loc[0] + 1])
    if max_loc[1] in range(1, score.shape[0] - 1):
        loc[1] += subpixel_peak(
            score[max_loc[1] - 1, max_loc[0]],
            score[max_loc[1], max_loc[0]],
            score[max_loc[1] + 1, max_loc[0]])
    offset = loc - np.float32(score.shape[1::-1]) / 2
    return offset
Example 15: SMAvgLocalMax
# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def SMAvgLocalMax(self, src):
    # size
    stepsize = pySaliencyMapDefs.default_step_local
    width = src.shape[1]
    height = src.shape[0]
    # find local maxima
    numlocal = 0
    lmaxmean = 0
    for y in range(0, height - stepsize, stepsize):
        for x in range(0, width - stepsize, stepsize):
            localimg = src[y:y+stepsize, x:x+stepsize]
            lmin, lmax, dummy1, dummy2 = cv2.minMaxLoc(localimg)
            lmaxmean += lmax
            numlocal += 1
    # averaging over all the local regions
    return lmaxmean / numlocal

# normalization specific for the saliency map model