

Python cv2.TM_CCOEFF_NORMED Attribute Code Examples

This article collects typical usage examples of the cv2.TM_CCOEFF_NORMED attribute in Python. If you are wondering what cv2.TM_CCOEFF_NORMED is, how to use it, or want to see it in real code, the curated examples below should help. You can also explore further usage examples of the cv2 module.


The following presents 15 code examples of the cv2.TM_CCOEFF_NORMED attribute, sorted by popularity by default.
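
Before the collected examples, here is a minimal sketch of the pattern most of them follow: run cv2.matchTemplate with cv2.TM_CCOEFF_NORMED, then read the best score and location from cv2.minMaxLoc. The file names 'scene.png' and 'template.png' and the 0.8 threshold are placeholders, not taken from any of the projects below.

import cv2

# Load the search image and the template in grayscale (placeholder paths).
image = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)
template = cv2.imread('template.png', cv2.IMREAD_GRAYSCALE)

# TM_CCOEFF_NORMED produces scores in [-1, 1]; higher means a better match.
res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

if max_val >= 0.8:  # placeholder confidence threshold
    h, w = template.shape[:2]
    top_left = max_loc  # top-left corner of the best match
    bottom_right = (top_left[0] + w, top_left[1] + h)
    print('match at', top_left, '-', bottom_right, 'score:', max_val)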

Example 1: match_img

# Required import: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def match_img(image, template, value):
    """
    :param image: input image
    :param template: template image
    :param value: match threshold
    :return: watermark coordinates (top-left corner of the best match)
    Description: locates the position of the template in the image; used to calibrate element positions.
    """
    res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    threshold = value
    min_v, max_v, min_pt, max_pt = cv2.minMaxLoc(res)
    if max_v < threshold:
        return False
    if max_pt[0] not in range(10, 40) or max_pt[1] > 20:
        return False
    return max_pt 
Developer: Mingtzge, Project: 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement, Lines: 18, Source file: split_img_generate_data.py

Example 2: get_match_confidence

# Required import: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def get_match_confidence(img1, img2, mask=None):
    if img1.shape != img2.shape:
        return False
    ## first try, using absdiff
    # diff = cv2.absdiff(img1, img2)
    # h, w, d = diff.shape
    # total = h*w*d
    # num = (diff<20).sum()
    # print 'is_match', total, num
    # return num > total*0.90
    if mask is not None:
        img1 = img1.copy()
        img1[mask!=0] = 0
        img2 = img2.copy()
        img2[mask!=0] = 0
    ## using match
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    # print confidence
    return confidence 
Developer: NetEaseGame, Project: ATX, Lines: 22, Source file: scene_detector.py

Example 3: __apply_template_matching

# Required import: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def __apply_template_matching(angle, template, image):
    # Rotate the template
    template_rotated = __rotate_image_size_corrected(template, angle)

    # Apply template matching
    image_templated = cv2.matchTemplate(image, template_rotated, cv2.TM_CCOEFF_NORMED)

    # Correct template matching image size difference
    template_rotated_height, template_rotated_width = template_rotated.shape
    template_half_height = template_rotated_height // 2
    template_half_width = template_rotated_width // 2

    image_templated_inrange_size_corrected = cv2.copyMakeBorder(image_templated, template_half_height, template_half_height, template_half_width, template_half_width, cv2.BORDER_CONSTANT, value=0)

    # Calculate maximum match coefficient
    max_match = numpy.max(image_templated_inrange_size_corrected)

    return (max_match, angle, template_rotated, image_templated_inrange_size_corrected) 
Developer: microsoft, Project: AI-Robot-Challenge-Lab, Lines: 20, Source file: cv_detection_right_hand.py

Example 4: compare

# Required import: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def compare(i, j, img):
    for x in range(lenX):
        if x < i:
            continue
        for y in range(lenY):
            if x <= i and y < j:
                continue
            z = mat[x][y]
            # image similarity
            y1 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            z1 = cv2.cvtColor(z, cv2.COLOR_BGR2GRAY)
            # image_difference = get_image_difference(y1, z1)
            res = cv2.matchTemplate(z1, y1, cv2.TM_CCOEFF_NORMED)
            # print(i, j, x, y, image_difference)
            print(i, j, x, y, res)
            # if abs(image_difference-1)>0.5:
            # if image_difference < 0.1:
            #     pairs.append((i, j, x, y, image_difference))
            if res[0][0] >= 0.8:  # and (i != x and j != y):  # 0.9 works better
                if i == x and j == y:
                    continue
                pairs.append((i, j, x, y, res[0][0]))
        print('--------') 
Developer: makelove, Project: OpenCV-Python-Tutorial, Lines: 25, Source file: compare_photos.py

Example 5: findAllMatches

# Required import: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def findAllMatches(self, needle, similarity):
        """ Find all matches for ``needle`` with confidence better than or equal to ``similarity``.

        Returns an array of tuples ``(position, confidence)`` if match(es) is/are found,
        or an empty array otherwise.
        """
        positions = []
        method = cv2.TM_CCOEFF_NORMED

        match = cv2.matchTemplate(self.haystack, self.needle, method)

        indices = (-match).argpartition(100, axis=None)[:100] # Review the 100 top matches
        unraveled_indices = numpy.array(numpy.unravel_index(indices, match.shape)).T
        for location in unraveled_indices:
            y, x = location
            confidence = match[y][x]
            if method == cv2.TM_SQDIFF_NORMED or method == cv2.TM_SQDIFF:
                if confidence <= 1-similarity:
                    positions.append(((x, y), confidence))
            else:
                if confidence >= similarity:
                    positions.append(((x, y), confidence))

        positions.sort(key=lambda x: (x[0][1], x[0][0]))
        return positions 
Developer: glitchassassin, Project: lackey, Lines: 27, Source file: TemplateMatchers.py

Example 6: cal_rgb_confidence

# Required import: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def cal_rgb_confidence(img_src_rgb, img_sch_rgb):
    """Calculate the similarity of two same-sized color images."""
    # Perceptual weights for the B, G, R channels:
    weight = (0.114, 0.587, 0.299)
    src_bgr, sch_bgr = cv2.split(img_src_rgb), cv2.split(img_sch_rgb)

    # Compute the confidence of each BGR channel and store it in bgr_confidence:
    bgr_confidence = [0, 0, 0]
    for i in range(3):
        res_temp = cv2.matchTemplate(src_bgr[i], sch_bgr[i], cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res_temp)
        bgr_confidence[i] = max_val

    # Weighted confidence
    weighted_confidence = bgr_confidence[0] * weight[0] + bgr_confidence[1] * weight[1] + bgr_confidence[2] * weight[2]

    return weighted_confidence 
Developer: AirtestProject, Project: Airtest, Lines: 19, Source file: cal_confidence.py
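
The weights (0.114, 0.587, 0.299) in Example 6 are the standard BGR-to-luminance coefficients, so the green channel contributes most to the final score. A hypothetical call might look like the sketch below; the image paths are placeholders, and both crops are assumed to be BGR images of the same size.

import cv2

img_a = cv2.imread('crop_a.png')  # placeholder path, BGR image
img_b = cv2.imread('crop_b.png')  # placeholder path, same size as img_a
print('weighted BGR confidence:', cal_rgb_confidence(img_a, img_b))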

Example 7: exists

# Required import: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def exists(frame, template, thresh):
        """
        Returns True if 'template' is in 'frame' with probability of at least 'thresh'
        :param frame: A frame
        :param template: An image to search in 'frame'.
        :param thresh: The minimum probability required to accept template.
        :return: If template is in frame
        """

        digit_res = cv2.matchTemplate(frame, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(digit_res >= thresh)

        if len(loc[-1]) == 0:
            return False

        for pt in zip(*loc[::-1]):
            if digit_res[pt[1]][pt[0]] == 1:
                return False

        return True 
Developer: shahar603, Project: SpaceXtract, Lines: 22, Source file: general_extract.py

Example 8: most_probably_template

# Required import: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def most_probably_template(image, templates):
        """
        Get the index of the template (in the templates list) which is most likely to be in the image.

        :param image: Image that contains the template
        :param templates: A list of templates to search for in the image
        :return: the index (in templates) which has the highest probability of being in the image
        """
        probability_list = []

        for template in templates:
            res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
            probability_list.append(float(np.max(res)))

        return probability_list.index(max(probability_list)) 
Developer: shahar603, Project: SpaceXtract, Lines: 18, Source file: general_extract.py

Example 9: main

# Required import: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def main():
    src = cv2.imread('src.jpg', cv2.IMREAD_GRAYSCALE)
    tpl = cv2.imread('tpl.jpg', cv2.IMREAD_GRAYSCALE)
    result = cv2.matchTemplate(src, tpl, cv2.TM_CCOEFF_NORMED)
    result = cv2.normalize(result, dst=None, alpha=0, beta=1,
                           norm_type=cv2.NORM_MINMAX, dtype=-1)
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
    matchLoc = maxLoc
    draw1 = cv2.rectangle(
        src, matchLoc, (matchLoc[0] + tpl.shape[1], matchLoc[1] + tpl.shape[0]), 0, 2, 8, 0)
    draw2 = cv2.rectangle(
        result, matchLoc, (matchLoc[0] + tpl.shape[1], matchLoc[1] + tpl.shape[0]), 0, 2, 8, 0)
    cv2.imshow('draw1', draw1)
    cv2.imshow('draw2', draw2)
    cv2.waitKey(0)
    print(src.shape)
    print(tpl.shape)
    print(result.shape)
    print(matchLoc) 
Developer: cynricfu, Project: dual-fisheye-video-stitching, Lines: 21, Source file: template_matching.py

Example 10: find_game_position

# Required import: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def find_game_position(self, threshold) -> Dict:
        monitor = self.shooter.monitors[0]
        buffer = self.shooter.grab(monitor)
        image = Image.frombytes('RGB', buffer.size, buffer.rgb).convert('L')
        image = np.array(image)
        dino_template = cv2.imread(os.path.join('templates', 'dino.png'), 0)
        res = cv2.matchTemplate(image, dino_template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= threshold)
        if len(loc[0]) == 0:
            dino_template = cv2.imread(os.path.join('templates', 'dino2.png'), 0)
            res = cv2.matchTemplate(image, dino_template, cv2.TM_CCOEFF_NORMED)
            loc = np.where(res >= threshold)
        if len(loc[0]):
            pt = next(zip(*loc[::-1]))
            w, h = dino_template.shape[::-1]
            lw, lh = self.landscape_template.shape[::-1]
            return dict(monitor, height=lh, left=pt[0], top=pt[1] - lh + h, width=lw)
        return {} 
Developer: pauloalves86, Project: go_dino, Lines: 20, Source file: dino_api.py

Example 11: imagesearcharea

# Required import: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def imagesearcharea(image, x1, y1, x2, y2, precision=0.8, im=None):
    if im is None:
        im = region_grabber(region=(x1, y1, x2, y2))
        if is_retina:
            im.thumbnail((round(im.size[0] * 0.5), round(im.size[1] * 0.5)))
        # im.save('testarea.png')  # useful for debugging: saves the captured region as "testarea.png"

    img_rgb = np.array(im)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread(image, 0)

    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if max_val < precision:
        return [-1, -1]
    return max_loc 
Developer: drov0, Project: python-imagesearch, Lines: 18, Source file: imagesearch.py

Example 12: imagesearch_count

# Required import: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def imagesearch_count(image, precision=0.9):
    img_rgb = pyautogui.screenshot()
    if is_retina:
        img_rgb.thumbnail((round(img_rgb.size[0] * 0.5), round(img_rgb.size[1] * 0.5)))
    img_rgb = np.array(img_rgb)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread(image, 0)
    w, h = template.shape[::-1]
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= precision)
    count = 0
    for pt in zip(*loc[::-1]):  # Swap columns and rows
        # Uncomment to draw a box around each found occurrence:
        # cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
        count = count + 1
    # Uncomment to write an output image with boxes drawn around occurrences:
    # cv2.imwrite('result.png', img_rgb)
    return count 
Developer: drov0, Project: python-imagesearch, Lines: 18, Source file: imagesearch.py

Example 13: matchAB

# Required import: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def matchAB(fileA, fileB):
    # Read the images
    imgA = cv2.imread(fileA)
    imgB = cv2.imread(fileB)

    # Convert to grayscale
    grayA = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY)
    grayB = cv2.cvtColor(imgB, cv2.COLOR_BGR2GRAY)

    # Get the size of image A
    height, width = grayA.shape

    # Slide a local window over A and find its best matching position in B
    result_window = np.zeros((height, width), dtype=imgA.dtype)
    for start_y in range(0, height-100, 10):
        for start_x in range(0, width-100, 10):
            window = grayA[start_y:start_y+100, start_x:start_x+100]
            match = cv2.matchTemplate(grayB, window, cv2.TM_CCOEFF_NORMED)
            _, _, _, max_loc = cv2.minMaxLoc(match)
            matched_window = grayB[max_loc[1]:max_loc[1]+100, max_loc[0]:max_loc[0]+100]
            result = cv2.absdiff(window, matched_window)
            result_window[start_y:start_y+100, start_x:start_x+100] = result

    plt.imshow(result_window)
    plt.show() 
Developer: cangyan, Project: image-detect, Lines: 27, Source file: image_detect_02.py

Example 14: multi_scale_search

# Required import: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def multi_scale_search(pivot, screen, range=0.3, num=10):
    H, W = screen.shape[:2]
    h, w = pivot.shape[:2]

    found = None
    for scale in np.linspace(1-range, 1+range, num)[::-1]:
        resized = cv2.resize(screen, (int(W * scale), int(H * scale)))
        r = W / float(resized.shape[1])
        if resized.shape[0] < h or resized.shape[1] < w:
            break
        res = cv2.matchTemplate(resized, pivot, cv2.TM_CCOEFF_NORMED)

        loc = np.where(res >= res.max())
        pos_h, pos_w = list(zip(*loc))[0]

        if found is None or res.max() > found[-1]:
            found = (pos_h, pos_w, r, res.max())

    if found is None: return (0,0,0,0,0)
    pos_h, pos_w, r, score = found
    start_h, start_w = int(pos_h * r), int(pos_w * r)
    end_h, end_w = int((pos_h + h) * r), int((pos_w + w) * r)
    return [start_h, start_w, end_h, end_w, score] 
Developer: Prinsphield, Project: Wechat_AutoJump, Lines: 25, Source file: nn_play.py

Example 15: find_address

# Required import: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def find_address(crop_gray, crop_org):
        template = cv2.UMat(cv2.imread('address_mask_%s.jpg'%pixel_x, 0))
        # showimg(template)
        #showimg(crop_gray)
        w, h = cv2.UMat.get(template).shape[::-1]
        #t1 = round(time.time()*1000)
        res = cv2.matchTemplate(crop_gray, template, cv2.TM_CCOEFF_NORMED)
        #t2 = round(time.time()*1000)
        #print 'time:%s'%(t2-t1)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        top_left = (max_loc[0] + w, max_loc[1] - int(20*x))
        bottom_right = (top_left[0] + int(1700*x), top_left[1] + int(550*x))
        result = cv2.UMat.get(crop_org)[top_left[1]-10:bottom_right[1], top_left[0]-10:bottom_right[0]]
        cv2.rectangle(crop_gray, top_left, bottom_right, 255, 2)
        #showimg(crop_gray)
        return cv2.UMat(result) 
Developer: Raymondhhh90, Project: idcardocr, Lines: 18, Source file: idcardocr.py


Note: The cv2.TM_CCOEFF_NORMED examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright of the source code; please consult each project's license before using or redistributing the code, and do not reproduce this article without permission.