

Python cv2.TM_CCOEFF_NORMED Attribute Code Examples

This article compiles typical usage examples of the cv2.TM_CCOEFF_NORMED attribute in Python. If you are wondering what cv2.TM_CCOEFF_NORMED is for or how to use it, the curated code examples below should help. You can also explore further usage examples of the cv2 module to which this attribute belongs.


The following presents 15 code examples of the cv2.TM_CCOEFF_NORMED attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
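
Before the individual examples, here is a minimal sketch of the pattern they all share: call cv2.matchTemplate with cv2.TM_CCOEFF_NORMED, then read the best score and its location with cv2.minMaxLoc. This sketch is not taken from any of the projects below, and the file names 'scene.png' and 'patch.png' are placeholders.

import cv2

# Minimal template-matching sketch using TM_CCOEFF_NORMED (placeholder file names).
scene = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)
patch = cv2.imread('patch.png', cv2.IMREAD_GRAYSCALE)

# The result is a correlation map with values roughly in [-1, 1]; higher means a better match.
res = cv2.matchTemplate(scene, patch, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

# For TM_CCOEFF_NORMED the best match is at the maximum of the map.
h, w = patch.shape[:2]
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
print('confidence:', max_val, 'match region:', top_left, bottom_right)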

Example 1: match_img

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def match_img(image, template, value):
    """
    :param image: 圖片
    :param template: 模板
    :param value: 閾值
    :return: 水印坐標
    描述:用於獲得這幅圖片模板對應的位置坐標,用途:校準元素位置信息
    """
    res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    threshold = value
    min_v, max_v, min_pt, max_pt = cv2.minMaxLoc(res)
    if max_v < threshold:
        return False
    if not max_pt[0] in range(10, 40) or max_pt[1] > 20:
        return False
    return max_pt 
Developer: Mingtzge, Project: 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement, Lines: 18, Source: split_img_generate_data.py
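
A hypothetical call to match_img (the image and template paths below are placeholders): the function returns the top-left coordinate of the best match, or False when the score is below the threshold or the location falls outside the expected band.

import cv2

# Hypothetical usage of match_img; file names are placeholders.
image = cv2.imread('id_card.png', cv2.IMREAD_GRAYSCALE)
template = cv2.imread('watermark.png', cv2.IMREAD_GRAYSCALE)
pt = match_img(image, template, 0.7)  # 0.7 is an assumed threshold
if pt:
    print('template found at', pt)
else:
    print('no acceptable match')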

Example 2: get_match_confidence

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def get_match_confidence(img1, img2, mask=None):
    if img1.shape != img2.shape:
        return False
    ## first try, using absdiff
    # diff = cv2.absdiff(img1, img2)
    # h, w, d = diff.shape
    # total = h*w*d
    # num = (diff<20).sum()
    # print 'is_match', total, num
    # return num > total*0.90
    if mask is not None:
        img1 = img1.copy()
        img1[mask != 0] = 0
        img2 = img2.copy()
        img2[mask != 0] = 0
    ## using match
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    # print confidence
    return confidence 
Developer: NetEaseGame, Project: ATX, Lines: 22, Source: scene_detector.py
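
A hypothetical usage of get_match_confidence: the two crops must have identical shapes, and the optional mask zeroes out pixels to ignore in both images before matching (file names are placeholders).

import cv2

# Hypothetical usage of get_match_confidence; the two crops must share the same shape.
img1 = cv2.imread('expected_scene.png')
img2 = cv2.imread('current_scene.png')
confidence = get_match_confidence(img1, img2)
if confidence is not False and confidence > 0.9:  # assumed acceptance threshold
    print('scenes match with confidence %.3f' % confidence)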

Example 3: __apply_template_matching

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def __apply_template_matching(angle, template, image):
    # Rotate the template
    template_rotated = __rotate_image_size_corrected(template, angle)

    # Apply template matching
    image_templated = cv2.matchTemplate(image, template_rotated, cv2.TM_CCOEFF_NORMED)

    # Correct template matching image size difference
    template_rotated_height, template_rotated_width = template_rotated.shape
    template_half_height = template_rotated_height // 2
    template_half_width = template_rotated_width // 2

    image_templated_inrange_size_corrected = cv2.copyMakeBorder(image_templated, template_half_height, template_half_height, template_half_width, template_half_width, cv2.BORDER_CONSTANT, value=0)

    # Calculate maximum match coefficient
    max_match = numpy.max(image_templated_inrange_size_corrected)

    return (max_match, angle, template_rotated, image_templated_inrange_size_corrected) 
Developer: microsoft, Project: AI-Robot-Challenge-Lab, Lines: 20, Source: cv_detection_right_hand.py

Example 4: compare

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def compare(i, j, img):
    for x in range(lenX):
        if x < i:
            continue
        for y in range(lenY):
            if x <= i and y < j:
                continue
            z = mat[x][y]
            # Image similarity
            y1 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            z1 = cv2.cvtColor(z, cv2.COLOR_BGR2GRAY)
            # image_difference = get_image_difference(y1, z1)
            res = cv2.matchTemplate(z1, y1, cv2.TM_CCOEFF_NORMED)
            # print(i, j, x, y, image_difference)
            print(i, j, x, y, res)
            # if abs(image_difference-1) > 0.5:
            # if image_difference < 0.1:
            #     pairs.append((i, j, x, y, image_difference))
            if res[0][0] >= 0.8:  # and (i != x and j != y): 0.9 works better
                if i == x and j == y:
                    continue
                pairs.append((i, j, x, y, res[0][0]))
        print('--------') 
Developer: makelove, Project: OpenCV-Python-Tutorial, Lines: 25, Source: compare_photos.py

Example 5: findAllMatches

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def findAllMatches(self, needle, similarity):
        """ Find all matches for ``needle`` with confidence better than or equal to ``similarity``.

        Returns an array of tuples ``(position, confidence)`` if match(es) is/are found,
        or an empty array otherwise.
        """
        positions = []
        method = cv2.TM_CCOEFF_NORMED

        match = cv2.matchTemplate(self.haystack, self.needle, method)

        indices = (-match).argpartition(100, axis=None)[:100] # Review the 100 top matches
        unraveled_indices = numpy.array(numpy.unravel_index(indices, match.shape)).T
        for location in unraveled_indices:
            y, x = location
            confidence = match[y][x]
            if method == cv2.TM_SQDIFF_NORMED or method == cv2.TM_SQDIFF:
                if confidence <= 1-similarity:
                    positions.append(((x, y), confidence))
            else:
                if confidence >= similarity:
                    positions.append(((x, y), confidence))

        positions.sort(key=lambda x: (x[0][1], x[0][0]))
        return positions 
Developer: glitchassassin, Project: lackey, Lines: 27, Source: TemplateMatchers.py

Example 6: cal_rgb_confidence

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def cal_rgb_confidence(img_src_rgb, img_sch_rgb):
    """同大小彩圖計算相似度."""
    # BGR三通道心理學權重:
    weight = (0.114, 0.587, 0.299)
    src_bgr, sch_bgr = cv2.split(img_src_rgb), cv2.split(img_sch_rgb)

    # Compute the confidence for each of the B, G, R channels and store it in bgr_confidence:
    bgr_confidence = [0, 0, 0]
    for i in range(3):
        res_temp = cv2.matchTemplate(src_bgr[i], sch_bgr[i], cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res_temp)
        bgr_confidence[i] = max_val

    # Weighted confidence
    weighted_confidence = bgr_confidence[0] * weight[0] + bgr_confidence[1] * weight[1] + bgr_confidence[2] * weight[2]

    return weighted_confidence 
Developer: AirtestProject, Project: Airtest, Lines: 19, Source: cal_confidence.py
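
A hypothetical usage sketch for cal_rgb_confidence: both arguments must be BGR images of identical size, since the function splits them into three channels and matches channel by channel (file names are placeholders).

import cv2

# Hypothetical usage of cal_rgb_confidence; both images must be BGR and the same size.
img_src = cv2.imread('screenshot_crop.png')
img_sch = cv2.imread('target_icon.png')
img_sch = cv2.resize(img_sch, (img_src.shape[1], img_src.shape[0]))  # enforce equal size
score = cal_rgb_confidence(img_src, img_sch)
print('weighted BGR confidence: %.3f' % score)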

Example 7: exists

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def exists(frame, template, thresh):
        """
        Returns True if 'template' is in 'frame' with probability of at least 'thresh'
        :param frame: A frame
        :param template: An image to search in 'frame'.
        :param thresh: The minimum probability required to accept template.
        :return: If template is in frame
        """

        digit_res = cv2.matchTemplate(frame, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(digit_res >= thresh)

        if len(loc[-1]) == 0:
            return False

        for pt in zip(*loc[::-1]):
            if digit_res[pt[1]][pt[0]] == 1:
                return False

        return True 
Developer: shahar603, Project: SpaceXtract, Lines: 22, Source: general_extract.py

Example 8: most_probably_template

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def most_probably_template(image, templates):
        """
        Get the index of the template(in the templates list) which is most likely to be in the image.


        :param image: Image that contain the template
        :param templates: A list of templates to search in image
        :return: the index (in templates) which has the highest probability of being in  image
        """
        probability_list = []

        for template in templates:
            res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
            probability_list.append(float(np.max(res)))

        return probability_list.index(max(probability_list)) 
Developer: shahar603, Project: SpaceXtract, Lines: 18, Source: general_extract.py

Example 9: main

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def main():
    src = cv2.imread('src.jpg', cv2.IMREAD_GRAYSCALE)
    tpl = cv2.imread('tpl.jpg', cv2.IMREAD_GRAYSCALE)
    result = cv2.matchTemplate(src, tpl, cv2.TM_CCOEFF_NORMED)
    result = cv2.normalize(result, dst=None, alpha=0, beta=1,
                           norm_type=cv2.NORM_MINMAX, dtype=-1)
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
    matchLoc = maxLoc
    draw1 = cv2.rectangle(
        src, matchLoc, (matchLoc[0] + tpl.shape[1], matchLoc[1] + tpl.shape[0]), 0, 2, 8, 0)
    draw2 = cv2.rectangle(
        result, matchLoc, (matchLoc[0] + tpl.shape[1], matchLoc[1] + tpl.shape[0]), 0, 2, 8, 0)
    cv2.imshow('draw1', draw1)
    cv2.imshow('draw2', draw2)
    cv2.waitKey(0)
    print(src.shape)
    print(tpl.shape)
    print(result.shape)
    print(matchLoc)
Developer: cynricfu, Project: dual-fisheye-video-stitching, Lines: 21, Source: template_matching.py

Example 10: find_game_position

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def find_game_position(self, threshold) -> Dict:
        monitor = self.shooter.monitors[0]
        buffer = self.shooter.grab(monitor)
        image = Image.frombytes('RGB', buffer.size, buffer.rgb).convert('L')
        image = np.array(image)
        dino_template = cv2.imread(os.path.join('templates', 'dino.png'), 0)
        res = cv2.matchTemplate(image, dino_template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= threshold)
        if len(loc[0]) == 0:
            dino_template = cv2.imread(os.path.join('templates', 'dino2.png'), 0)
            res = cv2.matchTemplate(image, dino_template, cv2.TM_CCOEFF_NORMED)
            loc = np.where(res >= threshold)
        if len(loc[0]):
            pt = next(zip(*loc[::-1]))
            w, h = dino_template.shape[::-1]
            lw, lh = self.landscape_template.shape[::-1]
            return dict(monitor, height=lh, left=pt[0], top=pt[1] - lh + h, width=lw)
        return {} 
Developer: pauloalves86, Project: go_dino, Lines: 20, Source: dino_api.py

Example 11: imagesearcharea

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def imagesearcharea(image, x1, y1, x2, y2, precision=0.8, im=None):
    if im is None:
        im = region_grabber(region=(x1, y1, x2, y2))
        if is_retina:
            im.thumbnail((round(im.size[0] * 0.5), round(im.size[1] * 0.5)))
        # im.save('testarea.png')  # useful for debugging; this saves the captured region as "testarea.png"

    img_rgb = np.array(im)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread(image, 0)

    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if max_val < precision:
        return [-1, -1]
    return max_loc 
Developer: drov0, Project: python-imagesearch, Lines: 18, Source: imagesearch.py

Example 12: imagesearch_count

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def imagesearch_count(image, precision=0.9):
    img_rgb = pyautogui.screenshot()
    if is_retina:
        img_rgb.thumbnail((round(img_rgb.size[0] * 0.5), round(img_rgb.size[1] * 0.5)))
    img_rgb = np.array(img_rgb)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread(image, 0)
    w, h = template.shape[::-1]
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= precision)
    count = 0
    for pt in zip(*loc[::-1]):  # Swap columns and rows
        # cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)  # uncomment to draw boxes around found occurrences
        count += 1
    # cv2.imwrite('result.png', img_rgb)  # uncomment to write an output image with boxes drawn around occurrences
    return count 
Developer: drov0, Project: python-imagesearch, Lines: 18, Source: imagesearch.py

Example 13: matchAB

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def matchAB(fileA, fileB):
    # Read the two images
    imgA = cv2.imread(fileA)
    imgB = cv2.imread(fileB)

    # Convert to grayscale
    grayA = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY)
    grayB = cv2.cvtColor(imgB, cv2.COLOR_BGR2GRAY)

    # Get the size of image A
    height, width = grayA.shape

    # Take local windows from A and search for the matching position in B
    result_window = np.zeros((height, width), dtype=imgA.dtype)
    for start_y in range(0, height-100, 10):
        for start_x in range(0, width-100, 10):
            window = grayA[start_y:start_y+100, start_x:start_x+100]
            match = cv2.matchTemplate(grayB, window, cv2.TM_CCOEFF_NORMED)
            _, _, _, max_loc = cv2.minMaxLoc(match)
            matched_window = grayB[max_loc[1]:max_loc[1]+100, max_loc[0]:max_loc[0]+100]
            result = cv2.absdiff(window, matched_window)
            result_window[start_y:start_y+100, start_x:start_x+100] = result

    plt.imshow(result_window)
    plt.show() 
Developer: cangyan, Project: image-detect, Lines: 27, Source: image_detect_02.py
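
matchAB takes two file paths, slides 100x100 windows of image A over image B in 10-pixel steps, and displays the per-window absolute difference with matplotlib; note that the snippet above also relies on import numpy as np and import matplotlib.pyplot as plt being present in its source module. A hypothetical call (placeholder file names):

# Hypothetical call; 'before.jpg' and 'after.jpg' are placeholder file names.
matchAB('before.jpg', 'after.jpg')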

Example 14: multi_scale_search

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def multi_scale_search(pivot, screen, range=0.3, num=10):
    H, W = screen.shape[:2]
    h, w = pivot.shape[:2]

    found = None
    for scale in np.linspace(1-range, 1+range, num)[::-1]:
        resized = cv2.resize(screen, (int(W * scale), int(H * scale)))
        r = W / float(resized.shape[1])
        if resized.shape[0] < h or resized.shape[1] < w:
            break
        res = cv2.matchTemplate(resized, pivot, cv2.TM_CCOEFF_NORMED)

        loc = np.where(res >= res.max())
        pos_h, pos_w = list(zip(*loc))[0]

        if found is None or res.max() > found[-1]:
            found = (pos_h, pos_w, r, res.max())

    if found is None:
        return (0, 0, 0, 0, 0)
    pos_h, pos_w, r, score = found
    start_h, start_w = int(pos_h * r), int(pos_w * r)
    end_h, end_w = int((pos_h + h) * r), int((pos_w + w) * r)
    return [start_h, start_w, end_h, end_w, score] 
Developer: Prinsphield, Project: Wechat_AutoJump, Lines: 25, Source: nn_play.py
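
A hypothetical call to multi_scale_search: it rescales the screen image through num sizes within ±range of the original, matches pivot against each, and maps the best hit back to the original resolution, returning [start_h, start_w, end_h, end_w, score]. The file names and the 0.8 acceptance threshold below are assumptions.

import cv2

# Hypothetical usage of multi_scale_search; file names are placeholders.
screen = cv2.imread('screen.png')
pivot = cv2.imread('button.png')
start_h, start_w, end_h, end_w, score = multi_scale_search(pivot, screen, range=0.3, num=10)
if score > 0.8:  # assumed acceptance threshold
    center = ((start_w + end_w) // 2, (start_h + end_h) // 2)
    print('match centered at (x=%d, y=%d) with score %.3f' % (center[0], center[1], score))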

Example 15: find_address

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCOEFF_NORMED [as alias]
def find_address(crop_gray, crop_org):
        template = cv2.UMat(cv2.imread('address_mask_%s.jpg'%pixel_x, 0))
        # showimg(template)
        #showimg(crop_gray)
        w, h = cv2.UMat.get(template).shape[::-1]
        #t1 = round(time.time()*1000)
        res = cv2.matchTemplate(crop_gray, template, cv2.TM_CCOEFF_NORMED)
        #t2 = round(time.time()*1000)
        #print 'time:%s'%(t2-t1)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        top_left = (max_loc[0] + w, max_loc[1] - int(20*x))
        bottom_right = (top_left[0] + int(1700*x), top_left[1] + int(550*x))
        result = cv2.UMat.get(crop_org)[top_left[1]-10:bottom_right[1], top_left[0]-10:bottom_right[0]]
        cv2.rectangle(crop_gray, top_left, bottom_right, 255, 2)
        #showimg(crop_gray)
        return cv2.UMat(result) 
Developer: Raymondhhh90, Project: idcardocr, Lines: 18, Source: idcardocr.py


Note: The cv2.TM_CCOEFF_NORMED attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors; copyright of the source code remains with those authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.