

Python cv2.TM_CCORR_NORMED Attribute Code Examples

This article collects typical usage examples of the cv2.TM_CCORR_NORMED attribute in Python. If you are wondering what cv2.TM_CCORR_NORMED is used for or how to use it, the selected code examples below may help. You can also explore further usage examples from the cv2 module that this attribute belongs to.


The following presents 7 code examples of the cv2.TM_CCORR_NORMED attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
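Before diving into the collected examples, here is a minimal, self-contained sketch of the usage pattern they all share: match a template against an image with cv2.matchTemplate using cv2.TM_CCORR_NORMED, then take the maximum of the result with cv2.minMaxLoc, since higher normalized cross-correlation scores indicate better matches. The file names below are placeholders.

import cv2

# Placeholder inputs; substitute your own image and template.
img = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)
templ = cv2.imread('template.png', cv2.IMREAD_GRAYSCALE)

# Normalized cross-correlation: values close to 1.0 indicate a strong match.
result = cv2.matchTemplate(img, templ, cv2.TM_CCORR_NORMED)
_, max_val, _, max_loc = cv2.minMaxLoc(result)

# Top-left corner of the best match and its bounding box.
h, w = templ.shape[:2]
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
print('confidence: %.3f, bbox: %s -> %s' % (max_val, top_left, bottom_right))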

Example 1: match_dmg_templates

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCORR_NORMED [as alias]
def match_dmg_templates(self, frame):
        match_mat, max_val, tl = [None]*10, [0]*10, [(0, 0)]*10
        # Match each of the ten digit templates (0-9) against the frame,
        # recording the confidence and top-left location of each match.
        for i in range(0, 10):
            match_mat[i] = cv2.matchTemplate(frame, self.num_img[i],
                cv2.TM_CCORR_NORMED, mask=self.num_mask[i])
            _, max_val[i], _, tl[i] = cv2.minMaxLoc(match_mat[i])
        # print(max_val[0])
        # Draw the bounding box of the digit-0 match for visual debugging.
        br = (tl[0][0] + self.num_w, tl[0][1] + self.num_h)
        frame = cv2.rectangle(frame, tl[0], br, (255, 255, 255), 2)

        # Multi-template result searching
        # _, max_val_1, _, tl_1 = cv2.minMaxLoc(np.array(match_mat))
        # print(tl_1)


    # A number of methods corresponding to the various trackbars available. 
Developer: jpnaterer, Project: smashscan, Lines of code: 18, Source file: thresholding.py
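The commented-out "Multi-template result searching" lines above hint at selecting the best digit across all ten match matrices at once, which cv2.minMaxLoc on a plain Python list cannot do directly. A minimal sketch of one way to do it is shown below; the stacking approach is an assumption and not part of the original project, and masked TM_CCORR_NORMED results may contain non-finite values that need suppressing first.

import numpy as np

# Stack the ten match matrices (assumed to share one shape) and take the
# global maximum; the first index identifies the best-matching digit.
stacked = np.nan_to_num(np.stack(match_mat), posinf=0.0, neginf=0.0)
digit, y, x = np.unravel_index(np.argmax(stacked), stacked.shape)
best_conf, best_tl = stacked[digit, y, x], (x, y)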

Example 2: MatchingMethod

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCORR_NORMED [as alias]
def MatchingMethod(param):

   global match_method
   match_method = param

   ## [copy_source]
   img_display = img.copy()
   ## [copy_source]
   ## [match_template]
   method_accepts_mask = (cv2.TM_SQDIFF == match_method or match_method == cv2.TM_CCORR_NORMED)
   if (use_mask and method_accepts_mask):
       result = cv2.matchTemplate(img, templ, match_method, None, mask)
   else:
       result = cv2.matchTemplate(img, templ, match_method)
   ## [match_template]

   ## [normalize]
   cv2.normalize( result, result, 0, 1, cv2.NORM_MINMAX, -1 )
   ## [normalize]
   ## [best_match]
   minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result, None)
   ## [best_match]

   ## [match_loc]
   if (match_method == cv2.TM_SQDIFF or match_method == cv2.TM_SQDIFF_NORMED):
       matchLoc = minLoc
   else:
       matchLoc = maxLoc
   ## [match_loc]

   ## [imshow]
   # Note: numpy shape is (rows, cols), so width = shape[1] and height = shape[0].
   cv2.rectangle(img_display, matchLoc, (matchLoc[0] + templ.shape[1], matchLoc[1] + templ.shape[0]), (0,0,0), 2, 8, 0)
   cv2.rectangle(result, matchLoc, (matchLoc[0] + templ.shape[1], matchLoc[1] + templ.shape[0]), (0,0,0), 2, 8, 0)
   cv2.imshow(image_window, img_display)
   cv2.imshow(result_window, result)
   ## [imshow]
   pass 
Developer: makelove, Project: OpenCV-Python-Tutorial, Lines of code: 39, Source file: match_template.py
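MatchingMethod is the trackbar callback from the OpenCV template-matching tutorial; it is not called directly but registered on a trackbar. A minimal sketch of how it could be wired up follows; the file names and the globals img, templ, use_mask, mask, image_window and result_window are assumptions inferred from the function body.

import cv2

img = cv2.imread('scene.png')        # placeholder input image
templ = cv2.imread('template.png')   # placeholder template
use_mask, mask = False, None

image_window, result_window = 'Source Image', 'Result window'
cv2.namedWindow(image_window, cv2.WINDOW_AUTOSIZE)
cv2.namedWindow(result_window, cv2.WINDOW_AUTOSIZE)

# Trackbar positions 0..5 map to the six matching methods
# (TM_SQDIFF ... TM_CCOEFF_NORMED); the callback runs on every slider move.
cv2.createTrackbar('Method', image_window, 0, 5, MatchingMethod)
MatchingMethod(0)
cv2.waitKey(0)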

Example 3: getRefCoordinate

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCORR_NORMED [as alias]
def getRefCoordinate(self, image, template):
        # Several candidate methods were tried; the last uncommented assignment is the one used.
#        method = cv2.TM_SQDIFF                     #2
        method = cv2.TM_SQDIFF_NORMED              #1
#        method = cv2.TM_CCORR_NORMED                #3
        method = cv2.TM_CCOEFF_NORMED                #4
        res = cv2.matchTemplate(image, template, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
#        bottom_right = (top_left[0] + w, top_left[1] + h)
        return top_left 
Developer: jomjol, Project: water-meter-system-complete, Lines of code: 16, Source file: CutImageClass.py

Example 4: get_calibrate_results

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCORR_NORMED [as alias]
def get_calibrate_results(self, frame):
        h, w = self.orig_pct_img.shape[:2]
        opt_max_val, opt_top_left, opt_w, opt_h = 0, 0, 0, 0

        # Assuming W-360p (640×360), only search the bottom of the frame.
        frame = frame[270:, :]

        # Iterate over a num. of widths, and rescale the img/mask accordingly.
        for new_w in range(self.calib_w_range[0], self.calib_w_range[1]):
            new_h = int(new_w * h / w)
            pct_img = cv2.resize(self.orig_pct_img, (new_w, new_h))
            pct_mask = cv2.resize(self.orig_pct_mask, (new_w, new_h))

            # Calculate the confidence and location of the current rescale.
            match_mat = cv2.matchTemplate(frame, pct_img,
                cv2.TM_CCORR_NORMED, mask=pct_mask)
            _, max_val, _, top_left = cv2.minMaxLoc(match_mat)

            # Store the results if the confidence is larger than the previous.
            if max_val > opt_max_val:
                opt_max_val, opt_top_left = max_val, top_left
                opt_w, opt_h = new_w, new_h

        # Compensate for point location for the ROI that was used.
        opt_top_left = (opt_top_left[0], opt_top_left[1] + 270)

        # Format the bounding box and return.
        bbox = (opt_top_left, (opt_top_left[0]+opt_w, opt_top_left[1]+opt_h))
        return bbox, opt_max_val, opt_w, opt_h


    # Given a list of expected widths, return the optimal dimensions of the
    # template bounding box by calculating the median of the list. 
Developer: jpnaterer, Project: smashscan, Lines of code: 35, Source file: percent_matching.py
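The trailing comment refers to a follow-up helper that is not part of this excerpt. A minimal sketch of the median computation it describes is given below; the function name, signature, and the exact handling of heights are assumptions.

import numpy as np

def get_median_template_dims(width_list, orig_w, orig_h):
    # Median of the accepted widths; the height follows the original aspect ratio.
    opt_w = int(np.median(width_list))
    opt_h = int(opt_w * orig_h / orig_w)
    return opt_w, opt_h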

Example 5: find_from_targeted

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCORR_NORMED [as alias]
def find_from_targeted(self, left, right):

        # @TODO ignore red target - it is attacked and dead
        template = cv2.imread('img/template_target.png', 0)

        # print template.shape
        roi = get_screen(
            self.window_info["x"],
            self.window_info["y"],
            self.window_info["x"] + self.window_info["width"],
            self.window_info["y"] + self.window_info["height"] - 300
        )

        roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        ret, th1 = cv2.threshold(roi, 224, 255, cv2.THRESH_TOZERO_INV)
        ret, th2 = cv2.threshold(th1, 135, 255, cv2.THRESH_BINARY)
        ret, tp1 = cv2.threshold(template, 224, 255, cv2.THRESH_TOZERO_INV)
        ret, tp2 = cv2.threshold(tp1, 135, 255, cv2.THRESH_BINARY)
        if not hasattr(th2, 'shape'):
            return False
        # numpy .shape is (rows, cols); match only if the ROI exceeds the template size.
        wth, hth = th2.shape
        wtp, htp = tp2.shape
        if wth > wtp and hth > htp:
            res = cv2.matchTemplate(th2, tp2, cv2.TM_CCORR_NORMED)
            if res.any():
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
                if max_val > 0.7:
                    return True
                else:
                    return False
        return False 
Developer: maaaxim, Project: bot, Lines of code: 33, Source file: bot.py

Example 6: shiftDetection

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCORR_NORMED [as alias]
# This excerpt also relies on numpy (np), time, and the project's filterFunctions module.
def shiftDetection(filePath, imageList, activeImages, area, filterList, thread):

    largeDisp = np.zeros((len(imageList),2))

    initImage = cv2.imread(filePath+'/'+imageList[0].rstrip(), 0) #read the full image
    initImage = filterFunctions.applyFilterListToImage(filterList, initImage)
    nbImages = len(imageList)
    currentPercent = 1

    activeFileList = []
    for image in range(1, nbImages):
        if activeImages[image] == 1:
            activeFileList.append(image)

    template = initImage[area[1]:area[3],area[0]:area[2]] #select the template data
    width = area[2]-area[0]
    height = area[3]-area[1]

    origin = (area[0], area[1])
    startTime = time.time()
    for i in activeFileList:

        newImage = cv2.imread(filePath+'/'+imageList[i].rstrip(),0)
        newImage = filterFunctions.applyFilterListToImage(filterList, newImage)

        matchArea = cv2.matchTemplate(newImage, template, cv2.TM_CCORR_NORMED)
        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(matchArea)
        # The template for the next image is updated with the region found in the current picture.
        template = newImage[maxLoc[1]:maxLoc[1]+height, maxLoc[0]:maxLoc[0]+width]

        largeDisp[i][0] = maxLoc[0]-origin[0] #save the displacement
        largeDisp[i][1] = maxLoc[1]-origin[1]

        percent = i*100/nbImages
        if percent > currentPercent:
            thread.signal.threadSignal.emit([percent, i, largeDisp[i][0], largeDisp[i][1]])
            currentPercent = percent

    totalTime = time.time() - startTime
    thread.signal.threadSignal.emit([100, nbImages, largeDisp, totalTime])
    #print totalTime 
Developer: ChrisEberl, Project: Python_DIC, Lines of code: 42, Source file: newProcessCorrelations.py

Example 7: get_tm_results

# Required module: import cv2 [as alias]
# Or: from cv2 import TM_CCORR_NORMED [as alias]
def get_tm_results(self, frame, num_results, conf_thresh=None):

        # Only a specific subregion of the frame is analyzed. If the template
        # ROI has been initialized, take that frame subregion. Otherwise, take
        # the bottom quarter of the frame assuming a W-360p (640x360) format.
        if self.template_roi:
            frame = frame[self.template_roi[0][1]:self.template_roi[1][1], :]
        else:
            frame = frame[270:, :]

        # Set the confidence threshold to the default, if none was input.
        if conf_thresh is None:
            conf_thresh = self.conf_thresh

        # Match the template using a normalized cross-correlation method and
        # retrieve the confidence and top-left points from the result.
        match_mat = cv2.matchTemplate(frame, self.pct_img,
            cv2.TM_CCORR_NORMED, mask=self.pct_mask)
        conf_list, tl_list = self.get_match_results(
            match_mat, num_results, conf_thresh)

        # Compensate for point location for the used region of interest.
        if self.template_roi:
            for i, _ in enumerate(tl_list):
                tl_list[i] = (tl_list[i][0],
                    tl_list[i][1] + self.template_roi[0][1])
        else:
            for i, _ in enumerate(tl_list):
                tl_list[i] = (tl_list[i][0], tl_list[i][1] + 270)

        # Create a list of bounding boxes (top-left & bottom-right points),
        # using the input template_shape given as (width, height).
        bbox_list = list()
        h, w = self.pct_img.shape[:2]
        for tl in tl_list:
            br = (tl[0] + w, tl[1] + h)
            bbox_list.append((tl, br))

        return conf_list, bbox_list


    # Take the result of cv2.matchTemplate, and find the most likely locations
    # of a template match. To find multiple locations, the region around a
    # successful match is zeroed. Return a list of confidences and locations. 
Developer: jpnaterer, Project: smashscan, Lines of code: 46, Source file: percent_matching.py
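The trailing comment describes a helper (called get_match_results in the code above) that is not included in this excerpt. A minimal sketch of the zeroing approach it describes is shown below; the function name matches the call in get_tm_results, but the body and the suppression radius are assumptions.

import cv2
import numpy as np

def get_match_results(match_mat, num_results, conf_thresh):
    # Repeatedly take the best remaining match, then zero out a small
    # neighborhood around it so later iterations find new locations.
    match_mat = np.copy(match_mat)
    conf_list, tl_list = [], []
    h, w = match_mat.shape[:2]
    for _ in range(num_results):
        _, max_val, _, max_loc = cv2.minMaxLoc(match_mat)
        if max_val < conf_thresh:
            break
        conf_list.append(max_val)
        tl_list.append(max_loc)
        x, y = max_loc
        # Suppress an arbitrary-sized region around the accepted match.
        match_mat[max(0, y - 10):min(h, y + 10),
                  max(0, x - 10):min(w, x + 10)] = 0
    return conf_list, tl_list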


Note: The cv2.TM_CCORR_NORMED attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code, and do not repost without permission.