Python cv2.minMaxLoc Method Code Examples

This article compiles typical usage examples of the cv2.minMaxLoc method in Python. If you are wondering what cv2.minMaxLoc does, how to call it, or what working examples look like, the curated code samples below should help. You can also browse further usage examples from the cv2 module.


The following shows 15 code examples of the cv2.minMaxLoc method collected from open-source projects, ordered by popularity by default.
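
Before the examples, a quick orientation: cv2.minMaxLoc takes a single-channel array and returns its global minimum value, maximum value, and their (x, y) locations. The minimal sketch below pairs it with cv2.matchTemplate, which is how almost every example in this article uses it; the image paths are placeholders.

import cv2

# Load a source image and a template as grayscale (hypothetical file names).
source = cv2.imread('source.png', cv2.IMREAD_GRAYSCALE)
template = cv2.imread('template.png', cv2.IMREAD_GRAYSCALE)

# matchTemplate yields a single-channel response map; minMaxLoc reads its
# global extrema and their locations in one call.
res = cv2.matchTemplate(source, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
print(max_val, max_loc)  # best score and the top-left corner of the best match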

Example 1: match_img

# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def match_img(image, template, value):
    """
    :param image: the image to search in
    :param template: the template image
    :param value: the matching threshold
    :return: the watermark coordinates
    Description: finds the location of the template within the image; used to calibrate element positions.
    """
    res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    threshold = value
    min_v, max_v, min_pt, max_pt = cv2.minMaxLoc(res)
    if max_v < threshold:
        return False
    if max_pt[0] not in range(10, 40) or max_pt[1] > 20:
        return False
    return max_pt
Author: Mingtzge, Project: 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement, Lines: 18, Source: split_img_generate_data.py
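
A hedged usage sketch for match_img: it assumes the scanned crop and the watermark template are loaded as arrays of the same type, and the file names and the 0.75 threshold are placeholders rather than values from the original project.

import cv2

page = cv2.imread('id_card_crop.png', cv2.IMREAD_GRAYSCALE)             # hypothetical input image
watermark = cv2.imread('watermark_template.png', cv2.IMREAD_GRAYSCALE)  # hypothetical template

pt = match_img(page, watermark, 0.75)
if pt:
    print('watermark top-left at', pt)
else:
    print('no watermark found within the expected region')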

Example 2: get_match_confidence

# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def get_match_confidence(img1, img2, mask=None):
    if img1.shape != img2.shape:
        return False
    ## first try, using absdiff
    # diff = cv2.absdiff(img1, img2)
    # h, w, d = diff.shape
    # total = h*w*d
    # num = (diff<20).sum()
    # print 'is_match', total, num
    # return num > total*0.90
    if mask is not None:
        img1 = img1.copy()
        img1[mask!=0] = 0
        img2 = img2.copy()
        img2[mask!=0] = 0
    ## using match
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    # print confidence
    return confidence 
Author: NetEaseGame, Project: ATX, Lines: 22, Source: scene_detector.py
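
A hedged usage sketch for get_match_confidence, assuming two equally sized BGR crops; the file names and the masked banner region are made up for illustration.

import cv2
import numpy as np

expected = cv2.imread('button_expected.png')  # hypothetical reference crop
actual = cv2.imread('button_actual.png')      # hypothetical screenshot crop of the same size

# Optional mask: non-zero pixels are zeroed in both images before matching,
# e.g. to ignore a changing 10-pixel banner at the top.
mask = np.zeros(expected.shape[:2], dtype=np.uint8)
mask[:10, :] = 255

print(get_match_confidence(expected, actual, mask=mask))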

Example 3: probability

# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def probability(self, im: str) -> float:
        """
        Return the probability of the existence of given image.

        :param im: the name of the image.
        :return: the probability (confidence).
        """
        assert self.screen is not None
        try:
            template = self.images[im]
        except KeyError:
            logger.error('Unexpected image name {}'.format(im))
            return 0.0

        res = cv.matchTemplate(self.screen, template, TM_METHOD)
        _, max_val, _, max_loc = cv.minMaxLoc(res)
        logger.debug('max_val = {}, max_loc = {}'.format(max_val, max_loc))
        return max_val 
Author: will7101, Project: fgo-bot, Lines: 20, Source: tm.py

Example 4: find

# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def find(self, im: str, threshold: float = None) -> Tuple[int, int]:
        """
        Find the template image on screen and return its top-left coords.

        Return (-1, -1) if the matching value is less than `threshold`.

        :param im: the name of the image
        :param threshold: the threshold of matching. If not given, will be set to the default threshold.
        :return: the top-left coords of the result. Return (-1, -1) if not found.
        """
        threshold = threshold or self.threshold

        assert self.screen is not None
        try:
            template = self.images[im]
        except KeyError:
            logger.error('Unexpected image name {}'.format(im))
            return -1, -1

        res = cv.matchTemplate(self.screen, template, TM_METHOD)
        _, max_val, _, max_loc = cv.minMaxLoc(res)
        logger.debug('max_val = {}, max_loc = {}'.format(max_val, max_loc))
        return max_loc if max_val >= threshold else (-1, -1) 
Author: will7101, Project: fgo-bot, Lines: 25, Source: tm.py

Example 5: cal_rgb_confidence

# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def cal_rgb_confidence(img_src_rgb, img_sch_rgb):
    """Compute the similarity between two color images of the same size."""
    # Perceptual weights for the B, G, R channels:
    weight = (0.114, 0.587, 0.299)
    src_bgr, sch_bgr = cv2.split(img_src_rgb), cv2.split(img_sch_rgb)

    # Compute the confidence for each of the B, G, R channels and store it in bgr_confidence:
    bgr_confidence = [0, 0, 0]
    for i in range(3):
        res_temp = cv2.matchTemplate(src_bgr[i], sch_bgr[i], cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res_temp)
        bgr_confidence[i] = max_val

    # Weighted confidence
    weighted_confidence = bgr_confidence[0] * weight[0] + bgr_confidence[1] * weight[1] + bgr_confidence[2] * weight[2]

    return weighted_confidence
Author: AirtestProject, Project: Airtest, Lines: 19, Source: cal_confidence.py
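
The (0.114, 0.587, 0.299) tuple is the standard luma weighting in B, G, R order. A hedged usage sketch, assuming two same-size BGR crops; the file paths are placeholders.

import cv2

crop_a = cv2.imread('expected.png')  # hypothetical BGR crop
crop_b = cv2.imread('observed.png')  # hypothetical BGR crop of the same size

# Weighted sum of the per-channel TM_CCOEFF_NORMED peaks.
print(cal_rgb_confidence(crop_a, crop_b))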

Example 6: find_template

# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def find_template(im_source, im_search, threshold=0.8, rgb=False):
    """Find the best matching result."""
    # Step 1: validate the input images
    check_source_larger_than_search(im_source, im_search)
    # Step 2: compute the template matching result matrix res
    res = _get_template_result_matrix(im_source, im_search)
    # Step 3: extract the matching result
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    h, w = im_search.shape[:2]
    # Compute the confidence:
    confidence = _get_confidence_from_matrix(im_source, im_search, max_loc, max_val, w, h, rgb)
    # Compute the match location: target center + target rectangle:
    middle_point, rectangle = _get_target_rectangle(max_loc, w, h)
    best_match = generate_result(middle_point, rectangle, confidence)
    LOGGING.debug("threshold=%s, result=%s" % (threshold, best_match))
    return best_match if confidence >= threshold else None
Author: AirtestProject, Project: Airtest, Lines: 18, Source: template.py

Example 7: match_dmg_templates

# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def match_dmg_templates(self, frame):
        match_mat, max_val, tl = [None]*10, [0]*10, [(0, 0)]*10
        for i in range(0, 10):
            match_mat[i] = cv2.matchTemplate(frame, self.num_img[0],
                cv2.TM_CCORR_NORMED, mask=self.num_mask[0])
            _, max_val[i], _, tl[i] = cv2.minMaxLoc(match_mat[i])
        # print(max_val[0])
        br = (tl[0][0] + self.num_w, tl[0][1] + self.num_h)
        frame = cv2.rectangle(frame, tl[0], br, (255, 255, 255), 2)

        # Multi-template result searching
        # _, max_val_1, _, tl_1 = cv2.minMaxLoc(np.array(match_mat))
        # print(tl_1)


    # A number of methods corresponding to the various trackbars available. 
Author: jpnaterer, Project: smashscan, Lines: 18, Source: thresholding.py

Example 8: main

# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def main():
    src = cv2.imread('src.jpg', cv2.IMREAD_GRAYSCALE)
    tpl = cv2.imread('tpl.jpg', cv2.IMREAD_GRAYSCALE)
    result = cv2.matchTemplate(src, tpl, cv2.TM_CCOEFF_NORMED)
    result = cv2.normalize(result, dst=None, alpha=0, beta=1,
                           norm_type=cv2.NORM_MINMAX, dtype=-1)
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
    matchLoc = maxLoc
    draw1 = cv2.rectangle(
        src, matchLoc, (matchLoc[0] + tpl.shape[1], matchLoc[1] + tpl.shape[0]), 0, 2, 8, 0)
    draw2 = cv2.rectangle(
        result, matchLoc, (matchLoc[0] + tpl.shape[1], matchLoc[1] + tpl.shape[0]), 0, 2, 8, 0)
    cv2.imshow('draw1', draw1)
    cv2.imshow('draw2', draw2)
    cv2.waitKey(0)
    print(src.shape)
    print(tpl.shape)
    print(result.shape)
    print(matchLoc)
Author: cynricfu, Project: dual-fisheye-video-stitching, Lines: 21, Source: template_matching.py

Example 9: detect

# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def detect(self, z, x):
        k = self.gaussianCorrelation(x, z)
        # Get the response map
        res = real(fftd(complexMultiplication(self._alphaf, fftd(k)), True))

        # pv: the maximum response value; pi: the index (location) of the maximum
        _, pv, _, pi = cv2.minMaxLoc(res)
        # Represent the peak location as floats
        p = [float(pi[0]), float(pi[1])]

        # Refine the peak location from the neighbouring response values (subpixel interpolation)
        if pi[0] > 0 and pi[0] < res.shape[1] - 1:
            p[0] += self.subPixelPeak(res[pi[1], pi[0] - 1], pv, res[pi[1], pi[0] + 1])
        if pi[1] > 0 and pi[1] < res.shape[0] - 1:
            p[1] += self.subPixelPeak(res[pi[1] - 1, pi[0]], pv, res[pi[1] + 1, pi[0]])

        # Compute the displacement from the sample center
        p[0] -= res.shape[1] / 2.
        p[1] -= res.shape[0] / 2.

        # Return the displacement from the sample center and the peak value
        return p, pv

    # Update the target position based on the current frame
Author: ryanfwy, Project: KCF-DSST-py, Lines: 26, Source: tracker.py
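
The subPixelPeak helper used above is not part of this snippet. In KCF/DSST-style trackers it is usually the one-dimensional parabolic interpolation sketched below (a common pattern, not necessarily the exact code of this project): fit a parabola through three neighbouring response values and return the fractional offset of its vertex from the center sample.

def subPixelPeak(left, center, right):
    # Vertex of the parabola through (-1, left), (0, center), (+1, right),
    # expressed as an offset from the center sample.
    divisor = 2 * center - right - left
    if divisor == 0:
        return 0.0
    return 0.5 * (right - left) / divisor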

Example 10: detect_scale

# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def detect_scale(self, image):
        xsf = self.get_scale_sample(image)

        # Compute AZ in the paper
        add_temp = cv2.reduce(complexMultiplication(self.sf_num, xsf), 0, cv2.REDUCE_SUM)

        # compute the final y
        scale_response = cv2.idft(complexDivisionReal(add_temp, (self.sf_den + self.scale_lambda)), None, cv2.DFT_REAL_OUTPUT)

        # Get the max point as the final scaling rate
        # pv: the maximum response value; pi: the index (location) of the maximum
        _, pv, _, pi = cv2.minMaxLoc(scale_response)

        return pi

    # Update the scale
Author: ryanfwy, Project: KCF-DSST-py, Lines: 18, Source: tracker.py

Example 11: imagesearcharea

# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def imagesearcharea(image, x1, y1, x2, y2, precision=0.8, im=None):
    if im is None:
        im = region_grabber(region=(x1, y1, x2, y2))
        if is_retina:
            im.thumbnail((round(im.size[0] * 0.5), round(im.size[1] * 0.5)))
        # im.save('testarea.png') is useful for debugging purposes; it saves the captured region as "testarea.png"

    img_rgb = np.array(im)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread(image, 0)

    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if max_val < precision:
        return [-1, -1]
    return max_loc 
Author: drov0, Project: python-imagesearch, Lines: 18, Source: imagesearch.py
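
A hedged usage sketch for imagesearcharea, assuming the surrounding python-imagesearch helpers (region_grabber, is_retina) are importable; the template path and the screen region are placeholders.

# Search for 'button.png' inside the screen region from (0, 0) to (800, 600).
pos = imagesearcharea('button.png', 0, 0, 800, 600, precision=0.8)
if pos[0] != -1:
    print('template found at', pos)
else:
    print('template not found at the requested precision')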

Example 12: locate_img

# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def locate_img(image, template):
    img = image.copy()
    res = cv2.matchTemplate(img, template, method)
    print(res)
    print(res.shape)
    cv2.imwrite('image/shape.png', res)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print(cv2.minMaxLoc(res))
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    h, w = template.shape
    bottom_right = (top_left[0] + w, top_left[1]+h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    cv2.imwrite('image/tt.jpg', img) 
Author: NetEase, Project: airtest, Lines: 18, Source: pixelmatch.py

Example 13: getKeypoints

# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def getKeypoints(probMap, threshold=0.1):

    mapSmooth = cv2.GaussianBlur(probMap, (3, 3), 0, 0)
    mapMask = np.uint8(mapSmooth>threshold)
    keypoints = []
    contours = None
    try:
        # OpenCV 4.x returns (contours, hierarchy)
        contours, _ = cv2.findContours(mapMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    except ValueError:
        # OpenCV 3.x returns (image, contours, hierarchy)
        _, contours, _ = cv2.findContours(mapMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    for cnt in contours:
        blobMask = np.zeros(mapMask.shape)
        blobMask = cv2.fillConvexPoly(blobMask, cnt, 1)
        maskedProbMap = mapSmooth * blobMask
        _, maxVal, _, maxLoc = cv2.minMaxLoc(maskedProbMap)
        keypoints.append(maxLoc + (probMap[maxLoc[1], maxLoc[0]],))

    return keypoints 
Author: PINTO0309, Project: MobileNetV2-PoseEstimation, Lines: 23, Source: openvino-usbcamera-cpu-ncs2-async.py
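
A hedged usage sketch for getKeypoints with a synthetic single-blob probability map; in the original pipeline probMap comes from a pose-estimation network, so the toy heatmap below is purely illustrative.

import cv2
import numpy as np

# Build a toy 46x46 probability map with one Gaussian blob centred at (x=30, y=20).
probMap = np.zeros((46, 46), dtype=np.float32)
probMap[20, 30] = 1.0
probMap = cv2.GaussianBlur(probMap, (15, 15), 0)
probMap /= probMap.max()

print(getKeypoints(probMap, threshold=0.1))  # e.g. [(30, 20, 1.0)]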

Example 14: _locate_target

# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def _locate_target(self, score):
        def subpixel_peak(left, center, right):
            divisor = 2 * center - left - right
            if abs(divisor) < 1e-3:
                return 0
            return 0.5 * (right - left) / divisor

        _, _, _, max_loc = cv2.minMaxLoc(score)
        loc = np.float32(max_loc)

        if max_loc[0] in range(1, score.shape[1] - 1):
            loc[0] += subpixel_peak(
                score[max_loc[1], max_loc[0] - 1],
                score[max_loc[1], max_loc[0]],
                score[max_loc[1], max_loc[0] + 1])
        if max_loc[1] in range(1, score.shape[0] - 1):
            loc[1] += subpixel_peak(
                score[max_loc[1] - 1, max_loc[0]],
                score[max_loc[1], max_loc[0]],
                score[max_loc[1] + 1, max_loc[0]])
        offset = loc - np.float32(score.shape[1::-1]) / 2

        return offset 
Author: huanglianghua, Project: open-vot, Lines: 25, Source: kcf.py

Example 15: SMAvgLocalMax

# Required module: import cv2 [as alias]
# Or: from cv2 import minMaxLoc [as alias]
def SMAvgLocalMax(self, src):
        # size
        stepsize = pySaliencyMapDefs.default_step_local
        width = src.shape[1]
        height = src.shape[0]
        # find local maxima
        numlocal = 0
        lmaxmean = 0
        for y in range(0, height-stepsize, stepsize):
            for x in range(0, width-stepsize, stepsize):
                localimg = src[y:y+stepsize, x:x+stepsize]
                lmin, lmax, dummy1, dummy2 = cv2.minMaxLoc(localimg)
                lmaxmean += lmax
                numlocal += 1
        # averaging over all the local regions
        return lmaxmean / numlocal
    # normalization specific for the saliency map model 
Author: tyarkoni, Project: pliers, Lines: 19, Source: pySaliencyMap.py


Note: The cv2.minMaxLoc examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors. Please follow each project's license when distributing or using the code, and do not reproduce this article without permission.