

Python cv2.TERM_CRITERIA_MAX_ITER Attribute Code Examples

This article collects typical usage examples of the cv2.TERM_CRITERIA_MAX_ITER attribute in Python. If you have been wondering what cv2.TERM_CRITERIA_MAX_ITER does, how to use it, or what working code looks like, the curated examples below may help. You can also explore further usage examples from the cv2 module that this attribute belongs to.


The 15 code examples of the cv2.TERM_CRITERIA_MAX_ITER attribute shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
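
For context, cv2.TERM_CRITERIA_MAX_ITER caps the number of iterations of an iterative OpenCV routine, while cv2.TERM_CRITERIA_EPS sets the desired accuracy; the two flags are typically combined into a (type, max_iter, epsilon) tuple that is passed to functions such as cv2.kmeans or cv2.cornerSubPix. Below is a minimal sketch using the OpenCV 3+/4 API (the random sample data is synthetic, for illustration only):

import cv2
import numpy as np

# Stop after at most 10 iterations OR once the cluster centers move by less than 1.0
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)

samples = np.random.rand(100, 2).astype(np.float32)  # synthetic 2-D points
compactness, labels, centers = cv2.kmeans(
    samples, 3, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
print(centers)  # 3 cluster centers, found within the iteration/accuracy budget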

Example 1: _detect_team_color

# Required import: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_MAX_ITER [as alias]
def _detect_team_color(self, pixels):
        criteria = \
            (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)

        pixels = np.array(pixels.reshape((-1, 3)), dtype=np.float32)

        ret, label, center = cv2.kmeans(
            pixels, 2, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)

        # one is black, another is the team color.

        colors = np.array(center, dtype=np.uint8).reshape((1, 2, 3))
        colors_hsv = cv2.cvtColor(colors, cv2.COLOR_BGR2HSV)
        x = np.argmax(colors_hsv[:, :, 2])
        team_color_bgr = colors[0, x, :]
        team_color_hsv = colors_hsv[0, x, :]

        return {
            'rgb': cv2.cvtColor(colors, cv2.COLOR_BGR2RGB).tolist()[0][x],
            'hsv': cv2.cvtColor(colors, cv2.COLOR_BGR2HSV).tolist()[0][x],
        } 
Developer ID: hasegaw, Project: IkaLog, Lines of code: 23, Source file: inklings_tracker.py

Example 2: calculateCorners

# Required import: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_MAX_ITER [as alias]
def calculateCorners(self, gray, points=None):
        '''
        gray is OpenCV gray image,
        points is Marker.points
        >>> marker.calculateCorners(gray)
        >>> print(marker.corners)
        '''
        if points is None: points = self.points
        if points is None: raise TypeError('calculateCorners need a points value')
        '''
        rotations = 0 -> 0,1,2,3
        rotations = 1 -> 3,0,1,2
        rotations = 2 -> 2,3,0,1
        rotations = 3 -> 1,2,3,0
        => A: 1,0,3,2; B: 0,3,2,1; C: 2,1,0,3; D: 3,2,1,0
        '''
        i = self.rotations
        A = (1,0,3,2)[i]; B = (0,3,2,1)[i]; C = (2,1,0,3)[i]; D = (3,2,1,0)[i]
        corners = np.float32([points[A], points[B], points[C], points[D]])
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        self.corners = cv2.cornerSubPix(gray, corners, (5,5), (-1,-1), criteria) 
Developer ID: bxtkezhan, Project: BAR4Py, Lines of code: 23, Source file: marker.py

Example 3: get_vectors

# Required import: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_MAX_ITER [as alias]
def get_vectors(image, points, mtx, dist):
    
    # order points
    points = _order_points(points)

    # set up criteria, image, points and axis
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    imgp = np.array(points, dtype='float32')

    objp = np.array([[0.,0.,0.],[1.,0.,0.],
                        [1.,1.,0.],[0.,1.,0.]], dtype='float32')  

    # calculate rotation and translation vectors
    cv2.cornerSubPix(gray,imgp,(11,11),(-1,-1),criteria)
    # Note: this 3-value unpacking follows the OpenCV 2.4 API; OpenCV 3+ returns
    # (retval, rvec, tvec, inliers), i.e. _, rvecs, tvecs, _ = cv2.solvePnPRansac(...)
    rvecs, tvecs, _ = cv2.solvePnPRansac(objp, imgp, mtx, dist)

    return rvecs, tvecs 
Developer ID: rdmilligan, Project: SaltwashAR, Lines of code: 22, Source file: markerfunctions.py

Example 4: color_quantization

# Required import: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_MAX_ITER [as alias]
def color_quantization(image, k):
    """Performs color quantization using K-means clustering algorithm"""

    # Transform image into 'data':
    data = np.float32(image).reshape((-1, 3))
    # print(data.shape)

    # Define the algorithm termination criteria (the maximum number of iterations and/or the desired accuracy):
    # In this case the maximum number of iterations is set to 20 and epsilon = 1.0
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)

    # Apply K-means clustering algorithm:
    ret, label, center = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)

    # At this point we can make the image with k colors
    # Convert center to uint8:
    center = np.uint8(center)
    # Replace pixel values with their center value:
    result = center[label.flatten()]
    result = result.reshape(image.shape)
    return result


# Create the dimensions of the figure and set title: 
Developer ID: PacktPublishing, Project: Mastering-OpenCV-4-with-Python, Lines of code: 26, Source file: k_means_color_quantization.py
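
A minimal usage sketch for the function above (the image file names are placeholders, not part of the original project):

import cv2
import numpy as np

img = cv2.imread('input.png')             # placeholder file name
quantized = color_quantization(img, k=8)  # reduce the image to 8 representative colors
cv2.imwrite('quantized_8.png', quantized)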

Example 5: kmeans

# Required import: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_MAX_ITER [as alias]
def kmeans(samples, k, criteria = None, attempts = 3, flags = None):
    import cv2
    
    if flags is None:
        flags = cv2.KMEANS_RANDOM_CENTERS
    if criteria is None:
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    samples = np.asarray(samples, dtype = np.float32)
    # Note: this call matches the OpenCV 2.4 signature; OpenCV 3+ expects bestLabels as
    # the third argument, e.g. cv2.kmeans(samples, k, None, criteria, attempts, flags)
    _,labels,centers = cv2.kmeans(samples, k, criteria, attempts, flags)
    labels = util.np.flatten(labels)
    clusters = [None]*k
    for idx, label in enumerate(labels):
        if clusters[label] is None:
            clusters[label] = []
        clusters[label].append(idx)
        
    for  idx, cluster in enumerate(clusters):
        if cluster is None:
            logging.warning('Empty cluster appeared.')
            clusters[idx] = []
            
    return labels, clusters, centers 
Developer ID: HLIG, Project: HUAWEIOCR-2019, Lines of code: 24, Source file: ml.py

Example 6: live_calibrate

# Required import: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_MAX_ITER [as alias]
def live_calibrate(camera, pattern_shape, n_matches_needed):
    """ Find calibration parameters as the user moves a checkerboard in front of the camera """
    print("Looking for %s checkerboard" % (pattern_shape,))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    example_3d = np.zeros((pattern_shape[0] * pattern_shape[1], 3), np.float32)
    example_3d[:, :2] = np.mgrid[0 : pattern_shape[1], 0 : pattern_shape[0]].T.reshape(-1, 2)
    points_3d = []
    points_2d = []
    while len(points_3d) < n_matches_needed:
        ret, frame = camera.cap.read()
        assert ret
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findCirclesGrid(
            gray_frame, pattern_shape, flags=cv2.CALIB_CB_ASYMMETRIC_GRID
        )
        cv2.imshow("camera", frame)
        if ret:
            points_3d.append(example_3d.copy())
            points_2d.append(corners)
            print("Found calibration %i of %i" % (len(points_3d), n_matches_needed))
            drawn_frame = cv2.drawChessboardCorners(frame, pattern_shape, corners, ret)
            cv2.imshow("calib", drawn_frame)
        cv2.waitKey(10)
    ret, camera_matrix, distortion_coefficients, _, _ = cv2.calibrateCamera(
        points_3d, points_2d, gray_frame.shape[::-1], None, None
    )
    assert ret
    return camera_matrix, distortion_coefficients 
Developer ID: notkarol, Project: derplearning, Lines of code: 30, Source file: calibrate_camera.py

Example 7: kmeans

# Required import: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_MAX_ITER [as alias]
def kmeans(vs, ks, niter):
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                    niter, 0.01)
        flags = cv2.KMEANS_RANDOM_CENTERS
        compactness, labels, centers = cv2.kmeans(
            vs, ks, criteria, 1, flags)
        return centers 
Developer ID: hdidx, Project: hdidx, Lines of code: 9, Source file: util.py

Example 8: get_dominant_color

# Required import: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_MAX_ITER [as alias]
def get_dominant_color(pixels, clusters, attempts):
        """
        Given a (N, Channels) array of pixel values, compute the dominant color via K-means
        """
        clusters = min(clusters, len(pixels))
        flags = cv2.KMEANS_RANDOM_CENTERS
        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 1, 10)
        _, labels, centroids = cv2.kmeans(pixels.astype(np.float32), clusters, None, criteria, attempts, flags)
        _, counts = np.unique(labels, return_counts=True)
        dominant = centroids[np.argmax(counts)]
        return dominant 
Developer ID: tasercake, Project: lowpolypy, Lines of code: 13, Source file: process.py

Example 9: kmeans

# Required import: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_MAX_ITER [as alias]
def kmeans(array: np.ndarray, k: int = 3):
        n_channels = 3 if len(np.shape(array)) == 3 else 1
        arr_values = array.reshape((-1, n_channels))
        arr_values = np.float32(arr_values)
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)
        _, labels, (centers) = cv2.kmeans(arr_values, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        centers = np.uint8(centers)
        clustered_arr = centers[labels.flatten()]
        clustered_arr = clustered_arr.reshape(array.shape)
        return clustered_arr 
Developer ID: haruiz, Project: CvStudio, Lines of code: 12, Source file: img_util.py

Example 10: color_quantization

# Required import: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_MAX_ITER [as alias]
def color_quantization(img, n_cluster, iteration, epsilon=1.0):
        Z = img.reshape((-1, 3))
        Z = np.float32(Z)

        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, iteration, epsilon)
        ret, label, center = cv2.kmeans(Z, n_cluster, None, criteria, iteration, cv2.KMEANS_PP_CENTERS)

        labels = label.reshape((img.shape[0], img.shape[1], 1))
        # center = np.uint(center)
        # visual = center[label.flatten()]
        # visual = visual.reshape(img.shape)
        # visual = np.uint8(visual)

        return labels 
Developer ID: YoongiKim, Project: Walk-Assistant, Lines of code: 16, Source file: filter.py

Example 11: test_kmeans

# Required import: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_MAX_ITER [as alias]
def test_kmeans(img):
    ## K-means clustering
    z = img.reshape((-1, 3))
    z = np.float32(z)
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    ret, label, center = cv2.kmeans(z, 20, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))
    cv2.imshow('preview', res2)
    cv2.waitKey() 
Developer ID: NetEaseGame, Project: ATX, Lines of code: 13, Source file: test_monkey.py

Example 12: _get_corners

# Required import: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_MAX_ITER [as alias]
def _get_corners(self, image):
        """Find subpixel chessboard corners in image."""
        temp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(temp,
                                                 (self.rows, self.columns))
        if not ret:
            raise ChessboardNotFoundError("No chessboard could be found.")
        cv2.cornerSubPix(temp, corners, (11, 11), (-1, -1),
                         (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS,
                          30, 0.01))
        return corners 
Developer ID: erget, Project: StereoVision, Lines of code: 13, Source file: calibration.py

Example 13: svm_init

# Required import: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_MAX_ITER [as alias]
def svm_init(C=12.5, gamma=0.50625):
    """Creates empty model and assigns main parameters"""

    model = cv2.ml.SVM_create()
    model.setGamma(gamma)
    model.setC(C)
    model.setKernel(cv2.ml.SVM_RBF)
    model.setType(cv2.ml.SVM_C_SVC)
    model.setTermCriteria((cv2.TERM_CRITERIA_MAX_ITER, 100, 1e-6))

    return model 
Developer ID: PacktPublishing, Project: Mastering-OpenCV-4-with-Python, Lines of code: 13, Source file: svm_handwritten_digits_recognition_preprocessing_hog_c_gamma.py

Example 14: svm_init

# Required import: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_MAX_ITER [as alias]
def svm_init(C=12.5, gamma=0.50625):
    """Creates empty model and assigns main parameters"""

    model = cv2.ml.SVM_create()
    model.setGamma(gamma)
    model.setC(C)
    model.setKernel(cv2.ml.SVM_LINEAR)
    model.setType(cv2.ml.SVM_C_SVC)
    model.setTermCriteria((cv2.TERM_CRITERIA_MAX_ITER, 100, 1e-6))

    return model 
Developer ID: PacktPublishing, Project: Mastering-OpenCV-4-with-Python, Lines of code: 13, Source file: svm_introduction.py
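
A minimal, hypothetical usage sketch for svm_init above: training stops once the 100-iteration bound set via TERM_CRITERIA_MAX_ITER is reached. The synthetic samples and labels are assumptions made for illustration:

import cv2
import numpy as np

model = svm_init(C=12.5, gamma=0.50625)

# Two toy classes of well-separated 2-D points (synthetic data)
samples = np.float32(np.vstack([np.random.randn(20, 2) + 2,
                                np.random.randn(20, 2) - 2]))
labels = np.int32([0] * 20 + [1] * 20)

model.train(samples, cv2.ml.ROW_SAMPLE, labels)  # bounded by the 100-iteration criterion
_, predictions = model.predict(np.float32([[2.0, 2.0], [-2.0, -2.0]]))
print(predictions.ravel())  # typically [0. 1.] for these well-separated clusters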

Example 15: k_means

# Required import: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_MAX_ITER [as alias]
def k_means(points: np.ndarray):
    """返回一個數組經kmeans分類後的k值以及標簽,k值由計算拐點給出

    Args:
        points (np.ndarray): 需分類數據

    Returns:
        Tuple[int, np.ndarry]: k值以及標簽數組
    """

    # Define criteria = ( type, max_iter = 10 , epsilon = 1.0 )
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)

    # Set flags (Just to avoid line break in the code)
    flags = cv2.KMEANS_RANDOM_CENTERS
    length = []
    max_k = min(10, points.shape[0])
    for k in range(2, max_k + 1):
        avg = 0
        for i in range(5):
            compactness, _, _ = cv2.kmeans(
                points, k, None, criteria, 10, flags)
            avg += compactness
        avg /= 5
        length.append(avg)

    peek_pos = find_peek(length)
    k = peek_pos + 2
    # print(k)
    return k, cv2.kmeans(points, k, None, criteria, 10, flags)[1]  # labels 
Developer ID: zhaobenx, Project: Image-stitcher, Lines of code: 32, Source file: k_means.py


Note: the cv2.TERM_CRITERIA_MAX_ITER attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please refer to each project's license before distributing or using the code; do not repost without permission.