

Python feature.corner_peaks Function Code Examples

This article collects typical usage examples of the Python function skimage.feature.corner_peaks. If you are wondering what corner_peaks does, how to call it, or where to find real-world usage, the hand-picked code examples below may help.


Below, 15 code examples of the corner_peaks function are shown, sorted by popularity by default.
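Before the examples, here is a minimal, self-contained sketch of the typical corner_peaks pattern: compute a Harris response map and let corner_peaks pick well-separated local maxima from it. The sample image (data.astronaut) and the parameter values (min_distance=5, threshold_rel=0.02) are illustrative assumptions, not taken from any specific example below.

from skimage import data
from skimage.color import rgb2gray
from skimage.feature import corner_harris, corner_peaks

# Load a sample RGB image and convert it to grayscale.
img = rgb2gray(data.astronaut())

# corner_harris returns a per-pixel corner response map; corner_peaks
# selects local maxima from it, enforcing a minimum spacing between
# the returned corners and a relative response threshold.
corners = corner_peaks(corner_harris(img), min_distance=5, threshold_rel=0.02)

print(corners.shape)  # (N, 2) array of (row, col) corner coordinates

The examples that follow combine this pattern with BRIEF descriptors, blob detection, and sub-pixel refinement.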

Example 1: test_binary_descriptors_rotation_crosscheck_true

def test_binary_descriptors_rotation_crosscheck_true():
    """Verify matched keypoints and their corresponding masks results between
    image and its rotated version with the expected keypoint pairs with
    cross_check enabled."""
    img = data.astronaut()
    img = rgb2gray(img)
    tform = tf.SimilarityTransform(scale=1, rotation=0.15, translation=(0, 0))
    rotated_img = tf.warp(img, tform, clip=False)

    extractor = BRIEF(descriptor_size=512)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5,
                              threshold_abs=0, threshold_rel=0.1)
    extractor.extract(img, keypoints1)
    descriptors1 = extractor.descriptors

    keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5,
                              threshold_abs=0, threshold_rel=0.1)
    extractor.extract(rotated_img, keypoints2)
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, cross_check=True)

    exp_matches1 = np.array([ 0,  2,  3,  4,  5,  6,  9, 11, 12, 13, 14, 17,
                             18, 19, 21, 22, 23, 26, 27, 28, 29, 31, 32, 33,
                             34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46])
    exp_matches2 = np.array([ 0,  2,  3,  1,  4,  6,  5,  7, 13, 10,  9, 11,
                             15,  8, 14, 12, 16, 18, 19, 21, 20, 24, 25, 26,
                             28, 27, 22, 23, 29, 30, 31, 32, 35, 33, 34, 36])
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)
Author: AbdealiJK, Project: scikit-image, Lines of code: 31, Source file: test_match.py

Example 2: test_corner_peaks

def test_corner_peaks():
    response = np.zeros((5, 5))
    response[2:4, 2:4] = 1

    corners = corner_peaks(response, exclude_border=False)
    assert len(corners) == 1

    corners = corner_peaks(response, exclude_border=False, min_distance=0)
    assert len(corners) == 4
Author: andersbll, Project: scikit-image, Lines of code: 9, Source file: test_corner.py

Example 3: process

    def process(self, img2, image_gray):
        # img2 = warp(img2)
        patch_size = [640]
        img2 = rgb2gray(img2)
        image_gray = rgb2gray(img2)

        blobs_dog = blob_dog(image_gray, min_sigma=0.2, max_sigma=225, sigma_ratio=1.6, threshold=.5)
        blobs_dog[:, 2] = blobs_dog[:, 2]

        blobs = [blobs_dog]
        colors = ['black']
        titles = ['Difference of Gaussian']
        sequence = zip(blobs, colors, titles)

        # plt.imshow(img2)
        # plt.axis("equal")
        # plt.show()

        for blobs, color, title in sequence:
            print(len(blobs))
            for blob in blobs:
                y, x, r = blob
                plotx = x
                ploty = y
                for i in range (3):
                    keypoints1 = corner_peaks(corner_harris(Array.image_arr[i]), min_distance=1)
                    keypoints2 = corner_peaks(corner_harris(img2), min_distance=1)

                    extractor = BRIEF(patch_size=30, mode="uniform")

                    extractor.extract(Array.image_arr[i], keypoints1)
                    keypoints1 = keypoints1[extractor.mask]
                    descriptors1 = extractor.descriptors

                    extractor.extract(img2, keypoints2)
                    keypoints2 = keypoints2[extractor.mask]
                    descriptors2 = extractor.descriptors

                    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
                    
                    # print(keypoints1, keypoints2)
                    # print(matches12)
                    # Take the (col, row) position of the last matched keypoint.
                    for m in matches12:
                        X = keypoints2[m[1]][1]
                        Y = keypoints2[m[1]][0]

                    if sqrt((plotx - X)**2 + (ploty - Y)**2) < r:
                        seen = [{
                            "type": Array.type_arr[i],
                            "center_shift": (plotx - 160/2) * -0.02,
                            "distance": image_gray[int(y)][int(x)] / 0.08
                        }]
                        print(seen)
                        data.seen.add(seen)
                        break
Author: VsevolodTrofimov, Project: badbots, Lines of code: 56, Source file: blob.py

Example 4: test_corner_peaks

def test_corner_peaks():
    response = np.zeros((10, 10))
    response[2:5, 2:5] = 1

    corners = corner_peaks(response, exclude_border=False, min_distance=10,
                           threshold_rel=0)
    assert len(corners) == 1

    corners = corner_peaks(response, exclude_border=False, min_distance=1)
    assert len(corners) == 4

    corners = corner_peaks(response, exclude_border=False, min_distance=1,
                           indices=False)
    assert np.sum(corners) == 4
Author: ameya005, Project: scikit-image, Lines of code: 14, Source file: test_corner.py

Example 5: extract_corner_harris

def extract_corner_harris(patch):
    """ Extract four corner points using harris corner detection algorithm

    """
    # Find corner with harris corner detection
    coords = corner_peaks(corner_harris(patch, k=0.1), min_distance=5)
    coords_subpix = corner_subpix(patch, coords, window_size=13)

    # Find the nearest point for each corner
    dim = patch.shape
    corners = [(0, 0), (dim[0], 0), (dim[0], dim[1]), (0, dim[1])]

    dest_points = [[] for x in range(4)]
    for i in range(4):
        dest_points[i] = search_closest_points(corners[i], coords_subpix)

    # Check for error
    try:
        epsilon = 1e-10
        for i in range(4):
            for j in range(i + 1, 4):
                if calc_distance(dest_points[i], dest_points[j]) < epsilon:
                    print('Error point')
                    return []
    except TypeError:
        return []

    # Reverse y,x position to x,y
    for i in range(4):
        dest_points[i][1], dest_points[i][0] = dest_points[i][0], dest_points[i][1]

    return dest_points
Author: mitbal, Project: pemilu, Lines of code: 32, Source file: extract.py

Example 6: test_uniform_mode

def test_uniform_mode():
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5, threshold_abs=0, threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode="uniform")

    extractor.extract(img, keypoints[:8])

    expected = np.array(
        [
            [False, False, False, True, True, True, False, False],
            [True, True, True, False, True, False, False, True],
            [True, True, True, False, True, True, False, True],
            [True, True, True, True, False, True, False, True],
            [True, True, True, True, True, True, False, False],
            [True, True, True, True, True, True, True, True],
            [False, False, False, True, True, True, True, True],
            [False, True, False, True, False, True, True, True],
        ],
        dtype=bool,
    )

    assert_array_equal(extractor.descriptors, expected)
Author: soupault, Project: scikit-image, Lines of code: 25, Source file: test_brief.py

Example 7: test_corner_orientations_lena

def test_corner_orientations_lena():
    img = rgb2gray(data.lena())
    corners = corner_peaks(corner_fast(img, 11, 0.35))
    expected = np.array([-1.9195897 , -3.03159624, -1.05991162, -2.89573739,
                         -2.61607644, 2.98660159])
    actual = corner_orientations(img, corners, octagon(3, 2))
    assert_almost_equal(actual, expected)
Author: AlexG31, Project: scikit-image, Lines of code: 7, Source file: test_corner.py

Example 8: featurize

def featurize(img_name):
    """Load an image and convert it into a dictionary of features"""
    img = plt.imread(os.path.join('stimuli', img_name + '.png'))
    height, width, _ = img.shape
    features = defaultdict(int)
    for y in range(height):
        for x in range(width):
            features['red'] += img[y][x][0]
            features['green'] += img[y][x][1]
            features['blue'] += img[y][x][2]
            features['alpha'] += img[y][x][3]

    grey = color.rgb2grey(img)
    for y in range(height):
        for x in range(width):
            for key, value in per_pixel(grey, y, x):
                features[key] += value

    # Normalize over image size
    for key, value in features.items():
        features[key] = float(value) / height / width

    features['blob'] = feature.blob_dog(grey).shape[0]
    features['corners'] = feature.corner_peaks(
        feature.corner_harris(grey)).shape[0]
    return features
Author: cmc333333, Project: neuraldata-final, Lines of code: 26, Source file: runner.py

Example 9: dumb_matcher

def dumb_matcher(img1, img2):
    kps = lambda img: feature.corner_peaks(feature.corner_harris(img), min_distance = 2)
    kp1 = kps(img1)
    kp2 = kps(img2)
    to_set = lambda aoa: set(map(lambda x: (x[0], x[1]), aoa))
    s1 = to_set(kp1)
    s2 = to_set(kp2)
    return float(len(s1 & s2) * 2) / (len(s1) + len(s2))
Author: blmarket, Project: CoCParser, Lines of code: 8, Source file: feature.py

Example 10: test_corner_orientations_square

def test_corner_orientations_square():
    square = np.zeros((12, 12))
    square[3:9, 3:9] = 1
    corners = corner_peaks(corner_fast(square, 9), min_distance=1)
    actual_orientations = corner_orientations(square, corners, octagon(3, 2))
    actual_orientations_degrees = np.rad2deg(actual_orientations)
    expected_orientations_degree = np.array([45.0, 135.0, -45.0, -135.0])
    assert_array_equal(actual_orientations_degrees, expected_orientations_degree)
Author: hbueno, Project: scikit-image, Lines of code: 8, Source file: test_corner.py

Example 11: find_corners

def find_corners(path, min_distance=5):
    """Find corners in an image at path
    
    Returns the image and the corner lists.
    """
    from skimage.feature import corner_harris, corner_peaks
    img = imread(path, flatten=True)
    corners = corner_peaks(corner_harris(img), min_distance=min_distance)
    return img, corners
Author: jupyter, Project: ngcm-tutorial, Lines of code: 9, Source file: images_common.py

Example 12: peak_corner_detector

def peak_corner_detector(distance_map, threshold, min_d, num_peaks=6):
    """
    well, no idea what is the difference from skimage.feature.peak_local_max
    :param distance_map:
    :param threshold:
    :param min_d:
    :return:
    """
    return corner_peaks(distance_map, threshold_rel=threshold, min_distance=min_d, num_peaks=num_peaks)
Author: erdincay, Project: ScoreGrass, Lines of code: 9, Source file: PeakDetector.py

Example 13: corners

def corners(provider):
    """
    number of corners
    """

    gray = provider.as_gray()

    # TODO: custom parameters would give rise to exceptions due to mismatched shapes
    coords = corner_peaks(corner_harris(gray))#, min_distance=5)
    coords_subpix = corner_subpix(gray, coords)#, window_size=13)

    return len(coords_subpix)
Author: cuppster, Project: imagefeatures, Lines of code: 12, Source file: basic.py

Example 14: test_match_keypoints_brief_lena_translation

def test_match_keypoints_brief_lena_translation():
    """Test matched keypoints between lena image and its translated version."""
    img = data.lena()
    img = rgb2gray(img)
    img.shape
    tform = tf.SimilarityTransform(scale=1, rotation=0, translation=(15, 20))
    translated_img = tf.warp(img, tform)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5)
    descriptors1, keypoints1 = brief(img, keypoints1, descriptor_size=512)

    keypoints2 = corner_peaks(corner_harris(translated_img), min_distance=5)
    descriptors2, keypoints2 = brief(translated_img, keypoints2,
                                     descriptor_size=512)

    matched_keypoints = match_keypoints_brief(keypoints1, descriptors1,
                                              keypoints2, descriptors2,
                                              threshold=0.10)

    assert_array_equal(matched_keypoints[:, 0, :], matched_keypoints[:, 1, :] +
                       [20, 15])
Author: Autodidact24, Project: scikit-image, Lines of code: 21, Source file: _test_brief.py

Example 15: test_match_keypoints_brief_lena_rotation

def test_match_keypoints_brief_lena_rotation():
    """Verify matched keypoints result between lena image and its rotated
    version with the expected keypoint pairs."""
    img = data.lena()
    img = rgb2gray(img)
    img.shape
    tform = tf.SimilarityTransform(scale=1, rotation=0.10, translation=(0, 0))
    rotated_img = tf.warp(img, tform)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5)
    descriptors1, keypoints1 = brief(img, keypoints1, descriptor_size=512)

    keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5)
    descriptors2, keypoints2 = brief(rotated_img, keypoints2,
                                     descriptor_size=512)

    matched_keypoints = match_keypoints_brief(keypoints1, descriptors1,
                                              keypoints2, descriptors2,
                                              threshold=0.07)

    expected = np.array([[[263, 272],
                          [234, 298]],

                         [[271, 120],
                          [258, 146]],

                         [[323, 164],
                          [305, 195]],

                         [[414,  70],
                          [405, 111]],

                         [[435, 181],
                          [415, 223]],

                         [[454, 176],
                          [435, 221]]])

    assert_array_equal(matched_keypoints, expected)
Author: Autodidact24, Project: scikit-image, Lines of code: 39, Source file: _test_brief.py


Note: The skimage.feature.corner_peaks examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to each project's license before using or distributing the code; do not reproduce this article without permission.