

Python cv2.goodFeaturesToTrack Method Code Examples

This article collects typical usage examples of the cv2.goodFeaturesToTrack method in Python. If you are wondering how cv2.goodFeaturesToTrack works, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the cv2 module it belongs to.


The following presents 15 code examples of cv2.goodFeaturesToTrack, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
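Before working through the examples, it helps to recall that cv2.goodFeaturesToTrack implements the Shi-Tomasi corner detector (optionally the Harris detector via useHarrisDetector) and expects a single-channel 8-bit or float32 image. Below is a minimal sketch of the basic call; the image path is a placeholder and the parameter values are illustrative assumptions only.

import cv2

# Placeholder path -- any image readable by cv2.imread will do.
img = cv2.imread("input.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Detect up to 100 strong corners that are at least 10 pixels apart.
corners = cv2.goodFeaturesToTrack(gray, maxCorners=100, qualityLevel=0.01, minDistance=10)

# The result is an (N, 1, 2) float32 array, or None if no corners were found.
if corners is not None:
    for x, y in corners.reshape(-1, 2):
        cv2.circle(img, (int(x), int(y)), 3, (0, 255, 0), -1)

cv2.imwrite("corners_marked.jpg", img)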

Example 1: center_from_good_features

# Required module: import cv2 [as alias]
# Or: from cv2 import goodFeaturesToTrack [as alias]
def center_from_good_features(matrix):
    x, y = (0, 0)
    weight = 0
    corners = cv2.goodFeaturesToTrack(matrix, FEATURE_DETECT_MAX_CORNERS, FEATURE_DETECT_QUALITY_LEVEL,
                                      FEATURE_DETECT_MIN_DISTANCE)

    for point in corners:
        weight += 1
        x += point[0][0]
        y += point[0][1]

    return {
        'x': x / weight,
        'y': y / weight,
        'count': weight
    } 
Author: epixelic, Project: python-smart-crop, Lines of code: 18, Source file: __init__.py
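The constants FEATURE_DETECT_MAX_CORNERS, FEATURE_DETECT_QUALITY_LEVEL and FEATURE_DETECT_MIN_DISTANCE are defined elsewhere in the python-smart-crop package; the values below are hypothetical stand-ins chosen only to make the snippet runnable. A minimal usage sketch of the function above:

import cv2

# Hypothetical values -- the real settings live in the python-smart-crop module.
FEATURE_DETECT_MAX_CORNERS = 50
FEATURE_DETECT_QUALITY_LEVEL = 0.05
FEATURE_DETECT_MIN_DISTANCE = 10

gray = cv2.imread("photo.jpg", cv2.IMREAD_GRAYSCALE)  # placeholder path
result = center_from_good_features(gray)              # function defined above
print(result)  # {'x': ..., 'y': ..., 'count': ...}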

Example 2: extractFeatures

# Required module: import cv2 [as alias]
# Or: from cv2 import goodFeaturesToTrack [as alias]
def extractFeatures(img):
  orb = cv2.ORB_create()
  # detection
  pts = cv2.goodFeaturesToTrack(np.mean(img, axis=2).astype(np.uint8), 3000, qualityLevel=0.01, minDistance=7)

  # extraction
  kps = [cv2.KeyPoint(x=f[0][0], y=f[0][1], _size=20) for f in pts]
  kps, des = orb.compute(img, kps)

  # return pts and des
  return np.array([(kp.pt[0], kp.pt[1]) for kp in kps]), des 
Author: geohot, Project: twitchslam, Lines of code: 13, Source file: frame.py
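A note on the _size keyword used above: in recent OpenCV Python bindings the cv2.KeyPoint constructor no longer accepts _size (the parameter is now called size), so passing the keypoint size positionally is the more portable option. A minimal sketch of the same detect-and-describe step, assuming a BGR image loaded with cv2.imread (the path is a placeholder):

import cv2
import numpy as np

img = cv2.imread("frame.png")  # placeholder path
gray = np.mean(img, axis=2).astype(np.uint8)

orb = cv2.ORB_create()
pts = cv2.goodFeaturesToTrack(gray, 3000, qualityLevel=0.01, minDistance=7)

# Passing the keypoint diameter positionally sidesteps the _size/size keyword rename.
kps = [cv2.KeyPoint(float(p[0][0]), float(p[0][1]), 20) for p in pts] if pts is not None else []
kps, des = orb.compute(img, kps)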

Example 3: get_new_tracks

# Required module: import cv2 [as alias]
# Or: from cv2 import goodFeaturesToTrack [as alias]
def get_new_tracks(self, frame, roi):
        """Get new tracks every detect_interval frames."""
        # Using mask to determine where to look for feature points.
        mask = np.zeros_like(frame)
        mask[roi[0]:roi[1], roi[2]:roi[3]] = 255

        # Get good feature points.
        feature_points = cv2.goodFeaturesToTrack(
            frame, mask=mask, **self.feature_params)

        if feature_points is not None:
            for x, y in np.float32(feature_points).reshape(-1, 2):
                self.tracks.append([(x, y)]) 
Author: junhwanjang, Project: face_landmark_dnn, Lines of code: 15, Source file: optical_flow_tracker.py

Example 4: add_tracking_paths

# Required module: import cv2 [as alias]
# Or: from cv2 import goodFeaturesToTrack [as alias]
def add_tracking_paths(frame, tracking_paths):
    mask = calculate_region_of_interest(frame, tracking_paths)
 
    # Extract good features to track. You can learn more 
    # about the parameters here: http://goo.gl/BI2Kml 
    feature_points = cv2.goodFeaturesToTrack(frame, mask = mask, maxCorners = 500, \
        qualityLevel = 0.3, minDistance = 7, blockSize = 7) 

    if feature_points is not None: 
        for x, y in np.float32(feature_points).reshape(-1, 2): 
            tracking_paths.append([(x, y)]) 
Author: PacktPublishing, Project: OpenCV-3-x-with-Python-By-Example, Lines of code: 13, Source file: feature_tracking.py

Example 5: main

# Required module: import cv2 [as alias]
# Or: from cv2 import goodFeaturesToTrack [as alias]
def main():
    image = cv2.imread("images/image_1.jpg")

    keypoints = cv2.goodFeaturesToTrack(
        cv2.cvtColor(image, cv2.COLOR_RGB2GRAY), maxCorners=100, qualityLevel=0.5, minDistance=5
    ).squeeze(1)

    bboxes = [(kp[0] - 10, kp[1] - 10, kp[0] + 10, kp[1] + 10) for kp in keypoints]

    disp_image = visualize(image, keypoints, bboxes)
    plt.figure(figsize=(10, 10))
    plt.imshow(cv2.cvtColor(disp_image, cv2.COLOR_RGB2BGR))
    plt.tight_layout()
    plt.show()

    aug = A.Compose(
        [A.ShiftScaleRotate(scale_limit=0.1, shift_limit=0.2, rotate_limit=10, always_apply=True)],
        bbox_params=A.BboxParams(format="pascal_voc", label_fields=["bbox_labels"]),
        keypoint_params=A.KeypointParams(format="xy"),
    )

    for _i in range(10):
        data = aug(image=image, keypoints=keypoints, bboxes=bboxes, bbox_labels=np.ones(len(bboxes)))

        aug_image = data["image"]
        aug_image = visualize(aug_image, data["keypoints"], data["bboxes"])

        plt.figure(figsize=(10, 10))
        plt.imshow(cv2.cvtColor(aug_image, cv2.COLOR_RGB2BGR))
        plt.tight_layout()
        plt.show() 
Author: albumentations-team, Project: albumentations_examples, Lines of code: 33, Source file: example_bbox_keypoint_rotate.py

Example 6: detect

# Required module: import cv2 [as alias]
# Or: from cv2 import goodFeaturesToTrack [as alias]
def detect(self, frame, mask=None):                
        pts = cv2.goodFeaturesToTrack(frame, self.num_features, self.quality_level, self.min_coner_distance, blockSize=self.blockSize, mask=mask)
        # convert matrix of pts into list of keypoints 
        if pts is not None: 
            kps = [ cv2.KeyPoint(p[0][0], p[0][1], self.blockSize) for p in pts ]
        else:
            kps = []
        #if kVerbose:
        #    print('detector: Shi-Tomasi, #features: ', len(kps), ', #ref: ', self.num_features, ', frame res: ', frame.shape[0:2])      
        return kps 
Author: luigifreda, Project: pyslam, Lines of code: 12, Source file: feature_shitomasi.py

Example 7: detect

# Required module: import cv2 [as alias]
# Or: from cv2 import goodFeaturesToTrack [as alias]
def detect(self, img):
        cnrs = cv2.goodFeaturesToTrack(img, self.maxCorners, self.qualityLevel, self.minDistance,
                                       mask=self.mask, blockSize=self.blockSize,
                                       useHarrisDetector=self.useHarrisDetector, k=self.k)

        return corners_to_keypoints(cnrs) 
Author: jrosebr1, Project: imutils, Lines of code: 8, Source file: gftt.py

Example 8: getCorners

# Required module: import cv2 [as alias]
# Or: from cv2 import goodFeaturesToTrack [as alias]
def getCorners(img_in):
    # number of features to track is a distinctive feature
    ## FeaturesToTrack important -> make accessible
    edges = cv2.goodFeaturesToTrack(img_in, 640, 0.0008, 1, mask=None, blockSize=3, useHarrisDetector=1, k=0.06)  # k=0.08
    corners = np.int0(edges)

    return corners 
Author: hanneshoettinger, Project: opencv-steel-darts, Lines of code: 9, Source file: DartsRecognition.py

Example 9: run

# Required module: import cv2 [as alias]
# Or: from cv2 import goodFeaturesToTrack [as alias]
def run(self):
        while True:
            ret, frame = self.cam.read()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                self.tracks = new_tracks
                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])


            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow('lk_track', vis)

            ch = cv2.waitKey(1)
            if ch == 27:
                break 
Author: makelove, Project: OpenCV-Python-Tutorial, Lines of code: 46, Source file: lk_track.py

Example 10: run

# Required module: import cv2 [as alias]
# Or: from cv2 import goodFeaturesToTrack [as alias]
def run(self):
        while True:
            ret, frame = self.cam.read()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()
            if self.p0 is not None:
                p2, trace_status = checkedTrace(self.gray1, frame_gray, self.p1)

                self.p1 = p2[trace_status].copy()
                self.p0 = self.p0[trace_status].copy()
                self.gray1 = frame_gray

                if len(self.p0) < 4:
                    self.p0 = None
                    continue
                H, status = cv2.findHomography(self.p0, self.p1, (0, cv2.RANSAC)[self.use_ransac], 10.0)
                h, w = frame.shape[:2]
                overlay = cv2.warpPerspective(self.frame0, H, (w, h))
                vis = cv2.addWeighted(vis, 0.5, overlay, 0.5, 0.0)

                for (x0, y0), (x1, y1), good in zip(self.p0[:,0], self.p1[:,0], status[:,0]):
                    if good:
                        cv2.line(vis, (x0, y0), (x1, y1), (0, 128, 0))
                    cv2.circle(vis, (x1, y1), 2, (red, green)[good], -1)
                draw_str(vis, (20, 20), 'track count: %d' % len(self.p1))
                if self.use_ransac:
                    draw_str(vis, (20, 40), 'RANSAC')
            else:
                p = cv2.goodFeaturesToTrack(frame_gray, **feature_params)
                if p is not None:
                    for x, y in p[:,0]:
                        cv2.circle(vis, (x, y), 2, green, -1)
                    draw_str(vis, (20, 20), 'feature count: %d' % len(p))

            cv2.imshow('lk_homography', vis)

            ch = cv2.waitKey(1)
            if ch == 27:
                break
            if ch == ord(' '):
                self.frame0 = frame.copy()
                self.p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params)
                if self.p0 is not None:
                    self.p1 = self.p0
                    self.gray0 = frame_gray
                    self.gray1 = frame_gray
            if ch == ord('r'):
                self.use_ransac = not self.use_ransac 
Author: makelove, Project: OpenCV-Python-Tutorial, Lines of code: 50, Source file: lk_homography.py

Example 11: _sparse_sd

# Required module: import cv2 [as alias]
# Or: from cv2 import goodFeaturesToTrack [as alias]
def _sparse_sd(data_instance,
               of_params={'st_pars': dict(maxCorners = 200,
                                          qualityLevel = 0.2,
                                          minDistance = 7,
                                          blockSize = 21),
                          'lk_pars': dict(winSize = (20, 20),
                                          maxLevel = 2,
                                          criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0))},
               lead_steps=12):

    # define penult and last frames
    penult_frame = data_instance[-2]
    last_frame = data_instance[-1]

    # find features to track
    old_corners = cv2.goodFeaturesToTrack(data_instance[0], mask=None,
                                          **of_params['st_pars'])

    # track corners by optical flow algorithm
    new_corners, st, err = cv2.calcOpticalFlowPyrLK(prevImg=penult_frame,
                                                    nextImg=last_frame,
                                                    prevPts=old_corners,
                                                    nextPts=None,
                                                    **of_params['lk_pars'])

    # select only good attempts for corner tracking
    success = st.ravel() == 1
    new_corners = new_corners[success].copy()
    old_corners = old_corners[success].copy()

    # calculate Simple Delta
    delta = new_corners.reshape(-1, 2) - old_corners.reshape(-1, 2)

    # simplify further transformations
    pts_source = new_corners.reshape(-1, 2)

    # propagate our corners through time
    pts_target_container = []

    for lead_step in range(lead_steps):
        pts_target_container.append(pts_source + delta * (lead_step + 1))

    return pts_source, pts_target_container 
Author: hydrogo, Project: rainymotion, Lines of code: 45, Source file: models.py

Example 12: getMatches_goodtemplmatch

# Required module: import cv2 [as alias]
# Or: from cv2 import goodFeaturesToTrack [as alias]
def getMatches_goodtemplmatch(img1, img2, templ_shape, max):
    """Return pairs of corresponding points
    using combining Shi-Tomasi corner detector and template matching."""
    if not np.array_equal(img1.shape, img2.shape):
        print "error: inconsistent array dimention", img1.shape, img2.shape
        sys.exit()
    if not (np.all(templ_shape <= img1.shape[:2]) and
            np.all(templ_shape <= img2.shape[:2])):
        print "error: template shape shall fit img1 and img2"
        sys.exit()

    feature_params = dict(maxCorners=max, qualityLevel=0.01,
                          minDistance=5, blockSize=5)
    kps1 = cv2.goodFeaturesToTrack(img1, mask=None, **feature_params)
    kps2 = cv2.goodFeaturesToTrack(img2, mask=None, **feature_params)

    Hs, Ws = img1.shape[:2]
    Ht, Wt = templ_shape
    matches = []
    for [[xt, yt]] in kps1:
        if int(yt) + Ht > Hs or int(xt) + Wt > Ws:
            continue
        result = cv2.matchTemplate(
            img2, img1[int(yt):int(yt) + Ht, int(xt):int(xt) + Wt],
            cv2.TM_CCOEFF_NORMED)
        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
        if maxVal > 0.85:
            matches.append((maxVal, (int(xt), int(yt)), maxLoc))
    for [[xt, yt]] in kps2:
        if int(yt) + Ht > Hs or int(xt) + Wt > Ws:
            continue
        result = cv2.matchTemplate(
            img1, img2[int(yt):int(yt) + Ht, int(xt):int(xt) + Wt],
            cv2.TM_CCOEFF_NORMED)
        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
        if maxVal > 0.85:
            matches.append((maxVal, maxLoc, (int(xt), int(yt))))
    matches.sort(key=lambda e: e[0], reverse=True)
    if len(matches) >= max:
        return np.int32([matches[i][1:] for i in range(max)])
    else:
        return np.int32([c[1:] for c in matches]) 
Author: cynricfu, Project: dual-fisheye-video-stitching, Lines of code: 44, Source file: feature_matching.py

Example 13: sparseOpticalFlow

# Required module: import cv2 [as alias]
# Or: from cv2 import goodFeaturesToTrack [as alias]
def sparseOpticalFlow():
    # use 0 for webcam capturing
    # cap = cv2.VideoCapture(0)

    cap = cv2.VideoCapture('test/Pedestrian overpass.mp4')

    # params for ShiTomasi corner detection
    feature_params = dict( maxCorners = 100,
                           qualityLevel = 0.3,
                           minDistance = 7,
                           blockSize = 7 )
    # Parameters for lucas kanade optical flow
    lk_params = dict( winSize  = (15,15),
                      maxLevel = 2,
                      criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # Create some random colors
    color = np.random.randint(0,255,(100,3))
    # Take first frame and find corners in it
    ret, old_frame = cap.read()
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
    # Create a mask image for drawing purposes
    mask = np.zeros_like(old_frame)
    while(1):
        ret,frame = cap.read()
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
        # Select good points
        good_new = p1[st==1]
        good_old = p0[st==1]
        # draw the tracks
        for i,(new,old) in enumerate(zip(good_new,good_old)):
            a,b = new.ravel()
            c,d = old.ravel()
            mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2)
            frame = cv2.circle(frame,(a,b),5,color[i].tolist(),-1)
        img = cv2.add(frame,mask)
        cv2.imshow('frame',img)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
        # Now update the previous frame and previous points
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1,1,2)
    cv2.destroyAllWindows()
    cap.release()


# DENSE OPTICAL FLOW 
Author: sahibdhanjal, Project: Mask-RCNN-Pedestrian-Detection, Lines of code: 52, Source file: opticalFlow.py

Example 14: updateError

# Required module: import cv2 [as alias]
# Or: from cv2 import goodFeaturesToTrack [as alias]
def updateError(self, frame):
        self.frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
      
        if len(self.tracks) > 0:
            img0, img1 = self.prev_gray, self.frame_gray
            p0 = np.float32([tr[-1][:2] for tr in self.tracks]).reshape(-1, 1, 2)
            p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **self.lk_params)
            p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **self.lk_params)
            d = abs(p0-p0r).reshape(-1, 2).max(-1)
            good = d < 1
            new_tracks = []

            self.xerror = 0.0
            self.yerror = 0.0
            self.n = 0.0

            current_time = time.time()
            for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                if not good_flag:
                    continue
                tr.append((x, y, current_time))
                if len(tr) >  500:
                    del tr[0]
                new_tracks.append(tr)

                if(len(tr)>=2):
                    t = np.float32([v[2] for v in tr])
                    x = np.float32([v[0] for v in tr])
                    y = np.float32([v[1] for v in tr])


                    self.xerror = self.xerror + (x[-1] - x[0])
                    self.yerror = self.yerror + (y[-1] - y[0])
                    self.n = self.n + 1.0

            if self.n>0:
                self.xerror = self.xerror / float(self.n)
                self.yerror = self.yerror / float(self.n)

            self.tracks = new_tracks

        if self.xerror==0 and self.yerror==0:
            current_time = time.time()
            mask = np.zeros_like(self.frame_gray)
            mask[:] = 255
            p = cv2.goodFeaturesToTrack(self.frame_gray, mask = mask, **self.feature_params)
            if p is not None:
                for x, y in np.float32(p).reshape(-1, 2):
                    self.tracks.append([(x, y, current_time)])


        self.prev_gray = self.frame_gray 
Author: tobykurien, Project: pi-tracking-telescope, Lines of code: 57, Source file: tracking.py

Example 15: run

# Required module: import cv2 [as alias]
# Or: from cv2 import goodFeaturesToTrack [as alias]
def run(self):
        while True:
            ret, frame = self.cam.read()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                self.tracks = new_tracks
                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])


            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow('lk_track', vis)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break 
Author: fatcloud, Project: PyCV-time, Lines of code: 46, Source file: lk_track.py


Note: The cv2.goodFeaturesToTrack examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code; do not reproduce this article without permission.