

C++ BFMatcher::knnMatch Method Code Examples

This article collects typical usage examples of the C++ method BFMatcher::knnMatch. If you have been wondering what BFMatcher::knnMatch does in C++, how to use it, or what real-world calls look like, the hand-picked code examples below may help. You can also explore further usage examples of the BFMatcher class that this method belongs to.


Below are 8 code examples of the BFMatcher::knnMatch method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
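Before walking through the examples, here is a minimal, self-contained sketch of the most common knnMatch idiom: brute-force k=2 matching followed by Lowe's ratio test. The image paths and the ORB feature count below are placeholder assumptions, and the snippet targets the OpenCV 3.x+ API:

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main() {
    // Placeholder input images (assumed paths).
    cv::Mat img1 = cv::imread("query.jpg", cv::IMREAD_GRAYSCALE);
    cv::Mat img2 = cv::imread("train.jpg", cv::IMREAD_GRAYSCALE);
    if (img1.empty() || img2.empty()) return -1;

    // Detect keypoints and compute binary ORB descriptors.
    cv::Ptr<cv::ORB> orb = cv::ORB::create(1000);
    std::vector<cv::KeyPoint> kp1, kp2;
    cv::Mat desc1, desc2;
    orb->detectAndCompute(img1, cv::noArray(), kp1, desc1);
    orb->detectAndCompute(img2, cv::noArray(), kp2, desc2);

    // k=2 retrieves the two nearest neighbours for every query descriptor;
    // NORM_HAMMING is the appropriate distance for binary descriptors.
    cv::BFMatcher matcher(cv::NORM_HAMMING);
    std::vector<std::vector<cv::DMatch>> knn;
    matcher.knnMatch(desc1, desc2, knn, 2);

    // Lowe's ratio test: accept a match only if the best neighbour is
    // clearly closer than the second best.
    std::vector<cv::DMatch> good;
    for (const auto& m : knn)
        if (m.size() == 2 && m[0].distance < 0.75f * m[1].distance)
            good.push_back(m[0]);
    return 0;
}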

Example 1: detector

static void align_2nd_to_1st_img(Mat& img1, Mat& img2) {
    // Calculate descriptors (feature vectors)
    std::vector<KeyPoint> keyPoints1, keyPoints2;
    Mat descriptor1, descriptor2;
    
    OrbFeatureDetector detector(5000);
    detector.detect(img1, keyPoints1);
    detector.detect(img2, keyPoints2);

    OrbDescriptorExtractor extractor;
    extractor.compute(img1, keyPoints1, descriptor1);
    extractor.compute(img2, keyPoints2, descriptor2);
    
    // Match descriptor vectors
    BFMatcher matcher(NORM_HAMMING); // ORB descriptors are binary, so use the Hamming norm
    std::vector<std::vector<DMatch>> matches;
    matcher.knnMatch(descriptor2, descriptor1, matches, 2);
    
    // Lowe's ratio test: keep a match only when the best neighbour is
    // clearly closer than the second best.
    const float rejectRatio = 0.8f;
    std::vector<DMatch> good_matches;
    for (size_t i = 0; i < matches.size(); i++) {
        if (matches[i].size() < 2 ||
            matches[i][0].distance / matches[i][1].distance > rejectRatio)
            continue;
        good_matches.push_back(matches[i][0]);
    }
    
    std::vector<Point2f> good_keyPoints1, good_keyPoints2;
    for (size_t i = 0; i < good_matches.size(); i++) {
        good_keyPoints1.push_back(keyPoints1[good_matches[i].trainIdx].pt);
        good_keyPoints2.push_back(keyPoints2[good_matches[i].queryIdx].pt);
    }
    
    Mat H = findHomography( good_keyPoints2, good_keyPoints1, CV_RANSAC );
    warpPerspective(img2, img2, H, img1.size(), INTER_NEAREST);
}
Developer: JenniferWang, Project: Opencv-Android-PanoHDR, Lines of code: 35, Source file: HDR.cpp
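For context, a hypothetical call site for the function above might look like this (the file names are placeholders; img2 is warped in place into img1's coordinate frame):

Mat img1 = imread("frame_a.jpg");
Mat img2 = imread("frame_b.jpg");
if (!img1.empty() && !img2.empty())
    align_2nd_to_1st_img(img1, img2); // img2 is replaced by its warped version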

Example 2:

void CameraPoseOptimization::crossCheckMatching
(BFMatcher& descriptorMatcher, const Mat& descriptors1, const Mat& descriptors2,
vector<DMatch>& filteredMatches12, int knn /* = 1 */)
{
	filteredMatches12.clear();
	vector<vector<DMatch> > matches12, matches21;
	descriptorMatcher.knnMatch(descriptors1, descriptors2, matches12, knn);
	descriptorMatcher.knnMatch(descriptors2, descriptors1, matches21, knn);
	for (size_t m = 0; m < matches12.size(); m++)
	{
		bool findCrossCheck = false;
		for (size_t fk = 0; fk < matches12[m].size(); fk++)
		{
			DMatch forward = matches12[m][fk];
			for (size_t bk = 0; bk < matches21[forward.trainIdx].size(); bk++)
			{
				DMatch backward = matches21[forward.trainIdx][bk];
				if (backward.trainIdx == forward.queryIdx)
				{
					filteredMatches12.push_back(forward);
					findCrossCheck = true;
					break;
				}
			}
			if (findCrossCheck)
				break;
		}
	}
}
Developer: chaowang15, Project: CameraPoseOpt, Lines of code: 29, Source file: CameraPoseOptimization.cpp
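A hypothetical call site for the cross-check filter above, a sketch assuming descriptors1 and descriptors2 were produced by any descriptor extractor and that CameraPoseOptimization is default-constructible (its definition is not shown in the snippet):

BFMatcher matcher(NORM_L2);
vector<DMatch> filteredMatches;
CameraPoseOptimization poseOpt; // assumed constructible; class body not shown above
poseOpt.crossCheckMatching(matcher, descriptors1, descriptors2, filteredMatches, 1);
// filteredMatches now contains only mutually-best matches: a forward match
// survives only if some backward match points back at the same query index.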

Example 3: query

PERF_TEST_P(BruteForceMatcherFixture, DISABLED_knnMatch,
            OCL_BFMATCHER_TYPICAL_MAT_SIZES)  // TODO too many outliers
{
    const Size srcSize = GetParam();

    vector<vector<DMatch> > matches(2);
    Mat query(srcSize, CV_32F), train(srcSize, CV_32F);
    randu(query, 0.0f, 1.0f);
    randu(train, 0.0f, 1.0f);

    declare.in(query, train);
    if (srcSize.height == 2000)
        declare.time(8);

    if (RUN_PLAIN_IMPL)
    {
        BFMatcher matcher (NORM_L2);
        TEST_CYCLE() matcher.knnMatch(query, train, matches, 2);

        std::vector<DMatch> & matches0 = matches[0], & matches1 = matches[1];
        SANITY_CHECK_MATCHES(matches0);
        SANITY_CHECK_MATCHES(matches1);
    }
    else if (RUN_OCL_IMPL)
    {
        ocl::BruteForceMatcher_OCL_base oclMatcher(ocl::BruteForceMatcher_OCL_base::L2Dist);
        ocl::oclMat oclQuery(query), oclTrain(train);

        TEST_CYCLE() oclMatcher.knnMatch(oclQuery, oclTrain, matches, 2);

        std::vector<DMatch> & matches0 = matches[0], & matches1 = matches[1];
        SANITY_CHECK_MATCHES(matches0);
        SANITY_CHECK_MATCHES(matches1);
    }
    else
        OCL_PERF_ELSE
}
Developer: Codersheng, Project: opencv, Lines of code: 37, Source file: perf_brute_force_matcher.cpp

Example 4: detect_table

static Mat detect_table(Mat &frame, table_detection_params_t& params, control_panel_t& panel, const SubottoReference& reference, const SubottoMetrics &metrics, FrameAnalysis &frame_analysis) {

  dump_time(panel, "cycle", "detect table start");

	const Mat& reference_image = reference.image;
	const Mat& reference_mask = reference.mask;
	auto& reference_metrics = reference.metrics;

  vector< KeyPoint > frame_features, reference_features;
	Mat frame_features_descriptions, reference_features_descriptions;
	tie(frame_features, frame_features_descriptions) = get_features(frame, Mat(), params.frame_features_per_level, params.frame_features_levels);
  tie(reference_features, reference_features_descriptions) = get_features(reference_image, reference_mask, params.reference_features_per_level, params.reference_features_levels);

	vector<vector<DMatch>> matches_groups;

	BFMatcher dm;
	dm.knnMatch(reference_features_descriptions, frame_features_descriptions, matches_groups, params.features_knn, Mat());

	//if(will_show(panel, "table detect", "matches")) {
  if (true) {
    Mat &matches = frame_analysis.detect_table_matches;
    drawMatches(reference_image, reference_features, frame, frame_features, matches_groups, matches);
  }

	vector<Point2f> coarse_from, coarse_to;

	for (auto matches : matches_groups) {
		for (DMatch match : matches) {
			auto f = reference_features[match.queryIdx].pt;
			auto t = frame_features[match.trainIdx].pt;

			coarse_from.push_back(f);
			coarse_to.push_back(t);
		}
	}

	logger(panel, "table detect", INFO) <<
			"reference features: " << reference_features.size() <<
			" frame features: " << frame_features.size() <<
			" matches: " << coarse_from.size() << endl;

	Mat coarse_transform;
	if(coarse_from.size() < 6) {
		coarse_transform = Mat::eye(3, 3, CV_32F);
		logger(panel, "table detect", WARNING) << "phase 1 motion estimation - not enough features!" << endl;
	} else {
		RansacParams ransac_params(6, params.coarse_ransac_threshold, params.coarse_ransac_outliers_ratio, 0.99f);
		float rmse;
		int ninliers;
		coarse_transform = estimateGlobalMotionRansac(coarse_from, coarse_to, MM_SIMILARITY, ransac_params, &rmse, &ninliers);

		logger(panel, "table detect", INFO) <<
				"phase 1 motion estimation - rmse: " << rmse <<
				" inliers: " << ninliers << "/" << coarse_from.size() << endl;

	}

  dump_time(panel, "cycle", "detect table phase 1 finished");

	Mat &warped = frame_analysis.detect_table_after_matching;
	warpPerspective(frame, warped, coarse_transform, reference_image.size(), WARP_INVERSE_MAP | INTER_LINEAR);

	vector<KeyPoint> optical_flow_features;

  // As above
	//PyramidAdaptedFeatureDetector optical_flow_fd(new GoodFeaturesToTrackDetector(params.optical_flow_features_per_level), params.optical_flow_features_levels);
  auto optical_flow_fd = GFTTDetector::create(params.optical_flow_features_per_level);

	optical_flow_fd->detect(reference_image, optical_flow_features);

	vector<Point2f> optical_flow_from, optical_flow_to;
	vector<uchar> status;

	for(KeyPoint kp : optical_flow_features) {
		optical_flow_from.push_back(kp.pt);
	}

	vector<Point2f> good_optical_flow_from, good_optical_flow_to;

	if (!optical_flow_features.empty()) {
    calcOpticalFlowPyrLK(reference_image, warped, optical_flow_from, optical_flow_to, status, noArray());

		for (int i = 0; i < optical_flow_from.size(); i++) {
			if (!status[i]) {
				continue;
			}

			good_optical_flow_from.push_back(optical_flow_from[i]);
			good_optical_flow_to.push_back(optical_flow_to[i]);
		}

		logger(panel, "table detect", INFO) <<
				"detection optical flow features: " << good_optical_flow_from.size() << "/" << optical_flow_from.size() << endl;
	} else {
		logger(panel, "table detect", WARNING) << "detection optical flow - no features!" << endl;
	}

	Mat flow_correction;
	if (good_optical_flow_from.size() < 6) {
		flow_correction = Mat::eye(3, 3, CV_32F);
//......... rest of the code omitted here .........
Developer: subotto, Project: subtracker, Lines of code: 101, Source file: subotto_tracking.cpp

Example 5: computePoseDifference

void computePoseDifference(Mat img1, Mat img2, CommandArgs args, Mat k, Mat& dist_coefficients, double& worldScale, Mat& R, Mat& t, Mat& img_matches)
{
   cout << "%===============================================%" << endl;

   Mat camera_matrix = k.clone();
   if (args.resize_factor > 1) 
   {
      resize(img1, img1, Size(img1.cols / args.resize_factor, 
               img1.rows / args.resize_factor)); // make smaller for performance and displayablity
      resize(img2, img2, Size(img2.cols / args.resize_factor,
               img2.rows / args.resize_factor));
      // scale matrix down according to changed resolution
      camera_matrix = camera_matrix / args.resize_factor;
      camera_matrix.at<double>(2,2) = 1;
   }

   Mat K1, K2;
   K1 = K2 = camera_matrix;
   if (img1.rows > img1.cols) // it is assumed the camera has been calibrated in landscape mode, so undistortion must also be performed in landscape orientation, or the camera matrix must be modified (fx,fy and cx,cy need to be exchanged)
   {
      swap(K1.at<double>(0,0), K1.at<double>(1,1));
      swap(K1.at<double>(0,2), K1.at<double>(1,2));
   }
   if (img2.rows > img2.cols)
   {
      swap(K2.at<double>(0,0), K2.at<double>(1,1));
      swap(K2.at<double>(0,2), K2.at<double>(1,2));
   }

   // Feature detection + extraction
   vector<KeyPoint> KeyPoints_1, KeyPoints_2;
   Mat descriptors_1, descriptors_2;

   Ptr<Feature2D> feat_detector;
   if (args.detector == DETECTOR_KAZE) 
   {
      feat_detector = AKAZE::create(args.detector_data.upright ? AKAZE::DESCRIPTOR_MLDB_UPRIGHT : AKAZE::DESCRIPTOR_MLDB, 
            args.detector_data.descriptor_size,
            args.detector_data.descriptor_channels,
            args.detector_data.threshold,
            args.detector_data.nOctaves,
            args.detector_data.nOctaveLayersAkaze);

   } else if (args.detector == DETECTOR_SURF)
   {
      feat_detector = xfeatures2d::SURF::create(args.detector_data.minHessian, 
            args.detector_data.nOctaves, args.detector_data.nOctaveLayersAkaze, args.detector_data.extended, args.detector_data.upright);
   } else if (args.detector == DETECTOR_SIFT)
   {
      feat_detector = xfeatures2d::SIFT::create(args.detector_data.nFeatures, 
            args.detector_data.nOctaveLayersSift, args.detector_data.contrastThreshold, args.detector_data.sigma);
   }

   feat_detector->detectAndCompute(img1, noArray(), KeyPoints_1, descriptors_1);
   feat_detector->detectAndCompute(img2, noArray(), KeyPoints_2, descriptors_2);

   cout << "Number of feature points (img1, img2): " << "(" << KeyPoints_1.size() << ", " << KeyPoints_2.size() << ")" << endl;

   // Find correspondences
   BFMatcher matcher;
   vector<DMatch> matches;
   if (args.use_ratio_test) 
   {
      if (args.detector == DETECTOR_KAZE) 
         matcher = BFMatcher(NORM_HAMMING, false);
      else matcher = BFMatcher(NORM_L2, false);

      vector<vector<DMatch>> match_candidates;
      const float ratio = args.ratio;
      matcher.knnMatch(descriptors_1, descriptors_2, match_candidates, 2);
      for (int i = 0; i < match_candidates.size(); i++)
         if (match_candidates[i][0].distance < ratio * match_candidates[i][1].distance)
            matches.push_back(match_candidates[i][0]);

      cout << "Number of matches passing ratio test: " << matches.size() << endl;

   } else
   {
      if (args.detector == DETECTOR_KAZE) 
         matcher = BFMatcher(NORM_HAMMING, true);
      else matcher = BFMatcher(NORM_L2, true);
      matcher.match(descriptors_1, descriptors_2, matches);
      cout << "Number of matching feature points: " << matches.size() << endl;
   }


   // Convert correspondences to vectors
   vector<Point2f>imgpts1,imgpts2;

   for(unsigned int i = 0; i < matches.size(); i++) 
   {
      imgpts1.push_back(KeyPoints_1[matches[i].queryIdx].pt); 
      imgpts2.push_back(KeyPoints_2[matches[i].trainIdx].pt); 
   }

   Mat mask; // inlier mask
   if (args.undistort) 
   {
      undistortPoints(imgpts1, imgpts1, K1, dist_coefficients, noArray(), K1);
      undistortPoints(imgpts2, imgpts2, K2, dist_coefficients, noArray(), K2);
//......... rest of the code omitted here .........
Developer: AnnKatrinBecker, Project: OpenCV-test-crap, Lines of code: 101, Source file: stereo_v3.cpp

Example 6: main

//--------------------------------------【main() function】-----------------------------------------
//          Description: entry point of this console application; execution starts here
//-----------------------------------------------------------------------------------------------
int main()
{
	//【0】Change the console text color
	system("color 5F"); 

	ShowHelpText();

	//【1】Load the training image, display it, and convert it to grayscale
	Mat trainImage = imread("1.jpg"), trainImage_gray;
	imshow("Original image", trainImage);
	cvtColor(trainImage, trainImage_gray, CV_BGR2GRAY);

	//【2】Detect SIFT keypoints and extract the training image's descriptors
	vector<KeyPoint> train_keyPoint;
	Mat trainDescription;
	SiftFeatureDetector featureDetector;
	featureDetector.detect(trainImage_gray, train_keyPoint);
	SiftDescriptorExtractor featureExtractor;
	featureExtractor.compute(trainImage_gray, train_keyPoint, trainDescription);

	//【3】Set up brute-force matching on the descriptors
	BFMatcher matcher;
	vector<Mat> train_desc_collection(1, trainDescription);
	matcher.add(train_desc_collection);
	matcher.train();

	//【4】Open the video capture device and define the frame counter
	VideoCapture cap(0);
	unsigned int frameCount = 0; // frame counter

	//【5】Loop until the 'q' key is pressed
	while(char(waitKey(1)) != 'q')
	{
		//<1> Parameter setup
		double time0 = static_cast<double>(getTickCount()); // record the start time
		Mat captureImage, captureImage_gray;
		cap >> captureImage; // grab a video frame into captureImage
		if(captureImage.empty())
			continue;

		//<2> Convert the frame to grayscale
		cvtColor(captureImage, captureImage_gray, CV_BGR2GRAY);

		//<3> Detect SIFT keypoints and extract the test image's descriptors
		vector<KeyPoint> test_keyPoint;
		Mat testDescriptor;
		featureDetector.detect(captureImage_gray, test_keyPoint);
		featureExtractor.compute(captureImage_gray, test_keyPoint, testDescriptor);

		//<4> Match the training and test descriptors
		vector<vector<DMatch> > matches;
		matcher.knnMatch(testDescriptor, matches, 2);

		//<5> Apply Lowe's ratio test to keep only the good matches
		vector<DMatch> goodMatches;
		for(unsigned int i = 0; i < matches.size(); i++)
		{
			if(matches[i][0].distance < 0.6 * matches[i][1].distance)
				goodMatches.push_back(matches[i][0]);
		}

		//<6> Draw the matches and show the result window
		Mat dstImage;
		drawMatches(captureImage, test_keyPoint, trainImage, train_keyPoint, goodMatches, dstImage);
		imshow("Match window", dstImage);

		//<7> Print the frame rate
		cout << "\t> Current frame rate: " << getTickFrequency() / (getTickCount() - time0) << endl;
	}

	return 0;
}
Developer: BigCreatation, Project: OpenCV3-Intro-Book-Src, Lines of code: 75, Source file: 93_SiftAndBFMatcher.cpp
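Note that Example 6 uses the query-only overload of knnMatch: after add() and train(), the matcher searches its stored descriptor collection instead of taking a second descriptor argument. A minimal sketch of that mode, assuming trainDescriptors and queryDescriptors are precomputed descriptor Mats:

BFMatcher matcher;
matcher.add(vector<Mat>(1, trainDescriptors)); // register the training collection
matcher.train();                               // a no-op for brute-force matching, kept for interface parity
vector<vector<DMatch> > knnMatches;
matcher.knnMatch(queryDescriptors, knnMatches, 2); // query-only overload
// DMatch::imgIdx identifies which added descriptor Mat produced each match.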

Example 7: main

int main(int argc, char* argv[])
{
	//video input
	string videoName("A_kind_of_a_Show.avi");
	VideoCapture capture(videoName);
	if (!capture.isOpened())
	{
		cout << "!capture.isOpened()";
		return -1;
	}

	//path list
	vector<vector<Point2f>> pathList;
	vector<int> kpIdx2pathListIdx;
	
	vector<KeyPoint> kpTrackedPrev;
	Mat desTrackedPrev;
	vector<KeyPoint> kpEdgePrev;
	Mat desEdgePrev;

	//firstFrame init
	Mat firstFrame;
	Mat frame, framePrev;
	capture.read(firstFrame);
	keypointDetectorAnddescriptor.detect(firstFrame, kpTrackedPrev);
	keypointDetectorAnddescriptor.compute(firstFrame, kpTrackedPrev, desTrackedPrev);
	getEdgeKeypoint(firstFrame.cols, firstFrame.rows, 0.25,
		kpTrackedPrev, desTrackedPrev,
		kpEdgePrev, desEdgePrev);
	for (int i = 0; i < kpTrackedPrev.size(); ++i)
	{
		pathList.push_back(vector<Point2f>());
		pathList[i].push_back(kpTrackedPrev[i].pt);
		kpIdx2pathListIdx.push_back(i);
	}
	firstFrame.copyTo(framePrev);

	//video writer
	VideoWriter vw("result.avi", CV_FOURCC('M', 'J', 'P', 'G'), 12, Size(firstFrame.cols, firstFrame.rows));
	if (!vw.isOpened())
		return -1;

	//frame
	vector<KeyPoint> kpCur;
	Mat desCur;
	int frameIdx = 0;

	//processing
	while (capture.read(frame))
	{
		++frameIdx;

		keypointDetectorAnddescriptor.detect(frame, kpCur);
		keypointDetectorAnddescriptor.compute(frame, kpCur, desCur);

		//edge keypoint matching for homography
		vector<Point2f> ptEdgeCurMatched;
		vector<Point2f> ptEdgePrevMatched;
		vector<vector<DMatch>> vvmatchs;
		matcher.knnMatch(desEdgePrev, desCur, vvmatchs, 2);
		for (int i = 0; i < vvmatchs.size(); ++i)
		{
			if (vvmatchs[i][0].distance < vvmatchs[i][1].distance * 0.8)
			{
				ptEdgeCurMatched.push_back(kpCur[vvmatchs[i][0].trainIdx].pt);
				ptEdgePrevMatched.push_back(kpEdgePrev[vvmatchs[i][0].queryIdx].pt);
			}
		}

		//findHomography
		Mat h = findHomography(ptEdgePrevMatched,ptEdgeCurMatched, RANSAC);
		cout << h << endl;
		
		// camera movement compensation
		for (vector<Point2f>& path : pathList){
			perspectiveTransform(path, path, h);
		}

		Mat warpedframe;
		warpPerspective(framePrev, warpedframe, h, frame.size());
		imshow("frame", frame);
		imshow("prev", framePrev);
		imshow("warpedframe", warpedframe);

		getEdgeKeypoint(frame.cols, frame.rows, 0.25,
			kpCur, desCur,
			kpEdgePrev, desEdgePrev);
		frame.copyTo(framePrev);

		//keypoint tracking for pathlist
		vector<int> kpIdx2pathListIdxTemp;
		vector<KeyPoint> kpTrackedCur;
		Mat desTrackedCur;
		set<int> curMatchedKpIdxSet;
		matcher.knnMatch(desTrackedPrev, desCur, vvmatchs, 2);
		for (int i = 0; i < vvmatchs.size(); ++i)
		{
			if (vvmatchs[i][0].distance < vvmatchs[i][1].distance * 0.6)
			{
				pathList[kpIdx2pathListIdx[i]].push_back(kpCur[vvmatchs[i][0].trainIdx].pt);
//......... rest of the code omitted here .........
Developer: zydu51, Project: vitrivr_motion_testcode, Lines of code: 101, Source file: camera_movement_compensation_test.cpp

Example 8: main

int main( int argc, char** argv )
{
    if( argc != 4 )
    { readme(); return -1; }
    namespace io = boost::iostreams;
    if(strncmp(argv[1],"detect",6)==0)
    {
        const char* fname_pic = argv[2];
        const char* fname_kps = argv[3];
        Mat img = imread(fname_pic,IMREAD_GRAYSCALE);
        if(!img.data)
        {
            cout<< "Error reading images!" << std::endl;
            return -1;
        }
        Ptr<SIFT> sift_detector = SIFT::create(siftPoints);
        Ptr<SURF> surf_detector = SURF::create(minHessian);
        vector<KeyPoint> surf_keypoints,sift_keypoints;
        Mat sift_descriptors,surf_descriptors;
        sift_detector->detectAndCompute(img, Mat(),sift_keypoints, sift_descriptors);
        surf_detector->detectAndCompute(img, Mat(),surf_keypoints, surf_descriptors);

        ofstream ofs(fname_kps,ios_base::binary);
        {
            io::filtering_streambuf<io::output> out;
            out.push(io::zlib_compressor(io::zlib::best_compression));
            out.push(ofs);
            binary_oarchive oa(out);
            ArchiveHelper<vector<KeyPoint> > sift_archiver(sift_keypoints);
            ArchiveHelper<vector<KeyPoint> > surf_archiver(surf_keypoints);
            ArchiveHelper<Mat> ar1(sift_descriptors);
            ArchiveHelper<Mat> ar2(surf_descriptors);
            oa << sift_archiver;
            oa << surf_archiver;
            oa << ar1;
            oa << ar2;
        }
        ofs.close();
    }
    else if(strncmp(argv[1],"match",5)==0)
    {
        const char* fname_pic = argv[2];
        const char* fname_kps = argv[3];
        vector<KeyPoint> isift_keypoints,isurf_keypoints,psift_keypoints,psurf_keypoints;
        Mat isift_descriptors,isurf_descriptors;

        ifstream ifs(fname_kps,ios_base::binary);
        {
            io::filtering_streambuf<io::input> in;
            in.push(iostreams::zlib_decompressor());
            in.push(ifs);
            binary_iarchive ia(in);
            ArchiveHelper<vector<KeyPoint> > sift_archiver(isift_keypoints),surf_archiver(isurf_keypoints);
            ArchiveHelper<Mat> ar1(isift_descriptors),ar2(isurf_descriptors);

            ia>>sift_archiver;
            ia>>surf_archiver;
            ia>>ar1;
            ia>>ar2;
        }
        ifs.close();
        Mat img = imread(fname_pic,IMREAD_GRAYSCALE);
        Ptr<SIFT> sift_detector = SIFT::create(siftPoints);
        Ptr<SURF> surf_detector = SURF::create(minHessian);
        Mat psift_descriptors, psurf_descriptors;

        sift_detector->detectAndCompute(img, Mat(),psift_keypoints, psift_descriptors);
        surf_detector->detectAndCompute(img, Mat(),psurf_keypoints, psurf_descriptors);

        BFMatcher matcher;
        vector< DMatch > sift_matches,surf_matches;
        vector<vector<DMatch> > sift_knnMatches,surf_knnMatches;
        matcher.knnMatch(psift_descriptors,isift_descriptors,sift_knnMatches,2);
        matcher.knnMatch(psurf_descriptors,isurf_descriptors,surf_knnMatches,2);

        for( size_t i = 0; i < sift_knnMatches.size(); i++ )
        {
            const DMatch& bestMatch = sift_knnMatches[i][0];
            const DMatch& betterMatch1 = sift_knnMatches[i][1];
            float  distanceRatio = bestMatch.distance / betterMatch1.distance;
            if(distanceRatio<0.61)
            {
                sift_matches.push_back(bestMatch);
            }
        }
        for( size_t i = 0; i < surf_knnMatches.size(); i++ )
        {
            const DMatch& bestMatch = surf_knnMatches[i][0];
            const DMatch& betterMatch1 = surf_knnMatches[i][1];
            float  distanceRatio = bestMatch.distance/betterMatch1.distance;
            if(distanceRatio<0.65)
            {
                surf_matches.push_back(bestMatch);
            }
        }
        printf("-- SIFT KNN Matching rate:%f\n",sift_matches.size()/(0.0+psift_keypoints.size()));
        printf("-- SURF KNN Matching rate:%f\n\n",surf_matches.size()/(0.0+psurf_keypoints.size()));
        //-- Quick calculation of max and min distances between keypoints
        double mx_sift_dist = 0; double mn_sift_dist = 999;
        double mx_surf_dist = 0; double mn_surf_dist = 999;
//......... rest of the code omitted here .........
Developer: Chinalover, Project: hctf2015-all-problems, Lines of code: 101, Source file: picmatch.cpp


Note: the BFMatcher::knnMatch method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors; for distribution and use, please follow the corresponding project's License. Do not reproduce without permission.