

C++ SurfFeatureDetector Class Code Examples

This article collects typical usage examples of the C++ SurfFeatureDetector class. If you are wondering what the SurfFeatureDetector class does, how to use it, or what working examples look like, the hand-picked code samples below should help.


The sections below show 15 code examples of the SurfFeatureDetector class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
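Before diving into the examples, here is a minimal, self-contained sketch of the typical SurfFeatureDetector workflow under OpenCV 2.x (where SURF lives in the nonfree module); the image path and the Hessian threshold of 400 are placeholder assumptions, not taken from any of the examples below:

#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>   // SurfFeatureDetector (nonfree module)

using namespace cv;

int main()
{
    // Load a test image in grayscale; "scene.jpg" is a placeholder path.
    Mat img = imread("scene.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    if (img.empty())
        return -1;

    // The Hessian threshold controls sensitivity: higher values keep only stronger keypoints.
    SurfFeatureDetector detector(400);

    std::vector<KeyPoint> keypoints;
    detector.detect(img, keypoints);

    // Visualize the detected keypoints.
    Mat output;
    drawKeypoints(img, keypoints, output);
    imshow("SURF keypoints", output);
    waitKey(0);
    return 0;
}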

Example 1: prev_

bool TrackerForProject::init( const cv::Mat& frame, const cv::Rect& initial_position )
{
    position_ = initial_position;
    cv::cvtColor(frame, prevFrame_, CV_BGR2GRAY);

    // Detect SURF keypoints inside the initial tracking window.
    Mat prev_(prevFrame_(position_));

    SurfFeatureDetector detector;
    detector.detect(prev_, keypoints1);

    return true;
}
Developer ID: grishin-sergei, Project: face-tracking, Lines of code: 12, Source file: ForProject.cpp

Example 2: findMatch

bool findMatch(CvPoint &offset, FlannBasedMatcher matcher, SurfFeatureDetector detector, SurfDescriptorExtractor extractor, Mat des_object[])
{
	bool noMatch = true;
	Mat des_image, img_matches;
	vector<KeyPoint> kp_image;
	vector<vector<DMatch > > matches;
	vector<DMatch > good_matches;
	int iter = 0;
	Mat image = imread("/home/pi/opencv/photo.jpg" , CV_LOAD_IMAGE_GRAYSCALE );
	detector.detect( image, kp_image );
	extractor.compute( image, kp_image, des_image );
	while ( noMatch )
	{
		//printf("before kp and des detection 2\n");
	    	
		
		matcher.knnMatch(des_object[iter], des_image, matches, 2);
		for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
		{
		    if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
		    {
			good_matches.push_back(matches[i][0]);
		    }
		}
		
		//printf("Number of matches: %d\n", good_matches.size());
		if (good_matches.size() >= 10)
		{
			// Average the matched keypoints to estimate the object's center in the scene.
			CvPoint center = cvPoint(0,0);
			for ( size_t z = 0 ; z < good_matches.size() ; z++ )
			{
				int index = good_matches.at(z).trainIdx;
				center.x += kp_image.at(index).pt.x;
				center.y += kp_image.at(index).pt.y;
			}
			center.x = center.x / (int)good_matches.size();
			center.y = center.y / (int)good_matches.size();
			int radius = 5;
			circle( image, center, radius, {0,0,255}, 3, 8, 0 );
			namedWindow("test");
			imshow("test", image);
			imwrite("centerPoint.jpg", image);
			waitKey(5000);
			int offsetX = center.x - image.cols/2;
			int offsetY = center.y - image.rows/2;
			offset = cvPoint(offsetX, offsetY);			
			noMatch = false;
		}
		//printf("draw good matches\n");
		//Show detected matches
		if ( iter++ == 3 || !noMatch )
			break;
		
		good_matches.clear();
	}
	return noMatch;   // true means no template matched the image
}
Developer ID: Nissav, Project: MASTIFF, Lines of code: 57, Source file: main2.cpp
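In the example above, des_object[] is assumed to hold precomputed SURF descriptors for the template images, and the function returns true when no template matched. A hypothetical call site (the template filenames and the array size of 4, matching the iter++ == 3 bound, are assumptions) might look like this:

// Hypothetical setup for findMatch(); filenames and the template count are assumptions.
SurfFeatureDetector detector(400);
SurfDescriptorExtractor extractor;
FlannBasedMatcher matcher;

Mat des_object[4];
const char* templates[4] = { "obj0.jpg", "obj1.jpg", "obj2.jpg", "obj3.jpg" };
for (int i = 0; i < 4; i++)
{
	Mat tmpl = imread(templates[i], CV_LOAD_IMAGE_GRAYSCALE);
	vector<KeyPoint> kp_object;
	detector.detect(tmpl, kp_object);
	extractor.compute(tmpl, kp_object, des_object[i]);
}

CvPoint offset;
bool noMatch = findMatch(offset, matcher, detector, extractor, des_object);
if (!noMatch)
	printf("Offset from image center: (%d, %d)\n", offset.x, offset.y);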

Example 3: testCalonderClassifier

/*
 * Test the Calonder classifier by matching keypoints on a given image:
 *      classifierFilename - name of the file from which the classifier will be read,
 *      imgFilename - test image filename.
 *
 * To calculate keypoint descriptors you may use the RTreeClassifier class directly
 * (as for training), but it is more convenient to use the CalonderDescriptorExtractor
 * class, which is a wrapper around RTreeClassifier.
 */
static void testCalonderClassifier( const string& classifierFilename, const string& imgFilename )
{
    Mat img1 = imread( imgFilename, IMREAD_GRAYSCALE ), img2, H12;
    if( img1.empty() )
    {
        cout << "Test image can not be read." << endl;
        exit(-1);
    }
    warpPerspectiveRand( img1, img2, H12, theRNG() );

    // Extract keypoints from test images
    SurfFeatureDetector detector;
    vector<KeyPoint> keypoints1; detector.detect( img1, keypoints1 );
    vector<KeyPoint> keypoints2; detector.detect( img2, keypoints2 );

    // Compute descriptors
    CalonderDescriptorExtractor<float> de( classifierFilename );
    Mat descriptors1;  de.compute( img1, keypoints1, descriptors1 );
    Mat descriptors2;  de.compute( img2, keypoints2, descriptors2 );

    // Match descriptors
    BFMatcher matcher(NORM_L1);
    vector<DMatch> matches;
    matcher.match( descriptors1, descriptors2, matches );

    // Prepare inlier mask
    vector<char> matchesMask( matches.size(), 0 );
    vector<Point2f> points1; KeyPoint::convert( keypoints1, points1 );
    vector<Point2f> points2; KeyPoint::convert( keypoints2, points2 );
    Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);
    for( size_t mi = 0; mi < matches.size(); mi++ )
    {
        if( norm(points2[matches[mi].trainIdx] - points1t.at<Point2f>((int)mi,0)) < 4 ) // inlier
            matchesMask[mi] = 1;
    }

    // Draw
    Mat drawImg;
    drawMatches( img1, keypoints1, img2, keypoints2, matches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask );
    string winName = "Matches";
    namedWindow( winName, WINDOW_AUTOSIZE );
    imshow( winName, drawImg );
    waitKey();
}
Developer ID: 406089450, Project: opencv, Lines of code: 53, Source file: find_obj_calonder.cpp

Example 4: trainCalonderClassifier

/*
 * Trains the Calonder classifier and writes the trained classifier to a file:
 *      imgFilename - name of a .txt file containing the list of full filenames of the train images,
 *      classifierFilename - name of the binary file to which the classifier will be written.
 *
 * To train the Calonder classifier, the RTreeClassifier class needs to be used.
 */
static void trainCalonderClassifier( const string& classifierFilename, const string& imgFilename )
{
    // Reads train images
    ifstream is( imgFilename.c_str(), ifstream::in );
    vector<Mat> trainImgs;
    while( !is.eof() )
    {
        string str;
        getline( is, str );
        if (str.empty()) break;
        Mat img = imread( str, IMREAD_GRAYSCALE );
        if( !img.empty() )
            trainImgs.push_back( img );
    }
    if( trainImgs.empty() )
    {
        cout << "All train images can not be read." << endl;
        exit(-1);
    }
    cout << trainImgs.size() << " train images were read." << endl;

    // Extracts keypoints from train images
    SurfFeatureDetector detector;
    vector<BaseKeypoint> trainPoints;
    vector<IplImage> iplTrainImgs(trainImgs.size());
    for( size_t imgIdx = 0; imgIdx < trainImgs.size(); imgIdx++ )
    {
        iplTrainImgs[imgIdx] = trainImgs[imgIdx];
        vector<KeyPoint> kps; detector.detect( trainImgs[imgIdx], kps );

        for( size_t pointIdx = 0; pointIdx < kps.size(); pointIdx++ )
        {
            Point2f p = kps[pointIdx].pt;
            trainPoints.push_back( BaseKeypoint(cvRound(p.x), cvRound(p.y), &iplTrainImgs[imgIdx]) );
        }
    }

    // Trains Calonder classifier on extracted points
    RTreeClassifier classifier;
    classifier.train( trainPoints, theRNG(), 48, 9, 100 );
    // Writes classifier
    classifier.write( classifierFilename.c_str() );
}
Developer ID: 406089450, Project: opencv, Lines of code: 50, Source file: find_obj_calonder.cpp

Example 5: SURF_Descriptor

KDvoid SURF_Descriptor ( KDint nIdx )
{
	Mat		tDst;
	Mat		tImg1;
	Mat		tImg2;

	tImg1 = imread ( "/res/image/box.png", CV_LOAD_IMAGE_GRAYSCALE );
	tImg2 = imread ( "/res/image/box_in_scene.png", CV_LOAD_IMAGE_GRAYSCALE );

	// -- Step 1: Detect the keypoints using SURF Detector
	KDint  nMinHessian = 400;

	SurfFeatureDetector   tDetector ( nMinHessian );
	std::vector<KeyPoint> aKeypoints1, aKeypoints2;

	tDetector.detect ( tImg1, aKeypoints1 );
	tDetector.detect ( tImg2, aKeypoints2 );

	// -- Step 2: Calculate descriptors (feature vectors)
	SurfDescriptorExtractor  tExtractor;
	Mat  tDescriptors1, tDescriptors2;

	tExtractor.compute ( tImg1, aKeypoints1, tDescriptors1 );
	tExtractor.compute ( tImg2, aKeypoints2, tDescriptors2 );
/*
	// -- Step 3: Matching descriptor vectors with a brute force matcher
	BruteForceMatcher< L2<KDfloat> >  tMatcher;
	std::vector< DMatch >             aMatches;

	tMatcher.match ( tDescriptors1, tDescriptors2, aMatches );

	// -- Draw matches
	drawMatches ( tImg1, aKeypoints1, tImg2, aKeypoints2, aMatches, tDst ); 

	g_pController->setFrame ( 0, tDst );
*/
}
Developer ID: mcodegeeks, Project: OpenKODE-Framework, Lines of code: 37, Source file: SURF_Descriptor.cpp
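Step 3 above is commented out because BruteForceMatcher< L2<KDfloat> > was moved out of the core API in later OpenCV 2.x releases. A minimal sketch of the same matching and drawing step with its replacement, cv::BFMatcher, assuming the keypoints and descriptors computed above:

	// -- Step 3 (alternative): match descriptor vectors with cv::BFMatcher
	BFMatcher tMatcher ( NORM_L2 );
	std::vector<DMatch> aMatches;

	tMatcher.match ( tDescriptors1, tDescriptors2, aMatches );

	// -- Draw matches
	drawMatches ( tImg1, aKeypoints1, tImg2, aKeypoints2, aMatches, tDst );

	g_pController->setFrame ( 0, tDst );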

Example 6: find_next_homography

Mat find_next_homography(Mat image, Mat image_next, vector<KeyPoint> keypoints_0, Mat descriptors_0,
						 SurfFeatureDetector detector, SurfDescriptorExtractor extractor, 
						 BFMatcher matcher, vector<KeyPoint>& keypoints_next, Mat& descriptors_next)
{

	//step 1 detect feature points in next image
	vector<KeyPoint> keypoints_1;
	detector.detect(image_next, keypoints_1);

	Mat img_keypoints_surf0, img_keypoints_surf1;
	drawKeypoints(image, keypoints_0, img_keypoints_surf0);
	drawKeypoints(image_next, keypoints_1, img_keypoints_surf1);
	//cout << "# im0 keypoints" << keypoints_0.size() << endl;
    //cout << "# im1 keypoints" << keypoints_1.size() << endl;
	imshow("surf 0", img_keypoints_surf0);
	imshow("surf 1", img_keypoints_surf1);

    //step 2: extract feature descriptors from feature points
	Mat descriptors_1;
	extractor.compute(image_next, keypoints_1, descriptors_1);

	//step 3: feature matching
	//cout << "fd matching" << endl;
	vector<DMatch> matches;
	vector<Point2f> matched_0;
	vector<Point2f> matched_1;

	matcher.match(descriptors_0, descriptors_1, matches);
	Mat img_feature_matches;
	drawMatches(image, keypoints_0, image_next, keypoints_1, matches, img_feature_matches );
	imshow("Matches", img_feature_matches);

	for (int i = 0; i < matches.size(); i++ )
	{
		matched_0.push_back(keypoints_0[matches[i].queryIdx].pt);	
		matched_1.push_back(keypoints_1[matches[i].trainIdx].pt);	
	}
	keypoints_next = keypoints_1;
	descriptors_next = descriptors_1;
	return findHomography(matched_0, matched_1, RANSAC);

}
Developer ID: jaisrael, Project: AR-Tower-Defense, Lines of code: 42, Source file: chessboard.cpp
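The homography returned by find_next_homography() can then be used, for example, to warp one frame into the coordinate system of the next. A minimal sketch of such a call, where image and image_next are assumed to be two already-loaded consecutive frames:

// Hypothetical use of find_next_homography(): compute features for the first
// frame, estimate the homography to the next frame, and warp the first frame.
SurfFeatureDetector detector(400);
SurfDescriptorExtractor extractor;
BFMatcher matcher(NORM_L2);

vector<KeyPoint> keypoints_0, keypoints_next;
Mat descriptors_0, descriptors_next;
detector.detect(image, keypoints_0);
extractor.compute(image, keypoints_0, descriptors_0);

Mat H = find_next_homography(image, image_next, keypoints_0, descriptors_0,
                             detector, extractor, matcher,
                             keypoints_next, descriptors_next);

Mat warped;
warpPerspective(image, warped, H, image_next.size());
imshow("warped", warped);
waitKey(0);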

Example 7: getHistAndLabels

vector<Mat> getHistAndLabels(SurfFeatureDetector &detector, BOWImgDescriptorExtractor &bowDE, int dictionarySize) {

    // setup variable and object I need
    IplImage *img2;
    Mat labels(0, 1, CV_32FC1);
    Mat trainingData(0, dictionarySize, CV_32FC1);
    vector<KeyPoint> keypoint1;
    Mat bowDescriptor1;
    Helper helper;
    vector<string> files = vector<string>();

    helper.GetFileList(EVAL_DIR, files);

    float labelVal;

    for (unsigned int iz = 0; iz < files.size(); iz++) {
        int isImage = helper.instr(files[iz], "jpg", 0, true);
        if (isImage > 0) {
            string sFileName = TRAINING_DIR;
            sFileName.append(files[iz]);
            const char * imageName = sFileName.c_str ();

            img2 = cvLoadImage(imageName,0);
            if (img2) {
                detector.detect(img2, keypoint1);
                bowDE.compute(img2, keypoint1, bowDescriptor1);
                trainingData.push_back(bowDescriptor1);
                labelVal = iz+1;
                labels.push_back(labelVal);
            }


        }
    }

    vector<Mat> retVec;
    retVec.push_back(trainingData);
    retVec.push_back(labels);
    return retVec;

}
Developer ID: cstahmer, Project: archive-vision, Lines of code: 41, Source file: imgextract.cpp
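The returned pair of BOW histograms and labels is the usual input for training a classifier. A minimal sketch using OpenCV 2.x's CvSVM, assuming detector and bowDE have already been set up with a visual vocabulary (the SVM parameters and output filename are assumptions):

// Hypothetical follow-up: train an SVM on the histograms and labels
// returned by getHistAndLabels().
vector<Mat> histAndLabels = getHistAndLabels(detector, bowDE, dictionarySize);
Mat trainingData = histAndLabels[0];
Mat labels       = histAndLabels[1];

CvSVMParams params;
params.svm_type    = CvSVM::C_SVC;
params.kernel_type = CvSVM::LINEAR;
params.term_crit   = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);

CvSVM svm;
svm.train(trainingData, labels, Mat(), Mat(), params);
svm.save("svm_model.yml");   // placeholder output path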

Example 8: getSingleImageHistogram

Mat getSingleImageHistogram(SurfFeatureDetector &detector, BOWImgDescriptorExtractor &bowDE, string evalFile) {

    // setup variable and object I need
    IplImage *img2;
    vector<KeyPoint> keypoint1;
    Mat bowDescriptor1;
    Helper helper;


    int isImage = helper.instr(evalFile, "jpg", 0, true);
    if (isImage > 0) {

        const char * imageName = evalFile.c_str ();
        img2 = cvLoadImage(imageName,0);
        if (img2) {
            detector.detect(img2, keypoint1);
            bowDE.compute(img2, keypoint1, bowDescriptor1);
        }
    }

    return bowDescriptor1;
}
Developer ID: cstahmer, Project: archive-vision, Lines of code: 22, Source file: imgextract.cpp

Example 9: getClassMatch

float getClassMatch(SurfFeatureDetector &detector, BOWImgDescriptorExtractor &bowDE, IplImage* &img2, int dictionarySize, string sFileName, CvSVM &svm) {
    float response;

    vector<KeyPoint> keypoint2;
    Mat bowDescriptor2;
    Mat evalData(0, dictionarySize, CV_32FC1);
    Mat groundTruth(0, 1, CV_32FC1);
    Mat results(0, 1, CV_32FC1);


    detector.detect(img2, keypoint2);
    bowDE.compute(img2, keypoint2, bowDescriptor2);


    //evalData.push_back(bowDescriptor2);
    //groundTruth.push_back((float) classID);
    response = svm.predict(bowDescriptor2);
    //results.push_back(response);


    return response;
}
Developer ID: cstahmer, Project: archive-vision, Lines of code: 22, Source file: imgextract.cpp

Example 10: getHistograms

Mat getHistograms(SurfFeatureDetector &detector, BOWImgDescriptorExtractor &bowDE, int dictionarySize, vector<string> &collectionFilenames, string evalDir) {

    // setup variable and object I need
    IplImage *img2;
    Mat trainingData(0, dictionarySize, CV_32FC1);
    vector<KeyPoint> keypoint1;
    Mat bowDescriptor1;
    Helper helper;
    vector<string> files = vector<string>();

    helper.GetFileList(evalDir, files);

    cout << "Number of Collection Files to Process: " << files.size()-2 << endl;

    for (unsigned int iz = 0; iz < files.size(); iz++) {
        int isImage = helper.instr(files[iz], "jpg", 0, true);
        if (isImage > 0) {
            cout << "     Processing " << files[iz] << endl;

            collectionFilenames.push_back(files[iz]);
            string sFileName = EVAL_DIR;
            sFileName.append(files[iz]);
            const char * imageName = sFileName.c_str ();

            img2 = cvLoadImage(imageName,0);
            if (img2) {
                detector.detect(img2, keypoint1);
                bowDE.compute(img2, keypoint1, bowDescriptor1);
                trainingData.push_back(bowDescriptor1);
            }


        }
    }

    return trainingData;
}
Developer ID: cstahmer, Project: archive-vision, Lines of code: 37, Source file: imgextract.cpp

Example 11: main

int main(int argc, char** argv)
{
    if( argc < 2 )
    {
        printPrompt( argv[0] );
        return -1;
    }

    initModule_nonfree();

    // Get Input Data
    ifstream file(argv[1]);
    if ( !file.is_open() )
        return -1;   // could not open the input file (returning false from main would signal success)
    
    string str;
    
        // Image Name
    getline( file, str ); getline( file, str );
    string image_name = str;
        // Cloud Name
    getline( file, str ); getline( file, str );
    string cloud_name = str;
        // width of images to be created.
    getline( file, str ); getline( file, str );
    int w = atoi(str.c_str());
        // height of images to be created
    getline( file, str ); getline( file, str );
    int h = atoi(str.c_str());
        // resolution of voxel grids
    getline( file, str ); getline( file, str );
    float r = atof(str.c_str());
        // f (distance from pinhole)
    getline( file, str ); getline( file, str );
    float f = atof(str.c_str());
        // thetax (initial rotation about X Axis of map)
    getline( file, str ); getline( file, str );
    float thetaX = atof(str.c_str());
        // thetay (initial rotation about Y Axis of map)
    getline( file, str ); getline( file, str );
    float thetaY = atof(str.c_str());
        // number of points to go to
    getline( file, str ); getline( file, str );
    float nop = atoi(str.c_str());
        // Number of divisions
    getline( file, str ); getline( file, str );
    float divs = atoi(str.c_str());
        // Number of images to return
    getline( file, str ); getline( file, str );
    int numtoreturn = atoi(str.c_str());    
        // Should we load or create photos?
    getline( file, str ); getline( file, str );
    string lorc =str.c_str();
        // Directory to look for photos
    getline( file, str ); getline( file, str );
    string dir =str.c_str();
        // Directory to look for kp and descriptors
    getline( file, str ); getline( file, str );
    string kdir =str.c_str();
        // save photos?
    getline( file, str ); getline( file, str );
    string savePhotos =str.c_str();
    
    file.close();
    // Done Getting Input Data

    map<vector<float>, Mat> imagemap;
    map<vector<float>, Mat> surfmap;
    map<vector<float>, Mat> siftmap;
    map<vector<float>, Mat> orbmap;
    map<vector<float>, Mat> fastmap;
    imagemap.clear();

    vector<KeyPoint> SurfKeypoints;
    vector<KeyPoint> SiftKeypoints;
    vector<KeyPoint> OrbKeypoints;
    vector<KeyPoint> FastKeypoints;
    Mat SurfDescriptors;
    Mat SiftDescriptors;
    Mat OrbDescriptors;
    Mat FastDescriptors;

    int minHessian = 300;

    SurfFeatureDetector SurfDetector (minHessian);
    SiftFeatureDetector SiftDetector (minHessian);
    OrbFeatureDetector OrbDetector (minHessian);
    FastFeatureDetector FastDetector (minHessian);


    SurfDescriptorExtractor SurfExtractor;
    SiftDescriptorExtractor SiftExtractor;
    OrbDescriptorExtractor OrbExtractor;

    if ( !fs::exists( dir ) || lorc == "c" )
    { // Load Point Cloud and render images
        PointCloud<PT>::Ptr cloud (new pcl::PointCloud<PT>);
        io::loadPCDFile<PT>(cloud_name, *cloud);

        Eigen::Affine3f tf = Eigen::Affine3f::Identity();
//......... the rest of the code is omitted here .........
Developer ID: aarich, Project: localize-with-map, Lines of code: 101, Source file: createDBWithPointCloud.cpp

Example 12: collectclasscentroids

void collectclasscentroids(SurfFeatureDetector &detector, Ptr<DescriptorExtractor> &extractor, BOWKMeansTrainer &bowTrainer, string trainingDir, bool runInBackground, bool writelog) {

    IplImage *img;
    vector<string> files = vector<string>();
    Helper helper;
    string event;
    char ch[30];

    // should put error correction here to check if directory exists

    helper.GetFileList(trainingDir, files);

    for (unsigned int iz = 0; iz < files.size(); iz++) {
        int isImage = helper.instr(files[iz], "jpg", 0, true);
        if (isImage > 0) {


            string sFileName = trainingDir;
            string sFeaturesDir = "/usr/local/share/archive-vision/build/features/";
            string sOutputImageFilename = "/usr/local/share/archive-vision/build/feature_point_images/";
            sFileName.append(files[iz]);
            sOutputImageFilename.append(files[iz]);
            sFeaturesDir.append(files[iz]);
            sFeaturesDir.append(".txt");
            const char * imageName = sFileName.c_str ();

            img = cvLoadImage(imageName,0);
            if (img) {
                string workingFile = files[iz];
                vector<KeyPoint> keypoint;
                detector.detect(img, keypoint);
                if (keypoint.size()) {
                    Mat features;
                    extractor->compute(img, keypoint, features);

                    event = "Processing " + workingFile;
                    helper.logEvent(event, 2, runInBackground, writelog);


                    //try to write out an image with the features highlighted
                    // Add results to image and save.
                    //				Mat output;
                    //				drawKeypoints(img, keypoint, output, Scalar(0, 128, 0), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
                    //				imwrite(sOutputImageFilename, output);




                    // try writing out all the feature, each to its own YML file and see what
                    // they look like
                    //				helper.WriteToFile(sFeaturesDir, features, "features");

                    bowTrainer.add(features);
                } else {
                    event = workingFile + " contains no keypoints.";
                    helper.logEvent(event, 1, runInBackground, writelog);
                }
            }


        }
    }
    return;
}
Developer ID: cstahmer, Project: archive-vision, Lines of code: 64, Source file: imgextract.cpp
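collectclasscentroids() only accumulates SURF descriptors in the BOWKMeansTrainer; building the vocabulary and attaching it to the BOWImgDescriptorExtractor normally follows. A minimal sketch of that step, where the dictionary size and training directory are assumptions:

// Hypothetical follow-up to collectclasscentroids(): cluster the collected
// features into a visual vocabulary and hand it to the BOW extractor.
int dictionarySize = 1000;                             // assumption
TermCriteria tc(CV_TERMCRIT_ITER, 10, 0.001);
BOWKMeansTrainer bowTrainer(dictionarySize, tc, 1, KMEANS_PP_CENTERS);

SurfFeatureDetector detector(400);
Ptr<DescriptorExtractor> extractor(new SurfDescriptorExtractor());
Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher());
BOWImgDescriptorExtractor bowDE(extractor, matcher);

collectclasscentroids(detector, extractor, bowTrainer, "/path/to/training/", false, false);

Mat vocabulary = bowTrainer.cluster();                 // k-means over all collected features
bowDE.setVocabulary(vocabulary);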

Example 13: read

void read (Mat rgbimg, Mat depimg, Mat mask, PointCloud &pointcloud,
    PointCloud &keypoints, vector<KeyPoint> &key, int x1, int y1, double minHessian)
{
  vector<KeyPoint> keytemp;
  SurfFeatureDetector detector (minHessian);
  detector.detect (rgbimg, keytemp);

  // convert the feature points in the smaller mask into point cloud
  for (int k = 0; k < keytemp.size (); k++)
  {
    int i = (int) keytemp[k].pt.y;
    int j = (int) keytemp[k].pt.x;

    unsigned short depth = depimg.at<unsigned short> (i, j);

    if (mask.at<bool> (i, j) != 0 && depth != 0 && depth / MM_PER_M < DEPTH_THRESHOLD)
    {
      PointT point;
      double x = (WIDTH-(j + x1)-WIDTH/2) * depth / FOCAL / MM_PER_M;
      double y = (HEIGHT-(i + y1)-HEIGHT/2) * depth / FOCAL / MM_PER_M;
      double z = depth / MM_PER_M;

      point.x = x;
      point.y = y;
      point.z = z;

      Vec3b rgb = rgbimg.at<Vec3b> (i, j);
      point.b = (uint8_t) rgb[0];
      point.g = (uint8_t) rgb[1];
      point.r = (uint8_t) rgb[2];

      keypoints.points.push_back (point);
      //update the keypoints deleting some points outside the mask;
      key.push_back (keytemp[k]);
    }
  }

  // convert all the points in the mask into point cloud
  for (int i = 0; i < rgbimg.rows; i++)
  {
    for (int j = 0; j < rgbimg.cols; j++)
    {
      unsigned short depth = depimg.at<unsigned short> (i, j);
      if (mask.at<bool> (i, j) != 0 && depth != 0 && depth / MM_PER_M < DEPTH_THRESHOLD)
      {
        PointT point;
        double x = (WIDTH-(j + x1)-WIDTH/2) * depth / FOCAL / MM_PER_M;
        double y = (HEIGHT-(i + y1)-HEIGHT/2) * depth / FOCAL / MM_PER_M;
        double z = depth / MM_PER_M;

        point.x = x;
        point.y = y;
        point.z = z;

        Vec3b rgb = rgbimg.at<Vec3b> (i, j);
        point.b = (uint8_t) rgb[0];
        point.g = (uint8_t) rgb[1];
        point.r = (uint8_t) rgb[2];

        pointcloud.points.push_back (point);
      }
    }
  }

  pointcloud.width = (uint32_t) pointcloud.points.size ();
  pointcloud.height = 1;
  keypoints.width = (uint32_t) keypoints.points.size ();
  keypoints.height = 1;

}
Developer ID: ClaireXie, Project: modeling_3d, Lines of code: 70, Source file: read.cpp

Example 14: addTrainImage

    void addTrainImage(string name)
    {
    	/* Add a new template (reference) image and compute its descriptor */

    	Mat train_img = imread(_train_img_dir + "template_" + name + ".jpg");

    	if(!train_img.empty())
    	{
			resize(train_img, train_img, Size(SIGN_SIZE, SIGN_SIZE), 0, 0);
			_train_images.push_back(train_img);
			_train_sign_names.push_back(name);

			vector<KeyPoint> points;
			_detector.detect( train_img, points );
			_train_keypoints.push_back(points);

			Mat descriptors;
			_extractor.compute( train_img, points, descriptors);
			_train_descriptors.push_back(descriptors);	
		}
		else
		{
			cout << ERROR_STR << "Could not load train image " << _train_img_dir << name << ".jpg" << endl;
		}
    }
Developer ID: BorisVasilyev, Project: SignRecognition, Lines of code: 25, Source file: video_process.cpp

Example 15: init

void init(Mat img){
    Pose = Mat::eye(4, 4, CV_64F);
    PreviousImageGrayScale = img;
    PreviousFeatures.clear();
    SurfDetector.detect(img, PreviousFeatures);
    SurfDescriptor.compute(img, PreviousFeatures, PreviousFeatureDescriptors);
}
Developer ID: juangil, Project: visualOdometry, Lines of code: 7, Source file: VisualOdometry.cpp
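init() only caches the first frame's SURF keypoints and descriptors. A hypothetical sketch of how a later frame could be matched against that cache (the processFrame name and the motion-estimation step are assumptions; the globals follow the naming used in the example):

// Hypothetical next step after init(): describe the new frame and match it
// against the cached PreviousFeatureDescriptors before updating the cache.
void processFrame(Mat img){
    vector<KeyPoint> currentFeatures;
    Mat currentDescriptors;
    SurfDetector.detect(img, currentFeatures);
    SurfDescriptor.compute(img, currentFeatures, currentDescriptors);

    BFMatcher matcher(NORM_L2);
    vector<DMatch> matches;
    matcher.match(PreviousFeatureDescriptors, currentDescriptors, matches);

    // ... estimate the camera motion from the matched points and update Pose ...

    PreviousImageGrayScale = img;
    PreviousFeatures = currentFeatures;
    PreviousFeatureDescriptors = currentDescriptors;
}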


Note: The SurfFeatureDetector class examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers, and their copyright remains with the original authors. Before distributing or using the code, please refer to the corresponding project's license; do not reproduce without permission.