

C++ SurfFeatureDetector::detect Method Code Examples

This article collects typical usage examples of the C++ method SurfFeatureDetector::detect. If you are wondering how SurfFeatureDetector::detect is used in practice, the curated examples below may help. You can also browse further usage examples of the SurfFeatureDetector class that this method belongs to.


The following 15 code examples of SurfFeatureDetector::detect are listed, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better C++ code examples.
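
Before the examples, here is a minimal, self-contained sketch of the typical detect-then-describe workflow, assuming OpenCV 2.4.x with the nonfree module enabled; the image path "box.png" and the Hessian threshold of 400 are placeholder choices, not taken from any of the projects below.

#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>

using namespace cv;

int main()
{
    // Load a test image in grayscale; SURF operates on single-channel images.
    Mat img = imread("box.png", CV_LOAD_IMAGE_GRAYSCALE);
    if (img.empty())
    {
        std::cerr << "Could not read box.png" << std::endl;
        return -1;
    }

    // Detect keypoints; the constructor argument is the Hessian threshold.
    SurfFeatureDetector detector(400);
    std::vector<KeyPoint> keypoints;
    detector.detect(img, keypoints);

    // Compute SURF descriptors for the detected keypoints.
    SurfDescriptorExtractor extractor;
    Mat descriptors;
    extractor.compute(img, keypoints, descriptors);

    std::cout << "Detected " << keypoints.size() << " keypoints" << std::endl;
    return 0;
}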

Example 1: addTrainImage

    void addTrainImage(string name)
    {
    	/* Add a new reference (template) image and compute its descriptors */

    	Mat train_img = imread(_train_img_dir + "template_" + name + ".jpg");

    	if(!train_img.empty())
    	{
			resize(train_img, train_img, Size(SIGN_SIZE, SIGN_SIZE), 0, 0);
			_train_images.push_back(train_img);
			_train_sign_names.push_back(name);

			vector<KeyPoint> points;
			_detector.detect( train_img, points );
			_train_keypoints.push_back(points);

			Mat descriptors;
			_extractor.compute( train_img, points, descriptors);
			_train_descriptors.push_back(descriptors);	
		}
		else
		{
			cout << ERROR_STR << "Could not load train image " << _train_img_dir << "template_" << name << ".jpg" << endl;
		}
    }
Author: BorisVasilyev, Project: SignRecognition, Lines: 25, Source: video_process.cpp

Example 2: init

void init(Mat img){
    Pose = Mat::eye(4, 4, CV_64F);
    PreviousImageGrayScale = img;
    PreviousFeatures.clear();
    SurfDetector.detect(img, PreviousFeatures);
    SurfDescriptor.compute(img, PreviousFeatures, PreviousFeatureDescriptors);
}
Author: juangil, Project: visualOdometry, Lines: 7, Source: VisualOdometry.cpp

Example 3: imageCallback

void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
    //std_msgs::String imsignal;
    //std_msgs::String comsignal;
    cv_bridge::CvImageConstPtr cv_ptr;
    //char filename[40];
    cv_ptr =  cv_bridge::toCvShare(msg, enc::BGR8);
    Mat im,biimage;
    im = cv_ptr->image;
    //cvtColor(cv_ptr->image,im,CV_BGR2GRAY);
    inRange(im,Scalar(80,0,0), Scalar(255,255,50),biimage); //(85,80,80);
   // p.Uppercolor = Scalar(125,255,255);
    //imshow( "before", im );
    //waitKey(1);
    //imshow("after",biimage);
    SimpleBlobDetector::Params params;

    params.filterByColor =true;
    params.filterByCircularity =false;
    params.filterByConvexity =false;
    params.filterByInertia = false;
    params.blobColor = 255;
    // Filter by Area.
    params.filterByArea = true;
    params.minArea = 10000;
    params.maxArea = 800000;
    // Filter by Inertia
    params.minInertiaRatio = 10;
    params.minDistBetweenBlobs = 10;
    SimpleBlobDetector detector(params);
    std::vector<KeyPoint> keypoints;
    detector.detect(biimage,keypoints);
    Mat im_with_keypoints;
    drawKeypoints( biimage, keypoints, im_with_keypoints, Scalar(0,0,255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
    imshow("test",im_with_keypoints);
    waitKey(1);
    for (std::vector<KeyPoint>::iterator it = keypoints.begin(); it != keypoints.end(); ++it)
    {
        KeyPoint k = *it;
        cout << k.pt << endl;
        x_bar = k.pt.x;
        y_bar = k.pt.y;
        if (x_bar > ((im_with_keypoints.cols/2) - (im_with_keypoints.cols/15)) && x_bar < (im_with_keypoints.cols/2) + (im_with_keypoints.cols/15))
        {
            std_msgs::String sent;
            sent.data = "getposition";
            com_pub.publish(sent);
            ros::Duration(0.05).sleep();
            sent.data = "BR";
            com_pub.publish(sent);
            ros::Duration(2).sleep();
        }
    }
}
Author: ChunkyBART, Project: Mytest, Lines: 59, Source: barrel_detect.cpp

Example 4: findMatch

bool findMatch(CvPoint &offset, FlannBasedMatcher matcher, SurfFeatureDetector detector, SurfDescriptorExtractor extractor, Mat des_object[])
{
	bool noMatch = true;
	Mat des_image, img_matches;
	vector<KeyPoint> kp_image;
	vector<vector<DMatch > > matches;
	vector<DMatch > good_matches;
	int iter = 0;
	Mat image = imread("/home/pi/opencv/photo.jpg" , CV_LOAD_IMAGE_GRAYSCALE );
	detector.detect( image, kp_image );
	extractor.compute( image, kp_image, des_image );
	while ( noMatch )
	{
		//printf("before kp and des detection 2\n");
	    	
		
		matcher.knnMatch(des_object[iter], des_image, matches, 2);
		for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
		{
		    if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
		    {
			good_matches.push_back(matches[i][0]);
		    }
		}
		
		//printf("Number of matches: %d\n", good_matches.size());
		if (good_matches.size() >= 10)
		{
			CvPoint center = cvPoint(0,0);
			for ( int z = 0 ; z < good_matches.size() ; z++ )
			{
				int index = good_matches.at(z).trainIdx;
				center.x += kp_image.at(index).pt.x;
				center.y += kp_image.at(index).pt.y;
			}
			center.x = center.x/good_matches.size();
			center.y = center.y/good_matches.size();
			int radius = 5;
			circle( image, center, radius, Scalar(0,0,255), 3, 8, 0 );
			namedWindow("test");
			imshow("test", image);
			imwrite("centerPoint.jpg", image);
			waitKey(5000);
			int offsetX = center.x - image.cols/2;
			int offsetY = center.y - image.rows/2;
			offset = cvPoint(offsetX, offsetY);			
			noMatch = false;
		}
		//printf("draw good matches\n");
		//Show detected matches
		if ( iter++ == 3 || !noMatch )
			break;
		
		good_matches.clear();
	}
	return noMatch;
}
Author: Nissav, Project: MASTIFF, Lines: 57, Source: main2.cpp

Example 5: process

    virtual void process() {
        std::vector<KeyPoint> ipts;
        detector.detect(inputImage,ipts);

        Mat descriptors;
        extractor.compute( inputImage, ipts, descriptors );

//        printf("num points %d\n",(int)ipts.size());
    }
Author: JayHuangYC, Project: BoofCV, Lines: 9, Source: main.cpp

Example 6: init

bool TrackerForProject::init( const cv::Mat& frame, const cv::Rect& initial_position )
{
    position_ = initial_position;
    cv::cvtColor(frame, prevFrame_, CV_BGR2GRAY);

	Mat prev_(prevFrame_(position_));

	SurfFeatureDetector detector;
	detector.detect(prev_, keypoints1);

    return true;
}
Author: grishin-sergei, Project: face-tracking, Lines: 12, Source: ForProject.cpp

Example 7: testCalonderClassifier

/*
 * Test Calonder classifier to match keypoints on given image:
 *      classifierFilename - name of file from which classifier will be read,
 *      imgFilename - test image filename.
 *
 * To calculate keypoint descriptors you may use the RTreeClassifier class directly (as for training),
 * but it is more convenient to use the CalonderDescriptorExtractor class, which is a wrapper
 * around RTreeClassifier.
 */
static void testCalonderClassifier( const string& classifierFilename, const string& imgFilename )
{
    Mat img1 = imread( imgFilename, IMREAD_GRAYSCALE ), img2, H12;
    if( img1.empty() )
    {
        cout << "Test image can not be read." << endl;
        exit(-1);
    }
    warpPerspectiveRand( img1, img2, H12, theRNG() );

    // Extract keypoints from the test images
    SurfFeatureDetector detector;
    vector<KeyPoint> keypoints1; detector.detect( img1, keypoints1 );
    vector<KeyPoint> keypoints2; detector.detect( img2, keypoints2 );

    // Compute descriptors
    CalonderDescriptorExtractor<float> de( classifierFilename );
    Mat descriptors1;  de.compute( img1, keypoints1, descriptors1 );
    Mat descriptors2;  de.compute( img2, keypoints2, descriptors2 );

    // Match descriptors
    BFMatcher matcher(NORM_L1);
    vector<DMatch> matches;
    matcher.match( descriptors1, descriptors2, matches );

    // Prepare inlier mask
    vector<char> matchesMask( matches.size(), 0 );
    vector<Point2f> points1; KeyPoint::convert( keypoints1, points1 );
    vector<Point2f> points2; KeyPoint::convert( keypoints2, points2 );
    Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);
    for( size_t mi = 0; mi < matches.size(); mi++ )
    {
        if( norm(points2[matches[mi].trainIdx] - points1t.at<Point2f>((int)mi,0)) < 4 ) // inlier
            matchesMask[mi] = 1;
    }

    // Draw
    Mat drawImg;
    drawMatches( img1, keypoints1, img2, keypoints2, matches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask );
    string winName = "Matches";
    namedWindow( winName, WINDOW_AUTOSIZE );
    imshow( winName, drawImg );
    waitKey();
}
Author: 406089450, Project: opencv, Lines: 53, Source: find_obj_calonder.cpp

Example 8: tryFindImage_features

    void tryFindImage_features(Mat input)
    {
    	/* Compare the input image against the set of reference templates and pick the best match */

    	resize(input, input, Size(SIGN_SIZE, SIGN_SIZE), 0, 0);

    	vector<KeyPoint> keyPoints;
    	_detector.detect(input, keyPoints);

    	Mat descriptors;
    	_extractor.compute(input, keyPoints, descriptors);

    	int max_value = 0, max_position = 0; 

    	for(int i=0; i < 5; i++)
    	{
    		vector< vector<DMatch> > matches;

    		_matcher.knnMatch(descriptors, _train_descriptors[i], matches, 50);

    		int good_matches_count = 0;
		   
		    for (size_t j = 0; j < matches.size(); ++j)
		    { 
		        if (matches[j].size() < 2)
		                    continue;
		       
		        const DMatch &m1 = matches[j][0];
		        const DMatch &m2 = matches[j][1];
		            
		        if(m1.distance <= 0.7 * m2.distance)        
		            good_matches_count++;    
		    }

		    if(good_matches_count > max_value)
		    {
		    	max_value = good_matches_count;
		    	max_position = i;
		    }
    	}

    	cout << STATUS_STR << "Detected sign: " << _train_sign_names[max_position] << endl;
    }
Author: BorisVasilyev, Project: SignRecognition, Lines: 43, Source: video_process.cpp

Example 9: trainCalonderClassifier

/*
 * Trains Calonder classifier and writes trained classifier in file:
 *      imgFilename - name of .txt file which contains list of full filenames of train images,
 *      classifierFilename - name of binary file in which classifier will be written.
 *
 * To train the Calonder classifier, the RTreeClassifier class needs to be used.
 */
static void trainCalonderClassifier( const string& classifierFilename, const string& imgFilename )
{
    // Reads train images
    ifstream is( imgFilename.c_str(), ifstream::in );
    vector<Mat> trainImgs;
    while( !is.eof() )
    {
        string str;
        getline( is, str );
        if (str.empty()) break;
        Mat img = imread( str, IMREAD_GRAYSCALE );
        if( !img.empty() )
            trainImgs.push_back( img );
    }
    if( trainImgs.empty() )
    {
        cout << "All train images can not be read." << endl;
        exit(-1);
    }
    cout << trainImgs.size() << " train images were read." << endl;

    // Extracts keypoints from train images
    SurfFeatureDetector detector;
    vector<BaseKeypoint> trainPoints;
    vector<IplImage> iplTrainImgs(trainImgs.size());
    for( size_t imgIdx = 0; imgIdx < trainImgs.size(); imgIdx++ )
    {
        iplTrainImgs[imgIdx] = trainImgs[imgIdx];
        vector<KeyPoint> kps; detector.detect( trainImgs[imgIdx], kps );

        for( size_t pointIdx = 0; pointIdx < kps.size(); pointIdx++ )
        {
            Point2f p = kps[pointIdx].pt;
            trainPoints.push_back( BaseKeypoint(cvRound(p.x), cvRound(p.y), &iplTrainImgs[imgIdx]) );
        }
    }

    // Trains Calonder classifier on extracted points
    RTreeClassifier classifier;
    classifier.train( trainPoints, theRNG(), 48, 9, 100 );
    // Writes classifier
    classifier.write( classifierFilename.c_str() );
}
Author: 406089450, Project: opencv, Lines: 50, Source: find_obj_calonder.cpp

Example 10: find_next_homography

Mat find_next_homography(Mat image, Mat image_next, vector<KeyPoint> keypoints_0, Mat descriptors_0,
						 SurfFeatureDetector detector, SurfDescriptorExtractor extractor, 
						 BFMatcher matcher, vector<KeyPoint>& keypoints_next, Mat& descriptors_next)
{

	//step 1 detect feature points in next image
	vector<KeyPoint> keypoints_1;
	detector.detect(image_next, keypoints_1);

	Mat img_keypoints_surf0, img_keypoints_surf1;
	drawKeypoints(image, keypoints_0, img_keypoints_surf0);
	drawKeypoints(image_next, keypoints_1, img_keypoints_surf1);
	//cout << "# im0 keypoints" << keypoints_0.size() << endl;
    //cout << "# im1 keypoints" << keypoints_1.size() << endl;
	imshow("surf 0", img_keypoints_surf0);
	imshow("surf 1", img_keypoints_surf1);

    //step 2: extract feature descriptors from feature points
	Mat descriptors_1;
	extractor.compute(image_next, keypoints_1, descriptors_1);

	//step 3: feature matching
	//cout << "fd matching" << endl;
	vector<DMatch> matches;
	vector<Point2f> matched_0;
	vector<Point2f> matched_1;

	matcher.match(descriptors_0, descriptors_1, matches);
	Mat img_feature_matches;
	drawMatches(image, keypoints_0, image_next, keypoints_1, matches, img_feature_matches );
	imshow("Matches", img_feature_matches);

	for (int i = 0; i < matches.size(); i++ )
	{
		matched_0.push_back(keypoints_0[matches[i].queryIdx].pt);	
		matched_1.push_back(keypoints_1[matches[i].trainIdx].pt);	
	}
	keypoints_next = keypoints_1;
	descriptors_next = descriptors_1;
	return findHomography(matched_0, matched_1, RANSAC);

}
Author: jaisrael, Project: AR-Tower-Defense, Lines: 42, Source: chessboard.cpp

Example 11: getHistAndLabels

vector<Mat> getHistAndLabels(SurfFeatureDetector &detector, BOWImgDescriptorExtractor &bowDE, int dictionarySize) {

    // setup variable and object I need
    IplImage *img2;
    Mat labels(0, 1, CV_32FC1);
    Mat trainingData(0, dictionarySize, CV_32FC1);
    vector<KeyPoint> keypoint1;
    Mat bowDescriptor1;
    Helper helper;
    vector<string> files = vector<string>();

    helper.GetFileList(EVAL_DIR, files);

    float labelVal;

    for (unsigned int iz = 0; iz < files.size(); iz++) {
        int isImage = helper.instr(files[iz], "jpg", 0, true);
        if (isImage > 0) {
            string sFileName = TRAINING_DIR;
            sFileName.append(files[iz]);
            const char * imageName = sFileName.c_str ();

            img2 = cvLoadImage(imageName,0);
            if (img2) {
                detector.detect(img2, keypoint1);
                bowDE.compute(img2, keypoint1, bowDescriptor1);
                trainingData.push_back(bowDescriptor1);
                labelVal = iz+1;
                labels.push_back(labelVal);
            }


        }
    }

    vector<Mat> retVec;
    retVec.push_back(trainingData);
    retVec.push_back(labels);
    return retVec;

}
Author: cstahmer, Project: archive-vision, Lines: 41, Source: imgextract.cpp
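
Examples 11, 13, and 14 take an already configured BOWImgDescriptorExtractor. The following is a hedged sketch of how such an object is typically wired up in OpenCV 2.4.x; the helper name computeBowHistogram and the parameters trainDescriptors and dictionarySize are illustrative assumptions, not code from the cstahmer project.

#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>

using namespace cv;

// Builds a visual vocabulary and computes the BoW histogram for one image.
// "trainDescriptors" stands in for SURF descriptors pooled beforehand with
// SurfFeatureDetector::detect + SurfDescriptorExtractor::compute.
Mat computeBowHistogram(const Mat& trainDescriptors, int dictionarySize, const Mat& image)
{
    // 1. Cluster the pooled SURF descriptors into dictionarySize visual words.
    BOWKMeansTrainer bowTrainer(dictionarySize);
    bowTrainer.add(trainDescriptors);
    Mat vocabulary = bowTrainer.cluster();

    // 2. Wire up the BoW descriptor extractor with a SURF extractor and a matcher.
    Ptr<DescriptorExtractor> extractor(new SurfDescriptorExtractor());
    Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher());
    BOWImgDescriptorExtractor bowDE(extractor, matcher);
    bowDE.setVocabulary(vocabulary);

    // 3. Detect keypoints and compute the visual-word histogram for the image.
    SurfFeatureDetector detector(400);
    std::vector<KeyPoint> keypoints;
    detector.detect(image, keypoints);

    Mat bowDescriptor;
    bowDE.compute(image, keypoints, bowDescriptor);
    return bowDescriptor;
}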

Example 12: collectclasscentroids

void collectclasscentroids() {
	IplImage *img;
	int samplesOnGroup = 60;
	int trainingGroups = 4;
	int allSamples = samplesOnGroup * trainingGroups;
	for (int j = 1; j <= trainingGroups; j++)
		for (int i = 1; i <= samplesOnGroup; i++) {
			sprintf(ch, "%s%d%s%d%s", "train/", j, " (", i, ").jpg");
			//cout << ch << endl;
			printf("\rTraining : %3d %%",((((j-1)*samplesOnGroup)+i)*100/allSamples));
			const char* imageName = ch;
			img = cvLoadImage(imageName, 0);
			vector<KeyPoint> keypoint;
			detector.detect(img, keypoint);
			Mat features;
			extractor->compute(img, keypoint, features);
			bowTrainer.add(features);
		}
	printf("\n");
	return;
}
Author: bercho, Project: Action-Recognition-OpenCV, Lines: 21, Source: SVM_image_v0.1.cpp

Example 13: getSingleImageHistogram

Mat getSingleImageHistogram(SurfFeatureDetector &detector, BOWImgDescriptorExtractor &bowDE, string evalFile) {

    // setup variable and object I need
    IplImage *img2;
    vector<KeyPoint> keypoint1;
    Mat bowDescriptor1;
    Helper helper;


    int isImage = helper.instr(evalFile, "jpg", 0, true);
    if (isImage > 0) {

        const char * imageName = evalFile.c_str ();
        img2 = cvLoadImage(imageName,0);
        if (img2) {
            detector.detect(img2, keypoint1);
            bowDE.compute(img2, keypoint1, bowDescriptor1);
        }
    }

    return bowDescriptor1;
}
Author: cstahmer, Project: archive-vision, Lines: 22, Source: imgextract.cpp

Example 14: getClassMatch

float getClassMatch(SurfFeatureDetector &detector, BOWImgDescriptorExtractor &bowDE, IplImage* &img2, int dictionarySize, string sFileName, CvSVM &svm) {
    float response;

    vector<KeyPoint> keypoint2;
    Mat bowDescriptor2;
    Mat evalData(0, dictionarySize, CV_32FC1);
    Mat groundTruth(0, 1, CV_32FC1);
    Mat results(0, 1, CV_32FC1);


    detector.detect(img2, keypoint2);
    bowDE.compute(img2, keypoint2, bowDescriptor2);


    //evalData.push_back(bowDescriptor2);
    //groundTruth.push_back((float) classID);
    response = svm.predict(bowDescriptor2);
    //results.push_back(response);


    return response;
}
Author: cstahmer, Project: archive-vision, Lines: 22, Source: imgextract.cpp

Example 15: SURF_Descriptor

KDvoid SURF_Descriptor ( KDint nIdx )
{
	Mat		tDst;
	Mat		tImg1;
	Mat		tImg2;

	tImg1 = imread ( "/res/image/box.png", CV_LOAD_IMAGE_GRAYSCALE );
	tImg2 = imread ( "/res/image/box_in_scene.png", CV_LOAD_IMAGE_GRAYSCALE );

	// -- Step 1: Detect the keypoints using SURF Detector
	KDint  nMinHessian = 400;

	SurfFeatureDetector   tDetector ( nMinHessian );
	std::vector<KeyPoint> aKeypoints1, aKeypoints2;

	tDetector.detect ( tImg1, aKeypoints1 );
	tDetector.detect ( tImg2, aKeypoints2 );

	// -- Step 2: Calculate descriptors (feature vectors)
	SurfDescriptorExtractor  tExtractor;
	Mat  tDescriptors1, tDescriptors2;

	tExtractor.compute ( tImg1, aKeypoints1, tDescriptors1 );
	tExtractor.compute ( tImg2, aKeypoints2, tDescriptors2 );
/*
	// -- Step 3: Matching descriptor vectors with a brute force matcher
	BruteForceMatcher< L2<KDfloat> >  tMatcher;
	std::vector< DMatch >             aMatches;

	tMatcher.match ( tDescriptors1, tDescriptors2, aMatches );

	// -- Draw matches
	drawMatches ( tImg1, aKeypoints1, tImg2, aKeypoints2, aMatches, tDst ); 

	g_pController->setFrame ( 0, tDst );
*/
}
Author: mcodegeeks, Project: OpenKODE-Framework, Lines: 37, Source: SURF_Descriptor.cpp
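
Step 3 of the example above is commented out because the templated BruteForceMatcher it relies on was replaced by BFMatcher in OpenCV 2.4. Below is a minimal sketch of the same matching-and-drawing step using BFMatcher; the helper name matchAndDraw is an assumption, and its parameters mirror the variables computed in SURF_Descriptor.

#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>

// Sketch of the commented-out Step 3, assuming OpenCV 2.4.x where BFMatcher
// replaces the older templated BruteForceMatcher.
cv::Mat matchAndDraw ( const cv::Mat& tImg1, const std::vector<cv::KeyPoint>& aKeypoints1,
                       const cv::Mat& tImg2, const std::vector<cv::KeyPoint>& aKeypoints2,
                       const cv::Mat& tDescriptors1, const cv::Mat& tDescriptors2 )
{
	// Brute-force matcher with L2 distance, which suits float SURF descriptors.
	cv::BFMatcher tMatcher ( cv::NORM_L2 );
	std::vector<cv::DMatch> aMatches;
	tMatcher.match ( tDescriptors1, tDescriptors2, aMatches );

	// Draw the matched keypoint pairs side by side, as Step 3 intended.
	cv::Mat tDst;
	cv::drawMatches ( tImg1, aKeypoints1, tImg2, aKeypoints2, aMatches, tDst );
	return tDst;
}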


Note: The SurfFeatureDetector::detect examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by various developers; copyright remains with the original authors, and reuse must follow the license of the corresponding project. Please do not reproduce without permission.