

C++ SiftFeatureDetector Class Code Examples

This article collects typical usage examples of the C++ SiftFeatureDetector class. If you have been wondering what exactly SiftFeatureDetector does, how to use it, or what working examples look like, the class code examples selected below may help.


The sections below show 15 code examples of the SiftFeatureDetector class, sorted by popularity by default.

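Before the individual examples, here is a minimal, self-contained sketch of the detect-then-describe pattern that most of the examples below share. It is only an illustrative sketch: it assumes OpenCV 2.4.x with the nonfree module installed and linked, and the image path is a placeholder.

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <vector>
#include <iostream>
using namespace cv;

int main()
{
    // Load a grayscale image (placeholder path).
    Mat img = imread("example.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    if (img.empty()) return -1;

    // Detect keypoints with the default SIFT parameters.
    SiftFeatureDetector detector;
    std::vector<KeyPoint> keypoints;
    detector.detect(img, keypoints);

    // Compute the 128-dimensional SIFT descriptors, one row per keypoint.
    SiftDescriptorExtractor extractor;
    Mat descriptors;
    extractor.compute(img, keypoints, descriptors);

    std::cout << keypoints.size() << " keypoints, "
              << descriptors.rows << " descriptors" << std::endl;
    return 0;
}
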
Example 1: extractKeypoints

vector<KeyPoint> Pyramids::extractKeypoints(const Mat &m)
{
	vector<KeyPoint> keypoints;
	SiftFeatureDetector dec;
	dec.detect(m, keypoints);
	return keypoints;
}
Developer: yca, Project: VideoAI, Lines: 7, Source: pyramids.cpp

Example 2: detectSiftMatchWithOpenCV

void detectSiftMatchWithOpenCV(const char* img1_path, const char* img2_path, MatrixXf &match) {
  Mat img1 = imread(img1_path);   
  Mat img2 = imread(img2_path);   

  SiftFeatureDetector detector;
  SiftDescriptorExtractor extractor;
  vector<KeyPoint> key1;
  vector<KeyPoint> key2;
  Mat desc1, desc2;
  detector.detect(img1, key1);
  detector.detect(img2, key2);
  extractor.compute(img1, key1, desc1);
  extractor.compute(img2, key2, desc2);

  FlannBasedMatcher matcher;
  vector<DMatch> matches;
  matcher.match(desc1, desc2, matches);

  // Each row stores one correspondence as homogeneous points: (x1, y1, 1, x2, y2, 1).
  match.resize(matches.size(), 6);
  cout << "match count: " << matches.size() << endl;
  for (size_t i = 0; i < matches.size(); i++) {
    match(i, 0) = key1[matches[i].queryIdx].pt.x;
    match(i, 1) = key1[matches[i].queryIdx].pt.y;
    match(i, 2) = 1;
    match(i, 3) = key2[matches[i].trainIdx].pt.x;
    match(i, 4) = key2[matches[i].trainIdx].pt.y;
    match(i, 5) = 1;
  }
}
Developer: superchao1982, Project: AsProjectiveAsPossible, Lines: 30, Source: CVUtility.cpp

Example 3: SIFTfeatureCalculate

int SIFTfeatureCalculate(Mat &img, vector<KeyPoint> &keypoints, Mat &descriptors){
    SiftFeatureDetector detector;
    SiftDescriptorExtractor extractor;

    detector.detect( img, keypoints );
    extractor.compute( img, keypoints, descriptors );
    return 0; // the original omitted a return value despite the int return type
}
Developer: fhill122, Project: robocupHome2015, Lines: 7, Source: object_detector.cpp

Example 4: tSIFT

Mat  tSIFT(String path)
{
	Mat img = imread(path, CV_LOAD_IMAGE_GRAYSCALE);
	// descriptor matrix for the keypoints
	Mat des;
	if (!img.data){
		std::cout << "Can't open" << std::endl;
		system("Pause");
		exit(0);
	}

	SiftFeatureDetector detector;
	std::vector<KeyPoint> tSIFTkp;
	detector.detect(img, tSIFTkp);
	Mat img1;
	drawKeypoints(img, tSIFTkp, img1, Scalar::all(-1), 4);
	// Extract feature descriptors
	SiftDescriptorExtractor extractor;
	// compute SIFT descriptor vectors for the detected keypoints
	extractor.compute(img, tSIFTkp, des);

	showImg(img1);

	return des;
}
Developer: bankcos, Project: SceneTextDect, Lines: 25, Source: train.cpp

Example 5: run_demo

int run_demo()
{
	//cv::initModule_nonfree();
	//cout <<"initModule_nonfree() called" << endl;

	// Input and output image path.
	const char * imgInFile = "/sdcard/nonfree/img1.jpg";
	const char * imgOutFile = "/sdcard/nonfree/img1_result.jpg";

	Mat image;
	image = imread(imgInFile, CV_LOAD_IMAGE_COLOR);
	if(! image.data )
	{
		LOGI("Could not open or find the image!\n");
		return -1;
	}

	vector<KeyPoint> keypoints;
	Mat descriptors;

	// Create a SIFT keypoint detector.
	SiftFeatureDetector detector;
	detector.detect(image, keypoints);
	LOGI("Detected %d keypoints\n", (int) keypoints.size());

	// Compute feature description.
	detector.compute(image,keypoints, descriptors);
	LOGI("Compute feature.\n");

	// Store description to "descriptors.des".
	FileStorage fs;
	fs.open("descriptors.des", FileStorage::WRITE);
	LOGI("Opened file to store the features.\n");
	fs << "descriptors" << descriptors;
	LOGI("Finished writing file.\n");
	fs.release();
	LOGI("Released file.\n");

	// Show keypoints in the output image.
	Mat outputImg;
	Scalar keypointColor = Scalar(255, 0, 0);
	drawKeypoints(image, keypoints, outputImg, keypointColor, DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	LOGI("Drew keypoints in output image file.\n");

#ifdef WIN32
	namedWindow("Output image", CV_WINDOW_AUTOSIZE );
	imshow("Output image", outputImg);
	waitKey(0);
#endif
	
	LOGI("Generate the output image.\n");
	imwrite(imgOutFile, outputImg);

	LOGI("Done.\n");
	return 0;
}
Developer: downingstreet, Project: FaceAlign, Lines: 56, Source: nonfree_jni.cpp

Example 6: sift_feature

int sift_feature()
{
    Mat img_1=imread("./samples/box.png",CV_LOAD_IMAGE_GRAYSCALE);// CV_LOAD_IMAGE_GRAYSCALE is defined as 0, i.e. load as a grayscale image
    Mat img_2=imread("./samples/box_in_scene.png",CV_LOAD_IMAGE_GRAYSCALE);// note the direction of the path slashes, which is the opposite of Matlab

    if(!img_1.data || !img_2.data)// check whether either image failed to load
    {
        cout<<"opencv error"<<endl;
        return -1;
    }
    cout<<"open right"<<endl;

    // Step 1: detect keypoints with the SIFT detector

    SiftFeatureDetector detector;// default-constructed with the built-in parameters
    vector<KeyPoint> keypoints_1,keypoints_2;// two keypoint vectors to hold the detected features

    detector.detect(img_1,keypoints_1);// store the features detected in img_1 in keypoints_1
    detector.detect(img_2,keypoints_2);// likewise for img_2

    // Draw the keypoints on the images
    Mat img_keypoints_1,img_keypoints_2;

    drawKeypoints(img_1,keypoints_1,img_keypoints_1,Scalar::all(-1),DrawMatchesFlags::DEFAULT);// render the keypoints into a new image
    drawKeypoints(img_2,keypoints_2,img_keypoints_2,Scalar::all(-1),DrawMatchesFlags::DEFAULT);

    imshow("sift_keypoints_1",img_keypoints_1);// display the keypoints
    imshow("sift_keypoints_2",img_keypoints_2);

    // Compute the descriptor vectors
    SiftDescriptorExtractor extractor;// descriptor extractor object

    Mat descriptors_1,descriptors_2;// matrices holding the descriptor vectors

    extractor.compute(img_1,keypoints_1,descriptors_1);// compute the descriptors
    extractor.compute(img_2,keypoints_2,descriptors_2);

    // Match the descriptor vectors with a brute-force matcher
    BruteForceMatcher< L2<float> > matcher;// brute-force matcher using the L2 distance
    vector<DMatch> matches;
    matcher.match(descriptors_1,descriptors_2,matches);

    // Draw the matches
    Mat img_matches;
    drawMatches(img_1,keypoints_1,img_2,keypoints_2,matches,img_matches);// render the match result into img_matches

    // Display the matches
    imshow("sift_Matches",img_matches);// window title is "sift_Matches"
    waitKey(0);
    return 0;
}
Developer: KingBing, Project: OpenCV_OLD, Lines: 51, Source: sift_feature.cpp

Example 7: main

int main()
{
	// Load the images from disk
	Mat img_1 = imread("class.png");
	Mat img_2 = imread("class2.png");
	// Bail out if either image failed to load
	if (img_1.empty() || img_2.empty())
	{
		cout << "load image error" << endl;
		return -1;
	}
	// Show the source images
	imshow("src image 1", img_1);
	imshow("src image 2", img_2);
	// Step 1: detect keypoints with the SIFT detector
	SiftFeatureDetector detector;// default-constructed with the built-in parameters
	std::vector<KeyPoint> keypoints_1, keypoints_2;// two keypoint vectors to hold the detected features

	detector.detect(img_1, keypoints_1);// store the features detected in img_1 in keypoints_1
	detector.detect(img_2, keypoints_2);// likewise for img_2

	// Draw the keypoints on the images
	Mat img_keypoints_1, img_keypoints_2;

	drawKeypoints(img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);// render the keypoints into a new image
	drawKeypoints(img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT);

	imshow("sift_keypoints_1", img_keypoints_1);// display the keypoints
	imshow("sift_keypoints_2", img_keypoints_2);

	// Compute the descriptor vectors
	SiftDescriptorExtractor extractor;// descriptor extractor object
	Mat descriptors_1, descriptors_2;// matrices holding the descriptor vectors

	extractor.compute(img_1, keypoints_1, descriptors_1);// compute the descriptors
	extractor.compute(img_2, keypoints_2, descriptors_2);

	// Match the descriptor vectors with a brute-force matcher
	BruteForceMatcher< L2<float> > matcher;// brute-force matcher using the L2 distance
	vector<DMatch> matches;
	matcher.match(descriptors_1, descriptors_2, matches);

	// Draw the matches
	Mat img_matches;
	drawMatches(img_1, keypoints_1, img_2, keypoints_2, matches, img_matches);// render the match result into img_matches

	// Display the matches
	imshow("sift_Matches", img_matches);// window title is "sift_Matches"
	waitKey(0);
	return 0;
}
Developer: yuki252111, Project: computerVision, Lines: 50, Source: sift.cpp

Example 8: computeSifts

Mat computeSifts(const string& fileName)
{
    const Mat input = cv::imread(fileName.c_str(), 0); //Load as grayscale
    if(input.empty())
        cout<<"ERROR: Image "<<fileName<<" was not read"<<endl;
    Mat descriptors;
    SiftFeatureDetector detector;
    vector<cv::KeyPoint> keypoints;
    detector.detect(input, keypoints);
    SiftDescriptorExtractor extractor;
    extractor.compute(input, keypoints, descriptors);
    // cout<<descriptors<<endl;
    return descriptors;
}
Developer: Dryuna, Project: BagOfWords, Lines: 14, Source: compute_sift.cpp

Example 9: siftExtract

Mat siftExtract(string imgName) {

    Mat img = imread(imgName, CV_LOAD_IMAGE_GRAYSCALE); 
    // resize(img, img, Size(), 0.625, 0.625);

    // feature detection
    // SiftFeatureDetector detector(0.05, 5.0);
    SiftFeatureDetector detector;
    vector<KeyPoint> keypoints;
    detector.detect(img, keypoints);

    // feature extraction
    SiftDescriptorExtractor extractor(3.0);
    Mat descr;
    extractor.compute(img, keypoints, descr);
    return descr;
}
Developer: zhouyang2640, Project: IvIndex, Lines: 17, Source: sift.cpp

Example 10: main

int main( int argc, char** argv ) {
    // check http://docs.opencv.org/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.html
    // for OpenCV general detection/matching framework details

    if( argc != 3 ) {
        help(argv);
        return -1;
    }

    // Load images
    Mat imgA = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE );
    if( !imgA.data ) {
        std::cout<< " --(!) Error reading image " << argv[1] << std::endl;
        return -1;
    }

    std::vector<KeyPoint> keypoints1, keypoints2;
    
    // DETECTION
    // Any openCV detector such as
    SurfFeatureDetector detectorSurf(2000,4);
	SiftFeatureDetector detectorSift;
	//OrbFeatureDetector detector(400);
	//FastFeatureDetector detector(10);
    
    // detect
    double t = (double)getTickCount();
    detectorSift.detect( imgA, keypoints1);
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "SIFT detection time [s]: " << t/1.0 << std::endl;
	//-- Draw keypoints
	Mat imgKeypoint1;
    cv::drawKeypoints(imgA, keypoints1, imgKeypoint1, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
    imshow("SIFT keypoint", imgKeypoint1);

    // detect
    t = (double)getTickCount();
    detectorSurf.detect( imgA, keypoints2);
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "SURF detection time [s]: " << t/1.0 << std::endl;
	//-- Draw keypoints
	Mat imgKeypoint2;
    cv::drawKeypoints(imgA, keypoints2, imgKeypoint2, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
    imshow("SURF keypoint", imgKeypoint2);
	waitKey(0);
}
Developer: biotrump, Project: detector, Lines: 46, Source: sift_detector.cpp

Example 11: doSift

/* 
*	Function : doSift
*	Description : Find sift points on the image
*	
*	path : path of the image
*	container : container for sift keypoints and their descriptor
*/
void doSift(const string &path, struct SFeatures &container)
{
	Mat img, des;
	vector<KeyPoint> keypoints;

	img = imread(path.c_str(), CV_LOAD_IMAGE_GRAYSCALE);

	SiftFeatureDetector detector;

	detector.detect(img, keypoints);

	SiftDescriptorExtractor extractor;

	extractor.compute(img, keypoints, des);

	container.des = des;
	container.keys = keypoints;
}
Developer: caomw, Project: ulavalSFM, Lines: 25, Source: dosift.cpp

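A hypothetical usage sketch for doSift above (it assumes only what the example shows: SFeatures exposes a keys vector and a des matrix; the image path is a placeholder):

	SFeatures features;
	doSift("view1.jpg", features);
	// features.keys now holds the SIFT keypoints and features.des the matching descriptors.
	cout << features.keys.size() << " keypoints, "
	     << features.des.rows << " descriptor rows" << endl;
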
Example 12: ASiftDetector::detectAndCompute

void ASiftDetector::detectAndCompute(const Mat& img, std::vector< KeyPoint >& keypoints, Mat& descriptors)
{
    keypoints.clear();
    descriptors = Mat(0, 128, CV_32F);
    for(int tl = 1; tl < 6; tl++)
    {
        double t = pow(2, 0.5*tl);
        for(int phi = 0; phi < 180; phi += 72.0/t)
        {
            std::vector<KeyPoint> kps;
            Mat desc;

            Mat timg, mask, Ai;
            img.copyTo(timg);

            affineSkew(t, phi, timg, mask, Ai);

#if 0
            Mat img_disp;
            bitwise_and(mask, timg, img_disp);
            namedWindow( "Skew", WINDOW_AUTOSIZE );// Create a window for display.
            imshow( "Skew", img_disp );
            waitKey(0);
#endif

            SiftFeatureDetector detector;
            detector.detect(timg, kps, mask);

            SiftDescriptorExtractor extractor;
            extractor.compute(timg, kps, desc);

            for(unsigned int i = 0; i < kps.size(); i++)
            {
                Point3f kpt(kps[i].pt.x, kps[i].pt.y, 1);
                Mat kpt_t = Ai*Mat(kpt);
                kps[i].pt.x = kpt_t.at<float>(0,0);
                kps[i].pt.y = kpt_t.at<float>(1,0);
            }
            keypoints.insert(keypoints.end(), kps.begin(), kps.end());
            descriptors.push_back(desc);
        }
    }
}
Developer: Flyuu, Project: opencv-practical-code, Lines: 43, Source: ASiftDetector.cpp

Example 13: extract_sift_keypoints

std::vector<KeyPoint> extract_sift_keypoints(Mat image)
{
    cout << "Extracting features..." << endl;

    image = threshold_and_convert(image);

    SiftFeatureDetector detector;
    vector<KeyPoint> keypoints;
    detector.detect(image, keypoints);

    cout << "keys:" << keypoints.size() << endl;

    if(DEBUG)
    {
        // Add results to image and save.
        cv::Mat output;
        cv::drawKeypoints(image, keypoints, output);
        imshow("sift_keys", output);
        moveWindow("sift_keys", 500, 100);
    }
    return keypoints;
}
Developer: ahmad-asadi, Project: Vision, Lines: 22, Source: extract_features.cpp

Example 14: main

/**
 * @function main
 */
int main( int argc, char** argv )
{
	ros::init(argc, argv, "object_detector");
	ros::NodeHandle nh;
	
	///subscribe to camera image topic
	image_transport::ImageTransport it(nh);
	image_transport::Subscriber sub = it.subscribe((string)IMAGE_TOPIC, 1, imageCallback);
	
    ///read calibration data
    ifstream file (CALIBRATION_FILE);
    if (!file.is_open()){
        printf("ERROR: Unable to open calibration file\n");
        return 2;
    }
    H=readCalibration(file);


    
	// feature calculation of the object image
	img_object = imread( (string)DATA_FOLDER+(string)IMAGE_NAME, CV_LOAD_IMAGE_GRAYSCALE );
	//-- Step 1: Detect the keypoints using the SIFT detector
	SiftFeatureDetector detector;
	detector.detect( img_object, keypoints_object );
	//-- Step 2: Calculate descriptors (feature vectors)
	SiftDescriptorExtractor extractor;
	extractor.compute( img_object, keypoints_object, descriptors_object );
    
	
    //run service
	ros::ServiceServer service = nh.advertiseService("vision/get_plate_position", get_plate_position);
	ros::ServiceServer service1 = nh.advertiseService("vision/displayFrame",displayFrame);
	ROS_INFO("ready to detect the plate");
        
    ros::spin();
	return 0;
}
Developer: fhill122, Project: robocupHome2015, Lines: 40, Source: object_detector_backup.cpp

Example 15: BOWKMeansTrainer

/*
	processFeatures processes a set of images (each image is a single character in our case) and derives features for each one.
	The resulting features/descriptors are added to a BOWKMeansTrainer, which serves as the bag of features.
	Input parameters: a vector of images, so that all images can be processed in one place;
	the BOWKMeansTrainer (bag of features) is passed by reference because its contents must remain available in main's scope;
	the DescriptorExtractor pointer and the SiftFeatureDetector are also passed in from main, where they are already defined.
	For each image, the SiftFeatureDetector derives the keypoints of the image/character,
	the DescriptorExtractor computes the character's features from those keypoints,
	and the computed features are added to bowTrainer (the bag of features).
	Although the function returns void, bowTrainer is shared with main's scope, so the results accumulate there.
	Overall, the purpose is to build a vocabulary of features;
	note, however, that the mapping from features back to the originating character is not preserved.
*/
void processFeatures(vector<Mat> images, BOWKMeansTrainer &bowTrainer, Ptr<DescriptorExtractor> extractor, SiftFeatureDetector detector) {
    for (size_t j = 0; j < images.size(); j++) {
        Mat image, src;

        resize(images.at(j), src, Size(0,0), 10,10);

        copyMakeBorder(src, image, 10,10,10,10,BORDER_CONSTANT, Scalar(255));


        vector<KeyPoint> keypoints;
        detector.detect(image, keypoints);
        Mat features;
        extractor->compute(image, keypoints, features);
        bowTrainer.add(features);

    }
}
Developer: hishehim, Project: Vision_OCR, Lines: 30, Source: meraocr.cpp

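As a hypothetical follow-up to the comment in the example above, the accumulated features can then be clustered into the vocabulary. This is only a sketch under assumed names (trainingImages, vocabularySize); it is not part of the original project:

    // Build the bag-of-features vocabulary once every training image has been processed.
    vector<Mat> trainingImages;                        // assumed: character images loaded elsewhere
    int vocabularySize = 100;                          // assumed cluster count
    BOWKMeansTrainer bowTrainer(vocabularySize);
    Ptr<DescriptorExtractor> extractor(new SiftDescriptorExtractor());
    SiftFeatureDetector detector;
    processFeatures(trainingImages, bowTrainer, extractor, detector);
    Mat vocabulary = bowTrainer.cluster();             // k-means over every descriptor added by processFeatures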

Note: The SiftFeatureDetector class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.