

C++ SiftDescriptorExtractor Class Code Examples

This article collects typical usage examples of the SiftDescriptorExtractor class in C++. If you are wondering how the C++ SiftDescriptorExtractor class is used in practice, or looking for concrete examples of it, the selected code samples below should help.


The following presents 15 code examples of the SiftDescriptorExtractor class, sorted by popularity by default.
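Before the examples, here is a minimal, self-contained sketch of the typical detect-then-compute workflow. It is written against OpenCV 2.4.x with the nonfree module (in OpenCV 3 and later, SIFT moved to the xfeatures2d contrib module and SiftDescriptorExtractor no longer exists under this name); the image path and the surrounding main() are placeholders, not code from any of the projects below.

#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>   // SiftFeatureDetector, SiftDescriptorExtractor

int main(int argc, char** argv)
{
    // Load a grayscale image; the default path is only a placeholder
    cv::Mat img = cv::imread(argc > 1 ? argv[1] : "test.png", CV_LOAD_IMAGE_GRAYSCALE);
    if (img.empty()) {
        std::cout << "cannot read image" << std::endl;
        return -1;
    }

    // Step 1: detect SIFT keypoints
    cv::SiftFeatureDetector detector;
    std::vector<cv::KeyPoint> keypoints;
    detector.detect(img, keypoints);

    // Step 2: compute one 128-dimensional float descriptor per keypoint
    cv::SiftDescriptorExtractor extractor;
    cv::Mat descriptors;
    extractor.compute(img, keypoints, descriptors);

    std::cout << keypoints.size() << " keypoints, descriptor matrix "
              << descriptors.rows << " x " << descriptors.cols << std::endl;
    return 0;
}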

Example 1: tSIFT

Mat tSIFT(String path)
{
	Mat img = imread(path, CV_LOAD_IMAGE_GRAYSCALE);
	// keypoint descriptor matrix
	Mat des;
	if (!img.data){
		std::cout << "Can't open" << std::endl;
		system("Pause");
		exit(0);
	}

	SiftFeatureDetector detector;
	std::vector<KeyPoint> tSIFTkp;
	detector.detect(img, tSIFTkp);
	Mat img1;
	drawKeypoints(img, tSIFTkp, img1, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	// Feature extraction
	SiftDescriptorExtractor extractor;
	// compute the descriptor vectors
	extractor.compute(img, tSIFTkp, des);

	showImg(img1);

	return des;
}
Developer: bankcos, Project: SceneTextDect, Lines: 25, Source: train.cpp

Example 2: computeFeatures

Mat Pyramids::computeFeatures(const Mat &m, vector<KeyPoint> &keypoints)
{
	Mat features;
	SiftDescriptorExtractor ex;
	ex.compute(m, keypoints, features);
	return features;
}
Developer: yca, Project: VideoAI, Lines: 7, Source: pyramids.cpp

Example 3: SIFTfeatureCalculate

int SIFTfeatureCalculate(Mat &img, vector<KeyPoint> &keypoints,Mat &descriptors ){
    SiftFeatureDetector detector;
    SiftDescriptorExtractor extractor;
    
    detector.detect( img, keypoints );
    extractor.compute( img, keypoints, descriptors );
    return 0;
}
Developer: fhill122, Project: robocupHome2015, Lines: 7, Source: object_detector.cpp

Example 4: detectSiftMatchWithOpenCV

void detectSiftMatchWithOpenCV(const char* img1_path, const char* img2_path, MatrixXf &match) {
  Mat img1 = imread(img1_path);   
  Mat img2 = imread(img2_path);   

  SiftFeatureDetector detector;
  SiftDescriptorExtractor extractor;
  vector<KeyPoint> key1;
  vector<KeyPoint> key2;
  Mat desc1, desc2;
  detector.detect(img1, key1);
  detector.detect(img2, key2);
  extractor.compute(img1, key1, desc1);
  extractor.compute(img2, key2, desc2);

  FlannBasedMatcher matcher;
  vector<DMatch> matches;
  matcher.match(desc1, desc2, matches);

  match.resize(matches.size(), 6);
  cout << "match count: " << matches.size() << endl;
  for (int i = 0; i < matches.size(); i++) {
    match(i, 0) = key1[matches[i].queryIdx].pt.x;
    match(i, 1) = key1[matches[i].queryIdx].pt.y;
    match(i, 2) = 1;
    match(i, 3) = key2[matches[i].trainIdx].pt.x;
    match(i, 4) = key2[matches[i].trainIdx].pt.y;
    match(i, 5) = 1;
  }
  
}
Developer: superchao1982, Project: AsProjectiveAsPossible, Lines: 30, Source: CVUtility.cpp
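Example 4 keeps every FLANN match, including ambiguous ones. A common refinement (not part of the original project, shown here only as a sketch) is Lowe's ratio test: query the two nearest neighbours with knnMatch and keep a match only when the best distance is clearly smaller than the second best. The helper name ratioTestFilter and the 0.75 threshold are illustrative choices.

// Sketch of a ratio-test filter on top of FlannBasedMatcher (OpenCV 2.4.x API);
// assumes the same headers as the example above.
std::vector<cv::DMatch> ratioTestFilter(const cv::Mat &desc1, const cv::Mat &desc2,
                                        float ratio = 0.75f)
{
  cv::FlannBasedMatcher matcher;
  std::vector<std::vector<cv::DMatch> > knnMatches;
  matcher.knnMatch(desc1, desc2, knnMatches, 2);    // two nearest neighbours per query descriptor

  std::vector<cv::DMatch> good;
  for (size_t i = 0; i < knnMatches.size(); i++) {
    if (knnMatches[i].size() == 2 &&
        knnMatches[i][0].distance < ratio * knnMatches[i][1].distance)
      good.push_back(knnMatches[i][0]);             // unambiguous match, keep it
  }
  return good;
}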

Example 5: getDescriptors

Mat Panorama::getDescriptors(vector<KeyPoint> kp){

    cout << "Computing descriptors..." << endl;

	SiftDescriptorExtractor extractor;
	Mat descriptors;
	extractor.compute(srcGray, kp, descriptors);
    
    return descriptors;
}
Developer: LucRyan, Project: OpenCV-Workshop, Lines: 10, Source: Panorama.cpp

Example 6: det_desc_features

// Feature Detection and Description
void det_desc_features(vector <Image>& images, bool flag)
{
	// Detect the keypoints using SIFT Detector
	SiftFeatureDetector detector(nfeatures, nOctaveLayers, contrastThreshold, edgeThreshold, sigma);
	// Calculate descriptors (feature vectors)
	SiftDescriptorExtractor extractor;
	//// Detect the keypoints using SIFT Detector
	//SurfFeatureDetector detector(500);
	//// Calculate descriptors (feature vectors)
	//SurfDescriptorExtractor extractor;

	for (size_t i = 0; i < images.size(); i++)
	{
	/*	Mat mask = Mat::zeros(images[i].getImg_gray().size(), images[i].getImg_gray().type());
		Mat roi1(mask, cv::Rect(images[i].getImg_gray().cols - 60, 0, images[i].getImg_gray().cols - (images[i].getImg_gray().cols - 60), images[i].getImg_gray().rows));
		roi1 = Scalar(255);
		Mat roi2(mask, cv::Rect(0, 0, 60, images[i].getImg_gray().rows));
		roi2 = Scalar(255);*/

		// Feature Detection
		vector <KeyPoint> tmp_keypoints;
		detector.detect(images[i].getImg_gray(), tmp_keypoints);

		cout << "Features detected in image #" << i << " : " << tmp_keypoints.size() << endl;
		// Feature Description
		Mat tmp_descriptors;
		extractor.compute(images[i].getImg_gray(), tmp_keypoints, tmp_descriptors);

		// Store keypoints and descriptors
		images[i].setImageFeatures(tmp_keypoints, tmp_descriptors);

		// Draw keypoints
		Mat tmp_img_keypoints;
		drawKeypoints(images[i].getImg_gray(), tmp_keypoints, tmp_img_keypoints, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
		images[i].setImg_Keypoint(tmp_img_keypoints);

		// Store img_keypoints
		string str;
		if (flag == 0)
		{
			str = "images/SIFT_Keypoints/Original_Image/";
		}
		else
		{
			str = "images/SIFT_Keypoints/Histogram_Equalazition/";
		}
		str.append("Image_");
		str.append(to_string(images[i].getID()));
		str.append("_Keypoints_detected_");
		str.append(to_string(tmp_keypoints.size()));
		str.append(".jpg");
		imwrite(str, tmp_img_keypoints);
	}
}
Developer: marios2019, Project: Automatic_Panorama_Stitching, Lines: 55, Source: main.cpp
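The detector in Example 6 is constructed from the globals nfeatures, nOctaveLayers, contrastThreshold, edgeThreshold and sigma, which are defined elsewhere in that project and not shown in the snippet. For reference, a plausible set of definitions that simply reproduces OpenCV 2.4's documented SIFT defaults would look like this (the original author's values may differ):

// Hypothetical parameter definitions for Example 6, matching the OpenCV 2.4.x SIFT defaults.
int    nfeatures         = 0;     // 0 = keep all detected features
int    nOctaveLayers     = 3;     // DoG layers per octave
double contrastThreshold = 0.04;  // reject weak features in low-contrast regions
double edgeThreshold     = 10;    // reject edge-like features
double sigma             = 1.6;   // Gaussian sigma applied to the input image at octave 0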

Example 7: sift_feature

int sift_feature()
{
    Mat img_1=imread("./samples/box.png",CV_LOAD_IMAGE_GRAYSCALE);//宏定义时CV_LOAD_IMAGE_GRAYSCALE=0,也就是读取灰度图像
    Mat img_2=imread("./samples/box_in_scene.png",CV_LOAD_IMAGE_GRAYSCALE);//一定要记得这里路径的斜线方向,这与Matlab里面是相反的

    if(!img_1.data || !img_2.data)//如果数据为空
    {
        cout<<"opencv error"<<endl;
        return -1;
    }
    cout<<"open right"<<endl;

    //第一步,用SIFT算子检测关键点

    SiftFeatureDetector detector;//构造函数采用内部默认的
    vector<KeyPoint> keypoints_1,keypoints_2;//构造2个专门由点组成的点向量用来存储特征点

    detector.detect(img_1,keypoints_1);//将img_1图像中检测到的特征点存储起来放在keypoints_1中
    detector.detect(img_2,keypoints_2);//同理

    //在图像中画出特征点
    Mat img_keypoints_1,img_keypoints_2;

    drawKeypoints(img_1,keypoints_1,img_keypoints_1,Scalar::all(-1),DrawMatchesFlags::DEFAULT);//在内存中画出特征点
    drawKeypoints(img_2,keypoints_2,img_keypoints_2,Scalar::all(-1),DrawMatchesFlags::DEFAULT);

    imshow("sift_keypoints_1",img_keypoints_1);//显示特征点
    imshow("sift_keypoints_2",img_keypoints_2);

    //计算特征向量
    SiftDescriptorExtractor extractor;//定义描述子对象

    Mat descriptors_1,descriptors_2;//存放特征向量的矩阵

    extractor.compute(img_1,keypoints_1,descriptors_1);//计算特征向量
    extractor.compute(img_2,keypoints_2,descriptors_2);

    //用burte force进行匹配特征向量
    BruteForceMatcher<L2<float>>matcher;//定义一个burte force matcher对象
    vector<DMatch>matches;
    matcher.match(descriptors_1,descriptors_2,matches);

    //绘制匹配线段
    Mat img_matches;
    drawMatches(img_1,keypoints_1,img_2,keypoints_2,matches,img_matches);//将匹配出来的结果放入内存img_matches中

    //显示匹配线段
    imshow("sift_Matches",img_matches);//显示的标题为Matches
    waitKey(0);
    return 0;
}
Developer: KingBing, Project: OpenCV_OLD, Lines: 51, Source: sift_feature.cpp
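Examples 7 and 9 match descriptors with the BruteForceMatcher<L2<float> > template, which in OpenCV 2.4 was relegated to the legacy module and was removed in later releases. A drop-in alternative for the matching step, sketched below rather than taken from either project, is BFMatcher from features2d with an explicit L2 norm, which suits float SIFT descriptors; descriptors_1 and descriptors_2 refer to the matrices computed in Example 7.

// Sketch of the matching step using BFMatcher instead of the legacy BruteForceMatcher.
cv::BFMatcher matcher(cv::NORM_L2);            // L2 norm for float SIFT descriptors
std::vector<cv::DMatch> matches;
matcher.match(descriptors_1, descriptors_2, matches);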

Example 8: compute_descriptors

Mat compute_descriptors(Mat image, std::vector<KeyPoint> keypoints)
{
	cout << "Extracting sift descriptors..." << endl;

	SiftDescriptorExtractor extractor;

	Mat descriptor;

	image.convertTo(image, CV_8U);

	extractor.compute(image, keypoints, descriptor);

	return descriptor;
}
Developer: ahmad-asadi, Project: Vision, Lines: 14, Source: extract_features.cpp

Example 9: main

int main()
{
	//read the images from file
	Mat img_1 = imread("class.png");
	Mat img_2 = imread("class2.png");
	//if loading the images failed
	if (img_1.empty() || img_2.empty())
	{
		cout << "load image error" << endl;
		return -1;
	}
	//show the source images
	imshow("src image 1", img_1);
	imshow("src image 2", img_2);
	//Step 1: detect keypoints with the SIFT detector
	SiftFeatureDetector detector;//default-constructed with the internal default parameters
	std::vector<KeyPoint> keypoints_1, keypoints_2;//two vectors of points used to store the detected keypoints

	detector.detect(img_1, keypoints_1);//store the keypoints detected in img_1 into keypoints_1
	detector.detect(img_2, keypoints_2);//likewise for img_2

	//draw the keypoints on the images
	Mat img_keypoints_1, img_keypoints_2;

	drawKeypoints(img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);//render the keypoints into an in-memory image
	drawKeypoints(img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT);

	imshow("sift_keypoints_1", img_keypoints_1);//show the keypoints
	imshow("sift_keypoints_2", img_keypoints_2);

	//compute the descriptors
	SiftDescriptorExtractor extractor;//descriptor extractor object
	Mat descriptors_1, descriptors_2;//matrices that hold the descriptor vectors

	extractor.compute(img_1, keypoints_1, descriptors_1);//compute the descriptor vectors
	extractor.compute(img_2, keypoints_2, descriptors_2);

	//match the descriptor vectors with brute force
	BruteForceMatcher< L2<float> > matcher;//brute-force matcher object
	vector<DMatch> matches;
	matcher.match(descriptors_1, descriptors_2, matches);

	//draw the matches
	Mat img_matches;
	drawMatches(img_1, keypoints_1, img_2, keypoints_2, matches, img_matches);//render the matching result into img_matches

	//display the matches
	imshow("sift_Matches", img_matches);//the window title is sift_Matches
	waitKey(0);
}
Developer: yuki252111, Project: computerVision, Lines: 50, Source: sift.cpp

Example 10: computeSifts

Mat computeSifts(const string& fileName)
{
    const Mat input = cv::imread(fileName.c_str(), 0); //Load as grayscale
    if(input.empty())
        cout<<"ERROR: Image "<<fileName<<" was not read"<<endl;
    Mat descriptors;
    SiftFeatureDetector detector;
    vector<cv::KeyPoint> keypoints;
    detector.detect(input, keypoints);
    SiftDescriptorExtractor extractor;
    extractor.compute(input, keypoints, descriptors);
    // cout<<descriptors<<endl;
    return descriptors;
}
Developer: Dryuna, Project: BagOfWords, Lines: 14, Source: compute_sift.cpp

Example 11: doSift

/* 
*	Function : doSift
*	Description : Find sift points on the image
*	
*	path : path of the image
*	container : container for sift keypoints and their descriptor
*/
void doSift(const string &path, struct SFeatures &container)
{
	Mat img, des;
	vector<KeyPoint> keypoints;

	img = imread(path.c_str(), CV_LOAD_IMAGE_GRAYSCALE);

	SiftFeatureDetector detector;

	detector.detect(img, keypoints);

	SiftDescriptorExtractor extractor;

	extractor.compute(img, keypoints, des);

	container.des = des;
	container.keys = keypoints;
}
Developer: caomw, Project: ulavalSFM, Lines: 25, Source: dosift.cpp

Example 12: ASiftDetector::detectAndCompute

void ASiftDetector::detectAndCompute(const Mat& img, std::vector< KeyPoint >& keypoints, Mat& descriptors)
{
    keypoints.clear();
    descriptors = Mat(0, 128, CV_32F);
    for(int tl = 1; tl < 6; tl++)
    {
        double t = pow(2, 0.5*tl);
        for(int phi = 0; phi < 180; phi += 72.0/t)
        {
            std::vector<KeyPoint> kps;
            Mat desc;

            Mat timg, mask, Ai;
            img.copyTo(timg);

            affineSkew(t, phi, timg, mask, Ai);

#if 0
            Mat img_disp;
            bitwise_and(mask, timg, img_disp);
            namedWindow( "Skew", WINDOW_AUTOSIZE );// Create a window for display.
            imshow( "Skew", img_disp );
            waitKey(0);
#endif

            SiftFeatureDetector detector;
            detector.detect(timg, kps, mask);

            SiftDescriptorExtractor extractor;
            extractor.compute(timg, kps, desc);

            for(unsigned int i = 0; i < kps.size(); i++)
            {
                Point3f kpt(kps[i].pt.x, kps[i].pt.y, 1);
                Mat kpt_t = Ai*Mat(kpt);
                kps[i].pt.x = kpt_t.at<float>(0,0);
                kps[i].pt.y = kpt_t.at<float>(1,0);
            }
            keypoints.insert(keypoints.end(), kps.begin(), kps.end());
            descriptors.push_back(desc);
        }
    }
}
Developer: Flyuu, Project: opencv-practical-code, Lines: 43, Source: ASiftDetector.cpp

Example 13: computeMatching

pair<vector<Point2f>, vector<Point2f> > computeMatching(Mat &img1, Mat &img2, vector<KeyPoint> &keypoints1, vector<KeyPoint> &keypoints2)
{
    SiftDescriptorExtractor extractor;
    Mat descriptors1, descriptors2;
    extractor.compute(img1, keypoints1, descriptors1);
    extractor.compute(img2, keypoints2, descriptors2);
    BruteForceMatcher<L2<float> > matcher;
    vector<DMatch> matches1_2, matches2_1;
    matcher.match(descriptors1, descriptors2, matches1_2);
    matcher.match(descriptors2, descriptors1, matches2_1);
    pair<vector<Point2f>, vector<Point2f> > matches;
    vector<DMatch> dmatchFiltrado;
    double maxDistance = 90;
    for (uint i=0; i < matches1_2.size(); i++) {
        if (matches1_2[i].distance > maxDistance) {
            continue;
        }
        pair<Point2f, Point2f> match1_2 = pair<Point2f, Point2f>(keypoints1[matches1_2[i].queryIdx].pt, keypoints2[matches1_2[i].trainIdx].pt);
        for (uint j=0; j < matches2_1.size(); j++) {
            if (matches2_1[j].distance > maxDistance) {
                continue;
            }
            pair<Point2f, Point2f> match2_1 = pair<Point2f, Point2f>(keypoints1[matches2_1[j].trainIdx].pt, keypoints2[matches2_1[j].queryIdx].pt);
            if (match1_2.first == match2_1.first && match1_2.second == match2_1.second) {
                if (dmatchFiltrado.empty() || (matches.first.back() != match1_2.first || matches.second.back() != match1_2.second)) {
                    dmatchFiltrado.push_back(matches1_2[i]);
                    matches.first.push_back(match1_2.first);
                    matches.second.push_back(match1_2.second);
                }
            }
        }
    }

    Mat img3;
    drawMatches(img1, keypoints1, img2, keypoints2, dmatchFiltrado, img3);
    imshow("Correspondencias", img3);
    waitKey();
    destroyWindow("Correspondencias");
    return matches;
}
Developer: JuantAldea, Project: UGR-2012-artificial-vision, Lines: 40, Source: main-4.cpp

Example 14: panorama

// Panorama stitching
Mat panorama(Mat src1, Mat src2, int width, int height)
{
	// Detect SIFT keypoints and compute their descriptors
	Mat gray1, gray2, des1, des2;
	SiftFeatureDetector detector(2000);
	SiftDescriptorExtractor extractor;
	vector<KeyPoint> kps1, kps2;
	cvtColor(src1, gray1, CV_BGR2GRAY);
	cvtColor(src2, gray2, CV_BGR2GRAY);
	detector.detect(gray1, kps1);
	detector.detect(gray2, kps2);
	extractor.compute(gray1, kps1, des1);
	extractor.compute(gray2, kps2, des2);

	// Match the keypoints between the two images
	vector<DMatch> matches;
	BruteForceMatcher< L2<float> > matcher;
	matcher.match(des1, des2, matches);
	vector<Vec2f> pts1(matches.size());
	vector<Vec2f> pts2(matches.size());

	// Compute the homography
	for (size_t i = 0; i < matches.size(); ++i){
		pts1[i][0] = kps1[matches[i].queryIdx].pt.x;
		pts1[i][1] = kps1[matches[i].queryIdx].pt.y;
		pts2[i][0] = kps2[matches[i].trainIdx].pt.x;
		pts2[i][1] = kps2[matches[i].trainIdx].pt.y;
	}
	Mat H = findHomography(pts1, pts2, CV_RANSAC);

	// Stitch the panorama using the homography matrix H
	Mat dst;
	warpPerspective(src1, dst, H, Size(width, height));
	for (int y = 0; y < src1.rows; y++){
		for (int x = 0; x < src1.cols; x++){
			dst.at<Vec3b>(y, x) = src2.at<Vec3b>(y, x);
		}
	}
	return dst;
}
Developer: KeiUe, Project: cpp, Lines: 41, Source: sift.cpp

Example 15: cv_featureDescriptor

int cv_featureDescriptor(CParamArray *pa)
{
    using namespace cv;

    // read image
    string imageFN = svar.GetString("image", "./test.png");
    Mat img = imread(imageFN);

    // extract keypoints & descriptors
    Ptr<FeatureDetector>    detector;
    SiftDescriptorExtractor extractor;

    vector<KeyPoint>        keypoints;
    Mat                     descriptors;

    detector = new SiftFeatureDetector;

    detector->detect(img, keypoints);
    extractor.compute(img, keypoints, descriptors);

    // print keypoints
    for(int i=0; i<keypoints.size(); i++) {
        KeyPoint &p = keypoints[i];

        printf("kp[%6d] x, y = %12f, %12f\n", i, p.pt.x, p.pt.y);
        printf("           size = %12f, angle = %12f\n", p.size, p.angle);
        printf("           response = %12f, octave = %3d, class_id = %4d\n", p.response, p.octave, p.class_id);
    }
    printf("\n");

    // print descriptors
    //      type: CV_MAT_TYPE, CV_32F
    printf("descriptor: \n");
    printf("    cols     = %d\n", descriptors.cols);
    printf("    rows     = %d\n", descriptors.rows);
    printf("    channels = %d\n", descriptors.channels());
    printf("    type     = %d\n", descriptors.type());

    return 0;
}
Developer: shaoguangcheng, Project: pi-cnn, Lines: 40, Source: test_CNN.cpp


Note: The SiftDescriptorExtractor class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce this article without permission.