This page collects typical usage examples of the C++ method SiftDescriptorExtractor::detect. If you are unsure what SiftDescriptorExtractor::detect does, how to call it, or what real-world usage looks like, the hand-picked code samples below should help. You can also read further about the class this method belongs to, SiftDescriptorExtractor.
Below are 5 code examples of SiftDescriptorExtractor::detect, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better C++ code samples.
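Before the examples, here is a minimal, self-contained sketch of the usual detect/compute pattern. It assumes OpenCV 2.4.x with the nonfree module built in, and "test.jpg" is only a placeholder path:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/nonfree/features2d.hpp> // SiftFeatureDetector / SiftDescriptorExtractor
#include <iostream>
#include <vector>

using namespace cv;

int main()
{
    // load the image as grayscale ("test.jpg" is a placeholder)
    Mat img = imread("test.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    if(img.empty())
    {
        std::cerr << "Could not read image" << std::endl;
        return 1;
    }
    // SiftDescriptorExtractor is a typedef of the SIFT class,
    // so one object exposes both detect() and compute()
    SiftDescriptorExtractor sift;
    std::vector<KeyPoint> keypoints;
    Mat descriptors;
    sift.detect(img, keypoints);               // find SIFT keypoints
    sift.compute(img, keypoints, descriptors); // one 128-dimensional row per keypoint
    std::cout << keypoints.size() << " keypoints, descriptor matrix "
              << descriptors.rows << "x" << descriptors.cols << std::endl;
    return 0;
}

The same detect-then-compute pattern appears in every example below, usually followed by pushing the descriptors into a single Mat for bag-of-words clustering.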
Example 1: make_vocabulary
static void make_vocabulary()
{
    if(flag==1)
    {
        return;
    }
    cout<<" MAKING VOCABULARY...."<<endl;
    for(int i=1; i<=20; i++)
    {
        cout<<" Reading File "<<i<<endl;
        stringstream ss;
        ss << path_People << "person_"<<setfill('0') << setw(3) << i <<".image.png";
        cout<<ss.str()<<endl;
        img=imread(ss.str(),0);
        Mat tempp=imread(ss.str(),1);
        //vector< vector<Point > > superpixel=make_superpixels(tempp);
        //cout<<superpixel.size()<<" Superpixel size "<<endl;
        for(int k=0; k<1; k++)
        {
            /* int x1=superpixel[k][0].x;
            int y1=superpixel[k][0].y;
            int x2=superpixel[k][1].x;
            int y2=superpixel[k][1].y;
            Mat newimg=Mat(x2-x1+1,y2-y1+1,0,Scalar(255,255,255));
            for(int l=2; l<superpixel[k].size(); l++)
            {
                int x=superpixel[k][l].x;
                int y=superpixel[k][l].y;
                newimg.at<uchar>(x-x1,y-y1)=img.at<uchar>(x,y);
            }*/
            keypoints.clear();
            detector.detect(img,keypoints);
            detector.compute(img,keypoints,descriptor);
            features_unclustered.push_back(descriptor);
        }
    }
    cout<<"VOCABULARY BUILT...."<<endl;
    cout<<endl;
}
Example 2: main
int main(int argc, char* argv[])
{
    char *filename = new char[100];
    vector<string> validFormats;
    validFormats.push_back("png");
    validFormats.push_back("ppm");
    validFormats.push_back("jpg");
    validFormats.push_back("gif");
    validFormats.push_back("bmp");
    validFormats.push_back("tiff");
    int minHessian = 400; //Hessian Threshold
    Mat input;
    //To store the keypoints that will be extracted by SIFT
    vector<KeyPoint> keypoints;
    //To store the SIFT descriptor of the current image
    Mat descriptor;
    //To store all the descriptors that are extracted from all the images
    Mat featuresUnclustered;
    //The SIFT feature extractor and descriptor
    SiftDescriptorExtractor detector;
    DIR *dir;
    struct dirent *ent;
    if((dir = opendir(argv[1])) != NULL)
    {
        while((ent = readdir(dir)) != NULL)
        {
            if(ent->d_type == DT_REG)
            {
                string fullname(ent->d_name);
                int lastindex = fullname.find_last_of(".");
                string format = fullname.substr(lastindex + 1, fullname.length() - 1);
                if(find(validFormats.begin(), validFormats.end(), format) != validFormats.end())
                {
                    sprintf(filename, "%s/%s", argv[1], ent->d_name);
                    printf("%s\n", filename);
                    input = imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
                    detector.detect(input, keypoints);
                    detector.compute(input, keypoints, descriptor);
                    featuresUnclustered.push_back(descriptor);
                }
            }
        }
        closedir(dir);
    }
    else
    {
        perror("");
        return EXIT_FAILURE;
    }
    int dictionarySize = 200;
    TermCriteria tc(CV_TERMCRIT_ITER, 100, 0.001);
    int retries = 1;
    int flags = KMEANS_RANDOM_CENTERS;
    BOWKMeansTrainer bowTrainer(dictionarySize, tc, retries, flags);
    //cout << "I'm here too\n";
    Mat dictionary = bowTrainer.cluster(featuresUnclustered);
    sprintf(filename, "%s/dictionary.yml", argv[2]);
    FileStorage fs(filename, FileStorage::WRITE);
    fs << "vocabulary" << dictionary;
    fs.release();
    //create a nearest neighbor matcher
    Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher);
    //create SIFT feature point extractor
    Ptr<FeatureDetector> siftdetector(new SiftFeatureDetector());
    //create SIFT descriptor extractor
    Ptr<DescriptorExtractor> extractor(new SiftDescriptorExtractor);
    //create BoF (or BoW) descriptor extractor
    BOWImgDescriptorExtractor bowDE(extractor, matcher);
    //Set the dictionary with the vocabulary we created in the first step
    bowDE.setVocabulary(dictionary);
    //To store the image file name
    char *filename2 = new char[100];
    //To store the image tag name - only used to save the descriptor in a file
    char *imageTag = new char[100];
    int i = 1;
    if((dir = opendir(argv[1])) != NULL)
    {
//......... the rest of this example is omitted .........
Example 3: main
int main(int argc, char* argv[])
{
    int DICTIONARY_BUILD = 3;
    if (DICTIONARY_BUILD == 1){
        //Step 1 - Obtain the set of bags of features.
        //to store the input file names
        char * filename = new char[100];
        //to store the current input image
        Mat input;
        //To store the keypoints that will be extracted by SIFT
        vector<KeyPoint> keypoints;
        //To store the SIFT descriptor of the current image
        Mat descriptor;
        //To store all the descriptors that are extracted from all the images
        Mat featuresUnclustered;
        //The SIFT feature extractor and descriptor
        SiftDescriptorExtractor detector;
        //I select 20 (1000/50) images from 1000 images to extract feature descriptors and build the vocabulary
        int startid = 1;
        int endid = 39;
        for(int f=startid; f<=endid; f++){
            //create the file name of an image
            sprintf(filename, ".\\Release\\omocha_train\\%i.jpg", f);
            //open the file
            input = imread(filename, CV_LOAD_IMAGE_GRAYSCALE); //Load as grayscale
            //detect feature points
            detector.detect(input, keypoints);
            //compute the descriptors for each keypoint
            detector.compute(input, keypoints, descriptor);
            //put all the feature descriptors in a single Mat object
            featuresUnclustered.push_back(descriptor);
            //print the progress
            printf("%i percent done\n", f);
        }
        //Construct BOWKMeansTrainer
        //the number of bags
        int dictionarySize = 200;
        //define Term Criteria
        TermCriteria tc(CV_TERMCRIT_ITER, 100, 0.001);
        //retries number
        int retries = 1;
        //necessary flags
        int flags = KMEANS_PP_CENTERS;
        //Create the BoW (or BoF) trainer
        BOWKMeansTrainer bowTrainer(dictionarySize, tc, retries, flags);
        //cluster the feature vectors
        Mat dictionary = bowTrainer.cluster(featuresUnclustered);
        //store the vocabulary
        FileStorage fs(".\\dictionary.yml", FileStorage::WRITE);
        fs << "vocabulary" << dictionary;
        fs.release();
    }else if(DICTIONARY_BUILD == 2){
        //Step 2 - Obtain the BoF descriptor for a given image/video frame.
        //prepare the BOW descriptor extractor from the dictionary
        Mat dictionary;
        FileStorage fs(".\\dictionary.yml", FileStorage::READ);
        fs["vocabulary"] >> dictionary;
        fs.release();
        //create a nearest neighbor matcher
        Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher);
        //create SIFT feature point extractor
        Ptr<FeatureDetector> detector(new SiftFeatureDetector());
        //create SIFT descriptor extractor
        Ptr<DescriptorExtractor> extractor(new SiftDescriptorExtractor);
        //create BoF (or BoW) descriptor extractor
        BOWImgDescriptorExtractor bowDE(extractor, matcher);
        //Set the dictionary with the vocabulary we created in the first step
        bowDE.setVocabulary(dictionary);
        //To store the image file name
        char * filename = new char[100];
        char * inputfile = new char[100];
        //To store the image tag name - only used to save the descriptor in a file
        //char * imageTag = new char[10];
        int startid = 1;
        int endid = 39;
        for(int i = startid; i <= endid; i++)
        {
            sprintf(inputfile, ".\\Release\\omocha_train\\%i.jpg", i);
            sprintf(filename, ".\\%i.yml", i);
            //open the file to write the resultant descriptor
            FileStorage fs1(filename, FileStorage::WRITE);
            //read the image
            Mat img = imread(inputfile, CV_LOAD_IMAGE_GRAYSCALE);
//......... the rest of this example is omitted .........
Example 4: make_descriptors
vector<Mat> make_descriptors(vector< vector<Point> > superpixel, Mat img)
{
    cout<<" Making Bowdescriptors "<<endl;
    vector<Mat> ret;
    vector<KeyPoint> keypoint1;
    Mat bowdescriptor1;
    //imshow("sf", img);
    //while(waitKey()!=27);
    for(int k=0; k<superpixel.size(); k++)
    {
        int x1=superpixel[k][0].x;
        int y1=superpixel[k][0].y;
        int x2=superpixel[k][1].x;
        int y2=superpixel[k][1].y;
        Mat newimg=Mat(x2-x1+1,y2-y1+1,0,Scalar(255,255,255));
        for(int l=2; l<superpixel[k].size(); l++)
        {
            int x=superpixel[k][l].x;
            int y=superpixel[k][l].y;
            newimg.at<uchar>(x-x1,y-y1)=img.at<uchar>(x,y);
        }
        //keypoint1.clear();
        detector.detect(newimg,keypoint1);
        bowde.compute(newimg,keypoint1,bowdescriptor1);
        // cout<<k<<" "<<endl;
        ret.push_back(bowdescriptor1);
    }
    for(int i=0; i<superpixel.size(); i++)
    {
        int cnt=1;
        for(int j=0; j<superpixel.size(); j++)
        {
            if(i==j)
            {
                continue;
            }
            if(GR[i][j]<=N && ret[j].rows!=0 && ret[i].rows!=0)
            {
                if(ret[i].rows==0)
                {
                    ret[i]=ret[j];
                    continue;
                }
                ret[i]=ret[i]+ret[j];
                cnt++;
            }
        }
        ret[i]=ret[i]/cnt;
    }
    cout<<" GRAPH "<<endl;
    for(int i=0; i<40; i++)
    {
        for(int j=0; j<40; j++)
        {
            cout<<GR[i][j]<<" ";
        }
        cout<<endl;
    }
    cout<<endl;
    cout<<" LEAVING bowdescriptors "<<endl;
    return ret;
}
Example 5: main
int main()
{
#if DICTIONARY_BUILD == 0
    //Step 1 - Obtain the set of bags of features.
    //to store the input file names
    char * filename = new char[100];
    //to store the current input image
    Mat input;
    //To store the keypoints that will be extracted by SIFT
    vector<KeyPoint> keypoints;
    //To store the SIFT descriptor of the current image
    Mat descriptor;
    //To store all the descriptors that are extracted from all the images
    Mat featuresUnclustered;
    //The SIFT feature extractor and descriptor
    SiftDescriptorExtractor detector;
    /*
    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("FlannBased");
    cv::Ptr<cv::DescriptorExtractor> extractor = new cv::SurfDescriptorExtractor();
    cv::BOWImgDescriptorExtractor dextract( extractor, matcher );
    cv::SurfFeatureDetector detector(500);
    */
    int i, j;
    float kl=0, l=0;
    for(j=1; j<=3; j++)
        for(i=1; i<=3; i++){
            //create the file name of an image, e.g. "1 (1).jpg"
            sprintf(filename, "%d%s%d%s", j, " (", i, ").jpg");
            //open the file
            input = imread(filename, CV_LOAD_IMAGE_GRAYSCALE); //Load as grayscale
            //detect feature points
            detector.detect(input, keypoints);
            //compute the descriptors for each keypoint
            detector.compute(input, keypoints, descriptor);
            //put all the feature descriptors in a single Mat object
            featuresUnclustered.push_back(descriptor);
            //print the percentage
            l++;
            kl=(l*100)/9;
            cout<<kl<<"% done\n";
        }
    int dictionarySize=100;
    //define Term Criteria
    TermCriteria tc(CV_TERMCRIT_ITER,100,0.001);
    //retries number
    int retries=1;
    //necessary flags
    int flags=KMEANS_PP_CENTERS;
    //Create the BoW (or BoF) trainer
    BOWKMeansTrainer bowTrainer(dictionarySize,tc,retries,flags);
    //cluster the feature vectors
    Mat dictionary=bowTrainer.cluster(featuresUnclustered);
    //store the vocabulary
    FileStorage fs("dictionary1.yml", FileStorage::WRITE);
    fs << "vocabulary" << dictionary;
    fs.release();
    cout<<"Saving BoW dictionary\n";
    //create a nearest neighbor matcher
    Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher);
    //create SIFT feature point extractor
    Ptr<FeatureDetector> detector1(new SiftFeatureDetector());
    //create SIFT descriptor extractor
    Ptr<DescriptorExtractor> extractor(new SiftDescriptorExtractor);
    //create BoF (or BoW) descriptor extractor
    BOWImgDescriptorExtractor bowDE(extractor,matcher);
    //Set the dictionary with the vocabulary we created in the first step
    bowDE.setVocabulary(dictionary);
    cout<<"extracting histograms in the form of BOW for each image "<<endl;
    Mat labels(0, 1, CV_32FC1);
    Mat trainingData(0, dictionarySize, CV_32FC1);
    int k=0;
    vector<KeyPoint> keypoint1;
    Mat bowDescriptor1;
    Mat img2;
    //extracting the histogram in the form of a BoW descriptor for each image
    for(j=1; j<=3; j++)
        for(i=1; i<=3; i++){
            sprintf(filename, "%d%s%d%s", j, " (", i, ").jpg");
            img2 = imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
            detector.detect(img2, keypoint1);
            bowDE.compute(img2, keypoint1, bowDescriptor1);
            trainingData.push_back(bowDescriptor1);
            labels.push_back((float) j);
//......... the rest of this example is omitted .........