本文整理汇总了C++中FlannBasedMatcher::train方法的典型用法代码示例。如果您正苦于以下问题:C++ FlannBasedMatcher::train方法的具体用法?C++ FlannBasedMatcher::train怎么用?C++ FlannBasedMatcher::train使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类FlannBasedMatcher的用法示例。
在下文中一共展示了FlannBasedMatcher::train方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: matchDescriptors
//static void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDescriptors,
//vector<DMatch>& matches, FlannBasedMatcher& descriptorMatcher )
static void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDescriptors,
vector<DMatch>& matches, FlannBasedMatcher& descriptorMatcher, const vector<Mat>& trainImages, const vector<string>& trainImagesNames )
{
cout << "< Set train descriptors collection in the matcher and match query descriptors to them..." << endl;
descriptorMatcher.add( trainDescriptors );
descriptorMatcher.train();
descriptorMatcher.match( queryDescriptors, matches );
CV_Assert( queryDescriptors.rows == (int)matches.size() || matches.empty() );
cout << "Number of matches: " << matches.size() << endl;
cout << ">" << endl;
for( int i = 0; i < trainDescriptors.size(); i++){
std::vector< std::vector< DMatch> > matches2;
std::vector< DMatch > good_matches;
descriptorMatcher.knnMatch( queryDescriptors, trainDescriptors[i], matches2, 2);
CV_Assert( queryDescriptors.rows == (int)matches2.size() || matches2.empty() );
for (int j = 0; j < matches2.size(); ++j){
const float ratio = 0.8; // As in Lowe's paper; can be tuned
if (matches2[j][0].distance < ratio * matches2[j][1].distance){
good_matches.push_back(matches2[j][0]);
}
}
cout << "currentMatchSize : " << good_matches.size() << endl;
}
}
示例2: main
//.........这里部分代码省略.........
// --- Fragment: the beginning of this main() is omitted in the source above. ---
// Strategy: for each keypoint of image 1, match its descriptor only against the
// descriptors of image-2 keypoints lying within 10 pixels of it, so matching is
// constrained to spatially close candidates.
FlannBasedMatcher matcher;
Mat descriptorAuxKp1;
Mat descriptorAuxKp2;
vector < int >associateIdx;
for (int i = 0; i < descriptors1.rows; i++) {
// Copy row i of the descriptor matrix: the descriptor values for keypoints1[i].
descriptors1.row(i).copyTo(descriptorAuxKp1);
// Here we keep only the image-2 descriptor rows that we want to compare
// against the image-1 keypoint currently being processed.
descriptorAuxKp2.create(0, 0, CV_8UC1);
// associateIdx translates the indices returned by matches back to keypoint indices.
associateIdx.erase(associateIdx.begin(), associateIdx.end());
for (int j = 0; j < descriptors2.rows; j++) {
float p1x = keypoints1[i].pt.x;
float p1y = keypoints1[i].pt.y;
float p2x = keypoints2[j].pt.x;
float p2y = keypoints2[j].pt.y;
float distance = sqrt(pow((p1x - p2x), 2) + pow((p1y - p2y), 2));
// Among the rows of descriptors2, keep only those whose keypoints are within
// a fixed distance (10 px) of the current (i-th) image-1 keypoint.
if (distance < 10) {
descriptorAuxKp2.push_back(descriptors2.row(j));
associateIdx.push_back(j);
}
}
// Match the single image-1 keypoint descriptor against the retained image-2 descriptors.
// NOTE(review): matcher.add() is called every iteration without matcher.clear(),
// so the train set accumulates one row per i and match() searches ALL rows added
// so far; the resulting trainIdx values may then not map correctly through
// associateIdx — verify against the author's intent.
matcher.add(descriptorAuxKp1);
matcher.train();
matcher.match(descriptorAuxKp2, matches);
// Rewrite the match attributes so they reference the original keypoint indices.
for (int idxMatch = 0; idxMatch < matches.size(); idxMatch++) {
// the query side is keypoint i of image 1
matches[idxMatch].queryIdx = i;
// the train side is keypoint j of image 2
matches[idxMatch].trainIdx = associateIdx[matches[idxMatch].trainIdx];
}
// Concatenate the matches found for this keypoint with the previously found ones.
matchesWithDist.insert(matchesWithDist.end(), matches.begin(), matches.end());
}
// Partially sort matchesWithDist by descriptor-space distance (not by Euclidean
// distance between points): only the 24 best elements are placed in order.
nth_element(matchesWithDist.begin(), matchesWithDist.begin() + 24, matchesWithDist.end());
// initial position
// position of the sorted element
// end position
Mat imageMatches;
Mat matchesMask;
drawMatches(image1, keypoints1, // 1st image and its keypoints
image2, keypoints2, // 2nd image and its keypoints
matchesWithDist, // the matches
imageMatches, // the image produced
Scalar::all(-1), // color of the lines
Scalar(255, 255, 255) //color of the keypoints
);
namedWindow(matches_window, CV_WINDOW_AUTOSIZE);
imshow(matches_window, imageMatches);
imwrite("resultat.png", imageMatches);
/// Create a window and a trackbar
namedWindow(transparency_window, WINDOW_AUTOSIZE);
createTrackbar("Threshold: ", transparency_window, &thresh, max_thresh, interface);
interface(0, 0);
waitKey(0);
return (0);
}
示例3: main
// Merges two overlapping images taken from the same location: SURF features are
// detected in both images, matched with a FLANN matcher, and a homography is
// estimated with RANSAC so that image 2 can be projected into image-1 space.
// (The tail of this function is omitted in the source above.)
int main( int argc, char* argv[])
{
// file names to process
string imageName1;
string imageName2;
// parse command-line parameters
for( int i = 1; i < argc; i++){
if( string(argv[ i]) == "-i1" && i + 1 < argc){
imageName1 = argv[ ++i];
} else if( string(argv[ i]) == "-i2" && i + 1 < argc){
imageName2 = argv[ ++i];
} else if( string(argv[ i]) == "-h"){
cout << "Use: " << argv[0] << " -i1 imageName1 -i2 imageName2" << endl;
cout << "Merges two images into one. The images have to share some common area and have to be taken from one location." << endl;
return 0;
} else {
cerr << "Error: Unrecognized command line parameter \"" << argv[ i] << "\" use -h to get more information." << endl;
}
}
// check that both mandatory parameters were supplied
if( imageName1.empty() || imageName2.empty()){
cerr << "Error: Some mandatory command line options were not specified. Use -h for more information." << endl;
return -1;
}
// load both images as grayscale (imread flag 0)
Mat img1 = imread( imageName1, 0);
Mat img2 = imread( imageName2, 0);
if( img1.data == NULL || img2.data == NULL){
cerr << "Error: Failed to read input image files." << endl;
return -1;
}
// SURF detector of local regions
SurfFeatureDetector detector;
// run the actual local-feature detection
vector< KeyPoint> keyPoints1, keyPoints2;
detector.detect( img1, keyPoints1);
detector.detect( img2, keyPoints2);
cout << keyPoints1.size() << " " << keyPoints2.size();
// SURF descriptor extractor
SurfDescriptorExtractor descriptorExtractor;
// compute the SURF descriptors themselves
Mat descriptors1, descriptors2;
descriptorExtractor.compute( img1, keyPoints1, descriptors1);
descriptorExtractor.compute( img2, keyPoints2, descriptors2);
// this vector exists only to satisfy the matcher's add() interface
vector< Mat> descriptorVector2;
descriptorVector2.push_back( descriptors2);
// object that can search for similar vectors fairly efficiently in high-dimensional spaces
FlannBasedMatcher matcher;
// add the descriptors among which nearest neighbours will later be searched
matcher.add( descriptorVector2);
// build the search structure over the inserted descriptors
matcher.train();
// find the most similar descriptors (from image 2) for descriptors1 (regions of image 1)
vector<cv::DMatch > matches;
matcher.match( descriptors1, matches);
// sort correspondences best-first (smallest mutual distance in descriptor space)
sort( matches.begin(), matches.end(), compareDMatch);
// keep only the 200 best correspondences
matches.resize( min( 200, (int) matches.size()));
// prepare the corresponding point pairs
Mat img1Pos( matches.size(), 1, CV_32FC2);
Mat img2Pos( matches.size(), 1, CV_32FC2);
// fill the matrices with keypoint positions
for( int i = 0; i < (int)matches.size(); i++){
img1Pos.at< Vec2f>( i)[0] = keyPoints1[ matches[ i].queryIdx].pt.x;
img1Pos.at< Vec2f>( i)[1] = keyPoints1[ matches[ i].queryIdx].pt.y;
img2Pos.at< Vec2f>( i)[0] = keyPoints2[ matches[ i].trainIdx].pt.x;
img2Pos.at< Vec2f>( i)[1] = keyPoints2[ matches[ i].trainIdx].pt.y;
}
// Compute the 3x3 homography matrix with the RANSAC algorithm (one OpenCV call).
/** FILL DONE **/
Mat homography = findHomography( img1Pos, img2Pos, CV_RANSAC );
// output buffer into which the joined images are drawn
Mat outputBuffer( 1024, 1280, CV_8UC1);
// We want to draw the merged result into outputBuffer so that it touches the
// borders without exceeding them. Image 2 is "glued" onto image 1; the combined
// image then has to be scaled and shifted to the desired position. For this we
// need the minimal and maximal coordinates of the drawn images. For image 1 these
// come directly from its dimensions; for image 2 the corner points must be
// projected into image-1 space using the homography obtained earlier.
//.........这里部分代码省略.........
示例4: main
//--------------------------------------【main( )函数】-----------------------------------------
// Description: console application entry point. Detects SURF features in a train
// image, then continuously matches them against live camera frames with a
// FLANN-based matcher, keeping only matches that pass Lowe's ratio test.
//-----------------------------------------------------------------------------------------------
int main( )
{
	// [0] Change the console text colour (Windows cmd specific).
	system("color 6F");

	// [1] Load the train image; fail fast if it is missing instead of
	// crashing inside cvtColor on an empty Mat.
	Mat trainImage = imread("1.jpg"), trainImage_gray;
	if(trainImage.empty())
	{
		cout << "Could not load train image 1.jpg" << endl;
		return -1;
	}
	imshow("原始图",trainImage);
	cvtColor(trainImage, trainImage_gray, CV_BGR2GRAY);

	// [2] Detect SURF keypoints and extract the train-image descriptors.
	vector<KeyPoint> train_keyPoint;
	Mat trainDescriptor;
	SurfFeatureDetector featureDetector(80); // Hessian threshold = 80
	featureDetector.detect(trainImage_gray, train_keyPoint);
	SurfDescriptorExtractor featureExtractor;
	featureExtractor.compute(trainImage_gray, train_keyPoint, trainDescriptor);

	// [3] Create the FLANN-based matcher and index the train descriptors once.
	FlannBasedMatcher matcher;
	vector<Mat> train_desc_collection(1, trainDescriptor);
	matcher.add(train_desc_collection);
	matcher.train();

	// [4] Open the default camera.
	VideoCapture cap(0);

	// [5] Loop until the 'q' key is pressed.
	while(char(waitKey(1)) != 'q')
	{
		// <1> Grab a frame; skip empty frames (camera not ready / hiccup).
		int64 time0 = getTickCount();
		Mat testImage, testImage_gray;
		cap >> testImage;
		if(testImage.empty())
			continue;

		// <2> Convert the frame to grayscale.
		cvtColor(testImage, testImage_gray, CV_BGR2GRAY);

		// <3> Detect keypoints and compute descriptors for the frame.
		vector<KeyPoint> test_keyPoint;
		Mat testDescriptor;
		featureDetector.detect(testImage_gray, test_keyPoint);
		featureExtractor.compute(testImage_gray, test_keyPoint, testDescriptor);

		// <4> k-nearest-neighbour match (k = 2) against the train collection.
		vector<vector<DMatch> > matches;
		matcher.knnMatch(testDescriptor, matches, 2);

		// <5> Keep only the good matches via Lowe's ratio test.
		vector<DMatch> goodMatches;
		for(unsigned int i = 0; i < matches.size(); i++)
		{
			// knnMatch may return fewer than 2 neighbours for a query point;
			// indexing matches[i][1] unconditionally would be out of bounds.
			if(matches[i].size() >= 2 && matches[i][0].distance < 0.6 * matches[i][1].distance)
				goodMatches.push_back(matches[i][0]);
		}

		// <6> Draw the matches and show them in a window.
		Mat dstImage;
		drawMatches(testImage, test_keyPoint, trainImage, train_keyPoint, goodMatches, dstImage);
		imshow("匹配窗口", dstImage);

		// <7> Print the current frame rate.
		cout << "当前帧率为:" << getTickFrequency() / (getTickCount() - time0) << endl;
	}

	return 0;
}