This article collects typical usage examples of the C++ method FlannBasedMatcher::match. If you are wondering how exactly FlannBasedMatcher::match is called in C++, or what real-world examples of it look like, the curated code examples below should help. You can also explore further usage examples of FlannBasedMatcher, the class this method belongs to.
Below are 15 code examples of FlannBasedMatcher::match, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
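Before the examples, here is a minimal, self-contained sketch of the basic call pattern. It assumes OpenCV 4.x (where SIFT is available in the main features2d module); the image paths are placeholders:
#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>
#include <vector>
using namespace cv;
int main()
{
    // Read the two images to be matched (placeholder file names)
    Mat img1 = imread("query.png", IMREAD_GRAYSCALE);
    Mat img2 = imread("train.png", IMREAD_GRAYSCALE);
    if (img1.empty() || img2.empty()) return -1;
    // Detect keypoints and compute CV_32F descriptors; FLANN's default KD-tree index expects float data
    Ptr<SIFT> sift = SIFT::create();
    std::vector<KeyPoint> kp1, kp2;
    Mat desc1, desc2;
    sift->detectAndCompute(img1, noArray(), kp1, desc1);
    sift->detectAndCompute(img2, noArray(), kp2, desc2);
    // match() returns one best (nearest-neighbour) match per query descriptor
    FlannBasedMatcher matcher;
    std::vector<DMatch> matches;
    matcher.match(desc1, desc2, matches);
    std::cout << "matches: " << matches.size() << std::endl;
    return 0;
}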
Example 1:
vector<DMatch> GraphicEnd::match( Mat desp1, Mat desp2 )
{
cout<<"GraphicEnd::match two desp"<<endl;
FlannBasedMatcher matcher;
vector<DMatch> matches;
if (desp1.empty() || desp2.empty())
{
return matches;
}
double max_dist = 0, min_dist = 100;
matcher.match( desp1, desp2, matches);
for (int i=0; i<desp1.rows; i++)
{
double dist = matches[ i ].distance;
if (dist < min_dist)
min_dist = dist;
if (dist > max_dist)
max_dist = dist;
}
//return matches;
vector<DMatch> good_matches;
for (size_t i=0; i<matches.size(); i++)
{
if (matches[ i ].distance <= max(4*min_dist, _match_min_dist))
{
good_matches.push_back(matches[ i ]);
}
}
return good_matches;
}
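Example 1 keeps matches whose distance stays below a multiple of the smallest observed distance. As an alternative sketch (not part of the original code), the same matcher can run Lowe's ratio test via knnMatch, requesting the two nearest neighbours per query descriptor and keeping only clearly unambiguous matches; desp1 and desp2 are the descriptor matrices from the example above:
FlannBasedMatcher matcher;
std::vector<std::vector<DMatch>> knnMatches;
matcher.knnMatch(desp1, desp2, knnMatches, 2);    // two nearest neighbours per query row
std::vector<DMatch> good_matches;
const float ratio = 0.7f;                         // commonly used Lowe ratio; an assumption, not from the example
for (const auto& m : knnMatches)
{
    if (m.size() == 2 && m[0].distance < ratio * m[1].distance)
        good_matches.push_back(m[0]);
}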
Example 2: main
int main()
{
//Give the names of the images to be registered
const char* imRef_name = "834-r1.png";
const char* imNxt_name = "835-r1.png";
int hessianThresh = 100, ransacThresh = 3;
Mat mask, H12;
// Read images
Mat img1 = imread(imRef_name, CV_LOAD_IMAGE_GRAYSCALE);
Mat img2 = imread(imNxt_name, CV_LOAD_IMAGE_GRAYSCALE);
Mat img2Out; // Registered image2 wrt image1
// Check to see if images exist
if(img1.empty() || img2.empty())
{
printf("Can’t read one of the images\n");
exit(0);
}
// detecting keypoints
printf("Finding keypoints ... ");
SURF ImgSurf(hessianThresh);
vector<KeyPoint> keypoints1, keypoints2;
ImgSurf(img1, mask, keypoints1);
ImgSurf(img2, mask, keypoints2);
// computing descriptors
SurfDescriptorExtractor extractor;
Mat descriptors1, descriptors2;
extractor(img1, mask, keypoints1, descriptors1, true);
extractor(img2, mask, keypoints2, descriptors2, true);
// Match the points
printf("\nMatching keypoints ... ");
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors1, descriptors2, matches );
// Extract indices of matched points
vector<int> queryIdxs( matches.size() ), trainIdxs( matches.size() );
for( size_t i = 0; i < matches.size(); i++ )
{
queryIdxs[i] = matches[i].queryIdx;
trainIdxs[i] = matches[i].trainIdx;
}
// Extract matched points from indices
vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
// Use RANSAC to find the homography
printf("\nComputing homography ... ");
H12 = findHomography( Mat(points2), Mat(points1), CV_RANSAC, ransacThresh );
// Warp the second image according to the homography
warpPerspective(img2, img2Out, H12, cvSize(img2.cols, img2.rows), INTER_LINEAR);
// Write result to file
imwrite("im2reg.png",img2Out);
printf("\nDone!!!.... ");
}
Example 3: match
int match(vector<DMatch> &match, frame &f1, frame &f2)
{
vector<DMatch> matches;
FlannBasedMatcher matcher;
matcher.match(f1.desp, f2.desp, matches);
static reader pd("../config/config.ini");
double min_distance = 9999;
double match_threshold = atof(pd.get("match_threshold").c_str());
for (int i = 0; i < matches.size(); ++i)
{
if (matches[i].distance < min_distance)
{
min_distance = matches[i].distance;
}
}
for (int i = 0; i < matches.size(); ++i)
{
if (matches[i].distance < (min_distance * match_threshold))
{
match.push_back(matches[i]);
}
}
return match.size();
}
Example 4: detectSiftMatchWithOpenCV
void detectSiftMatchWithOpenCV(const char* img1_path, const char* img2_path, MatrixXf &match) {
Mat img1 = imread(img1_path);
Mat img2 = imread(img2_path);
SiftFeatureDetector detector;
SiftDescriptorExtractor extractor;
vector<KeyPoint> key1;
vector<KeyPoint> key2;
Mat desc1, desc2;
detector.detect(img1, key1);
detector.detect(img2, key2);
extractor.compute(img1, key1, desc1);
extractor.compute(img2, key2, desc2);
FlannBasedMatcher matcher;
vector<DMatch> matches;
matcher.match(desc1, desc2, matches);
match.resize(matches.size(), 6);
cout << "match count: " << matches.size() << endl;
for (int i = 0; i < matches.size(); i++) {
match(i, 0) = key1[matches[i].queryIdx].pt.x;
match(i, 1) = key1[matches[i].queryIdx].pt.y;
match(i, 2) = 1;
match(i, 3) = key2[matches[i].trainIdx].pt.x;
match(i, 4) = key2[matches[i].trainIdx].pt.y;
match(i, 5) = 1;
}
}
Example 5: getFeatures
int getFeatures(Mat &object, Mat &frame, Mat &homography, vector<KeyPoint> &keypoints_object,
vector<KeyPoint> &keypoints_scene, vector<DMatch> &good_matches) {
Ptr<SIFT> detector = SIFT::create();
// Detect features and compute descriptors
Mat descriptors_object, descriptors_scene;
detector->detectAndCompute(object, noArray(), keypoints_object, descriptors_object);
detector->detectAndCompute(frame, noArray(), keypoints_scene, descriptors_scene);
// Match descriptors using FLANN
FlannBasedMatcher matcher;
vector<DMatch> matches;
matcher.match(descriptors_object, descriptors_scene, matches);
// Check if too few matches are found
if (matches.size() <= 4) {
cout << "Error: too few matches were found" << endl;
return -1;
}
// Find minimum and maximum distances between descriptors
double max_dist = 0;
double min_dist = 100;
for (int i = 0; i < descriptors_object.rows; i++) {
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
// If there are sufficient matches, filter to find higher-quality subset
int minMatches = 8;
for (int i = 0; i < descriptors_object.rows; i++) {
if (matches[i].distance < 3*min_dist && matches.size() > minMatches) {
good_matches.push_back(matches[i]);
}
}
// If there are too few good matches, use all matches
if (good_matches.size() <= minMatches) {
for (int i = 0; i < matches.size(); i++) {
if (i < good_matches.size()) {
good_matches[i] = matches[i];
} else {
good_matches.push_back(matches[i]);
}
}
}
vector<Point2f> obj, scene;
for (int i = 0; i < good_matches.size(); i++) {
// Determine keypoints from the matches
obj.push_back(keypoints_object[ good_matches[i].queryIdx ].pt);
scene.push_back(keypoints_scene[ good_matches[i].trainIdx ].pt);
}
// Transform pixel coordinates between images
homography = findHomography(obj, scene, CV_RANSAC);
return good_matches.size();
}
Example 6: Match_an_Obj
bool RelicScn::Match_an_Obj(RelicObj obj)
{
string message;
FlannBasedMatcher matcher;
vector<DMatch> matches;
matcher.match(obj.descriptors, this->descriptors, matches);
vector<DMatch> good_matches = Get_Good_Matches(matches);
//-- Localize the object
std::vector<Point2f> obj_points;
std::vector<Point2f> scn_points;
for (size_t i = 0; i < good_matches.size(); i++)
{
//-- Get the keypoints from the good matches
obj_points.push_back(obj.keypoints[good_matches[i].queryIdx].pt);
scn_points.push_back(this->keypoints[good_matches[i].trainIdx].pt);
}
Mat H = cv::findHomography(obj_points, scn_points, RANSAC);
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0, 0);
obj_corners[1] = cvPoint(obj.img_width-1, 0);
obj_corners[2] = cvPoint(obj.img_width-1, obj.img_height-1);
obj_corners[3] = cvPoint(0, obj.img_height-1);
std::vector<Point2f> possible_obj_corners(4);
perspectiveTransform(obj_corners, possible_obj_corners, H);
BOOST_LOG_TRIVIAL(info) << "原始目标物体大小(像素): " << contourArea(obj_corners);
BOOST_LOG_TRIVIAL(info) << "检测到的物体大小(像素): " << contourArea(possible_obj_corners);
this->corners = possible_obj_corners;
double possible_target_area = contourArea(possible_obj_corners);
double whole_scene_area = this->img_gray.rows*this->img_gray.cols;
BOOST_LOG_TRIVIAL(info) << "环境图像大小(像素): " << whole_scene_area;
double ratio = possible_target_area / whole_scene_area;
BOOST_LOG_TRIVIAL(info) << "检测到的目标占全图比例: " << ratio;
if (ratio>0.03 && ratio<1)
{
for (int i;i < possible_obj_corners.size();i++)
{
if (possible_obj_corners[i].x < 0 || possible_obj_corners[i].y < 0)
{
BOOST_LOG_TRIVIAL(info) << "未能检测到目标物体!";
return false;
}
}
BOOST_LOG_TRIVIAL(info) << "成功检测到目标物体!";
return true;
}
else
{
BOOST_LOG_TRIVIAL(info) << "未能检测到目标物体!";
return false;
}
}
Example 7: compare
int compare(Mat img_1, Mat img_2) {
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_1.rows; i++ ) {
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
//-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
//-- small)
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ ) {
if( matches[i].distance <= max(2 * min_dist, 0.02) ) {
good_matches.push_back( matches[i]);
}
}
// Mat img_matches;
// drawMatches( img_1, keypoints_1, img_2, keypoints_2,
// good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
// vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
// //-- Show detected matches
// imshow( "Good Matches", img_matches );
waitKey(0);
return good_matches.size();
}
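The comment in Example 7 notes that radiusMatch can also be used. Purely as a sketch (not taken from the example), radiusMatch returns every train descriptor within a fixed distance of each query descriptor, so the distance threshold is applied by the matcher itself; the threshold value below is an assumed placeholder:
FlannBasedMatcher matcher;
std::vector<std::vector<DMatch>> radiusMatches;
float maxDistance = 0.25f;                        // assumed threshold; tune for the descriptor in use
matcher.radiusMatch(descriptors_1, descriptors_2, radiusMatches, maxDistance);
std::vector<DMatch> good_matches;
for (const auto& candidates : radiusMatches)
{
    if (!candidates.empty())
        good_matches.push_back(candidates[0]);    // candidates are sorted by increasing distance
}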
Example 8: computeSurfGoodMatches
int PlayedCard::computeSurfGoodMatches(vector<KeyPoint> keypoints_1, vector<KeyPoint> keypoints_2, Mat descriptors_1, Mat descriptors_2) {
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
vector< DMatch > matches;
matcher.match(descriptors_1, descriptors_2, matches);
filterMatchesByAbsoluteValue(matches, 0.125);
filterMatchesRANSAC(matches, keypoints_1, keypoints_2);
return (int)matches.size();
}
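filterMatchesByAbsoluteValue and filterMatchesRANSAC are helpers from the surrounding project, and their bodies are not shown here. A hypothetical sketch of what an absolute-distance filter of this kind could look like (my assumption, not the project's actual implementation):
// Hypothetical sketch: drop matches whose distance exceeds a fixed threshold
static void filterMatchesByAbsoluteValueSketch(std::vector<DMatch>& matches, float maxDistance)
{
    std::vector<DMatch> kept;
    for (const DMatch& m : matches)
    {
        if (m.distance < maxDistance)
            kept.push_back(m);
    }
    matches.swap(kept);
}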
Example 9: main
int main(int argc, char* argv[])
{
VideoCapture camera;
camera.set(CV_CAP_PROP_FRAME_WIDTH, WIDTH);
camera.set(CV_CAP_PROP_FRAME_HEIGHT, HEIGHT);
camera.open(0);
//checkOpenCL();
Ptr<FeatureDetector> detector = FeatureDetector::create("STAR");
//Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("FREAK");
BriefDescriptorExtractor extractor;
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
Mat descriptor[2];
int k = 0;
camera >> image;
detector->detect(image, keypoint[1]);
extractor.compute(image, keypoint[1], descriptor[1]);
for (bool loop = true; loop; )
{
switch (waitKey(10))
{
case 'q':
loop = false;
break;
}
camera >> image;
if (image.empty())
break;
// detect features
detector->detect(image, keypoint[k % 2]);
extractor.compute(image, keypoint[k % 2], descriptor[k % 2]);
try {
matcher.match(descriptor[0], descriptor[1], matches);
}
catch (const Exception& ex)
{
printf("%s\n", ex.msg.c_str());
}
printf("%d\n", keypoint[k % 2].size());
for (int i = 0; i < keypoint[k % 2].size(); i++)
{
Point2f pt = keypoint[k % 2][i].pt;
circle(image, Point(pt.x, pt.y), 3, Scalar(0, 0, 255));
}
k++;
imshow("image", image);
}
return 0;
}
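The try/catch around match() in Example 9 points at a common pitfall: FlannBasedMatcher's default KD-tree index expects CV_32F descriptors, so feeding it binary descriptors such as the BRIEF output above raises an exception. A sketch of the usual workaround is to construct the matcher with an LSH index; the parameter values are typical suggestions, not taken from the example:
// Sketch: FLANN matching of binary (CV_8U) descriptors through an LSH index
FlannBasedMatcher binMatcher(makePtr<flann::LshIndexParams>(12, 20, 2),
                             makePtr<flann::SearchParams>(50));
std::vector<DMatch> binMatches;
binMatcher.match(descriptor[0], descriptor[1], binMatches);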
Example 10: testFeatures
/**
* @brief featuredetector::testFeatures
* FLANN based matching.
* Algorithm is taken from
* http://docs.opencv.org/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.html
* @return Matched points image.
*/
Mat featuredetector::testFeatures(){
cv::Mat im1, im2;
//Grayscale the images.
if(_image1.channels() == 3)
cv::cvtColor(_image1,im1, CV_BGR2GRAY);
else _image1.copyTo(im1);
if(_image2.channels() == 3)
cv::cvtColor(_image2,im2, CV_BGR2GRAY);
else _image2.copyTo(im2);
int minH = 100; // (should be around ~100)
Ptr<xfeatures2d::SURF> detector = xfeatures2d::SURF::create(minH);
detector->setHessianThreshold(minH);
std::vector<KeyPoint> keypoints1, keypoints2;
Mat descriptors1, descriptors2;
detector->detectAndCompute( im1, Mat(), keypoints1, descriptors1 );
detector->detectAndCompute( im2, Mat(), keypoints2, descriptors2 );
FlannBasedMatcher matcher;
std::vector<DMatch> matches;
matcher.match( descriptors1, descriptors2, matches );
double maxDist = 0; double minDist = 80;
for( int i = 0; i < descriptors1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < minDist ) minDist = dist;
if( dist > maxDist ) maxDist = dist;
}
std::vector< DMatch > goodMatches;
for( int i = 0; i < descriptors1.rows; i++ )
{ if( matches[i].distance <= max(2*minDist, 0.02) )
{ goodMatches.push_back( matches[i]); }
}
Mat matchMat;
drawMatches( im1, keypoints1, im2, keypoints2,
goodMatches, matchMat, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
imshow( "Matches", matchMat );
return matchMat;
}
Example 11: flann
/*
* @function main
* @brief Main function
*/
int flann( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
int minHessian = 400;
Ptr<SURF> detector = SURF::create();
detector->setHessianThreshold(minHessian);
std::vector<KeyPoint> keypoints_1, keypoints_2;
Mat descriptors_1, descriptors_2;
detector->detectAndCompute( img_1, Mat(), keypoints_1, descriptors_1 );
detector->detectAndCompute( img_2, Mat(), keypoints_2, descriptors_2 );
//-- Step 2: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
//-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
//-- small)
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{ if( matches[i].distance <= max(2*min_dist, 0.02) )
{ good_matches.push_back( matches[i]); }
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
imshow( "Good Matches", img_matches );
for( int i = 0; i < (int)good_matches.size(); i++ )
{ printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
waitKey(0);
return 0;
}
Example 12: Mat
vector<DMatch> GraphicEnd::match( vector<PLANE>& p1, vector<PLANE>& p2 )
{
cout<<"GraphicEnd::match two planes"<<endl;
FlannBasedMatcher matcher;
vector<DMatch> matches;
cv::Mat des1(p1.size(), 4, CV_32F), des2(p2.size(), 4, CV_32F);
for (size_t i=0; i<p1.size(); i++)
{
pcl::ModelCoefficients c = p1[i].coff;
float m[1][4] = { c.values[0], c.values[1], c.values[2], c.values[3] };
Mat mat = Mat(1,4, CV_32F, m);
mat.row(0).copyTo( des1.row(i) );
}
for (size_t i=0; i<p2.size(); i++)
{
pcl::ModelCoefficients c = p2[i].coff;
float m[1][4] = { c.values[0], c.values[1], c.values[2], c.values[3] };
Mat mat = Mat(1,4, CV_32F, m);
mat.row(0).copyTo( des2.row(i) );
}
matcher.match( des1, des2, matches);
return matches; // NOTE: this early return makes the min-distance filtering below unreachable in the original source
double max_dist = 0, min_dist = 100;
for (int i=0; i<des1.rows; i++)
{
double dist = matches[ i ].distance;
if (dist < min_dist)
min_dist = dist;
if (dist > max_dist)
max_dist = dist;
}
vector<DMatch> good_matches;
for (size_t i=0; i<matches.size(); i++)
{
if (matches[ i ].distance <= 3*min_dist)
{
good_matches.push_back(matches[ i ]);
}
}
return good_matches;
}
Example 13: computeMatching
// Performs matching between points
void computeMatching(Mat& img1, Mat& img2,vector<KeyPoint>& keypoints1,vector<KeyPoint>& keypoints2, vector<DMatch>& matches ){
// computing descriptors
#if _SURF_
SurfDescriptorExtractor extractor;
#elif _SIFT_
SiftDescriptorExtractor extractor;
#endif
Mat descriptors1, descriptors2;
extractor.compute(img1, keypoints1, descriptors1);
extractor.compute(img2, keypoints2, descriptors2);
FlannBasedMatcher matcher;
matcher.match(descriptors1,descriptors2,matches);
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors1.rows; i++ ){
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors1.rows; i++ ){
if( matches[i].distance < 2*min_dist ){
good_matches.push_back( matches[i]);
}
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( img1, keypoints1, img2, keypoints2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
imshow( "Good Matches", img_matches );
}
Example 14: findTopFiveFLANNMatches
void findTopFiveFLANNMatches(Mat hqDesc, vector<Mat>* keyframeDesc, vector<vector< DMatch >>* matchVec, vector<int>* matchIndices){
FlannBasedMatcher matcher;
int index = 0;
//Calculate matches between the high-quality image and each keyframe descriptor
for (vector<Mat>::iterator it = keyframeDesc->begin(); it != keyframeDesc->end(); ++it){
vector< DMatch > matches;
//calculate initial matches
Mat kfDesc = *it;
matcher.match(hqDesc, kfDesc, matches);
//determine good matches
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < hqDesc.rows; i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
std::vector< DMatch > good_matches;
for (int i = 0; i < hqDesc.rows; i++)
{
if (matches[i].distance <= max(2 * min_dist, 0.02))
{
good_matches.push_back(matches[i]);
}
}
matchVec->push_back(good_matches);
index++;
}
//pickTopFive
pickTopFive(matchVec, matchIndices);
index = 0;
}
Example 15: main
int main(int argc, char** argv)
{
std::vector<KeyPoint> keypoints_1;
vector<vector<KeyPoint>>keypoints = vector<vector<KeyPoint>>();
//////////////////////////////////////////////////////////////////////////
Ptr<FeatureDetector> detector = xfeatures2d::SIFT::create();
Ptr<DescriptorExtractor> extractor = xfeatures2d::SIFT::create();
//Ptr<DescriptorMatcher> matcher = new FlannBasedMatcher();
FlannBasedMatcher* matcher = new FlannBasedMatcher();
//////////////////////////////////////////////////////////////////////////
Mat image, descriptor, homography;
Mat posters[7], descriptors[7];
vector<DMatch> matches = vector<DMatch>();
//detects and extracts the local features
openImage("poster_test.jpg", image);
detector->detect(image, keypoints_1);
extractor->compute(image, keypoints_1, descriptor);
for (int i = 0;i < 7;i++)
{
vector<KeyPoint> keypoint;
openImage("poster" + std::to_string(i + 1) + ".jpg", posters[i]);
detector->detect(posters[i], keypoint);
extractor->compute(posters[i], keypoint, descriptors[i]);
keypoints.push_back(keypoint);
}
matcher->match(descriptor, descriptors[5], matches);
filterMatchesByAbsoluteValue(matches, 90);
homography = filterMatchesRANSAC(matches, keypoints_1, keypoints[5]);
showResult(image, keypoints_1, posters[5], keypoints[5], matches, homography);
return 0;
}