This article collects typical usage examples of the C++ method FlannBasedMatcher::knnMatch. If you are wondering how FlannBasedMatcher::knnMatch is used in practice, the curated code examples below should help; you can also explore the other members of the FlannBasedMatcher class.
The following 15 code examples of FlannBasedMatcher::knnMatch are ordered by popularity.
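All fifteen examples revolve around the same call shape: request the k nearest training descriptors for every query descriptor, then filter the pairs with Lowe's ratio test. As a reference point, here is a minimal sketch of that pattern; it is not taken from any one example, the helper name ratioFilteredMatches and the 0.7 ratio are illustrative, and note that FLANN's default KD-tree index requires CV_32F descriptors.
// Minimal knnMatch + ratio-test sketch (OpenCV 2.x style).
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <vector>

std::vector<cv::DMatch> ratioFilteredMatches( const cv::Mat& queryDesc,
                                              const cv::Mat& trainDesc,
                                              float ratio = 0.7f )
{
    cv::FlannBasedMatcher matcher;
    std::vector< std::vector<cv::DMatch> > knn;
    matcher.knnMatch( queryDesc, trainDesc, knn, 2 ); // two nearest neighbours per query row

    std::vector<cv::DMatch> good;
    for( size_t i = 0; i < knn.size(); i++ )
    {
        if( knn[i].size() < 2 )
            continue; // cannot apply the ratio test without a second neighbour
        if( knn[i][0].distance < ratio * knn[i][1].distance )
            good.push_back( knn[i][0] );
    }
    return good;
}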
Example 1: findMatch
bool findMatch(CvPoint &offset, FlannBasedMatcher matcher, SurfFeatureDetector detector, SurfDescriptorExtractor extractor, Mat des_object[])
{
bool noMatch = true;
Mat des_image, img_matches;
vector<KeyPoint> kp_image;
vector<vector<DMatch > > matches;
vector<DMatch > good_matches;
int iter = 0;
Mat image = imread("/home/pi/opencv/photo.jpg" , CV_LOAD_IMAGE_GRAYSCALE );
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
while ( noMatch )
{
//printf("before kp and des detection 2\n");
matcher.knnMatch(des_object[iter], des_image, matches, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if(matches[i].size() >= 2 && matches[i][0].distance < 0.6*(matches[i][1].distance)) // neighbour-count check first, so matches[i][1] is never read when it does not exist
{
good_matches.push_back(matches[i][0]);
}
}
//printf("Number of matches: %d\n", good_matches.size());
if (good_matches.size() >= 10)
{
CvPoint center = cvPoint(0,0);
for ( int z = 0 ; z < good_matches.size() ; z++ )
{
int index = good_matches.at(z).trainIdx;
center.x += kp_image.at(index).pt.x;
center.y += kp_image.at(index).pt.y;
}
center.x = center.x/good_matches.size();
center.y = center.y/good_matches.size();
int radius = 5;
circle( image, center, radius, {0,0,255}, 3, 8, 0 );
namedWindow("test");
imshow("test", image);
imwrite("centerPoint.jpg", image);
waitKey(5000);
int offsetX = center.x - image.cols/2;
int offsetY = center.y - image.rows/2;
offset = cvPoint(offsetX, offsetY);
noMatch = false;
}
//printf("draw good matches\n");
//Show detected matches
if ( iter++ == 3 || !noMatch )
break;
good_matches.clear();
}
return noMatch;
}
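One detail that is easy to miss: the function returns noMatch, so it yields false when the object is found. A hypothetical call site, assuming matcher, detector, extractor, and des_object are initialized as the example expects:
// Hypothetical caller: 'false' from findMatch() means the object WAS found.
CvPoint offset;
bool noMatch = findMatch( offset, matcher, detector, extractor, des_object );
if( !noMatch )
    printf( "object centre offset from image centre: (%d, %d)\n", offset.x, offset.y );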
Example 2: tryFindImage_features
void tryFindImage_features(Mat input)
{
/* Compare the incoming image against the set of reference images and pick the best match */
resize(input, input, Size(SIGN_SIZE, SIGN_SIZE), 0, 0);
vector<KeyPoint> keyPoints;
_detector.detect(input, keyPoints);
Mat descriptors;
_extractor.compute(input, keyPoints, descriptors);
int max_value = 0, max_position = 0;
for(int i=0; i < 5; i++)
{
vector< vector<DMatch> > matches;
_matcher.knnMatch(descriptors, _train_descriptors[i], matches, 50); // k=50 asks for up to 50 neighbours, though only the two nearest are used below
int good_matches_count = 0;
for (size_t j = 0; j < matches.size(); ++j)
{
if (matches[j].size() < 2)
continue;
const DMatch &m1 = matches[j][0];
const DMatch &m2 = matches[j][1];
if(m1.distance <= 0.7 * m2.distance)
good_matches_count++;
}
if(good_matches_count > max_value)
{
max_value = good_matches_count;
max_position = i;
}
}
cout << STATUS_STR << "Detected sign: " << _train_sign_names[max_position] << endl;
}
Example 3: matchDescriptors
//static void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDescriptors,
//vector<DMatch>& matches, FlannBasedMatcher& descriptorMatcher )
static void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDescriptors,
vector<DMatch>& matches, FlannBasedMatcher& descriptorMatcher, const vector<Mat>& trainImages, const vector<string>& trainImagesNames )
{
cout << "< Set train descriptors collection in the matcher and match query descriptors to them..." << endl;
descriptorMatcher.add( trainDescriptors );
descriptorMatcher.train();
descriptorMatcher.match( queryDescriptors, matches );
CV_Assert( queryDescriptors.rows == (int)matches.size() || matches.empty() );
cout << "Number of matches: " << matches.size() << endl;
cout << ">" << endl;
for( int i = 0; i < trainDescriptors.size(); i++){
std::vector< std::vector< DMatch> > matches2;
std::vector< DMatch > good_matches;
descriptorMatcher.knnMatch( queryDescriptors, trainDescriptors[i], matches2, 2);
CV_Assert( queryDescriptors.rows == (int)matches2.size() || matches2.empty() );
const float ratio = 0.8f; // As in Lowe's paper; can be tuned
for (size_t j = 0; j < matches2.size(); ++j){
if (matches2[j].size() < 2) // a row can hold fewer than two neighbours
continue;
if (matches2[j][0].distance < ratio * matches2[j][1].distance){
good_matches.push_back(matches2[j][0]);
}
}
cout << "currentMatchSize : " << good_matches.size() << endl;
}
}
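The first half of this example matches against a trained collection (add/train, then a match call with no train argument). In that mode each DMatch also carries an imgIdx field identifying which training image the neighbour came from, so the per-image knnMatch loop can be collapsed into one call over the whole collection. A sketch of that variant, reusing this example's names; the tallying vector is illustrative, not part of the example:
// One knnMatch over the trained collection; DMatch::imgIdx says which
// training image each neighbour came from.
std::vector< std::vector< DMatch > > knn;
descriptorMatcher.knnMatch( queryDescriptors, knn, 2 );
std::vector<int> goodPerImage( trainDescriptors.size(), 0 );
for( size_t i = 0; i < knn.size(); i++ )
{
    if( knn[i].size() < 2 )
        continue;
    if( knn[i][0].distance < 0.8f * knn[i][1].distance )
        goodPerImage[ knn[i][0].imgIdx ]++;
}
Note that against a pooled collection the two nearest neighbours may come from different training images, which makes this ratio test stricter than the per-image version above.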
Example 4: main
//--------------------------------------[ main() function ]-------------------------------------
// Description: entry point of the console application; execution starts here
//-----------------------------------------------------------------------------------------------
int main( )
{
//[0] Change the console text colour (Windows-only command)
system("color 6F");
void ShowHelpText(); // note: this line only declares ShowHelpText(); the sample never calls it
//[1] Load the training image, display it, and convert it to grayscale
Mat trainImage = imread("1.jpg"), trainImage_gray;
imshow("Original Image",trainImage);
cvtColor(trainImage, trainImage_gray, CV_BGR2GRAY);
//[2] Detect SURF keypoints and compute descriptors for the training image
vector<KeyPoint> train_keyPoint;
Mat trainDescriptor;
SurfFeatureDetector featureDetector(80);
featureDetector.detect(trainImage_gray, train_keyPoint);
SurfDescriptorExtractor featureExtractor;
featureExtractor.compute(trainImage_gray, train_keyPoint, trainDescriptor);
//[3] Create a FLANN-based descriptor matcher
FlannBasedMatcher matcher;
vector<Mat> train_desc_collection(1, trainDescriptor);
matcher.add(train_desc_collection);
matcher.train();
//[4] Create the video-capture object
VideoCapture cap(0);
unsigned int frameCount = 0;// frame counter
//[5] Loop until the 'q' key is pressed
while(char(waitKey(1)) != 'q')
{
//<1> Per-frame setup
int64 time0 = getTickCount();
Mat testImage, testImage_gray;
cap >> testImage;// grab one frame into testImage
if(testImage.empty())
continue;
//<2> Convert the frame to grayscale
cvtColor(testImage, testImage_gray, CV_BGR2GRAY);
//<3> Detect SURF keypoints and compute descriptors for the test frame
vector<KeyPoint> test_keyPoint;
Mat testDescriptor;
featureDetector.detect(testImage_gray, test_keyPoint);
featureExtractor.compute(testImage_gray, test_keyPoint, testDescriptor);
//<4> Match test descriptors against the trained collection (no train argument: uses the descriptors added to the matcher)
vector<vector<DMatch> > matches;
matcher.knnMatch(testDescriptor, matches, 2);
//<5> Keep the good matches, using Lowe's ratio test
vector<DMatch> goodMatches;
for(unsigned int i = 0; i < matches.size(); i++)
{
if(matches[i][0].distance < 0.6 * matches[i][1].distance)
goodMatches.push_back(matches[i][0]);
}
//<6> Draw the matches and display the window
Mat dstImage;
drawMatches(testImage, test_keyPoint, trainImage, train_keyPoint, goodMatches, dstImage);
imshow("Match Window", dstImage);
//<7> Print the current frame rate
cout << "Current frame rate: " << getTickFrequency() / (getTickCount() - time0) << endl;
}
return 0;
}
Example 5: matchKeypoints
/** @function matchKeypoints */
int matchKeypoints( int argc, char** argv )
{
// if( argc != 3 )
// { readme(); return -1; }
cv::initModule_nonfree();
Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
Codebook codebook;
//codebook.readInCSV(string(argv[3]));
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 15000;
//SurfFeatureDetector detector( minHessian);
SURF* detector = new SURF(minHessian,1,4,true,true);
std::vector<KeyPoint> keypoints_1, keypoints_2;
assert(img_1.size[0]>0 && img_1.size[1]>0 && img_2.size[0]>0 && img_2.size[1]>0);
(*detector)( img_1, Mat(), keypoints_1 );
(*detector)( img_2, Mat(), keypoints_2 );
Mat img_keypoints_1; Mat img_keypoints_2;
// drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
// drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
cvtColor(img_1,img_keypoints_1,CV_GRAY2RGB);
for (KeyPoint k :keypoints_1)
{
// circle(img_keypoints_1,k.pt,k.size,Scalar(rand()%256,rand()%256,rand()%256));
// cout<<k.size<<endl;
Rect rec(k.pt.x-(k.size/2),k.pt.y-(k.size/2),k.size,k.size);
rectangle(img_keypoints_1,rec,Scalar(rand()%256,rand()%256,rand()%256));
}
cvtColor(img_2,img_keypoints_2,CV_GRAY2RGB);
for (KeyPoint k :keypoints_2)
{
// circle(img_keypoints_2,k.pt,k.size,Scalar(rand()%256,rand()%256,rand()%256));
Rect rec(k.pt.x-(k.size/2),k.pt.y-(k.size/2),k.size,k.size);
rectangle(img_keypoints_2,rec,Scalar(rand()%256,rand()%256,rand()%256));
}
//-- Show detected (drawn) keypoints
imshow("Keypoints 1", img_keypoints_1 );
imshow("Keypoints 2", img_keypoints_2 );
waitKey(0);
//-- Step 2: Calculate descriptors (feature vectors)
//SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
detector->compute( img_1, keypoints_1, descriptors_1 );
detector->compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< std::vector< DMatch > > matches;
matcher.knnMatch( descriptors_1, descriptors_2, matches, 10 );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < matches.size(); i++ )
{
for (int j=0; j < matches[i].size(); j++)
{
double dist = matches[i][j].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Keep only "good" matches: those whose distance is below a quarter of the
//-- distance range above min_dist (with 0.02 as a floor in case that value is
//-- very small)
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < matches.size(); i++ )
{
for (int j=0; j < matches[i].size(); j++)
//if( matches[i][j].distance <= max(2*min_dist, 0.02) )
if( matches[i][j].distance <= max((max_dist-min_dist)/4.0 + min_dist, 0.02) )
{ good_matches.push_back( matches[i][j]); }
else
printf("discard(%d,%d)\n",i,j);
}
//-- Draw only "good" matches
//......... some code omitted here .........
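The closing comment mentions radiusMatch as an alternative to knnMatch: instead of a fixed neighbour count, it returns every training descriptor within a distance threshold. A minimal sketch against the same descriptors_1/descriptors_2; the 0.2f radius is an illustrative value, since useful thresholds depend on the descriptor type:
// radiusMatch: collect every neighbour closer than maxDistance, not the k nearest.
FlannBasedMatcher radiusMatcher;
std::vector< std::vector< DMatch > > radius_matches;
const float maxDistance = 0.2f; // illustrative threshold
radiusMatcher.radiusMatch( descriptors_1, descriptors_2, radius_matches, maxDistance );
std::vector< DMatch > within_radius;
for( size_t i = 0; i < radius_matches.size(); i++ )
{
    // a query row may have no neighbour inside the radius at all
    if( !radius_matches[i].empty() )
        within_radius.push_back( radius_matches[i][0] ); // results are sorted by distance
}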
Example 6: main
int main(int argc, char** argv)
{
sourceORIG = imread( argv[1] );
sourceORIG2 = imread( argv[2] );
cvtColor( sourceORIG, sourceGRAY, CV_BGR2GRAY );
cvtColor( sourceORIG2, sourceGRAY2, CV_BGR2GRAY );
GaussianBlur( sourceGRAY, sourceGRAY_BLUR, Size(3,3), 3.0 );
GaussianBlur( sourceGRAY2, sourceGRAY_BLUR2, Size(7,7), 3.0 );
Canny( sourceGRAY_BLUR, cannyOut, lowThreshold, lowThreshold*ratio, kernel_size );
cv::dilate(cannyOut, cannyOut, cv::Mat(), cv::Point(-1,-1));
findContours( cannyOut, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
drawing = Mat::zeros( cannyOut.size(), CV_8UC3 );
vector<Point> approxShape;
for(size_t i = 0; i < contours.size(); i++) {
approxPolyDP(contours[i], approxShape, 5, true);
drawContours(drawing, contours, i, Scalar(255, 255, 255), CV_FILLED);
}
bitwise_not ( drawing, drawing );
vector<vector<Point> > contours_poly( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{
double area = contourArea(contours[i]);
if(area > maxArea) {
maxArea = area;
approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
boundRect = boundingRect( Mat(contours_poly[i]) );
}
}
drawing = drawing(boundRect);
mytemplate = drawing;
// http://stackoverflow.com/questions/24539273/how-to-find-out-how-good-a-match-is-in-opencv
// GaussianBlur( mytemplate, mytemplate, Size(7,7), 3.0 );
detector.detect( mytemplate, keypointsTMPL );
detector.detect( sourceGRAY_BLUR2, keypointsSCENE );
extractor.compute( mytemplate, keypointsTMPL, descriptorsTMPL );
extractor.compute( sourceGRAY_BLUR2, keypointsSCENE, descriptorsSCENE );
obj_corners[0] = (cvPoint(0,0));
obj_corners[1] = (cvPoint(mytemplate.cols,0));
obj_corners[2] = (cvPoint(mytemplate.cols,mytemplate.rows));
obj_corners[3] = (cvPoint(0, mytemplate.rows));
if(descriptorsSCENE.type()!=CV_32F) {
descriptorsSCENE.convertTo(descriptorsSCENE, CV_32F);
}
if(descriptorsTMPL.type()!=CV_32F) {
descriptorsTMPL.convertTo(descriptorsTMPL, CV_32F);
}
// if ( descriptorsTMPL.empty() )
// cvError(0,"MatchFinder","1st descriptor empty",__FILE__,__LINE__);
// if ( descriptorsSCENE.empty() )
// cvError(0,"MatchFinder","2nd descriptor empty",__FILE__,__LINE__);
matcher.knnMatch( descriptorsTMPL, descriptorsSCENE, matches, 2);
for(int i = 0; i < cv::min(sourceGRAY_BLUR2.rows-1,(int) matches.size()); i++) {
if(matches[i].size() >= 2 && matches[i][0].distance < 0.6*(matches[i][1].distance)) { // neighbour-count check first (see Example 1)
good_matches.push_back(matches[i][0]);
cout << "GOOD MATCHES" << endl;
}
cout << "MATCHES: " << matches[i].size() << endl;
}
// for( int i = 0; i < descriptorsTMPL.rows; i++ )
// {
// if( matches[i][0].distance < 100 )
// {
// good_matches.push_back( matches[i][0]);
// }
// }
drawMatches( sourceGRAY_BLUR2, keypointsSCENE, mytemplate, keypointsTMPL, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
if (good_matches.size() >= 4) {
for( int i = 0; i < good_matches.size(); i++ ) {
//Get the keypoints from the good matches
obj.push_back( keypointsTMPL[ good_matches[i].queryIdx ].pt );
scene.push_back( keypointsSCENE[ good_matches[i].trainIdx ].pt );
}
H = findHomography( obj, scene, CV_RANSAC );
perspectiveTransform( obj_corners, scene_corners, H);
//Draw lines between the corners (the mapped object in the scene image )
line( img_matches, scene_corners[0], scene_corners[1], cvScalar(0, 255, 0), 4 );
//......... some code omitted here .........
Example 7: main
//......... some code omitted here .........
char object_path[80];
strcpy_s(object_path, "..\\Images\\Photos\\");
strcat_s(object_path, fon.c_str());
strcat_s(object_path, "\\");
strcat_s(object_path, fn.c_str());
puts(object_path);
// Mat img_object = imread(object_path, IMREAD_GRAYSCALE);
Mat objectMat = imread(object_path, IMREAD_GRAYSCALE);
if (!objectMat.data || !sceneMat.data)
{
std::cout << " --(!) Error reading images " << std::endl; return -1;
}
//detector.detect(img_object, keypoints_object);
surf.detect(objectMat, keypointsO);
extractor.compute(objectMat, keypointsO, descriptors_object);
//Declering flann based matcher
FlannBasedMatcher matcher;
//BFMatcher for SURF algorithm can be either set to NORM_L1 or NORM_L2.
//But if you are using binary feature extractors like ORB, instead of NORM_L* you use "hamming"
// BFMatcher matcher(NORM_L1);
vector< vector< DMatch >> matches;
matcher.knnMatch(descriptors_object, descriptors_scene, matches, 2); // find the 2 nearest neighbors
vector< DMatch > good_matches;
good_matches.reserve(matches.size());
float nndrRatio = 0.7f;
for (size_t i = 0; i < matches.size(); ++i)
{
if (matches[i].size() < 2)
continue;
const DMatch &m1 = matches[i][0];
const DMatch &m2 = matches[i][1];
if (m1.distance <= nndrRatio * m2.distance)
good_matches.push_back(m1);
}
printf("-- Amount of good matches : %d \n", good_matches.size());
good_matches_per_folder[i - 2].push_back(good_matches.size());
if (good_matches.size() > most_amount_of_matches_overall){
most_amount_of_matches_overall = good_matches.size();
}
if (vis){
Mat img_matches;
drawMatches(objectMat, keypointsO, sceneMat, keypointsS,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
imshow("Good Matches & Object detection", img_matches);
waitKey(0);
}
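The comments in this example point out that binary descriptors such as ORB should be matched with the Hamming norm rather than FLANN's default float-based index. A short sketch of that variant, independent of the SURF pipeline above; imgA and imgB are hypothetical grayscale input images, and the ORB parameters are left at their defaults:
// Hamming-norm brute force for binary ORB descriptors (FLANN's default
// KD-tree index expects CV_32F descriptors, so it does not fit here).
OrbFeatureDetector orbDetector;
OrbDescriptorExtractor orbExtractor;
std::vector<KeyPoint> kpA, kpB;
Mat descA, descB;
orbDetector.detect( imgA, kpA );
orbExtractor.compute( imgA, kpA, descA );
orbDetector.detect( imgB, kpB );
orbExtractor.compute( imgB, kpB, descB );

BFMatcher hammingMatcher( NORM_HAMMING );
std::vector< std::vector< DMatch > > knn;
hammingMatcher.knnMatch( descA, descB, knn, 2 ); // the usual ratio test applies afterwards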
Example 8: main
//......... some code omitted here .........
cl.Update(&cr);
if (framecount < 5)
{
framecount++;
continue;
}
cl.toIplImage(l);
cr.toIplImage(r);
Mat lg, rg, lge, rge;
cvtColor(Mat(l), lg, CV_RGB2GRAY);
cvtColor(Mat(r), rg, CV_RGB2GRAY);
equalizeHist(lg, lge);
equalizeHist(rg, rge);
line(lg, Point(w/2 - window + offset, 0), Point(w/2 - window + offset, h), Scalar(0, 255, 0), 2);
line(lg, Point(w/2 + window + offset, 0), Point(w/2 + window + offset, h), Scalar(0, 255, 0), 2);
line(rg, Point(w/2 - window - offset, 0), Point(w/2 - window - offset, h), Scalar(0, 255, 0), 2);
line(rg, Point(w/2 + window - offset, 0), Point(w/2 + window - offset, h), Scalar(0, 255, 0), 2);
Mat des_image, img_matches, H;
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
detector.detect(lg, kp_image);
extractor.compute(lg, kp_image, des_image);
matcher.knnMatch(des_object, des_image, matches, 2);
for(int i = 0; i < matches.size(); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if(matches[i].size()==2 && (matches[i][0].distance < 0.8*(matches[i][1].distance)) )
{
good_matches.push_back(matches[i][0]);
obj.push_back(kp_object[matches[i][0].queryIdx].pt);
scene.push_back(kp_image[matches[i][0].trainIdx].pt);
}
}
if (good_matches.size() >= 35)
{
H = findHomography(obj, scene, CV_RANSAC);
perspectiveTransform(obj_corners, scene_corners, H);
RotatedRect box = minAreaRect(scene_corners);
Point2f rect_corners[4];
box.points(rect_corners);
Rect roi = box.boundingRect();
if (roi.area() > 3000)
{
detect = true; count = 0;
for (int i = 0; i < 4; i++)
{
line(lg, rect_corners[i], rect_corners[(i+1)%4], Scalar(255, 255, 255), 4);
}
line(lg, box.center, box.center, Scalar(255, 255, 255), 8);
if (box.center.x < w/2 - window + offset)
{
//......... some code omitted here .........
Example 9: detector_Surf
JNIEXPORT bool JNICALL Java_com_example_mipatternrecognition_MainActivity_FindObject(
JNIEnv*, jobject, jlong addrGray, jlong addrRgba, jint TypeDetection) {
Mat& mGr = *(Mat*) addrGray;
Mat& mRgb = *(Mat*) addrRgba;
Mat& objeto = *(Mat*) objeto_long;
int minHessian = 500;
SurfFeatureDetector detector_Surf(minHessian);
SiftFeatureDetector detector_Sift(minHessian);
FastFeatureDetector detector_Fast(50);
OrbFeatureDetector detector_Orb(500, 1.2f, 8, 14, 0, 2, 0, 14);
MserFeatureDetector detector_Mser(5, 60, 14400, 0.25, 0.2, 200, 1.01, 0.003,
5);
int maxCorners = 1000;
double qualityLevel = 0.01;
double minDistance = 1.;
int blockSize = 3;
bool useHarrisDetector;
double k2 = 0.04;
useHarrisDetector = false;
GoodFeaturesToTrackDetector detector_Gftt(maxCorners, qualityLevel,
minDistance, blockSize, useHarrisDetector, k2);
useHarrisDetector = true;
GoodFeaturesToTrackDetector detector_Harris(maxCorners, qualityLevel,
minDistance, blockSize, useHarrisDetector, k2);
int maxSize = 45;
int responseThreshold = 30;
int lineThresholdProjected = 10;
int lineThresholdBinarized = 8;
int suppressNonmaxSize = 5;
StarFeatureDetector detector_Star(maxSize, responseThreshold,
lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize);
//http://stackoverflow.com/questions/14808429/classification-of-detectors-extractors-and-matchers
SurfDescriptorExtractor extractor_Surf;
SiftDescriptorExtractor extractor_Sift;
OrbDescriptorExtractor extractor_Orb;
FREAK extractor_Freak;
switch (TypeDetection) {
case SURF_DETECTION:
detector_Surf.detect(mGr, keyPoints_2);
extractor_Surf.compute(mGr, keyPoints_2, descriptors_2);
break;
case SIFT_DETECTION:
detector_Sift.detect(mGr, keyPoints_2);
extractor_Sift.compute(mGr, keyPoints_2, descriptors_2);
break;
case FAST_DETECTION:
detector_Fast.detect(mGr, keyPoints_2);
extractor_Freak.compute(mGr, keyPoints_2, descriptors_2);
break;
case ORB_DETECTION:
detector_Orb.detect(mGr, keyPoints_2);
extractor_Orb.compute(mGr, keyPoints_2, descriptors_2);
break;
case MSER_DETECTION:
detector_Mser.detect(mGr, keyPoints_2);
extractor_Surf.compute(mGr, keyPoints_2, descriptors_2);
break;
case GFTT_DETECTION:
detector_Gftt.detect(mGr, keyPoints_2);
extractor_Sift.compute(mGr, keyPoints_2, descriptors_2);
break;
case HARRIS_DETECTION:
detector_Harris.detect(mGr, keyPoints_2);
extractor_Orb.compute(mGr, keyPoints_2, descriptors_2);
break;
case STAR_DETECTION:
detector_Star.detect(mGr, keyPoints_2);
extractor_Orb.compute(mGr, keyPoints_2, descriptors_2);
break;
}
if (descriptors_2.rows == 0 || descriptors_1.rows == 0
|| keyPoints_2.size() == 0 || keyPoints_1.size() == 0) {
return false;
}
FlannBasedMatcher matcher;
vector<vector<DMatch> > matches;
matcher.knnMatch(descriptors_1, descriptors_2, matches, 2);
//-- Keep only "good" matches, using Lowe's ratio test on the two nearest
//-- neighbours returned by knnMatch.
//-- PS.- radiusMatch can also be used here.
vector<DMatch> good_matches;
for (int i = 0; i < min(descriptors_1.rows - 1, (int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if (matches[i].size() >= 2 && matches[i][0].distance < 0.6 * matches[i][1].distance) { // neighbour-count check first (see Example 1)
good_matches.push_back(matches[i][0]);
}
//......... some code omitted here .........
Example 10: main
int main()
{
Mat object = imread( "photo.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !object.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
//Detect the keypoints using SURF Detector
int minHessian = 500;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> kp_object;
detector.detect( object, kp_object );
//Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat des_object;
extractor.compute( object, kp_object, des_object );
FlannBasedMatcher matcher;
VideoCapture cap(0);
namedWindow("Good Matches");
std::vector<Point2f> obj_corners(4);
//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
char key = 'a';
int framecount = 0;
while (key != 27)
{
Mat frame;
cap >> frame;
if (framecount < 5)
{
framecount++;
continue;
}
Mat des_image, img_matches;
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
Mat image;
cvtColor(frame, image, CV_RGB2GRAY);
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
matcher.knnMatch(des_object, des_image, matches, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if(matches[i].size() >= 2 && matches[i][0].distance < 0.6*(matches[i][1].distance)) // neighbour-count check first (see Example 1)
{
good_matches.push_back(matches[i][0]);
}
}
//Draw only "good" matches
drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
if (good_matches.size() >= 4)
{
for( int i = 0; i < good_matches.size(); i++ )
{
//Get the keypoints from the good matches
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
}
H = findHomography( obj, scene, CV_RANSAC );
perspectiveTransform( obj_corners, scene_corners, H);
//Draw lines between the corners (the mapped object in the scene image )
line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
}
//Show detected matches
imshow( "Good Matches", img_matches );
//......... some code omitted here .........
Example 11: match
//Takes Mat object and finds its keypoints, then compares against the keypoints in segmentedCapture
//If there are 4 or more matching keypoints, then it reports a match
bool match(Mat object, IplImage* segmentedCapture, int i)
{
printf("Size check of segmented capture: height: %d, width: %d\n", segmentedCapture->height, segmentedCapture->width);
printf("attempting to read object now\n");
bool matchFound = false;
if( !object.data )
{
std::cout<< "Error reading object " << std::endl;
return false; // the function returns bool; 'return -1' would coerce to true and mask the error
}
int minHessian = 500;
SurfFeatureDetector detector(minHessian);
//Detect the keypoints using SURF Detector
std::vector<KeyPoint> kp_object;
detector.detect( object, kp_object );
//Calculate descriptors (feature vectors)
Mat des_object;
SurfDescriptorExtractor extractor;
extractor.compute( object, kp_object, des_object );
printf("Number of descriptors found for initial object: %d\n", (int)kp_object.size());
FlannBasedMatcher matcher;
char *windowName = new char[20];
sprintf(windowName, "Match %d", i);
destroyWindow(windowName);
namedWindow(windowName);
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
Mat des_image, img_matches;
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
Mat image;
cvResetImageROI(segmentedCapture);
printf("creating image to store it in");
// IplImage *image2 = cvCreateImage(cvSize(segmentedCapture->width, segmentedCapture->height), IPL_DEPTH_8U,1);
printf("about to convert to gray\n");
// cvCvtColor(segmentedCapture, image2, CV_BGR2GRAY);
//
// printf("converted to gray\n");
Mat matCon(segmentedCapture);
image = segmentedCapture;
// printf("before detection\n");
detector.detect( image, kp_image );
// printf("after detection, number of descriptors for detected object: %d\n", kp_image.size());
extractor.compute( image, kp_image, des_image );
// printf("after computation of extraction\n");
if(des_image.empty()){
printf("key points from capture frame are empty\n");
} else {
matcher.knnMatch(des_object, des_image, matches, 2);
// matcher.match(des_object, des_image, matches);
printf("after knnmatch: matches.size() is %d\n", matches.size());
for(int j = 0; j < min(des_image.rows-1,(int) matches.size()); j++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if(matches[j].size() >= 2 && matches[j][0].distance < 0.5*(matches[j][1].distance)) // neighbour-count check first (see Example 1)
{
good_matches.push_back(matches[j][0]);
// printf("Outer loop is on: %d, Number of matches is: %d\n", i, (int)good_matches.size());
}
}
//Draw only "good" matches
drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
if (good_matches.size() >= 4)
{
matchFound = true;
printf("Found %d matched points for detectedObject %d", good_matches.size(), i );
for( int i = 0; i < good_matches.size(); i++ )
{
//Get the keypoints from the good matches
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
}
H = findHomography( obj, scene, CV_RANSAC );
perspectiveTransform( obj_corners, scene_corners, H);
//......... some code omitted here .........
Example 12: detectLogo
bool detectLogo(Mat person, Mat desObject, Mat object, vector<KeyPoint> kpObject, vector<Point2f> objCorners)
{
// scale up the image
resize(person, person, Size(), 4, 4, CV_INTER_CUBIC);
// sharpen the image
Mat image;
GaussianBlur(person, image, cv::Size(0, 0), 3);
addWeighted(person, 1.75, image, -0.75, 0, image);
GaussianBlur(person, image, cv::Size(0, 0), 3);    // note: repeats the two lines above;
addWeighted(person, 1.75, image, -0.75, 0, image); // 'person' is unchanged, so this recomputes the same result
// detect key points in the input frame
vector<KeyPoint> kpFrame;
detector.detect(person, kpFrame);
// extract feature descriptors for the detected key points
Mat desFrame;
extractor.compute(person, kpFrame, desFrame);
if(desFrame.empty() or desObject.empty())
return false;
// match the key points with object
FlannBasedMatcher matcher;
vector< vector <DMatch> > matches;
matcher.knnMatch(desObject, desFrame, matches, 2);
// compute the good matches among the matched key points
vector<DMatch> goodMatches;
for(int i=0; i<desObject.rows; i++)
{
if(matches[i][0].distance < 0.6 * matches[i][1].distance)
{
goodMatches.push_back(matches[i][0]);
}
}
if(goodMatches.size() >= 8)
{
vector<Point2f> obj;
vector<Point2f> scene;
for( int i = 0; i < goodMatches.size(); i++ )
{
// get the keypoints from the good matches
obj.push_back( kpObject[ goodMatches[i].queryIdx ].pt );
scene.push_back( kpFrame[ goodMatches[i].trainIdx ].pt );
}
Mat H;
H = findHomography(obj, scene);
vector<Point2f> sceneCorners(4);
perspectiveTransform( objCorners, sceneCorners, H);
// draw lines between the corners (the mapped object in the scene image )
line(person, sceneCorners[0], sceneCorners[1], Scalar(255, 255, 255), 4);
line(person, sceneCorners[1], sceneCorners[2], Scalar(255, 255, 255), 4);
line(person, sceneCorners[2], sceneCorners[3], Scalar(255, 255, 255), 4);
line(person, sceneCorners[3], sceneCorners[0], Scalar(255, 255, 255), 4);
imshow("Person", person);
cout << "[MESSAGE] LOGO DETECTED" << endl;
return true;
}
return false;
}
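Unlike most of the other examples, this one calls findHomography without CV_RANSAC, so a single bad correspondence can skew H. A sketch of the robust variant, which also uses the inlier mask for a stronger acceptance test; the 0.5 inlier fraction is an illustrative threshold, not part of the example:
// RANSAC-based homography with an inlier mask.
Mat inlierMask;
Mat H = findHomography( obj, scene, CV_RANSAC, 3.0, inlierMask );
int inliers = countNonZero( inlierMask );
// Accept the detection only if enough good matches survived RANSAC.
bool accepted = !H.empty() && inliers >= (int)( 0.5 * goodMatches.size() );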
Example 13: main
int main(int argc, char * argv[])
{
if(argc < 2)
{
std::cout << "Use: tracker <target_image>" << std::endl;
return -1;
}
Mat mTarget = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
if( !mTarget.data )
{
std::cout<< "Error reading target image." << std::endl;
return -1;
}
//Detect the keypoints using SURF Detector
int minHessian = 500;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> kpTarget;
detector.detect( mTarget, kpTarget );
//Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat des_object;
extractor.compute( mTarget, kpTarget, des_object );
FlannBasedMatcher matcher;
//VideoCapture cap("http://192.168.1.200/videostream.cgi?user=admin&pwd=31415LAS&resolution=32&dummy=.mjpg");
VideoCapture cap("http://nidq.no-ip.org/videostream.cgi?user=admin&pwd=31415LAS&resolution=32&dummy=.mjpg");
namedWindow("Capture");
std::vector<Point2f> tgt_corners(4);
//Get the corners from the object
tgt_corners[0] = cvPoint(0,0);
tgt_corners[1] = cvPoint( mTarget.cols, 0 );
tgt_corners[2] = cvPoint( mTarget.cols, mTarget.rows );
tgt_corners[3] = cvPoint( 0, mTarget.rows );
char key = 'a';
int framecount = 0;
while (key != 27)
{
Mat frame;
cap >> frame;
if (framecount < 5)
{
framecount++;
continue;
}
Mat des_image, img_matches;
std::vector<KeyPoint> kpImage;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
Mat image;
cvtColor(frame, image, CV_RGB2GRAY);
detector.detect( image, kpImage );
extractor.compute( image, kpImage, des_image );
matcher.knnMatch(des_object, des_image, matches, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if(matches[i].size() >= 2 && matches[i][0].distance < 0.6*(matches[i][1].distance)) // neighbour-count check first (see Example 1)
{
good_matches.push_back(matches[i][0]);
}
}
//Draw only "good" matches
drawMatches( mTarget, kpTarget, image, kpImage, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
if (good_matches.size() >= 4)
{
for( int i = 0; i < good_matches.size(); i++ )
{
//Get the keypoints from the good matches
obj.push_back( kpTarget[ good_matches[i].queryIdx ].pt );
scene.push_back( kpImage[ good_matches[i].trainIdx ].pt );
}
H = findHomography( obj, scene, CV_RANSAC );
perspectiveTransform( tgt_corners, scene_corners, H);
//Draw lines between the corners (the mapped object in the scene image )
//......... some code omitted here .........
Example 14: imageCb
/*
* @brief Callback (the entry point for per-frame video processing)
*/
void imageCb(const sensor_msgs::ImageConstPtr &msg)
{
cv_bridge::CvImagePtr cap;
Mat in_img;
Mat grayframe;
Mat img_matches;
Mat cap_descriptors;
Mat H;
Mat lastimage;
std::vector<KeyPoint> cap_keypoint;
std::vector<std::vector<DMatch> > matches;
std::vector<DMatch> good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
//Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
FlannBasedMatcher matcher;
try{
cap = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
in_img = cap->image;
}
catch(cv_bridge::Exception& e){
ROS_ERROR("cv_brige exception: %s", e.what());
return;
}
if(in_img.empty()) {
std::cerr<<"No capture frame img"<<std::endl;
}
else{
cvtColor(cap->image, grayframe, CV_BGR2GRAY);
computeSURF(grayframe, cap_keypoint, cap_descriptors);
//drawAKAZEKeypoint(&cap->image, cap_keypoint, Scalar(0, 0, 255));
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( in_img.cols, 0 );
obj_corners[2] = cvPoint( in_img.cols, in_img.rows );
obj_corners[3] = cvPoint( 0, in_img.rows );
std::cout<<imagenum<<std::endl;
matcher.knnMatch(cap_descriptors, descriptors_0, matches, 2);
for(int i = 0; i < min(descriptors_0.rows-1,(int) matches.size()); i++){
if(matches[i].size() >= 2 && matches[i][0].distance < 0.6*(matches[i][1].distance)){ // neighbour-count check first (see Example 1)
good_matches.push_back(matches[i][0]);
}
}
drawMatches(grayframe, cap_keypoint, object_img, keypoint_0, good_matches, img_matches);
if(good_matches.size() >= 4){
for(size_t i = 0; i < good_matches.size(); i++){
// queryIdx indexes the query set (cap_keypoint); trainIdx indexes the trained set (keypoint_0)
obj.push_back(keypoint_0[good_matches[i].trainIdx].pt);
scene.push_back(cap_keypoint[good_matches[i].queryIdx].pt);
}
// findHomography needs at least four correspondences, so it belongs inside this branch
H = findHomography(obj, scene, CV_RANSAC);
perspectiveTransform(obj_corners, scene_corners, H); // project obj_corners through the homography
line( img_matches, scene_corners[0] + Point2f( object_img.cols, 0), scene_corners[1] + Point2f( object_img.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( object_img.cols, 0), scene_corners[2] + Point2f( object_img.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( object_img.cols, 0), scene_corners[3] + Point2f( object_img.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( object_img.cols, 0), scene_corners[0] + Point2f( object_img.cols, 0), Scalar( 0, 255, 0), 4 );
}
#ifdef MULTI
for(int i = 0; i < IMAGENUM; i++){
// 'matcher' is a FlannBasedMatcher object (not a Ptr), and match() fills a flat
// std::vector<DMatch>, so a separate variable is needed here.
std::vector<DMatch> flat_matches;
matcher.match(cap_descriptors, trainDescCollection[i], flat_matches);
drawMatches(cap->image, cap_keypoint, trainImgCollection[i], trainPointCollection[i], flat_matches, img_matches);
}
#endif
}
imshow(OPENCV_WINDOW, img_matches);
waitKey(3);
image_pub_.publish(cap->toImageMsg());
}
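The commented-out line near the top of this callback hints at the polymorphic alternative: DescriptorMatcher::create returns a Ptr&lt;DescriptorMatcher&gt;, which is what arrow syntax like the original #ifdef MULTI block expects. A minimal sketch of that style; "FlannBased" and "BruteForce" are the stock factory names in OpenCV 2.x:
// The same matching step through the polymorphic DescriptorMatcher interface.
Ptr<DescriptorMatcher> genericMatcher = DescriptorMatcher::create( "FlannBased" );
std::vector< std::vector< DMatch > > knn;
genericMatcher->knnMatch( cap_descriptors, descriptors_0, knn, 2 ); // note '->': create() returns a Ptr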
Example 15: identifyObject
void identifyObject( Mat& frame, Mat& object, const string& objectName ) {
//Detect the keypoints using SURF Detector
int minHessian = 500;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> kp_object;
detector.detect( object, kp_object );
//Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat des_object;
extractor.compute( object, kp_object, des_object );
FlannBasedMatcher matcher;
//Get the corners from the object
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
// Match descriptors to frame
Mat des_image, img_matches;
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
Mat image;
cvtColor(frame, image, CV_RGB2GRAY);
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
matcher.knnMatch(des_object, des_image, matches, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if(matches[i].size() >= 2 && matches[i][0].distance < 0.6*(matches[i][1].distance)) // neighbour-count check first (see Example 1)
{
good_matches.push_back(matches[i][0]);
}
}
//Draw only "good" matches
drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
if (good_matches.size() >= 4)
{
for( int i = 0; i < good_matches.size(); i++ )
{
//Get the keypoints from the good matches
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
}
H = findHomography( obj, scene, CV_RANSAC );
perspectiveTransform( obj_corners, scene_corners, H);
//Draw lines between the corners (the mapped object in the scene image )
line( frame, scene_corners[0], scene_corners[1], Scalar(0, 255, 0), 4 );
line( frame, scene_corners[1], scene_corners[2], Scalar( 0, 255, 0), 4 );
line( frame, scene_corners[2], scene_corners[3], Scalar( 0, 255, 0), 4 );
line( frame, scene_corners[3], scene_corners[0], Scalar( 0, 255, 0), 4 );
//Label the detected object at the centre of the mapped quadrilateral
//(kept inside this branch so the label is only drawn when scene_corners is valid)
Point2f textPoint = cvPoint( (scene_corners[0].x+scene_corners[1].x+scene_corners[2].x+scene_corners[3].x )/4.0 , (scene_corners[0].y+scene_corners[1].y+scene_corners[2].y+scene_corners[3].y )/4.0 );
putText( frame, objectName, textPoint, FONT_HERSHEY_COMPLEX_SMALL, 1.0, cvScalar(0,250,150), 1, CV_AA );
}
}
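One practical caveat to close on: identifyObject re-detects and re-describes the static reference object on every frame. Since the object never changes, its keypoints, descriptors, and corners can be computed once up front, as the video-loop examples above do. A sketch of that refactoring; the ObjectModel struct and buildModel helper are illustrative names, not from the example:
// Precompute the reference object's features once, outside the per-frame loop.
struct ObjectModel {
    std::vector<KeyPoint> keypoints;
    Mat descriptors;
    std::vector<Point2f> corners;
};

ObjectModel buildModel( const Mat& object, int minHessian = 500 )
{
    ObjectModel m;
    SurfFeatureDetector detector( minHessian );
    detector.detect( object, m.keypoints );
    SurfDescriptorExtractor extractor;
    extractor.compute( object, m.keypoints, m.descriptors );
    m.corners.resize(4);
    m.corners[0] = Point2f( 0, 0 );
    m.corners[1] = Point2f( (float)object.cols, 0 );
    m.corners[2] = Point2f( (float)object.cols, (float)object.rows );
    m.corners[3] = Point2f( 0, (float)object.rows );
    return m;
}
// identifyObject would then take a const ObjectModel& and skip straight to matching.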