本文整理汇总了C++中SurfDescriptorExtractor类的典型用法代码示例。如果您正苦于以下问题:C++ SurfDescriptorExtractor类的具体用法?C++ SurfDescriptorExtractor怎么用?C++ SurfDescriptorExtractor使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了SurfDescriptorExtractor类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: identifyObject
void identifyObject( Mat& frame, Mat& object, const string& objectName ) {
    // Locates `object` inside `frame` using SURF keypoints, a FLANN ratio
    // test and a RANSAC homography; on success draws the detected outline
    // on `frame` and labels its centre with `objectName`.
    //
    // frame      - live image (BGR), annotated in place.
    // object     - reference image of the object to find.
    // objectName - label drawn at the centre of the detected region.

    //-- Detect the keypoints using SURF Detector
    int minHessian = 500;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kp_object;
    detector.detect( object, kp_object );

    //-- Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;
    extractor.compute( object, kp_object, des_object );

    FlannBasedMatcher matcher;

    //-- Corners of the reference object, used to map its outline into the scene
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );

    Mat des_image, img_matches;
    std::vector<KeyPoint> kp_image;
    std::vector<vector<DMatch > > matches;
    std::vector<DMatch > good_matches;
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    std::vector<Point2f> scene_corners(4);
    Mat H;
    Mat image;

    // BUG FIX: OpenCV captures/loads images as BGR, so the correct conversion
    // code is CV_BGR2GRAY (CV_RGB2GRAY applies the luma weights to swapped
    // channels).
    cvtColor(frame, image, CV_BGR2GRAY);
    detector.detect( image, kp_image );
    extractor.compute( image, kp_image, des_image );
    matcher.knnMatch(des_object, des_image, matches, 2);

    //-- Ratio test: keep a match only when the best neighbour is clearly
    //   better than the second best.
    //   BUG FIX: check matches[i].size() BEFORE touching [0]/[1]; the original
    //   read matches[i][1] first and could index past a short entry (the old
    //   "THIS LOOP IS SENSITIVE TO SEGFAULTS" comment).
    for( size_t i = 0; i < matches.size(); i++ )
    {
        if( matches[i].size() == 2 &&
            matches[i][0].distance < 0.6 * matches[i][1].distance )
        {
            good_matches.push_back(matches[i][0]);
        }
    }

    //-- Draw only "good" matches
    drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //-- findHomography needs at least 4 correspondences
    if (good_matches.size() >= 4)
    {
        for( size_t i = 0; i < good_matches.size(); i++ )
        {
            //-- queryIdx indexes the object keypoints, trainIdx the scene keypoints
            obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
            scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
        }

        H = findHomography( obj, scene, CV_RANSAC );
        perspectiveTransform( obj_corners, scene_corners, H);

        //-- Draw lines between the corners (the mapped object in the scene image)
        line( frame, scene_corners[0], scene_corners[1], Scalar(0, 255, 0), 4 );
        line( frame, scene_corners[1], scene_corners[2], Scalar( 0, 255, 0), 4 );
        line( frame, scene_corners[2], scene_corners[3], Scalar( 0, 255, 0), 4 );
        line( frame, scene_corners[3], scene_corners[0], Scalar( 0, 255, 0), 4 );

        //-- BUG FIX: label only when the object was actually found. The
        //   original called putText unconditionally, so a miss drew the label
        //   at the average of the default-constructed (0,0) corners.
        Point2f textPoint( (scene_corners[0].x+scene_corners[1].x+scene_corners[2].x+scene_corners[3].x)/4.0f,
                           (scene_corners[0].y+scene_corners[1].y+scene_corners[2].y+scene_corners[3].y)/4.0f );
        putText( frame, objectName, textPoint, FONT_HERSHEY_COMPLEX_SMALL, 1.0, cvScalar(0,250,150), 1, CV_AA );
    }
}
示例2: main
int main( int argc, char** argv )
{
    // Captures a reference snapshot from the webcam, then continuously
    // matches SURF keypoints between that snapshot and the live feed,
    // visualising the keypoint displacement with the project-provided
    // drawVectors helper. Returns -1 when the camera cannot be opened.
    VideoCapture cap(0);
    if(!cap.isOpened()) // check camera
    {
        string message = "Camera is Broken";
        cout << message << endl;
        return -1;
    }

    Mat frame_1, frame_2, outpt, outpt_kp;
    std::vector<KeyPoint> keypoints_object_1, keypoints_object_2;
    int minHessian = 2000;
    SurfFeatureDetector detector( minHessian );
    namedWindow("frame",1);

    // Take a snapshot from the camera as the first image: show every frame
    // with its keypoints until any key is pressed.
    for(;;)
    {
        cap >> frame_1; // get a new frame from camera
        detector.detect( frame_1, keypoints_object_1 );
        drawKeypoints(frame_1, keypoints_object_1, outpt_kp, Scalar( 0, 255, 255 ), DrawMatchesFlags::DEFAULT );
        imshow("frame", outpt_kp);
        if(waitKey(30) >= 0)
        {
            // Save snapshot.
            imwrite( "./test_img.jpg", frame_1);
            break;
        }
    }

    // ...and then load it back as the reference image.
    Mat reference_image;
    reference_image = imread( "./test_img.jpg", 1 );
    detector.detect( reference_image, keypoints_object_1 );

    // PERF FIX: the extractor, the matcher and the reference-image
    // descriptors are loop-invariant; the original re-created all three and
    // re-described the reference image on every frame.
    SurfDescriptorExtractor extractor;
    FlannBasedMatcher matcher;
    cv::Mat descriptors1;
    extractor.compute(reference_image, keypoints_object_1, descriptors1);

    // Detect keypoint offsets on each frame until any key is pressed.
    for(;;)
    {
        cap >> frame_2; // get a new frame from camera
        detector.detect( frame_2, keypoints_object_2 );
        cv::Mat descriptors2;
        extractor.compute(frame_2, keypoints_object_2, descriptors2);

        // Match keypoints between the reference image and the current frame.
        vector< DMatch > matches;
        matcher.match(descriptors1, descriptors2, matches);

        // Quick calculation of max and min distances between keypoints.
        // ROBUSTNESS FIX: iterate over matches.size() rather than
        // descriptors1.rows; they are normally equal, but indexing `matches`
        // by row count reads out of bounds whenever they differ.
        double max_dist = 0; double min_dist = 100;
        for( size_t i = 0; i < matches.size(); i++ )
        {
            double dist = matches[i].distance;
            if( dist < min_dist ) min_dist = dist;
            if( dist > max_dist ) max_dist = dist;
        }
        printf("-- Max dist : %f \n", max_dist );
        printf("-- Min dist : %f \n", min_dist );

        // Keep only "good" matches (i.e. whose distance is less than 2*min_dist).
        std::vector< DMatch > good_matches;
        for( size_t i = 0; i < matches.size(); i++ )
        {
            if( matches[i].distance < 2*min_dist )
            {
                good_matches.push_back( matches[i]);
            }
        }

        // Show keypoints offset (uncomment one of these below).
        //drawMatches(reference_image, keypoints_object_1, frame_2, keypoints_object_2, good_matches, outpt, Scalar( 0, 255, 255 ), Scalar( 255, 0, 255 ), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
        drawVectors(reference_image, keypoints_object_1, frame_2, keypoints_object_2, good_matches, outpt, Scalar( 0, 255, 255 ), Scalar( 255, 0, 255 ), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
        imshow("frame", outpt);
        if(waitKey(30) >= 0)
        {
            imwrite( "./test_img1.jpg", frame_2);
            break;
        }
    }
    return 0; // was missing; rely on explicit success status
}
示例3: main
/**
* @function main
* @brief Main function
*/
int main( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
Mat img_object = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat img_scene = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
if( !img_object.data || !img_scene.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector.detect( img_object, keypoints_object );
detector.detect( img_scene, keypoints_scene );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute( img_object, keypoints_object, descriptors_object );
extractor.compute( img_scene, keypoints_scene, descriptors_scene );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_object.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{ if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
Mat img_matches;
drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object from img_1 in img_2
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( size_t i = 0; i < good_matches.size(); i++ )
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( obj, scene, CV_RANSAC );
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
std::vector<Point2f> scene_corners(4);
perspectiveTransform( obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
Point2f offset( (float)img_object.cols, 0);
line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );
//-- Show detected matches
imshow( "Good Matches & Object detection", img_matches );
waitKey(0);
return 0;
}
示例4: main
// Webcam SURF object-detection demo: loads "photo.jpg" as the reference
// object, then continuously matches it against the live camera feed and
// draws the detected outline on the match visualisation.
// NOTE: the tail of this function is omitted in this snippet.
int main()
{
Mat object = imread( "photo.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !object.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
//Detect the keypoints using SURF Detector
int minHessian = 500;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> kp_object;
detector.detect( object, kp_object );
//Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat des_object;
extractor.compute( object, kp_object, des_object );
FlannBasedMatcher matcher;
VideoCapture cap(0);
namedWindow("Good Matches");
std::vector<Point2f> obj_corners(4);
//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
// Loop until ESC (27).
// NOTE(review): `key` is never reassigned in the visible code — presumably
// updated from waitKey in the omitted tail; confirm.
char key = 'a';
int framecount = 0;
while (key != 27)
{
Mat frame;
cap >> frame;
// Skip the first few frames while the camera warms up.
if (framecount < 5)
{
framecount++;
continue;
}
Mat des_image, img_matches;
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
Mat image;
// NOTE(review): camera frames are BGR — this likely should be CV_BGR2GRAY;
// confirm before changing.
cvtColor(frame, image, CV_RGB2GRAY);
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
// k-nearest-neighbour matching with k = 2 for the ratio test below.
matcher.knnMatch(des_object, des_image, matches, 2);
// Ratio test: accept a match when the best neighbour is < 0.6x the second.
// NOTE(review): matches[i][1] is read before the size check — unsafe if an
// entry has fewer than 2 neighbours (hence the segfault warning).
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
{
good_matches.push_back(matches[i][0]);
}
}
//Draw only "good" matches
drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
// findHomography needs at least 4 correspondences.
if (good_matches.size() >= 4)
{
for( int i = 0; i < good_matches.size(); i++ )
{
//Get the keypoints from the good matches
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
}
H = findHomography( obj, scene, CV_RANSAC );
perspectiveTransform( obj_corners, scene_corners, H);
//Draw lines between the corners (the mapped object in the scene image ),
//shifted right by the object's width inside the side-by-side view.
line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
}
//Show detected matches
imshow( "Good Matches", img_matches );
//......... (remainder of this function omitted in the source snippet) .........
示例5: main
/** @function main */
int main( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
// Load the images
Mat image1= imread( argv[2] );
Mat image2= imread( argv[1] );
Mat gray_image1;
Mat gray_image2;
// Convert to Grayscale
cvtColor( image1, gray_image1, CV_RGB2GRAY );
cvtColor( image2, gray_image2, CV_RGB2GRAY );
imshow("first image",image2);
imshow("second image",image1);
if( !gray_image1.data || !gray_image2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector< KeyPoint > keypoints_object, keypoints_scene;
detector.detect( gray_image1, keypoints_object );
detector.detect( gray_image2, keypoints_scene );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute( gray_image1, keypoints_object, descriptors_object );
extractor.compute( gray_image2, keypoints_scene, descriptors_scene );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_object.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Use only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{ if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
std::vector< Point2f > obj;
std::vector< Point2f > scene;
for( int i = 0; i < good_matches.size(); i++ )
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
// Find the Homography Matrix
Mat H = findHomography( obj, scene, CV_RANSAC );
// Use the Homography Matrix to warp the images
cv::Mat result;
warpPerspective(image1,result,H,cv::Size(image1.cols+image2.cols,image1.rows));
cv::Mat half(result,cv::Rect(0,0,image2.cols,image2.rows));
image2.copyTo(half);
imshow( "Result", result );
waitKey(0);
return 0;
}
示例6: main
// Flower-image feature-extraction tool: parses /c= (colour) and /s= (shape)
// switches, counts the images listed in flowerPicDB\files.txt, and allocates
// per-image arrays for SURF shape features and HSV colour histograms.
// NOTE: the tail of this function is omitted in this snippet.
int main(int argc, char *argv[])
{
// Timer start.
clock_t tmStart = clock();
// Argument variable.
// ==================
bool bUseColorFeature = false;
int iColorFeature = 0;
bool bUseShapeFeature = false;
int iShapeFeature = 0;
// ==================
// Checking correctness of all arguments.
// Switches look like /c=<n> and /s=<n>; anything else prints usage.
// ======================================
if(argc == 1)
{
showHowToUse(argv[0]);
}
if(argc > 3)
{
cout << "You gave exceeded number of arguments.\n";
exit(1);
}
for(int i=1;i<argc;i++)
{
string strTemp = argv[i];
if(strTemp.at(0) != '/')
showHowToUse(argv[0]);
if(strTemp.at(1) == 'c' && strTemp.at(2) == '=')
{
bUseColorFeature = true;
iColorFeature = atoi(strTemp.substr(3).c_str());
}
if(strTemp.at(1) == 's' && strTemp.at(2) == '=')
{
bUseShapeFeature = true;
iShapeFeature = atoi(strTemp.substr(3).c_str());
}
}
// ======================================
// Paths use Windows separators — this tool is Windows-only as written.
string strDirFlowerDB = "flowerPicDB\\"; // The folder of flower images.
string strDirDescriptionDB = "descriptionDB\\"; // The folder of features output.
string strFNameFlowerDB = strDirFlowerDB + "files.txt"; // List of flowers' name.
string strFNameFlower; // Indexer.
string strFNameDesc; // Indexer.
string strFNameDescTemp;
ifstream inFile;
int count = 0; // Number of image files.
string::size_type idx; // A position of '.'.
SurfFeatureDetector surf(2500.);
SurfDescriptorExtractor surfDesc;
HistogramHSV hsvObj;
// Find number of photos by counting whitespace-separated names in files.txt.
// ======================
inFile.open(strFNameFlowerDB.c_str());
if(!inFile.is_open()) cout << "Can't open file " << strFNameFlowerDB << endl;
while(inFile >> strFNameFlower)
{
count++;
}
inFile.close();
inFile.clear(); // This must be call clear() before it will be made second call
// otherwise the command will finished immediately.
cout << "Number of flower photo = " << count << endl << endl;
// Extract features of all photos in the DB.
// NOTE(review): raw new[] with no visible delete[] — whether these leak
// depends on the omitted tail of the function; confirm.
// =========================================
// Array of image in DB.
Mat *imgFlowerDB = new Mat[count];
// Array of shape feature (only allocated when /s= was given; otherwise the
// pointers stay uninitialized — callers below must guard on bUseShapeFeature).
vecKey *keypointDB;
Mat *descriptorDB;
if(bUseShapeFeature)
{
keypointDB = new vecKey[count];
descriptorDB = new Mat[count];
}
// Array of colour feature (same conditional-allocation pattern as above).
MatND *hueHistogram;
MatND *saturationHistogram;
MatND *valueHistogram;
if(bUseColorFeature)
{
hueHistogram = new MatND[count];
saturationHistogram = new MatND[count];
valueHistogram = new MatND[count];
}
// File pointer of output file.
FileStorage outDescFileSurf,outDescFileH,outDescFileS,outDescFileV;
//......... (remainder of this function omitted in the source snippet) .........
示例7: main
// Image-alignment tool: SURF-matches two images, estimates a homography with
// RANSAC, warps the first image onto the second, and shows/saves blended
// comparison images.
// NOTE: the tail of this function is omitted in this snippet.
int main(int argc, char *argv[])
{
// NOTE(review): the usage string mentions a single [target_file] but the
// check requires two image paths — confirm the intended usage text.
// NOTE(review): _tprintf/TEXT with narrow char* argv mixes TCHAR and narrow
// strings — verify the build's character-set configuration.
if( argc != 3 )
{
_tprintf(TEXT("Usage: %s [target_file]\n"), argv[0]);
return 0;
}
Mat img1 = imread(argv[1], 1);
Mat img2 = imread(argv[2], 1);
if(img1.empty() || img2.empty())
{
printf("Can't read one of the images\n");
return -1;
}
//MATCHING PHASE// //MATCHING PHASE// //MATCHING PHASE//
// detecting keypoints
SurfFeatureDetector detector(400);
vector<KeyPoint> keypoints1, keypoints2;
detector.detect(img1, keypoints1);
detector.detect(img2, keypoints2);
// computing descriptors
SurfDescriptorExtractor extractor;
Mat descriptors1, descriptors2;
extractor.compute(img1, keypoints1, descriptors1);
extractor.compute(img2, keypoints2, descriptors2);
// matching descriptors
FlannBasedMatcher matcher;
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
// Find the smallest descriptor distance over all matches.
float min = 10000;
for(int i=0; i<matches.size(); i++)
{
if(matches[i].distance < min)
{
min = matches[i].distance;
}
}
// Keep matches within 2x the minimum distance.
vector<DMatch> good_matches;
for(int i=0; i<matches.size(); i++)
{
if(matches[i].distance <= 2*min)
{
good_matches.push_back(matches[i]);
}
}
// drawing the results
namedWindow("matches", 0);
resizeWindow("matches",1280,360);
Mat img_matches;
drawMatches(img1, keypoints1, img2, keypoints2, good_matches, img_matches);
imshow("matches", img_matches);
imwrite("matches.jpg", img_matches);
waitKey(0);
//END OF MATCHING PHASE// //END OF MATCHING PHASE// //END OF MATCHING PHASE//
//HOMOGRAPHY CALCULATION// //HOMOGRAPHY CALCULATION// //HOMOGRAPHY CALCULATION//
// NOTE(review): the homography is built from ALL matches, not good_matches —
// RANSAC tolerates the outliers, but confirm this was intentional.
vector<Point2f> pts_img1,pts_img2;
for(int i=0; i < matches.size(); i++)
{
pts_img1.push_back(keypoints1[matches[i].queryIdx].pt);
pts_img2.push_back(keypoints2[matches[i].trainIdx].pt);
}
Mat homography = findHomography(pts_img1,pts_img2,CV_RANSAC,3);
cout << "H = " << endl << " " << homography << endl << endl;
waitKey(0);
printf("homography\n\t.rows : %d\n\t.cols : %d",homography.rows,homography.cols);
// Warp img1 into img2's frame and build two 50/50 blends: img1 vs its warp
// ("shifted") and img2 vs the warp ("compared").
Mat img1_transformed;
warpPerspective(img1,img1_transformed,homography,img1.size(),INTER_LINEAR,0,0);
double alpha = 0.5;
double beta = 1.0 - alpha;
Mat shifted;
Mat compared;
addWeighted(img1,alpha,img1_transformed,beta,0.0,shifted);
addWeighted(img2,alpha,img1_transformed,beta,0.0,compared);
namedWindow("transformed",0);
resizeWindow("transformed",960,540);
imshow("transformed",img1_transformed);
namedWindow("shifted",0);
resizeWindow("shifted",960,540);
//......... (remainder of this function omitted in the source snippet) .........
示例8: main
// SURF object detection plus a 2D-to-3D data-processing step: matches the
// object image (argv[1]) against the scene (argv[2]), draws the detected
// quadrilateral, and hands the matched point sets to DataProcess2TO3.
// NOTE: the tail of this function is omitted in this snippet;
// sample_obj/sample_scene, obj_corners and scene_corners are globals
// declared elsewhere in the file.
int main(int argc, char** argv)
{
if (argc != 3){
readme(); return -1;
}
Mat img_object = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
Mat img_scene = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
// -- getting sample(Because of canny)
// NOTE(review): cvLoadImage returns IplImage* that is never released in the
// visible code — confirm cleanup happens in the omitted tail.
sample_obj = cvLoadImage(argv[1], 1);
sample_scene = cvLoadImage(argv[2], 1);
if (!img_object.data || !img_scene.data){
std::cout << " --(!) Error reading images " << std::endl; return -1;
}
//-- Step 1: Detect the keypoints using SURF Detector****************************************************
int minHessian = 20; //Hessian critical value basic 200
SurfFeatureDetector detector(minHessian);
std::vector<KeyPoint> keypoints_object, keypoints_scene; // keypoints(cv::KeyPoint)
detector.detect(img_object, keypoints_object); // 1
detector.detect(img_scene, keypoints_scene); // 2
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute(img_object, keypoints_object, descriptors_object);
extractor.compute(img_scene, keypoints_scene, descriptors_scene);
//-- Step 3: Matching descriptor vectors using FLANN matcher // matching keypoints
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match(descriptors_object, descriptors_scene, matches);
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_object.rows; i++){
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist ) // good matches (the real keypoints)*************************
std::vector< DMatch > good_matches;
for (int i = 0; i < descriptors_object.rows; i++){
if (matches[i].distance < 3 * min_dist){
good_matches.push_back(matches[i]);
}
}
Mat img_matches;
drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Localize the object
std::vector<Point2f> obj; // the keypoint's coordinate <x, y value>
std::vector<Point2f> scene;
for (int i = 0; i < good_matches.size(); i++){
//-- Get the keypoints from the good matches
obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
}
Mat H = findHomography(obj, scene, CV_RANSAC);
//-- Get the corners from the image_1 ( the object to be "detected" )
// (obj_corners / scene_corners are file-level globals.)
obj_corners[0] = cvPoint(0, 0);
obj_corners[1] = cvPoint(img_object.cols, 0);
obj_corners[2] = cvPoint(img_object.cols, img_object.rows);
obj_corners[3] = cvPoint(0, img_object.rows);
perspectiveTransform(obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line(img_matches, scene_corners[0] + Point2f(img_object.cols, 0), scene_corners[1] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[1] + Point2f(img_object.cols, 0), scene_corners[2] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[2] + Point2f(img_object.cols, 0), scene_corners[3] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[3] + Point2f(img_object.cols, 0), scene_corners[0] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
// -- param process (Point2f to Point3f)
DataProcess2TO3(obj, scene);
//-- Show detected matches
imshow("Sample(Good Matches & Object detection)", img_matches);
// -- param analysis
std::ofstream fs("data.txt");
//......... (remainder of this function omitted in the source snippet) .........
示例9: main
//......... (this snippet begins mid-function: the opening of main, the
// stereo-matching setup and the computation of disp/disp8/Q are omitted) .........
imshow("disparity", disp8);
imwrite("disparity.jpg",disp8);
printf("press any key to continue...");
fflush(stdout);
waitKey();
printf("\n");
}
// Optionally persist the disparity map and the reprojected 3D point cloud.
if(disparity_filename)
imwrite(disparity_filename, disp8);
if(point_cloud_filename)
{
printf("storing the point cloud...");
fflush(stdout);
Mat xyz;
// Reproject disparity to 3D using the Q matrix computed earlier (not visible here).
reprojectImageTo3D(disp, xyz, Q, true);
saveXYZ(point_cloud_filename, xyz);
printf("\n");
}
///////////////////////////////////////////////////////FANCY STUFF////////////////////////////////////
//-- Step 1: Detect the keypoints using SURF Detector
printf("starting fancy fancy stuff");
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( leftImage, keypoints_1 );
detector.detect( rightImage, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( leftImage, keypoints_1, descriptors_1 );
extractor.compute( rightImage, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{ if( matches[i].distance <= 2*min_dist )
{ good_matches.push_back( matches[i]); }
}
示例10: findObjectSURF
bool findObjectSURF( cv::Mat objectMat, cv::Mat sceneMat, int hessianValue )
{
    // Returns true when objectMat is found inside sceneMat via SURF
    // keypoints, Lowe's nearest-neighbour distance-ratio test and a RANSAC
    // homography; on success the mapped outline is drawn onto objectMat.
    //
    // objectMat    - reference image of the object (drawn on when found).
    // sceneMat     - image searched for the object.
    // hessianValue - SURF Hessian threshold for keypoint detection.
    bool objectFound = false;
    float nndrRatio = 0.7f;

    // Keypoint containers: O = object, S = scene.
    vector< cv::KeyPoint > keypointsO;
    vector< cv::KeyPoint > keypointsS;
    Mat descriptors_object, descriptors_scene;

    //-- Step 1: Extract keypoints
    SurfFeatureDetector surf(hessianValue);
    surf.detect(sceneMat,keypointsS);
    if(keypointsS.size() < 7) return false; //Not enough keypoints, object not found
    surf.detect(objectMat,keypointsO);
    if(keypointsO.size() < 7) return false; //Not enough keypoints, object not found

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    extractor.compute( sceneMat, keypointsS, descriptors_scene );
    // BUG FIX: was `keypointso` (lowercase o) — an undeclared identifier
    // that broke compilation.
    extractor.compute( objectMat, keypointsO, descriptors_object );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    // BUG FIX: removed a stray `descriptors_scene.size(), ...);` statement
    // fragment (leftover of a deleted debug printf) that broke compilation.
    cv::FlannBasedMatcher matcher;
    std::vector<std::vector<cv::DMatch> > matches;
    matcher.knnMatch( descriptors_object, descriptors_scene, matches, 2 );

    //-- Nearest-neighbour distance-ratio (NNDR) filtering.
    vector< cv::DMatch > good_matches;
    good_matches.reserve(matches.size());
    for (size_t i = 0; i < matches.size(); ++i)
    {
        if (matches[i].size() < 2)
            continue; // need two neighbours for the ratio test
        const cv::DMatch &m1 = matches[i][0];
        const cv::DMatch &m2 = matches[i][1];
        if(m1.distance <= nndrRatio * m2.distance)
            good_matches.push_back(m1);
    }

    // At least 7 surviving matches are required to declare a detection
    // (findHomography itself needs only 4; 7 guards against spurious hits).
    if( (good_matches.size() >=7))
    {
        std::cout << "OBJECT FOUND!" << std::endl;
        std::vector< cv::Point2f > obj;
        std::vector< cv::Point2f > scene;
        for( unsigned int i = 0; i < good_matches.size(); i++ )
        {
            //-- queryIdx indexes object keypoints, trainIdx scene keypoints
            obj.push_back( keypointsO[ good_matches[i].queryIdx ].pt );
            scene.push_back( keypointsS[ good_matches[i].trainIdx ].pt );
        }
        Mat H = findHomography( obj, scene, CV_RANSAC );

        //-- Map the object's corners into the scene
        std::vector< Point2f > obj_corners(4);
        obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( objectMat.cols, 0 );
        obj_corners[2] = cvPoint( objectMat.cols, objectMat.rows ); obj_corners[3] = cvPoint( 0, objectMat.rows );
        std::vector< Point2f > scene_corners(4);
        perspectiveTransform( obj_corners, scene_corners, H);

        //-- Draw the mapped outline. NOTE(review): `color` is not declared
        //   in this function — presumably a file-level global; confirm.
        line( objectMat, scene_corners[0] , scene_corners[1], color, 2 ); //TOP line
        line( objectMat, scene_corners[1] , scene_corners[2], color, 2 );
        line( objectMat, scene_corners[2] , scene_corners[3], color, 2 );
        line( objectMat, scene_corners[3] , scene_corners[0] , color, 2 );
        objectFound=true;
    } else {
        std::cout << "OBJECT NOT FOUND!" << std::endl;
    }

    std::cout << "Matches found: " << matches.size() << std::endl;
    std::cout << "Good matches found: " << good_matches.size() << std::endl;
    return objectFound;
}
示例11: match
int match(Mat img_1)
{
    // Matches SURF keypoints between img_1 and a fixed reference image and
    // appends to the global `data` array the percentage of good matches
    // whose location in the reference image (img_2) falls inside the global
    // eye-region box (eye_x1/eye_x2/eye_y1/eye_y2).
    // Returns 0 on success, -1 when either image is unreadable.
    Mat img_2 = imread( "/home/ankur/Desktop/data/new.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    if( !img_1.data || !img_2.data )
    { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;
    float good=0,total=0;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    detector.detect( img_1, keypoints_1 );
    detector.detect( img_2, keypoints_2 );

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_1, descriptors_2;
    extractor.compute( img_1, keypoints_1, descriptors_1 );
    extractor.compute( img_2, keypoints_2, descriptors_2 );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_1, descriptors_2, matches );

    //-- Quick calculation of max and min distances between keypoints
    double max_dist = 0; double min_dist = 100;
    for( int i = 0; i < descriptors_1.rows; i++ )
    { double dist = matches[i].distance;
      if( dist < min_dist ) min_dist = dist;
      if( dist > max_dist ) max_dist = dist;
    }

    //-- Keep only "good" matches (distance <= 2*min_dist)
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        if( matches[i].distance <= 2*min_dist )
        {
            good_matches.push_back( matches[i]);
        }
    }

    //-- Draw only "good" matches (kept for optional display below)
    Mat img_matches;
    drawMatches( img_1, keypoints_1, img_2, keypoints_2,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
    //imshow( "Good Matches", img_matches );

    //-- Count the good matches that land inside the eye box in img_2.
    float x,y;
    for( int i = 0; i < (int)good_matches.size(); i++ )
    {
        // BUG FIX: keypoints_2 must be indexed with trainIdx — queryIdx
        // indexes keypoints_1 (the query image) and read the wrong, possibly
        // out-of-range, keypoint.
        x= keypoints_2[good_matches[i].trainIdx].pt.x;
        y= keypoints_2[good_matches[i].trainIdx].pt.y;
        total++;
        if(y<eye_y1 && y>eye_y2)
            if(x<eye_x1 && x>eye_x2)
                good++;
    }

    // ROBUSTNESS FIX: guard against 0/0 = NaN when there were no good matches.
    data[k++] = (total > 0) ? (good/total)*100 : 0;
    return 0;
}
示例12: prev_
bool TrackerForProject::filterRANSAC(cv::Mat newFrame_, vector<Point2f> &corners, vector<Point2f> &nextCorners)
{
    // RANSAC-filters SURF matches between the tracked patch of the previous
    // frame (prevFrame_(position_)) and the new frame. Fills `corners` with
    // the surviving previous-frame points (frame coordinates) and
    // `nextCorners` with their new-frame counterparts, refreshes the member
    // keypoint set (patch coordinates), and returns false when no inlier
    // keypoints survive.
    int ransacReprojThreshold = 3;
    cv::Mat prev_(prevFrame_(position_));
    cv::Mat new_(newFrame_); // shares data with newFrame_ (cv::Mat header copy)

    // Detect keypoints in both images (keypoints1 is a class member).
    SurfFeatureDetector detector;
    detector.detect(prev_, keypoints1);
    vector<KeyPoint> keypoints2;
    detector.detect(new_, keypoints2);

    // Compute descriptors.
    SurfDescriptorExtractor extractor;
    Mat descriptors1;
    extractor.compute(prev_, keypoints1, descriptors1);
    Mat descriptors2;
    extractor.compute(newFrame_, keypoints2, descriptors2);

    // Brute-force matching, previous patch -> new frame.
    BFMatcher matcher;
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);
    std::cout << matches.size() << std::endl;

    // Collect the matched point pairs IN MATCH ORDER — points1[i] and
    // points2[i] correspond to matches[i].
    vector<Point2f> points1, points2;
    for (size_t i = 0; i < matches.size(); i++)
    {
        points1.push_back(keypoints1[matches[i].queryIdx].pt);
        points2.push_back(keypoints2[matches[i].trainIdx].pt);
    }

    Mat H = findHomography(Mat(points1), Mat(points2), CV_RANSAC, ransacReprojThreshold);

    // Project the previous-frame points through the homography to measure
    // each match's reprojection error.
    Mat points1Projected;
    perspectiveTransform(Mat(points1), points1Projected, H);

    vector<KeyPoint> keypoints3;
    for(size_t i = 0; i < matches.size(); i++)
    {
        // BUG FIX: points1Projected is ordered by match index i (it mirrors
        // points1), NOT by queryIdx — indexing it with matches[i].queryIdx
        // read the wrong, possibly out-of-range, row.
        Point2f p1 = points1Projected.at<Point2f>(i);
        Point2f p2 = keypoints2.at(matches[i].trainIdx).pt;
        // Accept the match when the reprojection error is within threshold
        // AND the new point lies within the tracked box grown by 10 px.
        if(((p2.x - p1.x) * (p2.x - p1.x) +
            (p2.y - p1.y) * (p2.y - p1.y) <= ransacReprojThreshold * ransacReprojThreshold)&& ((p2.x > position_.x - 10)
            && (p2.x < position_.x + position_.width + 10) && (p2.y > position_.y - 10) &&(p2.y < position_.y + position_.height + 10)) )
        {
            corners.push_back(keypoints1.at(matches[i].queryIdx).pt);
            nextCorners.push_back(keypoints2.at(matches[i].trainIdx).pt);
            keypoints3.push_back(keypoints2.at(matches[i].trainIdx));
        }
    }

    // Shift the accepted previous-frame corners from patch to frame coordinates.
    for(size_t i = 0; i < corners.size(); i++)
    {
        corners[i].x += position_.x;
        corners[i].y += position_.y;
    }

    // Carry the surviving new-frame keypoints into the next iteration,
    // re-expressed in patch coordinates.
    keypoints1 = keypoints3;
    for(size_t i = 0; i < keypoints1.size(); i++)
    {
        keypoints1[i].pt.x -= position_.x;
        keypoints1[i].pt.y -= position_.y;
    }

    return !keypoints1.empty();
}
示例13: main
/**
 * @function main
 * @brief SURF feature-matching demo: matches keypoints between two grayscale
 *        images, shows the good matches and prints their index pairs.
 * NOTE: this snippet ends mid-function; the closing statements are not visible.
 */
int main( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{ if( matches[i].distance <= 2*min_dist )
{ good_matches.push_back( matches[i]); }
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
imshow( "Good Matches", img_matches );
// Print each good match's keypoint index pair (query -> image 1, train -> image 2).
for( int i = 0; i < (int)good_matches.size(); i++ )
{ printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
waitKey(0);
return 0;
}
示例14: cvNamedWindow
// OpenCV display/processing thread for a Freenect (Kinect) capture loop.
// Polls the global depth/RGB IplImages (presumably filled elsewhere by
// libfreenect callbacks under mutex_depth/mutex_rgb -- confirm against the
// capture code), shows a false-colored depth image, and draws SURF matches
// between the previous and current RGB frames until ESC is pressed.
void *cv_threadfunc (void *ptr)
{
cvNamedWindow( FREENECTOPENCV_WINDOW_D, CV_WINDOW_AUTOSIZE );
cvNamedWindow( FREENECTOPENCV_WINDOW_N, CV_WINDOW_AUTOSIZE );
// NOTE(review): these three IplImages are never released (no cvReleaseImage
// before pthread_exit) -- leaked at thread shutdown.
depthimg = cvCreateImage(cvSize(FREENECTOPENCV_DEPTH_WIDTH, FREENECTOPENCV_DEPTH_HEIGHT), IPL_DEPTH_8U, FREENECTOPENCV_DEPTH_DEPTH);
rgbimg = cvCreateImage(cvSize(FREENECTOPENCV_RGB_WIDTH, FREENECTOPENCV_RGB_HEIGHT), IPL_DEPTH_8U, FREENECTOPENCV_RGB_DEPTH);
tempimg = cvCreateImage(cvSize(FREENECTOPENCV_RGB_WIDTH, FREENECTOPENCV_RGB_HEIGHT), IPL_DEPTH_8U, FREENECTOPENCV_RGB_DEPTH);
// Frame counter; incremented at the end of each iteration but never read.
int index=0;
// use image polling
while (1) {
//lock mutex for depth image
pthread_mutex_lock( &mutex_depth );
// Replicate the 8-bit depth map into 3 channels, then reinterpret it as HSV
// to get a false-color rendering -- presumably an intentional colorizing
// trick; confirm the intended visualization.
cvCvtColor(depthimg,tempimg,CV_GRAY2BGR);
cvCvtColor(tempimg,tempimg,CV_HSV2BGR);
cvShowImage(FREENECTOPENCV_WINDOW_D,tempimg);
//unlock mutex for depth image
pthread_mutex_unlock( &mutex_depth );
//lock mutex for rgb image
pthread_mutex_lock( &mutex_rgb );
// Convert the RGB frame into tempimg (reusing the depth scratch buffer).
cvCvtColor(rgbimg,tempimg,CV_BGR2RGB);
//-- Step 0: Initialization
// img is a cv::Mat header over tempimg's buffer (no copy): it aliases the
// scratch image and is overwritten by the cvCvtColor above on every loop.
img=tempimg;
//-- Step 1: Detect the keypoints using SURF Detector&-- Step 2: Calculate descriptors (feature vectors)
int minHessian = 1000;
SurfFeatureDetector detector( minHessian );
SurfDescriptorExtractor extractor;
// One-time bootstrap: seed the "previous frame" state from the first frame.
// NOTE(review): img_old also wraps tempimg here, so on the first iteration
// the frame is matched against itself (img and img_old share one buffer).
if(first_time)
{
img_old=tempimg;
detector.detect( img_old, keypoints_old );
extractor.compute( img_old, keypoints_old, descriptors_old );
first_time=false;
}
detector.detect( img, keypoints );
extractor.compute( img, keypoints, descriptors );
//printf("--keypoints: %d, %d \n", keypoints_old.size(), keypoints.size());
//-- Step 3: Matching descriptor vectors using FLANN matcher
// Matches previous-frame descriptors (query) against current (train).
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_old, descriptors, matches );
//printf("--Matches: %d\n", matches.size());
//-- PS.- radiusMatch can also be used here.
//-- Draw only matches
Mat img_matches;
drawMatches( img_old, keypoints_old, img, keypoints,
matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
imshow(FREENECTOPENCV_WINDOW_N, img_matches);
// Carry the current frame forward as "previous" for the next iteration.
// clone() gives img_old its own buffer, detached from tempimg.
img_old=img.clone();
keypoints_old=keypoints;
descriptors_old=descriptors;
//unlock mutex
pthread_mutex_unlock( &mutex_rgb );
// wait for quit key: ESC (27) leaves the polling loop
if( cvWaitKey( 15 )==27 ) break;
index++;
}
pthread_exit(NULL);
}
示例15: main
//.........这里部分代码省略.........
Mat scale = Mat::eye(3, 3, CV_64F);
scale.at<double>(1,1) = 1 / s;
cout << "detect features points" << endl;
H_wi = H*scale;
//test H_w0
Mat mp0 = (Mat_<double>(3,1) << 0, 0, 1);
Mat mp1 = (Mat_<double>(3,1) << 0, s, 1);
Mat mp2 = (Mat_<double>(3,1) << 1, s, 1);
Mat mp3 = (Mat_<double>(3,1) << 1, 0, 1);
Point p0 = transform_corner(H_wi, mp0);
Point p1 = transform_corner(H_wi, mp1);
Point p2 = transform_corner(H_wi, mp2);
Point p3 = transform_corner(H_wi, mp3);
cout << "Testing H_w0..." << endl;
circle(image, p0, 4, Scalar(255,255,255), -1);
circle(image, p1, 4, Scalar(255,255,0), -1);
circle(image, p2, 4, Scalar(0,0,0), -1);
circle(image, p3, 4, Scalar(255,0,255), -1);
imshow("Click Points", image);
cout << "H_w0*p:" << endl
<< "(x,y)" << endl
<< "(" << p0.x << "," << p0.y << ")" << endl
<< "(" << p1.x << "," << p1.y << ")" << endl
<< "(" << p2.x << "," << p2.y << ")" << endl
<< "(" << p3.x << "," << p3.y << ")" << endl;
check = 1;
//GoodFeaturesToTrackDetector detector(500, 0.01, 1, 3, true, 0.04);
SurfFeatureDetector detector(400);
vector<KeyPoint> keypoints_0, keypoints_next;
detector.detect(image, keypoints_0);
//BriefDescriptorExtractor extractor;
//FREAK* extractor = new FREAK();
SurfDescriptorExtractor extractor;
Mat descriptors_0, descriptors_next;
extractor.compute(image, keypoints_0, descriptors_0);
//extractor.compute(image, keypoints_0, descriptors_0);
//FlannBasedMatcher matcher;
BFMatcher matcher( NORM_L2, true);
//BFMatcher matcher(NORM_HAMMING, true);
char key = 0;
Mat H_ii1, H_wi1;
Point p0_1, p1_1, p2_1, p3_1;
while (check)
{
key = waitKey(1);
switch(key) {
case 'q':
check = 0;
break;
case 'f':
cap >> image_next;
H_ii1 = find_next_homography(image, image_next, keypoints_0, descriptors_0,
detector, extractor, matcher, keypoints_next, descriptors_next);
H_wi1 = H_ii1 * H_wi;
p0_1 = transform_corner(H_wi1, mp0);
p1_1 = transform_corner(H_wi1, mp1);
p2_1 = transform_corner(H_wi1, mp2);
p3_1 = transform_corner(H_wi1, mp3);
drawPlane(image_next, p0_1, p1_1, p2_1, p3_1);
imshow("H_ii1", image_next);
keypoints_0 = keypoints_next;
descriptors_0 = descriptors_next;
image = image_next;
H_wi = H_wi1;
default:
break;
}
cap >> image_next;
H_ii1 = find_next_homography(image, image_next, keypoints_0, descriptors_0,
detector, extractor, matcher, keypoints_next, descriptors_next);
H_wi1 = H_ii1 * H_wi;
p0_1 = transform_corner(H_wi1, mp0);
p1_1 = transform_corner(H_wi1, mp1);
p2_1 = transform_corner(H_wi1, mp2);
p3_1 = transform_corner(H_wi1, mp3);
drawPlane(image_next, p0_1, p1_1, p2_1, p3_1);
imshow("H_ii1", image_next);
keypoints_0 = keypoints_next;
descriptors_0 = descriptors_next;
image = image_next;
H_wi = H_wi1;
}
waitKey(0);
return 0;
}