This article collects typical usage examples of the C++ method BFMatcher::match. If you have been wondering exactly what BFMatcher::match does, how to call it, or what real uses of it look like, the curated code examples below may help. You can also read further about the containing class, BFMatcher.
The following shows 14 code examples of the BFMatcher::match method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
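Before turning to the examples, here is a minimal, self-contained sketch of the call pattern they all share: detect keypoints, compute descriptors for both images, and pass the two descriptor matrices to BFMatcher::match, which fills a vector<DMatch> whose queryIdx/trainIdx fields index back into the first and second keypoint sets respectively. The image file names and the choice of ORB are illustrative placeholders, not taken from any of the examples below.

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
    // Placeholder file names -- replace with your own images.
    Mat img1 = imread("left.jpg", IMREAD_GRAYSCALE);
    Mat img2 = imread("right.jpg", IMREAD_GRAYSCALE);
    if (img1.empty() || img2.empty()) { cerr << "Error reading images" << endl; return -1; }

    // ORB ships with the core features2d module, so no xfeatures2d dependency is needed.
    Ptr<ORB> orb = ORB::create(500);
    vector<KeyPoint> kp1, kp2;
    Mat desc1, desc2;
    orb->detectAndCompute(img1, noArray(), kp1, desc1);
    orb->detectAndCompute(img2, noArray(), kp2, desc2);

    // ORB descriptors are binary, so Hamming distance is the appropriate norm;
    // crossCheck=true keeps only mutually best matches.
    BFMatcher matcher(NORM_HAMMING, true);
    vector<DMatch> matches;
    matcher.match(desc1, desc2, matches);   // queryIdx -> kp1, trainIdx -> kp2

    cout << "Found " << matches.size() << " matches" << endl;

    Mat vis;
    drawMatches(img1, kp1, img2, kp2, matches, vis);
    imshow("matches", vis);
    waitKey(0);
    return 0;
}

For float descriptors such as SIFT or SURF, which most of the examples below use, construct the matcher with the default NORM_L2 instead of NORM_HAMMING.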
Example 1: findMatches
int findMatches(Mat img1, Mat img2, vector<KeyPoint>& keypoints1, vector<KeyPoint>& keypoints2, Mat descriptors1, Mat descriptors2, BFMatcher matcher, vector<Point2f>& finalPoint1,
vector<Point2f>& finalPoint2, double passRatio, vector<KeyPoint>& keypointsOut) {
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
vector<char> matchesMask(matches.size(), 0);
// Find max distance
double maxDistance = 0;
for (int idx = 0; idx < matches.size(); idx++) {
if (matches[idx].distance > maxDistance)
maxDistance = matches[idx].distance;
}
// Keep only matches within passRatio of the max distance; cut out the rest
for (int idx = 0; idx < matches.size(); idx++) {
if (matches[idx].distance <= (maxDistance*passRatio))
matchesMask[idx] = 1;
}
#ifdef DEBUG
namedWindow("Matches", CV_WINDOW_AUTOSIZE);
Mat img_matches;
drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches, Scalar::all(-1), Scalar::all(-1), matchesMask, 2);
while (1) {
imshow("Matches", img_matches);
int keypress = waitKey(30);
if (keypress == 32) {
break;
}
}
#endif
// Output final points as well as a new vector of keypoints
for (int idx = 0; idx < matches.size(); idx++) {
if (matchesMask[idx]) {
finalPoint1.push_back(keypoints1[matches[idx].queryIdx].pt);
finalPoint2.push_back(keypoints2[matches[idx].trainIdx].pt);
keypointsOut.push_back(keypoints2[matches[idx].trainIdx]);
}
}
return 0;
}
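Example 1 keeps every match whose distance is within passRatio of the largest observed distance. A common alternative, which Example 8 further down also applies, is Lowe's ratio test built on BFMatcher::knnMatch instead of match. The helper below is a minimal sketch of that filtering step; the function name ratioTestMatch and the 0.75 threshold are illustrative assumptions, and float descriptors (e.g. SIFT/SURF) are assumed, hence NORM_L2.

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <vector>

using namespace cv;
using namespace std;

// Keep only matches that pass Lowe's ratio test. crossCheck must stay disabled
// because knnMatch with k=2 needs both nearest neighbours per query descriptor.
static vector<DMatch> ratioTestMatch(const Mat& desc1, const Mat& desc2, float ratio = 0.75f)
{
    BFMatcher matcher(NORM_L2, false);
    vector<vector<DMatch>> knn;
    matcher.knnMatch(desc1, desc2, knn, 2);   // two best candidates per query descriptor

    vector<DMatch> good;
    for (size_t i = 0; i < knn.size(); ++i)
        if (knn[i].size() == 2 && knn[i][0].distance < ratio * knn[i][1].distance)
            good.push_back(knn[i][0]);        // best match clearly beats the runner-up
    return good;
}

Unlike the max-distance cutoff above, the ratio test adapts to each query descriptor individually, which tends to reject ambiguous matches in repetitive texture.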
Example 2: findTopFiveBFMatches
void findTopFiveBFMatches(Mat hqDesc, vector<Mat>* keyframeDesc, vector<vector< DMatch >>* matchVec, vector<int>* matchIndices){
BFMatcher matcher;
int index = 0;
//Calculate matches between the high-quality image and each keyframe descriptor set
for (vector<Mat>::iterator it = keyframeDesc->begin(); it != keyframeDesc->end(); ++it){
vector< DMatch > matches;
//calculate initial matches
Mat kfDesc = *it;
matcher.match(hqDesc, kfDesc, matches);
matchVec->push_back(matches);
index++;
}
//pickTopFive
pickTopFive(matchVec, matchIndices);
index = 0;
}
Example 3: find_next_homography
Mat find_next_homography(Mat image, Mat image_next, vector<KeyPoint> keypoints_0, Mat descriptors_0,
SurfFeatureDetector detector, SurfDescriptorExtractor extractor,
BFMatcher matcher, vector<KeyPoint>& keypoints_next, Mat& descriptors_next)
{
//step 1 detect feature points in next image
vector<KeyPoint> keypoints_1;
detector.detect(image_next, keypoints_1);
Mat img_keypoints_surf0, img_keypoints_surf1;
drawKeypoints(image, keypoints_0, img_keypoints_surf0);
drawKeypoints(image_next, keypoints_1, img_keypoints_surf1);
//cout << "# im0 keypoints" << keypoints_0.size() << endl;
//cout << "# im1 keypoints" << keypoints_1.size() << endl;
imshow("surf 0", img_keypoints_surf0);
imshow("surf 1", img_keypoints_surf1);
//step 2: extract feature descriptors from feature points
Mat descriptors_1;
extractor.compute(image_next, keypoints_1, descriptors_1);
//step 3: feature matching
//cout << "fd matching" << endl;
vector<DMatch> matches;
vector<Point2f> matched_0;
vector<Point2f> matched_1;
matcher.match(descriptors_0, descriptors_1, matches);
Mat img_feature_matches;
drawMatches(image, keypoints_0, image_next, keypoints_1, matches, img_feature_matches );
imshow("Matches", img_feature_matches);
for (int i = 0; i < matches.size(); i++ )
{
matched_0.push_back(keypoints_0[matches[i].queryIdx].pt);
matched_1.push_back(keypoints_1[matches[i].trainIdx].pt);
}
keypoints_next = keypoints_1;
descriptors_next = descriptors_1;
return findHomography(matched_0, matched_1, RANSAC);
}
Example 4: getmatched
bool recognizer::getmatched( Mat mat1, Mat mat2){
Mat det1=mat1;Mat det2 = mat2;
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector->detect(det1,keypoints_object);
detector->detect(det2,keypoints_scene);
if(keypoints_object.size()==0 || keypoints_scene.size()==0){
return false;
}
Mat descriptors1, descriptors2;
extractor->compute(det1, keypoints_object, descriptors1);
extractor->compute(det2, keypoints_scene, descriptors2);
BFMatcher matcher;
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
// Rect r3 = det1&det2;
// double match = r3.area()/det2.area();
if(matches.size()<threholdNum)
return false;
return true;
}
Example 5: main
int main(int argc, char** argv)
{
//read images
Mat img_1c=imread("img3.jpg");
Mat img_2c=imread("img1.jpg");
Mat img_1, img_2;
//transform images into gray scale
cvtColor( img_1c, img_1, CV_BGR2GRAY );
cvtColor( img_2c, img_2, CV_BGR2GRAY );
SIFT sift;
//Ptr<SIFT> ptrsift = SIFT::create(50, 3, .2, 5, 10); //works for img1 and img2
Ptr<SIFT> ptrsift = SIFT::create(15, 5, .1, 5, 10);
vector<KeyPoint> key_points_1, key_points_2;
Mat detector;
//do sift, find key points
ptrsift->detect(img_1, key_points_1);
ptrsift->detect(img_2, key_points_2);
//sift(img_2, Mat(), key_points_2, detector);
//PSiftDescriptorExtractor extractor;
Ptr<SIFT> extractor = SIFT::create();
Mat descriptors_1,descriptors_2;
//compute descriptors
extractor->compute(img_1,key_points_1,descriptors_1);
extractor->compute(img_2,key_points_2,descriptors_2);
cout<<descriptors_1;
//use burte force method to match vectors
BFMatcher matcher;
vector<DMatch>matches;
matcher.match(descriptors_1,descriptors_2,matches);
//draw results
Mat img_matches;
drawMatches(img_1c,key_points_1,img_2c,key_points_2,matches,img_matches);
imshow("sift_Matches",img_matches);
waitKey(0);
return 0;
}
Example 6: compute
bool compute(Mat CurrentImageGrayScale, Mat Kinverse, const int iteration){
vector<KeyPoint> CurrentFeatures;
SurfDetector.detect(CurrentImageGrayScale, CurrentFeatures);
Mat CurrentFeatureDescriptors;
SurfDescriptor.compute(CurrentImageGrayScale, CurrentFeatures, CurrentFeatureDescriptors);
vector<DMatch> matches;
matcher.match(PreviousFeatureDescriptors, CurrentFeatureDescriptors, matches);
if (matches.size() > 200){
nth_element(matches.begin(), matches.begin()+ 200, matches.end());
matches.erase(matches.begin() + 200, matches.end()); // keep exactly the 200 best matches selected by nth_element
}
//Debug(matches, PreviousImageGrayScale, CurrentImageGrayScale, PreviousFeatures, CurrentFeatures);
vector< pair<double,double> > FirstImageFeatures;
vector< pair<double,double> > SecondImageFeatures;
for(int i = 0; i < matches.size(); i++){
Point2f myft = PreviousFeatures[matches[i].queryIdx].pt;
Mat FtMatForm = (Mat_<double>(3,1) << (double)myft.x, (double)myft.y, 1.0);
FtMatForm = Kinverse*FtMatForm;
pair<double,double> tmp = make_pair(FtMatForm.at<double>(0,0), FtMatForm.at<double>(1,0));
FirstImageFeatures.push_back(tmp);
myft = CurrentFeatures[matches[i].trainIdx].pt;
FtMatForm = (Mat_<double>(3,1) << (double)myft.x, (double)myft.y, 1.0);
FtMatForm = Kinverse*FtMatForm;
tmp = make_pair(FtMatForm.at<double>(0,0), FtMatForm.at<double>(1,0));
SecondImageFeatures.push_back(tmp);
}
vector<int> inliers_indexes;
Mat RobustEssentialMatrix= Ransac(FirstImageFeatures, SecondImageFeatures, 0.00001, 8, 2000, inliers_indexes);
//cout << RobustEssentialMatrix << endl;
//Debug2(matches, PreviousImageGrayScale, CurrentImageGrayScale, PreviousFeatures, CurrentFeatures, inliers_indexes);
Mat P = Mat::eye(3,4,CV_64F);
if (!GetRotationAndTraslation(RobustEssentialMatrix, FirstImageFeatures, SecondImageFeatures, inliers_indexes, P)){
cerr << "Recovering Translation and Rotation: Failed" << endl;
return false;
}
//cout << P << endl;
Mat Transformation = Mat::zeros(4,4, CV_64F);
Transformation.at<double>(3,3) = 1.0;
for(int i = 0 ; i < 3; i++)
for(int j = 0; j < 4; j++)
Transformation.at<double>(i, j) = P.at<double>(i, j);
Mat TransformationInverse = Transformation.inv();
Pose = Pose * TransformationInverse;
cerr << Pose.at<double>(0, 3) << " " << Pose.at<double>(1, 3) << " " << Pose.at<double>(2, 3) << endl;
PreviousImageGrayScale = CurrentImageGrayScale;
PreviousFeatures = CurrentFeatures;
PreviousFeatureDescriptors = CurrentFeatureDescriptors;
//old (previous implementation)
// vector< pair<int,int> > correspondences = harrisFeatureMatcherMCC(PreviousImageGrayScale, CurrentImageGrayScale, PreviousFeatures, CurrentFeatures);
// cout << "Iteracion" << iteration << "Cantidad de correspondencias " << correspondences.size() << endl;
// vector< pair<double,double> > FirstImageFeatures;
// vector< pair<double,double> > SecondImageFeatures;
// for(int i = 0; i < correspondences.size(); i++){
// pair<int,int> myft = PreviousFeatures[correspondences[i].first];
// Mat FtMatForm = (Mat_<double>(3,1) << (double)myft.first, (double)myft.second, 1.0);
// FtMatForm = Kinverse*FtMatForm;
// pair<double,double> tmp = make_pair(FtMatForm.at<double>(0,0), FtMatForm.at<double>(1,0));
// FirstImageFeatures.push_back(tmp);
//
// myft = CurrentFeatures[correspondences[i].second];
// FtMatForm = (Mat_<double>(3,1) << (double)myft.first, (double)myft.second, 1.0);
// FtMatForm = Kinverse*FtMatForm;
// tmp = make_pair(FtMatForm.at<double>(0,0), FtMatForm.at<double>(1,0));
// SecondImageFeatures.push_back(tmp);
// }
// vector<int> inliers_indexes;
// Mat RobustEssentialMatrix= Ransac(FirstImageFeatures, SecondImageFeatures, 0.98, 0.00001, 0.5, 8, FirstImageFeatures.size()/2, inliers_indexes);
// cout << "Iteration" << iteration << "Final EssentialMatrix" << endl;
// cout << RobustEssentialMatrix << endl;
//
//
// vector<pair<int, int> > correspondences_inliers;
// for(int i = 0; i < inliers_indexes.size(); i++)
// correspondences_inliers.push_back(correspondences[inliers_indexes[i]]);
// debugging2(PreviousImageGrayScale, CurrentImageGrayScale, PreviousFeatures, CurrentFeatures, correspondences_inliers);
//
// Mat P = Mat::eye(3,4,CV_64F);
// if (!GetRotationAndTraslation(RobustEssentialMatrix, FirstImageFeatures, SecondImageFeatures, inliers_indexes, P))
// return false;
// cout << "Iteration" << iteration << "Camera Matrix" << endl;
// cout << P << endl;
// Mat Transformation = Mat::zeros(4,4, CV_64F);
// Transformation.at<double>(3,3) = 1.0;
// for(int i = 0 ; i < 3; i++)
// for(int j = 0; j < 4; j++)
// Transformation.at<double>(i, j) = P.at<double>(i, j);
// Mat TransformationInverse = Transformation.inv();
// Pose = Pose * TransformationInverse;
// PreviousImageGrayScale = CurrentImageGrayScale;
// PreviousFeatures = CurrentFeatures;
// cerr << Pose.at<double>(0, 4) << Pose.at<double>(1, 4) << Pose.at<double>(2, 4) << endl;
return true;
}
Example 7: match
/* perform 2D SURF feature matching */
void match (Mat img_1, Mat img_2, vector<KeyPoint> keypoints_1,
vector<KeyPoint> keypoints_2, vector<DMatch> &good_matches,
pcl::CorrespondencesPtr &correspondences)
{
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute (img_1, keypoints_1, descriptors_1);
extractor.compute (img_2, keypoints_2, descriptors_2);
//FlannBasedMatcher matcher;
BFMatcher matcher (NORM_L2);
std::vector<DMatch> matches;
matcher.match (descriptors_1, descriptors_2, matches);
double max_dist = 0;
double min_dist = 100;
for (int i = 0; i < descriptors_1.rows; i++)
{
double dist = matches[i].distance;
if (dist < min_dist)
min_dist = dist;
if (dist > max_dist)
max_dist = dist;
}
for (int i = 0; i < descriptors_1.rows; i++)
{
// the factor 3 may need to be adjusted to fit different cases
if (matches[i].distance < 3 * min_dist)
{
good_matches.push_back (matches[i]);
}
}
correspondences->resize (good_matches.size ());
for (unsigned cIdx = 0; cIdx < good_matches.size (); cIdx++)
{
(*correspondences)[cIdx].index_query = good_matches[cIdx].queryIdx;
(*correspondences)[cIdx].index_match = good_matches[cIdx].trainIdx;
if (0) // for debugging
{
cout << good_matches[cIdx].queryIdx << " " << good_matches[cIdx].trainIdx
<< " " << good_matches[cIdx].distance << endl;
cout << good_matches.size () << endl;
}
}
// change the constant value of SHOW_MATCHING to 1 if you want to visualize the matching result
if (SHOW_MATCHING)
{
Mat img_matches;
drawMatches (img_1, keypoints_1, img_2, keypoints_2, good_matches,
img_matches, Scalar::all (-1), Scalar::all (-1), vector<char> (),
DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Show detected matches
imshow ("Good Matches", img_matches);
waitKey (0);
}
}
Example 8: computePoseDifference
void computePoseDifference(Mat img1, Mat img2, CommandArgs args, Mat k, Mat& dist_coefficients, double& worldScale, Mat& R, Mat& t, Mat& img_matches)
{
cout << "%===============================================%" << endl;
Mat camera_matrix = k.clone();
if (args.resize_factor > 1)
{
resize(img1, img1, Size(img1.cols / args.resize_factor,
img1.rows / args.resize_factor)); // make smaller for performance and displayablity
resize(img2, img2, Size(img2.cols / args.resize_factor,
img2.rows / args.resize_factor));
// scale matrix down according to changed resolution
camera_matrix = camera_matrix / args.resize_factor;
camera_matrix.at<double>(2,2) = 1;
}
Mat K1, K2;
K1 = K2 = camera_matrix;
if (img1.rows > img1.cols) // it is assumed the camera has been calibrated in landscape mode, so undistortion must also be performed in landscape orientation, or the camera matrix must be modified (fx,fy and cx,cy need to be exchanged)
{
swap(K1.at<double>(0,0), K1.at<double>(1,1));
swap(K1.at<double>(0,2), K1.at<double>(1,2));
}
if (img2.rows > img2.cols)
{
swap(K2.at<double>(0,0), K2.at<double>(1,1));
swap(K2.at<double>(0,2), K2.at<double>(1,2));
}
// Feature detection + extraction
vector<KeyPoint> KeyPoints_1, KeyPoints_2;
Mat descriptors_1, descriptors_2;
Ptr<Feature2D> feat_detector;
if (args.detector == DETECTOR_KAZE)
{
feat_detector = AKAZE::create(args.detector_data.upright ? AKAZE::DESCRIPTOR_MLDB_UPRIGHT : AKAZE::DESCRIPTOR_MLDB,
args.detector_data.descriptor_size,
args.detector_data.descriptor_channels,
args.detector_data.threshold,
args.detector_data.nOctaves,
args.detector_data.nOctaveLayersAkaze);
} else if (args.detector == DETECTOR_SURF)
{
feat_detector = xfeatures2d::SURF::create(args.detector_data.minHessian,
args.detector_data.nOctaves, args.detector_data.nOctaveLayersAkaze, args.detector_data.extended, args.detector_data.upright);
} else if (args.detector == DETECTOR_SIFT)
{
feat_detector = xfeatures2d::SIFT::create(args.detector_data.nFeatures,
args.detector_data.nOctaveLayersSift, args.detector_data.contrastThreshold, args.detector_data.sigma);
}
feat_detector->detectAndCompute(img1, noArray(), KeyPoints_1, descriptors_1);
feat_detector->detectAndCompute(img2, noArray(), KeyPoints_2, descriptors_2);
cout << "Number of feature points (img1, img2): " << "(" << KeyPoints_1.size() << ", " << KeyPoints_2.size() << ")" << endl;
// Find correspondences
BFMatcher matcher;
vector<DMatch> matches;
if (args.use_ratio_test)
{
if (args.detector == DETECTOR_KAZE)
matcher = BFMatcher(NORM_HAMMING, false);
else matcher = BFMatcher(NORM_L2, false);
vector<vector<DMatch>> match_candidates;
const float ratio = args.ratio;
matcher.knnMatch(descriptors_1, descriptors_2, match_candidates, 2);
for (int i = 0; i < match_candidates.size(); i++)
if (match_candidates[i][0].distance < ratio * match_candidates[i][1].distance)
matches.push_back(match_candidates[i][0]);
cout << "Number of matches passing ratio test: " << matches.size() << endl;
} else
{
if (args.detector == DETECTOR_KAZE)
matcher = BFMatcher(NORM_HAMMING, true);
else matcher = BFMatcher(NORM_L2, true);
matcher.match(descriptors_1, descriptors_2, matches);
cout << "Number of matching feature points: " << matches.size() << endl;
}
// Convert correspondences to vectors
vector<Point2f>imgpts1,imgpts2;
for(unsigned int i = 0; i < matches.size(); i++)
{
imgpts1.push_back(KeyPoints_1[matches[i].queryIdx].pt);
imgpts2.push_back(KeyPoints_2[matches[i].trainIdx].pt);
}
Mat mask; // inlier mask
if (args.undistort)
{
undistortPoints(imgpts1, imgpts1, K1, dist_coefficients, noArray(), K1);
undistortPoints(imgpts2, imgpts2, K2, dist_coefficients, noArray(), K2);
//......... part of the code omitted here .........
示例9: detector
JNIEXPORT void JNICALL Java_org_recg_writehomog_NativeCodeInterface_nativeLoop
(JNIEnv * jenv, jclass, jlong hataddr, jlong gray1, jlong gray2)
{
clock_t t1, t2;
t1 = clock();
homogandtimer *hatinloop = (homogandtimer *) hataddr;
LOGD("passed just entered nativeloop b4 trying");
try
{
LOGD("passed just entered the try in nativeloop");
LOGD("passed char jenv getutfchars");
string homogstring;//(jidentitystr); // <--this one
LOGD("passed making jidentitystr");
//output the matrices to the Log
Mat frame1 = *((Mat *)gray1);
Mat frame2 = *((Mat *)gray2);
LOGD("passed making mats");
int minHessian = 400;
//initial variable declaration
OrbFeatureDetector detector(minHessian);
LOGD("passed making detector");
std::vector<KeyPoint> keypoints1, keypoints2;
LOGD("passed making keypoints");
OrbDescriptorExtractor extractor;
LOGD("passed making extractor");
Mat descriptors1, descriptors2;
LOGD("passed making descriptors");
//process first frame
detector.detect(frame1, keypoints1);
LOGD("passed detecting1");
extractor.compute(frame1, keypoints1, descriptors1);
LOGD("passed computing1");
//process second frame
detector.detect(frame2, keypoints2);
LOGD("passed detecting2");
extractor.compute(frame2, keypoints2, descriptors2);
LOGD("passed computing2");
//in case frame has no features (eg if all-black from finger blocking lens)
if (keypoints1.size() == 0){
LOGD("passed keypointssize was zero!!");
frame1 = frame2.clone();
keypoints1 = keypoints2;
descriptors1 = descriptors2;
//go back to the javacode and continue with the next frame
return;
}
LOGD("passed keypointssize not zero!");
//Now match the points on the successive images
//FlannBasedMatcher matcher;
BFMatcher matcher;
LOGD("passed creating matcher");
std::vector<DMatch> matches;
LOGD("passed creating matches");
if(descriptors1.empty()){
LOGD("passed descriptors1 is empty!");
}
if(descriptors2.empty()){
LOGD("passed descriptors2 is empty!");
}
LOGD("passed key1 size %d", keypoints1.size());
LOGD("passed key2 size %d", keypoints2.size());
matcher.match(descriptors1, descriptors2, matches);
LOGD("passed doing the matching");
//eliminate weaker matches
double maxdist = 0;
double mindist = 100;
for (int j = 0; j < descriptors1.rows; j++){
DMatch match = matches[j];
double dist = match.distance;
if( dist < mindist ) mindist = dist;
if( dist > maxdist ) maxdist = dist;
}
//build the list of "good" matches
std::vector<DMatch> goodmatches;
for( int k = 0; k < descriptors1.rows; k++ ){
DMatch amatch = matches[k];
if( amatch.distance <= 3*mindist ){
goodmatches.push_back(amatch);
}
}
//Now compute homography matrix between the stronger matches
//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
if (goodmatches.size() < 4){
frame1 = frame2.clone();
keypoints1 = keypoints2;
descriptors1 = descriptors2;
return;
//......... part of the code omitted here .........
Example 10: main
int main(int argc, char **argv) {
// load image
Mat img1 = imread("input_1.jpg");
Mat img2 = imread("input_2.jpg");
// resize
resize(img1, img1, Size(640, 480));
resize(img2, img2, Size(640, 480));
// to gray (optional)
//cvtColor(img1, img1, CV_BGR2GRAY);
//cvtColor(img2, img2, CV_BGR2GRAY);
// get features
Ptr<Feature2D> f2d = xfeatures2d::SIFT::create();
vector<KeyPoint> kp1, kp2;
Mat dp1, dp2;
int step = 10; // 10 pixels spacing between kp's
for (int i = step; i<img1.rows - step; i += step)
{
for (int j = step; j<img1.cols - step; j += step)
{
// x,y,radius
kp1.push_back(KeyPoint(float(j), float(i), float(step)));
}
}
for (int i = step; i<img2.rows - step; i += step)
{
for (int j = step; j<img2.cols - step; j += step)
{
// x,y,radius
kp2.push_back(KeyPoint(float(j), float(i), float(step)));
}
}
get_features(f2d, img1, kp1, dp1);
get_features(f2d, img2, kp2, dp2);
// display keypoints to canvas
Mat cvs1, cvs2;
drawKeypoints(img1, kp1, cvs1);
drawKeypoints(img2, kp2, cvs2);
// find matches
BFMatcher matcher;
std::vector< DMatch > matches;
matcher.match(dp1, dp2, matches);
// display matches
Mat cvs3;
drawMatches(img1, kp1, img2, kp2, matches, cvs3);
// show
imshow("keypoints 1", cvs1);
imshow("keypoints 2", cvs2);
imshow("matches", cvs3);
waitKey(0);
}
Example 11: authenticate
//------------------------------------------------------------------------------
String PAN::authenticate(String CWD,String fileoutput){
Point matchLoc;
float percentage, threshold;
float average = 0;
int count = 0;
Mat big_image;
big_image = panimage.img->clone();//big image
resize(big_image, big_image, Size(2000, 1500));
if (!big_image.data)
{
std::cout << "Error reading images " << std::endl; return"";
}
Mat temp, temp1[3];
if (big_image.channels() >= 2){
cvtColor(big_image, temp, COLOR_BGR2GRAY);
}
//split(temp, temp1);
big_image = temp.clone();
/*img_1 = temp2.clone();
resize(img_2, img_2, Size(600, 400));
*///-- Step 1: Detect the keypoints using SURF Detector
vector<KeyPoint> keypoints_big, keypoints_small;
int minHessian = 200;
//FeatureDetector * detector = new SURF();
FastFeatureDetector detector;
detector.detect(big_image, keypoints_big);
cout << "big sift done\n\n";
//-- Step 2: Calculate descriptors (feature vectors)
int Threshl = 10;
int Octaves = 3;
//(pyramid layer) from which the keypoint has been extracted
float PatternScales = 1.0f;
//declare a variable BRISKD of the type cv::BRISK
Mat descriptors_2, descriptors_small;
BRISK BRISKD;
//BRISKD.detect(img_1, keypoints_1);
//BRISKD.detect(img_2, keypoints_2);
BRISKD.compute(big_image, keypoints_big, descriptors_2);
cout << "big brisk done\n\n";
int i = 0;
for ( i = 0; i < 7; i++){
String path(CWD);
// setting up input standard containers used for matching to
String temp = "win1";
temp = temp + char(i + 48) + ".jpg";
path = path + temp;
Mat find = imread(path, CV_LOAD_IMAGE_UNCHANGED);
//cout << path << "\n\n";
if (find.data == NULL){ break; }
//templateMatch(*panimage.img, find, matchLoc, threshold, percentage);
//-------------------------------------------------------------------------------------
if (!find.data)
{
std::cout << "Error reading images " << std::endl; return "";
}
if (find.channels() >= 2){
cvtColor(find,find, COLOR_BGR2GRAY);
}
//img_1 = temp2.clone();
resize(find ,find, Size(1200, 600));
//-- Step 1: Detect the keypoints using SURF Detector
vector<KeyPoint> keypoints_small;
int minHessian = 200;
detector.detect(find, keypoints_small);
cout << "small sift done\n\n";
//-- Step 2: Calculate descriptors (feature vectors)
int Threshl = 10;
int Octaves = 3;
//(pyramid layer) from which the keypoint has been extracted
float PatternScales = 1.0f;
//declare a variable BRISKD of the type cv::BRISK
Mat descriptors_small;
//BRISKD.detect(img_1, keypoints_1);
//BRISKD.detect(img_2, keypoints_2);
BRISKD.compute(find, keypoints_small, descriptors_small);
cout << "brisk done\n\n";
//-------------------------------------------------------------------------------------
//-- Step 3: Matching descriptor vectors using a brute-force matcher (FLANN variant commented out below)
//FlannBasedMatcher matcher;
BFMatcher matcher;
std::vector< DMatch > matches;
matcher.match(descriptors_small, descriptors_2, matches);
cv::Mat all_matches;
drawMatches(find, keypoints_small, big_image, keypoints_big, matches, all_matches, cv::Scalar::all(-1), cv::Scalar::all(-1), vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//......... part of the code omitted here .........
Example 12: main
/**
* @function main
* @brief Main function
*/
int main( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
Mat img_object = imread( argv[1], IMREAD_GRAYSCALE );
Mat img_scene = imread( argv[2], IMREAD_GRAYSCALE );
if( !img_object.data || !img_scene.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 100;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector.detect( img_object, keypoints_object );
detector.detect( img_scene, keypoints_scene );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute( img_object, keypoints_object, descriptors_object );
extractor.compute( img_scene, keypoints_scene, descriptors_scene );
//-- Step 3: Matching descriptor vectors using brute force matcher
BFMatcher matcher = BFMatcher(NORM_L2, false);
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_object.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{ if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
Mat img_matches;
drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object from img_1 in img_2
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( size_t i = 0; i < good_matches.size(); i++ )
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( obj, scene, RANSAC );
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = Point(0,0); obj_corners[1] = Point( img_object.cols, 0 );
obj_corners[2] = Point( img_object.cols, img_object.rows ); obj_corners[3] = Point( 0, img_object.rows );
std::vector<Point2f> scene_corners(4);
perspectiveTransform( obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
Point2f offset( (float)img_object.cols, 0);
//line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
//line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
//line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
//line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );
//-- Show detected matches
imshow( "Good Matches & Object detection", img_matches );
waitKey(0);
return 0;
}
Example 13: filterRANSAC
bool TrackerForProject::filterRANSAC(cv::Mat newFrame_, vector<Point2f> &corners, vector<Point2f> &nextCorners)
{
int ransacReprojThreshold = 3;
cv::Mat prev_(prevFrame_(position_));
cv::Mat new_(newFrame_);
// detecting keypoints
SurfFeatureDetector detector;
detector.detect(prev_, keypoints1);
vector<KeyPoint> keypoints2;
detector.detect(new_, keypoints2);
// computing descriptors
SurfDescriptorExtractor extractor;
Mat descriptors1;
extractor.compute(prev_, keypoints1, descriptors1);
Mat descriptors2;
extractor.compute(newFrame_, keypoints2, descriptors2);
// matching descriptors
BFMatcher matcher;
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
std::cout << matches.size() << std::endl;
vector<Point2f> points1, points2;
// fill the arrays with the points
for (int i = 0; i < matches.size(); i++)
{
points1.push_back(keypoints1[matches[i].queryIdx].pt);
}
for (int i = 0; i < matches.size(); i++)
{
points2.push_back(keypoints2[matches[i].trainIdx].pt);
}
Mat H = findHomography(Mat(points1), Mat(points2), CV_RANSAC, ransacReprojThreshold);
Mat points1Projected;
perspectiveTransform(Mat(points1), points1Projected, H);
vector<KeyPoint> keypoints3;
for(int i = 0; i < matches.size(); i++)
{
Point2f p1 = points1Projected.at<Point2f>(i); // projected points are stored in match order, not indexed by queryIdx
Point2f p2 = keypoints2.at(matches[i].trainIdx).pt;
if(((p2.x - p1.x) * (p2.x - p1.x) +
(p2.y - p1.y) * (p2.y - p1.y) <= ransacReprojThreshold * ransacReprojThreshold)&& ((p2.x > position_.x - 10)
&& (p2.x < position_.x + position_.width + 10) && (p2.y > position_.y - 10) &&(p2.y < position_.y + position_.height + 10)) )
{
corners.push_back(keypoints1.at(matches[i].queryIdx).pt);
nextCorners.push_back(keypoints2.at(matches[i].trainIdx).pt);
keypoints3.push_back(keypoints2.at(matches[i].trainIdx));
}
}
for(int i = 0; i < corners.size(); i++)
{
corners[i].x += position_.x;
corners[i].y += position_.y;
}
keypoints1 = keypoints3;
for(int i = 0; i < keypoints1.size(); i++)
{
keypoints1[i].pt.x -= position_.x;
keypoints1[i].pt.y -= position_.y;
}
if (keypoints1.empty())
{
return false;
}
return true;
}
Example 14: main
int main( int argc, char** argv ) {
if (argc != 2) {
cout << "Must provide directory argument.\n";
return -1;
}
vector<string> files;
GetFilesInDirectory(files, argv[1]);
int originalIndex = 0;
int imgAindex = 0;
int imgBindex = 0;
std::set<int> indexesIncluded;
std::map<int, std::pair<Mat, Mat>> knownRts; // per-image R|t pairs (vector<Mat,Mat> would not compile)
// Find first two images based on snavely method - set originalIndex, imgAindex, imgBindex
indexesIncluded.insert(imgAindex);
indexesIncluded.insert(imgBindex);
while (indexesIncluded.size() != files.size()) {
// find features in each image, find matches
// findEssentialMatrix
// recoverPose between A and B
// convert R|t for B using original R|t value for A if we have it. (check knownRts map)
// add new R|ts to the map for both images
// triangulatePoints and add to cloud
// find next B to use based on best match between remaining images (Snavely's method) and an included image.
}
// Create image
string filepath1 = argv[1];
image1 = Image(filepath1);
string filepath2 = argv[2];
image2 = Image(filepath2);
// Detect keypoints
FeatureDetectorSIFT siftDetector = FeatureDetectorSIFT();
vector<KeypointDescriptor> keypoints1 = siftDetector.detect(image1);
vector<KeypointDescriptor> keypoints2 = siftDetector.detect(image2);
// Convert descriptors back to cv keypoints :(
sift_keypoints1 = vector<KeyPoint>(keypoints1.begin(), keypoints1.end());
sift_keypoints2 = vector<KeyPoint>(keypoints2.begin(), keypoints2.end());
//STUFF FROM THE OPEN CV EXAMPLE BELOW
// https://github.com/npinto/opencv/blob/master/samples/cpp/matcher_simple.cpp
cv::Ptr<Feature2D> f2d = xfeatures2d::SIFT::create();
Mat descriptors1, descriptors2;
f2d->compute(image1.matrix, sift_keypoints1, descriptors1);
f2d->compute(image2.matrix, sift_keypoints2, descriptors2);
BFMatcher matcher;
matcher.match(descriptors1, descriptors2, matches);
vector<Point2f> ptList1;
vector<Point2f> ptList2;
vector<int> queryIdxs;
vector<int> trainIdxs;
for (vector<DMatch>::size_type i = 0; i != matches.size(); i++){
queryIdxs.push_back(matches[i].queryIdx);
trainIdxs.push_back(matches[i].trainIdx);
}
KeyPoint::convert(sift_keypoints1, ptList1, queryIdxs);
KeyPoint::convert(sift_keypoints2, ptList2, trainIdxs);
vector<uchar> funOut;
//method 8 selects RANSAC (FM_RANSAC) in findFundamentalMat
Mat F = findFundamentalMat(ptList1, ptList2, 8, 3, .99, funOut);
vector<int> funOutInt(funOut.begin(), funOut.end());
for (vector<int>::size_type i = 0; i != funOut.size(); i++){
if (funOutInt[i]==1){
filteredMatches.push_back(matches[i]);
}
}
namedWindow("filtered_matches", 1);
drawMatches(image1.matrix, sift_keypoints1, image2.matrix, sift_keypoints2, filteredMatches, filtered_matches_matrix, matchColor, pointColor); // draw the RANSAC-filtered matches rather than an empty list
imshow("filtered_matches", filtered_matches_matrix);
cout << "^C to exit.\n";
waitKey(0);
//......... part of the code omitted here .........