This article collects typical usage examples of the C++ cv::Ptr::detect method. If you are wondering what Ptr::detect does, how to call it, or what real-world uses look like, the curated code samples here may help. You can also explore further usage examples of the containing class cv::Ptr.
Below are 15 code examples of the Ptr::detect method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
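Before diving into the examples, here is a minimal, self-contained sketch of the typical call pattern, assuming an OpenCV 3.x-style factory function; the ORB detector and the image path "sample.jpg" are illustrative assumptions only, not taken from the examples below:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

int main()
{
    // Illustrative input image -- replace with your own file.
    cv::Mat img = cv::imread("sample.jpg", cv::IMREAD_GRAYSCALE);
    if (img.empty()) {
        std::cerr << "sample.jpg not found" << std::endl;
        return -1;
    }
    // Any cv::Feature2D-derived detector can sit behind the cv::Ptr;
    // ORB is used here purely as an example.
    cv::Ptr<cv::FeatureDetector> detector = cv::ORB::create();
    std::vector<cv::KeyPoint> keypoints;
    detector->detect(img, keypoints);   // fills the keypoint vector
    std::cout << keypoints.size() << " keypoints detected" << std::endl;
    return 0;
}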
Example 1: showFeatures
/*
shows the features detected on the training video
*/
int showFeatures(std::string trainPath, cv::Ptr<cv::FeatureDetector> &detector)
{
//open the movie
cv::VideoCapture movie;
movie.open(trainPath);
if (!movie.isOpened()) {
std::cerr << trainPath << ": training movie not found" << std::endl;
return -1;
}
std::cout << "Press Esc to Exit" << std::endl;
cv::Mat frame, kptsImg;
movie.read(frame);
std::vector<cv::KeyPoint> kpts;
while (movie.read(frame)) {
detector->detect(frame, kpts);
std::cout << kpts.size() << " keypoints detected... \r";
fflush(stdout);
cv::drawKeypoints(frame, kpts, kptsImg);
cv::imshow("Features", kptsImg);
if(cv::waitKey(5) == 27) {
break;
}
}
std::cout << std::endl;
cv::destroyWindow("Features");
return 0;
}
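For context, a hedged usage sketch for the function above; the AKAZE detector and the video path "training.avi" are assumptions for illustration, not part of the original example:
#include <opencv2/opencv.hpp>

// showFeatures() as defined in Example 1 above.
int showFeatures(std::string trainPath, cv::Ptr<cv::FeatureDetector> &detector);

int main()
{
    // The detector must be an lvalue because showFeatures takes a non-const reference.
    cv::Ptr<cv::FeatureDetector> detector = cv::AKAZE::create();
    return showFeatures("training.avi", detector);
}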
Example 2: extract
void extract(const cv::Mat &color, const cv::Rect &roi, const cv::Mat &mask, std::vector<cv::KeyPoint> &keypoints, cv::Mat &descriptors)
{
detector->detect(color(roi), keypoints, mask);
for(size_t i = 0; i < keypoints.size(); ++i)
{
cv::KeyPoint &p = keypoints[i];
p.pt.x += roi.x;
p.pt.y += roi.y;
}
extractor->compute(color, keypoints, descriptors);
}
Example 3: assert
Node::Node(ros::NodeHandle* nh,
const cv::Mat& visual, const cv::Mat& depth,
image_geometry::PinholeCameraModel cam_model,
cv::Ptr<cv::FeatureDetector> detector,
cv::Ptr<cv::DescriptorExtractor> extractor,
cv::Ptr<cv::DescriptorMatcher> matcher,
const sensor_msgs::PointCloud2ConstPtr& point_cloud,
unsigned int msg_id,
unsigned int id,
const cv::Mat& detection_mask):
nh_(nh),
msg_id_(msg_id),
id_(id),
cloudMessage_(*point_cloud),
cam_model_(cam_model),
matcher_(matcher)
{
std::clock_t starttime=std::clock();
ROS_FATAL_COND(detector.empty(), "No valid detector!");
detector->detect( visual, feature_locations_2d_, detection_mask);// fill 2d locations
ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "Feature detection runtime: " << ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC );
ROS_INFO("Found %d Keypoints", (int)feature_locations_2d_.size());
cloud_pub = nh_->advertise<sensor_msgs::PointCloud2>("/rgbdslam/batch_clouds",20);
cloud_pub2 = nh_->advertise<sensor_msgs::PointCloud2>("/rgbdslam/my_clouds",20);
// get pcl::PointCloud to extract depth values at pixel positions
std::clock_t starttime5=std::clock();
// TODO: This takes 0.1 seconds and is not strictly necessary
//pcl::fromROSMsg(*point_cloud,pc);
pcl::fromROSMsg(*point_cloud,pc_col);
ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime5) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "projection runtime: " << ( std::clock() - starttime5 ) / (double)CLOCKS_PER_SEC );
// project pixels to 3dPositions and create search structures for the gicp
projectTo3D(depth, feature_locations_2d_, feature_locations_3d_,pc_col); //takes less than 0.01 sec
std::clock_t starttime4=std::clock();
// projectTo3D needs a dense cloud to use the points.at(px.x, px.y) call
ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime4) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "projection runtime: " << ( std::clock() - starttime4 ) / (double)CLOCKS_PER_SEC );
std::clock_t starttime2=std::clock();
extractor->compute(visual, feature_locations_2d_, feature_descriptors_); //fill feature_descriptors_ with information
assert(feature_locations_2d_.size() == feature_locations_3d_.size());
ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime2) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "Feature extraction runtime: " << ( std::clock() - starttime2 ) / (double)CLOCKS_PER_SEC );
flannIndex = NULL;
ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "constructor runtime: "<< ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC <<"sec");
}
Example 4: compute_histogram
Histogram compute_histogram(std::string imFile, cv::Ptr<cv::FeatureDetector> &detector, cv::Ptr<cv::FeatureDetector> &detector2, cv::SiftDescriptorExtractor &extractor, Quantization *quant) {
cv::Mat img = cv::imread(imFile);
//detect SIFT keypoints
std::vector<cv::KeyPoint> keypoints;
detector->detect( img, keypoints );
//detect MSER keypoints
std::vector<cv::KeyPoint> keypoints2;
detector2->detect( img, keypoints2 );
//group them together
for(cv::KeyPoint& keypoint : keypoints2) {
keypoints.push_back(keypoint);
}
std::cout << " - keypoint_ct: " << keypoints.size() << std::endl;
//compute descriptors
cv::Mat descriptor_uchar;
extractor.compute(img, keypoints, descriptor_uchar);
cv::Mat descriptor_double;
descriptor_uchar.convertTo(descriptor_double, CV_64F);
//convert from mat to bag of unquantized features
BagOfFeatures unquantized_features;
convert_mat_to_vector(descriptor_double, unquantized_features);
//quantize to form bag of words
Histogram bag_of_words;
quant->quantize(unquantized_features, bag_of_words);
//normalize
return bag_of_words;
}
Example 5: testDetect
void testDetect(const cv::Mat& img)
{
hog->setGammaCorrection(false);
hog->setSVMDetector(hog->getDefaultPeopleDetector());
std::vector<cv::Point> locations;
// Test detect
hog->detect(loadMat(img), locations);
#ifdef DUMP
dump(locations);
#else
compare(locations);
#endif
// Test detect on smaller image
cv::Mat img2;
cv::resize(img, img2, cv::Size(img.cols / 2, img.rows / 2));
hog->detect(loadMat(img2), locations);
#ifdef DUMP
dump(locations);
#else
compare(locations);
#endif
// Test detect on a larger image
cv::resize(img, img2, cv::Size(img.cols * 2, img.rows * 2));
hog->detect(loadMat(img2), locations);
#ifdef DUMP
dump(locations);
#else
compare(locations);
#endif
}
Example 6: compute_bow_histogram
void compute_bow_histogram(cv::Mat &sample, Histogram &feature_vector, cv::Ptr<cv::FeatureDetector> &detector, cv::SiftDescriptorExtractor &extractor, Quantization *quant){
//detect keypoints
std::vector<cv::KeyPoint> keypoints;
detector->detect( sample, keypoints );
//compute descriptor
cv::Mat descriptor_uchar;
extractor.compute(sample, keypoints, descriptor_uchar);
cv::Mat descriptor_double;
descriptor_uchar.convertTo(descriptor_double, CV_64F);
//convert from mat to bag of unquantized features
BagOfFeatures unquantized_features;
convert_mat_to_vector(descriptor_double, unquantized_features);
//quantize regions -- true BagOfFeatures
quant->quantize(unquantized_features, feature_vector);
}
Example 7:
/*
* Class: ph_edu_dlsu_akazefeaturesjni_MainActivity
* Method: findAKAZEFeatures
* Signature: (JJ)V
*/
JNIEXPORT void JNICALL Java_ph_edu_dlsu_akazefeaturesjni_MainActivity_findAKAZEFeatures
(JNIEnv *pEnv, jobject, jlong mGray, jlong mRGBA){
cv::Mat& mGr = *(cv::Mat*)mGray;
cv::Mat& mRgb = *(cv::Mat*)mRGBA;
std::vector<cv::KeyPoint> v;
if (detector == NULL)
detector = cv::AKAZE::create();
detector->detect(mGr, v, cv::Mat());
for( unsigned int i = 0; i < v.size(); i++ )
{
const cv::KeyPoint& kp = v[i];
cv::circle(mRgb, cv::Point(kp.pt.x, kp.pt.y), 10, cv::Scalar(0,255,0,255));
}
}
Example 8: train_category
//gets centroid for category from training images
void LocalDescriptorAndBagOfFeature::train_category(const std::vector<cv::Mat> &samples, Histogram ¢roid, const cv::Ptr<cv::FeatureDetector> &detector, const cv::SiftDescriptorExtractor &extractor, Quantization *quant){
clock_t start = clock();
int i = 0;
for(const cv::Mat& sample : samples){
i++;
std::cout << "converting img " << i << " of " << samples.size() << " to bag of features" << std::endl;
//detect keypoints
std::vector<cv::KeyPoint> keypoints;
detector->detect( sample, keypoints );
//compute descriptor
cv::Mat descriptor_uchar;
extractor.compute(sample, keypoints, descriptor_uchar);
cv::Mat descriptor_double;
descriptor_uchar.convertTo(descriptor_double, CV_64F);
//convert from mat to bag of unquantized features
BagOfFeatures unquantized_features;
convert_mat_to_vector(descriptor_double, unquantized_features);
//quantize regions -- true BagOfFeatures
Histogram feature_vector;
quant->quantize(unquantized_features, feature_vector);
//aggregate
vector_add(centroid, feature_vector);
}
//divide by training category size to compute centroid
//std::transform(centroid.begin(), centroid.end(), centroid.begin(), std::bind1st(std::divides<double>(),bikes.size()));
for(double& d : centroid){
d = d/samples.size();
}
std::cout << double( clock() - start ) / (double)CLOCKS_PER_SEC<< " seconds." << std::endl;
}
Developer: CS598TeamAwesome, Project: Project2-LocalDescriptorAndBagOfFeature, Lines of code: 38, Source file: NearestCentroidClassifier.cpp
Example 9: createGICPStructures
Node::Node(ros::NodeHandle& nh, const cv::Mat& visual,
cv::Ptr<cv::FeatureDetector> detector,
cv::Ptr<cv::DescriptorExtractor> extractor,
cv::Ptr<cv::DescriptorMatcher> matcher,
const sensor_msgs::PointCloud2ConstPtr point_cloud,
const cv::Mat& detection_mask)
: id_(0),
flannIndex(NULL),
matcher_(matcher)
{
#ifdef USE_ICP_CODE
gicp_initialized = false;
#endif
std::clock_t starttime=std::clock();
#ifdef USE_SIFT_GPU
SiftGPUFeatureDetector* siftgpu = SiftGPUFeatureDetector::GetInstance();
float* descriptors = siftgpu->detect(visual, feature_locations_2d_);
if (descriptors == NULL) {
ROS_FATAL("Can't run SiftGPU");
}
#else
ROS_FATAL_COND(detector.empty(), "No valid detector!");
detector->detect( visual, feature_locations_2d_, detection_mask);// fill 2d locations
#endif
ROS_INFO("Feature detection and descriptor extraction runtime: %f", ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC);
ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "Feature detection runtime: " << ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC );
/*
if (id_ == 0)
cloud_pub_ = nh_->advertise<sensor_msgs::PointCloud2>("clouds_from_node_base",10);
else{
*/
cloud_pub_ = nh.advertise<sensor_msgs::PointCloud2>("/rgbdslam/batch_clouds",20);
// cloud_pub_ransac = nh_->advertise<sensor_msgs::PointCloud2>("clouds_from_node_current_ransac",10);
//} */
// get pcl::PointCloud to extract depth values at pixel positions
std::clock_t starttime5=std::clock();
// TODO: If batch sending/saving of clouds were removed, the point cloud wouldn't have to be saved,
// which would slim down the memory requirements
pcl::fromROSMsg(*point_cloud,pc_col);
ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime5) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "pc2->pcl conversion runtime: " << ( std::clock() - starttime5 ) / (double)CLOCKS_PER_SEC );
// project pixels to 3dPositions and create search structures for the gicp
#ifdef USE_SIFT_GPU
// also removes unused descriptors from the descriptor matrix
// build descriptor matrix
projectTo3DSiftGPU(feature_locations_2d_, feature_locations_3d_, pc_col, descriptors, feature_descriptors_); //takes less than 0.01 sec
if (descriptors != NULL) delete descriptors;
#else
projectTo3D(feature_locations_2d_, feature_locations_3d_, pc_col); //takes less than 0.01 sec
#endif
// projectTo3D needs a dense cloud to use the points.at(px.x, px.y) call
#ifdef USE_ICP_CODE
std::clock_t starttime4=std::clock();
createGICPStructures();
ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime4) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "gicp runtime: " << ( std::clock() - starttime4 ) / (double)CLOCKS_PER_SEC );
#endif
std::clock_t starttime2=std::clock();
#ifndef USE_SIFT_GPU
// ROS_INFO("Use extractor");
//cv::Mat topleft, topright;
//topleft = visual.colRange(0,visual.cols/2+50);
//topright= visual.colRange(visual.cols/2+50, visual.cols-1);
//std::vector<cv::KeyPoint> kp1, kp2;
//extractor->compute(topleft, kp1, feature_descriptors_); //fill feature_descriptors_ with information
extractor->compute(visual, feature_locations_2d_, feature_descriptors_); //fill feature_descriptors_ with information
#endif
assert(feature_locations_2d_.size() == feature_locations_3d_.size());
ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime2) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "Feature extraction runtime: " << ( std::clock() - starttime2 ) / (double)CLOCKS_PER_SEC );
ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "constructor runtime: "<< ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC <<"sec");
}
Example 10: generateBOWImageDescs
/*
generate FabMap bag-of-words data : an image descriptor for each frame
*/
int generateBOWImageDescs(std::string dataPath,
std::string bowImageDescPath,
std::string vocabPath,
cv::Ptr<cv::FeatureDetector> &detector,
cv::Ptr<cv::DescriptorExtractor> &extractor,
int minWords)
{
cv::FileStorage fs;
//ensure not overwriting training data
std::ifstream checker;
checker.open(bowImageDescPath.c_str());
if(checker.is_open()) {
std::cerr << bowImageDescPath << ": FabMap Training/Testing Data "
"already present" << std::endl;
checker.close();
return -1;
}
//load vocabulary
std::cout << "Loading Vocabulary" << std::endl;
fs.open(vocabPath, cv::FileStorage::READ);
cv::Mat vocab;
fs["Vocabulary"] >> vocab;
if (vocab.empty()) {
std::cerr << vocabPath << ": Vocabulary not found" << std::endl;
return -1;
}
fs.release();
//use a FLANN matcher to generate bag-of-words representations
cv::Ptr<cv::DescriptorMatcher> matcher =
cv::DescriptorMatcher::create("FlannBased");
cv::BOWImgDescriptorExtractor bide(extractor, matcher);
bide.setVocabulary(vocab);
//load movie
cv::VideoCapture movie;
movie.open(dataPath);
if(!movie.isOpened()) {
std::cerr << dataPath << ": movie not found" << std::endl;
return -1;
}
//extract image descriptors
cv::Mat fabmapTrainData;
std::cout << "Extracting Bag-of-words Image Descriptors" << std::endl;
std::cout.setf(std::ios_base::fixed);
std::cout.precision(0);
std::ofstream maskw;
if(minWords) {
maskw.open(std::string(bowImageDescPath + "mask.txt").c_str());
}
cv::Mat frame, bow;
std::vector<cv::KeyPoint> kpts;
while(movie.read(frame)) {
detector->detect(frame, kpts);
bide.compute(frame, kpts, bow);
if(minWords) {
//writing a mask file
if(cv::countNonZero(bow) < minWords) {
//frame masked
maskw << "0" << std::endl;
} else {
//frame accepted
maskw << "1" << std::endl;
fabmapTrainData.push_back(bow);
}
} else {
fabmapTrainData.push_back(bow);
}
std::cout << 100.0 * (movie.get(CV_CAP_PROP_POS_FRAMES) /
movie.get(CV_CAP_PROP_FRAME_COUNT)) << "% \r";
fflush(stdout);
}
std::cout << "Done " << std::endl;
movie.release();
//save training data
fs.open(bowImageDescPath, cv::FileStorage::WRITE);
fs << "BOWImageDescs" << fabmapTrainData;
fs.release();
return 0;
}
Example 11: callback
void callback(const sensor_msgs::ImageConstPtr& msg)
{
if (image_0_ == NULL)
{
// Take first image:
try
{
image_0_ = cv_bridge::toCvCopy(msg,
sensor_msgs::image_encodings::isColor(msg->encoding) ?
sensor_msgs::image_encodings::BGR8 :
sensor_msgs::image_encodings::MONO8);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR_STREAM("Failed to take first image: " << e.what());
return;
}
ROS_INFO("First image taken");
// Detect keypoints:
detector_->detect(image_0_->image, keypoints_0_);
ROS_INFO_STREAM(keypoints_0_.size() << " points found.");
// Extract keypoints' descriptors:
extractor_->compute(image_0_->image, keypoints_0_, descriptors_0_);
}
else
{
// Take second image:
try
{
image_1_ = cv_bridge::toCvShare(msg,
sensor_msgs::image_encodings::isColor(msg->encoding) ?
sensor_msgs::image_encodings::BGR8 :
sensor_msgs::image_encodings::MONO8);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR_STREAM("Failed to take image: " << e.what());
return;
}
// Detect keypoints:
std::vector<cv::KeyPoint> keypoints_1;
detector_->detect(image_1_->image, keypoints_1);
ROS_INFO_STREAM(keypoints_1.size() << " points found on the new image.");
// Extract keypoints' descriptors:
cv::Mat descriptors_1;
extractor_->compute(image_1_->image, keypoints_1, descriptors_1);
// Compute matches:
std::vector<cv::DMatch> matches;
match(descriptors_0_, descriptors_1, matches);
// Compute homography:
cv::Mat H;
homography(keypoints_0_, keypoints_1, matches, H);
// Draw matches:
const int s = std::max(image_0_->image.rows, image_0_->image.cols);
cv::Size size(s, s);
cv::Mat draw_image;
warped_image_ = boost::make_shared<cv_bridge::CvImage>(
image_0_->header, image_0_->encoding,
cv::Mat(size, image_0_->image.type()));
if (!H.empty()) // filter outliers
{
std::vector<char> matchesMask(matches.size(), 0);
const size_t N = matches.size();
std::vector<int> queryIdxs(N), trainIdxs(N);
for (size_t i = 0; i < N; ++i)
{
queryIdxs[i] = matches[i].queryIdx;
trainIdxs[i] = matches[i].trainIdx;
}
std::vector<cv::Point2f> points1, points2;
cv::KeyPoint::convert(keypoints_0_, points1, queryIdxs);
cv::KeyPoint::convert(keypoints_1, points2, trainIdxs);
cv::Mat points1t;
cv::perspectiveTransform(cv::Mat(points1), points1t, H);
double maxInlierDist = threshold_ < 0 ? 3 : threshold_;
for (size_t i1 = 0; i1 < points1.size(); ++i1)
{
if (cv::norm(points2[i1] - points1t.at<cv::Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
matchesMask[i1] = 1;
}
// draw inliers
cv::drawMatches(
image_0_->image, keypoints_0_,
image_1_->image, keypoints_1, matches,
draw_image, cv::Scalar(0, 255, 0), cv::Scalar(0, 0, 255),
matchesMask,
cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
//......... part of the code omitted here .........
Example 12: knn_match
/*****************************************************************************
// The knn matching with k = 2
// This code performs the matching and the refinement.
// @param query_image: the input image
// @param matches_out: a pointer that stores the output matches. It is necessary for
// pose estimation.
*/
int knn_match(cv::Mat& query_image, std::vector< cv::DMatch> * matches_out)
{
// variables that keep the query keypoints and query descriptors
std::vector<cv::KeyPoint> keypointsQuery;
cv::Mat descriptorQuery;
// Temporary variables for the matching results
std::vector< std::vector< cv::DMatch> > matches1;
std::vector< std::vector< cv::DMatch> > matches2;
std::vector< std::vector< cv::DMatch> > matches_opt1;
//////////////////////////////////////////////////////////////////////
// 1. Detect the keypoints
// This line detects keypoints in the query image
_detector->detect(query_image, keypointsQuery);
// If keypoints were found, descriptors are extracted.
if(keypointsQuery.size() > 0)
{
// extract descriptors
_extractor->compute( query_image, keypointsQuery, descriptorQuery);
}
//////////////////////////////////////////////////////////////////////////////
// 2. Here we match the descriptors with the database descriptors.
// with k-nearest neighbors with k=2
_matcher.knnMatch(descriptorQuery , matches1, 2);
#ifdef DEBUG_OUT
std::cout << "Found " << matches1.size() << " matching feature descriptors out of " << _matcher.getTrainDescriptors().size() << " database descriptors." << std::endl;
#endif
//////////////////////////////////////////////////////////////////////////////
// 3 Filter the matches.
// Accept only matches (knn with k=2) which belong to one image
// The database tree within _matcher contains descriptors of all input images.
// We expect that both nearest neighbors must belong to one image.
// Otherwise we can remove the result.
// Along with this, we count which reference image has the highest number of matches.
// At this time, we consider this image as the searched image.
// we initialize the hit counters with 0
std::vector<int> hits(_num_ref_images);
for (int i=0; i<_num_ref_images; i++)
{
hits[i] = 0;
}
// the loop runs through all matches and compares the image index
// imgIdx. They must be equal, otherwise the matches belong to two
// different reference images.
for (int i=0; i<matches1.size(); i++)
{
// The comparison.
if(matches1[i].at(0).imgIdx == matches1[i].at(1).imgIdx)
{
// we keep it
matches_opt1.push_back(matches1[i]);
// and count a hit
hits[matches1[i].at(0).imgIdx]++;
}
}
#ifdef DEBUG_OUT
std::cout << "Optimized " << matches_opt1.size() << " feature descriptors." << std::endl;
#endif
// Now we search for the highest number of hits in our hit array
// The variable max_idx keeps the image id.
// The variable max_value the amount of hits.
int max_idx = -1;
int max_value = 0;
for (int i=0; i<_num_ref_images; i++)
{
#ifdef DEBUG_OUT
std::cout << "for " << i << " : " << hits[i] << std::endl;
#endif
if(hits[i] > max_value)
{
max_value = hits[i];
max_idx = i;
}
}
///////////////////////////////////////////////////////
// 4. The cross-match
// At this time, we test the database against the query descriptors.
// The variable max_id stores the reference image id. Thus, we test only
//......... part of the code omitted here .........
Example 13: brute_force_match
/*****************************************************************************
// This applies a brute-force match without a trained data structure.
// It also calculates the two nearest neighbors.
// @param query_image: the input image
// @param matches_out: a pointer that stores the output matches. It is necessary for
// pose estimation.
*/
int brute_force_match(cv::Mat& query_image, std::vector< cv::DMatch> * matches_out)
{
// variables that keep the query keypoints and query descriptors
std::vector<cv::KeyPoint> keypointsQuery;
cv::Mat descriptorQuery;
// Temporary variables for the matching results
std::vector< std::vector< cv::DMatch> > matches1;
std::vector< std::vector< cv::DMatch> > matches2;
std::vector< std::vector< cv::DMatch> > matches_opt1;
//////////////////////////////////////////////////////////////////////
// 1. Detect the keypoints
// This line detects keypoints in the query image
_detector->detect(query_image, keypointsQuery);
// If keypoints were found, descriptors are extracted.
if(keypointsQuery.size() > 0)
{
// extract descriptors
_extractor->compute( query_image, keypointsQuery, descriptorQuery);
}
#ifdef DEBUG_OUT
std::cout << "Found " << descriptorQuery.size() << " feature descriptors in the image." << std::endl;
#endif
//////////////////////////////////////////////////////////////////////////////
// 2. Here we match the descriptors with all descriptors in the database
// with k-nearest neighbors with k=2
int max_removed = INT_MAX;
int max_id = -1;
for(int i=0; i<_descriptorsRefDB.size(); i++)
{
std::vector< std::vector< cv::DMatch> > matches_temp1;
// Here we match all query descriptors against all db descriptors and try to find
// matching descriptors
_brute_force_matcher.knnMatch( descriptorQuery, _descriptorsRefDB[i], matches_temp1, 2);
///////////////////////////////////////////////////////
// 3. Refinement; Ratio test
// The ratio test only accepts matches which are clear, without ambiguity.
// The best hit must be closer to the query descriptors than the second hit.
int removed = ratioTest(matches_temp1);
// We only accept the match with the highest number of hits / the vector with the minimum removed features
int num_matches = matches_temp1.size();
if(removed < max_removed)
{
max_removed = removed;
max_id = i;
matches1.clear();
matches1 = matches_temp1;
}
}
#ifdef DEBUG_OUT
std::cout << "Feature map number " << max_id << " has the highest hit with "<< matches1.size() - max_removed << " descriptors." << std::endl;
#endif
std::vector< std::vector< cv::DMatch> > matches_temp2;
// Here we match all query descriptors against all db descriptors and try to find
// matching descriptors
_brute_force_matcher.knnMatch(_descriptorsRefDB[max_id], descriptorQuery, matches_temp2, 2);
// The ratio test only accepts matches which are clear, without ambiguity.
// The best hit must be closer to the query descriptors than the second hit.
int removed = ratioTest(matches_temp2);
///////////////////////////////////////////////////////
// 6. Refinement; Symmetry test
// We only accept matches which appear in both knn-matches.
// It should not matter whether we test the database against the query descriptors
// or the query descriptors against the database.
// If we do not find the same solution in both directions, we toss the match.
std::vector<cv::DMatch> symMatches;
//......... part of the code omitted here .........
Example 14: init_database
/*****************************************************************************
Inits the database by loading all images from a certain directory, extracts the feature
keypoints and descriptors and saves them in the database
_keypointsRefDB and _descriptorsRefDB
@param directory_path: the path of all input images for query, e.g., C:/my_database
@param files: a std::vector object with a list of filenames.
*/
void init_database(std::string directory_path, std::vector<std::string> files)
{
// This code reads the image names from the argv variable, loads the images,
// detects keypoints, extracts the descriptors and pushes everything into
// two database objects, indicated by the ending DB.
for (int i = 0; i<files.size(); i++)
{
FeatureMap fm;
// Fetch the ref. image name from the input array
fm._ref_image_str = directory_path;
fm._ref_image_str.append("/");
fm._ref_image_str.append( files[i]);
bool ret = exists_test(fm._ref_image_str);
if (ret == false) continue;
// Load the image
fm._ref_image = cv::imread(fm._ref_image_str);
cvtColor( fm._ref_image , fm._ref_image , CV_BGR2GRAY);
// Detect the keypoints
_detector->detect(fm._ref_image,fm._keypointsRef);
std::cout << "For referemce image " << fm._ref_image_str << " - found " << fm._keypointsRef.size() << " keypoints" << std::endl;
// If keypoints found
if(fm._keypointsRef.size() > 0)
{
// extract descriptors
_extractor->compute( fm._ref_image, fm._keypointsRef, fm._descriptorRef);
// push descriptors and keypoints into database
_keypointsRefDB.push_back(fm._keypointsRef);
_descriptorsRefDB.push_back(fm._descriptorRef);
// count the total number of feature descriptors and keypoints in the database.
_num_ref_features_in_db += fm._keypointsRef.size();
}
// Draw the keypoints
/*
cv::Mat out;
cv::drawKeypoints(fm._ref_image , fm._keypointsRef, out);
cv::imshow("out", out );
cv::waitKey();
out.release();
*/
feature_map_database.push_back(fm);
}
// Converting the total number of features to a string.
std::strstream conv;
conv << _num_ref_features_in_db;
conv >> _num_ref_features_in_db_str;
}
Example 15: findCirclesGridAB
bool findCirclesGridAB( cv::InputArray _image, cv::Size patternSize,
cv::OutputArray _centers, int flags, const cv::Ptr<cv::FeatureDetector> &blobDetector )
{
bool isAsymmetricGrid = (flags & cv::CALIB_CB_ASYMMETRIC_GRID) ? true : false;
bool isSymmetricGrid = (flags & cv::CALIB_CB_SYMMETRIC_GRID ) ? true : false;
CV_Assert(isAsymmetricGrid ^ isSymmetricGrid);
cv::Mat image = _image.getMat();
std::vector<cv::Point2f> centers;
std::vector<cv::KeyPoint> keypoints;
blobDetector->detect(image, keypoints);
std::vector<cv::Point2f> points;
for (size_t i = 0; i < keypoints.size(); i++)
{
points.push_back (keypoints[i].pt);
}
if(flags & cv::CALIB_CB_CLUSTERING)
{
CirclesGridClusterFinder circlesGridClusterFinder(isAsymmetricGrid);
circlesGridClusterFinder.findGrid(points, patternSize, centers);
cv::Mat(centers).copyTo(_centers);
return !centers.empty();
}
CirclesGridFinderParameters parameters;
parameters.vertexPenalty = -0.6f;
parameters.vertexGain = 1;
parameters.existingVertexGain = 10000;
parameters.edgeGain = 1;
parameters.edgePenalty = -0.6f;
if(flags & cv::CALIB_CB_ASYMMETRIC_GRID)
parameters.gridType = CirclesGridFinderParameters::ASYMMETRIC_GRID;
if(flags & cv::CALIB_CB_SYMMETRIC_GRID)
parameters.gridType = CirclesGridFinderParameters::SYMMETRIC_GRID;
const int attempts = 2;
const size_t minHomographyPoints = 4;
cv::Mat H;
for (int i = 0; i < attempts; i++)
{
centers.clear();
CirclesGridFinder boxFinder(patternSize, points, parameters);
bool isFound = false;
//#define BE_QUIET 1
#if BE_QUIET
void* oldCbkData;
//cv::ErrorCallback oldCbk = redirectError(quiet_error, 0, &oldCbkData);
#endif
try
{
isFound = boxFinder.findHoles();
}
catch (cv::Exception)
{
}
#if BE_QUIET
redirectError(oldCbk, oldCbkData);
#endif
if (isFound)
{
switch(parameters.gridType)
{
case CirclesGridFinderParameters::SYMMETRIC_GRID:
boxFinder.getHoles(centers);
break;
case CirclesGridFinderParameters::ASYMMETRIC_GRID:
boxFinder.getAsymmetricHoles(centers);
break;
default:
CV_Error(CV_StsBadArg, "Unknown pattern type");
}
if (i != 0)
{
cv::Mat orgPointsMat;
cv::transform(centers, orgPointsMat, H.inv());
cv::convertPointsFromHomogeneous(orgPointsMat, centers);
}
cv::Mat(centers).copyTo(_centers);
return true;
}
boxFinder.getHoles(centers);
if (i != attempts - 1)
{
if (centers.size() < minHomographyPoints)
break;
H = CirclesGridFinder::rectifyGrid(boxFinder.getDetectedGridSize(), centers, points, points);
}
}
cv::Mat(centers).copyTo(_centers);
return false;
}
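Finally, a hedged usage sketch for findCirclesGridAB above, assuming a cv::SimpleBlobDetector behind the blobDetector pointer and a 4x11 asymmetric circle grid; the image path "grid.png" and the pattern size are illustrative assumptions:
#include <opencv2/opencv.hpp>
#include <vector>

// findCirclesGridAB() as defined in Example 15 above.
bool findCirclesGridAB(cv::InputArray _image, cv::Size patternSize,
                       cv::OutputArray _centers, int flags,
                       const cv::Ptr<cv::FeatureDetector> &blobDetector);

int main()
{
    cv::Mat img = cv::imread("grid.png", cv::IMREAD_GRAYSCALE);
    if (img.empty()) return -1;
    std::vector<cv::Point2f> centers;
    // The blob detector's detect() call provides the candidate circle centers.
    cv::Ptr<cv::FeatureDetector> blobDetector = cv::SimpleBlobDetector::create();
    bool found = findCirclesGridAB(img, cv::Size(4, 11), centers,
                                   cv::CALIB_CB_ASYMMETRIC_GRID, blobDetector);
    return found ? 0 : 1;
}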