本文整理汇总了C++中CFeatureList类的典型用法代码示例。如果您正苦于以下问题:C++ CFeatureList类的具体用法?C++ CFeatureList怎么用?C++ CFeatureList使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了CFeatureList类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: convertCvSeqInCFeatureList
// ------------------------------------------------------------------------------------
// convertCvSeqInCFeatureList
// ------------------------------------------------------------------------------------
/** Builds a fresh CFeatureList from a raw OpenCV sequence of SIFT-Hess
  * keypoints ("struct feature" elements).
  * IDs are assigned sequentially starting at \a init_ID; if a non-default ROI
  * was used for detection, (x,y) are shifted back to whole-image coordinates.
  * \note Only available when MRPT is built with OpenCV + SIFT-Hess support.
  */
void CFeatureExtraction::convertCvSeqInCFeatureList( void* features_, CFeatureList &list, unsigned int init_ID, const TImageROI &ROI ) const
{
#if MRPT_HAS_OPENCV && MRPT_HAS_SIFT_HESS
	CvSeq* features = reinterpret_cast<CvSeq*>( features_ );

	// A ROI is considered "defined" when any of its four bounds is non-zero:
	const bool usingROI =
		( ROI.xMin != 0 || ROI.xMax != 0 || ROI.yMin != 0 || ROI.yMax != 0 );

	const int n = features->total;
	ASSERT_(n > 0);

	list.clear();
	for( int k = 0; k < n; k++ )
	{
		struct feature* thisFeat = (feature*)cvGetSeqElem( features, k );

		CFeaturePtr feat = CFeature::Create();
		feat->ID = (TFeatureID)(k + init_ID);
		// Undo the ROI offset so coordinates refer to the full image:
		feat->x = thisFeat->x + (usingROI ? ROI.xMin : 0);
		feat->y = thisFeat->y + (usingROI ? ROI.yMin : 0);
		feat->type = featSIFT;
		feat->orientation = thisFeat->ori;
		feat->scale = thisFeat->scl;

		// Copy the raw descriptor, quantizing each component to a byte:
		feat->descriptors.SIFT.resize( thisFeat->d );
		for( int i = 0; i < thisFeat->d; i++ )
			feat->descriptors.SIFT[i] = (unsigned char)thisFeat->descr[i];

		list.push_back(feat);
	}
#else
	THROW_EXCEPTION("Method not available since MRPT has been compiled without OpenCV")
#endif //MRPT_HAS_OPENCV
}
示例2: trackFeatures_deleteOOB
/** Removes from \a trackedFeats every feature whose tracking status is neither
  * TRACKED nor IDLE, plus any feature that lies closer than
  * \a MIN_DIST_MARGIN_TO_STOP_TRACKING pixels to the image border.
  * \return The number of features erased from the list.
  */
inline size_t trackFeatures_deleteOOB(
	CFeatureList &trackedFeats,
	const size_t img_width, const size_t img_height,
	const int MIN_DIST_MARGIN_TO_STOP_TRACKING)
{
	CFeatureList::iterator itFeat = trackedFeats.begin();
	size_t n_removed = 0;
	while (itFeat!=trackedFeats.end())
	{
		const TFeatureTrackStatus status = (*itFeat)->track_status;
		bool eras = (status_TRACKED!=status && status_IDLE!=status);
		if (!eras)
		{
			// Also, check if it's too close to the image border:
			const float x= (*itFeat)->x;
			const float y= (*itFeat)->y;
			// BUG FIX: a local "static const float MIN_DIST_MARGIN_TO_STOP_TRACKING = 10;"
			// used to shadow the function parameter here, silently ignoring the
			// caller-supplied margin. The parameter is now used as intended.
			if (x<MIN_DIST_MARGIN_TO_STOP_TRACKING || y<MIN_DIST_MARGIN_TO_STOP_TRACKING ||
				x>(img_width-MIN_DIST_MARGIN_TO_STOP_TRACKING) ||
				y>(img_height-MIN_DIST_MARGIN_TO_STOP_TRACKING))
			{
				eras = true;
			}
		}
		if (eras) // Erase or keep?
		{
			// erase() returns the iterator to the next element, so no ++ here.
			itFeat = trackedFeats.erase(itFeat);
			n_removed++;
		}
		else ++itFeat;
	}
	return n_removed;
} // end of trackFeatures_deleteOOB
示例3: ASSERT_
// ------------------------------------------------------------------------------------
// insertCvSeqInCFeatureList
// ------------------------------------------------------------------------------------
/** Merges an OpenCV SIFT-Hess keypoint sequence into an EXISTING CFeatureList:
  * for each list entry whose (x,y) exactly matches the next keypoint, the ID,
  * orientation, scale, and SIFT descriptor fields are filled in.
  * \param features_ Opaque pointer to a CvSeq of "struct feature" elements.
  * \param list      [in/out] Feature list whose matching entries are updated.
  * \param init_ID   Base value added to the sequence index to form each ID.
  * \note Only available when MRPT is built with OpenCV + SIFT-Hess support.
  */
void CFeatureExtraction::insertCvSeqInCFeatureList( void* features_, CFeatureList &list, unsigned int init_ID ) const
{
#if MRPT_HAS_OPENCV && MRPT_HAS_SIFT_HESS
CvSeq* features = reinterpret_cast<CvSeq*>( features_ );
int n = features->total;
ASSERT_(n > 0);
CFeatureList::iterator itFeat;
struct feature* thisFeat;
int k;
// Walk both containers in lock-step: the list iterator advances only when a
// coordinate match is found, so both are assumed to follow the same order.
for( itFeat = list.begin(), k = 0; itFeat != list.end() && k < n; k++ )
{
thisFeat = (feature*)cvGetSeqElem( features, k );
// NOTE(review): exact float equality — presumably both coordinates originate
// from the same detector output and so match bit-for-bit; verify before
// reusing this method with coordinates from any other source.
if( (*itFeat)->x == thisFeat->x && (*itFeat)->y == thisFeat->y )
{
(*itFeat)->ID = (TFeatureID)(k + init_ID);
(*itFeat)->orientation = thisFeat->ori;
(*itFeat)->scale = thisFeat->scl;
// Copy the descriptor, quantizing each float component to an unsigned byte:
(*itFeat)->descriptors.SIFT.resize( thisFeat->d );
for( int i = 0; i < thisFeat->d; i++ )
(*itFeat)->descriptors.SIFT[i] = (unsigned char)thisFeat->descr[i];
itFeat++;
} // end if
} // end for
#else
THROW_EXCEPTION("Method not available since MRPT has been compiled without OpenCV")
#endif //MRPT_HAS_OPENCV
}
示例4:
/** Explicit specialization for CFeatureList: refreshes the stored image patch
  * of every correctly-tracked feature from the current grayscale frame.
  * Features whose new patch would fall outside the image (extract_patch
  * throws) are flagged as status_OOB instead.
  */
template <>  // FIX: explicit specializations require "template <>" ([temp.expl.spec])
inline void trackFeatures_updatePatch<CFeatureList>(CFeatureList &featureList,const CImage &cur_gray)
{
	for (CFeatureList::iterator itFeat = featureList.begin(); itFeat != featureList.end(); ++itFeat)
	{
		CFeature* ft = itFeat->pointer();
		if (ft->track_status!=status_TRACKED)
			continue; // Skip if it's not correctly tracked.

		const size_t patch_width  = ft->patch.getWidth();
		const size_t patch_height = ft->patch.getHeight();
		if (patch_width>0 && patch_height>0)
		{
			try
			{
				// Re-extract the patch centered on the (rounded) feature position:
				const int offset = (int)patch_width/2; // + 1;
				cur_gray.extract_patch(
					ft->patch,
					round( ft->x ) - offset,
					round( ft->y ) - offset,
					patch_width,
					patch_height );
			}
			catch (std::exception &)
			{
				ft->track_status = status_OOB; // Out of bounds!
			}
		}
	}
} // end of trackFeatures_updatePatch<>
示例5: int
/** Explicit specialization for CFeatureList: appends new FAST features to
  * \a featureList, taking candidates from \a new_feats in the order given by
  * \a sorted_indices. A candidate is accepted only if its KLT response is high
  * enough, it is far enough (kd-tree distance check) from already-tracked
  * features, and a full patch around it fits inside the image.
  * \param max_feat_ID_at_input [in/out] Highest feature ID so far; incremented
  *        for every feature actually added.
  */
template <>  // FIX: explicit specializations require "template <>" ([temp.expl.spec])
inline void trackFeatures_addNewFeats<CFeatureList>(CFeatureList &featureList,const TSimpleFeatureList &new_feats, const std::vector<size_t> &sorted_indices, const size_t nNewToCheck,const size_t maxNumFeatures,const float minimum_KLT_response_to_add,const double threshold_sqr_dist_to_add_new,const size_t patchSize,const CImage &cur_gray, TFeatureID &max_feat_ID_at_input)
{
	const TImageSize imgSize = cur_gray.getSize();
	// Keep a margin so the surrounding patch always fits inside the image:
	const int offset = (int)patchSize/2 + 1;
	const int w_off = int(imgSize.x - offset);
	const int h_off = int(imgSize.y - offset);

	for (size_t i=0;i<nNewToCheck && featureList.size()<maxNumFeatures;i++)
	{
		const TSimpleFeature &feat = new_feats[ sorted_indices[i] ];

		if (feat.response<minimum_KLT_response_to_add) continue;

		// Squared distance to the closest already-tracked feature (kd-tree):
		double min_dist_sqr = square(10000);
		if (!featureList.empty())
		{
			//m_timlog.enter("[CGenericFeatureTracker] add new features.kdtree");
			min_dist_sqr = featureList.kdTreeClosestPoint2DsqrError(feat.pt.x,feat.pt.y );
			//m_timlog.leave("[CGenericFeatureTracker] add new features.kdtree");
		}

		if (min_dist_sqr>threshold_sqr_dist_to_add_new &&
			feat.pt.x > offset &&
			feat.pt.y > offset &&
			feat.pt.x < w_off &&
			feat.pt.y < h_off )
		{
			// Add new feature:
			CFeaturePtr ft = CFeature::Create();
			ft->type = featFAST;
			ft->ID = ++max_feat_ID_at_input;
			ft->x = feat.pt.x;
			ft->y = feat.pt.y;
			ft->response = feat.response;
			ft->orientation = 0;
			ft->scale = 1;
			ft->patchSize = patchSize; // The size of the feature patch

			if( patchSize > 0 )
				cur_gray.extract_patch(
					ft->patch,
					round( ft->x ) - offset,
					round( ft->y ) - offset,
					patchSize,
					patchSize ); // Image patch surronding the feature

			featureList.push_back( ft );
		}
	}
} // end of trackFeatures_addNewFeats<>
示例6: TestExtractFeaturesTile
// ------------------------------------------------------
// TestCapture
// ------------------------------------------------------
void TestExtractFeaturesTile()
{
CDisplayWindow wind1,wind2;
CFeatureExtraction fExt;
CFeatureList featsHarris;
CImage img;
string the_img = myDataDir+string("test_image.jpg");
if (!img.loadFromFile(the_img ))
{
cerr << "Cannot load " << the_img << endl;
return;
}
cout << "Loaded test image: " << the_img << endl;
CTicTac tictac;
cout << "Extracting Harris features (tiled)... [f_harris_tiled.txt]";
fExt.options.featsType = featHarris;
fExt.options.harrisOptions.tile_image = true;
tictac.Tic();
fExt.detectFeatures( img, featsHarris );
cout << format(" %.03fms",tictac.Tac()*1000) << endl;
cout << "Detected " << featsHarris.size() << " features in " << endl;
featsHarris.saveToTextFile("f_harris_tiled.txt");
wind1.setWindowTitle("Harris detected features (Tiled image)");
wind1.showTiledImageAndPoints( img, featsHarris );
cout << "Extracting Harris features... [f_harris.txt]";
fExt.options.harrisOptions.tile_image = false;
tictac.Tic();
fExt.detectFeatures( img, featsHarris );
cout << format(" %.03fms",tictac.Tac()*1000) << endl;
featsHarris.saveToTextFile("f_harris.txt");
wind2.setWindowTitle("Harris detected features");
wind2.showTiledImageAndPoints( img, featsHarris );
mrpt::system::pause();
return;
}
示例7: benchmark_detectFeatures
double benchmark_detectFeatures(int N, [[maybe_unused]] int h)
{
// Generate a random image
CImage img;
getTestImage(0, img);
CFeatureExtraction fExt;
fExt.profiler.enable();
fExt.options.featsType = FEAT_TYPE;
for (int i = 0; i < N; i++)
{
CFeatureList fs;
fExt.detectFeatures(img, fs);
if (i == (N - 1))
std::cout << "(" << std::setw(4) << fs.size() << " found)\n";
}
return fExt.profiler.getMeanTime("detectFeatures");
}
示例8: TestTrackFeatures
// Demo: detect KLT features in a first image and track them into a second one,
// saving the feature list to text files before and after tracking.
void TestTrackFeatures()
{
CImage im1, im2;
// NOTE(review): the two load paths below lack the "J:" drive prefix used by
// the first save path — confirm these hard-coded demo paths are consistent.
im1.loadFromFile("/Trabajo/Experimentos/[2009] vOdometry Characterization/right1.jpg");
im2.loadFromFile("/Trabajo/Experimentos/[2009] vOdometry Characterization/right2.jpg");
CFeatureExtraction fExt;
CFeatureList feats;
// Detect KLT (good-features-to-track) keypoints in the first frame:
fExt.options.featsType = featKLT;
fExt.detectFeatures( im1, feats );
feats.saveToTextFile("J:/Trabajo/Experimentos/[2009] vOdometry Characterization/before.txt");
CFeatureTracker_KL tracker;
// tracker.extra_params["add_new_features"] = 1; // track, AND ALSO, add new features
// ...
// Do tracking:
tracker.trackFeatures(im1, im2, feats);
// "feats" was updated in-place by the tracker; save the tracked positions:
feats.saveToTextFile("/Trabajo/Experimentos/[2009] vOdometry Characterization/after.txt");
}
示例9: inImg_gray
// N_fast = 9, 10, 12
void CFeatureExtraction::extractFeaturesFASTER_N(
const int N_fast,
const mrpt::utils::CImage & inImg,
CFeatureList & feats,
unsigned int init_ID,
unsigned int nDesiredFeatures,
const TImageROI & ROI ) const
{
MRPT_START
#if MRPT_HAS_OPENCV
// Make sure we operate on a gray-scale version of the image:
const CImage inImg_gray( inImg, FAST_REF_OR_CONVERT_TO_GRAY );
const IplImage *IPL = inImg_gray.getAs<IplImage>();
TSimpleFeatureList corners;
TFeatureType type_of_this_feature;
switch (N_fast)
{
case 9: fast_corner_detect_9 (IPL,corners, options.FASTOptions.threshold, 0, NULL); type_of_this_feature=featFASTER9; break;
case 10: fast_corner_detect_10(IPL,corners, options.FASTOptions.threshold, 0, NULL); type_of_this_feature=featFASTER10; break;
case 12: fast_corner_detect_12(IPL,corners, options.FASTOptions.threshold, 0, NULL); type_of_this_feature=featFASTER12; break;
default:
THROW_EXCEPTION("Only the 9,10,12 FASTER detectors are implemented.")
break;
};
// *All* the features have been extracted.
const size_t N = corners.size();
// Now:
// 1) Sort them by "response": It's ~100 times faster to sort a list of
// indices "sorted_indices" than sorting directly the actual list of features "corners"
std::vector<size_t> sorted_indices(N);
for (size_t i=0;i<N;i++) sorted_indices[i]=i;
// Use KLT response
if (options.FASTOptions.use_KLT_response ||
nDesiredFeatures!=0 // If the user wants us to limit the number of features, we need to do it according to some quality measure
)
{
const int KLT_half_win = 4;
const int max_x = inImg_gray.getWidth() - 1 - KLT_half_win;
const int max_y = inImg_gray.getHeight() - 1 - KLT_half_win;
for (size_t i=0;i<N;i++)
{
const int x = corners[i].pt.x;
const int y = corners[i].pt.y;
if (x>KLT_half_win && y>KLT_half_win && x<=max_x && y<=max_y)
corners[i].response = inImg_gray.KLT_response(x,y,KLT_half_win);
else corners[i].response = -100;
}
std::sort( sorted_indices.begin(), sorted_indices.end(), KeypointResponseSorter<TSimpleFeatureList>(corners) );
}
else
{
for (size_t i=0;i<N;i++)
corners[i].response = 0;
}
// 2) Filter by "min-distance" (in options.FASTOptions.min_distance)
// 3) Convert to MRPT CFeatureList format.
// Steps 2 & 3 are done together in the while() below.
// The "min-distance" filter is done by means of a 2D binary matrix where each cell is marked when one
// feature falls within it. This is not exactly the same than a pure "min-distance" but is pretty close
// and for large numbers of features is much faster than brute force search of kd-trees.
// (An intermediate approach would be the creation of a mask image updated for each accepted feature, etc.)
const bool do_filter_min_dist = options.FASTOptions.min_distance>1;
// Used half the min-distance since we'll later mark as occupied the ranges [i-1,i+1] for a feature at "i"
const unsigned int occupied_grid_cell_size = options.FASTOptions.min_distance/2.0;
const float occupied_grid_cell_size_inv = 1.0f/occupied_grid_cell_size;
unsigned int grid_lx = !do_filter_min_dist ? 1 : (unsigned int)(1 + inImg.getWidth() * occupied_grid_cell_size_inv);
unsigned int grid_ly = !do_filter_min_dist ? 1 : (unsigned int)(1 + inImg.getHeight() * occupied_grid_cell_size_inv );
mrpt::math::CMatrixBool occupied_sections(grid_lx,grid_ly); // See the comments above for an explanation.
occupied_sections.fillAll(false);
unsigned int nMax = (nDesiredFeatures!=0 && N > nDesiredFeatures) ? nDesiredFeatures : N;
const int offset = (int)this->options.patchSize/2 + 1;
const int size_2 = options.patchSize/2;
const size_t imgH = inImg.getHeight();
const size_t imgW = inImg.getWidth();
unsigned int i = 0;
unsigned int cont = 0;
TFeatureID nextID = init_ID;
if( !options.addNewFeatures )
feats.clear();
while( cont != nMax && i!=N )
//.........这里部分代码省略.........
示例10: Test_Kinect
// ------------------------------------------------------
// Test_Kinect
// ------------------------------------------------------
void Test_Kinect()
{
// Launch grabbing thread:
// --------------------------------------------------------
TThreadParam thrPar;
std::thread thHandle = std::thread(thread_grabbing, std::ref(thrPar));
// Wait until data stream starts so we can say for sure the sensor has been
// initialized OK:
cout << "Waiting for sensor initialization...\n";
do
{
CObservation3DRangeScan::Ptr possiblyNewObs =
std::atomic_load(&thrPar.new_obs);
if (possiblyNewObs && possiblyNewObs->timestamp != INVALID_TIMESTAMP)
break;
else
std::this_thread::sleep_for(10ms);
} while (!thrPar.quit);
// Check error condition:
if (thrPar.quit) return;
// Feature tracking variables:
CFeatureList trackedFeats;
unsigned int step_num = 0;
bool SHOW_FEAT_IDS = true;
bool SHOW_RESPONSES = true;
CGenericFeatureTrackerAutoPtr tracker;
// "CFeatureTracker_KL" is by far the most robust implementation for now:
tracker = CGenericFeatureTrackerAutoPtr(new CFeatureTracker_KL);
tracker->enableTimeLogger(true); // Do time profiling.
// Set of parameters common to any tracker implementation:
// To see all the existing params and documentation, see
// mrpt::vision::CGenericFeatureTracker
// http://reference.mrpt.org/devel/structmrpt_1_1vision_1_1_c_generic_feature_tracker.html
tracker->extra_params["add_new_features"] =
1; // track, AND ALSO, add new features
tracker->extra_params["add_new_feat_min_separation"] = 25;
tracker->extra_params["add_new_feat_max_features"] = 150;
tracker->extra_params["add_new_feat_patch_size"] = 21;
tracker->extra_params["minimum_KLT_response_to_add"] = 40;
tracker->extra_params["check_KLT_response_every"] =
5; // Re-check the KLT-response to assure features are in good points.
tracker->extra_params["minimum_KLT_response"] =
25; // Re-check the KLT-response to assure features are in good points.
tracker->extra_params["update_patches_every"] = 0; // Update patches
// Specific params for "CFeatureTracker_KL"
tracker->extra_params["window_width"] = 25;
tracker->extra_params["window_height"] = 25;
// Global points map:
CColouredPointsMap globalPtsMap;
globalPtsMap.colorScheme.scheme =
CColouredPointsMap::cmFromIntensityImage; // Take points color from
// RGB+D observations
// globalPtsMap.colorScheme.scheme =
// CColouredPointsMap::cmFromHeightRelativeToSensorGray;
// Create window and prepare OpenGL object in the scene:
// --------------------------------------------------------
mrpt::gui::CDisplayWindow3D win3D("kinect-3d-slam 3D view", 800, 600);
win3D.setCameraAzimuthDeg(140);
win3D.setCameraElevationDeg(20);
win3D.setCameraZoom(8.0);
win3D.setFOV(90);
win3D.setCameraPointingToPoint(2.5, 0, 0);
mrpt::opengl::CPointCloudColoured::Ptr gl_points =
mrpt::make_aligned_shared<mrpt::opengl::CPointCloudColoured>();
gl_points->setPointSize(2.5);
mrpt::opengl::CSetOfObjects::Ptr gl_curFeats =
mrpt::make_aligned_shared<mrpt::opengl::CSetOfObjects>();
mrpt::opengl::CSetOfObjects::Ptr gl_keyframes =
mrpt::make_aligned_shared<mrpt::opengl::CSetOfObjects>();
mrpt::opengl::CPointCloudColoured::Ptr gl_points_map =
mrpt::make_aligned_shared<mrpt::opengl::CPointCloudColoured>();
gl_points_map->setPointSize(2.0);
const double aspect_ratio =
480.0 / 640.0; // kinect.rows() / double( kinect.cols() );
mrpt::opengl::CSetOfObjects::Ptr gl_cur_cam_corner =
mrpt::opengl::stock_objects::CornerXYZSimple(0.4f, 4);
opengl::COpenGLViewport::Ptr viewInt;
{
//.........这里部分代码省略.........
示例11: img_grayscale
/************************************************************************************************
* internal_computeSurfDescriptors
************************************************************************************************/
/** Computes SURF descriptors (and orientation/scale) for an EXISTING list of
  * feature positions; detection itself is skipped (cvExtractSURF is called
  * with useProvidedKeyPts=1).
  * \param inImg       Input image (converted to grayscale if needed).
  * \param in_features [in/out] Features whose descriptors are filled in-place.
  * \note Requires OpenCV >= 1.1.0.
  */
void CFeatureExtraction::internal_computeSurfDescriptors(
const mrpt::utils::CImage &inImg,
CFeatureList &in_features) const
{
#if MRPT_HAS_OPENCV && MRPT_OPENCV_VERSION_NUM >= 0x111
if (in_features.empty()) return;
const CImage img_grayscale(inImg, FAST_REF_OR_CONVERT_TO_GRAY);
const IplImage* cGrey = img_grayscale.getAs<IplImage>();
// All OpenCV sequences below (kp, desc) live inside this storage and are
// freed together by the single cvReleaseMemStorage() call at the end.
CvMemStorage *storage = cvCreateMemStorage(0);
// Fill in the desired key-points:
CvSeq *kp = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvSURFPoint), storage );
for (CFeatureList::iterator itList=in_features.begin();itList!=in_features.end();++itList)
{
CvSURFPoint point = cvSURFPoint(
cvPoint2D32f((*itList)->x,(*itList)->y),
0, // Laplacian
16 //sizes[layer]
);
cvSeqPush( kp, &point );
}
CvSeq *desc = NULL;
// Only computes the descriptors:
// Extract the SURF points:
CvSURFParams surf_params = cvSURFParams(options.SURFOptions.hessianThreshold, options.SURFOptions.rotation_invariant ? 1:0);
surf_params.nOctaves = options.SURFOptions.nOctaves;
surf_params.nOctaveLayers = options.SURFOptions.nLayersPerOctave;
cvExtractSURF( cGrey, NULL, &kp, &desc, storage, surf_params, 1 /* Use precomputed key-points */ );
// *** HAVE YOU HAD A COMPILER ERROR NEAR THIS LINE?? : You need OpenCV >=1.1.0, final release or a SVN version ***
// -----------------------------------------------------------------
// MRPT Wrapping
// -----------------------------------------------------------------
// Walk the MRPT list and the OpenCV result sequences in parallel: the i-th
// descriptor corresponds to the i-th keypoint pushed above, which was built
// from the i-th list entry.
CFeatureList::iterator itList;
int i;
for (i=0, itList=in_features.begin();itList!=in_features.end();itList++,i++)
{
// Get the OpenCV SURF point
CFeaturePtr ft = *itList;
CvSURFPoint *point = (CvSURFPoint*)cvGetSeqElem( kp, i );
ft->orientation = point->dir; // Orientation
ft->scale = point->size*1.2/9; // Scale
// Get the SURF descriptor
float* d = (float*)cvGetSeqElem( desc, i );
// Descriptor length: 128 floats (rotation-invariant) or 64 (upright SURF):
ft->descriptors.SURF.resize( options.SURFOptions.rotation_invariant ? 128 : 64 );
std::vector<float>::iterator itDesc;
unsigned int k;
for( k = 0, itDesc = ft->descriptors.SURF.begin(); k < ft->descriptors.SURF.size(); k++, itDesc++ )
*itDesc = d[k];
} // end for
cvReleaseMemStorage(&storage); // Free memory
#else
THROW_EXCEPTION("Method not available since either MRPT has been compiled without OpenCV or OpenCV version is incorrect (Required 1.1.0)")
#endif //MRPT_HAS_OPENCV
} // end internal_computeSurfDescriptors
示例12: extractFeaturesSURF
/************************************************************************************************
* extractFeaturesSURF *
************************************************************************************************/
/** Detects SURF keypoints in \a inImg and fills \a feats with them, including
  * descriptor, orientation, scale, ID and (optionally) an image patch around
  * each point. Features too close to the border for a full patch are skipped.
  * \param init_ID          First feature ID to assign.
  * \param nDesiredFeatures Maximum number of features to keep (0 = all).
  * \param ROI              Currently ignored (MRPT_UNUSED_PARAM below).
  * \note Requires OpenCV >= 1.1.0.
  */
void CFeatureExtraction::extractFeaturesSURF(
const mrpt::utils::CImage &inImg,
CFeatureList &feats,
unsigned int init_ID,
unsigned int nDesiredFeatures,
const TImageROI &ROI) const
{
MRPT_UNUSED_PARAM(ROI);
#if MRPT_HAS_OPENCV && MRPT_OPENCV_VERSION_NUM >= 0x111
const CImage img_grayscale(inImg, FAST_REF_OR_CONVERT_TO_GRAY);
const IplImage* cGrey = img_grayscale.getAs<IplImage>();
CvSeq *kp = NULL;
CvSeq *desc = NULL;
// kp and desc are allocated inside this storage; one cvReleaseMemStorage()
// at the end frees everything.
CvMemStorage *storage = cvCreateMemStorage(0);
// Extract the SURF points:
CvSURFParams surf_params = cvSURFParams(options.SURFOptions.hessianThreshold, options.SURFOptions.rotation_invariant ? 1:0);
surf_params.nOctaves = options.SURFOptions.nOctaves;
surf_params.nOctaveLayers = options.SURFOptions.nLayersPerOctave;
cvExtractSURF( cGrey, NULL, &kp, &desc, storage, surf_params);
// -----------------------------------------------------------------
// MRPT Wrapping
// -----------------------------------------------------------------
feats.clear();
unsigned int nCFeats = init_ID;
int limit;
// Patch-extraction margin around each feature center:
int offset = (int)this->options.patchSize/2 + 1;
unsigned int imgH = inImg.getHeight();
unsigned int imgW = inImg.getWidth();
// Cap the number of converted features at nDesiredFeatures (0 = keep all):
if( nDesiredFeatures == 0 )
limit = kp->total;
else
limit = (int)nDesiredFeatures < kp->total ? (int)nDesiredFeatures : kp->total;
for( int i = 0; i < limit; i++ )
{
// Get the OpenCV SURF point
CvSURFPoint *point;
CFeaturePtr ft = CFeature::Create();
point = (CvSURFPoint*)cvGetSeqElem( kp, i );
// Bounding box of the would-be patch, to reject border features:
const int xBorderInf = (int)floor( point->pt.x - options.patchSize/2 );
const int xBorderSup = (int)floor( point->pt.x + options.patchSize/2 );
const int yBorderInf = (int)floor( point->pt.y - options.patchSize/2 );
const int yBorderSup = (int)floor( point->pt.y + options.patchSize/2 );
// Accept the feature if patches are disabled, or the patch fully fits:
if( options.patchSize == 0 || ( (xBorderSup < (int)imgW) && (xBorderInf > 0) && (yBorderSup < (int)imgH) && (yBorderInf > 0) ) )
{
ft->type = featSURF;
ft->x = point->pt.x; // X position
ft->y = point->pt.y; // Y position
ft->orientation = point->dir; // Orientation
ft->scale = point->size*1.2/9; // Scale
ft->ID = nCFeats++; // Feature ID into extraction
ft->patchSize = options.patchSize; // The size of the feature patch
if( options.patchSize > 0 )
{
inImg.extract_patch(
ft->patch,
round( ft->x ) - offset,
round( ft->y ) - offset,
options.patchSize,
options.patchSize ); // Image patch surronding the feature
}
// Get the SURF descriptor
float* d = (float*)cvGetSeqElem( desc, i );
// Descriptor length: 128 floats (rotation-invariant) or 64 (upright SURF):
ft->descriptors.SURF.resize( options.SURFOptions.rotation_invariant ? 128 : 64 );
std::vector<float>::iterator itDesc;
unsigned int k;
for( k = 0, itDesc = ft->descriptors.SURF.begin(); k < ft->descriptors.SURF.size(); k++, itDesc++ )
*itDesc = d[k];
feats.push_back( ft );
} // end if
} // end for
cvReleaseMemStorage(&storage); // Free memory
#else
THROW_EXCEPTION("Method not available since either MRPT has been compiled without OpenCV or OpenCV version is incorrect (Required 1.1.0)")
#endif //MRPT_HAS_OPENCV
} // end extractFeaturesSURF
示例13: img_grayscale
/************************************************************************************************
* extractFeaturesSIFT *
************************************************************************************************/
void CFeatureExtraction::extractFeaturesSIFT(
const CImage &img,
CFeatureList &feats,
unsigned int init_ID,
unsigned int nDesiredFeatures,
const TImageROI &ROI) const
{
bool usingROI = false;
if( ROI.xMin != 0 || ROI.xMax != 0 || ROI.yMin != 0 || ROI.yMax != 0 )
usingROI = true; // A ROI has been defined
// ROI can not be managed properly (yet) with these method, so we extract a subimage
// use a smart pointer so we just copy the pointer if the image is grayscale, or we'll create a new one if it was RGB:
CImage img_grayscale(img, FAST_REF_OR_CONVERT_TO_GRAY); // Was: auxImgPtr;
if( usingROI )
{
ASSERT_( ROI.xMin >= 0 && ROI.xMin < ROI.xMax && ROI.xMax < img.getWidth() && ROI.yMin >= 0 && ROI.yMax < img.getHeight() && ROI.yMin < ROI.yMax );
CImage auximg;
img_grayscale.extract_patch( auximg, ROI.xMin, ROI.yMin, ROI.xMax-ROI.xMin+1, ROI.yMax-ROI.yMin+1 ); // Subimage in "auxImg"
img_grayscale.swap(auximg);
}
switch( options.SIFTOptions.implementation )
{
// --------------------------------------------------------------------------------------
// Binary in C# -> OPTIONAL: Feature position already computed
// --------------------------------------------------------------------------------------
case CSBinary:
{
#ifdef MRPT_OS_WINDOWS
char filImg[2000],filOut[2000],filFeat[2000];
char paramImg[2000];
GetTempPathA(1000,filOut); os::strcat(filOut,1000,"temp_out.txt"); // OUTPUT FILE
GetTempPathA(1000,filImg); os::strcat(filImg,1000,"temp_img.bmp"); // INPUT IMAGE (BMP) FOR BINARY IN (C#)
bool onlyDesc = feats.size() > 0 ? true : false;
if( onlyDesc )
{
GetTempPathA(1000,filFeat); os::strcat(filFeat,1000,"temp_feats.txt"); // KEYPOINTS INPUT FILE
CMatrix listPoints(feats.size(),2);
for (size_t i= 0;i<feats.size();i++)
{
listPoints(i,0) = feats[i]->x;
listPoints(i,1) = feats[i]->y;
}
listPoints.saveToTextFile( filFeat, MATRIX_FORMAT_FIXED /*Float format*/ );
} // end if
// -------------------------------------------
// CALL TO "extractSIFT.exe"
// -------------------------------------------
img_grayscale.saveToFile( filImg );
// ------------------------------------
// Version with "CreateProcess":
// ------------------------------------
os::strcpy(paramImg,1000,"extractSIFT.exe -i"); os::strcat(paramImg,1000,filImg);
os::strcat(paramImg,1000," -f"); os::strcat(paramImg,1000,filOut);
os::strcat(paramImg,1000," -l"); os::strcat(paramImg,1000,filFeat);
// ------------------------------------
// Launch process
// ------------------------------------
bool ret = mrpt::system::launchProcess( paramImg );
if( !ret )
THROW_EXCEPTION( "[extractFeaturesSIFT] Could not launch external process... (extractSIFT.exe)" )
// Process Results
CFeatureList::iterator itFeat = feats.begin();
size_t nFeats;
CMatrix aux;
aux.loadFromTextFile( filOut );
std::cout << "[computeSiftFeatures] " << aux.getRowCount() << " features." << std::endl;
if( onlyDesc )
nFeats = feats.size();
else
{
nFeats = aux.getRowCount();
feats.resize( nFeats );
}
for( size_t i = 0;
itFeat != feats.end();
i++, itFeat++)
{
(*itFeat)->type = featSIFT;
(*itFeat)->x = usingROI ? aux(i,0) + ROI.xMin : aux(i,0);
(*itFeat)->y = usingROI ? aux(i,1) + ROI.yMin : aux(i,1);
(*itFeat)->orientation = aux(i,2);
(*itFeat)->scale = aux(i,3);
//.........这里部分代码省略.........
示例14: selectGoodFeaturesKLT
/************************************************************************************************
* selectGoodFeaturesKLT *
************************************************************************************************/
void CFeatureExtraction::selectGoodFeaturesKLT(
const mrpt::utils::CImage &inImg,
CFeatureList &feats,
unsigned int init_ID,
unsigned int nDesiredFeatures,
void *mask_ ) const
{
//#define VERBOSE_TIMING
#ifdef VERBOSE_TIMING
CTicTac tictac;
#endif
MRPT_START
#if MRPT_HAS_OPENCV
const unsigned int MAX_COUNT = 300;
// Reinterpret opencv formal arguments
CvMatrix *mask = reinterpret_cast<CvMatrix*>(mask_);
// -----------------------------------------------------------------
// Create OpenCV Local Variables
// -----------------------------------------------------------------
int count = 0;
int nPts;
CvImage img, cGrey;
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
img.attach( const_cast<IplImage*>(inImg.getAs<IplImage>()), false ); // Attach Image as IplImage and do not use ref counter
#ifdef VERBOSE_TIMING
cout << "[KLT] Attach: " << tictac.Tac()*1000.0f << endl;
#endif
if( img.channels() == 1 )
cGrey = img; // Input image is already 'grayscale'
else
{
cGrey.create( cvGetSize( img ), 8, 1);
cvCvtColor( img, cGrey, CV_BGR2GRAY ); // Convert input image into 'grayscale'
}
nDesiredFeatures <= 0 ? nPts = MAX_COUNT : nPts = nDesiredFeatures;
std::vector<CvPoint2D32f> points(nPts);
CvImage eig, temp; // temporary and auxiliary images
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
eig.create( cvGetSize( cGrey ), 32, 1 );
temp.create( cvGetSize( cGrey ), 32, 1 );
#ifdef VERBOSE_TIMING
cout << "[KLT] Create: " << tictac.Tac()*1000.0f << endl;
#endif
count = nPts; // Number of points to find
#if 0 // Temporary debug
{
static int i=0;
cvSaveImage( format("debug_map_%05i.bmp",++i).c_str(), cGrey);
}
#endif
// -----------------------------------------------------------------
// Select good features with subpixel accuracy (USING HARRIS OR KLT)
// -----------------------------------------------------------------
if( options.featsType == featHarris )
{
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
cvGoodFeaturesToTrack( cGrey, eig, temp, &points[0], &count, // input and output data
(double)options.harrisOptions.threshold, // for rejecting weak local maxima ( with min_eig < threshold*max(eig_image) )
(double)options.harrisOptions.min_distance, // minimum distance between features
mask ? (*mask) : static_cast<const CvMat*>(NULL), // ROI
(double)options.harrisOptions.radius, // size of the block of pixels used
1, // use Harris
options.harrisOptions.k ); // k factor for the Harris algorithm
#ifdef VERBOSE_TIMING
cout << "[KLT] Find feats: " << tictac.Tac()*1000.0f << endl;
#endif
}
else
{
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
cvGoodFeaturesToTrack( cGrey, eig, temp, &points[0], &count, // input and output data
(double)options.KLTOptions.threshold, // for rejecting weak local maxima ( with min_eig < threshold*max(eig_image) )
(double)options.KLTOptions.min_distance, // minimum distance between features
mask ? (*mask) : static_cast<const CvMat*>(NULL), // ROI
options.KLTOptions.radius, // size of the block of pixels used
0, // use Kanade Lucas Tomasi
0.04 ); // un-used parameter
//.........这里部分代码省略.........
示例15: extractFeaturesKLT
/************************************************************************************************
* extractFeaturesKLT
************************************************************************************************/
void CFeatureExtraction::extractFeaturesKLT(
const mrpt::utils::CImage &inImg,
CFeatureList &feats,
unsigned int init_ID,
unsigned int nDesiredFeatures,
const TImageROI &ROI) const
{
//#define VERBOSE_TIMING
#ifdef VERBOSE_TIMING
CTicTac tictac;
#endif
MRPT_START
#if MRPT_HAS_OPENCV
const unsigned int MAX_COUNT = 300;
// -----------------------------------------------------------------
// Create OpenCV Local Variables
// -----------------------------------------------------------------
int count = 0;
int nPts;
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
const cv::Mat img( cv::cvarrToMat( inImg.getAs<IplImage>() ) );
#ifdef VERBOSE_TIMING
cout << "[KLT] Attach: " << tictac.Tac()*1000.0f << endl;
#endif
const CImage inImg_gray( inImg, FAST_REF_OR_CONVERT_TO_GRAY );
const cv::Mat cGrey( cv::cvarrToMat( inImg_gray.getAs<IplImage>() ) );
nDesiredFeatures <= 0 ? nPts = MAX_COUNT : nPts = nDesiredFeatures;
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
#ifdef VERBOSE_TIMING
cout << "[KLT] Create: " << tictac.Tac()*1000.0f << endl;
#endif
count = nPts; // Number of points to find
// -----------------------------------------------------------------
// Select good features with subpixel accuracy (USING HARRIS OR KLT)
// -----------------------------------------------------------------
const bool use_harris = ( options.featsType == featHarris );
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
std::vector<cv::Point2f> points;
cv::goodFeaturesToTrack(
cGrey,points, nPts,
(double)options.harrisOptions.threshold, // for rejecting weak local maxima ( with min_eig < threshold*max(eig_image) )
(double)options.harrisOptions.min_distance, // minimum distance between features
cv::noArray(), // mask
3, // blocksize
use_harris, /* harris */
options.harrisOptions.k
);
#ifdef VERBOSE_TIMING
cout << "[KLT] Find feats: " << tictac.Tac()*1000.0f << endl;
#endif
if( nDesiredFeatures > 0 && count < nPts )
cout << "\n[WARNING][selectGoodFeaturesKLT]: Only " << count << " of " << nDesiredFeatures << " points could be extracted in the image." << endl;
if( options.FIND_SUBPIXEL )
{
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
// Subpixel interpolation
cv::cornerSubPix(cGrey,points,
cv::Size(3,3), cv::Size(-1,-1),
cv::TermCriteria( CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.05 ));
#ifdef VERBOSE_TIMING
cout << "[KLT] subpixel: " << tictac.Tac()*1000.0f << endl;
#endif
}
// -----------------------------------------------------------------
// Fill output structure
// -----------------------------------------------------------------
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
feats.clear();
unsigned int borderFeats = 0;
unsigned int nCFeats = init_ID;
int i = 0;
const int limit = min( nPts, count );
//.........这里部分代码省略.........