This article collects typical usage examples of the C++ method CFeatureList::clear. If you are unsure what CFeatureList::clear does or how to call it, the curated code examples below may help. You can also read further about its containing class, CFeatureList.
Seven code examples of CFeatureList::clear are shown below, ordered by popularity by default.
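Before the examples, here is a minimal sketch of where CFeatureList::clear typically shows up in calling code: the list is emptied between frames and then refilled by the extractor. The header paths, the detectFeatures call and the featKLT choice follow the MRPT 1.x API used in the examples below, but this snippet is an illustration, not part of the library:
#include <mrpt/vision/CFeatureExtraction.h>  // CFeatureExtraction, CFeatureList (MRPT 1.x layout assumed)
#include <mrpt/utils/CImage.h>

using mrpt::vision::CFeatureExtraction;
using mrpt::vision::CFeatureList;

// Re-detect features on every incoming frame, reusing the same list object.
void processFrame(const mrpt::utils::CImage& frame, CFeatureExtraction& fExt, CFeatureList& feats)
{
    feats.clear();                                   // drop the features of the previous frame
    fExt.options.featsType = mrpt::vision::featKLT;  // any supported detector would do
    fExt.detectFeatures(frame, feats);               // the extractor fills the (now empty) list
}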
Example 1: convertCvSeqInCFeatureList
// ------------------------------------------------------------------------------------
// convertCvSeqInCFeatureList
// ------------------------------------------------------------------------------------
void CFeatureExtraction::convertCvSeqInCFeatureList( void* features_, CFeatureList &list, unsigned int init_ID, const TImageROI &ROI ) const
{
#if MRPT_HAS_OPENCV && MRPT_HAS_SIFT_HESS
CvSeq* features = reinterpret_cast<CvSeq*>( features_ );
// Is there a defined ROI?
bool usingROI = false;
if( ROI.xMin != 0 || ROI.xMax != 0 || ROI.yMin != 0 || ROI.yMax != 0 )
usingROI = true;
int n = features->total;
ASSERT_(n > 0);
list.clear();
struct feature* thisFeat;
for( int k = 0; k < n; k++ )
{
thisFeat = (feature*)cvGetSeqElem( features, k );
CFeaturePtr feat = CFeature::Create();
feat->ID = (TFeatureID)(k + init_ID);
feat->x = usingROI ? thisFeat->x + ROI.xMin : thisFeat->x;
feat->y = usingROI ? thisFeat->y + ROI.yMin : thisFeat->y;
feat->type = featSIFT;
feat->orientation = thisFeat->ori;
feat->scale = thisFeat->scl;
feat->descriptors.SIFT.resize( thisFeat->d );
for( int i = 0; i < thisFeat->d; i++ )
feat->descriptors.SIFT[i] = (unsigned char)thisFeat->descr[i];
list.push_back(feat);
} // end for
#else
THROW_EXCEPTION("Method not available since MRPT has been compiled without OpenCV")
#endif //MRPT_HAS_OPENCV
}
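The helper above follows a simple clear-then-append pattern: empty the output list, then push one freshly created CFeature per detected point, shifting coordinates back to image space when an ROI was used. A stripped-down sketch of the same pattern with a hypothetical plain std::vector input instead of a CvSeq (the MRPT types are those already used above; SimpleKeypoint is invented for illustration):
// Hypothetical point record standing in for the CvSeq element used above.
struct SimpleKeypoint { float x, y, scale, orientation; };

void simpleKeypointsToFeatureList(
    const std::vector<SimpleKeypoint>& pts, CFeatureList& list,
    unsigned int init_ID, const TImageROI& ROI)
{
    const bool usingROI = (ROI.xMin != 0 || ROI.xMax != 0 || ROI.yMin != 0 || ROI.yMax != 0);
    list.clear();  // always start from an empty list
    for (size_t k = 0; k < pts.size(); k++)
    {
        CFeaturePtr feat = CFeature::Create();
        feat->ID          = (TFeatureID)(k + init_ID);
        feat->x           = usingROI ? pts[k].x + ROI.xMin : pts[k].x;  // ROI-relative -> image coords
        feat->y           = usingROI ? pts[k].y + ROI.yMin : pts[k].y;
        feat->scale       = pts[k].scale;
        feat->orientation = pts[k].orientation;
        list.push_back(feat);
    }
}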
Example 2: extractFeaturesKLT
/************************************************************************************************
* extractFeaturesKLT
************************************************************************************************/
void CFeatureExtraction::extractFeaturesKLT(
const mrpt::utils::CImage &inImg,
CFeatureList &feats,
unsigned int init_ID,
unsigned int nDesiredFeatures,
const TImageROI &ROI) const
{
//#define VERBOSE_TIMING
#ifdef VERBOSE_TIMING
CTicTac tictac;
#endif
MRPT_START
#if MRPT_HAS_OPENCV
const unsigned int MAX_COUNT = 300;
// -----------------------------------------------------------------
// Create OpenCV Local Variables
// -----------------------------------------------------------------
int count = 0;
int nPts;
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
const cv::Mat img( cv::cvarrToMat( inImg.getAs<IplImage>() ) );
#ifdef VERBOSE_TIMING
cout << "[KLT] Attach: " << tictac.Tac()*1000.0f << endl;
#endif
const CImage inImg_gray( inImg, FAST_REF_OR_CONVERT_TO_GRAY );
const cv::Mat cGrey( cv::cvarrToMat( inImg_gray.getAs<IplImage>() ) );
nPts = (nDesiredFeatures <= 0) ? MAX_COUNT : nDesiredFeatures;
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
#ifdef VERBOSE_TIMING
cout << "[KLT] Create: " << tictac.Tac()*1000.0f << endl;
#endif
count = nPts; // Number of points to find
// -----------------------------------------------------------------
// Select good features with subpixel accuracy (USING HARRIS OR KLT)
// -----------------------------------------------------------------
const bool use_harris = ( options.featsType == featHarris );
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
std::vector<cv::Point2f> points;
cv::goodFeaturesToTrack(
cGrey,points, nPts,
(double)options.harrisOptions.threshold, // for rejecting weak local maxima ( with min_eig < threshold*max(eig_image) )
(double)options.harrisOptions.min_distance, // minimum distance between features
cv::noArray(), // mask
3, // blocksize
use_harris, /* harris */
options.harrisOptions.k
);
#ifdef VERBOSE_TIMING
cout << "[KLT] Find feats: " << tictac.Tac()*1000.0f << endl;
#endif
count = (int)points.size(); // Actual number of corners returned by cv::goodFeaturesToTrack
if( nDesiredFeatures > 0 && count < nPts )
cout << "\n[WARNING][extractFeaturesKLT]: Only " << count << " of " << nDesiredFeatures << " points could be extracted in the image." << endl;
if( options.FIND_SUBPIXEL )
{
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
// Subpixel interpolation
cv::cornerSubPix(cGrey,points,
cv::Size(3,3), cv::Size(-1,-1),
cv::TermCriteria( CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.05 ));
#ifdef VERBOSE_TIMING
cout << "[KLT] subpixel: " << tictac.Tac()*1000.0f << endl;
#endif
}
// -----------------------------------------------------------------
// Fill output structure
// -----------------------------------------------------------------
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
feats.clear();
unsigned int borderFeats = 0;
unsigned int nCFeats = init_ID;
int i = 0;
const int limit = min( nPts, count );
//......... (part of the code is omitted here) .........
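The loop that fills the output list is truncated at this point. Its complete counterpart appears in Example 6 below; as a rough, hedged sketch (reusing the variables declared above and assuming the same border test against options.patchSize), the omitted part looks roughly like this:
// Sketch only -- the omitted MRPT code may differ in its details.
while (i < limit)
{
    // Reject points whose patch would fall (partially) outside the image:
    const int xBInf = (int)floor(points[i].x - options.patchSize / 2);
    const int xBSup = (int)floor(points[i].x + options.patchSize / 2);
    const int yBInf = (int)floor(points[i].y - options.patchSize / 2);
    const int yBSup = (int)floor(points[i].y + options.patchSize / 2);
    if (options.patchSize == 0 ||
        (xBSup < (int)inImg.getWidth() && xBInf > 0 &&
         yBSup < (int)inImg.getHeight() && yBInf > 0))
    {
        CFeaturePtr ft = CFeature::Create();
        ft->type = featKLT;
        ft->x  = points[i].x;
        ft->y  = points[i].y;
        ft->ID = nCFeats++;
        feats.push_back(ft);
    }
    else
        borderFeats++;
    i++;
}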
Example 3: extractFeaturesSIFT
//......... (part of the code is omitted here) .........
// The descriptor:
aux.extractRow(i, (*itFeat)->descriptors.SIFT, 4);
}
remove(filImg);
remove(filOut);
#else
THROW_EXCEPTION("Unfortunately, this SIFT Implementation only runs in Windows OS, try Hess implementation");
#endif
break;
} // end case Binary in C#
case VedaldiBinary:
{
// --------------------------------------------------------------------------------------
// Binary by Vedaldi: NOT IMPLEMENTED YET. Input in PGM format
// --------------------------------------------------------------------------------------
#ifdef MRPT_OS_WINDOWS
THROW_EXCEPTION("Usage of Vedaldi Binary not implemented yet, please, try another one");
#else
THROW_EXCEPTION("Unfortunately, this SIFT Implementation only runs in Windows OS, try Hess implementation");
#endif
break;
} // end case Binary by Vedaldi
// --------------------------------------------------------------------------------------
// Binary by David Lowe
// --------------------------------------------------------------------------------------
case LoweBinary: // Binary by Lowe
{
#ifdef MRPT_OS_WINDOWS
char filImg[2000],filOut[2000];
char paramImg[2000];
feats.clear();
GetTempPathA(1000,filOut); os::strcat(filOut,1000,"temp_out.txt"); // OUTPUT FILE
GetTempPathA(1000,filImg); os::strcat(filImg,1000,"temp_img.pgm"); // INPUT IMAGE (PGM) FOR ORIGINAL BINARY BY LOWE
bool valid = img_grayscale.saveToFile( filImg );
if(!valid)
THROW_EXCEPTION( "An error occurred when saving input image into a .pgm file");
// CONVERT TO UNCOMPRESSED RAW PGM (TODO: solve this issue in a better way)
os::strcpy( paramImg,1000, format( "cmd /C gmic.exe %s -o %s -quiet", filImg, filImg ).c_str() );
bool ret = mrpt::system::launchProcess( paramImg );
if(!ret)
THROW_EXCEPTION("[extractFeaturesSIFT] Could not launch external process... (gmic.exe)");
// ------------------------------------
// Version with "CreateProcess":
// ------------------------------------
os::strcpy(paramImg,1000,"cmd /C siftWin32.exe <"); os::strcat(paramImg,1000,filImg);
os::strcat(paramImg,1000," >"); os::strcat(paramImg,1000,filOut);
ret = mrpt::system::launchProcess( paramImg );
if(!ret)
THROW_EXCEPTION("[extractFeaturesSIFT] Could not launch external process... (siftWin32.exe)");
// ------------------------------------
// Process Results
// ------------------------------------
unsigned int dLen, nFeats;
FILE *f = os::fopen( filOut, "rt");
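The parsing of the siftWin32 output file is truncated here. Lowe's keypoint files typically begin with a header line holding the number of keypoints and the descriptor length (usually 128), followed, for each keypoint, by row, column, scale and orientation and then the descriptor values. A heavily hedged sketch of such a parser, reusing the variables opened above (the omitted MRPT code may handle this differently):
// Sketch only: read the keypoints written by siftWin32.exe into "filOut".
if (f && fscanf(f, "%u %u", &nFeats, &dLen) == 2)
{
    for (unsigned int k = 0; k < nFeats; k++)
    {
        float row, col, scale, ori;
        fscanf(f, "%f %f %f %f", &row, &col, &scale, &ori);  // Lowe stores row first, then column
        CFeaturePtr ft = CFeature::Create();
        ft->type        = featSIFT;
        ft->x           = col;
        ft->y           = row;
        ft->scale       = scale;
        ft->orientation = ori;
        ft->ID          = init_ID + k;
        ft->descriptors.SIFT.resize(dLen);
        for (unsigned int d = 0; d < dLen; d++)
        {
            int v;
            fscanf(f, "%d", &v);
            ft->descriptors.SIFT[d] = (unsigned char)v;
        }
        feats.push_back(ft);
    }
    os::fclose(f);
}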
Example 4: extractFeaturesFASTER_N
// N_fast = 9, 10, 12
void CFeatureExtraction::extractFeaturesFASTER_N(
const int N_fast,
const mrpt::utils::CImage & inImg,
CFeatureList & feats,
unsigned int init_ID,
unsigned int nDesiredFeatures,
const TImageROI & ROI ) const
{
MRPT_START
#if MRPT_HAS_OPENCV
// Make sure we operate on a gray-scale version of the image:
const CImage inImg_gray( inImg, FAST_REF_OR_CONVERT_TO_GRAY );
const IplImage *IPL = inImg_gray.getAs<IplImage>();
TSimpleFeatureList corners;
TFeatureType type_of_this_feature;
switch (N_fast)
{
case 9: fast_corner_detect_9 (IPL,corners, options.FASTOptions.threshold, 0, NULL); type_of_this_feature=featFASTER9; break;
case 10: fast_corner_detect_10(IPL,corners, options.FASTOptions.threshold, 0, NULL); type_of_this_feature=featFASTER10; break;
case 12: fast_corner_detect_12(IPL,corners, options.FASTOptions.threshold, 0, NULL); type_of_this_feature=featFASTER12; break;
default:
THROW_EXCEPTION("Only the 9,10,12 FASTER detectors are implemented.")
break;
};
// *All* the features have been extracted.
const size_t N = corners.size();
// Now:
// 1) Sort them by "response": it is roughly 100 times faster to sort a list of
// indices ("sorted_indices") than to sort the actual list of features ("corners") directly.
std::vector<size_t> sorted_indices(N);
for (size_t i=0;i<N;i++) sorted_indices[i]=i;
// Use KLT response
if (options.FASTOptions.use_KLT_response ||
nDesiredFeatures!=0 // If the user wants us to limit the number of features, we need to do it according to some quality measure
)
{
const int KLT_half_win = 4;
const int max_x = inImg_gray.getWidth() - 1 - KLT_half_win;
const int max_y = inImg_gray.getHeight() - 1 - KLT_half_win;
for (size_t i=0;i<N;i++)
{
const int x = corners[i].pt.x;
const int y = corners[i].pt.y;
if (x>KLT_half_win && y>KLT_half_win && x<=max_x && y<=max_y)
corners[i].response = inImg_gray.KLT_response(x,y,KLT_half_win);
else corners[i].response = -100;
}
std::sort( sorted_indices.begin(), sorted_indices.end(), KeypointResponseSorter<TSimpleFeatureList>(corners) );
}
else
{
for (size_t i=0;i<N;i++)
corners[i].response = 0;
}
// 2) Filter by "min-distance" (in options.FASTOptions.min_distance)
// 3) Convert to MRPT CFeatureList format.
// Steps 2 & 3 are done together in the while() below.
// The "min-distance" filter is done by means of a 2D binary matrix where each cell is marked when one
// feature falls within it. This is not exactly the same than a pure "min-distance" but is pretty close
// and for large numbers of features is much faster than brute force search of kd-trees.
// (An intermediate approach would be the creation of a mask image updated for each accepted feature, etc.)
const bool do_filter_min_dist = options.FASTOptions.min_distance>1;
// Used half the min-distance since we'll later mark as occupied the ranges [i-1,i+1] for a feature at "i"
const unsigned int occupied_grid_cell_size = options.FASTOptions.min_distance/2.0;
const float occupied_grid_cell_size_inv = 1.0f/occupied_grid_cell_size;
unsigned int grid_lx = !do_filter_min_dist ? 1 : (unsigned int)(1 + inImg.getWidth() * occupied_grid_cell_size_inv);
unsigned int grid_ly = !do_filter_min_dist ? 1 : (unsigned int)(1 + inImg.getHeight() * occupied_grid_cell_size_inv );
mrpt::math::CMatrixBool occupied_sections(grid_lx,grid_ly); // See the comments above for an explanation.
occupied_sections.fillAll(false);
unsigned int nMax = (nDesiredFeatures!=0 && N > nDesiredFeatures) ? nDesiredFeatures : N;
const int offset = (int)this->options.patchSize/2 + 1;
const int size_2 = options.patchSize/2;
const size_t imgH = inImg.getHeight();
const size_t imgW = inImg.getWidth();
unsigned int i = 0;
unsigned int cont = 0;
TFeatureID nextID = init_ID;
if( !options.addNewFeatures )
feats.clear();
while( cont != nMax && i!=N )
//......... (part of the code is omitted here) .........
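The truncated while() loop applies the occupancy-grid test described in the comments above. As an isolated, hedged sketch of that test (names mirror the variables declared above, and the CMatrixBool accessors operator() and set_unsafe are assumed from the MRPT 1.x API): a candidate at pixel (x, y) is mapped to a grid cell, rejected if that cell is already occupied, and otherwise the surrounding 3x3 block of cells is marked so that later candidates closer than roughly min_distance are dropped.
// Sketch only: the min-distance filter via the boolean occupancy grid.
bool acceptCandidate(int x, int y,
                     mrpt::math::CMatrixBool& occupied_sections,
                     float occupied_grid_cell_size_inv,
                     unsigned int grid_lx, unsigned int grid_ly)
{
    const int cx = (int)(x * occupied_grid_cell_size_inv);
    const int cy = (int)(y * occupied_grid_cell_size_inv);
    if (occupied_sections(cx, cy))
        return false;  // too close to an already accepted feature
    // Mark the cells [cx-1, cx+1] x [cy-1, cy+1] as occupied:
    for (int dx = -1; dx <= 1; dx++)
        for (int dy = -1; dy <= 1; dy++)
        {
            const int gx = cx + dx, gy = cy + dy;
            if (gx >= 0 && gy >= 0 && gx < (int)grid_lx && gy < (int)grid_ly)
                occupied_sections.set_unsafe(gx, gy, true);
        }
    return true;
}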
Example 5: extractFeaturesSURF
/************************************************************************************************
* extractFeaturesSURF *
************************************************************************************************/
void CFeatureExtraction::extractFeaturesSURF(
const mrpt::utils::CImage &inImg,
CFeatureList &feats,
unsigned int init_ID,
unsigned int nDesiredFeatures,
const TImageROI &ROI) const
{
MRPT_UNUSED_PARAM(ROI);
#if MRPT_HAS_OPENCV && MRPT_OPENCV_VERSION_NUM >= 0x111
const CImage img_grayscale(inImg, FAST_REF_OR_CONVERT_TO_GRAY);
const IplImage* cGrey = img_grayscale.getAs<IplImage>();
CvSeq *kp = NULL;
CvSeq *desc = NULL;
CvMemStorage *storage = cvCreateMemStorage(0);
// Extract the SURF points:
CvSURFParams surf_params = cvSURFParams(options.SURFOptions.hessianThreshold, options.SURFOptions.rotation_invariant ? 1:0);
surf_params.nOctaves = options.SURFOptions.nOctaves;
surf_params.nOctaveLayers = options.SURFOptions.nLayersPerOctave;
cvExtractSURF( cGrey, NULL, &kp, &desc, storage, surf_params);
// -----------------------------------------------------------------
// MRPT Wrapping
// -----------------------------------------------------------------
feats.clear();
unsigned int nCFeats = init_ID;
int limit;
int offset = (int)this->options.patchSize/2 + 1;
unsigned int imgH = inImg.getHeight();
unsigned int imgW = inImg.getWidth();
if( nDesiredFeatures == 0 )
limit = kp->total;
else
limit = (int)nDesiredFeatures < kp->total ? (int)nDesiredFeatures : kp->total;
for( int i = 0; i < limit; i++ )
{
// Get the OpenCV SURF point
CvSURFPoint *point;
CFeaturePtr ft = CFeature::Create();
point = (CvSURFPoint*)cvGetSeqElem( kp, i );
const int xBorderInf = (int)floor( point->pt.x - options.patchSize/2 );
const int xBorderSup = (int)floor( point->pt.x + options.patchSize/2 );
const int yBorderInf = (int)floor( point->pt.y - options.patchSize/2 );
const int yBorderSup = (int)floor( point->pt.y + options.patchSize/2 );
if( options.patchSize == 0 || ( (xBorderSup < (int)imgW) && (xBorderInf > 0) && (yBorderSup < (int)imgH) && (yBorderInf > 0) ) )
{
ft->type = featSURF;
ft->x = point->pt.x; // X position
ft->y = point->pt.y; // Y position
ft->orientation = point->dir; // Orientation
ft->scale = point->size*1.2/9; // Scale
ft->ID = nCFeats++; // Feature ID into extraction
ft->patchSize = options.patchSize; // The size of the feature patch
if( options.patchSize > 0 )
{
inImg.extract_patch(
ft->patch,
round( ft->x ) - offset,
round( ft->y ) - offset,
options.patchSize,
options.patchSize ); // Image patch surrounding the feature
}
// Get the SURF descriptor
float* d = (float*)cvGetSeqElem( desc, i );
ft->descriptors.SURF.resize( options.SURFOptions.rotation_invariant ? 128 : 64 );
std::vector<float>::iterator itDesc;
unsigned int k;
for( k = 0, itDesc = ft->descriptors.SURF.begin(); k < ft->descriptors.SURF.size(); k++, itDesc++ )
*itDesc = d[k];
feats.push_back( ft );
} // end if
} // end for
cvReleaseMemStorage(&storage); // Free memory
#else
THROW_EXCEPTION("Method not available since either MRPT has been compiled without OpenCV or OpenCV version is incorrect (Required 1.1.0)")
#endif //MRPT_HAS_OPENCV
} // end extractFeaturesSURF
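Example 5 relies on the OpenCV 1.x C API (cvExtractSURF), which was removed in OpenCV 3. For comparison only, here is a minimal sketch of the same detect-and-describe step with the OpenCV 3.x contrib module; this is not MRPT code, it assumes opencv_contrib (xfeatures2d) is available, and the mapping of MRPT's SURFOptions onto the create() parameters is left as an exercise:
#include <opencv2/xfeatures2d.hpp>  // requires the opencv_contrib modules

// Sketch: SURF keypoints + descriptors with the C++ API (OpenCV >= 3.0).
void surfDetect(const cv::Mat& gray, std::vector<cv::KeyPoint>& kps, cv::Mat& desc,
                double hessianThreshold, int nOctaves, int nOctaveLayers,
                bool extended /*128-value descriptor*/, bool upright /*skip orientation*/)
{
    cv::Ptr<cv::xfeatures2d::SURF> surf =
        cv::xfeatures2d::SURF::create(hessianThreshold, nOctaves, nOctaveLayers, extended, upright);
    surf->detectAndCompute(gray, cv::noArray(), kps, desc);
}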
Example 6: selectGoodFeaturesKLT
//......... (part of the code is omitted here) .........
{
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
cvGoodFeaturesToTrack( cGrey, eig, temp, &points[0], &count, // input and output data
(double)options.KLTOptions.threshold, // for rejecting weak local maxima ( with min_eig < threshold*max(eig_image) )
(double)options.KLTOptions.min_distance, // minimum distance between features
mask ? (*mask) : static_cast<const CvMat*>(NULL), // ROI
options.KLTOptions.radius, // size of the block of pixels used
0, // use Kanade Lucas Tomasi
0.04 ); // Harris "k" parameter (unused here, since the Harris flag above is 0)
#ifdef VERBOSE_TIMING
cout << "[KLT]: Find feats: " << tictac.Tac()*1000.0f << endl;
#endif
}
if( nDesiredFeatures > 0 && count < nPts )
cout << "\n[WARNING][selectGoodFeaturesKLT]: Only " << count << " of " << nDesiredFeatures << " points could be extracted in the image." << endl;
if( options.FIND_SUBPIXEL )
{
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
// Subpixel interpolation
cvFindCornerSubPix( cGrey, &points[0], count,
cvSize(3,3), cvSize(-1,-1),
cvTermCriteria( CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.05 ));
#ifdef VERBOSE_TIMING
cout << "[KLT] subpixel: " << tictac.Tac()*1000.0f << endl;
#endif
}
// -----------------------------------------------------------------
// Fill output structure
// -----------------------------------------------------------------
#ifdef VERBOSE_TIMING
tictac.Tic();
#endif
feats.clear();
unsigned int borderFeats = 0;
unsigned int nCFeats = init_ID;
int i = 0;
const int limit = min( nPts, count );
int offset = (int)this->options.patchSize/2 + 1;
unsigned int imgH = inImg.getHeight();
unsigned int imgW = inImg.getWidth();
while( i < limit )
{
const int xBorderInf = (int)floor( points[i].x - options.patchSize/2 );
const int xBorderSup = (int)floor( points[i].x + options.patchSize/2 );
const int yBorderInf = (int)floor( points[i].y - options.patchSize/2 );
const int yBorderSup = (int)floor( points[i].y + options.patchSize/2 );
if( options.patchSize==0 || ( (xBorderSup < (int)imgW) && (xBorderInf > 0) && (yBorderSup < (int)imgH) && (yBorderInf > 0) ) )
{
CFeaturePtr ft = CFeature::Create();
ft->type = featKLT;
ft->x = points[i].x; // X position
ft->y = points[i].y; // Y position
ft->track_status = status_TRACKED; // Feature Status
ft->response = 0.0; // A value proportional to the quality of the feature (unused yet)
ft->ID = nCFeats++; // Feature ID into extraction
ft->patchSize = options.patchSize; // The size of the feature patch
if( options.patchSize > 0 )
{
inImg.extract_patch(
ft->patch,
round( ft->x ) - offset,
round( ft->y ) - offset,
options.patchSize,
options.patchSize ); // Image patch surrounding the feature
}
feats.push_back( ft );
} // end if
else
borderFeats++;
i++;
} // end while
#ifdef VERBOSE_TIMING
cout << "[KLT] Create output: " << tictac.Tac()*1000.0f << endl;
#endif
#else
THROW_EXCEPTION("The MRPT has been compiled with MRPT_HAS_OPENCV=0 !");
#endif
MRPT_END
} // end of function
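A small usage sketch for the list filled above: iterating a CFeatureList and reading the per-feature fields, using the same smart-pointer element type (CFeaturePtr) seen throughout these examples; the choice of printed fields is an illustration only:
#include <iostream>

// Sketch: dump the extracted KLT features.
void dumpFeatures(const CFeatureList& feats)
{
    for (CFeatureList::const_iterator it = feats.begin(); it != feats.end(); ++it)
        std::cout << "ID=" << (*it)->ID
                  << "  (" << (*it)->x << ", " << (*it)->y << ")"
                  << "  patchSize=" << (int)(*it)->patchSize << std::endl;
}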
Example 7: extractFeaturesAKAZE
void CFeatureExtraction::extractFeaturesAKAZE(
const mrpt::img::CImage& inImg, CFeatureList& feats, unsigned int init_ID,
unsigned int nDesiredFeatures, const TImageROI& ROI) const
{
MRPT_UNUSED_PARAM(ROI);
MRPT_START
#if MRPT_HAS_OPENCV
#if MRPT_OPENCV_VERSION_NUM < 0x300
THROW_EXCEPTION("This function requires OpenCV > 3.0.0");
#else
using namespace cv;
vector<KeyPoint> cv_feats; // The opencv keypoint output vector
// Make sure we operate on a gray-scale version of the image:
const CImage inImg_gray(inImg, FAST_REF_OR_CONVERT_TO_GRAY);
#if MRPT_OPENCV_VERSION_NUM >= 0x300
const Mat theImg = cvarrToMat(inImg_gray.getAs<IplImage>());
Ptr<AKAZE> akaze = AKAZE::create(
options.AKAZEOptions.descriptor_type,
options.AKAZEOptions.descriptor_size,
options.AKAZEOptions.descriptor_channels,
options.AKAZEOptions.threshold, options.AKAZEOptions.nOctaves,
options.AKAZEOptions.nOctaveLayers, options.AKAZEOptions.diffusivity);
akaze->detect(theImg, cv_feats);
// *All* the features have been extracted.
const size_t N = cv_feats.size();
#endif
// Sort the AKAZE features in decreasing order of response (simple O(N^2) selection sort)
for (size_t i = 0; i < N; i++)
{
for (size_t j = i + 1; j < N; j++)
{
if (cv_feats.at(j).response > cv_feats.at(i).response)
{
KeyPoint temp_point = cv_feats.at(i);
cv_feats.at(i) = cv_feats.at(j);
cv_feats.at(j) = temp_point;
}
}
}
unsigned int nMax =
(nDesiredFeatures != 0 && N > nDesiredFeatures) ? nDesiredFeatures : N;
const int offset = (int)this->options.patchSize / 2 + 1;
const size_t size_2 = options.patchSize / 2;
const size_t imgH = inImg.getHeight();
const size_t imgW = inImg.getWidth();
unsigned int i = 0;
unsigned int cont = 0;
TFeatureID nextID = init_ID;
if (!options.addNewFeatures) feats.clear();
while (cont != nMax && i != N)
{
// Take the next feature from the ordered list of good features:
const KeyPoint& kp = cv_feats[i];
i++;
// Patch out of the image??
const int xBorderInf = (int)floor(kp.pt.x - size_2);
const int xBorderSup = (int)floor(kp.pt.x + size_2);
const int yBorderInf = (int)floor(kp.pt.y - size_2);
const int yBorderSup = (int)floor(kp.pt.y + size_2);
if (!(xBorderSup < (int)imgW && xBorderInf > 0 &&
yBorderSup < (int)imgH && yBorderInf > 0))
continue; // nope, skip.
// All tests passed: add new feature:
CFeature::Ptr ft = std::make_shared<CFeature>();
ft->type = featAKAZE;
ft->ID = nextID++;
ft->x = kp.pt.x;
ft->y = kp.pt.y;
ft->response = kp.response;
ft->orientation = kp.angle;
ft->scale = kp.octave;
ft->patchSize = options.patchSize; // The size of the feature patch
if (options.patchSize > 0)
{
inImg.extract_patch(
ft->patch, round(ft->x) - offset, round(ft->y) - offset,
options.patchSize,
options.patchSize); // Image patch surrounding the feature
}
feats.push_back(ft);
++cont;
// cout << ft->x << " " << ft->y << endl;
}
#endif
#endif
MRPT_END
//......... (part of the code is omitted here) .........
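A closing note on Example 7: the nested loop above is an O(N^2) selection sort on the keypoints' response. A sketch of an equivalent, more idiomatic alternative (not what MRPT ships, just a possible drop-in for that nested loop) would be std::sort with a comparator:
#include <algorithm>

// Sort the OpenCV keypoints by decreasing response in O(N log N):
std::sort(cv_feats.begin(), cv_feats.end(),
          [](const cv::KeyPoint& a, const cv::KeyPoint& b) { return a.response > b.response; });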