This page collects typical usage examples of the C++ image::Image class, gathered from open-source projects. If you are wondering what the Image class does or how to use it, the curated examples below may help.
The following shows 15 code examples of the Image class, sorted by popularity by default.
Example 1: detector
void FastCornerDetector::detect
(
const image::Image<unsigned char> & ima,
std::vector<PointFeature> & regions
)
{
using FastDetectorCall =
xy* (*) (const unsigned char *, int, int, int, int, int *);
FastDetectorCall detector = nullptr;
if (size_ == 9) detector = fast9_detect_nonmax;
if (size_ == 10) detector = fast10_detect_nonmax;
if (size_ == 11) detector = fast11_detect_nonmax;
if (size_ == 12) detector = fast12_detect_nonmax;
if (!detector)
{
std::cout << "Invalid size for FAST detector: " << size_ << std::endl;
return;
}
int num_corners = 0;
xy* detections = detector(ima.data(),
ima.Width(), ima.Height(), ima.Width(),
threshold_, &num_corners);
regions.clear();
regions.reserve(num_corners);
for (int i = 0; i < num_corners; ++i)
{
regions.emplace_back(detections[i].x, detections[i].y);
}
free( detections );
}
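A minimal calling sketch for the detector above. The constructor shown here, taking the arc size and the intensity threshold, is an assumption inferred from the size_ and threshold_ members; adjust it to the actual class interface:
image::Image<unsigned char> ima;
// ... load grayscale data into `ima` ...
std::vector<PointFeature> corners;
FastCornerDetector fast_detector(9, 30); // assumed: 9-pixel arc, threshold 30
fast_detector.detect(ima, corners);      // corners now holds (x, y) positions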
Example 2: parameters
template <typename PixelType>
void LinearFilterWx1<PixelType>::applyTo(const Image::Image<PixelType> & srcImage, Image::Image<PixelType> & dstImage) const {
typedef typename PixelType::DataType PixelDataType;
typedef typename PixelType::ComputationType PixelComputationType;
BaseLinearFilterParametersType<PixelDataType,PixelComputationType> parameters(
getFilterData().getDataView(),
getXoffset(),
getYoffset(),
srcImage.getWidth(),
getTotalColor()
);
Algorithm::AlgorithmWx1<
SimpleWx1dataOperationBaseAlgorithm<
BaseLinearFilterAlgorithm<
PixelDataType,
PixelComputationType,
BaseLinearFilterParametersType<PixelDataType,PixelComputationType>,
Algorithm::BaseOperationTempType<PixelDataType,PixelComputationType>
>,
PixelDataType,
PixelComputationType,
BaseLinearFilterParametersType<PixelDataType,PixelComputationType>,
Algorithm::BaseOperationTempType<PixelDataType,PixelComputationType>
>,
PixelDataType,
BaseLinearFilterParametersType<PixelDataType,PixelComputationType>
>(
srcImage.getDataView(),
dstImage.getDataView(),
parameters
);
}
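Note that the deeply nested Algorithm::AlgorithmWx1<...>(...) expression is a constructor call: building the temporary object is what runs the filter over the two data views. A self-contained sketch of that constructor-runs-the-algorithm pattern, using hypothetical types rather than the library's own:
#include <iostream>

// Constructing the object immediately executes the policy's operation.
template <typename Op>
struct RunOnConstruct {
    RunOnConstruct(int a, int b) { std::cout << Op::apply(a, b) << '\n'; }
};

struct Add { static int apply(int a, int b) { return a + b; } };

int main() {
    RunOnConstruct<Add>(2, 3); // temporary object; prints 5, then is destroyed
}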
Example 3: fillHisto
void GradientsDescriptor::fillHisto( const image::Image& _dx, const image::Image& _dy, double* _histo, const jblas::vec2& _startPoint, const jblas::vec2& _direction, double _lineAngle, int _length, double _coef )
{
jblas::vec2 currentPoint = _startPoint + 3.0 * _direction;
double lastNorm = DBL_MAX;
for(int i = 0; i < _length; ++i)
// while(true)
{
int ix = int(currentPoint(0));
int iy = int(currentPoint(1));
if( not check(ix, _dx.width() - 1) or not check(iy, _dx.height() - 1) ) return;
int dx = _dx.getPixelValue<short>( ix, iy, 0 );
int dy = _dy.getPixelValue<short>( ix, iy, 0 );
// double dx = _dx.getSubPixelValue<short>( ix, iy, 0, JfrImage_INTERP_CUBIC );
// double dy = _dy.getSubPixelValue<short>( ix, iy, 0, JfrImage_INTERP_CUBIC );
double norm = sqrt( dx * dx + dy * dy );
// if( norm > lastNorm) return ;
double angle = atan2(dy, dx);
// double diff = cos( angle - _lineAngle);
double correctedAngle = ( angle - _lineAngle);
// int idx = int( (m_count) * ( 1.0 + diff) * 0.5);
// if( idx >= m_count ) idx = m_count - 1;
// if( idx != 0 and idx != (m_count - 1 ) )
for(int j = 0; j < m_count; ++j )
{
// double lnorm = norm * ( 1.0 - fabs( fpow( -1.0 + 2.0 * j / (m_count-1) - diff ), 2.0) );
// double lnorm = norm * exp( -pow( -1.0 + 2.0 * j / (m_count-1) - diff, 2.0) );
// JFR_DEBUG( j << " " << exp( -pow2( cos( M_PI * j / (m_count-1) - 0.5 * correctedAngle ) ) ) );
double lnorm = norm * _coef * exp( -pow2( cos( M_PI * j / (m_count-1) - 0.5 * correctedAngle ) ) );
_histo[ j ] += lnorm;
}
lastNorm = norm;
currentPoint += _direction;
}
}
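The check helper used above is not part of the snippet. A plausible reading, given that it is always called with width - 1 or height - 1, is an inclusive bounds test; this is an assumption, not the library's actual definition:
// Assumed helper: true when 0 <= value <= max (inclusive upper bound).
inline bool check(int value, int max) { return value >= 0 && value <= max; }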
Example 4: parameters
template <typename PixelType>
void AIL_DLL_EXPORT BoxFilterWx1<PixelType>::applyTo(const Image::Image<PixelType> & srcImage, Image::Image<PixelType> & dstImage) const {
typedef typename PixelType::DataType PixelDataType;
typedef typename PixelType::ComputationType PixelComputationType;
BoxFilterWx1parametersType<PixelDataType,PixelComputationType> parameters(xOffset,filterWidth,PixelType::ComputationRange::getMinPixel(),PixelComputationType(filterWidth));
Algorithm::AlgorithmWx1<
Algorithm::BasicWx1baseAlgorithm<
BaseBoxFilterAlgorithm<
PixelDataType,
PixelComputationType,
BoxFilterWx1parametersType<PixelDataType,PixelComputationType>,
Algorithm::BaseOperationTempType<PixelDataType,PixelComputationType>
>,
PixelDataType,
PixelComputationType,
BoxFilterWx1parametersType<PixelDataType,PixelComputationType>,
Algorithm::BaseOperationTempType<PixelDataType,PixelComputationType>
>,
PixelDataType,
BoxFilterWx1parametersType<PixelDataType,PixelComputationType>
>(
srcImage.getDataView(),
dstImage.getDataView(),
parameters
);
}
Example 5: coloriage
void DirectSegmentsBase::coloriage( double x_1, double y_1, double x_2, double y_2, image::Image& inasegment, int _value)
{
double xStep = (x_2 - x_1);
double yStep = (y_2 - y_1);
double norm_steps = sqrt( xStep * xStep + yStep * yStep );
xStep /= norm_steps;
yStep /= norm_steps;
int max_coloriage = (int)norm_steps +1;
for( int k = 0; k < max_coloriage; ++k )
{
int x = (int)( x_1 + k * xStep );
int y = (int)( y_1 + k * yStep );
for( int j = -1; j <= 1; ++j)
{
for( int i = -1; i <= 1; ++i)
{
if( check( x + i, inasegment.width() - 1 ) and check( y + j, inasegment.height() - 1 ) )
{
inasegment.setPixelValue<int>( _value, x+i, y+j, 0 );
}
}
}
}
#if 0
double xStep = (x_2 - x_1);
double yStep = (y_2 - y_1);
double norm_steps = sqrt( xStep * xStep + yStep * yStep );
xStep /= norm_steps;
yStep /= norm_steps;
int max_coloriage = (int)norm_steps +1;
int p_x = x_1;
int p_y = y_1;
if( check( p_x, inasegment.width() - 1 ) and check( p_y, inasegment.height() - 1 ) )
{
inasegment.setPixelValue<int>( _value, p_x, p_y, 0 );
}
for( int k = -1; k <= max_coloriage; ++k )
{
int x = (int)( x_1 + k * xStep );
int y = (int)( y_1 + k * yStep );
if( check( p_x, inasegment.width() - 1 ) and check( y, inasegment.height() - 1 ) )
{
inasegment.setPixelValue<int>( _value, p_x, y, 0 );
}
if( check( x, inasegment.width() - 1 ) )
{
if( check( p_y, inasegment.height() - 1 ) )
{
inasegment.setPixelValue<int>( _value, x, p_y, 0 );
}
if( check( y, inasegment.height() - 1 ) )
{
inasegment.setPixelValue<int>( _value, x, y, 0 );
}
}
p_x = x;
p_y = y;
}
#endif
}
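coloriage (French for "coloring-in") walks the segment in unit steps and stamps a 3x3 neighborhood at each step, so the painted line ends up roughly three pixels thick. A hedged usage sketch (the instance name and image setup are assumptions, not the library's API):
// `labels` is an image::Image prepared elsewhere at the working resolution
// (construction details depend on the actual image API).
detector.coloriage(10.0, 10.0, 50.0, 80.0, labels, 7); // paint segment with label 7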
Example 6: gen
// Suggest new feature points for tracking (at most `count` points are kept)
bool detect
(
const image::Image<unsigned char> & ima,
std::vector<features::PointFeature> & pt_to_track,
const size_t count
) const override
{
cv::Mat current_img;
cv::eigen2cv(ima.GetMat(), current_img);
std::vector<cv::KeyPoint> m_nextKeypoints;
cv::Ptr<cv::FeatureDetector> m_detector = cv::GFTTDetector::create(count);
if (m_detector == NULL)
return false;
m_detector->detect(current_img, m_nextKeypoints);
if (m_nextKeypoints.size() >= count)
{
// shuffle to avoid sampling only one bucket
std::mt19937 gen(std::mt19937::default_seed);
std::shuffle(m_nextKeypoints.begin(), m_nextKeypoints.end(), gen);
}
const size_t kept_kp_count = std::min(m_nextKeypoints.size(), count);
m_nextKeypoints.resize(kept_kp_count);
pt_to_track.resize(kept_kp_count);
for (size_t i = 0; i < kept_kp_count; ++i)
pt_to_track[i] = features::PointFeature(m_nextKeypoints[i].pt.x, m_nextKeypoints[i].pt.y);
return kept_kp_count != 0;
// Return false if no point can be added
}
Example 7: return
/// Try to track current point set in the provided image
/// return false when tracking failed (=> to send frame to relocalization)
bool track
(
const image::Image<unsigned char> & ima,
const std::vector<features::PointFeature> & pt_to_track,
std::vector<features::PointFeature> & pt_tracked,
std::vector<bool> & status
) override
{
cv::eigen2cv(ima.GetMat(), current_img_);
if (!pt_to_track.empty())
{
prevPts_.resize(pt_to_track.size());
nextPts_.resize(pt_to_track.size());
for (size_t i=0; i < pt_to_track.size(); ++i)
{
prevPts_[i].x = pt_to_track[i].x();
prevPts_[i].y = pt_to_track[i].y();
}
std::vector<unsigned char> status_uchar;
cv::calcOpticalFlowPyrLK(prev_img_, current_img_, prevPts_, nextPts_, status_uchar, error_);
status.assign(status_uchar.begin(), status_uchar.end());
for (size_t i=0; i < nextPts_.size(); ++i)
{
pt_tracked[i].coords() << nextPts_[i].x, nextPts_[i].y;
}
}
// swap frame for next tracking iteration
current_img_.copyTo(prev_img_);
const size_t tracked_point_count = std::accumulate(status.begin(), status.end(), 0);
return (tracked_point_count != 0);
}
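Note that pt_tracked is written through operator[] but never resized inside track, so the caller is expected to pre-size it to match pt_to_track. A minimal calling sketch under that assumption (tracker is a hypothetical instance of this class):
std::vector<features::PointFeature> pt_tracked(pt_to_track.size());
std::vector<bool> status;
if (!tracker.track(ima, pt_to_track, pt_tracked, status))
{
  // every point was lost: send this frame to relocalization
}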
Example 8: computeMask
/**
* Fill mask from corresponding points (each point pictured by a disk of radius _radius)
*
* \param[out] maskLeft Mask of the left image (initialized to corresponding image size).
* \param[out] maskRight Mask of the right image (initialized to corresponding image size).
*
* \return True if some pixels have been set to true.
*/
virtual bool computeMask( image::Image< unsigned char > & maskLeft, image::Image< unsigned char > & maskRight )
{
maskLeft.fill(0);
maskRight.fill(0);
for( std::vector< matching::IndMatch >::const_iterator
iter_putativeMatches = _vec_PutativeMatches.begin();
iter_putativeMatches != _vec_PutativeMatches.end();
++iter_putativeMatches )
{
const features::SIOPointFeature & L = _vec_featsL[ iter_putativeMatches->i_ ];
const features::SIOPointFeature & R = _vec_featsR[ iter_putativeMatches->j_ ];
image::FilledCircle( L.x(), L.y(), ( int )_radius, ( unsigned char ) 255, &maskLeft );
image::FilledCircle( R.x(), R.y(), ( int )_radius, ( unsigned char ) 255, &maskRight );
}
return _vec_PutativeMatches.size() > 0;
}
Example 9: compute
double Zncc::compute(image::Image const& im1, image::Image const& im2, float const* weightMatrix)
{
JFR_PRECOND(im1.depth() == im2.depth(), "The depth of both images is different");
switch(im1.depth())
{
// case CV_1U:
// if (weightMatrix == NULL)
// return computeTpl<CV_1U, bool,bool,0,1,true,false>(im1,im2);
// else
// return computeTpl<CV_1U, bool,bool,0,1,true,true>(im1,im2,weightMatrix);
case CV_8U:
if (weightMatrix == NULL)
return computeTpl<CV_8U, uint8_t,uint8_t,0,255,true,false>(im1,im2);
else
return computeTpl<CV_8U, uint8_t,uint8_t,0,255,true,true>(im1,im2,weightMatrix);
case CV_8S:
if (weightMatrix == NULL)
return computeTpl<CV_8S, int8_t,int8_t, -128,127,true,false>(im1,im2);
else
return computeTpl<CV_8S, int8_t,int8_t, -128,127,true,true>(im1,im2,weightMatrix);
case CV_16U:
if (weightMatrix == NULL)
return computeTpl<CV_16U, uint16_t,uint16_t, 0,65535,true,false>(im1,im2);
else
return computeTpl<CV_16U, uint16_t,uint16_t, 0,65535,true,true>(im1,im2,weightMatrix);
case CV_16S:
if (weightMatrix == NULL)
return computeTpl<CV_16S, int16_t,int16_t, -32768,32767,true,false>(im1,im2);
else
return computeTpl<CV_16S, int16_t,int16_t, -32768,32767,true,true>(im1,im2,weightMatrix);
case CV_32F:
if (weightMatrix == NULL) // bool and no bounds: a float cannot be a non-type template parameter, and bounds would be useless here anyway
return computeTpl<CV_32F, float,bool, 0,0,false,false>(im1,im2);
else
return computeTpl<CV_32F, float,bool, 0,0,false,true>(im1,im2,weightMatrix);
case CV_64F:
if (weightMatrix == NULL) // bool and no bounds: a float cannot be a non-type template parameter, and bounds would be useless here anyway
return computeTpl<CV_64F, double,bool, 0,0,false,false>(im1,im2);
else
return computeTpl<CV_64F, double,bool, 0,0,false,true>(im1,im2,weightMatrix);
default:
JFR_PRECOND(false, "Unknown image depth");
return FP_NAN;
}
}
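For reference, the quantity that computeTpl presumably evaluates is the zero-mean normalized cross-correlation: subtract each patch's mean, take the dot product, and divide by the product of the standard deviations, giving a score in [-1, 1]. A minimal scalar sketch over two same-length buffers (the real template additionally handles the saturation bounds and the optional weight matrix):
#include <cmath>
#include <cstddef>

double znccScalar(const double* a, const double* b, std::size_t n)
{
  double meanA = 0.0, meanB = 0.0;
  for (std::size_t i = 0; i < n; ++i) { meanA += a[i]; meanB += b[i]; }
  meanA /= n; meanB /= n;
  double num = 0.0, varA = 0.0, varB = 0.0;
  for (std::size_t i = 0; i < n; ++i) {
    const double da = a[i] - meanA, db = b[i] - meanB;
    num += da * db;
    varA += da * da;
    varB += db * db;
  }
  const double denom = std::sqrt(varA * varB);
  return denom > 0.0 ? num / denom : 0.0; // correlation in [-1, 1]
}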
Example 10: attributes
/**
@brief Detect regions on the image and compute their attributes (description)
@param image Image.
@param regions The detected regions and attributes (the caller must delete the allocated data)
@param mask 8-bit gray image for keypoint filtering (optional).
Non-zero values depict the region of interest.
*/
bool Describe(const image::Image<unsigned char>& image,
std::unique_ptr<Regions> &regions,
const image::Image<unsigned char> * mask = nullptr)
{
// Convert for opencv
cv::Mat img;
cv::eigen2cv(image.GetMat(), img);
// Convert mask image into cv::Mat
cv::Mat m_mask;
if(mask != nullptr) {
cv::eigen2cv(mask->GetMat(), m_mask);
}
// Create a SIFT detector
std::vector< cv::KeyPoint > v_keypoints;
cv::Mat m_desc;
cv::Ptr<cv::Feature2D> siftdetector = cv::xfeatures2d::SIFT::create();
// Process SIFT computation
siftdetector->detectAndCompute(img, m_mask, v_keypoints, m_desc);
Allocate(regions);
// Build alias to cached data
SIFT_Regions * regionsCasted = dynamic_cast<SIFT_Regions*>(regions.get());
// reserve some memory for faster keypoint saving
regionsCasted->Features().reserve(v_keypoints.size());
regionsCasted->Descriptors().reserve(v_keypoints.size());
// Prepare a column vector with the sum of each descriptor
cv::Mat m_siftsum;
cv::reduce(m_desc, m_siftsum, 1, cv::REDUCE_SUM);
// Copy keypoints and descriptors in the regions
int cpt = 0;
for(std::vector< cv::KeyPoint >::const_iterator i_kp = v_keypoints.begin();
i_kp != v_keypoints.end();
++i_kp, ++cpt)
{
SIOPointFeature feat((*i_kp).pt.x, (*i_kp).pt.y, (*i_kp).size, (*i_kp).angle);
regionsCasted->Features().push_back(feat);
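// The next loop applies a RootSIFT-style mapping: each descriptor is
// L1-normalized, square-rooted, then scaled into the 8-bit range.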
Descriptor<unsigned char, 128> desc;
for(int j = 0; j < 128; j++)
{
desc[j] = static_cast<unsigned char>(512.0*sqrt(m_desc.at<float>(cpt, j)/m_siftsum.at<float>(cpt, 0)));
}
regionsCasted->Descriptors().push_back(desc);
}
return true;
};
Example 11: snapshot
void snapshot(unsigned call_no) {
if (!drawable ||
(!snapshot_prefix && !compare_prefix)) {
return;
}
Image::Image *ref = NULL;
if (compare_prefix) {
char filename[PATH_MAX];
snprintf(filename, sizeof filename, "%s%010u.png", compare_prefix, call_no);
ref = Image::readPNG(filename);
if (!ref) {
return;
}
if (retrace::verbosity >= 0) {
std::cout << "Read " << filename << "\n";
}
}
Image::Image *src = glstate::getDrawBufferImage(GL_RGBA);
if (!src) {
return;
}
if (snapshot_prefix) {
char filename[PATH_MAX];
snprintf(filename, sizeof filename, "%s%010u.png", snapshot_prefix, call_no);
if (src->writePNG(filename) && retrace::verbosity >= 0) {
std::cout << "Wrote " << filename << "\n";
}
}
if (ref) {
std::cout << "Snapshot " << call_no << " average precision of " << src->compare(*ref) << " bits\n";
delete ref;
}
delete src;
}
Example 12: main
int main( int argc , char** argv ){
#ifdef DEBUG
MEM_ON();
TRACE_OFF();
#endif
//total execution timer
Timer totalTimer;
totalTimer.start();
std::cout<<"****************************************************************"<<std::endl;
std::cout<<"* OpenMP execution with "<<omp_get_max_threads()<<" threads *"<<std::endl;
std::cout<<"****************************************************************"<<std::endl;
std::cout<<"\n\n\n";
std::vector<std::string> imageName;
std::stringstream input(argv[4]);
double factor;
input >> factor;
int succeded = 0;
int failed = 0;
std::string operation( argv[3] );
//parallel code timer
Timer parallelTimer;
//how many images to run in parallel; this equals the number of threads created for the program
unsigned int parallelImages = omp_get_max_threads();
//counter for total files for parallel iterations
unsigned int counter = 0;
//how many iterations to run in parallel
unsigned int parallelIterations = parallelImages;
if( GetDirFileNames ( argv[1] , imageName ) )
try {
parallelTimer.start();
for( std::vector<std::string>::iterator it = imageName.begin() ; it < imageName.end() ; it += parallelImages ){
counter += parallelImages;
parallelIterations = parallelImages;
if( counter > imageName.size() )
parallelIterations = imageName.size() - ( counter - parallelImages );
#pragma omp parallel for
for( unsigned int i = 0 ; i < parallelIterations ; i++ ) {
std::cout<<(*(it + i))<<"\n";
IMAGE::Image* oldImage = NULL;
IMAGE::Image* newImage = NULL;
std::string oldName = argv[1] + (*(it + i));
std::string newName = argv[2] + (*(it + i));
try{
oldImage= IMAGE::Image::createInstance( oldName );
newImage = IMAGE::Image::createInstance( newName );
////////////////////
try{
oldImage->open( oldName, 'r' );
newImage->open( newName , 'w' );
oldImage->readImageRaster();
newImage->raster.createRaster( oldImage->raster );
//check which operation to do
if( operation == REVERSE ){
IMAGE::PROCESS::reverseColor( newImage->raster );
}
else if( operation == BRIGHTNESS ) {
IMAGE::PROCESS::adjustBrightness( newImage->raster , atoi( argv[4] ));
}
else if( operation == CONTRAST ) {
IMAGE::PROCESS::adjustContrast( newImage->raster , atoi( argv[4] ) );
}
else if( operation == RGB2GREY ) {
IMAGE::FILTERS::convertRGB2GREY( newImage->raster , atoi( argv[4] ));
}
else if( operation == RGB2BW ) {
IMAGE::FILTERS::convertRGB2BW( newImage->raster );
}
else if( operation == RGB2SEPIA ) {
IMAGE::FILTERS::convertRGB2SEPIA( newImage->raster );
}
else if( operation == BLUR ) {
IMAGE::PROCESS::blurImage( newImage->raster , atoi(argv[4]) );
}
else if( operation == ROTATE ) {
//......... (remaining code omitted) .........
Example 13:
double Zncc::compute8noborne(image::Image const& im1, image::Image const& im2)
{
JFR_PRECOND(im1.depth() == im2.depth(), "The depth of both images is different");
JFR_PRECOND(im1.depth() == CV_8U, "The depth of images must be CV_8U");
return computeTpl<CV_8U, uint8_t,uint8_t,0,255,false,false>(im1,im2);
}
Example 14: computeMask
/**
* Set the masks to white; the images are left unchanged.
*
* \param[out] maskLeft Mask of the left image (initialized to corresponding image size).
* \param[out] maskRight Mask of the right image (initialized to corresponding image size).
*
* \return True.
*/
virtual bool computeMask(
image::Image< unsigned char > & maskLeft,
image::Image< unsigned char > & maskRight )
{
std::vector< matching::IndMatch > vec_KVLDMatches;
image::Image< unsigned char > imageL, imageR;
image::ReadImage( _sLeftImage.c_str(), &imageL );
image::ReadImage( _sRightImage.c_str(), &imageR );
image::Image< float > imgA ( imageL.GetMat().cast< float >() );
image::Image< float > imgB(imageR.GetMat().cast< float >());
std::vector< Pair > matchesFiltered, matchesPair;
for( std::vector< matching::IndMatch >::const_iterator iter_match = _vec_PutativeMatches.begin();
iter_match != _vec_PutativeMatches.end();
++iter_match )
{
matchesPair.push_back( std::make_pair( iter_match->i_, iter_match->j_ ) );
}
std::vector< double > vec_score;
//In order to illustrate the gvld (or vld)-consistent neighbors, the following two parameters have been externalized as inputs of the KVLD function.
openMVG::Mat E = openMVG::Mat::Ones( _vec_PutativeMatches.size(), _vec_PutativeMatches.size() ) * ( -1 );
// gvld-consistency matrix, initialized to -1; >0 = consistency value, -1 = unknown, -2 = false
std::vector< bool > valide( _vec_PutativeMatches.size(), true );// flags over the initial matches; if still true at the end of KVLD, the match is kept
size_t it_num = 0;
KvldParameters kvldparameters;//initial parameters of KVLD
//kvldparameters.K = 5;
while (
it_num < 5 &&
kvldparameters.inlierRate >
KVLD(
imgA, imgB,
_vec_featsL, _vec_featsR,
matchesPair, matchesFiltered,
vec_score, E, valide, kvldparameters ) )
{
kvldparameters.inlierRate /= 2;
std::cout<<"low inlier rate, re-select matches with new rate="<<kvldparameters.inlierRate<<std::endl;
kvldparameters.K = 2;
it_num++;
}
bool bOk = false;
if( !matchesPair.empty())
{
// Get mask
getKVLDMask(
&maskLeft, &maskRight,
_vec_featsL, _vec_featsR,
matchesPair,
valide,
E);
bOk = true;
}
else{
maskLeft.fill( 0 );
maskRight.fill( 0 );
}
return bOk;
}
Example 15: Extract
/**
* @brief Extract MSER regions
* @param img Input image
* @param[out] regions Output regions
*/
void MSERExtractor::Extract( const image::Image<unsigned char> & img , std::vector<MSERRegion> & regions ) const
{
// Compute minimum and maximum region area relative to this image
const int minRegArea = img.Width() * img.Height() * m_minimum_area;
const int maxRegArea = img.Width() * img.Height() * m_maximum_area;
// List of processed pixels (maybe we can use a more efficient structure)
std::vector<std::vector<bool>> processed;
processed.resize( img.Width() );
for (int i = 0; i < img.Width(); ++i )
{
processed[ i ].resize( img.Height() );
std::fill( processed[ i ].begin() , processed[ i ].end() , false );
}
// Holds the boundary of given grayscale value (boundary[0] -> pixels in the boundary with 0 grayscale value)
std::vector<PixelStackElt> boundary[ 256 ];
// List of regions computed so far (not only valid MSER regions)
std::vector<MSERRegion *> regionStack;
// Push an empty region
regionStack.push_back( new MSERRegion );
// Start processing from top left pixel
PixelStackElt cur_pix;
cur_pix.pix_x = 0;
cur_pix.pix_y = 0;
cur_pix.pix_level = img( 0 , 0 );
cur_pix.edge_index = PIXEL_RIGHT;
processed[ cur_pix.pix_x ][ cur_pix.pix_y ] = true;
regionStack.push_back( new MSERRegion( cur_pix.pix_level , cur_pix.pix_x , cur_pix.pix_y ) );
int priority = 256;
// Start process
while (1)
{
bool restart = false;
// Process the neighbors to see if there is something to explore at a lower grayscale level
for ( PixelNeighborsDirection curDir = cur_pix.edge_index;
curDir <= PIXEL_BOTTOM_RIGHT;
curDir = NextDirection( curDir , m_connectivity ) )
{
int nx , ny;
GetNeighbor( cur_pix.pix_x , cur_pix.pix_y , curDir , img.Width() , img.Height() , nx , ny );
// Pixel was not processed before
if (ValidPixel( nx , ny , img.Width() , img.Height() ) && ! processed[ nx ][ ny ] )
{
const int nLevel = img( ny , nx );
processed[ nx ][ ny ] = true;
// Info of the neighboring pixel
PixelStackElt n_elt;
n_elt.pix_x = nx;
n_elt.pix_y = ny;
n_elt.pix_level = nLevel;
n_elt.edge_index = PIXEL_RIGHT;
// Now decide from which pixel we have to continue
if (nLevel >= cur_pix.pix_level )
{
// Continue from the same pixel
boundary[ nLevel ].push_back( n_elt );
// Store the lowest value so far
priority = std::min( nLevel , priority );
}
else
{
// Go on with the neighboring pixel (go down)
cur_pix.edge_index = NextDirection( curDir , m_connectivity ); // Next time we have to process the next boundary pixel
boundary[ cur_pix.pix_level ].push_back( cur_pix );
// Store the lowest value so far
priority = std::min( cur_pix.pix_level , priority );
// Push the next pixel to process
cur_pix = n_elt;
restart = true;
break;
}
}
}
// Do we have to restart from a new pixel ?
if (restart )
{
// If so it's that because we found a lower grayscale value so let's start a new region
regionStack.push_back( new MSERRegion( cur_pix.pix_level , cur_pix.pix_x , cur_pix.pix_y ) );
continue;
}
//......... (remaining code omitted) .........