This article collects typical usage examples of the C++ method image::Image::Width. If you are wondering what exactly Image::Width does, how to call it, and what real calling code looks like, the curated examples below may help. You can also explore further usage examples of the enclosing class, image::Image.
Four code examples of the Image::Width method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
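Before diving into the examples, here is a minimal sketch of the pattern they all share: Width() and Height() are paired to size buffers or to bound loops over the pixels. The include path, the openMVG namespace, and the helper name are assumptions for illustration, not something taken from the examples below.

#include "openMVG/image/image_container.hpp"  // include path assumed

// Minimal sketch: walk every pixel of an 8-bit image using Height()/Width().
// operator()(row, col) follows the (y, x) convention used in the MSER example below.
std::size_t CountBrightPixels(const openMVG::image::Image<unsigned char> & img)
{
  std::size_t count = 0;
  for (int y = 0; y < img.Height(); ++y)
    for (int x = 0; x < img.Width(); ++x)
      if (img(y, x) > 128)
        ++count;
  return count;
}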
Example 1: detector
void FastCornerDetector::detect
(
  const image::Image<unsigned char> & ima,
  std::vector<PointFeature> & regions
)
{
  using FastDetectorCall =
    xy* (*)(const unsigned char *, int, int, int, int, int *);

  // Pick the FAST variant matching the configured segment size
  FastDetectorCall detector = nullptr;
  if (size_ ==  9) detector = fast9_detect_nonmax;
  if (size_ == 10) detector = fast10_detect_nonmax;
  if (size_ == 11) detector = fast11_detect_nonmax;
  if (size_ == 12) detector = fast12_detect_nonmax;
  if (!detector)
  {
    std::cout << "Invalid size for FAST detector: " << size_ << std::endl;
    return;
  }

  int num_corners = 0;
  // Width() is passed twice: once as the image width and once as the row stride,
  // since the image buffer is stored without padding.
  xy* detections = detector(ima.data(),
    ima.Width(), ima.Height(), ima.Width(),
    threshold_, &num_corners);

  regions.clear();
  regions.reserve(num_corners);
  for (int i = 0; i < num_corners; ++i)
  {
    regions.emplace_back(detections[i].x, detections[i].y);
  }
  free( detections );
}
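A possible call site for the detector above (a sketch, not taken from the original source): the FastCornerDetector constructor arguments are assumed from the size_ and threshold_ members used in the body, and the surrounding headers and using-declarations are taken for granted.

// Hypothetical usage; constructor signature (segment size, threshold) is assumed.
void DetectCorners(const image::Image<unsigned char> & ima)
{
  FastCornerDetector detector(9, 30);   // assumed: FAST-9 with intensity threshold 30
  std::vector<PointFeature> corners;
  detector.detect(ima, corners);        // fills corners with (x, y) keypoint positions
  std::cout << "Detected " << corners.size() << " FAST corners" << std::endl;
}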
Example 2: Extract
/**
 * @brief Extract MSER regions
 * @param img Input image
 * @param[out] regions Output regions
 */
void MSERExtractor::Extract( const image::Image<unsigned char> & img , std::vector<MSERRegion> & regions ) const
{
  // Compute minimum and maximum region area relative to this image
  const int minRegArea = img.Width() * img.Height() * m_minimum_area;
  const int maxRegArea = img.Width() * img.Height() * m_maximum_area;

  // List of processed pixels (maybe we can use a more efficient structure)
  std::vector<std::vector<bool>> processed;
  processed.resize( img.Width() );
  for (int i = 0; i < img.Width(); ++i )
  {
    processed[ i ].resize( img.Height() );
    std::fill( processed[ i ].begin() , processed[ i ].end() , false );
  }

  // Holds the boundary of a given grayscale value (boundary[0] -> pixels in the boundary with 0 grayscale value)
  std::vector<PixelStackElt> boundary[ 256 ];

  // List of regions computed so far (not only valid MSER regions)
  std::vector<MSERRegion *> regionStack;
  // Push an empty region
  regionStack.push_back( new MSERRegion );

  // Start processing from the top-left pixel
  PixelStackElt cur_pix;
  cur_pix.pix_x = 0;
  cur_pix.pix_y = 0;
  cur_pix.pix_level = img( 0 , 0 );
  cur_pix.edge_index = PIXEL_RIGHT;

  processed[ cur_pix.pix_x ][ cur_pix.pix_y ] = true;

  regionStack.push_back( new MSERRegion( cur_pix.pix_level , cur_pix.pix_x , cur_pix.pix_y ) );

  int priority = 256;

  // Start process
  while (1)
  {
    bool restart = false;

    // Process the neighbors to see if there is something to explore at a lower grayscale level
    for ( PixelNeighborsDirection curDir = cur_pix.edge_index;
          curDir <= PIXEL_BOTTOM_RIGHT;
          curDir = NextDirection( curDir , m_connectivity ) )
    {
      int nx , ny;
      GetNeighbor( cur_pix.pix_x , cur_pix.pix_y , curDir , img.Width() , img.Height() , nx , ny );

      // Pixel was not processed before
      if (ValidPixel( nx , ny , img.Width() , img.Height() ) && ! processed[ nx ][ ny ] )
      {
        const int nLevel = img( ny , nx );
        processed[ nx ][ ny ] = true;

        // Info of the neighboring pixel
        PixelStackElt n_elt;
        n_elt.pix_x = nx;
        n_elt.pix_y = ny;
        n_elt.pix_level = nLevel;
        n_elt.edge_index = PIXEL_RIGHT;

        // Now decide from which pixel we have to continue
        if (nLevel >= cur_pix.pix_level )
        {
          // Continue from the same pixel
          boundary[ nLevel ].push_back( n_elt );

          // Store the lowest value so far
          priority = std::min( nLevel , priority );
        }
        else
        {
          // Go on with the neighboring pixel (go down)
          cur_pix.edge_index = NextDirection( curDir , m_connectivity ); // Next time we have to process the next boundary pixel
          boundary[ cur_pix.pix_level ].push_back( cur_pix );

          // Store the lowest value so far
          priority = std::min( cur_pix.pix_level , priority );

          // Push the next pixel to process
          cur_pix = n_elt;
          restart = true;
          break;
        }
      }
    }
    // Do we have to restart from a new pixel?
    if (restart )
    {
      // If so, it's because we found a lower grayscale value, so start a new region
      regionStack.push_back( new MSERRegion( cur_pix.pix_level , cur_pix.pix_x , cur_pix.pix_y ) );
      continue;
    }
    //......... the rest of this example has been omitted .........
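A sketch of how the extractor above might be called. The remainder of Extract is omitted on this page, and whether MSERExtractor is default-constructible with sensible area/variation parameters is an assumption; only the Extract signature is shown here.

// Hypothetical usage; default construction of MSERExtractor is assumed.
void ExtractMserRegions(const image::Image<unsigned char> & img)
{
  MSERExtractor extractor;              // assumed default parameters
  std::vector<MSERRegion> regions;
  extractor.Extract(img, regions);      // area bounds scale with img.Width() * img.Height()
  // ... consume the detected MSER regions ...
}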
Example 3: octave_gen
/**
@brief Detect regions on the image and compute their attributes (description)
@param image Image.
@param regions The detected regions and attributes (the caller must delete the allocated data)
@param mask 8-bit gray image for keypoint filtering (optional).
       Non-zero values depict the region of interest.
*/
bool Describe
(
  const image::Image<unsigned char>& image,
  std::unique_ptr<Regions> &regions,
  const image::Image<unsigned char> * mask = nullptr
) override
{
  const int w = image.Width(), h = image.Height();
  // Convert to float in range [0;1]
  const image::Image<float> If(image.GetMat().cast<float>()/255.0f);

  // Compute SIFT keypoints
  Allocate(regions);

  // Build alias to cached data
  SIFT_Regions * regionsCasted = dynamic_cast<SIFT_Regions*>(regions.get());
  {
    using namespace openMVG::features::sift;

    const int supplementary_images = 3;
    // => in order to ensure each gaussian slice is used in the process, 3 extra images are required:
    // +1 for DoG computation
    // +2 for 3D discrete extrema definition

    HierarchicalGaussianScaleSpace octave_gen(
      params_.num_octaves_,
      params_.num_scales_,
      (params_.first_octave_ == -1)
        ? GaussianScaleSpaceParams(1.6f/2.0f, 1.0f/2.0f, 0.5f, supplementary_images)
        : GaussianScaleSpaceParams(1.6f, 1.0f, 0.5f, supplementary_images));

    octave_gen.SetImage( If );

    std::vector<Keypoint> keypoints;
    keypoints.reserve(5000);
    Octave octave;
    while ( octave_gen.NextOctave( octave ) )
    {
      std::vector<Keypoint> keys;
      // Find keypoints
      SIFT_KeypointExtractor keypointDetector(
        params_.peak_threshold_ / octave_gen.NbSlice(),
        params_.edge_threshold_);
      keypointDetector(octave, keys);

      // Find keypoint orientations and compute their descriptions
      Sift_DescriptorExtractor descriptorExtractor;
      descriptorExtractor(octave, keys);

      // Concatenate the found keypoints
      std::move(keys.begin(), keys.end(), std::back_inserter(keypoints));
    }
    for (const auto & k : keypoints)
    {
      // Feature masking
      if (mask)
      {
        const image::Image<unsigned char> & maskIma = *mask;
        if (maskIma(k.y, k.x) == 0)
          continue;
      }

      Descriptor<unsigned char, 128> descriptor;
      descriptor << (k.descr.cast<unsigned char>());
      {
        regionsCasted->Descriptors().emplace_back(descriptor);
        regionsCasted->Features().emplace_back(k.x, k.y, k.sigma, k.theta);
      }
    }
  }
  return true;
};
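For completeness, a caller-side sketch of this Describe interface. Only the member function body is shown above, so the describer class name (SomeSiftImageDescriber) is a placeholder, and the use of Features().size() relies on Features() returning a standard container, as the reserve()/push_back() calls in example 4 suggest.

// Hypothetical caller; SomeSiftImageDescriber stands in for the (unshown) describer class.
void DescribeWholeImage(const image::Image<unsigned char> & image,
                        SomeSiftImageDescriber & describer)
{
  std::unique_ptr<Regions> regions;
  describer.Describe(image, regions);   // no mask: the whole image is the region of interest
  SIFT_Regions * sift_regions = dynamic_cast<SIFT_Regions*>(regions.get());
  if (sift_regions)
    std::cout << sift_regions->Features().size() << " SIFT features" << std::endl;
}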
Example 4: Describe
/**
@brief Detect regions on the image and compute their attributes (description)
@param image Image.
@param regions The detected regions and attributes (the caller must delete the allocated data)
@param mask 8-bit gray image for keypoint filtering (optional).
       Non-zero values depict the region of interest.
*/
bool Describe(const image::Image<unsigned char>& image,
  std::unique_ptr<Regions> &regions,
  const image::Image<unsigned char> * mask = NULL)
{
  const int w = image.Width(), h = image.Height();
  // Convert to float
  const image::Image<float> If(image.GetMat().cast<float>());

  VlSiftFilt *filt = vl_sift_new(w, h,
    _params._num_octaves, _params._num_scales, _params._first_octave);
  if (_params._edge_threshold >= 0)
    vl_sift_set_edge_thresh(filt, _params._edge_threshold);
  if (_params._peak_threshold >= 0)
    vl_sift_set_peak_thresh(filt, 255 * _params._peak_threshold / _params._num_scales);

  Descriptor<vl_sift_pix, 128> descr;
  Descriptor<unsigned char, 128> descriptor;

  // Process SIFT computation
  vl_sift_process_first_octave(filt, If.data());

  Allocate(regions);

  // Build alias to cached data
  SIFT_Regions * regionsCasted = dynamic_cast<SIFT_Regions*>(regions.get());
  // Reserve some memory for faster keypoint saving
  regionsCasted->Features().reserve(2000);
  regionsCasted->Descriptors().reserve(2000);

  while (true) {
    vl_sift_detect(filt);

    VlSiftKeypoint const *keys = vl_sift_get_keypoints(filt);
    const int nkeys = vl_sift_get_nkeypoints(filt);

    // Update gradient before launching parallel extraction
    vl_sift_update_gradient(filt);

#ifdef OPENMVG_USE_OPENMP
    #pragma omp parallel for private(descr, descriptor)
#endif
    for (int i = 0; i < nkeys; ++i) {

      // Feature masking
      if (mask)
      {
        const image::Image<unsigned char> & maskIma = *mask;
        if (maskIma(keys[i].y, keys[i].x) == 0)
          continue;
      }

      double angles[4] = {0.0, 0.0, 0.0, 0.0};
      int nangles = 1; // by default (1 upright feature)
      if (_bOrientation)
      { // compute from 1 to 4 orientations
        nangles = vl_sift_calc_keypoint_orientations(filt, angles, keys+i);
      }

      for (int q = 0; q < nangles; ++q) {
        vl_sift_calc_keypoint_descriptor(filt, &descr[0], keys+i, angles[q]);
        const SIOPointFeature fp(keys[i].x, keys[i].y,
          keys[i].sigma, static_cast<float>(angles[q]));

        siftDescToUChar(&descr[0], descriptor, _params._root_sift);
#ifdef OPENMVG_USE_OPENMP
        #pragma omp critical
#endif
        {
          regionsCasted->Descriptors().push_back(descriptor);
          regionsCasted->Features().push_back(fp);
        }
      }
    }
    if (vl_sift_process_next_octave(filt))
      break; // Last octave
  }
  vl_sift_delete(filt);
  return true;
};
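Finally, a sketch of the optional masking path shared by examples 3 and 4: the mask must have the same dimensions as the image (again obtained via Width()/Height()), because both overloads index it at keypoint coordinates. The describer class name, the Image(width, height) constructor, and fill() are assumptions used only for illustration.

// Hypothetical usage: describe only the left half of the image.
void DescribeLeftHalf(const image::Image<unsigned char> & image,
                      SomeSiftImageDescriber & describer,
                      std::unique_ptr<Regions> & regions)
{
  // Build a mask with the same dimensions as the image; non-zero marks the region of interest.
  image::Image<unsigned char> mask(image.Width(), image.Height());  // constructor assumed
  mask.fill(0);
  for (int y = 0; y < mask.Height(); ++y)
    for (int x = 0; x < mask.Width() / 2; ++x)
      mask(y, x) = 255;
  describer.Describe(image, regions, &mask);
}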