本文整理汇总了C++中 vpImage::getRows 方法的典型用法代码示例。如果您正苦于以下问题:C++ vpImage::getRows 方法的具体用法?C++ vpImage::getRows 怎么用?C++ vpImage::getRows 使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 vpImage 的用法示例。
在下文中一共展示了vpImage::getRows方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: fromImage
/*!
 * Manikandan. B
 * Photometric moments v2
 * Intended to be used by 'vpMomentObject's of type DENSE_FULL_OBJECT.
 *
 * Accumulates, over every pixel of the image, the per-pixel values produced
 * by cacheValues(cache, x, y, intensity) into the moment array 'values'
 * (indexed as values[k*order + l] for l < order - k). With a WHITE
 * background the complemented intensity (1 - I) is used so that the object,
 * not the background, carries the photometric mass.
 *
 * @param image : Grayscale image.
 * @param cam : Camera parameters, used by vpPixelMeterConversion to convert
 *        pixel coordinates (i,j) to metric coordinates (x,y).
 * @param bg_type : WHITE or BLACK background surrounding the object in the
 *        image.
 * @param normalize_with_pix_size : If set, the moments are normalized after
 *        calculation w.r.t. the pixel size available from the camera
 *        parameters (1 / (px * py)).
 */
void vpMomentObject::fromImage(const vpImage<unsigned char>& image, const vpCameraParameters& cam,
vpCameraImgBckGrndType bg_type, bool normalize_with_pix_size)
{
  std::vector<double> cache(order*order, 0.);
  values.assign(order*order, 0.);

  // When set, rescale intensities so the image behaves like a probability
  // density function.
  double iscale = 1.0;
  if (flg_normalize_intensity) {
    const double Imax = 255.; // To check the effect of gray level change. ISR Coimbra
    iscale = 1.0/Imax;
  }

  // The WHITE- and BLACK-background cases previously duplicated the whole
  // pixel scan; they differ only in whether the complemented intensity is
  // used, so a single merged loop handles both.
  const bool white_background = (bg_type == vpMomentObject::WHITE);

  for (unsigned int j = 0; j < image.getRows(); j++) {
    for (unsigned int i = 0; i < image.getCols(); i++) {
      double intensity = static_cast<double>(image[j][i])*iscale;
      if (white_background)
        intensity = 1. - intensity; // Object mass instead of background mass

      // (x,y): pixel coordinates converted to metres.
      double x = 0.;
      double y = 0.;
      vpPixelMeterConversion::convertPoint(cam, i, j, x, y);

      // Fill 'cache' with this pixel's x^p*y^q*I(x,y) contributions ...
      cacheValues(cache, x, y, intensity);

      // ... and accumulate them into the moment array 'values'.
      for (unsigned int k = 0; k < order; k++) {
        const unsigned int kidx = k*order;
        for (unsigned int l = 0; l < order-k; l++) {
          const unsigned int idx = kidx + l;
          values[idx] += cache[idx];
        }
      }
    }
  }

  if (normalize_with_pix_size) {
    // Normalisation equivalent to sampling interval/pixel size delX x delY
    const double norm_factor = 1./(cam.get_px()*cam.get_py());
    for (std::vector<double>::iterator it = values.begin(); it != values.end(); ++it) {
      *it = (*it) * norm_factor;
    }
  }
}
示例2: mask
/*!
 * (Re)initialize the KLT point tracker from image \e I.
 *
 * Resets the reference pose bookkeeping, renders the visibility mask of the
 * tracked model faces/cylinders into an OpenCV image, and (re)starts KLT
 * feature tracking restricted to that mask.
 *
 * @param I : Current grayscale image used to reinitialize the tracking.
 */
void
vpMbKltTracker::reinit(const vpImage<unsigned char>& I)
{
// Remember the pose at (re)initialization time; ctTc0 (the transformation
// accumulated since then) is reset to identity.
c0Mo = cMo;
ctTc0.eye();
// Convert the ViSP image to the OpenCV format used by the KLT tracker.
vpImageConvert::convert(I, cur);
cam.computeFov(I.getWidth(), I.getHeight());
if(useScanLine){
faces.computeClippedPolygons(cMo,cam);
faces.computeScanLineRender(cam, I.getWidth(), I.getHeight());
}
// mask
// Build the 8-bit single-channel mask image, initially all zero (nothing
// trackable). The API differs across OpenCV versions: cv::Mat for >= 2.4.8,
// legacy IplImage otherwise (released at the end of this function).
#if (VISP_HAVE_OPENCV_VERSION >= 0x020408)
cv::Mat mask((int)I.getRows(), (int)I.getCols(), CV_8UC1, cv::Scalar(0));
#else
IplImage* mask = cvCreateImage(cvSize((int)I.getWidth(), (int)I.getHeight()), IPL_DEPTH_8U, 1);
cvZero(mask);
#endif
vpMbtDistanceKltPoints *kltpoly;
vpMbtDistanceKltCylinder *kltPolyCylinder;
if(useScanLine){
// Scanline rendering already produced a visibility mask; just convert it.
vpImageConvert::convert(faces.getMbScanLineRenderer().getMask(), mask);
}
else{
// Without scanline rendering, rasterize each visible tracked polygon (and
// cylinder bounding-box face) into the mask with value 'val'.
unsigned char val = 255/* - i*15*/;
for(std::list<vpMbtDistanceKltPoints*>::const_iterator it=kltPolygons.begin(); it!=kltPolygons.end(); ++it){
kltpoly = *it;
if(kltpoly->polygon->isVisible() && kltpoly->isTracked() && kltpoly->polygon->getNbPoint() > 2){
//need to changeFrame when reinit() is called by postTracking
//other solution is
kltpoly->polygon->changeFrame(cMo);
kltpoly->polygon->computePolygonClipped(cam); // Might not be necessary when scanline is activated
kltpoly->updateMask(mask, val, maskBorder);
}
}
for(std::list<vpMbtDistanceKltCylinder*>::const_iterator it=kltCylinders.begin(); it!=kltCylinders.end(); ++it){
kltPolyCylinder = *it;
if(kltPolyCylinder->isTracked())
{
// Clip every face of the cylinder's bounding box before updating the mask.
for(unsigned int k = 0 ; k < kltPolyCylinder->listIndicesCylinderBBox.size() ; k++)
{
unsigned int indCylBBox = (unsigned int)kltPolyCylinder->listIndicesCylinderBBox[k];
if(faces[indCylBBox]->isVisible() && faces[indCylBBox]->getNbPoint() > 2u){
faces[indCylBBox]->computePolygonClipped(cam); // Might not be necessary when scanline is activated
}
}
kltPolyCylinder->updateMask(mask, val, maskBorder);
}
}
}
// Detect KLT features on the current image, restricted to the mask.
tracker.initTracking(cur, mask);
// tracker.track(cur); // AY: Not sure to be usefull but makes sure that the points are valid for tracking and avoid too fast reinitialisations.
// vpCTRACE << "init klt. detected " << tracker.getNbFeatures() << " points" << std::endl;
// Distribute the freshly detected features to the polygons/cylinders that
// own them.
for(std::list<vpMbtDistanceKltPoints*>::const_iterator it=kltPolygons.begin(); it!=kltPolygons.end(); ++it){
kltpoly = *it;
if(kltpoly->polygon->isVisible() && kltpoly->isTracked() && kltpoly->polygon->getNbPoint() > 2){
kltpoly->init(tracker);
}
}
for(std::list<vpMbtDistanceKltCylinder*>::const_iterator it=kltCylinders.begin(); it!=kltCylinders.end(); ++it){
kltPolyCylinder = *it;
if(kltPolyCylinder->isTracked())
kltPolyCylinder->init(tracker, cMo);
}
// Legacy OpenCV: the IplImage mask was heap-allocated above and must be freed.
#if (VISP_HAVE_OPENCV_VERSION < 0x020408)
cvReleaseImage(&mask);
#endif
}