本文整理汇总了C++中vpImage::getWidth方法的典型用法代码示例。如果您正苦于以下问题:C++ vpImage::getWidth方法的具体用法?C++ vpImage::getWidth怎么用?C++ vpImage::getWidth使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类vpImage
的用法示例。
在下文中一共展示了vpImage::getWidth方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: display
/*!
Initialize the display (size, position and title) of a color
image in RGBa format.

\param I : Image to be displayed (note that the image has to be initialized,
i.e. have a non-zero size).
\param x, y : The window is set at position x,y (column index, row index).
A value of -1 keeps the current position.
\param title : Window title. An empty string keeps the current title.

\throws vpDisplayException::notInitializedError if \e I has a zero width or height.
*/
void
vpDisplayGTK::init(vpImage<vpRGBa> &I,
int x,
int y,
const std::string &title)
{
// Refuse to create a display for an image that has not been resized yet.
if ((I.getHeight() == 0) || (I.getWidth()==0))
{
vpERROR_TRACE("Image not initialized " ) ;
throw(vpDisplayException(vpDisplayException::notInitializedError,
"Image not initialized")) ;
}
// -1 is the sentinel for "keep the previous window position".
if (x != -1)
windowXPosition = x ;
if (y != -1)
windowYPosition = y ;
if (! title.empty())
title_ = title;
// Delegate window creation to the size/position based overload.
init (I.getWidth(), I.getHeight(), windowXPosition, windowYPosition, title_) ;
// Register this display on the image so vpDisplay static helpers can find it.
I.display = this ;
displayHasBeenInitialized = true ;
}
示例2: image
/*!
Test wether the line is close to the border of the image (at a given threshold)
\param I : the input image (to know its size)
\param threshold : the threshold in pixel
\return true if the line is near the border of the image
*/
bool
vpMbtDistanceLine::closeToImageBorder(const vpImage<unsigned char>& I, const unsigned int threshold)
{
if(threshold > I.getWidth() || threshold > I.getHeight()){
return true;
}
if (isvisible){
for(unsigned int i = 0 ; i < meline.size() ; i++){
for(std::list<vpMeSite>::const_iterator it=meline[i]->getMeList().begin(); it!=meline[i]->getMeList().end(); ++it){
int i = it->i ;
int j = it->j ;
if(i < 0 || j < 0){ //out of image.
return true;
}
if( ((unsigned int)i > (I.getHeight()- threshold) ) || (unsigned int)i < threshold ||
((unsigned int)j > (I.getWidth ()- threshold) ) || (unsigned int)j < threshold ) {
return true;
}
}
}
}
return false;
}
示例3:
/*!
  Apply a 2D convolution filter to an image.

  \param I : Image to filter.
  \param If : Filtered image, resized to the size of \e I. A border of half
  the kernel size is left at 0 (the kernel is only applied where it fully
  fits inside the image).
  \param M : Filter coefficients (square kernel of size M.getRows()).
*/
void
vpImageFilter::filter(const vpImage<unsigned char> &I,
vpImage<double>& If,
const vpMatrix& M)
{
  unsigned int size = M.getRows() ;
  unsigned int half_size = size/2 ;

  If.resize(I.getHeight(),I.getWidth()) ;
  If = 0 ;

  // Fix: if the image is smaller than the kernel, the unsigned expression
  // I.getHeight()-half_size below would wrap around to a huge value and the
  // loops would read far out of bounds. Leave If zeroed in that case.
  if (I.getHeight() < size || I.getWidth() < size)
    return ;

  for (unsigned int i=half_size ; i < I.getHeight()-half_size ; i++)
  {
    for (unsigned int j=half_size ; j < I.getWidth()-half_size ; j++)
    {
      // Correlate the kernel with the neighborhood centered on (i, j).
      double conv_x = 0 ;

      for(unsigned int a = 0 ; a < size ; a++ )
        for(unsigned int b = 0 ; b < size ; b++ )
        {
          double val = I[i-half_size+a][j-half_size+b] ;
          conv_x += M[a][b] * val ;
        }
      If[i][j] = conv_x ;
    }
  }
}
示例4: axis
/*!
  Apply a separable filter to an image along both axes.

  \param I : Image to filter.
  \param Iu : Filtered image along the horizontal axis (u = columns),
  using the kernel \e M as given.
  \param Iv : Filtered image along the vertical axis (v = rows),
  using the transpose of \e M.
  \param M : Filter coefficients (square kernel of size M.getRows()).

  Both outputs are resized to the size of \e I; a border of half the kernel
  size is left at 0.
*/
void
vpImageFilter::filter(const vpImage<double> &I,
vpImage<double>& Iu,
vpImage<double>& Iv,
const vpMatrix& M)
{
  unsigned int size = M.getRows() ;
  unsigned int half_size = size/2 ;

  Iu.resize(I.getHeight(),I.getWidth()) ;
  Iv.resize(I.getHeight(),I.getWidth()) ;
  Iu = 0 ;
  Iv = 0 ;

  // Fix: if the image is smaller than the kernel, the unsigned expression
  // I.getHeight()-half_size below would wrap around to a huge value and the
  // loops would read out of bounds. Leave the outputs zeroed in that case.
  if (I.getHeight() < size || I.getWidth() < size)
    return ;

  for (unsigned int v=half_size ; v < I.getHeight()-half_size ; v++)
  {
    for (unsigned int u=half_size ; u < I.getWidth()-half_size ; u++)
    {
      double conv_u = 0 ;
      double conv_v = 0 ;

      // One pass accumulates both orientations: M for u, M transposed for v.
      for(unsigned int a = 0 ; a < size ; a++ )
        for(unsigned int b = 0 ; b < size ; b++ )
        {
          double val = I[v-half_size+a][u-half_size+b] ;
          conv_u += M[a][b] * val ;
          conv_v += M[b][a] * val ;
        }
      Iu[v][u] = conv_u ;
      Iv[v][u] = conv_v ;
    }
  }
}
示例5: throw
/*!
  Compute the image subtraction: \f$ Ires = I1 - I2 \f$.
  (The original doc said "addition", which contradicted the code.)

  \param I1 : The first image.
  \param I2 : The second image (must have the same size as \e I1).
  \param Ires : \f$ Ires = I1 - I2 \f$; resized to the size of \e I1 if needed.
  \param saturate : If true, saturate the result to [0 ; 255] using vpMath::saturate, otherwise overflow may occur.

  \throws vpException::dimensionError if \e I1 and \e I2 differ in size.
*/
void
vpImageTools::imageSubtract(const vpImage<unsigned char> &I1,
const vpImage<unsigned char> &I2,
vpImage<unsigned char> &Ires,
const bool saturate)
{
if ((I1.getHeight() != I2.getHeight()) || (I1.getWidth() != I2.getWidth())) {
throw (vpException(vpException::dimensionError, "The two images do not have the same size"));
}
if ((I1.getHeight() != Ires.getHeight()) || (I1.getWidth() != Ires.getWidth())) {
Ires.resize(I1.getHeight(), I1.getWidth());
}
// Raw pointer walk over the three bitmaps (contiguous pixel buffers).
unsigned char *ptr_I1 = I1.bitmap;
unsigned char *ptr_I2 = I2.bitmap;
unsigned char *ptr_Ires = Ires.bitmap;
unsigned int cpt = 0;
#if VISP_HAVE_SSE2
// Vectorized path: process 16 pixels per iteration.
// _mm_subs_epu8 saturates at 0; _mm_sub_epi8 wraps (matches the scalar
// non-saturating behavior for unsigned 8-bit values).
if (Ires.getSize() >= 16) {
for (; cpt <= Ires.getSize() - 16 ; cpt += 16, ptr_I1 += 16, ptr_I2 += 16, ptr_Ires += 16) {
const __m128i v1 = _mm_loadu_si128( (const __m128i*) ptr_I1);
const __m128i v2 = _mm_loadu_si128( (const __m128i*) ptr_I2);
const __m128i vres = saturate ? _mm_subs_epu8(v1, v2) : _mm_sub_epi8(v1, v2);
_mm_storeu_si128( (__m128i*) ptr_Ires, vres );
}
}
#endif
// Scalar tail (or full scalar path when SSE2 is unavailable).
for (; cpt < Ires.getSize(); cpt++, ++ptr_I1, ++ptr_I2, ++ptr_Ires) {
*ptr_Ires = saturate ? vpMath::saturate<unsigned char>( (short int) *ptr_I1 - (short int) *ptr_I2 ) : *ptr_I1 - *ptr_I2;
}
}
示例6: throw
// Color pictures SetBackGroundImage.
// Copies the RGB channels of I into the OpenGL background buffer
// `image_background`, flipping the image vertically because the OpenGL image
// frame origin is at the bottom-left corner.
// Throws vpException::dimensionError if I does not match the view size.
void
vpAR::setImage(vpImage<vpRGBa> &I)
{
  if ((internal_width != I.getWidth()) ||
      (internal_height != I.getHeight()))
  {
    vpERROR_TRACE("The image size is different from the view size ");
    // Bug fix: the original wrote
    //   throw(vpException(vpException::dimensionError),"msg");
    // which is a comma expression — it threw the const char* message, not
    // the vpException. Pass the message to the constructor instead.
    throw(vpException(vpException::dimensionError, "The image size is different from the view size")) ;
  }

  background = true ;

  unsigned int k = 0 ;
  for (unsigned int i=0 ; i <I.getHeight() ; i++)
  {
    k = 0;
    // Rows are read bottom-up (getHeight()-i-1) so the view is not upside down.
    for (unsigned int j=0 ; j <I.getWidth() ; j++)
    {
      image_background[i*I.getWidth()*3+k+0]=I[I.getHeight()-i-1][j].R ;
      image_background[i*I.getWidth()*3+k+1]=I[I.getHeight()-i-1][j].G ;
      image_background[i*I.getWidth()*3+k+2]=I[I.getHeight()-i-1][j].B ;
      k+=3;
    }
  }
}
示例7: if
/*!
Get the view of the virtual camera. Be careful, the image I is modified in
place. The projected image is not added as an overlay!

\param I : The image used to store the result.
\param cam : The parameters of the virtual camera.
*/
void
vpImageSimulator::getImage(vpImage<unsigned char> &I, const vpCameraParameters cam)
{
// Counter of pixels actually drawn (kept for symmetry with other overloads;
// not returned or otherwise used here).
int nb_point_dessine = 0;
if (cleanPrevImage)
{
// Clear the previously drawn region (the old ROI rectangle) to the
// background color converted to gray with ITU-R BT.709 luma weights.
unsigned char col = (unsigned char)(0.2126 * bgColor.R + 0.7152 * bgColor.G + 0.0722 * bgColor.B);
for (int i = (int)rect.getTop(); i < (int)rect.getBottom(); i++)
{
for (int j = (int)rect.getLeft(); j < (int)rect.getRight(); j++)
{
I[i][j] = col;
}
}
}
if(visible)
{
// Recompute the region of interest covered by the projected plane.
getRoi(I.getWidth(),I.getHeight(),cam,pt,rect);
double top = rect.getTop();
double bottom = rect.getBottom();
double left = rect.getLeft();
double right= rect.getRight();
unsigned char *bitmap = I.bitmap;
unsigned int width = I.getWidth();
vpImagePoint ip;
// NOTE(review): the ROI bounds are not clamped to the image here —
// presumably getRoi() already clips them to [0, width/height); verify.
for (int i = (int)top; i < (int)bottom; i++)
{
for (int j = (int)left; j < (int)right; j++)
{
double x=0,y=0;
ip.set_ij(i,j);
// Pixel -> normalized meter coordinates of the ray through (i, j).
vpPixelMeterConversion::convertPoint(cam,ip, x,y);
// Reuse ip to carry the metric coordinates (row = y, col = x) into getPixel.
ip.set_ij(y,x);
if (colorI == GRAY_SCALED)
{
unsigned char Ipixelplan = 0;
if(getPixel(ip,Ipixelplan))
{
// Write directly into the bitmap (row-major, `width` pixels per row).
*(bitmap+i*width+j)=Ipixelplan;
nb_point_dessine++;
}
}
else if (colorI == COLORED)
{
vpRGBa Ipixelplan;
if(getPixel(ip,Ipixelplan))
{
// Color source image projected into a gray image: BT.709 luma conversion.
unsigned char pixelgrey = (unsigned char)(0.2126 * Ipixelplan.R + 0.7152 * Ipixelplan.G + 0.0722 * Ipixelplan.B);
*(bitmap+i*width+j)=pixelgrey;
nb_point_dessine++;
}
}
}
}
}
}
示例8: vispImageToRos
// Convert a ViSP grayscale image into a ROS sensor_msgs::Image (MONO8).
// The destination message is resized and fully overwritten; its step equals
// the image width (no row padding).
void vispImageToRos(sensor_msgs::Image& dst,
const vpImage<unsigned char>& src)
{
  dst.width = src.getWidth();
  dst.height = src.getHeight();
  dst.encoding = sensor_msgs::image_encodings::MONO8;
  dst.step = src.getWidth();
  dst.data.resize(dst.height * dst.step);
  // Iterate rows in the outer loop: both buffers are row-major, so this
  // touches memory sequentially. The original looped columns first, striding
  // a full row per access.
  for(unsigned j = 0; j < src.getHeight (); ++j)
    for(unsigned i = 0; i < src.getWidth (); ++i)
      dst.data[j * dst.step + i] = src[j][i];
}
示例9:
/*!
Display the 3D model at a given position using the given camera parameters.

\param I : The color image.
\param cMo_ : Pose used to project the 3D model into the image.
\param camera : The camera parameters.
\param col : The desired color.
\param thickness : The thickness of the lines.
\param displayFullModel : Boolean to say if all the model has to be displayed, even the faces that are not visible.
*/
void
vpMbKltTracker::display(const vpImage<vpRGBa>& I, const vpHomogeneousMatrix &cMo_, const vpCameraParameters & camera,
const vpColor& col , const unsigned int thickness, const bool displayFullModel)
{
// Work on a local copy so the FOV computation does not mutate the caller's parameters.
vpCameraParameters c = camera;
if(clippingFlag > 3) // Contains at least one FOV constraint
c.computeFov(I.getWidth(), I.getHeight());
vpMbtDistanceKltPoints *kltpoly;
vpMbtDistanceKltCylinder *kltPolyCylinder;
// Previous version 12/08/2015
// for(std::list<vpMbtDistanceKltPoints*>::const_iterator it=kltPolygons.begin(); it!=kltPolygons.end(); ++it){
// kltpoly = *it;
// kltpoly->polygon->changeFrame(cMo_);
// kltpoly->polygon->computePolygonClipped(c);
// }
faces.computeClippedPolygons(cMo_,c);
// NOTE(review): this call uses the member `cam` while everything else in
// this method uses the local copy `c` / the `camera` parameter — confirm
// this asymmetry is intentional.
if(useScanLine && !displayFullModel)
faces.computeScanLineRender(cam,I.getWidth(), I.getHeight());
// Draw the KLT polygon features (and their primitives when tracked & visible).
for(std::list<vpMbtDistanceKltPoints*>::const_iterator it=kltPolygons.begin(); it!=kltPolygons.end(); ++it){
kltpoly = *it;
kltpoly->display(I,cMo_,camera,col,thickness,displayFullModel);
if(displayFeatures && kltpoly->hasEnoughPoints() && kltpoly->polygon->isVisible() && kltpoly->isTracked()) {
kltpoly->displayPrimitive(I);
// faces[i]->displayNormal(I);
}
}
// Draw the KLT cylinder features.
for(std::list<vpMbtDistanceKltCylinder*>::const_iterator it=kltCylinders.begin(); it!=kltCylinders.end(); ++it){
kltPolyCylinder = *it;
kltPolyCylinder->display(I,cMo_,camera,col,thickness,displayFullModel);
if(displayFeatures && kltPolyCylinder->isTracked() && kltPolyCylinder->hasEnoughPoints())
kltPolyCylinder->displayPrimitive(I);
}
// Draw the model circles.
for(std::list<vpMbtDistanceCircle*>::const_iterator it=circles_disp.begin(); it!=circles_disp.end(); ++it){
(*it)->display(I, cMo_, camera, col, thickness);
}
#ifdef VISP_HAVE_OGRE
if(useOgre)
faces.displayOgre(cMo_);
#endif
}
示例10: getRoi
/*!
Get the view of the virtual camera. Be careful, the image I is modified in
place. The projected image is not added as an overlay! In this method you
specify directly the image which is projected.

\param I : The image used to store the result.
\param Isrc : The image which is projected into \f$ I \f$.
\param cam : The parameters of the virtual camera.
*/
void
vpImageSimulator::getImage(vpImage<unsigned char> &I,
vpImage<unsigned char> &Isrc,
const vpCameraParameters &cam)
{
if (cleanPrevImage)
{
// Clear the whole destination to the background color converted to gray
// with ITU-R BT.709 luma weights.
unsigned char col = (unsigned char)(0.2126 * bgColor.R + 0.7152 * bgColor.G + 0.0722 * bgColor.B);
for (unsigned int i = 0; i < I.getHeight(); i++)
{
for (unsigned int j = 0; j < I.getWidth(); j++)
{
I[i][j] = col;
}
}
}
if(visible)
{
// Compute the region of interest, using the clipped polygon when clipping applies.
if(!needClipping)
getRoi(I.getWidth(),I.getHeight(),cam,pt,rect);
else
getRoi(I.getWidth(),I.getHeight(),cam,ptClipped,rect);
double top = rect.getTop();
double bottom = rect.getBottom();
double left = rect.getLeft();
double right= rect.getRight();
unsigned char *bitmap = I.bitmap;
unsigned int width = I.getWidth();
vpImagePoint ip;
int nb_point_dessine = 0; // drawn-pixel counter; not used after the loop
// NOTE(review): top/left are cast to unsigned — presumably getRoi() clamps
// the rectangle inside the image (negative values would wrap); verify.
for (unsigned int i = (unsigned int)top; i < (unsigned int)bottom; i++)
{
for (unsigned int j = (unsigned int)left; j < (unsigned int)right; j++)
{
double x=0,y=0;
ip.set_ij(i,j);
// Pixel -> normalized meter coordinates of the ray through (i, j).
vpPixelMeterConversion::convertPoint(cam,ip, x,y);
// Reuse ip to carry the metric coordinates (row = y, col = x) into getPixel.
ip.set_ij(y,x);
unsigned char Ipixelplan = 0;
if(getPixel(Isrc,ip,Ipixelplan))
{
// Row-major write into the destination bitmap.
*(bitmap+i*width+j)=Ipixelplan;
nb_point_dessine++;
}
}
}
}
}
示例11: egalisation
void egalisation(const vpImage<unsigned char> &I)
{
vpImage<unsigned char> I2(I.getHeight(), I.getWidth());
unsigned int histo[256];
unsigned int histocumul[256];
unsigned int anam[256];
int max;
int h = I.getHeight();
int w = I.getWidth();
histogramme(I, histo, max);
tracer_histo(histo, max, 256, 100, 300);
histocumule(I, histo, histocumul);
tracer_histo(histocumul, w*h, 256, 100, 300);
cout<<"Entropie : "<<entropie(histo, h, w)<<endl;
cout<<"Moyenne : "<<moyenne(histo, h, w)<<endl;
cout<<"Écart type : "<<ecart_type(histo, h, w)<<endl;
int dmin, dmax;
dynamique(histo, dmin, dmax);
cout<<"Dynamique minimum : "<<dmin<<" maximum : "<<dmax<<endl;
cout<<"Nombre de niveaux de gris : "<<niveaux_de_gris(histo)<<endl;
for (int i=0; i<256; i++)
anam[i] = round(1.0*histocumul[i]/(I.getWidth()*I.getHeight())*255);
//tracer_histo(anam, max, 256, 100, 300);
for (int i=0; i<I.getHeight(); i++)
for (int j=0; j<I.getWidth(); j++)
I2[i][j] = anam[I[i][j]];
vpDisplayX d2(I2,100,500) ;
vpDisplay::display(I2) ;
vpDisplay::flush(I2) ;
histogramme(I2, histo, max);
tracer_histo(histo, max, 256, 100, 300);
histocumule(I2, histo, histocumul);
tracer_histo(histocumul, w*h, 256, 100, 300);
cout<<"Entropie : "<<entropie(histo, h, w)<<endl;
cout<<"Moyenne : "<<moyenne(histo, h, w)<<endl;
cout<<"Écart type : "<<ecart_type(histo, h, w)<<endl;
dynamique(histo, dmin, dmax);
cout<<"Dynamique minimum : "<<dmin<<" maximum : "<<dmax<<endl;
cout<<"Nombre de niveaux de gris : "<<niveaux_de_gris(histo)<<endl;
vpDisplay::getClick(I2) ;
}
示例12: equalizeHistogram
/*!
  \ingroup group_imgproc_histogram

  Adjust the contrast of a color image by performing an histogram equalization.
  The intensity distribution is redistributed over the full [0 - 255] range such as the cumulative histogram
  distribution becomes linear. The alpha channel is ignored / copied from the source alpha channel.

  \param I : The color image to apply histogram equalization.
  \param useHSV : If true, the histogram equalization is performed on the value channel (in HSV space), otherwise
  the histogram equalization is performed independently on the RGB channels.
*/
void vp::equalizeHistogram(vpImage<vpRGBa> &I, const bool useHSV) {
  const unsigned int nbPixels = I.getWidth()*I.getHeight();
  // Nothing to do on an empty image.
  if(nbPixels == 0) {
    return;
  }

  if(useHSV) {
    // Equalize only the value plane: RGBa -> HSV, equalize V, HSV -> RGBa.
    vpImage<unsigned char> hue(I.getHeight(), I.getWidth());
    vpImage<unsigned char> saturation(I.getHeight(), I.getWidth());
    vpImage<unsigned char> value(I.getHeight(), I.getWidth());

    vpImageConvert::RGBaToHSV((unsigned char *) I.bitmap, (unsigned char *) hue.bitmap,
                              (unsigned char *) saturation.bitmap, (unsigned char *) value.bitmap, nbPixels);

    vp::equalizeHistogram(value);

    vpImageConvert::HSVToRGBa((unsigned char*) hue.bitmap, (unsigned char*) saturation.bitmap,
                              (unsigned char*) value.bitmap, (unsigned char*) I.bitmap, nbPixels);
  } else {
    // Split the RGBa image into its four planes.
    vpImage<unsigned char> pR(I.getHeight(), I.getWidth());
    vpImage<unsigned char> pG(I.getHeight(), I.getWidth());
    vpImage<unsigned char> pB(I.getHeight(), I.getWidth());
    vpImage<unsigned char> pa(I.getHeight(), I.getWidth());

    vpImageConvert::split(I, &pR, &pG, &pB, &pa);

    // Equalize each color channel independently; alpha is copied back untouched.
    vp::equalizeHistogram(pR);
    vp::equalizeHistogram(pG);
    vp::equalizeHistogram(pB);

    // Interleave the planes back into I, pixel by pixel.
    for (unsigned int idx = 0; idx < nbPixels; idx++) {
      I.bitmap[idx].R = pR.bitmap[idx];
      I.bitmap[idx].G = pG.bitmap[idx];
      I.bitmap[idx].B = pB.bitmap[idx];
      I.bitmap[idx].A = pa.bitmap[idx];
    }
  }
}
示例13: RotationInvertedInterpolated
// Rotate image I by angle alpha (radians) into D using inverse mapping: for
// every destination pixel, compute the corresponding (fractional) source
// coordinates and interpolate — this avoids the holes that forward mapping
// (see RotationBasic) produces.
// NOTE(review): I is passed by value, copying the whole image on each call;
// a const reference would avoid the copy — confirm interpole() accepts a
// const image before changing the signature.
void RotationInvertedInterpolated(vpImage<uchar> I, vpImage<uchar> &D, float alpha) {
// If the destination image is larger than the source, shift the rotation
// center with offseti and offsetj so the result stays centered.
int offseti = (D.getHeight() - I.getHeight()) / 2;
int offsetj = (D.getWidth() - I.getWidth()) / 2;
// (a, b): rotation center = center of the source image.
int a = I.getHeight() / 2;
int b = I.getWidth() /2;
for (int fi=0; fi<D.getHeight(); fi++)
for (int fj=0; fj<D.getWidth(); fj++) {
// Source-frame coordinates of destination pixel (fi, fj).
int i = fi - offseti;
int j = fj - offsetj;
// Inverse rotation of (i, j) around (a, b).
float ti = (i-a) * cos(alpha) + (j-b) * sin(alpha) + a;
float tj = - (i-a) * sin(alpha) + (j-b) * cos(alpha) + b;
// NOTE(review): the bounds exclude row/column 0 (ti>0, tj>0) and allow
// ti/tj up to just below height/width — if interpole() reads the ti+1 /
// tj+1 neighbours, values near the last row/column may read out of range;
// verify against interpole's implementation.
if((tj>0) && (ti>0) && (ti<I.getHeight()) && (tj<I.getWidth())) D[i+offseti][j+offsetj] = interpole(I,ti,tj);
}
}
示例14: RotationBasic
// Rotate image I by angle alpha (radians) into D using forward mapping: each
// source pixel is sent to its rotated destination (this can leave holes in D;
// see RotationInvertedInterpolated for the hole-free inverse mapping).
// If the destination image is larger than the source, the rotation center is
// shifted with offseti/offsetj so the result stays centered.
void RotationBasic(vpImage<uchar> I, vpImage<uchar> &D, float alpha) {
  int offseti = (D.getHeight() - I.getHeight()) / 2;
  int offsetj = (D.getWidth() - I.getWidth()) / 2;
  // (a, b): rotation center = center of the source image.
  int a = I.getHeight() / 2;
  int b = I.getWidth() /2;
  for (int i=0; i<I.getHeight(); i++)
    for (int j=0; j<I.getWidth(); j++) {
      // Forward rotation of (i, j) around (a, b), then recentering.
      int ti = (i-a) * cos(alpha) - (j-b) * sin(alpha) + a;
      int tj = (i-a) * sin(alpha) + (j-b) * cos(alpha) + b;
      ti+=offseti;
      tj+=offsetj;
      // Bug fix: the original tested `ti < D.getWidth()` instead of
      // `tj < D.getWidth()`, allowing out-of-bounds writes on non-square
      // destination images.
      if((tj>0) && (ti>0) && (ti<D.getHeight()) && (tj<D.getWidth())) D[ti][tj] = I[i][j];
    }
}
示例15: rosImageToVisp
// Convert a ROS sensor_msgs::Image into a ViSP grayscale image.
// Supports MONO8 (direct copy) and RGB8/RGBA8/BGR8/BGRA8 (averaged to gray,
// ignoring the alpha channel). Resizes dst if its size differs from src.
// Throws std::runtime_error on any other encoding.
void rosImageToVisp(vpImage<unsigned char>& dst,
const sensor_msgs::Image::ConstPtr& src)
{
  using sensor_msgs::image_encodings::RGB8;
  using sensor_msgs::image_encodings::RGBA8;
  using sensor_msgs::image_encodings::BGR8;
  using sensor_msgs::image_encodings::BGRA8;
  using sensor_msgs::image_encodings::MONO8;
  using sensor_msgs::image_encodings::MONO16;
  using sensor_msgs::image_encodings::numChannels;

  // Resize the image if necessary.
  if (src->width != dst.getWidth() || src->height != dst.getHeight())
  {
    ROS_INFO
      ("dst is %dx%d but src size is %dx%d, resizing.",
       src->width, src->height,
       dst.getWidth (), dst.getHeight ());
    dst.resize (src->height, src->width);
  }

  if(src->encoding == MONO8)
  {
    // Copy row by row: src->step (bytes per row) may exceed the row width
    // when the source has padding, while dst.bitmap is tightly packed.
    // Bug fix: the original memcpy of height * step bytes overflowed dst
    // whenever step > width.
    for (unsigned j = 0; j < dst.getHeight (); ++j)
      std::memcpy(dst.bitmap + j * dst.getWidth (),
                  &src->data[j * src->step],
                  dst.getWidth () * sizeof(unsigned char));
  }
  else if(src->encoding == RGB8 || src->encoding == RGBA8
          || src->encoding == BGR8 || src->encoding == BGRA8)
  {
    unsigned nc = numChannels(src->encoding);
    // Skip the trailing alpha channel when averaging RGBA/BGRA.
    unsigned cEnd =
      (src->encoding == RGBA8 || src->encoding == BGRA8) ? nc - 1 : nc;

    // Rows in the outer loop for cache-friendly, row-major access.
    for(unsigned j = 0; j < dst.getHeight (); ++j)
      for(unsigned i = 0; i < dst.getWidth (); ++i)
      {
        int acc = 0;
        for(unsigned c = 0; c < cEnd; ++c)
          acc += src->data[j * src->step + i * nc + c];
        // Bug fix: divide by the number of channels actually summed (cEnd).
        // The original divided by nc, darkening RGBA/BGRA images by 3/4.
        dst[j][i] = acc / cEnd;
      }
  }
  else
  {
    // Bug fix: Boost.Format placeholders are %N% — "'%1'" is an invalid
    // format string and would itself throw instead of reporting the encoding.
    boost::format fmt("bad encoding '%1%'");
    fmt % src->encoding;
    throw std::runtime_error(fmt.str());
  }
}