本文整理汇总了C++中vpImage类的典型用法代码示例。如果您正苦于以下问题:C++ vpImage类的具体用法?C++ vpImage怎么用?C++ vpImage使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了vpImage类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: table
/*!
  Change the look-up table (LUT) of an image. Pixel gray level values
  \f$ l \f$ in the input range \f$[A, B]\f$ are rescaled to
  \f$[A^*, B^*]\f$ by linear interpolation:
  \f$
  \left\{ \begin{array}{ll}
  l \in ]-\infty, A] \mbox{, } & l = A^* \\
  l \in [B, \infty[ \mbox{, } & l = B^* \\
  l \in ]A, B[ \mbox{, } & l = A^* + (l-A) * \frac{B^*-A^*}{B-A}
  \end{array}
  \right.
  \f$
  \param I : Image to process (modified in place).
  \param A : Low gray level of the input range.
  \param A_star : New gray level \f$ A^*\f$ assigned to pixels whose value was A.
  \param B : High gray level of the input range.
  \param B_star : New gray level \f$ B^*\f$ assigned to pixels whose value was B.
  \exception vpImageException::incorrectInitializationError If \f$B \leq A\f$.

  This method can for instance binarize an 8-bit image (range 0-255) at
  level 127:
  \code
  vpImage<unsigned char> I;
  vpImageIo::read(I, filename);
  // Gray levels <= 127 become 0, gray levels >= 128 become 255.
  vpImageTools::changeLUT(I, 127, 0, 128, 255);
  vpImageIo::write(I, "out.pgm");
  \endcode
*/
void vpImageTools::changeLUT(vpImage<unsigned char>& I,
                             unsigned char A,
                             unsigned char A_star,
                             unsigned char B,
                             unsigned char B_star)
{
  // The interpolation below needs a non-degenerate input range B > A.
  if (B <= A) {
    vpERROR_TRACE("Bad gray levels") ;
    throw (vpImageException(vpImageException::incorrectInitializationError ,
                            "Bad gray levels"));
  }

  const double slope = (double)(B_star - A_star) / (double)(B - A);

  for (unsigned int r = 0; r < I.getHeight(); r++) {
    for (unsigned int c = 0; c < I.getWidth(); c++) {
      const unsigned char pix = I[r][c];
      if (pix <= A)
        I[r][c] = A_star;
      else if (pix >= B)
        I[r][c] = B_star;
      else
        I[r][c] = (unsigned char)(A_star + slope * (pix - A));
    }
  }
}
示例2: rectangle
/*!
Computes the SURF points in only a part of the current image I and
try to matched them with the points in the reference list. The part
of the image is a rectangle defined by its top left corner, its
height and its width. The parameters of this rectangle must be given
in pixel. Only the matched points are stored.
\param I : The gray scaled image where the points are computed.
\param iP : The top left corner of the rectangle.
\param height : height of the rectangle (in pixel).
\param width : width of the rectangle (in pixel).
\return the number of point which have been matched.
*/
unsigned int vpKeyPointSurf::matchPoint(const vpImage<unsigned char> &I,
const vpImagePoint &iP,
const unsigned int height, const unsigned int width)
{
if((iP.get_i()+height) >= I.getHeight()
|| (iP.get_j()+width) >= I.getWidth())
{
vpTRACE("Bad size for the subimage");
throw(vpException(vpImageException::notInTheImage ,
"Bad size for the subimage"));
}
vpImage<unsigned char> subImage;
vpImageTools::createSubImage(I,
(unsigned int)iP.get_i(),
(unsigned int)iP.get_j(),
height, width, subImage);
unsigned int nbMatchedPoint = this->matchPoint(subImage);
for(unsigned int k = 0; k < nbMatchedPoint; k++)
{
(currentImagePointsList[k]).set_i((currentImagePointsList[k]).get_i()
+ iP.get_i());
(currentImagePointsList[k]).set_j((currentImagePointsList[k]).get_j()
+ iP.get_j());
}
return(nbMatchedPoint);
}
示例3: display
/*!
  Initialize the display (size, position and title) of a color
  image in RGBa format.
  \param I : Image to be displayed (the image has to be initialized).
  \param x, y : The window is set at position x,y (column index, row index).
  A value of -1 keeps the current window position.
  \param title : Window title. An empty string keeps the current title.
  \exception vpDisplayException::notInitializedError : If the image has
  a null width or height.
*/
void
vpDisplayGTK::init(vpImage<vpRGBa> &I,
                   int x,
                   int y,
                   const std::string &title)
{
  // Refuse to open a window on an empty image.
  if ((I.getHeight() == 0) || (I.getWidth()==0))
  {
    vpERROR_TRACE("Image not initialized " ) ;
    throw(vpDisplayException(vpDisplayException::notInitializedError,
                             "Image not initialized")) ;
  }

  // Only override the stored settings when the caller gave explicit values.
  if (! title.empty())
    title_ = title;
  if (x != -1)
    windowXPosition = x ;
  if (y != -1)
    windowYPosition = y ;

  init(I.getWidth(), I.getHeight(), windowXPosition, windowYPosition, title_) ;

  // Link the image to this display and mark the display as ready.
  I.display = this ;
  displayHasBeenInitialized = true ;
}
示例4: vpException
/*!
  Initialize the KLT tracker from image \e I: compute the face
  visibility (through Ogre when available and enabled, otherwise with
  the plain visibility test) and then re-initialize the tracking.
  \param I : Input gray level image.
  \exception vpException::fatalError : If the model was not initialized
  beforehand.
*/
void
vpMbKltTracker::init(const vpImage<unsigned char>& I)
{
  if (!modelInitialised) {
    throw vpException(vpException::fatalError, "model not initialized");
  }

  bool reInitialisation = false;
#ifdef VISP_HAVE_OGRE
  if (useOgre) {
    // Lazily set up the Ogre context the first time it is needed.
    if (!faces.isOgreInitialised()) {
      faces.setBackgroundSizeOgre(I.getHeight(), I.getWidth());
      faces.setOgreShowConfigDialog(ogreShowConfigDialog);
      faces.initOgre(cam);
      // Turn off Ogre config dialog display for the next call to this
      // function since settings are saved in the ogre.cfg file and used
      // during the next call
      ogreShowConfigDialog = false;
    }
    faces.setVisibleOgre(I, cam, cMo, angleAppears, angleDisappears, reInitialisation);
  }
  else {
    faces.setVisible(I, cam, cMo, angleAppears, angleDisappears, reInitialisation);
  }
#else
  // Without Ogre support the plain visibility test is used regardless
  // of the useOgre flag, as in the original implementation.
  faces.setVisible(I, cam, cMo, angleAppears, angleDisappears, reInitialisation);
#endif

  reinit(I);
}
示例5:
/*!
  Apply a filter to an image by correlating the kernel \e M with every
  interior pixel. The kernel is assumed to be square
  (size = M.getRows()). Border pixels, closer than half the kernel size
  to an edge, are left at 0.
  \param I : Image to filter.
  \param If : Filtered image, resized to the size of \e I.
  \param M : Filter coefficients (square matrix).
*/
void
vpImageFilter::filter(const vpImage<unsigned char> &I,
                      vpImage<double>& If,
                      const vpMatrix& M)
{
  unsigned int size = M.getRows() ;
  unsigned int half_size = size/2 ;

  If.resize(I.getHeight(),I.getWidth()) ;
  If = 0 ;

  // Guard: if the image is smaller than the kernel, the unsigned
  // subtractions in the loop bounds below would wrap around to a huge
  // value and index far out of bounds. The output stays all zero.
  if (I.getHeight() < size || I.getWidth() < size)
    return ;

  for (unsigned int i=half_size ; i < I.getHeight()-half_size ; i++)
  {
    for (unsigned int j=half_size ; j < I.getWidth()-half_size ; j++)
    {
      // Correlation of the kernel with the neighborhood centered on (i,j).
      double conv_x = 0 ;
      for(unsigned int a = 0 ; a < size ; a++ )
        for(unsigned int b = 0 ; b < size ; b++ )
        {
          double val = I[i-half_size+a][j-half_size+b] ;
          conv_x += M[a][b] * val ;
        }
      If[i][j] = conv_x ;
    }
  }
}
示例6: convert
/*!
  Back-project a depth map into a list of 3D points with a pinhole model.
  \param dmap : Input depth map; a value of -1 (or any non-positive
  value) marks an invalid measurement and is skipped.
  \param point3D : Output Nx3 matrix of 3D points, shrunk to the number
  of valid depths found.
  \param fx, fy : Focal lengths in pixels.
  \param cx, cy : Principal point coordinates in pixels.
  \return Always 1.
*/
int Conversion::convert(const vpImage<float> & dmap, Eigen::MatrixXf & point3D,
                        double fx, double fy, double cx, double cy)
{
  int height = dmap.getHeight();
  int width = dmap.getWidth();

  point3D.resize(height*width, 3);

  int index = 0;
  for (int i = 0; i < height; i++) {
    for (int j = 0; j < width; j++) {
      float z = dmap[i][j];
      // Keep only valid depths: strictly positive and different from the
      // -1 "no measurement" sentinel. Fixed: the original used bitwise &
      // instead of the intended logical &&.
      if (fabs(z + 1.f) > std::numeric_limits<float>::epsilon() && z > 0) {
        point3D(index, 2) = z;
        // NOTE(review): this pairs the row index i with (cx, fx) and the
        // column index j with (cy, fy); the usual pinhole convention is
        // the opposite (x from columns, y from rows). Preserved as-is --
        // confirm against the callers before changing.
        point3D(index, 0) = (float)((i - cx) * point3D(index, 2) / fx);
        point3D(index, 1) = (float)((j - cy) * point3D(index, 2) / fy);
        index++;
      }
    }
  }
  // Shrink to the number of valid points actually produced (invalid
  // depths were pruned above).
  point3D.conservativeResize(index, 3);
  return 1;
}
示例7: image
/*!
Test wether the line is close to the border of the image (at a given threshold)
\param I : the input image (to know its size)
\param threshold : the threshold in pixel
\return true if the line is near the border of the image
*/
bool
vpMbtDistanceLine::closeToImageBorder(const vpImage<unsigned char>& I, const unsigned int threshold)
{
if(threshold > I.getWidth() || threshold > I.getHeight()){
return true;
}
if (isvisible){
for(unsigned int i = 0 ; i < meline.size() ; i++){
for(std::list<vpMeSite>::const_iterator it=meline[i]->getMeList().begin(); it!=meline[i]->getMeList().end(); ++it){
int i = it->i ;
int j = it->j ;
if(i < 0 || j < 0){ //out of image.
return true;
}
if( ((unsigned int)i > (I.getHeight()- threshold) ) || (unsigned int)i < threshold ||
((unsigned int)j > (I.getWidth ()- threshold) ) || (unsigned int)j < threshold ) {
return true;
}
}
}
}
return false;
}
示例8: display
/*!
Display of a moving line thanks to its equation parameters and its extremities with all the site list.
\param I : The image used as background.
\param PExt1 : First extrimity
\param PExt2 : Second extrimity
\param site_list : vpMeSite list
\param A : Parameter a of the line equation a*i + b*j + c = 0
\param B : Parameter b of the line equation a*i + b*j + c = 0
\param C : Parameter c of the line equation a*i + b*j + c = 0
\param color : Color used to display the line.
\param thickness : Thickness of the line.
*/
void vpMeLine::display(const vpImage<vpRGBa>& I,const vpMeSite &PExt1, const vpMeSite &PExt2,
const std::list<vpMeSite> &site_list,
const double &A, const double &B, const double &C,
const vpColor &color, unsigned int thickness)
{
vpImagePoint ip;
for(std::list<vpMeSite>::const_iterator it=site_list.begin(); it!=site_list.end(); ++it){
vpMeSite pix = *it;
ip.set_i( pix.ifloat );
ip.set_j( pix.jfloat );
if (pix.getState() == vpMeSite::M_ESTIMATOR)
vpDisplay::displayCross(I, ip, 5, vpColor::green,thickness);
else
vpDisplay::displayCross(I, ip, 5, color,thickness);
//vpDisplay::flush(I);
}
vpImagePoint ip1, ip2;
if (fabs(A) < fabs(B)) {
double i1, j1, i2, j2;
i1 = 0;
j1 = (-A*i1 -C) / B;
i2 = I.getHeight() - 1.0;
j2 = (-A*i2 -C) / B;
ip1.set_i( i1 );
ip1.set_j( j1 );
ip2.set_i( i2 );
ip2.set_j( j2 );
vpDisplay::displayLine(I, ip1, ip2, color);
//vpDisplay::flush(I);
}
else {
double i1, j1, i2, j2;
j1 = 0;
i1 = -(B * j1 + C) / A;
j2 = I.getWidth() - 1.0;
i2 = -(B * j2 + C) / A;
ip1.set_i( i1 );
ip1.set_j( j1 );
ip2.set_i( i2 );
ip2.set_j( j2 );
vpDisplay::displayLine(I, ip1, ip2, color);
//vpDisplay::flush(I);
}
ip1.set_i( PExt1.ifloat );
ip1.set_j( PExt1.jfloat );
vpDisplay::displayCross(I, ip1, 10, vpColor::green,thickness);
ip1.set_i( PExt2.ifloat );
ip1.set_j( PExt2.jfloat );
vpDisplay::displayCross(I, ip1, 10, vpColor::green,thickness);
}
示例9: close
/*!
  Grabs a grayscale image from the selected camera. If the camera color
  coding differs from vp1394CMUGrabber::MONO8, the acquired image is
  converted to a gray level image to match the requested format.
  \param I : Acquired gray level image.
  \exception vpFrameGrabberException::initializationError : If the
  grabber was not initialized.
  \exception vpFrameGrabberException::otherError : If the camera color
  coding has no gray level conversion.
*/
void
vp1394CMUGrabber::acquire(vpImage<unsigned char> &I)
{
  if (init == false) {
    close();
    throw (vpFrameGrabberException(vpFrameGrabberException::initializationError,
                                   "Initialization not done") );
  }

  // Grab a raw frame from the camera.
  int dropped;
  camera->AcquireImageEx(TRUE, &dropped);
  unsigned long length;
  unsigned char *rawdata = camera->GetRawData(&length);

  unsigned int size = I.getWidth() * I.getHeight();

  // Convert the raw buffer to gray level according to the camera coding.
  switch(_color) {
  case vp1394CMUGrabber::MONO8:
    memcpy(I.bitmap, (unsigned char *) rawdata, size);
    break;
  case vp1394CMUGrabber::MONO16:
    vpImageConvert::MONO16ToGrey(rawdata, I.bitmap, size);
    break;
  case vp1394CMUGrabber::YUV411:
    vpImageConvert::YUV411ToGrey(rawdata, I.bitmap, size);
    break;
  case vp1394CMUGrabber::YUV422:
    vpImageConvert::YUV422ToGrey(rawdata, I.bitmap, size);
    break;
  case vp1394CMUGrabber::YUV444:
    vpImageConvert::YUV444ToGrey(rawdata, I.bitmap, size);
    break;
  case vp1394CMUGrabber::RGB8:
    vpImageConvert::RGBToGrey(rawdata, I.bitmap, size);
    break;
  default:
    close();
    vpERROR_TRACE("Format conversion not implemented. Acquisition failed.");
    throw (vpFrameGrabberException(vpFrameGrabberException::otherError,
                                   "Format conversion not implemented. "
                                   "Acquisition failed.") );
    break;
  };
}
示例10: getRoi
/*!
  Get the view of the virtual camera. Be careful: the image \e I is
  modified in place — the projected image is not added as an overlay!
  \param I : The image used to store the result.
  \param cam : The parameters of the virtual camera.
*/
void
vpImageSimulator::getImage(vpImage<unsigned char> &I, const vpCameraParameters cam)
{
  int nbDrawn = 0;

  // Optionally wipe the area covered by the previous projection; at this
  // point `rect` still holds the region computed during the last call.
  if (cleanPrevImage)
  {
    unsigned char bg = (unsigned char)(0.2126 * bgColor.R + 0.7152 * bgColor.G + 0.0722 * bgColor.B);
    for (int r = (int)rect.getTop(); r < (int)rect.getBottom(); r++)
    {
      for (int c = (int)rect.getLeft(); c < (int)rect.getRight(); c++)
      {
        I[r][c] = bg;
      }
    }
  }

  if (!visible)
    return;

  // Project the plane into the image and update its bounding box.
  getRoi(I.getWidth(), I.getHeight(), cam, pt, rect);

  const double top    = rect.getTop();
  const double bottom = rect.getBottom();
  const double left   = rect.getLeft();
  const double right  = rect.getRight();
  unsigned char *bitmap = I.bitmap;
  unsigned int width = I.getWidth();
  vpImagePoint ip;

  for (int r = (int)top; r < (int)bottom; r++)
  {
    for (int c = (int)left; c < (int)right; c++)
    {
      // Convert the pixel to normalized meter coordinates, then reuse
      // `ip` to carry those meter coordinates into getPixel().
      double x = 0, y = 0;
      ip.set_ij(r, c);
      vpPixelMeterConversion::convertPoint(cam, ip, x, y);
      ip.set_ij(y, x);

      if (colorI == GRAY_SCALED)
      {
        unsigned char Ipixelplan = 0;
        if (getPixel(ip, Ipixelplan))
        {
          *(bitmap + r*width + c) = Ipixelplan;
          nbDrawn++;
        }
      }
      else if (colorI == COLORED)
      {
        vpRGBa Ipixelplan;
        if (getPixel(ip, Ipixelplan))
        {
          // Luminance conversion (ITU-R BT.709 weights).
          unsigned char grey = (unsigned char)(0.2126 * Ipixelplan.R + 0.7152 * Ipixelplan.G + 0.0722 * Ipixelplan.B);
          *(bitmap + r*width + c) = grey;
          nbDrawn++;
        }
      }
    }
  }
}
示例11: displayed
/*!
  \brief Constructor: initialize a Direct3D display to visualize a
  grayscale image (8 bits).
  \param I : Image to be displayed (note that the image has to be
  initialized).
  \param winx, winy : The window is set at position x,y (column index,
  row index).
  \param title : Window's title.
  \param scaleType : Down-scaling applied to the display:
  - vpDisplay::SCALE_AUTO, the display size is adapted so the image fits
    on the screen;
  - vpDisplay::SCALE_DEFAULT or vpDisplay::SCALE_1, the display size is
    the same as the image size;
  - vpDisplay::SCALE_2 ... SCALE_5, the display size is downscaled by
    2 ... 5 along both the lines and the columns.
*/
vpDisplayD3D::vpDisplayD3D(vpImage<unsigned char> &I,
                           int winx, int winy,
                           const std::string &title, vpScaleType scaleType)
  : vpDisplayWin32(new vpD3DRenderer())
{
  const unsigned int w = I.getWidth();
  const unsigned int h = I.getHeight();
  // Configure the display scale first, then open the window on I.
  setScale(scaleType, w, h);
  init(I, winx, winy, title);
}
示例12: vpERROR_TRACE
/*!
  Sets all the parameters needed to read the video or the image
  sequence, grabs the first frame and stores it in the image \e I.
  \param I : The image where the frame is stored.
  \exception vpImageException::noFileNameError : If the generic filename
  was not set beforehand.
  \exception vpException::ioError : If the video cannot be opened or the
  first frame cannot be read.
  \exception vpException::fatalError : If FFmpeg support is missing for
  a video file, or if the file format is unknown.
*/
void vpVideoReader::open(vpImage< vpRGBa > &I)
{
  if (!initFileName)
  {
    vpERROR_TRACE("The generic filename has to be set");
    throw (vpImageException(vpImageException::noFileNameError,"filename empty"));
  }

  // Image sequence on disk: one numbered file per frame.
  if (formatType == FORMAT_PGM ||
      formatType == FORMAT_PPM ||
      formatType == FORMAT_JPEG ||
      formatType == FORMAT_PNG)
  {
    imSequence = new vpDiskGrabber;
    imSequence->setGenericName(fileName);
    imSequence->setImageNumber((int)firstFrame);
  }
#ifdef VISP_HAVE_FFMPEG
  // Video container: decoded through FFmpeg.
  else if (formatType == FORMAT_AVI ||
           formatType == FORMAT_MPEG ||
           formatType == FORMAT_MOV ||
           formatType == FORMAT_OGV)
  {
    ffmpeg = new vpFFMPEG;
    if(!ffmpeg->openStream(fileName, vpFFMPEG::COLORED))
      throw (vpException(vpException::ioError ,"Could not open the video"));
    ffmpeg->initStream();
  }
#else
  else if (formatType == FORMAT_AVI ||
           formatType == FORMAT_MPEG ||
           formatType == FORMAT_MOV ||
           formatType == FORMAT_OGV)
  {
    vpERROR_TRACE("To read video files the FFmpeg library has to be installed");
    throw (vpException(vpException::fatalError ,"the FFmpeg library is required"));
  }
#endif
  else if (formatType == FORMAT_UNKNOWN)
  {
    // Fixed typo in the user-facing message: "correpsond" -> "correspond".
    vpERROR_TRACE("The format of the file does not correspond to a readable format.");
    throw (vpException(vpException::fatalError ,"The format of the file does not correspond to a readable format."));
  }

  // Read the first frame and record the video geometry.
  frameCount = firstFrame;
  if(!getFrame(I,firstFrame))
  {
    vpERROR_TRACE("Could not read the first frame");
    throw (vpException(vpException::ioError ,"Could not read the first frame"));
  }
  height = I.getHeight();
  width = I.getWidth();
  isOpen = true;
  findLastFrameIndex();
}
示例13: I
/*!
  Display a selection of the gray level image \e I (8bits).
  \warning Display has to be initialized.
  \warning Suppress the overlay drawing in the region of interest.
  \param I : Image to display.
  \param iP : Top left corner of the region of interest
  \param width : Width of the region of interest
  \param height : Height of the region of interest
  \sa init(), closeDisplay()
*/
void vpDisplayOpenCV::displayImageROI ( const vpImage<unsigned char> &I,const vpImagePoint &iP, const unsigned int width, const unsigned int height )
{
  if (displayHasBeenInitialized)
  {
    // Extract the ROI and convert it to RGBa so it can be copied into
    // the 3-channel background buffer below.
    vpImage<unsigned char> Itemp;
    vpImageTools::createSubImage(I,(unsigned int)iP.get_i(),(unsigned int)iP.get_j(),height,width,Itemp);
    vpImage<vpRGBa> Ic;
    vpImageConvert::convert(Itemp,Ic);

    // (Re)allocate the background image when its geometry or format
    // no longer matches the full image I.
    CvSize size = cvSize((int)this->width, (int)this->height);
    int depth = 8;
    int channels = 3;
    if (background != NULL){
      if(background->nChannels != channels || background->depth != depth
         || background->height != (int) I.getHeight() || background->width != (int) I.getWidth()){
        if(background->nChannels != 0) cvReleaseImage(&background);
        background = cvCreateImage( size, depth, channels );
      }
    }
    else background = cvCreateImage( size, depth, channels );

    IplImage* Ip = NULL;
    vpImageConvert::convert(Ic, Ip);

    unsigned char * input = (unsigned char*)Ip->imageData;
    unsigned char * output = (unsigned char*)background->imageData;
    unsigned int iwidth = Ic.getWidth();
    // Removed the no-op self-assignment `input = input;` from the original.
    // Position the output pointer at the ROI's top-left corner.
    output = output + (int)(iP.get_i()*3*this->width+ iP.get_j()*3);

    // Copy the ROI row by row (3 bytes per pixel) into the background.
    unsigned int i = 0;
    while (i < height)
    {
      unsigned int j = 0;
      while (j < width)
      {
        *(output+3*j) = *(input+j*3);
        *(output+3*j+1) = *(input+j*3+1);
        *(output+3*j+2) = *(input+j*3+2);
        j++;
      }
      input = input + 3*iwidth;
      output = output + 3*this->width;
      i++;
    }
    cvReleaseImage(&Ip);
  }
  else
  {
    vpERROR_TRACE("openCV not initialized " ) ;
    throw(vpDisplayException(vpDisplayException::notInitializedError,
                             "OpenCV not initialized")) ;
  }
}
示例14: histogramme
/*!
  Compute the 256-bin gray-level histogram of image \e I.
  \param histo : Output array of at least 256 bins; reset and filled
  with the count of each gray level.
  \param max : Output; set to the highest bin count. The original read
  the caller's (possibly uninitialized) value of `max`; it is now reset
  to 0 here so the result does not depend on caller garbage.
*/
void histogramme(const vpImage<unsigned char> &I, unsigned int* histo, int &max)
{
  max = 0;
  for (int b = 0; b < 256; b++)
    histo[b] = 0;

  // Unsigned counters avoid the signed/unsigned comparison with
  // getHeight()/getWidth() in the original loops.
  for (unsigned int i = 0; i < I.getHeight(); i++)
    for (unsigned int j = 0; j < I.getWidth(); j++)
    {
      unsigned int count = ++histo[I[i][j]];
      if (count > (unsigned int)max)
        max = (int)count;
    }
}
示例15: if
/*!
  Sample the source (texture) image at the point where the viewing ray
  through \e iP intersects the simulated 3D plane.
  \param Isrc : Source image of the simulated plane.
  \param iP : Image point; its u/v components are read as normalized
  meter coordinates here (the caller converts pixels to meters first).
  \param Ipixelplan : Output gray level read from the source image.
  \return true if the ray hits the textured rectangle, false otherwise.
*/
bool
vpImageSimulator::getPixel(vpImage<unsigned char> &Isrc,
                           const vpImagePoint &iP, unsigned char &Ipixelplan)
{
  // Test whether the pixel lies inside the projected zone (inside one
  // of the triangles tessellating the plane).
  bool inside = false;
  for(unsigned int i = 0 ; i < listTriangle.size() ; i++)
    if(listTriangle[i].inTriangle(iP)){
      inside = true;
      break;
    }
  if(!inside) return false;
  //  if(!T1.inTriangle(iP) && !T2.inTriangle(iP))
  //    return false;

  // Algebraic method.
  double z;
  // Depth of the ray/plane intersection.
  z = distance/(normal_Cam_optim[0]*iP.get_u()+normal_Cam_optim[1]*iP.get_v()+normal_Cam_optim[2]);
  // 3D coordinates of the intersection point.
  Xinter_optim[0]=iP.get_u()*z;
  Xinter_optim[1]=iP.get_v()*z;
  Xinter_optim[2]=z;

  // Express the intersection in the object-plane frame:
  //   origin = X0_2_optim (first point defining the plane)
  //   basis  = u:(X[1]-X[0]) and v:(X[3]-X[0])
  // The plane is assumed rectangular, so the (u, v) coordinates are
  // simply obtained by dot products with the basis vectors.
  double u = 0, v = 0;
  for(unsigned int i = 0; i < 3; i++)
  {
    double diff = (Xinter_optim[i]-X0_2_optim[i]);
    u += diff*vbase_u_optim[i];
    v += diff*vbase_v_optim[i];
  }
  // Normalize so that (u, v) in (0, 1) spans the textured rectangle.
  u = u/(euclideanNorm_u*euclideanNorm_u);
  v = v/(euclideanNorm_v*euclideanNorm_v);

  if( u > 0 && v > 0 && u < 1. && v < 1.)
  {
    // Map (u, v) to sub-pixel coordinates in the source image and sample
    // with the configured interpolation mode.
    double i2,j2;
    i2=v*(Isrc.getHeight()-1);
    j2=u*(Isrc.getWidth()-1);
    if (interp == BILINEAR_INTERPOLATION)
      Ipixelplan = Isrc.getValue(i2,j2);
    else if (interp == SIMPLE)
      Ipixelplan = Isrc[(unsigned int)i2][(unsigned int)j2];
    return true;
  }
  else
    return false;
}