This article collects typical usage examples of the C++ vpImage::resize method. If you are wondering how to use vpImage::resize in C++, or what concrete calls look like in practice, the curated examples below may help. You can also explore further usage examples of the containing class, vpImage.
The following shows 14 code examples of vpImage::resize, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
Example 1: axis
/*!
Apply a filter to an image.
\param I : Image to filter
\param Iu : Filtered image along the horizontal axis (u = columns).
\param Iv : Filtered image along the vertical axis (v = rows).
\param M : Separate filter coefficients
*/
void
vpImageFilter::filter(const vpImage<double> &I,
                      vpImage<double> &Iu,
                      vpImage<double> &Iv,
                      const vpMatrix &M)
{
  unsigned int size = M.getRows();
  unsigned int half_size = size / 2;

  Iu.resize(I.getHeight(), I.getWidth());
  Iv.resize(I.getHeight(), I.getWidth());
  Iu = 0;
  Iv = 0;

  for (unsigned int v = half_size; v < I.getHeight() - half_size; v++) {
    for (unsigned int u = half_size; u < I.getWidth() - half_size; u++) {
      double conv_u = 0;
      double conv_v = 0;

      for (unsigned int a = 0; a < size; a++) {
        for (unsigned int b = 0; b < size; b++) {
          double val = I[v - half_size + a][u - half_size + b];
          conv_u += M[a][b] * val;
          conv_v += M[b][a] * val; // transposed kernel for the vertical response
        }
      }
      Iu[v][u] = conv_u;
      Iv[v][u] = conv_v;
    }
  }
}
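A minimal usage sketch for this overload (the input file name and the old-style <visp/...> headers are assumptions): build a 3x3 Sobel-x kernel and retrieve both gradient images in one call. Because conv_v accumulates M[b][a], Iv receives the response of the transposed kernel (Sobel-y) at no extra cost.
#include <visp/vpImage.h>
#include <visp/vpImageConvert.h>
#include <visp/vpImageFilter.h>
#include <visp/vpImageIo.h>
#include <visp/vpMatrix.h>

int main()
{
  vpImage<unsigned char> I8;
  vpImage<double> I, Iu, Iv;
  vpImageIo::read(I8, "image.pgm");  // hypothetical input file
  vpImageConvert::convert(I8, I);    // 8-bit grey -> double

  // 3x3 Sobel-x kernel; Iv gets the transposed (Sobel-y) response.
  vpMatrix M(3, 3);
  M[0][0] = -1; M[0][1] = 0; M[0][2] = 1;
  M[1][0] = -2; M[1][1] = 0; M[1][2] = 2;
  M[2][0] = -1; M[2][1] = 0; M[2][2] = 1;

  vpImageFilter::filter(I, Iu, Iv, M);
  return 0;
}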
Example 2: acquire
/*!
Acquire a color image from the active camera.
\param I : Image data structure (RGBa image).
\param timestamp : The acquisition timestamp.
*/
void vpFlyCaptureGrabber::acquire(vpImage<vpRGBa> &I, FlyCapture2::TimeStamp &timestamp)
{
  this->open();

  FlyCapture2::Error error;
  // Retrieve an image
  error = m_camera.RetrieveBuffer(&m_rawImage);
  if (error != FlyCapture2::PGRERROR_OK) {
    error.PrintErrorTrace();
    throw (vpException(vpException::fatalError,
                       "Cannot retrieve image for camera with guid 0x%lx", m_guid));
  }
  timestamp = m_rawImage.GetTimeStamp();

  // Create a converted image
  FlyCapture2::Image convertedImage;

  // Convert the raw image
  error = m_rawImage.Convert(FlyCapture2::PIXEL_FORMAT_RGBU, &convertedImage);
  if (error != FlyCapture2::PGRERROR_OK) {
    error.PrintErrorTrace();
    throw (vpException(vpException::fatalError,
                       "Cannot convert image for camera with guid 0x%lx", m_guid));
  }
  height = convertedImage.GetRows();
  width = convertedImage.GetCols();
  unsigned char *data = convertedImage.GetData();
  I.resize(height, width);
  unsigned int bps = convertedImage.GetBitsPerPixel();
  memcpy(I.bitmap, data, width * height * bps / 8);
}
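A hedged sketch of driving this overload (assumes a FlyCapture-enabled ViSP build; the VISP_HAVE_FLYCAPTURE guard and the TimeStamp fields come from the FlyCapture2 SDK):
#include <iostream>
#include <visp/vpFlyCaptureGrabber.h>
#include <visp/vpImage.h>
#include <visp/vpRGBa.h>

int main()
{
#ifdef VISP_HAVE_FLYCAPTURE
  vpImage<vpRGBa> I;
  vpFlyCaptureGrabber g;
  g.open(I);                 // connect to the first camera found
  FlyCapture2::TimeStamp ts;
  g.acquire(I, ts);          // the overload shown above
  std::cout << "Got a " << I.getWidth() << "x" << I.getHeight()
            << " frame at t=" << ts.seconds << "s" << std::endl;
#endif
  return 0;
}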
Example 3:
/*!
Apply a filter to an image.
\param I : Image to filter
\param If : Filtered image.
\param M : Filter coefficients.
*/
void
vpImageFilter::filter(const vpImage<unsigned char> &I,
                      vpImage<double> &If,
                      const vpMatrix &M)
{
  unsigned int size = M.getRows();
  unsigned int half_size = size / 2;

  If.resize(I.getHeight(), I.getWidth());
  If = 0;

  for (unsigned int i = half_size; i < I.getHeight() - half_size; i++) {
    for (unsigned int j = half_size; j < I.getWidth() - half_size; j++) {
      double conv_x = 0;

      for (unsigned int a = 0; a < size; a++) {
        for (unsigned int b = 0; b < size; b++) {
          double val = I[i - half_size + a][j - half_size + b];
          conv_x += M[a][b] * val;
        }
      }
      If[i][j] = conv_x;
    }
  }
}
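A minimal smoothing sketch using this overload (the input file name is hypothetical): a 3x3 box filter whose coefficients sum to 1 keeps If in the original [0, 255] range.
#include <visp/vpImage.h>
#include <visp/vpImageFilter.h>
#include <visp/vpImageIo.h>
#include <visp/vpMatrix.h>

int main()
{
  vpImage<unsigned char> I;
  vpImage<double> If;
  vpImageIo::read(I, "image.pgm"); // hypothetical input file

  // 3x3 box filter: every coefficient is 1/9.
  vpMatrix M(3, 3);
  for (unsigned int a = 0; a < 3; a++)
    for (unsigned int b = 0; b < 3; b++)
      M[a][b] = 1.0 / 9.0;

  vpImageFilter::filter(I, If, M);
  return 0;
}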
Example 4: throw
/*!
  Compute the image subtraction: \f$ Ires = I1 - I2 \f$.
  \param I1 : The first image.
  \param I2 : The second image.
  \param Ires : \f$ Ires = I1 - I2 \f$
  \param saturate : If true, saturate the result to [0 ; 255] using vpMath::saturate, otherwise overflow may occur.
*/
void
vpImageTools::imageSubtract(const vpImage<unsigned char> &I1,
                            const vpImage<unsigned char> &I2,
                            vpImage<unsigned char> &Ires,
                            const bool saturate)
{
  if ((I1.getHeight() != I2.getHeight()) || (I1.getWidth() != I2.getWidth())) {
    throw (vpException(vpException::dimensionError, "The two images do not have the same size"));
  }
  if ((I1.getHeight() != Ires.getHeight()) || (I1.getWidth() != Ires.getWidth())) {
    Ires.resize(I1.getHeight(), I1.getWidth());
  }

  unsigned char *ptr_I1   = I1.bitmap;
  unsigned char *ptr_I2   = I2.bitmap;
  unsigned char *ptr_Ires = Ires.bitmap;
  unsigned int cpt = 0;

#if VISP_HAVE_SSE2
  if (Ires.getSize() >= 16) {
    // Process 16 pixels per iteration with SSE2.
    for (; cpt <= Ires.getSize() - 16; cpt += 16, ptr_I1 += 16, ptr_I2 += 16, ptr_Ires += 16) {
      const __m128i v1 = _mm_loadu_si128((const __m128i *)ptr_I1);
      const __m128i v2 = _mm_loadu_si128((const __m128i *)ptr_I2);
      const __m128i vres = saturate ? _mm_subs_epu8(v1, v2) : _mm_sub_epi8(v1, v2);
      _mm_storeu_si128((__m128i *)ptr_Ires, vres);
    }
  }
#endif

  // Scalar tail (and fallback when SSE2 is not available).
  for (; cpt < Ires.getSize(); cpt++, ++ptr_I1, ++ptr_I2, ++ptr_Ires) {
    *ptr_Ires = saturate ?
        vpMath::saturate<unsigned char>((short int)*ptr_I1 - (short int)*ptr_I2) :
        *ptr_I1 - *ptr_I2;
  }
}
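A small sketch contrasting the two saturate modes (sizes and values are arbitrary): with saturation the negative result clamps to 0, without it the subtraction wraps modulo 256.
#include <iostream>
#include <visp/vpImage.h>
#include <visp/vpImageTools.h>

int main()
{
  vpImage<unsigned char> I1(2, 2, 10), I2(2, 2, 30), Ires;

  vpImageTools::imageSubtract(I1, I2, Ires, true);   // 10 - 30 clamps to 0
  std::cout << (int)Ires[0][0] << std::endl;         // prints 0

  vpImageTools::imageSubtract(I1, I2, Ires, false);  // wraps: 256 - 20
  std::cout << (int)Ires[0][0] << std::endl;         // prints 236
  return 0;
}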
Example 5: acquireMulti
/*!
  Acquire a pair of grey-level images (e.g. from the left and right cameras), with their timestamps. The images are copied.
*/
void vpNaoqiGrabber::acquireMulti(vpImage<unsigned char> &Ia, vpImage<unsigned char> &Ib, struct timeval &timestamp_a, struct timeval &timestamp_b)
{
  if (! m_isOpen)
    open();

  /* Retrieve an image from the camera.
   * The image is returned in the form of a container object, with the
   * following fields:
   * 0 = width
   * 1 = height
   * 2 = number of layers
   * 3 = colors space index (see alvisiondefinitions.h)
   * 4 = time stamp (seconds)
   * 5 = time stamp (micro seconds)
   * 6 = image buffer (size of width * height * number of layers)
   */
  m_img = m_videoProxy->getImagesRemote(m_clientName);

  m_width  = (int) m_img[0][0];
  m_height = (int) m_img[0][1];

  // Left image
  double tv_sec  = (double)m_img[0][4];
  double tv_usec = (double)m_img[0][5];
  timestamp_a.tv_sec  = (unsigned long) tv_sec;
  timestamp_a.tv_usec = (unsigned long) tv_usec;

  // Right image
  tv_sec  = (double)m_img[1][4];
  tv_usec = (double)m_img[1][5];
  timestamp_b.tv_sec  = (unsigned long) tv_sec;
  timestamp_b.tv_usec = (unsigned long) tv_usec;

  // Access the image buffers (field 6) and assign them to the ViSP image containers
  unsigned char *img_buffer_a = (unsigned char *) m_img[0][6].GetBinary();
  unsigned char *img_buffer_b = (unsigned char *) m_img[1][6].GetBinary();

  // Tells ALVideoDevice that it can give the image buffer back to the
  // driver. Optional after a getImageRemote but MANDATORY after a getImageLocal.
  //m_videoProxy->releaseImage(m_clientName);

  Ia.resize(m_height, m_width);
  Ib.resize(m_height, m_width);

  vpImageConvert::BGRToGrey(img_buffer_a, (unsigned char *)Ia.bitmap, m_width, m_height);
  vpImageConvert::BGRToGrey(img_buffer_b, (unsigned char *)Ib.bitmap, m_width, m_height);
}
Example 6:
/*!
Make a copy of the current internal view
\param I : destination image
*/
void
vpSimulator::getInternalImage(vpImage<unsigned char> &I)
{
  //while (get==0) {;}
  get = 2;
  I.resize(internal_height, internal_width);
  vpImageConvert::RGBToGrey(bufferView, I.bitmap, internal_width, internal_height, true);
  get = 1;
}
Example 7: copyBitmap
/*!
  Fills the vpImage bitmap from the currently selected frame.
  \throw vpException::dimensionError if either the height or the width
  associated to the class is negative.
  \param I : the image to fill.
*/
void vpFFMPEG::copyBitmap(vpImage<vpRGBa> &I)
{
  if (height < 0 || width < 0) {
    throw vpException(vpException::dimensionError, "width or height negative.");
  }
  I.resize((unsigned int)height, (unsigned int)width);

  unsigned char *line;
  unsigned char *beginOutput = (unsigned char *)I.bitmap;
  unsigned char *output = NULL;

  if (color_type == COLORED)
  {
    unsigned char *input = (unsigned char *)pFrameRGB->data[0];
    int widthStep = pFrameRGB->linesize[0];
    for (int i = 0; i < height; i++)
    {
      line = input;
      output = beginOutput + 4 * width * i;
      for (int j = 0; j < width; j++)
      {
        // RGB -> RGBa: copy the three colour components, alpha set to 0
        *(output++) = *(line);
        *(output++) = *(line + 1);
        *(output++) = *(line + 2);
        *(output++) = 0;
        line += 3;
      }
      // go to the next line
      input += widthStep;
    }
  }
  else if (color_type == GRAY_SCALED)
  {
    unsigned char *input = (unsigned char *)pFrameGRAY->data[0];
    int widthStep = pFrameGRAY->linesize[0];
    for (int i = 0; i < height; i++)
    {
      line = input;
      output = beginOutput + 4 * width * i;
      for (int j = 0; j < width; j++)
      {
        // Grey -> RGBa: replicate the grey value on all four components
        *output++ = *(line);
        *output++ = *(line);
        *output++ = *(line);
        *output++ = *(line);
        line++;
      }
      // go to the next line
      input += widthStep;
    }
  }
}
Example 8: convert
int Conversion::convert(const Eigen::MatrixXf &depthMat, vpImage<float> &dmap)
{
  int height = depthMat.rows();
  int width  = depthMat.cols();
  dmap.resize(height, width);
  for (int i = 0; i < height; i++) {
    for (int j = 0; j < width; j++) {
      dmap[i][j] = depthMat(i, j);
    }
  }
  return 1;
}
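Usage is straightforward; a sketch (assuming the Conversion class above is in scope and Eigen is available):
#include <Eigen/Dense>
#include <visp/vpImage.h>

int main()
{
  // A 480x640 depth map with every pixel at 1.5 (units are up to the caller).
  Eigen::MatrixXf depthMat = Eigen::MatrixXf::Constant(480, 640, 1.5f);

  vpImage<float> dmap;
  Conversion conv;
  conv.convert(depthMat, dmap); // dmap[i][j] now mirrors depthMat(i, j)
  return 0;
}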
Example 9: rosImageToVisp
void rosImageToVisp(vpImage<unsigned char> &dst,
                    const sensor_msgs::Image::ConstPtr &src)
{
  using sensor_msgs::image_encodings::RGB8;
  using sensor_msgs::image_encodings::RGBA8;
  using sensor_msgs::image_encodings::BGR8;
  using sensor_msgs::image_encodings::BGRA8;
  using sensor_msgs::image_encodings::MONO8;
  using sensor_msgs::image_encodings::MONO16;
  using sensor_msgs::image_encodings::numChannels;

  // Resize the image if necessary.
  if (src->width != dst.getWidth() || src->height != dst.getHeight())
  {
    ROS_INFO("dst is %dx%d but src size is %dx%d, resizing.",
             dst.getWidth(), dst.getHeight(),
             src->width, src->height);
    dst.resize(src->height, src->width);
  }

  if (src->encoding == MONO8)
    memcpy(dst.bitmap,
           &src->data[0],
           dst.getHeight() * src->step * sizeof(unsigned char));
  else if (src->encoding == RGB8 || src->encoding == RGBA8
           || src->encoding == BGR8 || src->encoding == BGRA8)
  {
    // Average the colour channels (skipping alpha) to get a grey level.
    unsigned nc = numChannels(src->encoding);
    unsigned cEnd =
      (src->encoding == RGBA8 || src->encoding == BGRA8) ? nc - 1 : nc;

    for (unsigned i = 0; i < dst.getWidth(); ++i)
      for (unsigned j = 0; j < dst.getHeight(); ++j)
      {
        int acc = 0;
        for (unsigned c = 0; c < cEnd; ++c)
          acc += src->data[j * src->step + i * nc + c];
        dst[j][i] = acc / nc;
      }
  }
  else
  {
    boost::format fmt("bad encoding '%1%'");
    fmt % src->encoding;
    throw std::runtime_error(fmt.str());
  }
}
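A hedged sketch of wiring this helper into a ROS node (node and topic names are hypothetical):
#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include <visp/vpImage.h>

vpImage<unsigned char> g_image;

void imageCallback(const sensor_msgs::Image::ConstPtr &msg)
{
  rosImageToVisp(g_image, msg); // helper shown above
}

int main(int argc, char **argv)
{
  ros::init(argc, argv, "visp_image_listener");   // hypothetical node name
  ros::NodeHandle nh;
  ros::Subscriber sub = nh.subscribe("camera/image_raw", 1, imageCallback);
  ros::spin();
  return 0;
}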
Example 10: getImage
/*
\brief gets the displayed image (including the overlay plane)
and returns an RGBa image
*/
void vpDisplayGTK::getImage(vpImage<vpRGBa> &I)
{
  // should certainly be optimized.
  // doesn't work
  if (displayHasBeenInitialized)
  {
    GdkImage *ImageGtk;
    ImageGtk = gdk_image_get(background, 0, 0, (gint)width, (gint)height);

    I.resize(height, width);
    guchar *pos;
    guint32 pixel;
    gint x, y;
    guchar OctetRouge, OctetVert, OctetBleu, mask;
    mask = 0x000000FF;

    pos = (unsigned char *)I.bitmap;
    for (y = 0; y < (gint)height; y++)
    {
      for (x = 0; x < (gint)width; x++)
      {
        pixel = gdk_image_get_pixel(ImageGtk, x, y);
        OctetBleu  = (guchar)pixel & mask;
        OctetVert  = (guchar)(pixel >> 8) & mask;
        OctetRouge = (guchar)(pixel >> 16) & mask;
        *pos++ = OctetRouge;
        *pos++ = OctetVert;
        *pos++ = OctetBleu;
        *pos++ = 0;
      }
    }
  }
  else
  {
//......... the rest of this example is omitted .........
Example 11: imageDifference
/*!
  Compute the signed difference between the two images I1 and I2 for
  visualization purposes: Idiff = I1 - I2, remapped around 128.
  - Pixels with a null difference are set to 128.
  - A negative difference gives a pixel value < 128.
  - A positive difference gives a pixel value > 128.
  \param I1 : The first image.
  \param I2 : The second image.
  \param Idiff : The result of the difference.
*/
void vpImageTools::imageDifference(const vpImage<unsigned char> &I1,
                                   const vpImage<unsigned char> &I2,
                                   vpImage<unsigned char> &Idiff)
{
  if ((I1.getHeight() != I2.getHeight()) || (I1.getWidth() != I2.getWidth())) {
    throw (vpException(vpException::dimensionError, "The two images do not have the same size"));
  }

  if ((I1.getHeight() != Idiff.getHeight()) || (I1.getWidth() != Idiff.getWidth()))
    Idiff.resize(I1.getHeight(), I1.getWidth());

  unsigned int n = I1.getHeight() * I1.getWidth();
  for (unsigned int b = 0; b < n; b++)
  {
    int diff = I1.bitmap[b] - I2.bitmap[b] + 128;
    Idiff.bitmap[b] = (unsigned char)(vpMath::maximum(vpMath::minimum(diff, 255), 0));
  }
}
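A minimal sketch of using the difference image to eyeball motion between two frames (file names are hypothetical):
#include <visp/vpImage.h>
#include <visp/vpImageIo.h>
#include <visp/vpImageTools.h>

int main()
{
  vpImage<unsigned char> I1, I2, Idiff;
  vpImageIo::read(I1, "frame1.pgm"); // hypothetical input files
  vpImageIo::read(I2, "frame2.pgm");

  vpImageTools::imageDifference(I1, I2, Idiff);

  // Static areas come out flat grey (128); moving edges show up dark/bright.
  vpImageIo::write(Idiff, "diff.pgm");
  return 0;
}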
Example 12: getRenderingOutput
/*!
Get the result of the rendering loop.
\param I : The image on which to copy the result of the rendering loop.
\param cMo : The desired camera pose.
*/
void vpAROgre::getRenderingOutput(vpImage<vpRGBa> &I, const vpHomogeneousMatrix &cMo)
{
  updateCameraParameters(cMo);
  Ogre::TexturePtr dynTexPtr = Ogre::TextureManager::getSingleton().getByName("rtf");
//#if ( OGRE_VERSION >= (1 << 16 | 9 << 8 | 0) )
//  .dynamicCast<Ogre::Texture>();
//#else
//  ;
//#endif
  Ogre::RenderTexture *RTarget = dynTexPtr->getBuffer()->getRenderTarget();
  mWindow->update();
  RTarget->update();

  if (I.getHeight() != mWindow->getHeight() || I.getWidth() != mWindow->getWidth()) {
    I.resize(mWindow->getHeight(), mWindow->getWidth());
  }

  Ogre::HardwarePixelBufferSharedPtr mPixelBuffer = dynTexPtr->getBuffer();
  mPixelBuffer->lock(Ogre::HardwareBuffer::HBL_DISCARD);
  const Ogre::PixelBox &pixelBox = mPixelBuffer->getCurrentLock();
  dynTexPtr->getBuffer()->blitToMemory(pixelBox);
  Ogre::uint8 *pDest = static_cast<Ogre::uint8 *>(pixelBox.data);

#if 1 // if texture in BGRa format
  for (unsigned int i = 0; i < I.getHeight(); i++) {
    for (unsigned int j = 0; j < I.getWidth(); j++) {
      // Color image
      I[i][j].B = *pDest++; // Blue component
      I[i][j].G = *pDest++; // Green component
      I[i][j].R = *pDest++; // Red component
      I[i][j].A = *pDest++; // Alpha component
    }
  }
#else // if texture in RGBa format, which is the format of the input image
  memcpy(I.bitmap, pDest, I.getHeight() * I.getWidth() * sizeof(vpRGBa));
#endif

  // Unlock the pixel buffer
  mPixelBuffer->unlock();
}
Example 13: main
/*!
  \example sonarPioneerReader.cpp example showing how to connect to and read
  sonar data from a Pioneer mobile robot.
*/
int main(int argc, char **argv)
{
  try {
    ArArgumentParser parser(&argc, argv);
    parser.loadDefaultArguments();

    robot = new vpRobotPioneer;

    // ArRobotConnector connects to the robot, gets some initial data from it
    // such as type and name, and then loads parameter files for this robot.
    ArRobotConnector robotConnector(&parser, robot);
    if (!robotConnector.connectRobot())
    {
      ArLog::log(ArLog::Terse, "Could not connect to the robot");
      if (parser.checkHelpAndWarnUnparsed())
      {
        Aria::logOptions();
        Aria::exit(1);
      }
    }
    if (!Aria::parseArgs())
    {
      Aria::logOptions();
      Aria::shutdown();
      return -1;
    }

    std::cout << "Robot connected" << std::endl;

#if defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)
    // Create a display to show sensor data
    if (isInitialized == false)
    {
      I.resize((unsigned int)half_size * 2, (unsigned int)half_size * 2);
      I = 255;

#if defined(VISP_HAVE_X11)
      d = new vpDisplayX;
#elif defined(VISP_HAVE_GDI)
      d = new vpDisplayGDI;
#endif
      d->init(I, -1, -1, "Sonar range data");
      isInitialized = true;
    }
#endif

    // Activate the sonar
    ArGlobalFunctor sonarPrinterCB(&sonarPrinter);
    robot->addRangeDevice(&sonar);
    robot->addUserTask("Sonar printer", 50, &sonarPrinterCB);
    robot->useSonar(true); // activates the sonar device usage

    // Robot velocities
    vpColVector v_mes(2);

    for (int i = 0; i < 1000; i++)
    {
      double t = vpTime::measureTimeMs();

      v_mes = robot->getVelocity(vpRobot::REFERENCE_FRAME);
      std::cout << "Trans. vel= " << v_mes[0] << " m/s, Rot. vel=" << vpMath::deg(v_mes[1]) << " deg/s" << std::endl;
      v_mes = robot->getVelocity(vpRobot::ARTICULAR_FRAME);
      std::cout << "Left wheel vel= " << v_mes[0] << " m/s, Right wheel vel=" << v_mes[1] << " m/s" << std::endl;
      std::cout << "Battery=" << robot->getBatteryVoltage() << std::endl;

#if defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)
      if (isInitialized) {
        // A mouse click to exit
        // Before exiting save the last sonar image
        if (vpDisplay::getClick(I, false) == true) {
          {
            // Set the default output path
            std::string opath;
#if !defined(_WIN32) && (defined(__unix__) || defined(__unix) || (defined(__APPLE__) && defined(__MACH__))) // UNIX
            opath = "/tmp";
#elif defined(_WIN32)
            opath = "C:\\temp";
#endif
            std::string username = vpIoTools::getUserName();

            // Append the login name of the user to the output path
            opath = vpIoTools::createFilePath(opath, username);

            // Test if the output path exists. If not, try to create it
            if (vpIoTools::checkDirectory(opath) == false) {
              try {
                // Create the dirname
                vpIoTools::makeDirectory(opath);
              }
              catch (...) {
                std::cerr << std::endl << "ERROR:" << std::endl;
                std::cerr << "  Cannot create " << opath << std::endl;
                exit(-1);
//......... part of the code is omitted here .........
Example 14: initCamera
/*!
Initialization of the grabber using a greyscale image.
\param I : gray level image.
*/
void
vp1394CMUGrabber::open(vpImage<unsigned char> &I)
{
  initCamera();
  I.resize(this->height, this->width);
}