本文整理汇总了C++中Image::Convert方法的典型用法代码示例。如果您正苦于以下问题:C++ Image::Convert方法的具体用法?C++ Image::Convert怎么用?C++ Image::Convert使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Image
的用法示例。
在下文中一共展示了Image::Convert方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: NotifySetData
// Uploads pixel data into the texture's internal RGBA image and notifies
// listeners. A whole-texture upload replaces the internal image; a partial
// upload is drawn into the (lazily created) internal image at (x, y).
//
// @param x,y    destination offset in texels
// @param data   source image; NULL is now a no-op upload (the original code
//               dereferenced NULL in the partial-update path)
// @param level  mip level (unused by this implementation)
void GHL_CALL TextureStage3d::SetData(UInt32 x,UInt32 y,const Image* data,UInt32 level) {
    if (!data) {
        // Nothing to upload; still notify so observers stay consistent.
        NotifySetData();
        return;
    }
    // Normalize the incoming image once: RGBA with swapped R/B channels.
    // (Previously this Clone/Convert/SwapRB sequence was duplicated in both
    // branches.)
    Image* img = data->Clone();
    img->Convert(IMAGE_FORMAT_RGBA);
    img->SwapRB();
    const bool full_replace =
        (x == 0) && (y == 0) &&
        (data->GetWidth() == GetWidth()) &&
        (data->GetHeight() == GetHeight());
    if (full_replace) {
        // Whole-texture upload: take ownership of the converted clone.
        if (m_internal_data) {
            m_internal_data->Release();
        }
        m_internal_data = img;
    } else {
        // Partial upload: blit into the internal image, creating it on demand.
        if (!m_internal_data) {
            m_internal_data = GHL_CreateImage(GetWidth(),GetHeight(),IMAGE_FORMAT_RGBA);
        }
        m_internal_data->Draw(x,y,img);
        img->Release();
    }
    NotifySetData();
}
示例2: RunSingleCamera
// Grabs k_numImages frames from the camera identified by 'guid', converts
// each to 8-bit mono and saves it to 'filename'.
//
// @param guid      camera to connect to
// @param filename  output path for the saved image (extension selects format)
// @param lr        non-zero = right camera, zero = left (log message only)
// @param numImage  frame index used in the log message
// @return 0 on success, -1 on any FlyCapture2 error
//
// The original version assigned every Error result and never inspected it;
// each call is now checked, matching the other examples in this file.
int RunSingleCamera( PGRGuid guid, char* filename, int lr, int numImage)
{
    printf("Begin capturing the image:....\n");
    const int k_numImages = 1;
    Error error;
    Camera cam;
    // Connect to the camera.
    error = cam.Connect(&guid);
    if (error != PGRERROR_OK)
    {
        printf( "%s\n", error.GetDescription() );
        return -1;
    }
    // Get the camera information (diagnostics only).
    CameraInfo camInfo;
    error = cam.GetCameraInfo(&camInfo);
    if (error != PGRERROR_OK)
    {
        printf( "%s\n", error.GetDescription() );
        return -1;
    }
    // Start capturing images.
    error = cam.StartCapture();
    if (error != PGRERROR_OK)
    {
        printf( "%s\n", error.GetDescription() );
        return -1;
    }
    Image rawImage;
    for ( int imageCnt=0; imageCnt < k_numImages; imageCnt++ )
    {
        // Retrieve an image; a dropped frame is not fatal, try the next one.
        error = cam.RetrieveBuffer( &rawImage );
        if (error != PGRERROR_OK)
        {
            printf( "%s\n", error.GetDescription() );
            continue;
        }
        if (lr)
            printf( "Grabbed right image %d\n", numImage);
        else
            printf( "Grabbed left image %d\n", numImage);
        // Convert the raw image to 8-bit mono.
        Image convertedImage;
        error = rawImage.Convert( PIXEL_FORMAT_MONO8, &convertedImage );
        if (error != PGRERROR_OK)
        {
            printf( "%s\n", error.GetDescription() );
            return -1;
        }
        // Save the image. If a file format is not passed in, then the file
        // extension is parsed to attempt to determine the file format.
        error = convertedImage.Save( filename );
        if (error != PGRERROR_OK)
        {
            printf( "%s\n", error.GetDescription() );
            return -1;
        }
    }
    // Stop capturing images.
    error = cam.StopCapture();
    if (error != PGRERROR_OK)
    {
        printf( "%s\n", error.GetDescription() );
        return -1;
    }
    // Disconnect the camera.
    error = cam.Disconnect();
    if (error != PGRERROR_OK)
    {
        printf( "%s\n", error.GetDescription() );
        return -1;
    }
    return 0;
}
示例3: main
// Publishes frames from a (globally initialized) FlyCapture2 camera on
// "out_image_topic" as sensor_msgs::Image BGR8 messages.
// Relies on file-scope state: error, camera, initialize_camera().
int main(int argc, char** argv)
{
    initialize_camera();
    ros::init(argc, argv, "image_publisher");
    ros::NodeHandle nh;
    ros::Publisher pub = nh.advertise<sensor_msgs::Image>("out_image_topic", 1);
    sensor_msgs::Image my_image_msg;
    while(ros::ok())
    {
        // Grab a raw frame; skip this iteration on a capture failure instead
        // of converting and publishing a stale/empty buffer (the original had
        // the 'continue' commented out).
        Image rawImage;
        error = camera.RetrieveBuffer( &rawImage );
        if ( error != PGRERROR_OK )
        {
            std::cout << "capture error" << std::endl;
            continue;
        }
        // Convert to BGR, the channel order OpenCV expects.
        Image rgbImage;
        rawImage.Convert( FlyCapture2::PIXEL_FORMAT_BGR, &rgbImage );
        // Wrap the FlyCapture buffer in a cv::Mat without copying; rowBytes
        // accounts for possible row padding in the received data.
        unsigned int rowBytes = (double)rgbImage.GetReceivedDataSize()/(double)rgbImage.GetRows();
        cv::Mat image = cv::Mat(rgbImage.GetRows(), rgbImage.GetCols(), CV_8UC3, rgbImage.GetData(),rowBytes);
        cv_bridge::CvImage out_msg;
        out_msg.encoding = sensor_msgs::image_encodings::BGR8;
        out_msg.image = image;
        out_msg.header.seq = 1;
        // frame_id is a std::string; the original assigned the integer 2,
        // which picked the char overload and stored a non-printable id.
        out_msg.header.frame_id = "2";
        out_msg.header.stamp = ros::Time::now();
        out_msg.toImageMsg(my_image_msg);
        pub.publish(my_image_msg);
    }
}
示例4: ReadPixels
/// Copies the pixel region [offset, offset + extent) of this image into the
/// destination buffer described by 'imageDesc', converting pixel format and
/// data type on the fly when they differ from the source image.
///
/// @param offset      first texel of the region to read (source coordinates)
/// @param extent      size of the region to read
/// @param imageDesc   destination buffer plus its format, data type and size
/// @param threadCount worker-thread count forwarded to the format conversion
///
/// Silently does nothing if imageDesc.data is null or the requested region is
/// not fully inside this image.
void Image::ReadPixels(const Offset3D& offset, const Extent3D& extent, const DstImageDescriptor& imageDesc, std::size_t threadCount) const
{
if (imageDesc.data && IsRegionInside(offset, extent))
{
/* Validate required size */
ValidateImageDataSize(extent, imageDesc);
/* Get source image parameters */
const auto bpp = GetBytesPerPixel();
const auto srcRowStride = bpp * GetExtent().width;
const auto srcDepthStride = srcRowStride * GetExtent().height;
auto src = data_.get() + GetDataPtrOffset(offset);
if (GetFormat() == imageDesc.format && GetDataType() == imageDesc.dataType)
{
/* Get destination image parameters; same format, so bpp is shared with the source */
const auto dstRowStride = bpp * extent.width;
const auto dstDepthStride = dstRowStride * extent.height;
auto dst = reinterpret_cast<char*>(imageDesc.data);
/* Blit region into destination image (fast path: raw byte copy, no conversion) */
BitBlit(
extent, bpp,
dst, dstRowStride, dstDepthStride,
src, srcRowStride, srcDepthStride
);
}
else
{
/* Copy region into temporary sub-image (still in the source format) */
Image subImage { extent, GetFormat(), GetDataType() };
BitBlit(
extent, bpp,
reinterpret_cast<char*>(subImage.GetData()), subImage.GetRowStride(), subImage.GetDepthStride(),
src, srcRowStride, srcDepthStride
);
/* Convert sub-image in place to the requested format/data type */
subImage.Convert(imageDesc.format, imageDesc.dataType, threadCount);
/* Copy sub-image into output data; size was validated above */
::memcpy(imageDesc.data, subImage.GetData(), imageDesc.dataSize);
}
}
}
示例5: get_frame
// Grabs one frame from 'camera', converts it to BGR, undistorts it using the
// stored calibration (cameraMat/distCoeffs), flips it 180 degrees and copies
// the result into the 'img' member.
// NOTE(review): returns false (0) on capture failure and 0 on success, so the
// int result cannot distinguish the two cases — verify how callers use it.
int BlobTracker::get_frame (Camera &camera) {
Image rawImage;
err = camera.RetrieveBuffer( &rawImage );
if ( err != PGRERROR_OK ) {
std::cout << "capture err" << std::endl;
return false;
}
// convert to rgb
Image rgbImage;
rawImage.Convert( FlyCapture2::PIXEL_FORMAT_BGR, &rgbImage );
// convert to OpenCV Mat (no copy); rowBytes accounts for row padding in the
// received buffer
unsigned int rowBytes = (double)rgbImage.GetReceivedDataSize()/(double)rgbImage.GetRows();
cv::Mat frame;
frame = cv::Mat(rgbImage.GetRows(), rgbImage.GetCols(), CV_8UC3, rgbImage.GetData(),rowBytes);
// cv::undistort( image_dist, image, cameraMat, distCoeffs );
// Undistort image
// NOTE(review): the rectification maps depend only on the intrinsics and the
// image size, yet they are recomputed on every frame — consider caching them.
cv::Mat rview, map1, map2;
Size imageSize = frame.size();
initUndistortRectifyMap(cameraMat, distCoeffs, Mat(),
getOptimalNewCameraMatrix(cameraMat, distCoeffs, imageSize, 1, imageSize, 0),
imageSize, CV_16SC2, map1, map2);
remap(frame, rview, map1, map2, INTER_LINEAR);
// Turn right-side up
flip(rview,rview,-1);
rview.copyTo(img);
return 0;
}
示例6: get_frame
// Fetches one frame from the global PointGrey camera, converts it to BGR and
// stores it, rotated 180 degrees, in *frame.
// Returns 0 on success; on a capture failure prints a message and returns
// false (also 0 — preserved from the original convention in this file).
int get_frame (cv::Mat* frame) {
    Image raw;
    err = pt_grey.RetrieveBuffer( &raw );
    if ( err != PGRERROR_OK ) {
        std::cout << "capture err" << std::endl;
        return false;
    }
    // Convert the raw buffer to BGR, the layout OpenCV expects.
    Image bgr;
    raw.Convert( FlyCapture2::PIXEL_FORMAT_BGR, &bgr );
    // Row stride in bytes (accounts for any padding in the received data).
    const unsigned int stride = (double)bgr.GetReceivedDataSize()/(double)bgr.GetRows();
    *frame = cv::Mat(bgr.GetRows(), bgr.GetCols(), CV_8UC3, bgr.GetData(), stride);
    // Turn right-side up.
    flip(*frame, *frame, -1);
    return 0;
}
示例7: RunSingleCamera
//.........这里部分代码省略.........
PrintError( error );
return -1;
}
cout << "Shutter time set to " << fixed << setprecision(2) << k_shutterVal << "ms" << endl;
// Enable timestamping
EmbeddedImageInfo embeddedInfo;
error = cam.GetEmbeddedImageInfo( &embeddedInfo );
if ( error != PGRERROR_OK )
{
PrintError( error );
return -1;
}
if ( embeddedInfo.timestamp.available != 0 )
{
embeddedInfo.timestamp.onOff = true;
}
error = cam.SetEmbeddedImageInfo( &embeddedInfo );
if ( error != PGRERROR_OK )
{
PrintError( error );
return -1;
}
// Start capturing images
error = cam.StartCapture();
if (error != PGRERROR_OK)
{
PrintError( error );
return -1;
}
for ( int imageCnt=0; imageCnt < k_numImages; imageCnt++ )
{
Image rawImage;
// Retrieve an image
error = cam.RetrieveBuffer( &rawImage );
if (error != PGRERROR_OK)
{
PrintError( error );
continue;
}
cout << "Grabbed image " << imageCnt << endl;
// Create a converted image
Image convertedImage;
// Convert the raw image
error = rawImage.Convert( PIXEL_FORMAT_RGB16, &convertedImage );
if (error != PGRERROR_OK)
{
PrintError( error );
return -1;
}
// Create a unique filename
time_t rawtime;
struct tm * ptm;
time ( &rawtime );
ptm = gmtime ( &rawtime );
ostringstream filename;
filename << "GONetwork-" << (ptm->tm_mday) << "D" << (ptm->tm_mon+1) << "M @"<< (ptm->tm_hour)%24 << "-" << (ptm->tm_min) << "-" << (ptm->tm_sec) << ".bmp";
// Save the image. If a file format is not passed in, then the file
// extension is parsed to attempt to determine the file format.
error = convertedImage.Save( filename.str().c_str() );
if (error != PGRERROR_OK)
{
PrintError( error );
return -1;
}
}
// Stop capturing images
error = cam.StopCapture();
if (error != PGRERROR_OK)
{
PrintError( error );
return -1;
}
// Disconnect the camera
error = cam.Disconnect();
if (error != PGRERROR_OK)
{
PrintError( error );
return -1;
}
return 0;
}
示例8: main
//.........这里部分代码省略.........
}
// Set the settings to the camera
error = cam.SetFormat7Configuration(
&fmt7ImageSettings,
fmt7PacketInfo.recommendedBytesPerPacket );
if (error != PGRERROR_OK)
{
PrintError( error );
return -1;
}
// Start capturing images
error = cam.StartCapture();
if (error != PGRERROR_OK)
{
PrintError( error );
return -1;
}
// Retrieve frame rate property
Property frmRate;
frmRate.type = FRAME_RATE;
error = cam.GetProperty( &frmRate );
if (error != PGRERROR_OK)
{
PrintError( error );
return -1;
}
printf( "Frame rate is %3.2f fps\n", frmRate.absValue );
printf( "Grabbing %d images\n", k_numImages );
Image rawImage;
for ( int imageCount=0; imageCount < k_numImages; imageCount++ )
{
// Retrieve an image
error = cam.RetrieveBuffer( &rawImage );
if (error != PGRERROR_OK)
{
PrintError( error );
continue;
}
printf( "." );
// Get the raw image dimensions
PixelFormat pixFormat;
unsigned int rows, cols, stride;
rawImage.GetDimensions( &rows, &cols, &stride, &pixFormat );
// Create a converted image
Image convertedImage;
// Convert the raw image
error = rawImage.Convert( PIXEL_FORMAT_BGRU, &convertedImage );
if (error != PGRERROR_OK)
{
PrintError( error );
return -1;
}
// Create a unique filename
char filename[512];
sprintf( filename, "%u-%d.bmp", camInfo.serialNumber, imageCount );
// Save the image. If a file format is not passed in, then the file
// extension is parsed to attempt to determine the file format.
error = convertedImage.Save( filename );
if (error != PGRERROR_OK)
{
PrintError( error );
return -1;
}
}
printf( "\nFinished grabbing images\n" );
// Stop capturing images
error = cam.StopCapture();
if (error != PGRERROR_OK)
{
PrintError( error );
return -1;
}
// Disconnect the camera
error = cam.Disconnect();
if (error != PGRERROR_OK)
{
PrintError( error );
return -1;
}
printf( "Done! Press Enter to exit...\n" );
getchar();
return 0;
}
示例9: RunSingleCamera
// Connects to the camera identified by 'guid', grabs k_numImages frames,
// converts each to 8-bit mono and saves it as
// "FlyCapture2Test-<serial>-<frame>.pgm".
// Returns 0 on success, -1 on the first fatal FlyCapture2 error; a failed
// frame grab is merely reported and the loop moves on to the next frame.
int RunSingleCamera( PGRGuid guid )
{
    const int k_numImages = 10;
    Error error;
    Camera cam;

    // Connect and report the camera's identity.
    error = cam.Connect(&guid);
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }
    CameraInfo camInfo;
    error = cam.GetCameraInfo(&camInfo);
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }
    PrintCameraInfo(&camInfo);

    // Begin streaming.
    error = cam.StartCapture();
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }

    Image rawImage;
    for ( int frame = 0; frame < k_numImages; ++frame )
    {
        // Retrieve an image; a dropped frame is not fatal.
        error = cam.RetrieveBuffer( &rawImage );
        if (error != PGRERROR_OK)
        {
            PrintError( error );
            continue;
        }
        cout << "Grabbed image " << frame << endl;

        // Convert to 8-bit mono for saving as PGM.
        Image monoImage;
        error = rawImage.Convert( PIXEL_FORMAT_MONO8, &monoImage );
        if (error != PGRERROR_OK)
        {
            PrintError( error );
            return -1;
        }

        // Build a unique per-frame filename; the extension selects the
        // output file format.
        ostringstream filename;
        filename << "FlyCapture2Test-" << camInfo.serialNumber << "-" << frame << ".pgm";
        error = monoImage.Save( filename.str().c_str() );
        if (error != PGRERROR_OK)
        {
            PrintError( error );
            return -1;
        }
    }

    // Shut the stream down and release the camera.
    error = cam.StopCapture();
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }
    error = cam.Disconnect();
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }
    return 0;
}
示例10: imageCallback
// Live-preview loop: displays frames from 'camera' in the "camera" window
// until the user presses 'q'; pressing 'p' runs get_pose() on the current
// frame (undistorted first when use_calib is set).
// Relies on file-scope state: err, key, use_calib, cameraMat, distCoeffs.
void imageCallback(FlyCapture2::Camera *camera) {
    cv::Mat image;
    cv::namedWindow("camera", CV_WINDOW_NORMAL | CV_WINDOW_KEEPRATIO);
    cvMoveWindow("camera", 500, 10);
    cvResizeWindow("camera",1500, 1000);
    while(key != 'q')
    {
        Image rawImage;
        err = camera->RetrieveBuffer( &rawImage );
        if ( err != PGRERROR_OK ) {
            // The original ignored the error and converted/displayed a buffer
            // that was never filled in; skip the frame instead.
            continue;
        }
        // Convert to BGR (OpenCV's default channel order).
        Image rgbImage;
        rawImage.Convert( FlyCapture2::PIXEL_FORMAT_BGR, &rgbImage );
        // Wrap the FlyCapture buffer in a cv::Mat without copying; rowBytes
        // accounts for possible row padding in the received data.
        unsigned int rowBytes = (double)
            rgbImage.GetReceivedDataSize()/(double)rgbImage.GetRows();
        image = cv::Mat(rgbImage.GetRows(), rgbImage.GetCols(), CV_8UC3, rgbImage.GetData(),rowBytes);
        if (use_calib) {
            // Undistort image.
            // NOTE(review): map1/map2 depend only on the intrinsics and image
            // size — they could be computed once instead of per frame.
            cv::Mat rview, map1, map2;
            cv::Size imageSize = image.size();
            double alpha = 1; // 0 = zoomed-in, all valid pixels; 1 = invalid pixels are blacked-out
            /* Using projection matrix from calibration results in different image */
            initUndistortRectifyMap(cameraMat, distCoeffs, cv::Mat(),
                getOptimalNewCameraMatrix(cameraMat, distCoeffs, imageSize, alpha, imageSize, 0),
                imageSize, CV_16SC2, map1, map2);
            remap(image, rview, map1, map2, cv::INTER_LINEAR);
            cv::imshow("camera",rview);
            key=cv::waitKey(50);
            if (key == 'p') {
                std::cout << "key-press = " << key << std::endl;
                get_pose(&rview);
                key=0;
            }
        }
        else {
            cv::imshow("camera",image);
            key=cv::waitKey(50);
            if (key == 'p'){
                std::cout << "key-press = " << key << std::endl;
                get_pose(&image);
                key=0;
            }
        }
    }
}
示例11: sourceStream
/// @copydoc ResourceHandler::CacheResource()
bool Texture2dResourceHandler::CacheResource(
ObjectPreprocessor* pObjectPreprocessor,
Resource* pResource,
const String& rSourceFilePath )
{
HELIUM_ASSERT( pObjectPreprocessor );
HELIUM_ASSERT( pResource );
Texture2d* pTexture = Reflect::AssertCast< Texture2d >( pResource );
// Load the source texture data.
FileStream* pSourceFileStream = FileStream::OpenFileStream( rSourceFilePath, FileStream::MODE_READ );
if( !pSourceFileStream )
{
HELIUM_TRACE(
TraceLevels::Error,
( TXT( "Texture2dResourceHandler::CacheResource(): Failed to open source texture file \"%s\" for " )
TXT( "reading.\n" ) ),
*rSourceFilePath );
return false;
}
Image sourceImage;
bool bLoadSuccess;
{
BufferedStream sourceStream( pSourceFileStream );
// Determine the proper image loader to used based on the image extension.
FilePath sourceFilePath = *rSourceFilePath;
String extension( sourceFilePath.Extension().c_str() );
if( extension == TXT( "png" ) )
{
bLoadSuccess = PngImageLoader::Load( sourceImage, &sourceStream );
}
else
{
bLoadSuccess = TgaImageLoader::Load( sourceImage, &sourceStream );
}
}
delete pSourceFileStream;
if( !bLoadSuccess )
{
HELIUM_TRACE(
TraceLevels::Error,
TXT( "Texture2dResourceHandler::CacheResource(): Failed to load source texture image \"%s\".\n" ),
*rSourceFilePath );
}
// Convert the source image to a 32-bit BGRA image for the NVIDIA texture tools library to process.
Image::Format bgraFormat;
bgraFormat.SetBytesPerPixel( 4 );
bgraFormat.SetChannelBitCount( Image::CHANNEL_RED, 8 );
bgraFormat.SetChannelBitCount( Image::CHANNEL_GREEN, 8 );
bgraFormat.SetChannelBitCount( Image::CHANNEL_BLUE, 8 );
bgraFormat.SetChannelBitCount( Image::CHANNEL_ALPHA, 8 );
#if HELIUM_ENDIAN_LITTLE
bgraFormat.SetChannelBitOffset( Image::CHANNEL_RED, 16 );
bgraFormat.SetChannelBitOffset( Image::CHANNEL_GREEN, 8 );
bgraFormat.SetChannelBitOffset( Image::CHANNEL_BLUE, 0 );
bgraFormat.SetChannelBitOffset( Image::CHANNEL_ALPHA, 24 );
#else
bgraFormat.SetChannelBitOffset( Image::CHANNEL_RED, 8 );
bgraFormat.SetChannelBitOffset( Image::CHANNEL_GREEN, 16 );
bgraFormat.SetChannelBitOffset( Image::CHANNEL_BLUE, 24 );
bgraFormat.SetChannelBitOffset( Image::CHANNEL_ALPHA, 0 );
#endif
Image bgraImage;
HELIUM_VERIFY( sourceImage.Convert( bgraImage, bgraFormat ) );
sourceImage.Unload();
// If the texture is flagged to ignore alpha data, set the alpha channel to fully opaque for each pixel in the
// image. Otherwise, check if the image is fully opaque (in which case alpha data can be ignored during
// compression, and we can potentially use cheaper compressed formats).
uint32_t imageWidth = bgraImage.GetWidth();
uint32_t imageHeight = bgraImage.GetHeight();
uint32_t pixelCount = imageWidth * imageHeight;
void* pImagePixelData = bgraImage.GetPixelData();
HELIUM_ASSERT( pImagePixelData );
uint8_t* pPixelAlpha = static_cast< uint8_t* >( pImagePixelData ) + 3;
bool bIgnoreAlpha = pTexture->GetIgnoreAlpha();
if( bIgnoreAlpha )
{
for( uint32_t pixelIndex = 0; pixelIndex < pixelCount; ++pixelIndex, pPixelAlpha += 4 )
{
*pPixelAlpha = 0xff;
}
}
else
{
uint32_t pixelIndex;
//.........这里部分代码省略.........
示例12: main
int main(int argc, char **argv){
ros::init(argc, argv, "pointgrey_camera_driver");
ros::NodeHandle nh("~");
// Get the serial number of the camera to connect to
int serial;
nh.param<int>("serial", serial, 0);
// Try connecting to that camera
Camera* cam = ConnectCamera(serial);
if (cam == NULL) {
std::stringstream serial_string;
serial_string << serial;
std::string msg = "PointGreyCamera::connect Could not find camera with serial number: " + serial_string.str() + ". Is that camera plugged in?";
throw std::runtime_error(msg);
}
cout << "connection success!" << endl;
// What type of camera is it? Wide angle or narrow angle?
string camera_type;
nh.param<string>("camera_type", camera_type, "");
// Connection successful! lets setup the ros topic.
// Get the location of our camera config yaml
string camera_info_url, frame_id;
nh.param<std::string>("camera_info_url", camera_info_url, "");
// Get the desired frame_id, set to 'camera' if not found
nh.param<std::string>("frame_id", frame_id, "camera");
// Start the camera info manager and attempt to load any configurations
std::stringstream cinfo_name;
cinfo_name << serial;
camera_info_manager::CameraInfoManager* cinfo = new camera_info_manager::CameraInfoManager(nh, cinfo_name.str(), camera_info_url);
// Publish topics using ImageTransport through camera_info_manager (gives cool things like compression)
image_transport::ImageTransport* it = new image_transport::ImageTransport(nh);
image_transport::CameraPublisher it_pub = it->advertiseCamera("/" + ros::this_node::getNamespace() + "/image_raw", 1000);
// get ready for capture loop
Image image;
Image bgrImage;
IplImage* cvImg;
string imageEncoding = sensor_msgs::image_encodings::BGR8;
sensor_msgs::ImagePtr ros_image; ///< Camera Info message.
sensor_msgs::CameraInfoPtr ci; ///< Camera Info message.
uint64_t num_frames = 0;
// Now configure the camera and put it in run mode
if (camera_type.compare("narrow") == 0)
RunCamera(cam);
else if (camera_type.compare("wide") == 0)
RunWideAngleCamera(cam);
else throw std::runtime_error("Camera type not specified");
// capture loop
while (ros::ok()) {
// try to get an image
Error error = cam->RetrieveBuffer(&image);
//check if we had a timeout or some other error
if (error == PGRERROR_TIMEOUT)
continue;
else if (error != PGRERROR_OK)
throw std::runtime_error(error.GetDescription());
// HACK: throw away half the frames for the WFOV cameras
num_frames++;
if (camera_type.compare("wide") == 0 && num_frames % 2 != 1)
continue;
//grabbed image, reset ros structures and fill out fields
ros_image.reset(new sensor_msgs::Image());
ci.reset(new sensor_msgs::CameraInfo(cinfo->getCameraInfo()));
ros_image->header.stamp = ros::Time::now();
ros_image->header.frame_id = frame_id;
ci->header.stamp = ros_image->header.stamp;
ci->header.frame_id = frame_id;
//convert to bgr
image.Convert(PIXEL_FORMAT_BGR, &bgrImage);
cvImg = cvCreateImageHeader(cvSize(bgrImage.GetCols(), bgrImage.GetRows()), IPL_DEPTH_8U, 3);
convertToCV(&bgrImage, cvImg);
if (camera_type.compare("wide") == 0)
cvFlip(cvImg, cvImg, -1);
// package in ros header
//fillImage(*ros_image, imageEncoding, bgrImage.GetRows(), bgrImage.GetCols(), bgrImage.GetStride(), bgrImage.GetData());
fillImage(*ros_image, imageEncoding, cvImg->height, cvImg->width, cvImg->widthStep, cvImg->imageData);
// Publish the message using standard image transport
it_pub.publish(ros_image, ci);
}
//.........这里部分代码省略.........
示例13: main
//.........这里部分代码省略.........
sendDiagnosticsMessage("WARN:Connecting to GPS...");
if (useGPS) gpsLogger.Run();
uint64_t numframes = 0;
uint64_t lastframes = 0;
///////// main loop
while (!is_done_working or numframes < 250) {
numframes++;
if (numframes > maxframes) {
is_done_working = true;
}
try {
zmq::message_t command_msg;
if (my_commands.recv(&command_msg, ZMQ_NOBLOCK) == true) {
string command((const char*)command_msg.data(), command_msg.size());
if (command.compare("TERMINATE") == 0) {
// properly close the application
is_done_working = true;
quit_via_user_input = true;
}
else if (command.compare("LASTCAMERADATA") == 0) {
// transmit the camera data over
}
else if (command.compare("LASTGPSDATA") == 0) {
// transmit the last known GPS log over
}
}
} catch (const zmq::error_t&e) {}
#ifndef DEBUG_NO_SENSORS
for (int cam_num = 0; cam_num < NUM_CAMS; cam_num++) {
Image image;
cam[cam_num]->RetrieveBuffer(&image);
image.Convert(PIXEL_FORMAT_BGR, imgToSend);
ImageCallback(&image, NULL, &cam_buff[cam_num][numframes % NUMTHREAD_PER_BUFFER]);
Image cimage;
if (numframes % CAMERA_DISPLAY_SKIP == 0) {
image.Convert(PIXEL_FORMAT_BGR, &cimage);
convertToCV(&cimage, img);
#ifdef DISPLAY
stringstream wstm;
wstm << "cam" << cam_num;
string window_name = wstm.str();
show(img, window_name.c_str());
#endif //DISPLAY
}
}
#endif //DEBUG_NO_SENSORS
#ifdef DISPLAY
char r = cvWaitKey(1);
if (r == 'q') {
is_done_working = true;
quit_via_user_input = true;
}
#endif //DISPLAY
if (useGPS) {
GPSLogger::GPSPacketType packet = gpsLogger.getPacket();
int GPSPacketSuccess = boost::get<0>(packet);
if (GPSPacketSuccess > 0) {
lastValidGPSPacket = boost::get<1>(packet);
gps_output << lastValidGPSPacket << endl;
} else {
sendDiagnosticsMessage("WARN:Bad GPS Packet:" + boost::get<1>(packet));
示例14: main
int main(int argc, char** argv)
{
ros::init(argc, argv, "image_publisher");
ros::NodeHandle nh;
image_transport::ImageTransport it(nh);
image_transport::Publisher pub_L = it.advertise("camera_left/image", 1);
image_transport::Publisher pub_R = it.advertise("camera_right/image", 1);
int serial_L=14481155;
int serial_R=14435551;
// Get the location of our camera config yaml
std::string camera_info_url ;
// std::string camera_info_url = "/home/sasha/hydro_ws/src/alvar_ptgrey/info/ptgrey_calib.yaml";
std::string frame_id_;
sensor_msgs::CameraInfoPtr ci_;
boost::shared_ptr<camera_info_manager::CameraInfoManager> cinfo_L;
boost::shared_ptr<camera_info_manager::CameraInfoManager> cinfo_R;
nh.param<std::string>("camera_info_url", camera_info_url, "");
// Get the desired frame_id, set to 'camera' if not found
// nh.param<std::string>("frame_id", frame_id_, "camera");
std::cout << camera_info_url << std::endl;
// Start the camera info manager and attempt to load any configurations
std::stringstream cinfo_name_L;
std::stringstream cinfo_name_R;
cinfo_name_L << serial_L;
cinfo_name_R << serial_R;
ros::Publisher camera_info_pub_L = nh.advertise<sensor_msgs::CameraInfo>("/camera_left/camera_info",1);
ros::Publisher camera_info_pub_R = nh.advertise<sensor_msgs::CameraInfo>("/camera_right/camera_info",1);
// sensor_msgs::CameraInfo cinfo;
cinfo_L.reset(new camera_info_manager::CameraInfoManager(nh, cinfo_name_L.str(), camera_info_url));
cinfo_R.reset(new camera_info_manager::CameraInfoManager(nh, cinfo_name_R.str(), camera_info_url));
BusManager busMgr;
// unsigned int numCameras = 2;
// error = busMgr.GetNumOfCameras(&numCameras);
// if (error != PGRERROR_OK) {
// std::cout << "Bus Manager error" << std::endl;
// return false;
// }
PGRGuid guid_L;
// error = busMgr.GetCameraFromIndex(1, &guid_L);
error = busMgr.GetCameraFromSerialNumber(14481155, &guid_L);
if (error != PGRERROR_OK) {
std::cout << "Index error" << std::endl;
return false;
}
PGRGuid guid_R;
// error = busMgr.GetCameraFromIndex(2, &guid_L);
error = busMgr.GetCameraFromSerialNumber(14435551, &guid_R);
if (error != PGRERROR_OK) {
std::cout << "Index error" << std::endl;
return false;
}
Camera pt_grey_L, pt_grey_R;
cv::Mat image_L, image_R;
// connect to camera
// init_cam_capture(pt_grey, serialNum);
init_cam_capture(pt_grey_L, guid_L);
init_cam_capture(pt_grey_R, guid_R);
// set camera parameters
// set_cam_param(pt_grey);
int frame_no = 0;
// capture loop
while(ros::ok())
{
// Get the image
Image rawImage;
error = pt_grey_L.RetrieveBuffer( &rawImage );
if ( error != PGRERROR_OK )
{
std::cout << "capture error" << std::endl;
return false;
}
// convert to bgr (default format for OpenCV)
Image rgbImage;
rawImage.Convert( FlyCapture2::PIXEL_FORMAT_BGR, &rgbImage );
// convert to OpenCV Mat
unsigned int rowBytes = (double)rgbImage.GetReceivedDataSize()/(double)rgbImage.GetRows();
cv::Mat image = cv::Mat(rgbImage.GetRows(), rgbImage.GetCols(), CV_8UC3, rgbImage.GetData(),rowBytes);
sensor_msgs::ImagePtr msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", image).toImageMsg();
//.........这里部分代码省略.........
示例15: main
int main()
{
BusManager busMgr;
unsigned int numCameras = 2;
error = busMgr.GetNumOfCameras(&numCameras);
if (error != PGRERROR_OK) {
std::cout << "Bus Manager error" << std::endl;
return false;
}
PGRGuid guid_L;
// error = busMgr.GetCameraFromIndex(1, &guid_L);
error = busMgr.GetCameraFromSerialNumber(14481155, &guid_L);
if (error != PGRERROR_OK) {
std::cout << "Index error" << std::endl;
return false;
}
PGRGuid guid_R;
// error = busMgr.GetCameraFromIndex(2, &guid_L);
error = busMgr.GetCameraFromSerialNumber(14435551, &guid_R);
if (error != PGRERROR_OK) {
std::cout << "Index error" << std::endl;
return false;
}
Camera pt_grey_L, pt_grey_R;
cv::Mat image_L, image_R;
cv::namedWindow("image_L", CV_WINDOW_NORMAL | CV_WINDOW_KEEPRATIO);
cvMoveWindow("image_L", 3000, 10);
cvResizeWindow("image_L",700, 500);
cv::namedWindow("image_R", CV_WINDOW_NORMAL | CV_WINDOW_KEEPRATIO);
cvMoveWindow("image_R", 3000, 10);
cvResizeWindow("image_R",700, 500);
// connect to camera
// init_cam_capture(pt_grey, serialNum);
init_cam_capture(pt_grey_L, guid_L);
init_cam_capture(pt_grey_R, guid_R);
// set camera parameters
// set_cam_param(pt_grey);
int frame_no = 0;
// capture loop
char key = 0;
while(key != 'q')
{
// Get the image
Image rawImage;
error = pt_grey_L.RetrieveBuffer( &rawImage );
if ( error != PGRERROR_OK )
{
std::cout << "capture error" << std::endl;
return false;
}
// convert to bgr (default format for OpenCV)
Image rgbImage;
rawImage.Convert( FlyCapture2::PIXEL_FORMAT_BGR, &rgbImage );
// convert to OpenCV Mat
unsigned int rowBytes = (double)rgbImage.GetReceivedDataSize()/(double)rgbImage.GetRows();
cv::Mat image = cv::Mat(rgbImage.GetRows(), rgbImage.GetCols(), CV_8UC3, rgbImage.GetData(),rowBytes);
cv::imshow("image_L", image);
key = cv::waitKey(1);
// Get the image
error = pt_grey_R.RetrieveBuffer( &rawImage );
if ( error != PGRERROR_OK )
{
std::cout << "capture error" << std::endl;
return false;
}
// convert to bgr (default format for OpenCV)
rawImage.Convert( FlyCapture2::PIXEL_FORMAT_BGR, &rgbImage );
// convert to OpenCV Mat
rowBytes = (double)rgbImage.GetReceivedDataSize()/(double)rgbImage.GetRows();
image = cv::Mat(rgbImage.GetRows(), rgbImage.GetCols(), CV_8UC3, rgbImage.GetData(),rowBytes);
cv::imshow("image_R", image);
key = cv::waitKey(1);
// key = cv::waitKey(50);
// if (key == 'c') {
// char img_name[50];
//.........这里部分代码省略.........