本文整理汇总了C++中sensor_msgs::CvBridge::imgMsgToCv方法的典型用法代码示例。如果您正苦于以下问题:C++ CvBridge::imgMsgToCv方法的具体用法?C++ CvBridge::imgMsgToCv怎么用?C++ CvBridge::imgMsgToCv使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sensor_msgs::CvBridge
的用法示例。
在下文中一共展示了CvBridge::imgMsgToCv方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: syncCallback
// Topic callback: invoked when a synchronized pair of TOF messages arrives.
// Converts the xyz image into a sensor_msgs::PointCloud (one point per pixel)
// and publishes it on topicPub_pointCloud_.
void syncCallback(const sensor_msgs::Image::ConstPtr& tof_camera_xyz_data, const sensor_msgs::Image::ConstPtr& tof_camera_grey_data)
{
  ROS_DEBUG("convert xyz_image to point_cloud");
  sensor_msgs::PointCloud pc_msg;
  // Create point_cloud message header (stamp set exactly once; the original
  // assigned it a second time just before publishing, which was redundant).
  pc_msg.header.stamp = ros::Time::now();
  pc_msg.header.frame_id = "head_tof_camera_link";
  // NOTE(review): the returned IplImage pointers are owned by the bridge
  // objects; unlike the other callbacks in this file there is no try/catch
  // here, so a CvBridgeException would propagate — confirm that is intended.
  c_xyz_image_32F3_ = cv_bridge_0_.imgMsgToCv(tof_camera_xyz_data, "passthrough");
  c_grey_image_32F1_ = cv_bridge_1_.imgMsgToCv(tof_camera_grey_data, "passthrough");
  cv::Mat cpp_xyz_image_32F3 = c_xyz_image_32F3_;
  cv::Mat cpp_grey_image_32F1 = c_grey_image_32F1_;
  // One point per pixel: reserve up front to avoid repeated reallocation.
  pc_msg.points.reserve(static_cast<size_t>(cpp_xyz_image_32F3.rows) * cpp_xyz_image_32F3.cols);
  for (int row = 0; row < cpp_xyz_image_32F3.rows; row++)
  {
    // Each pixel stores (x, y, z) as three consecutive floats.
    const float* f_ptr = cpp_xyz_image_32F3.ptr<float>(row);
    for (int col = 0; col < cpp_xyz_image_32F3.cols; col++)
    {
      geometry_msgs::Point32 pt;
      pt.x = f_ptr[3*col + 0];
      pt.y = f_ptr[3*col + 1];
      pt.z = f_ptr[3*col + 2];
      pc_msg.points.push_back(pt);
    }
  }
  topicPub_pointCloud_.publish(pc_msg);
}
示例2: sharedModeSrvCallback
/// Callback is executed when shared mode is selected.
/// Left and right is expressed when facing the back of the camera in horizontal orientation.
/// Converts both incoming images, keeps deep copies, and displays them.
void sharedModeSrvCallback(const sensor_msgs::ImageConstPtr& right_camera_data,
                           const sensor_msgs::ImageConstPtr& tof_camera_grey_data)
{
  boost::mutex::scoped_lock lock(m_ServiceMutex);
  ROS_INFO("[all_camera_viewer] sharedModeSrvCallback");
  // Convert ROS image messages to OpenCV IplImages. The returned pointers
  // are owned by the bridge objects, so the data is cloned into the cv::Mat
  // members before any further use.
  try
  {
    right_color_image_8U3_ = cv_bridge_0_.imgMsgToCv(right_camera_data, "passthrough");
    grey_image_32F1_ = cv_bridge_2_.imgMsgToCv(tof_camera_grey_data, "passthrough");
    cv::Mat tmp = right_color_image_8U3_;
    right_color_mat_8U3_ = tmp.clone();
    tmp = grey_image_32F1_;
    grey_mat_32F1_ = tmp.clone();
  }
  catch (sensor_msgs::CvBridgeException& e)
  {
    ROS_ERROR("[all_camera_viewer] Could not convert images with cv_bridge.");
    // Bug fix: bail out on conversion failure. Previously execution fell
    // through and used empty/stale matrices below.
    return;
  }
  // Scale the 32-bit grey data into a displayable 8-bit, 3-channel image.
  ipa_Utils::ConvertToShowImage(grey_mat_32F1_, grey_mat_8U3_, 1);
  cv::imshow("TOF grey data", grey_mat_8U3_);
  // Show the color image at half resolution.
  cv::Mat right_color_8U3;
  cv::resize(right_color_mat_8U3_, right_color_8U3, cv::Size(), 0.5, 0.5);
  cv::imshow("Right color data", right_color_8U3);
  cv::waitKey(1000);
}
示例3: stereoModeSrvCallback
/// Callback is executed when stereo mode is selected.
/// Left and right is expressed when facing the back of the camera in horizontal orientation.
/// Converts both stereo images, keeps deep copies, and displays them at half resolution.
void stereoModeSrvCallback(const sensor_msgs::ImageConstPtr& left_camera_data,
                           const sensor_msgs::ImageConstPtr& right_camera_data)
{
  ROS_INFO("[all_camera_viewer] stereoModeSrvCallback");
  boost::mutex::scoped_lock lock(m_ServiceMutex);
  // Convert ROS image messages to OpenCV IplImages. The returned pointers
  // are owned by the bridge objects, so the data is cloned before use.
  try
  {
    right_color_image_8U3_ = cv_bridge_0_.imgMsgToCv(right_camera_data, "passthrough");
    left_color_image_8U3_ = cv_bridge_1_.imgMsgToCv(left_camera_data, "passthrough");
    cv::Mat tmp = right_color_image_8U3_;
    right_color_mat_8U3_ = tmp.clone();
    tmp = left_color_image_8U3_;
    left_color_mat_8U3_ = tmp.clone();
  }
  catch (sensor_msgs::CvBridgeException& e)
  {
    ROS_ERROR("[all_camera_viewer] Could not convert stereo images with cv_bridge.");
    // Bug fix: bail out on conversion failure. Previously execution fell
    // through and resized/displayed empty or stale matrices.
    return;
  }
  cv::Mat right_color_8U3;
  cv::resize(right_color_mat_8U3_, right_color_8U3, cv::Size(), 0.5, 0.5);
  cv::imshow("Right color data", right_color_8U3);
  cv::Mat left_color_8U3;
  cv::resize(left_color_mat_8U3_, left_color_8U3, cv::Size(), 0.5, 0.5);
  cv::imshow("Left color data", left_color_8U3);
  cv::waitKey(1000);
  ROS_INFO("[all_camera_viewer] stereoModeSrvCallback [OK]");
}
示例4: imageCb
// Image-topic callback: converts the incoming message into last_image_ for
// display, handling three encoding families differently:
//   - "bayer*": wrap the raw buffer directly (CvBridge cannot decode bayer),
//   - floating point ("*F*"): normalize by the max value so it shows in [0,1],
//   - anything else: convert to OpenCV-native BGR.
void ImageNodelet::imageCb(const sensor_msgs::ImageConstPtr& msg)
{
  image_mutex_.lock();
  // May want to view raw bayer data, which CvBridge doesn't know about
  if (msg->encoding.find("bayer") != std::string::npos)
  {
    // No copy: this Mat header points straight into msg->data. Holding
    // last_msg_ (set below) is what keeps that buffer alive.
    last_image_ = cv::Mat(msg->height, msg->width, CV_8UC1,
                          const_cast<uint8_t*>(&msg->data[0]), msg->step);
  }
  // We want to scale floating point images so that they display nicely
  else if(msg->encoding.find("F") != std::string::npos)
  {
    cv::Mat float_image_bridge = img_bridge_.imgMsgToCv(msg, "passthrough");
    cv::Mat_<float> float_image = float_image_bridge;
    // Find the largest positive pixel value to use as the normalization factor.
    float max_val = 0;
    for(int i = 0; i < float_image.rows; ++i)
    {
      for(int j = 0; j < float_image.cols; ++j)
      {
        max_val = std::max(max_val, float_image(i, j));
      }
    }
    // Scale in place into [0, 1]; skipped when the image has no positive values.
    if(max_val > 0)
    {
      float_image /= max_val;
    }
    last_image_ = float_image;
  }
  else
  {
    // Convert to OpenCV native BGR color
    try {
      last_image_ = img_bridge_.imgMsgToCv(msg, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException& e) {
      // On failure, last_image_ keeps its previous contents; error is throttled.
      NODELET_ERROR_THROTTLE(30, "Unable to convert '%s' image to bgr8",
                             msg->encoding.c_str());
    }
  }
  // last_image_ may point to data owned by last_msg_, so we hang onto it for
  // the sake of mouseCb.
  last_msg_ = msg;
  // Must release the mutex before calling cv::imshow, or can deadlock against
  // OpenCV's window mutex.
  image_mutex_.unlock();
  if (!last_image_.empty())
    cv::imshow(window_name_, last_image_);
}
示例5: add_sock_to_match
// Service callback: crops both request images to the configured region,
// round-trips them through PNG files on disk, and hands the reloaded pair to
// the match detector. Returns false if either conversion fails.
bool add_sock_to_match(image_processor::ProcessBridge::Request &req,
                       image_processor::ProcessBridge::Response &res )
{
  sensor_msgs::Image image = req.image;
  sensor_msgs::Image image2 = req.image2;
  sensor_msgs::ImagePtr img_ptr(new sensor_msgs::Image(image));
  IplImage *cv_image;
  IplImage *cv_image2;
  try
  {
    cv_image = bridge_.imgMsgToCv(img_ptr, "bgr8");
  }
  catch (sensor_msgs::CvBridgeException error)
  {
    ROS_ERROR("error");
    return false;
  }
  // Crop the first image and save the cropped region to disk. The clone must
  // happen before the second imgMsgToCv call because bridge_ reuses its
  // internal buffer between conversions.
  cvSetImageROI(cv_image,cvRect(CROP_X,CROP_Y,CROP_WIDTH,CROP_HEIGHT));
  IplImage* cropped_image = cvCloneImage(cv_image);
  cvSaveImage("/home/stephen/cropped_image1.png",cropped_image);
  sensor_msgs::ImagePtr img2_ptr(new sensor_msgs::Image(image2));
  try{
    cv_image2 = bridge_.imgMsgToCv(img2_ptr, "bgr8");
  }
  catch (sensor_msgs::CvBridgeException error)
  {
    ROS_ERROR("error");
    return false;
  }
  // Bug fix: set the ROI on cv_image2. The original set it on cv_image a
  // second time, so the second image was cloned and saved uncropped.
  cvSetImageROI(cv_image2,cvRect(CROP_X,CROP_Y,CROP_WIDTH,CROP_HEIGHT));
  IplImage* cropped_image2 = cvCloneImage(cv_image2);
  cvSaveImage("/home/stephen/cropped_image2.png",cropped_image2);
  cvReleaseImage(&cropped_image);
  cvReleaseImage(&cropped_image2);
  // Reload from disk and pass to the detector (which appears to retain the
  // pointers — they are not released here, matching match_socks' usage).
  // NOTE(review): hard-coded per-user paths — consider a parameter or temp dir.
  IplImage* new_cropped_image = cvLoadImage("/home/stephen/cropped_image1.png");
  IplImage* new_cropped_image2 = cvLoadImage("/home/stephen/cropped_image2.png");
  match_detector->addImageToList(new_cropped_image,new_cropped_image2);
  res.image_annotated = req.image;
  return true;
}
示例6: match_socks
// Service callback: feeds all image pairs from the request into a fresh
// MatchDetector, runs the matching, and returns one match index per pair.
bool match_socks(image_processor::MatchSocks::Request &req,
                 image_processor::MatchSocks::Response &res )
{
  // NOTE(review): any previously allocated detector is leaked here — confirm
  // whether match_detector should be deleted/reset first.
  match_detector = new MatchDetector();
  int num_images = req.images1.size();
  for (int i = 0; i < num_images; i++){
    sensor_msgs::Image image1 = req.images1.at(i);
    sensor_msgs::Image image2 = req.images2.at(i);
    sensor_msgs::ImagePtr img_ptr1(new sensor_msgs::Image(image1));
    sensor_msgs::ImagePtr img_ptr2(new sensor_msgs::Image(image2));
    IplImage *cv_image1;
    IplImage *cv_image2;
    try{
      // Bug fix: bridge_.imgMsgToCv() returns a pointer into the bridge's
      // internal buffer, so converting the second image with the same bridge
      // overwrote the first — both pointers ended up aliasing image2. Clone
      // the first image before the second conversion.
      cv_image1 = cvCloneImage(bridge_.imgMsgToCv(img_ptr1, "bgr8"));
      cv_image2 = bridge_.imgMsgToCv(img_ptr2, "bgr8");
      // The detector appears to retain the images (see add_sock_to_match);
      // the clone is therefore not released here.
      match_detector->addImageToList(cv_image1,cv_image2);
    }
    catch (sensor_msgs::CvBridgeException error)
    {
      ROS_ERROR("error");
      return false;
    }
  }
  // Run the matching and copy the per-pair results into the response.
  vector<int> matches = match_detector->process();
  for(int i = 0; i < num_images; i++){
    res.matches.push_back(matches.at(i));
  }
  return true;
}
示例7: imageCallback
// Image callback: converts the incoming BGR image to HSV, displays it in a
// HighGUI window, and republishes the HSV data.
void imageCallback (const sensor_msgs::ImageConstPtr & msg_ptr)
{
  // Convert ROS input image message to IplImage
  try
  {
    cv_input_ = bridge_.imgMsgToCv (msg_ptr, "bgr8");
  }
  catch (sensor_msgs::CvBridgeException error)
  {
    ROS_ERROR ("CvBridge Input Error");
    // Bug fix: bail out on conversion failure. Previously execution fell
    // through and used an unset/stale cv_input_ below.
    return;
  }
  // Convert IplImage to cv::Mat (deep copy, so the bridge buffer may be reused)
  img_in_ = cv::Mat (cv_input_).clone ();
  // Convert input image from BGR to HSV
  cv::cvtColor (img_in_, img_hsv_, CV_BGR2HSV);
  // Display HSV image in HighGUI window
  cv::imshow ("hsv", img_hsv_);
  // Needed to keep the HighGUI window open
  cv::waitKey (3);
  // Convert cv::Mat to IplImage
  cv_output_ = img_hsv_;
  // Convert IplImage to ROS output image message and publish.
  // NOTE(review): the pixel data is HSV but the message is labeled "bgr8";
  // subscribers will interpret the channels as BGR — confirm this is intended.
  try
  {
    image_pub_.publish (bridge_.cvToImgMsg (&cv_output_, "bgr8"));
  }
  catch (sensor_msgs::CvBridgeException error)
  {
    ROS_ERROR ("CvBridge Output error");
  }
}
示例8: alpha_image_cb
// Builds a 4-channel (color + alpha) image from the incoming camera frame,
// using the precomputed ipl_maskBW image as the alpha channel, then publishes
// it with the original message header.
void alpha_image_cb(const sensor_msgs::ImageConstPtr& msg_ptr){
  // Refresh/publish the black-and-white mask for this frame's stamp/frame id.
  calc_and_publish_BWMask(msg_ptr->header.stamp, msg_ptr->header.frame_id);
  // imgMsgToCv() with no encoding argument uses "passthrough"; the returned
  // pointer is owned by the bridge and must not be released here.
  IplImage* cam_image = bridge.imgMsgToCv(msg_ptr);
  IplImage* cam_alpha_image = cvCreateImage(cvGetSize(cam_image), IPL_DEPTH_8U, 4);
  // Copy each color channel individually using the channel-of-interest (COI)
  // mechanism: with a COI set, cvCopy touches only that channel.
  //b
  cvSetImageCOI(cam_alpha_image, 1);
  cvSetImageCOI(cam_image, 1);
  cvCopy(cam_image, cam_alpha_image);
  //g
  cvSetImageCOI(cam_alpha_image, 2);
  cvSetImageCOI(cam_image, 2);
  cvCopy(cam_image, cam_alpha_image);
  //r
  cvSetImageCOI(cam_alpha_image, 3);
  cvSetImageCOI(cam_image, 3);
  cvCopy(cam_image, cam_alpha_image);
  //alpha
  cvSetImageCOI(cam_alpha_image, 4);
  cvCopy(ipl_maskBW, cam_alpha_image);
  // Reset the output COI so conversion below sees all four channels.
  // NOTE(review): cam_image's COI is left set to 3 — presumably harmless since
  // the bridge's buffer is rewritten on the next conversion; confirm.
  cvSetImageCOI(cam_alpha_image, 0);
  sensor_msgs::ImagePtr img_msg = sensor_msgs::CvBridge::cvToImgMsg(cam_alpha_image);
  img_msg->header = msg_ptr->header;
  image_publisher.publish(img_msg);
  cvReleaseImage(&cam_alpha_image);
}
示例9: find_grip_point
// Service callback: crops the request image to the configured region, runs
// the grip-point detector on the crop, and returns the detected points
// (translated back into full-image coordinates), detector parameters, and an
// annotated image.
bool find_grip_point(image_processor::ProcessBridge::Request &req,
                     image_processor::ProcessBridge::Response &res )
{
  sensor_msgs::Image image = req.image;
  sensor_msgs::ImagePtr img_ptr(new sensor_msgs::Image(image));
  sensor_msgs::CameraInfo cam_info = req.info;
  IplImage *cv_image = NULL;
  try
  {
    // Pointer is owned by bridge_; do not release it.
    cv_image = bridge_.imgMsgToCv(img_ptr, "bgr8");
  }
  catch (sensor_msgs::CvBridgeException error)
  {
    ROS_ERROR("error");
    return false;
  }
  //Crop image
  cvSetImageROI(cv_image,cvRect(CROP_X,CROP_Y,CROP_WIDTH,CROP_HEIGHT));
  IplImage* cropped_image = cvCloneImage(cv_image);
  // Output buffers filled by the detector; num_pts/num_params report how many
  // entries are valid (capacity is 128 each).
  CvPoint2D64f temp_pts[128];
  double temp_params[128];
  IplImage *output_cv_image;
  int num_pts = 0;
  int num_params = 0;
  ROS_INFO("Ready to call find_grip_point...");
  // NOTE(review): cropped_image is never released in this function —
  // presumably find_grip_point_process takes ownership or returns it as
  // output_cv_image; otherwise this leaks one image per call. Confirm.
  output_cv_image = find_grip_point_process(cropped_image,temp_pts,temp_params,num_pts,num_params);
  // Translate detected points from crop coordinates back to full-image coordinates.
  for( int i = 0; i < num_pts; i++){
    res.pts_x.push_back(temp_pts[i].x+CROP_X);
    res.pts_y.push_back(temp_pts[i].y+CROP_Y);
  }
  for ( int i = 0; i < num_params; i++){
    res.params.push_back(temp_params[i]);
  }
  // Convert the annotated image back to a ROS message for the response.
  sensor_msgs::ImagePtr output_img_ptr;
  sensor_msgs::Image output_img;
  try
  {
    output_img_ptr = bridge_.cvToImgMsg(output_cv_image, "bgr8");
    output_img = *output_img_ptr;
  }
  catch (sensor_msgs::CvBridgeException error)
  {
    ROS_ERROR("error");
    return false;
  }
  res.image_annotated = output_img;
  return true;
}
示例10: colorImageCallback
/// Callback is executed, when shared mode is selected
/// Left and right is expressed when facing the back of the camera in horizontal orientation.
/// Lazily initializes the fiducial detector from the camera intrinsics, keeps
/// a deep copy of the incoming image, optionally runs detection and publishes
/// the results, then wakes any thread waiting on condQ_.
void colorImageCallback(const sensor_msgs::ImageConstPtr& color_camera_data,
                        const sensor_msgs::CameraInfoConstPtr& color_camera_info)
{
  // Inner scope bounds the lock: the mutex is released before notify_one()
  // below, so the woken thread can acquire it immediately.
  {
    boost::mutex::scoped_lock lock( mutexQ_ );
    ROS_DEBUG("[fiducials] color image callback");
    // One-time initialization: build the 3x3 intrinsic matrix from the
    // CameraInfo K (fx, cx, fy, cy) and initialize the detector with it.
    if (camera_matrix_initialized_ == false)
    {
      camera_matrix_ = cv::Mat::zeros(3,3,CV_64FC1);
      camera_matrix_.at<double>(0,0) = color_camera_info->K[0];
      camera_matrix_.at<double>(0,2) = color_camera_info->K[2];
      camera_matrix_.at<double>(1,1) = color_camera_info->K[4];
      camera_matrix_.at<double>(1,2) = color_camera_info->K[5];
      camera_matrix_.at<double>(2,2) = 1;
      ROS_INFO("[fiducials] Initializing fiducial detector with camera matrix");
      if (m_pi_tag->Init(camera_matrix_, model_directory_ + model_filename_) & ipa_Utils::RET_FAILED)
      {
        ROS_ERROR("[fiducials] Initializing fiducial detector with camera matrix [FAILED]");
        return;
      }
      camera_matrix_initialized_ = true;
    }
    // Receive: convert and deep-copy (the bridge owns the IplImage buffer).
    color_image_8U3_ = cv_bridge_0_.imgMsgToCv(color_camera_data, "bgr8");
    received_timestamp_ = color_camera_data->header.stamp;
    received_frame_id_ = color_camera_data->header.frame_id;
    cv::Mat tmp = color_image_8U3_;
    color_mat_8U3_ = tmp.clone();
    // In topic mode, run detection immediately and publish both the detection
    // array and the (possibly annotated) image.
    if (ros_node_mode_ == MODE_TOPIC || ros_node_mode_ == MODE_TOPIC_AND_SERVICE)
    {
      cob_object_detection_msgs::DetectionArray detection_array;
      detectFiducials(detection_array, color_mat_8U3_);
      // Publish
      detect_fiducials_pub_.publish(detection_array);
      cv_bridge::CvImage cv_ptr;
      cv_ptr.image = color_mat_8U3_;
      cv_ptr.encoding = CobFiducialsNode::color_image_encoding_;
      img2D_pub_.publish(cv_ptr.toImageMsg());
    }
    synchronizer_received_ = true;
    // Notify waiting thread
  }
  // Signal outside the lock scope (see comment at the top of the function).
  condQ_.notify_one();
}
示例11: process_images
// Synchronized callback for the raw camera image, the foreground-objects
// image and the saliency image. Maps the foreground-objects image through a
// sigmoid to obtain a per-pixel foreground probability image and displays it.
void process_images(const sensor_msgs::ImageConstPtr& camera_msg, const sensor_msgs::ImageConstPtr& fg_objects_msg, const sensor_msgs::ImageConstPtr& saliency_msg)
{
  ROS_INFO_STREAM("Raw image time: " << camera_msg->header.stamp);
  ROS_INFO_STREAM("Foreground objects time: " << fg_objects_msg->header.stamp);
  ROS_INFO_STREAM("Saliency time: " << saliency_msg->header.stamp);
  // Convert all three messages; the cv::Mat headers wrap buffers owned by the
  // bridge objects (no copy).
  cv::Mat camera_img(camera_bridge.imgMsgToCv(camera_msg));
  cv::Mat fg_objects_img(fg_objects_bridge.imgMsgToCv(fg_objects_msg));
  cv::Mat saliency_img(saliency_bridge.imgMsgToCv(saliency_msg));
  // Sigmoid mapping: fg_prob = 1 / (1 + exp((b - x/|w|))) with w = -1/5, b = 4,
  // i.e. a logistic curve centered at x = 20 with scale 5.
  // NOTE(review): fg_prob_img shares data with fg_objects_img, so the
  // in-place convertTo below also mutates the bridge's buffer — confirm that
  // no other consumer relies on the original values.
  double w = -1/5.0, b = 4.0;
  cv::Mat fg_prob_img = fg_objects_img;
  fg_prob_img.convertTo(fg_prob_img, fg_prob_img.type(), w, b);   // y = w*x + b
  cv::exp(fg_prob_img, fg_prob_img);                              // e^y
  fg_prob_img.convertTo(fg_prob_img, fg_prob_img.type(), 1, 1);   // 1 + e^y
  cv::divide(1.0, fg_prob_img, fg_prob_img);                      // 1 / (1 + e^y)
  cv::namedWindow("fg_prob_img");
  // NOTE(review): no cv::waitKey here — assumes the HighGUI event loop is
  // pumped elsewhere, otherwise the window will not refresh; confirm.
  cv::imshow("fg_prob_img", fg_prob_img);
  // Removed an unused local (`double sum = cv::sum(fg_prob_img)[0];`) — the
  // value was computed and discarded with no side effects.
}
示例12: image_cb
// Image callback: convert the incoming message to an 8-bit grayscale
// IplImage and forward it to the detector. The pointer returned by
// imgMsgToCv() is owned by the bridge, so it is not released here.
void image_cb(const sensor_msgs::ImageConstPtr &msg)
{
  //ROS_INFO("checking for pancakes");
  IplImage *frame = NULL;
  try
  {
    frame = bridge.imgMsgToCv(msg, "mono8");
  }
  catch (sensor_msgs::CvBridgeException error)
  {
    // Conversion failed — skip this frame entirely.
    ROS_ERROR("bridge error");
    return;
  }
  detector.process_image(frame);
}
示例13: imageCallback
// Image callback: run cvGoodFeaturesToTrack corner detection on the incoming
// mono8 frame, draw a circle on each detected corner, and display the result.
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
  cvZero(edges);
  IplImage *img_base = bridge.imgMsgToCv(msg, "mono8"),
           // Two 32-bit float scratch images required by cvGoodFeaturesToTrack
           // (IPL_DEPTH_32F == 32, same value the original passed literally).
           *eigI = cvCreateImage(cvSize(640,480), IPL_DEPTH_32F, 1),
           *tempI = cvCreateImage(cvSize(640,480), IPL_DEPTH_32F, 1);
  // corner_count is an in/out parameter: on input it is the capacity of the
  // corners array, on output the number of corners actually found. Reset it
  // each frame, otherwise the capacity shrinks to the previous frame's count.
  // NOTE(review): assumes `max` is the capacity of `corners` — confirm.
  corner_count = max;
  cvGoodFeaturesToTrack(img_base, eigI,tempI, corners,&corner_count, quality_level,min_distance);
  // Bug fix: draw only the corners that were found. Looping to `max` read
  // uninitialized array entries whenever fewer than `max` corners exist.
  for(i=0; i<corner_count; i++)
  {
    cvCircle(img_base, cvPoint((int)corners[i].x,(int)corners[i].y), 4,cvScalar(255),1);
  }
  //send_array();
  cvShowImage("Lines",img_base);
  cvReleaseImage(&eigI); cvReleaseImage(&tempI);
}
示例14: imageCallback
// This is called whenever the node gets a new image from the camera.
// Converts the image, publishes the (transformed) view-corner cloud, detects
// orange points, and publishes their transformed positions.
void DemoNode::imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
  // Convert image from ROS format to OpenCV format
  static sensor_msgs::CvBridge bridge;
  static bool init = false;
  static vector<Point2f> viewCorners,foundPoints;
  cv::Mat image;
  try
  {
    image = cv::Mat(bridge.imgMsgToCv(msg, "bgr8"));
    if(!init){
      // Store the corners of the image for transformation purposes
      viewCorners.push_back(Point2f(0,0));
      viewCorners.push_back(Point2f(0,image.size().width-1));
      viewCorners.push_back(Point2f(image.size().height-1,image.size().width-1));
      viewCorners.push_back(Point2f(image.size().height-1,0));
      // Bug fix: mark initialization as done. `init` was never set, so four
      // corner points were appended on every frame and viewCorners grew
      // without bound, corrupting the published view cloud.
      init = true;
    }
    transformPoints(viewCorners,viewCloud);
    pub_view_pts.publish(viewCloud);
    // Detect orange points in the image
    foundPoints.clear();
    findPoints(image,foundPoints);
    if(foundPoints.size() > 1){
      // Transform and publish points if you got any
      transformPoints(foundPoints,pointCloud);
      pub_nav_pts.publish(pointCloud);
    }
    else{
      //ROS_INFO("Camera: nothing detected");
    }
  }
  catch (sensor_msgs::CvBridgeException& e)
  {
    ROS_ERROR("Could not convert from '%s' to 'bgr8'. E was %s", msg->encoding.c_str(), e.what());
  }
}
示例15: greyImageCallback
/// Topic callback: visualize the incoming 32-bit grey image.
/// @param grey_image_msg The gray values of point cloud, saved in a 32bit, 1 channel OpenCV IplImage
/// The IplImage returned by imgMsgToCv() is owned by the CvBridge instance
/// (do not release grey_image_32F1_ here — its allocation is managed by the
/// bridge object).
void greyImageCallback(const sensor_msgs::ImageConstPtr& grey_image_msg)
{
  try
  {
    grey_image_32F1_ = cv_bridge_0_.imgMsgToCv(grey_image_msg, "passthrough");
    // Lazily allocate the 8-bit, 3-channel display buffer on first use,
    // sized to match the incoming image.
    if (!grey_image_8U3_)
    {
      grey_image_8U3_ = cvCreateImage(cvGetSize(grey_image_32F1_), IPL_DEPTH_8U, 3);
    }
    // Rescale the float data into a displayable image and show it.
    ipa_Utils::ConvertToShowImage(grey_image_32F1_, grey_image_8U3_, 1, 0, 700);
    cvShowImage("gray data", grey_image_8U3_);
  }
  catch (sensor_msgs::CvBridgeException& e)
  {
    ROS_ERROR("[tof_camera_viewer] Could not convert from '%s' to '32FC1'.", grey_image_msg->encoding.c_str());
  }
}