This page collects typical usage examples of the C++ method image_transport::CameraPublisher::publish. If you have been wondering how CameraPublisher::publish is used in practice, the curated examples below should help; you can also explore further usage of the containing class, image_transport::CameraPublisher.
Fifteen code examples of CameraPublisher::publish are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
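A note before the examples: every snippet assumes a CameraPublisher already exists. A minimal setup sketch follows, with the node, topic name, and queue size chosen purely for illustration:

#include <ros/ros.h>
#include <image_transport/image_transport.h>

int main(int argc, char** argv)
{
    ros::init(argc, argv, "camera_node"); // node name is illustrative
    ros::NodeHandle nh;
    image_transport::ImageTransport it(nh);

    // advertiseCamera sets up the paired image + camera_info topics
    // (here "image_raw" and the sibling "camera_info") behind one publisher.
    image_transport::CameraPublisher pub = it.advertiseCamera("image_raw", 1);

    ros::spin();
    return 0;
}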
Example 1: spin
/// Continuously advertises xyz and grey images.
bool spin()
{
    boost::mutex::scoped_lock lock(service_mutex_);
    sensor_msgs::Image::Ptr xyz_image_msg_ptr;
    sensor_msgs::Image::Ptr grey_image_msg_ptr;
    sensor_msgs::CameraInfo tof_image_info;

    if (tof_camera_->AcquireImages(0, &grey_image_32F1_, &xyz_image_32F3_, false, false, ipa_CameraSensors::INTENSITY) & ipa_Utils::RET_FAILED)
    {
        ROS_ERROR("[tof_camera] Tof image acquisition failed");
        return false;
    }

    /// Filter images by amplitude and remove tear-off edges
    //if(filter_xyz_tearoff_edges_ || filter_xyz_by_amplitude_)
    //    ROS_ERROR("[tof_camera] FUNCTION UNCOMMENT BY JSF");
    if (filter_xyz_tearoff_edges_) ipa_Utils::FilterTearOffEdges(xyz_image_32F3_, 0, (float)tearoff_tear_half_fraction_);
    if (filter_xyz_by_amplitude_) ipa_Utils::FilterByAmplitude(xyz_image_32F3_, grey_image_32F1_, 0, 0, lower_amplitude_threshold_, upper_amplitude_threshold_);

    try
    {
        IplImage img = xyz_image_32F3_;
        xyz_image_msg_ptr = sensor_msgs::CvBridge::cvToImgMsg(&img, "passthrough");
    }
    catch (sensor_msgs::CvBridgeException error)
    {
        ROS_ERROR("[tof_camera] Could not convert 32bit xyz IplImage to ROS message");
        return false;
    }

    try
    {
        IplImage img = grey_image_32F1_;
        grey_image_msg_ptr = sensor_msgs::CvBridge::cvToImgMsg(&img, "passthrough");
    }
    catch (sensor_msgs::CvBridgeException error)
    {
        ROS_ERROR("[tof_camera] Could not convert 32bit grey IplImage to ROS message");
        return false;
    }

    /// Set time stamp
    ros::Time now = ros::Time::now();
    xyz_image_msg_ptr->header.stamp = now;
    grey_image_msg_ptr->header.stamp = now;

    tof_image_info = camera_info_msg_;
    tof_image_info.width = grey_image_32F1_.cols;
    tof_image_info.height = grey_image_32F1_.rows;
    tof_image_info.header.stamp = now;

    /// Publish message
    xyz_image_publisher_.publish(*xyz_image_msg_ptr, tof_image_info);
    grey_image_publisher_.publish(*grey_image_msg_ptr, tof_image_info);
    return true;
}
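Example 1 relies on the long-deprecated sensor_msgs::CvBridge API. For reference, a sketch of the same conversion with the current cv_bridge package, assuming xyz_image_32F3_ is a CV_32FC3 cv::Mat as above:

#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>

cv_bridge::CvImage cv_img;
cv_img.header.stamp = ros::Time::now();
cv_img.encoding = sensor_msgs::image_encodings::TYPE_32FC3;
cv_img.image = xyz_image_32F3_;

// toImageMsg() allocates a sensor_msgs::Image and copies the pixel data into it.
sensor_msgs::ImagePtr xyz_image_msg_ptr = cv_img.toImageMsg();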
Example 2: imageCB
void imageCB(const sensor_msgs::ImageConstPtr& image_l,
             const sensor_msgs::CameraInfoConstPtr& info_l,
             const sensor_msgs::ImageConstPtr& image_r,
             const sensor_msgs::CameraInfoConstPtr& info_r) {
    sensor_msgs::Image img = *image_r;
    sensor_msgs::CameraInfo info = *info_r;
    //img.header.stamp = image_l->header.stamp;
    //info.header.stamp = info_l->header.stamp;

    pub_left_.publish(image_l, info_l);
    pub_right_.publish(img, info, info_l->header.stamp);
    //pub_right_.publish(*image_r, *info_r, info_l->header.stamp);
}
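Example 2 exercises two different publish overloads. To the best of my knowledge, the ROS 1 image_transport API offers three; the signatures below are paraphrased from the header and should be treated as a sketch rather than gospel:

// Publish by reference: both messages are copied.
void publish(const sensor_msgs::Image& image, const sensor_msgs::CameraInfo& info) const;
// Publish by shared pointer: avoids a copy for intra-process subscribers.
void publish(const sensor_msgs::ImageConstPtr& image, const sensor_msgs::CameraInfoConstPtr& info) const;
// Convenience overload: overwrites both header.stamp fields with the given time
// before publishing; this is what keeps the right image in sync with the left one above.
void publish(sensor_msgs::Image& image, sensor_msgs::CameraInfo& info, ros::Time stamp) const;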
Example 3: ir_img
void
pubRealSenseInfraredImageMsg(cv::Mat& ir_mat)
{
    sensor_msgs::ImagePtr ir_img(new sensor_msgs::Image);

    ir_img->header.seq = head_sequence_id;
    ir_img->header.stamp = head_time_stamp;
    ir_img->header.frame_id = depth_frame_id;
    ir_img->width = ir_mat.cols;
    ir_img->height = ir_mat.rows;
    ir_img->encoding = sensor_msgs::image_encodings::MONO8;
    ir_img->is_bigendian = 0;

    int step = sizeof(unsigned char) * ir_img->width;
    int size = step * ir_img->height;
    ir_img->step = step;
    ir_img->data.resize(size);
    memcpy(&ir_img->data[0], ir_mat.data, size);

    ir_camera_info->header.frame_id = depth_frame_id;
    ir_camera_info->header.stamp = head_time_stamp;
    ir_camera_info->header.seq = head_sequence_id;

    realsense_infrared_image_pub.publish(ir_img, ir_camera_info);
}
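Example 3 computes the row stride by hand; Examples 4 and 12 below repeat the same pattern for other pixel types. The general rule is step = width * channels * bytes-per-channel, and the total buffer size is step * height:

// Row stride in bytes for the encodings used in these examples:
int step_mono8 = width * 1 * sizeof(unsigned char); // MONO8       (Example 3)
int step_32fc1 = width * 1 * sizeof(float);         // TYPE_32FC1  (Example 4)
int step_bgr8  = width * 3 * sizeof(unsigned char); // BGR8        (Example 12)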
Example 4: depth_img
void
pubRealSenseDepthImageMsg32F(cv::Mat& depth_mat)
{
    sensor_msgs::ImagePtr depth_img(new sensor_msgs::Image);

    depth_img->header.seq = head_sequence_id;
    depth_img->header.stamp = head_time_stamp;
    depth_img->header.frame_id = depth_frame_id;
    depth_img->width = depth_mat.cols;
    depth_img->height = depth_mat.rows;
    depth_img->encoding = sensor_msgs::image_encodings::TYPE_32FC1;
    depth_img->is_bigendian = 0;

    int step = sizeof(float) * depth_img->width;
    int size = step * depth_img->height;
    depth_img->step = step;
    depth_img->data.resize(size);
    memcpy(&depth_img->data[0], depth_mat.data, size);

    ir_camera_info->header.frame_id = depth_frame_id;
    ir_camera_info->header.stamp = head_time_stamp;
    ir_camera_info->header.seq = head_sequence_id;

    /*double minVal, maxVal;
    cv::minMaxLoc(depth_mat, &minVal, &maxVal);
    std::cout << " ***** " << minVal << " " << maxVal << std::endl;*/

    realsense_depth_image_pub.publish(depth_img, ir_camera_info);
    //pub_depth_info.publish(ir_camera_info);
}
Example 5: publishImage
void publishImage (FlyCapture2::Image * frame)
{
    //sensor_msgs::CameraInfoPtr cam_info(new sensor_msgs::CameraInfo(cam_info_->getCameraInfo()));
    // or with a member variable:
    cam_info_ptr_ = sensor_msgs::CameraInfoPtr(new sensor_msgs::CameraInfo(cam_info_mgr_->getCameraInfo()));

    if (processFrame (frame, img_, cam_info_ptr_))
        streaming_pub_.publish (img_, *cam_info_ptr_);
}
Example 6: take_and_send_image
bool take_and_send_image()
{
    usb_cam_camera_grab_image(camera_image_);

    fillImage(img_, "rgb8", camera_image_->height, camera_image_->width, 3 * camera_image_->width, camera_image_->image);
    img_.header.stamp = ros::Time::now();

    sensor_msgs::CameraInfoPtr ci(new sensor_msgs::CameraInfo(cinfo_->getCameraInfo()));
    ci->header.frame_id = img_.header.frame_id;
    ci->header.stamp = img_.header.stamp;

    image_pub_.publish(img_, *ci);
    return true;
}
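fillImage() comes from sensor_msgs/fill_image.h: it fills in the encoding, height, width, and step fields and copies the raw pixel buffer into the message in one call, which is why Example 6 needs no explicit resize()/memcpy() like Examples 3 and 4 do.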
Example 7: publishframe
void publishframe(camera_info_manager::CameraInfoManager *mgr, const image_transport::CameraPublisher &campub, const sensor_msgs::ImagePtr img, const std::string& frame)
{
    ros::Time timestamp = ros::Time::now();
    sensor_msgs::CameraInfo::Ptr cinfo(new sensor_msgs::CameraInfo(mgr->getCameraInfo()));
    std_msgs::Header::Ptr imageheader(new std_msgs::Header());
    std_msgs::Header::Ptr cinfoheader(new std_msgs::Header());

    imageheader->frame_id = frame;
    cinfoheader->frame_id = frame;
    imageheader->stamp = timestamp;
    cinfoheader->stamp = timestamp;

    img->header = *imageheader;
    cinfo->header = *cinfoheader;
    campub.publish(img, cinfo);
}
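Note that Example 7 hands shared pointers straight to publish(img, cinfo). With intra-process transport (for example, inside a nodelet manager), this lets the same message object reach subscribers without a serialization or copy, whereas the by-reference overloads used in most other examples copy the message internally.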
Example 8: publishRgbImage
void OpenNINode::publishRgbImage(const openni_wrapper::Image& rgb_img, image_transport::CameraPublisher img_pub) const {
    sensor_msgs::ImagePtr rgb_msg = boost::make_shared<sensor_msgs::Image>();
    ros::Time time = ros::Time::now();

    rgb_msg->header.stamp = time;
    rgb_msg->encoding = enc::RGB8;
    rgb_msg->width = depth_width_;
    rgb_msg->height = depth_height_;
    rgb_msg->step = rgb_msg->width * 3;
    rgb_msg->data.resize(rgb_msg->height * rgb_msg->step);

    rgb_img.fillRGB(rgb_msg->width, rgb_msg->height, reinterpret_cast<unsigned char*>(&rgb_msg->data[0]), rgb_msg->step);

    img_pub.publish(rgb_msg, getDepthCameraInfo(rgb_msg->width, rgb_msg->height, time, 1));
}
Example 9: publishDepthImage
void OpenNINode::publishDepthImage(const openni_wrapper::DepthImage& depth, image_transport::CameraPublisher depth_pub) const {
    sensor_msgs::ImagePtr depth_msg = boost::make_shared<sensor_msgs::Image>();
    ros::Time time = ros::Time::now();

    depth_msg->header.stamp = time;
    depth_msg->encoding = sensor_msgs::image_encodings::TYPE_32FC1;
    depth_msg->width = depth_width_;
    depth_msg->height = depth_height_;
    depth_msg->step = depth_msg->width * sizeof(float);
    depth_msg->data.resize(depth_msg->height * depth_msg->step);

    depth.fillDepthImage(depth_msg->width, depth_msg->height, reinterpret_cast<float*>(&depth_msg->data[0]), depth_msg->step);

    depth_pub.publish(depth_msg, getDepthCameraInfo(depth_msg->width, depth_msg->height, time, 1));
    //pub.publish(depth_msg);
}
Example 10: take_and_send_image
bool take_and_send_image()
{
    // grab the image
    cam_.grab_image(&img_);

    // grab the camera info
    sensor_msgs::CameraInfoPtr ci(new sensor_msgs::CameraInfo(cinfo_->getCameraInfo()));
    ci->header.frame_id = img_.header.frame_id;
    ci->header.stamp = img_.header.stamp;

    // publish the image
    image_pub_.publish(img_, *ci);
    return true;
}
Example 11: publish
/** Publish camera stream topics
 *
 *  @pre image_ contains latest camera frame
 */
void publish()
{
    image_.header.frame_id = config_.frame_id;

    // get current CameraInfo data
    cam_info_ = cinfo_->getCameraInfo();
    //ROS_INFO_STREAM("cam: " << cam_info_.width << " x " << cam_info_.height << ", image: " << image_.width << " x " << image_.height);

    if (cam_info_.height != image_.height || cam_info_.width != image_.width)
    {
        // image size does not match: publish a matching uncalibrated
        // CameraInfo instead
        if (calibration_matches_)
        {
            // warn user once
            calibration_matches_ = false;
            ROS_WARN_STREAM("[" << camera_name_
                            << "] camera_info_url: " << config_.camera_info_url);
            ROS_WARN_STREAM("[" << camera_name_
                            << "] calibration (" << cam_info_.width << "x" << cam_info_.height
                            << ") does not match video mode (" << image_.width << "x" << image_.height << ") "
                            << "(publishing uncalibrated data)");
        }
        cam_info_ = sensor_msgs::CameraInfo();
        cam_info_.height = image_.height;
        cam_info_.width = image_.width;
    }
    else if (!calibration_matches_)
    {
        // calibration OK now
        calibration_matches_ = true;
        ROS_INFO_STREAM("[" << camera_name_
                        << "] calibration matches video mode now");
    }

    cam_info_.header.frame_id = config_.frame_id;
    cam_info_.header.stamp = image_.header.stamp;

    // @todo log a warning if (filtered) time since last published
    // image is not reasonably close to configured frame_rate

    // Publish via image_transport
    image_pub_.publish(image_, cam_info_);
}
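Two details of Example 11 are worth copying. On a size mismatch it publishes a default-constructed (i.e. uncalibrated) CameraInfo with only width and height filled in, so downstream consumers such as image rectification are never fed a calibration that does not fit the image. And the calibration_matches_ flag ensures the warning is logged once per state change rather than once per frame.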
Example 12: rgb_img
void
pubRealSenseRGBImageMsg(cv::Mat& rgb_mat)
{
    sensor_msgs::ImagePtr rgb_img(new sensor_msgs::Image);

    rgb_img->header.seq = head_sequence_id;
    rgb_img->header.stamp = head_time_stamp;
    rgb_img->header.frame_id = rgb_frame_id;
    rgb_img->width = rgb_mat.cols;
    rgb_img->height = rgb_mat.rows;
    rgb_img->encoding = sensor_msgs::image_encodings::BGR8;
    rgb_img->is_bigendian = 0;

    int step = sizeof(unsigned char) * 3 * rgb_img->width;
    int size = step * rgb_img->height;
    rgb_img->step = step;
    rgb_img->data.resize(size);
    memcpy(&(rgb_img->data[0]), rgb_mat.data, size);

    rgb_camera_info->header.frame_id = rgb_frame_id;
    rgb_camera_info->header.stamp = head_time_stamp;
    rgb_camera_info->header.seq = head_sequence_id;

    realsense_rgb_image_pub.publish(rgb_img, rgb_camera_info);
    //pub_rgb_info.publish(rgb_camera_info);

    //save rgb img
    // static int count = 0;
    // count++;
    // if(count > 0)
    // {
    //     struct timeval save_time;
    //     gettimeofday( &save_time, NULL );
    //     char save_name[256];
    //     sprintf(save_name, "~/temp/realsense_rgb_%d.jpg", (int)save_time.tv_sec);
    //     printf("\nsave realsense rgb img: %s\n", save_name);
    //     cv::imwrite(save_name, rgb_mat);
    //     count = 0;
    // }
}
Example 13: constMaskCb
void constMaskCb(const stereo_msgs::DisparityImageConstPtr& msg)
{
    if (!image_rect.empty())
    {
        cv::Mat masked;
        image_rect.copyTo(masked, mask);

        cv_bridge::CvImage masked_msg;
        masked_msg.header = msg->header;
        masked_msg.encoding = sensor_msgs::image_encodings::BGR8;

        //if we want rescale then rescale
        if (scale_factor > 1)
        {
            cv::Mat masked_tmp = masked;
            cv::resize(masked_tmp, masked,
                       cv::Size(masked.cols*scale_factor, masked.rows*scale_factor));
        }
        masked_msg.image = masked;
        cam_pub.publish(*masked_msg.toImageMsg(), camera_info);
    }
    // ROS_INFO("disparityCb runtime: %f ms",
    //          1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
}
Example 14: disparityCb
void disparityCb(const stereo_msgs::DisparityImageConstPtr& msg)
{
    if (cam_pub.getNumSubscribers() > 0 ||
        mask_pub.getNumSubscribers() > 0)
    {
        // double ticksBefore = cv::getTickCount();

        // Check for common errors in input
        if (msg->min_disparity == 0.0 && msg->max_disparity == 0.0)
        {
            ROS_ERROR("Disparity image fields min_disparity and max_disparity are not set");
            return;
        }
        if (msg->image.encoding != sensor_msgs::image_encodings::TYPE_32FC1)
        {
            ROS_ERROR("Disparity image must be 32-bit floating point (encoding '32FC1'), but has encoding '%s'",
                      msg->image.encoding.c_str());
            return;
        }

        // code taken from image_view / disparity_view
        // Colormap and display the disparity image
        float min_disparity = msg->min_disparity;
        float max_disparity = msg->max_disparity;
        float multiplier = 255.0f / (max_disparity - min_disparity);

        const cv::Mat_<float> dmat(msg->image.height, msg->image.width,
                                   (float*)&msg->image.data[0], msg->image.step);
        cv::Mat disparity_greyscale;
        disparity_greyscale.create(msg->image.height, msg->image.width, CV_8UC1);

        for (int row = 0; row < disparity_greyscale.rows; ++row) {
            const float* d = dmat[row];
            for (int col = 0; col < disparity_greyscale.cols; ++col) {
                int index = (d[col] - min_disparity) * multiplier + 0.5;
                //index = std::min(255, std::max(0, index));
                // pushing it into the interval does not matter because of the threshold afterwards
                if (index >= threshold)
                    disparity_greyscale.at<uchar>(row, col) = 255;
                else
                    disparity_greyscale.at<uchar>(row, col) = 0;
            }
        }

        cv::Mat tmp1, mask;
        cv::erode(disparity_greyscale, tmp1,
                  cv::Mat::ones(erode_size, erode_size, CV_8UC1),
                  cv::Point(-1, -1), erode_iterations);
        cv::dilate(tmp1, mask,
                   cv::Mat::ones(dilate_size, dilate_size, CV_8UC1),
                   cv::Point(-1, -1), dilate_iterations);

        if (mask_pub.getNumSubscribers() > 0)
        {
            cv_bridge::CvImage mask_msg;
            mask_msg.header = msg->header;
            mask_msg.encoding = sensor_msgs::image_encodings::TYPE_8UC1;
            mask_msg.image = mask;
            mask_pub.publish(mask_msg.toImageMsg());
        }

        if (!image_rect.empty() && cam_pub.getNumSubscribers() > 0)
        {
            cv::Mat masked;
            image_rect.copyTo(masked, mask);

            cv_bridge::CvImage masked_msg;
            masked_msg.header = msg->header;
            masked_msg.encoding = sensor_msgs::image_encodings::BGR8;

            //if we want rescale then rescale
            if (scale_factor > 1)
            {
                cv::Mat masked_tmp = masked;
                cv::resize(masked_tmp, masked,
                           cv::Size(masked.cols*scale_factor, masked.rows*scale_factor));
            }
            masked_msg.image = masked;

            // to provide a synchronized output we publish both topics with the same timestamp
            ros::Time currentTime = ros::Time::now();
            masked_msg.header.stamp = currentTime;
            camera_info.header.stamp = currentTime;
            cam_pub.publish(*masked_msg.toImageMsg(), camera_info);
        }

        // ROS_INFO("disparityCb runtime: %f ms",
        //          1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
    }
}
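The getNumSubscribers() guard at the top of Example 14 is the usual lazy-processing idiom: the colormapping and morphology are skipped entirely while no one subscribes to either output topic. image_transport::CameraPublisher exposes getNumSubscribers() just as ros::Publisher does, so the same pattern carries over to camera topics unchanged.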
Example 15: run
int CameraNode::run()
{
    ros::WallTime t_prev, t_now;
    int dt_avg;

    // reset timing information
    dt_avg = 0;
    t_prev = ros::WallTime::now();

    while (ros::ok()) {
        std::vector<uint8_t> data(step*height);
        CamContext_grab_next_frame_blocking(cc, &data[0], -1.0f); // never timeout

        int errnum = cam_iface_have_error();
        if (errnum == CAM_IFACE_FRAME_TIMEOUT) {
            cam_iface_clear_error();
            continue; // wait again
        }

        if (errnum == CAM_IFACE_FRAME_DATA_MISSING_ERROR) {
            cam_iface_clear_error();
        } else if (errnum == CAM_IFACE_FRAME_INTERRUPTED_SYSCALL) {
            cam_iface_clear_error();
        } else if (errnum == CAM_IFACE_FRAME_DATA_CORRUPT_ERROR) {
            cam_iface_clear_error();
        } else {
            _check_error();

            if (!_got_frame) {
                ROS_INFO("receiving images");
                _got_frame = true;
            }

            if (dt_avg++ == 9) {
                ros::WallTime t_now = ros::WallTime::now();
                ros::WallDuration dur = t_now - t_prev;
                t_prev = t_now;

                std_msgs::Float32 rate;
                rate.data = 10 / dur.toSec();
                _pub_rate.publish(rate);
                dt_avg = 0;
            }

            sensor_msgs::Image msg;
            if (_host_timestamp) {
                msg.header.stamp = ros::Time::now();
            } else {
                double timestamp;
                CamContext_get_last_timestamp(cc, &timestamp);
                _check_error();
                msg.header.stamp = ros::Time(timestamp);
            }

            unsigned long framenumber;
            CamContext_get_last_framenumber(cc, &framenumber);
            _check_error();

            msg.header.seq = framenumber;
            msg.header.frame_id = "0";
            msg.height = height;
            msg.width = width;
            msg.encoding = encoding;
            msg.step = step;
            msg.data = data;

            // get current CameraInfo data
            cam_info = cam_info_manager->getCameraInfo();
            cam_info.header.stamp = msg.header.stamp;
            cam_info.header.seq = msg.header.seq;
            cam_info.header.frame_id = msg.header.frame_id;
            cam_info.height = height;
            cam_info.width = width;

            _pub_image.publish(msg, cam_info);
        }

        ros::spinOnce();
    }

    CamContext_stop_camera(cc);
    cam_iface_shutdown();
    return 0;
}
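Several of the examples (6, 10, 11, 15) pull their CameraInfo from a camera_info_manager::CameraInfoManager. A sketch of how such a manager is typically set up; the camera name and calibration URL here are placeholders:

#include <ros/ros.h>
#include <camera_info_manager/camera_info_manager.h>

ros::NodeHandle nh("~");
// Loads calibration from the given URL (e.g. a file:// or package:// YAML).
// If the URL is empty or invalid, getCameraInfo() returns a zeroed,
// uncalibrated CameraInfo, as Example 11 anticipates.
camera_info_manager::CameraInfoManager cinfo(nh, "my_camera",
                                             "file:///tmp/calib/my_camera.yaml");
sensor_msgs::CameraInfo ci = cinfo.getCameraInfo();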