This page collects and summarizes typical usage examples of the C++ method image_transport::Publisher::getNumSubscribers. If you have been wondering what Publisher::getNumSubscribers does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore the enclosing class image_transport::Publisher for further usage examples.
The following presents 15 code examples of Publisher::getNumSubscribers, sorted by popularity by default. You can upvote examples you like or find useful; your feedback helps the system recommend better C++ samples.
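Before the examples, here is the pattern they all share in miniature: getNumSubscribers() gates expensive work (rendering, color conversion, encoding) so it only runs while someone is actually subscribed. The sketch below is self-contained but hypothetical; the node name, topic name, and dummy frame are illustrative assumptions, not taken from any example on this page.

#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <opencv2/core/core.hpp>

int main(int argc, char** argv)
{
    ros::init(argc, argv, "lazy_image_publisher");  // hypothetical node name
    ros::NodeHandle nh;
    image_transport::ImageTransport it(nh);
    image_transport::Publisher pub = it.advertise("image_out", 1);  // hypothetical topic

    ros::Rate rate(30.0);
    while (ros::ok())
    {
        // The core of the pattern: skip all expensive work unless
        // at least one subscriber is currently connected.
        if (pub.getNumSubscribers() > 0)
        {
            cv_bridge::CvImage out;
            out.header.stamp = ros::Time::now();
            out.encoding = sensor_msgs::image_encodings::BGR8;
            out.image = cv::Mat(480, 640, CV_8UC3, cv::Scalar(0, 0, 0));  // dummy frame
            pub.publish(out.toImageMsg());
        }
        ros::spinOnce();
        rate.sleep();
    }
    return 0;
}

Note that getNumSubscribers() counts subscribers across all transports of the image topic, which is what makes it suitable as a cheap guard here.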
Example 1: imageCallback
// NOTE: Not used in SPENCER! We use the version with ground plane below!
void imageCallback(const Image::ConstPtr &msg)
{
// ROS_INFO("Entered img callback");
std::vector<cudaHOG::Detection> detHog;
// unsigned char image
QImage image_rgb(&msg->data[0], msg->width, msg->height, QImage::Format_RGB888);
int returnPrepare = hog->prepare_image(image_rgb.convertToFormat(QImage::Format_ARGB32).bits(), (short unsigned int) msg->width, (short unsigned int) msg->height);
if(returnPrepare)
{
ROS_ERROR("groundHOG: Error while preparing the image");
return;
}
hog->test_image(detHog);
hog->release_image();
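// 64x128 is the standard HOG detection window; the loop below strips the
// 16 px padding on each side of the window to produce tighter boxes.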
int w = 64, h = 128;
GroundHOGDetections detections;
detections.header = msg->header;
for(unsigned int i=0;i<detHog.size();i++)
{
float score = detHog[i].score;
float scale = detHog[i].scale;
float width = (w - 32.0f)*scale;
float height = (h - 32.0f)*scale;
float x = (detHog[i].x + 16.0f*scale);
float y = (detHog[i].y + 16.0f*scale);
detections.scale.push_back(scale);
detections.score.push_back(score);
detections.pos_x.push_back(x);
detections.pos_y.push_back(y);
detections.width.push_back(width);
detections.height.push_back(height);
}
if(pub_result_image.getNumSubscribers()) {
ROS_DEBUG("Publishing image");
render_bbox_2D(detections, image_rgb, 255, 0, 0, 2);
Image sensor_image;
sensor_image.header = msg->header;
sensor_image.height = image_rgb.height();
sensor_image.width = image_rgb.width();
sensor_image.step = msg->step;
vector<unsigned char> image_bits(image_rgb.bits(), image_rgb.bits()+sensor_image.height*sensor_image.width*3);
sensor_image.data = image_bits;
sensor_image.encoding = msg->encoding;
pub_result_image.publish(sensor_image);
}
pub_message.publish(detections);
}
Example 2: publish
inline void publish(Mat img, imgColorType t, ros::Time now =
ros::Time::now())
{
hasListener=(img_pub.getNumSubscribers() > 0);
if (!enable || !hasListener)
return;
Mat res;
switch (t)
{
case gray:
msg.encoding = sensor_msgs::image_encodings::MONO8;
res = img;
break;
case hsv:
msg.encoding = sensor_msgs::image_encodings::BGR8;
cvtColor(img, res, CV_HSV2BGR);
break;
case bgr:
default:
msg.encoding = sensor_msgs::image_encodings::BGR8;
res = img;
break;
}
msg.header.stamp = now;
msg.image = res;
img_pub.publish(msg.toImageMsg());
}
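Assuming this publish method belongs to a small debug-image helper class holding the img_pub publisher and a cv_bridge::CvImage member msg, a hypothetical call site might look like:

cv::Mat frame = grabCameraFrame();  // hypothetical capture helper
debugImg.publish(frame, bgr);       // stamps the frame, tags it BGR8, and publishes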
Example 3: thereAreListeners
inline bool thereAreListeners(bool loadNow=true) // This will be false if we close rqt (i.e., the last viewer disconnects)
{
if(loadNow)
{
hasListener=(img_pub.getNumSubscribers() > 0);
}
return hasListener;
}
Example 4: connectCb
// Handles (un)subscribing when clients (un)subscribe
void ConvertMetricNodelet::connectCb()
{
boost::lock_guard<boost::mutex> lock(connect_mutex_);
if (pub_depth_.getNumSubscribers() == 0)
{
sub_raw_.shutdown();
}
else if (!sub_raw_)
{
image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle());
sub_raw_ = it_->subscribe("image_raw", 1, &ConvertMetricNodelet::depthCb, this, hints);
}
}
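For this pattern to work, connectCb must be registered when the publisher is advertised. The sketch below shows how that wiring typically looks in the nodelet's onInit; the pub_depth_, it_, and connect_mutex_ names come from the example above, while the rest is an assumption modeled on the usual depth_image_proc style.

void ConvertMetricNodelet::onInit()
{
    ros::NodeHandle& nh = getNodeHandle();
    it_.reset(new image_transport::ImageTransport(nh));

    // Register connectCb for both connect and disconnect events.
    image_transport::SubscriberStatusCallback connect_cb =
        boost::bind(&ConvertMetricNodelet::connectCb, this);

    // Take the lock before advertising: connectCb may fire immediately.
    boost::lock_guard<boost::mutex> lock(connect_mutex_);
    pub_depth_ = it_->advertise("image", 1, connect_cb, connect_cb);
}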
Example 5: imgReceivedCallback
void imgReceivedCallback(const sensor_msgs::ImageConstPtr & msg)
{
if(rosPublisher.getNumSubscribers() && msg->data.size())
{
cv_bridge::CvImageConstPtr ptr = cv_bridge::toCvShare(msg);
if(ptr->image.depth() == CV_8U && ptr->image.channels() == 3)
{
cv::Mat motion = ptr->image.clone();
if(previousImage.cols == motion.cols && previousImage.rows == motion.rows)
{
unsigned char * imageData = (unsigned char *)motion.data;
unsigned char * previous_imageData = (unsigned char *)previousImage.data;
int widthStep = motion.cols * motion.elemSize();
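// Per-pixel frame differencing: keep a pixel only if at least one RGB
// channel changed by more than `ratio` (normalized by 256) since the
// previous frame; otherwise black it out.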
for(int j=0; j<motion.rows; ++j)
{
for(int i=0; i<motion.cols; ++i)
{
float b = (float)imageData[j*widthStep+i*3+0];
float g = (float)imageData[j*widthStep+i*3+1];
float r = (float)imageData[j*widthStep+i*3+2];
float previous_b = (float)previous_imageData[j*widthStep+i*3+0];
float previous_g = (float)previous_imageData[j*widthStep+i*3+1];
float previous_r = (float)previous_imageData[j*widthStep+i*3+2];
if(!(fabs(b-previous_b)/256.0f>=ratio || fabs(g-previous_g)/256.0f >= ratio || fabs(r-previous_r)/256.0f >= ratio))
{
imageData[j*widthStep+i*3+0] = 0;
imageData[j*widthStep+i*3+1] = 0;
imageData[j*widthStep+i*3+2] = 0;
}
}
}
}
previousImage = ptr->image.clone();
cv_bridge::CvImage img;
img.header = ptr->header;
img.encoding = ptr->encoding;
img.image = motion;
rosPublisher.publish(img.toImageMsg());
}
else
{
ROS_WARN("Image format should be 8bits - 3 channels");
}
}
}
Example 6: connectCallback
// Connection callback that unsubscribes from the tracker if no one is subscribed.
void connectCallback(message_filters::Subscriber<CameraInfo> &sub_cam,
message_filters::Subscriber<GroundPlane> &sub_gp,
image_transport::SubscriberFilter &sub_col,
image_transport::SubscriberFilter &sub_dep,
image_transport::ImageTransport &it) {
if(!pub_message.getNumSubscribers() && !pub_result_image.getNumSubscribers() && !pub_centres.getNumSubscribers() && !pub_detected_persons.getNumSubscribers()) {
ROS_DEBUG("Upper Body Detector: No subscribers. Unsubscribing.");
sub_cam.unsubscribe();
sub_gp.unsubscribe();
sub_col.unsubscribe();
sub_dep.unsubscribe();
} else {
ROS_DEBUG("Upper Body Detector: New subscribers. Subscribing.");
sub_cam.subscribe();
sub_gp.subscribe();
sub_col.subscribe(it,sub_col.getTopic().c_str(),1);
sub_dep.subscribe(it,sub_dep.getTopic().c_str(),1);
}
}
Example 7: connectCallback
// Connection callback that unsubscribes from the tracker if no one is subscribed.
void connectCallback(ros::Subscriber &sub_msg,
ros::NodeHandle &n,
string gp_topic,
string img_topic,
Subscriber<GroundPlane> &sub_gp,
Subscriber<CameraInfo> &sub_cam,
image_transport::SubscriberFilter &sub_col,
image_transport::ImageTransport &it){
if(!pub_message.getNumSubscribers() && !pub_result_image.getNumSubscribers() && !pub_detected_persons.getNumSubscribers()) {
ROS_DEBUG("HOG: No subscribers. Unsubscribing.");
sub_msg.shutdown();
sub_gp.unsubscribe();
sub_cam.unsubscribe();
sub_col.unsubscribe();
} else {
ROS_DEBUG("HOG: New subscribers. Subscribing.");
if(strcmp(gp_topic.c_str(), "") == 0) {
sub_msg = n.subscribe(img_topic.c_str(), 1, &imageCallback);
}
sub_cam.subscribe();
sub_gp.subscribe();
sub_col.subscribe(it,sub_col.getTopic().c_str(),1);
}
}
Example 8: disparityCb
void disparityCb(const stereo_msgs::DisparityImageConstPtr& msg)
{
if(cam_pub.getNumSubscribers() > 0 ||
mask_pub.getNumSubscribers() > 0)
{
// double ticksBefore = cv::getTickCount();
// Check for common errors in input
if (msg->min_disparity == 0.0 && msg->max_disparity == 0.0)
{
ROS_ERROR("Disparity image fields min_disparity and max_disparity are not set");
return;
}
if (msg->image.encoding != sensor_msgs::image_encodings::TYPE_32FC1)
{
ROS_ERROR("Disparity image must be 32-bit floating point (encoding '32FC1'), but has encoding '%s'",
msg->image.encoding.c_str());
return;
}
// code taken from image_view / disparity_view
// Colormap and display the disparity image
float min_disparity = msg->min_disparity;
float max_disparity = msg->max_disparity;
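// NOTE: this assumes max_disparity > min_disparity; the sanity check above
// only rejects the case where both fields are exactly zero.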
float multiplier = 255.0f / (max_disparity - min_disparity);
const cv::Mat_<float> dmat(msg->image.height, msg->image.width,
(float*)&msg->image.data[0], msg->image.step);
cv::Mat disparity_greyscale;
disparity_greyscale.create(msg->image.height, msg->image.width, CV_8UC1);
for (int row = 0; row < disparity_greyscale.rows; ++row) {
const float* d = dmat[row];
for (int col = 0; col < disparity_greyscale.cols; ++col) {
int index = (d[col] - min_disparity) * multiplier + 0.5;
//index = std::min(255, std::max(0, index));
// clamping into [0, 255] is unnecessary here because of the thresholding just below
if(index >= threshold)
disparity_greyscale.at<uchar>(row, col) = 255;
else
disparity_greyscale.at<uchar>(row, col) = 0;
}
}
cv::Mat tmp1, mask;
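// Erode followed by dilate (a morphological opening) removes small
// speckle noise from the thresholded disparity mask.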
cv::erode(disparity_greyscale, tmp1,
cv::Mat::ones(erode_size, erode_size, CV_8UC1),
cv::Point(-1, -1), erode_iterations);
cv::dilate(tmp1, mask,
cv::Mat::ones(dilate_size, dilate_size, CV_8UC1),
cv::Point(-1, -1), dilate_iterations);
if(mask_pub.getNumSubscribers() > 0)
{
cv_bridge::CvImage mask_msg;
mask_msg.header = msg->header;
mask_msg.encoding = sensor_msgs::image_encodings::TYPE_8UC1;
mask_msg.image = mask;
mask_pub.publish(mask_msg.toImageMsg());
}
if(!image_rect.empty() && cam_pub.getNumSubscribers() > 0)
{
cv::Mat masked;
image_rect.copyTo(masked, mask);
cv_bridge::CvImage masked_msg;
masked_msg.header = msg->header;
masked_msg.encoding = sensor_msgs::image_encodings::BGR8;
// rescale the output if requested
if(scale_factor > 1)
{
cv::Mat masked_tmp = masked;
cv::resize(masked_tmp, masked,
cv::Size(masked.cols*scale_factor, masked.rows*scale_factor));
}
masked_msg.image = masked;
// to provide a synchronized output we publish both topics with the same timestamp
ros::Time currentTime = ros::Time::now();
masked_msg.header.stamp = currentTime;
camera_info.header.stamp = currentTime;
cam_pub.publish(*masked_msg.toImageMsg(), camera_info);
}
// ROS_INFO("disparityCb runtime: %f ms",
// 1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
}
}
Example 9: image_callback
//.........part of the code omitted here.........
poseMsg.header.stamp = curr_stamp;
double aruco_roll_, aruco_pitch_, aruco_yaw_;
tf::Quaternion aruco_quat_;
tf::quaternionMsgToTF(poseMsg.pose.orientation, aruco_quat_);
tf::Matrix3x3(aruco_quat_).getRPY(aruco_roll_, aruco_pitch_, aruco_yaw_);
//ROS_INFO("April Tag RPY: [%0.3f, %0.3f, %0.3f]", aruco_roll_*180/3.1415926, aruco_pitch_*180/3.1415926, aruco_yaw_*180/3.1415926);
//-------------------------unified coordinate systems of pose----------------------------
aruco_yaw_ = aruco_yaw_*180/3.141592;
printf("Original [x, y, yaw] = [%0.3f, %0.3f, %0.3f]\n", poseMsg.pose.position.x, poseMsg.pose.position.y, aruco_yaw_);
if (markers[i].id == 26 || markers[i].id == 58)
{
float PI = 3.1415926;
float ang = PI*3/4;
float x_0 = -0.045;//-0.015;
float y_0 = -0.015;//0.045;
poseMsg.pose.position.x = x_0 + poseMsg.pose.position.x;// + poseMsg.pose.position.x * cos(-ang) - poseMsg.pose.position.y * sin(-ang);
poseMsg.pose.position.y = y_0 + poseMsg.pose.position.y;// + poseMsg.pose.position.x * sin(-ang) + poseMsg.pose.position.y * cos(-ang);
//printf("[x, y] = [%0.3f, %0.3f]\n",poseMsg.pose.position.x, poseMsg.pose.position.y);
aruco_yaw_ = aruco_yaw_ + ang*180/PI;
printf("[x, y, yaw] = [%0.3f, %0.3f, %0.3f]\n",poseMsg.pose.position.x, poseMsg.pose.position.y, aruco_yaw_);
//printf("-----------unify the coordinate system ---------------------\n");
}
//printf("------------------------------------------------------------------\n-----------------aruco_yaw_ = %0.3f\n", aruco_yaw_);
//printf("------------------------------------------------------------------\n");
double temp_x = poseMsg.pose.position.x;
double temp_y = poseMsg.pose.position.y;
poseMsg.pose.position.x = -temp_y;
poseMsg.pose.position.y = -temp_x;
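// CAUTION: tf::createQuaternionFromRPY expects radians, but aruco_yaw_ was
// converted to degrees above -- this looks like a bug in the original example.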
tf::Quaternion quat = tf::createQuaternionFromRPY(aruco_roll_, aruco_pitch_, aruco_yaw_);
poseMsg.pose.orientation.x = quat.x();
poseMsg.pose.orientation.y = quat.y();
poseMsg.pose.orientation.z = quat.z();
poseMsg.pose.orientation.w = quat.w();
pose_pub.publish(poseMsg);
geometry_msgs::TransformStamped transformMsg;
tf::transformStampedTFToMsg(stampedTransform, transformMsg);
transform_pub.publish(transformMsg);
geometry_msgs::Vector3Stamped positionMsg;
positionMsg.header = transformMsg.header;
positionMsg.vector = transformMsg.transform.translation;
position_pub.publish(positionMsg);
}
// but drawing all the detected markers
markers[i].draw(inImage,cv::Scalar(0,0,255),2);
}
//draw a 3d cube in each marker if there is 3d info
if(camParam.isValid() && marker_size!=-1)
{
for(size_t i=0; i<markers.size(); ++i)
{
CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
}
}
if(image_pub.getNumSubscribers() > 0)
{
//show input with augmented information
cv_bridge::CvImage out_msg;
out_msg.header.stamp = curr_stamp;
out_msg.encoding = sensor_msgs::image_encodings::RGB8;
out_msg.image = inImage;
image_pub.publish(out_msg.toImageMsg());
}
if(debug_pub.getNumSubscribers() > 0)
{
//show also the internal image resulting from the threshold operation
cv_bridge::CvImage debug_msg;
debug_msg.header.stamp = curr_stamp;
debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
debug_msg.image = mDetector.getThresholdedImage();
debug_pub.publish(debug_msg.toImageMsg());
}
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
}
}
Example 10: callback
void callback(const ImageConstPtr &depth, const ImageConstPtr &color,const GroundPlane::ConstPtr &gp, const CameraInfoConstPtr &info)
{
// Check if calculation is necessary
bool detect = pub_message.getNumSubscribers() > 0 || pub_centres.getNumSubscribers() > 0 || pub_detected_persons.getNumSubscribers() > 0;
bool vis = pub_result_image.getNumSubscribers() > 0;
if(!detect && !vis)
return;
// Get depth image as matrix
cv_depth_ptr = cv_bridge::toCvCopy(depth);
img_depth_ = cv_depth_ptr->image;
Matrix<double> matrix_depth(info->width, info->height);
for (int r = 0;r < 480;r++){
for (int c = 0;c < 640;c++) {
matrix_depth(c, r) = img_depth_.at<float>(r,c);
}
}
// Generate base camera
Matrix<double> R = Eye<double>(3);
Vector<double> t(3, 0.0);
Matrix<double> K(3,3, (double*)&info->K[0]);
// Get GP
Vector<double> GP(3, (double*) &gp->n[0]);
GP.pushBack((double) gp->d);
// Detect upper bodies
Camera camera(K,R,t,GP);
PointCloud point_cloud(camera, matrix_depth);
Vector<Vector< double > > detected_bounding_boxes;
detector->ProcessFrame(camera, matrix_depth, point_cloud, upper_body_template, detected_bounding_boxes);
// Generate messages
UpperBodyDetector detection_msg;
geometry_msgs::PoseArray bb_centres;
spencer_tracking_msgs::DetectedPersons detected_persons;
detection_msg.header = depth->header;
bb_centres.header = depth->header;
detected_persons.header = depth->header;
for(int i = 0; i < detected_bounding_boxes.getSize(); i++)
{
// Custom detections message
detection_msg.pos_x.push_back(detected_bounding_boxes(i)(0));
detection_msg.pos_y.push_back(detected_bounding_boxes(i)(1));
detection_msg.width.push_back(detected_bounding_boxes(i)(2));
detection_msg.height.push_back(detected_bounding_boxes(i)(3));
detection_msg.dist.push_back(detected_bounding_boxes(i)(4));
detection_msg.median_depth.push_back(detected_bounding_boxes(i)(5));
// Calculate centres of bounding boxes
double mid_point_x = detected_bounding_boxes(i)(0)+detected_bounding_boxes(i)(2)/2.0;
double mid_point_y = detected_bounding_boxes(i)(1)+detected_bounding_boxes(i)(3)/2.0;
// PoseArray message for boundingbox centres
geometry_msgs::Pose pose;
pose.position.x = detected_bounding_boxes(i)(5)*((mid_point_x-K(2,0))/K(0,0));
pose.position.y = detected_bounding_boxes(i)(5)*((mid_point_y-K(2,1))/K(1,1));
pose.position.z = detected_bounding_boxes(i)(5);
pose.orientation.w = 1.0; //No rotation atm.
bb_centres.poses.push_back(pose);
// DetectedPerson for SPENCER
spencer_tracking_msgs::DetectedPerson detected_person;
detected_person.modality = spencer_tracking_msgs::DetectedPerson::MODALITY_GENERIC_RGBD;
detected_person.confidence = detected_bounding_boxes(i)(4); // FIXME: normalize
detected_person.pose.pose = pose;
const double LARGE_VARIANCE = 999999999;
detected_person.pose.covariance[0*6 + 0] = pose_variance;
detected_person.pose.covariance[1*6 + 1] = pose_variance; // up axis (since this is in sensor frame!)
detected_person.pose.covariance[2*6 + 2] = pose_variance;
detected_person.pose.covariance[3*6 + 3] = LARGE_VARIANCE;
detected_person.pose.covariance[4*6 + 4] = LARGE_VARIANCE;
detected_person.pose.covariance[5*6 + 5] = LARGE_VARIANCE;
detected_person.detection_id = current_detection_id;
current_detection_id += detection_id_increment;
detected_persons.detections.push_back(detected_person);
}
// Create a ROS image with the detection results and publish it
if(vis) {
ROS_DEBUG("Publishing image");
QImage image_rgb(&color->data[0], color->width, color->height, QImage::Format_RGB888); // would opencv be better?
render_bbox_2D(detection_msg, image_rgb, 0, 0, 255, 2);
sensor_msgs::Image sensor_image;
sensor_image.header = color->header;
sensor_image.height = image_rgb.height();
sensor_image.width = image_rgb.width();
sensor_image.step = color->step;
vector<unsigned char> image_bits(image_rgb.bits(), image_rgb.bits()+sensor_image.height*sensor_image.width*3);
sensor_image.data = image_bits;
sensor_image.encoding = color->encoding;
//.........part of the code omitted here.........
Example 11: image_callback
//.........part of the code omitted here.........
}
// but drawing all the detected markers
markers[i].draw(inImage,Scalar(0,0,255),2);
}
//paint a circle in the center of the image
cv::circle(inImage, cv::Point(inImage.cols/2, inImage.rows/2), 4, cv::Scalar(0,255,0), 1);
if ( markers.size() == 2 )
{
float x[2], y[2], u[2], v[2];
for (unsigned int i = 0; i < 2; ++i)
{
ROS_DEBUG_STREAM("Marker(" << i << ") at camera coordinates = ("
<< markers[i].Tvec.at<float>(0,0) << ", "
<< markers[i].Tvec.at<float>(1,0) << ", "
<< markers[i].Tvec.at<float>(2,0));
//normalized coordinates of the marker
x[i] = markers[i].Tvec.at<float>(0,0)/markers[i].Tvec.at<float>(2,0);
y[i] = markers[i].Tvec.at<float>(1,0)/markers[i].Tvec.at<float>(2,0);
//undistorted pixel
u[i] = x[i]*camParam.CameraMatrix.at<float>(0,0) +
camParam.CameraMatrix.at<float>(0,2);
v[i] = y[i]*camParam.CameraMatrix.at<float>(1,1) +
camParam.CameraMatrix.at<float>(1,2);
}
ROS_DEBUG_STREAM("Mid point between the two markers in the image = ("
<< (x[0]+x[1])/2 << ", " << (y[0]+y[1])/2 << ")");
// //paint a circle in the mid point of the normalized coordinates of both markers
// cv::circle(inImage,
// cv::Point((u[0]+u[1])/2, (v[0]+v[1])/2),
// 3, cv::Scalar(0,0,255), CV_FILLED);
//compute the midpoint in 3D:
float midPoint3D[3]; //3D point
for (unsigned int i = 0; i < 3; ++i )
midPoint3D[i] = ( markers[0].Tvec.at<float>(i,0) +
markers[1].Tvec.at<float>(i,0) ) / 2;
//now project the 3D mid point to normalized coordinates
float midPointNormalized[2];
midPointNormalized[0] = midPoint3D[0]/midPoint3D[2]; //x
midPointNormalized[1] = midPoint3D[1]/midPoint3D[2]; //y
u[0] = midPointNormalized[0]*camParam.CameraMatrix.at<float>(0,0) +
camParam.CameraMatrix.at<float>(0,2);
v[0] = midPointNormalized[1]*camParam.CameraMatrix.at<float>(1,1) +
camParam.CameraMatrix.at<float>(1,2);
ROS_DEBUG_STREAM("3D Mid point between the two markers in undistorted pixel coordinates = ("
<< u[0] << ", " << v[0] << ")");
//paint a circle in the mid point of the normalized coordinates of both markers
cv::circle(inImage,
cv::Point(u[0], v[0]),
3, cv::Scalar(0,0,255), CV_FILLED);
}
//draw a 3d cube in each marker if there is 3d info
if(camParam.isValid() && marker_size!=-1)
{
for(unsigned int i=0; i<markers.size(); ++i)
{
CvDrawingUtils::draw3dCube(inImage, markers[i], camParam);
}
}
if(image_pub.getNumSubscribers() > 0)
{
//show input with augmented information
cv_bridge::CvImage out_msg;
out_msg.header.stamp = ros::Time::now();
out_msg.encoding = sensor_msgs::image_encodings::RGB8;
out_msg.image = inImage;
image_pub.publish(out_msg.toImageMsg());
}
if(debug_pub.getNumSubscribers() > 0)
{
//show also the internal image resulting from the threshold operation
cv_bridge::CvImage debug_msg;
debug_msg.header.stamp = ros::Time::now();
debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
debug_msg.image = mDetector.getThresholdedImage();
debug_pub.publish(debug_msg.toImageMsg());
}
ROS_DEBUG("runtime: %f ms",
1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
}
}
Example 12: image_callback
void image_callback(const sensor_msgs::ImageConstPtr& msg)
{
if(!cam_info_received) return;
cv_bridge::CvImagePtr cv_ptr;
try
{
cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
inImage = cv_ptr->image;
resultImg = cv_ptr->image.clone();
//detection results will go into "markers"
markers.clear();
//Ok, let's detect
double min_size = boards[0].marker_size;
for (int board_index = 1; board_index < boards.size(); board_index++)
if (min_size > boards[board_index].marker_size) min_size = boards[board_index].marker_size;
mDetector.detect(inImage, markers, camParam, min_size, false);
for (int board_index = 0; board_index < boards.size(); board_index++)
{
Board board_detected;
//Detection of the board
float probDetect = the_board_detector.detect(markers, boards[board_index].config, board_detected, camParam, boards[board_index].marker_size);
if (probDetect > 0.0)
{
tf::Transform transform = ar_sys::getTf(board_detected.Rvec, board_detected.Tvec);
tf::StampedTransform stampedTransform(transform, ros::Time::now(), "apollon_cam_right", boards[board_index].name +"_right");
//_tfBraodcaster.sendTransform(stampedTransform); // from phillip
/*geometry_msgs::PoseStamped poseMsg;
tf::poseTFToMsg(transform, poseMsg.pose);
poseMsg.header.frame_id = msg->header.frame_id;
poseMsg.header.stamp = msg->header.stamp;
pose_pub.publish(poseMsg);*/
geometry_msgs::TransformStamped transformMsg;
tf::transformStampedTFToMsg(stampedTransform, transformMsg);
transform_pub.publish(transformMsg);
/*geometry_msgs::Vector3Stamped positionMsg;
positionMsg.header = transformMsg.header;
positionMsg.vector = transformMsg.transform.translation;
position_pub.publish(positionMsg);*/
if(camParam.isValid())
{
//draw board axis
CvDrawingUtils::draw3dAxis(resultImg, board_detected, camParam);
}
}
}
//for each marker, draw info and its boundaries in the image
for(size_t i=0; draw_markers && i < markers.size(); ++i)
{
markers[i].draw(resultImg,cv::Scalar(0,0,255),2);
}
if(camParam.isValid())
{
//draw a 3d cube in each marker if there is 3d info
for(size_t i=0; i<markers.size(); ++i)
{
if (draw_markers_cube) CvDrawingUtils::draw3dCube(resultImg, markers[i], camParam);
if (draw_markers_axis) CvDrawingUtils::draw3dAxis(resultImg, markers[i], camParam);
}
}
if(image_pub.getNumSubscribers() > 0)
{
//show input with augmented information
cv_bridge::CvImage out_msg;
out_msg.header.frame_id = msg->header.frame_id;
out_msg.header.stamp = msg->header.stamp;
out_msg.encoding = sensor_msgs::image_encodings::RGB8;
out_msg.image = resultImg;
image_pub.publish(out_msg.toImageMsg());
}
if(debug_pub.getNumSubscribers() > 0)
{
//show also the internal image resulting from the threshold operation
cv_bridge::CvImage debug_msg;
debug_msg.header.frame_id = msg->header.frame_id;
debug_msg.header.stamp = msg->header.stamp;
debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
debug_msg.image = mDetector.getThresholdedImage();
debug_pub.publish(debug_msg.toImageMsg());
}
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
//.........part of the code omitted here.........
Example 13: image_callback
void image_callback(const sensor_msgs::ImageConstPtr& msg)
{
// double ticksBefore = cv::getTickCount();
static tf::TransformBroadcaster br;
if(cam_info_received)
{
cv_bridge::CvImagePtr cv_ptr;
try
{
cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
inImage = cv_ptr->image;
//detection results will go into "markers"
markers.clear();
//Ok, let's detect
mDetector.detect(inImage, markers, camParam, marker_size, false);
//for each marker, draw info and its boundaries in the image
for(size_t i=0; i<markers.size(); ++i)
{
// only publishing the selected marker
if(markers[i].id == marker_id)
{
tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
tf::StampedTransform stampedTransform(transform, ros::Time::now(),
parent_name, child_name);
br.sendTransform(stampedTransform);
geometry_msgs::PoseStamped poseMsg;
tf::poseTFToMsg(transform, poseMsg.pose);
poseMsg.header.frame_id = parent_name;
poseMsg.header.stamp = ros::Time::now();
pose_pub.publish(poseMsg);
}
// but drawing all the detected markers
markers[i].draw(inImage,Scalar(0,0,255),2);
}
//draw a 3d cube in each marker if there is 3d info
if(camParam.isValid() && marker_size!=-1)
{
for(size_t i=0; i<markers.size(); ++i)
{
//CvDrawingUtils::draw3dCube(inImage, markers[i], camParam);
CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
}
}
if(image_pub.getNumSubscribers() > 0)
{
//show input with augmented information
cv_bridge::CvImage out_msg;
out_msg.header.stamp = ros::Time::now();
out_msg.encoding = sensor_msgs::image_encodings::TYPE_8UC3;
out_msg.image = inImage;
image_pub.publish(out_msg.toImageMsg());
}
if(debug_pub.getNumSubscribers() > 0)
{
//show also the internal image resulting from the threshold operation
cv_bridge::CvImage debug_msg;
debug_msg.header.stamp = ros::Time::now();
debug_msg.encoding = sensor_msgs::image_encodings::TYPE_8UC1;
debug_msg.image = mDetector.getThresholdedImage();
debug_pub.publish(debug_msg.toImageMsg());
}
// ROS_INFO("runtime: %f ms",
// 1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
}
}
Example 14: image_callback
void image_callback(const sensor_msgs::ImageConstPtr& msg)
{
static tf::TransformBroadcaster br;
if(!cam_info_received) return;
cv_bridge::CvImagePtr cv_ptr;
try
{
cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
inImage = cv_ptr->image;
resultImg = cv_ptr->image.clone();
//detection results will go into "markers"
markers.clear();
//Ok, let's detect
mDetector.detect(inImage, markers, camParam, marker_size, false);
//Detection of the board
float probDetect=the_board_detector.detect(markers, the_board_config, the_board_detected, camParam, marker_size);
if (probDetect > 0.0)
{
foundBoard = true; // added
tf::Transform transform = ar_sys::getTf(the_board_detected.Rvec, the_board_detected.Tvec);
tf::StampedTransform stampedTransform(transform, msg->header.stamp, msg->header.frame_id, board_frame);
if (publish_tf)
br.sendTransform(stampedTransform);
geometry_msgs::PoseStamped poseMsg;
tf::poseTFToMsg(transform, poseMsg.pose);
poseMsg.header.frame_id = msg->header.frame_id;
poseMsg.header.stamp = msg->header.stamp;
pose_pub.publish(poseMsg);
geometry_msgs::TransformStamped transformMsg;
tf::transformStampedTFToMsg(stampedTransform, transformMsg);
transform_pub.publish(transformMsg);
geometry_msgs::Vector3Stamped positionMsg;
positionMsg.header = transformMsg.header;
positionMsg.vector = transformMsg.transform.translation;
position_pub.publish(positionMsg);
std_msgs::Float32 boardSizeMsg;
boardSizeMsg.data=the_board_detected[0].ssize;
boardSize_pub.publish(boardSizeMsg);
}
// ---- added: publish the detected board's raw Rvec/Tvec ----
if(rvec_pub.getNumSubscribers() > 0 && foundBoard)
{
cv_bridge::CvImage rvecMsg;
rvecMsg.header.frame_id = msg->header.frame_id;
rvecMsg.header.stamp = msg->header.stamp;
rvecMsg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
rvecMsg.image = the_board_detected[0].Rvec;
rvec_pub.publish(rvecMsg.toImageMsg());
}
if(tvec_pub.getNumSubscribers() > 0 && foundBoard)
{
cv_bridge::CvImage tvecMsg;
tvecMsg.header.frame_id = msg->header.frame_id;
tvecMsg.header.stamp = msg->header.stamp;
tvecMsg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
tvecMsg.image = the_board_detected[0].Tvec;
tvec_pub.publish(tvecMsg.toImageMsg());
}
// ---- end added ----
//for each marker, draw info and its boundaries in the image
for(size_t i=0; draw_markers && i < markers.size(); ++i)
{
markers[i].draw(resultImg,cv::Scalar(0,0,255),2);
}
if(camParam.isValid() && marker_size != -1)
{
//draw a 3d cube in each marker if there is 3d info
for(size_t i=0; i<markers.size(); ++i)
{
if (draw_markers_cube) CvDrawingUtils::draw3dCube(resultImg, markers[i], camParam);
if (draw_markers_axis) CvDrawingUtils::draw3dAxis(resultImg, markers[i], camParam);
}
//draw board axis
if (probDetect > 0.0)
{
CvDrawingUtils::draw3dAxis(resultImg, the_board_detected, camParam);
}
}
if(image_pub.getNumSubscribers() > 0)
{
//show input with augmented information
cv_bridge::CvImage out_msg;
//.........part of the code omitted here.........
Example 15: imageGroundPlaneCallback
void imageGroundPlaneCallback(const ImageConstPtr &color, const CameraInfoConstPtr &camera_info,
const GroundPlaneConstPtr &gp)
{
// ROS_INFO("Entered gp-img callback");
std::vector<cudaHOG::Detection> detHog;
// unsigned char image
QImage image_rgb(&color->data[0], color->width, color->height, QImage::Format_RGB888);
int returnPrepare = hog->prepare_image(image_rgb.convertToFormat(QImage::Format_ARGB32).bits(),
(short unsigned int) color->width, (short unsigned int) color->height);
if(returnPrepare)
{
ROS_ERROR("Error by preparing the image");
return;
}
// Generate base camera
Matrix<float> R = Eye<float>(3);
Vector<float> t(3, 0.0);
// Get GP
Vector<double> GPN(3, (double*) &gp->n[0]);
double GPd = ((double) gp->d)*(-1000.0); // GPd = -958.475;
Matrix<double> K(3,3, (double*)&camera_info->K[0]);
// NOTE: Using 0 1 0 does not work, apparently due to numerical problems in libCudaHOG (E(1,1) gets zero when solving quadratic form)
Vector<float> float_GPN(3);
float_GPN(0) = -0.0123896; //-float(GPN(0));
float_GPN(1) = 0.999417; //-float(GPN(1)); // swapped with z by Timm
float_GPN(2) = 0.0317988; //-float(GPN(2));
float float_GPd = (float) GPd;
Matrix<float> float_K(3,3);
float_K(0,0) = K(0,0); float_K(1,0) = K(1,0); float_K(2,0) = K(2,0);
float_K(1,1) = K(1,1); float_K(0,1) = K(0,1); float_K(2,1) = K(2,1);
float_K(2,2) = K(2,2); float_K(0,2) = K(0,2); float_K(1,2) = K(1,2);
//ROS_WARN("Ground plane: %.2f %.2f %.2f d=%.3f", float_GPN(0), float_GPN(1), float_GPN(2), float_GPd);
// If an assertion fails here, the ground plane is probably oriented incorrectly
hog->set_camera(R.data(), float_K.data(), t.data());
hog->set_groundplane(float_GPN.data(), &float_GPd);
hog->prepare_roi_by_groundplane();
hog->test_image(detHog);
hog->release_image();
const int WINDOW_WIDTH = 64, WINDOW_HEIGHT = 128;
GroundHOGDetections detections;
detections.header = color->header;
for(unsigned int i=0;i<detHog.size();i++)
{
float score = detHog[i].score;
if (score < score_thresh) continue;
float scale = detHog[i].scale;
float width = (WINDOW_WIDTH - 32.0f)*scale;
float height = (WINDOW_HEIGHT - 32.0f)*scale;
float x = (detHog[i].x + 16.0f*scale);
float y = (detHog[i].y + 16.0f*scale);
detections.scale.push_back(scale);
detections.score.push_back(score);
detections.pos_x.push_back(x);
detections.pos_y.push_back(y);
detections.width.push_back(width);
detections.height.push_back(height);
}
if(pub_result_image.getNumSubscribers()) {
ROS_DEBUG("Publishing image");
render_bbox_2D(detections, image_rgb, 255, 0, 0, 2);
Image sensor_image;
sensor_image.header = color->header;
sensor_image.height = image_rgb.height();
sensor_image.width = image_rgb.width();
sensor_image.step = color->step;
vector<unsigned char> image_bits(image_rgb.bits(), image_rgb.bits()+sensor_image.height*sensor_image.width*3);
sensor_image.data = image_bits;
sensor_image.encoding = color->encoding;
pub_result_image.publish(sensor_image);
}
pub_message.publish(detections);
//
// Now create 3D coordinates for SPENCER DetectedPersons msg
//
if(pub_detected_persons.getNumSubscribers()) {
spencer_tracking_msgs::DetectedPersons detected_persons;
detected_persons.header = color->header;
for(unsigned int i=0;i<detHog.size();i++)
//.........part of the code omitted here.........