This article collects typical usage examples of the C++ method tf::TransformListener::canTransform. If you have been wondering how TransformListener::canTransform is used in practice, the curated code examples below may help. You can also explore further usage examples of the containing class, tf::TransformListener.
The following shows 5 code examples of TransformListener::canTransform, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
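Before the examples, here is a minimal self-contained sketch (not taken from the examples below; the node and frame names are illustrative) of the usual canTransform idiom: probe the tf buffer first, then perform the lookup only when it would succeed.

#include <ros/ros.h>
#include <tf/transform_listener.h>

int main(int argc, char** argv)
{
  ros::init(argc, argv, "can_transform_demo");  // hypothetical node name
  ros::NodeHandle nh;
  tf::TransformListener listener;
  ros::Rate rate(10.0);
  while (nh.ok())
  {
    // ros::Time(0) requests the latest transform available in the buffer.
    if (listener.canTransform("base_link", "odom", ros::Time(0)))
    {
      tf::StampedTransform transform;
      listener.lookupTransform("base_link", "odom", ros::Time(0), transform);
      ROS_INFO("odom origin in base_link: %.2f %.2f",
               transform.getOrigin().x(), transform.getOrigin().y());
    }
    rate.sleep();
  }
  return 0;
}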
Example 1:
bool
KalmanDetectionFilter::transformPointToFilter(samplereturn_msgs::NamedPoint& msg)
{
  tf::Point pt;
  tf::pointMsgToTF(msg.point, pt);
  tf::Stamped<tf::Point> msg_point(pt, msg.header.stamp, msg.header.frame_id);
  tf::Stamped<tf::Point> filter_point;
  if (!listener_.canTransform(_filter_frame_id, msg.header.frame_id, msg.header.stamp))
    return false;
  listener_.transformPoint(_filter_frame_id, msg_point, filter_point);
  tf::pointTFToMsg(filter_point, msg.point);
  msg.header.frame_id = _filter_frame_id;
  return true;
}
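canTransform returns immediately, so a point stamped only a few milliseconds ago can fail the check simply because the transform has not arrived yet. A common alternative, sketched here using Example 1's names (it is not part of the original), is to block briefly with waitForTransform:

// Blocking variant: wait up to 100 ms for the transform to become available.
if (!listener_.waitForTransform(_filter_frame_id, msg.header.frame_id,
                                msg.header.stamp, ros::Duration(0.1)))
  return false;
listener_.transformPoint(_filter_frame_id, msg_point, filter_point);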
Example 2: convertPoseToRootFrame
bool convertPoseToRootFrame(const geometry_msgs::PoseStamped &pose_msg,
                            geometry_msgs::PoseStamped &pose_msg_out,
                            const std::string &root_frame,
                            tf::TransformListener& tf)
{
  geometry_msgs::PoseStamped pose_msg_in = pose_msg;
  ROS_DEBUG("Request:\nframe_id: %s\nPosition: %f %f %f\nOrientation: %f %f %f %f\n",
            pose_msg_in.header.frame_id.c_str(),
            pose_msg_in.pose.position.x,
            pose_msg_in.pose.position.y,
            pose_msg_in.pose.position.z,
            pose_msg_in.pose.orientation.x,
            pose_msg_in.pose.orientation.y,
            pose_msg_in.pose.orientation.z,
            pose_msg_in.pose.orientation.w);
  pose_msg_out = pose_msg;
  tf::Stamped<tf::Pose> pose_stamped;
  poseStampedMsgToTF(pose_msg_in, pose_stamped);
  if (!tf.canTransform(root_frame, pose_stamped.frame_id_, pose_stamped.stamp_))
  {
    std::string err;
    if (tf.getLatestCommonTime(pose_stamped.frame_id_, root_frame, pose_stamped.stamp_, &err) != tf::NO_ERROR)
    {
      ROS_ERROR("pr2_arm_ik:: Cannot transform from '%s' to '%s'. TF said: %s",
                pose_stamped.frame_id_.c_str(), root_frame.c_str(), err.c_str());
      return false;
    }
  }
  try
  {
    tf.transformPose(root_frame, pose_stamped, pose_stamped);
  }
  catch (...)
  {
    ROS_ERROR("pr2_arm_ik:: Cannot transform from '%s' to '%s'",
              pose_stamped.frame_id_.c_str(), root_frame.c_str());
    return false;
  }
  tf::poseStampedTFToMsg(pose_stamped, pose_msg_out);
  return true;
}
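canTransform also has an overload that takes a std::string* and reports why the check failed (Example 5 below uses it). As a sketch, Example 2's separate check and diagnostic could be folded into one call; note that this drops the fallback to the latest common time, so it is a simplification rather than a drop-in replacement:

std::string err;
if (!tf.canTransform(root_frame, pose_stamped.frame_id_, pose_stamped.stamp_, &err))
{
  ROS_ERROR("pr2_arm_ik:: Cannot transform from '%s' to '%s'. TF said: %s",
            pose_stamped.frame_id_.c_str(), root_frame.c_str(), err.c_str());
  return false;
}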
Example 3: mean
void
KalmanDetectionFilter::drawFilterStates()
{
  if (!current_filter_)
    return;
  geometry_msgs::PointStamped temp_msg, temp_msg_base_link;
  temp_msg.header.frame_id = "odom";
  temp_msg.header.stamp = ros::Time(0);
  cv::Mat img = cv::Mat::zeros(500, 500, CV_8UC3);
  float px_per_meter = 50.0;
  float offset = 250;
  temp_msg.point.x = current_filter_->statePost.at<float>(0);
  temp_msg.point.y = current_filter_->statePost.at<float>(1);
  temp_msg.point.z = 0.0;
  if (listener_.canTransform("base_link", temp_msg.header.frame_id, temp_msg.header.stamp))
  {
    listener_.transformPoint("base_link", temp_msg, temp_msg_base_link);
  }
  else
  {
    ROS_ERROR_STREAM("cannot transform filter from odom to base_link");
    return;
  }
  cv::Point mean(temp_msg_base_link.point.x * px_per_meter,
                 temp_msg_base_link.point.y * px_per_meter);
  float rad_x = current_filter_->errorCovPost.at<float>(0,0) * px_per_meter;
  float rad_y = current_filter_->errorCovPost.at<float>(1,1) * px_per_meter;
  cv::circle(img, mean + cv::Point(0, offset), 5, cv::Scalar(255,0,0));
  cv::ellipse(img, mean + cv::Point(0, offset), cv::Size(rad_x, rad_y), 0, 0, 360, cv::Scalar(0,255,0));
  //printFilterState();
  std_msgs::Header header;
  sensor_msgs::ImagePtr debug_img_msg = cv_bridge::CvImage(header, "rgb8", img).toImageMsg();
  pub_debug_img.publish(debug_img_msg);
}
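canTransform is the non-throwing way to probe the buffer; the transform* methods themselves throw tf::TransformException when the lookup fails. The guard in Example 3 could equivalently be written in exception form (a sketch, not the original code):

try
{
  listener_.transformPoint("base_link", temp_msg, temp_msg_base_link);
}
catch (const tf::TransformException& ex)
{
  ROS_ERROR_STREAM("cannot transform filter from odom to base_link: " << ex.what());
  return;
}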
Example 4: positioned_patch
void
PointCloudProjector::synchronized_callback(const sensor_msgs::PointCloud2ConstPtr& points_msg,
                                           const samplereturn_msgs::PatchArrayConstPtr& patches_msg)
{
  // create patch output message
  samplereturn_msgs::PatchArray positioned_patches;
  positioned_patches.header = patches_msg->header;
  positioned_patches.cam_info = patches_msg->cam_info;
  // create marker array debug message
  visualization_msgs::MarkerArray vis_markers;
  // create camera model object
  image_geometry::PinholeCameraModel model;
  model.fromCameraInfo(patches_msg->cam_info);
  // ensure tf is ready
  if (!listener_.canTransform(clipping_frame_id_, patches_msg->header.frame_id,
                              patches_msg->header.stamp))
  {
    patches_out.publish(positioned_patches);
    return;
  }
  // get camera origin in clipping frame
  tf::StampedTransform camera;
  listener_.lookupTransform(clipping_frame_id_, patches_msg->header.frame_id,
                            patches_msg->header.stamp, camera);
  Eigen::Vector3d camera_origin;
  tf::vectorTFToEigen(camera.getOrigin(), camera_origin);
  // scale and transform pointcloud into clipping frame
  pcl::PointCloud<pcl::PointXYZRGB>::Ptr colorpoints(new pcl::PointCloud<pcl::PointXYZRGB>),
      points_native(new pcl::PointCloud<pcl::PointXYZRGB>),
      points_scaled(new pcl::PointCloud<pcl::PointXYZRGB>);
  pcl::fromROSMsg(*points_msg, *points_native);
  // this scale is a horrible hack to fix the manipulator point clouds
  Eigen::Transform<float, 3, Eigen::Affine> trans;
  trans.setIdentity();
  trans.scale(config_.pointcloud_scale);
  pcl::transformPointCloud(*points_native, *points_scaled, trans);
  pcl_ros::transformPointCloud(clipping_frame_id_, *points_scaled, *colorpoints, listener_);
  // id counter for debug markers
  int mid = 0;
  for (const auto& patch : patches_msg->patch_array)
  {
    samplereturn_msgs::Patch positioned_patch(patch);
    cv_bridge::CvImagePtr cv_ptr_mask;
    try {
      cv_ptr_mask = cv_bridge::toCvCopy(patch.mask, "mono8");
    }
    catch (cv_bridge::Exception& e) {
      ROS_ERROR("cv_bridge mask exception: %s", e.what());
      continue;
    }
    cv::Point2f roi_offset(patch.image_roi.x_offset, patch.image_roi.y_offset);
    Eigen::Vector4d ground_plane;
    // assume ground plane at z=0, in base_link xy plane for manipulators
    ground_plane << 0, 0, 1, 0;
    float dimension, angle;
    tf::Stamped<tf::Point> world_point;
    if (samplereturn::computeMaskPositionAndSize(listener_,
          cv_ptr_mask->image, roi_offset,
          model, patches_msg->header.stamp, patches_msg->header.frame_id,
          ground_plane, "base_link",
          &dimension, &angle, &world_point,
          NULL))
    {
      // if sample size is outside bounds, skip this patch
      if ((dimension > config_.max_major_axis) ||
          (dimension < config_.min_major_axis)) {
        continue;
      }
    }
    // find bounding box of mask
    cv::Rect rect;
    samplereturn::computeBoundingBox(cv_ptr_mask->image, &rect);
    // turn image space bounding box into 4 3d rays
    cv::Point2d patch_origin(patch.image_roi.x_offset,
                             patch.image_roi.y_offset);
    std::vector<cv::Point2d> rpoints;
    rpoints.push_back(cv::Point2d(rect.x, rect.y) + patch_origin);
    rpoints.push_back(cv::Point2d(rect.x, rect.y + rect.height) + patch_origin);
    rpoints.push_back(cv::Point2d(rect.x + rect.width, rect.y + rect.height) + patch_origin);
    rpoints.push_back(cv::Point2d(rect.x + rect.width, rect.y) + patch_origin);
    std::vector<Eigen::Vector3d> rays;
    rays.resize(rpoints.size());
    std::transform(rpoints.begin(), rpoints.end(), rays.begin(),
        [model, patches_msg, this](cv::Point2d uv) -> Eigen::Vector3d
        {
          cv::Point3d xyz = model.projectPixelTo3dRay(uv);
//......... part of the code omitted here .........
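The omitted remainder of Example 4 presumably turns each pixel ray into a point on the ground plane set up above (ground_plane << 0, 0, 1, 0). As a hedged, self-contained sketch with hypothetical names, one way to intersect such a ray with a plane, assuming the ray origin and direction are already expressed in the plane's frame:

#include <cmath>
#include <Eigen/Dense>

// Intersect a ray with the plane n.dot(p) + d = 0, packed as (nx, ny, nz, d).
bool intersectRayWithPlane(const Eigen::Vector3d& origin,
                           const Eigen::Vector3d& direction,
                           const Eigen::Vector4d& plane,
                           Eigen::Vector3d* intersection)
{
  const Eigen::Vector3d normal = plane.head<3>();
  const double denom = normal.dot(direction);
  if (std::fabs(denom) < 1e-9)
    return false;  // ray is parallel to the plane
  const double t = -(normal.dot(origin) + plane(3)) / denom;
  if (t < 0)
    return false;  // intersection lies behind the camera
  *intersection = origin + t * direction;
  return true;
}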
Example 5: workerFunc
//......... part of the code omitted here .........
ROS_ASSERT(srv.response.to_l[i].width == srv.response.to_r[i].width);
ROS_ASSERT(srv.response.to_l[i].height == srv.response.to_r[i].height);
int32_t dims[] = {srv.response.to_l[i].width, srv.response.to_l[i].height, l_step};
bool replace = false;
if (i > 0)
  replace = true;
// Debug message
geometric_verification::Debug debug_msg;
debug_msg.to = srv.response.to_id[i];
debug_msg.from = srv.response.from_id;
if (visual_odometer_->process(l_image_data, r_image_data, dims, replace)) // for i > 0 replace current images with the ones we've just added
{
  Matrix camera_motion = Matrix::inv(visual_odometer_->getMotion());
  double num_matches = visual_odometer_->getNumberOfMatches();
  double num_inliers = visual_odometer_->getNumberOfInliers();
  std::cout << "Comparing " << srv.response.from_id << " and " << srv.response.to_id[i] << "\n";
  std::cout << "Matches: " << num_matches << " Inliers: " << 100.0*num_inliers/num_matches << " %";
  // Check that the pose is OK
  btMatrix3x3 rot_mat(
      camera_motion.val[0][0], camera_motion.val[0][1], camera_motion.val[0][2],
      camera_motion.val[1][0], camera_motion.val[1][1], camera_motion.val[1][2],
      camera_motion.val[2][0], camera_motion.val[2][1], camera_motion.val[2][2]);
  btVector3 t(camera_motion.val[0][3], camera_motion.val[1][3], camera_motion.val[2][3]);
  tf::Transform delta_transform(rot_mat, t);
  // transform pose to base frame
  tf::StampedTransform base_to_sensor;
  std::string error_msg;
  if (tf_listener_.canTransform(base_link_frame_id_, sensor_frame_id_, ros::Time(0), &error_msg))
  {
    tf_listener_.lookupTransform(
        base_link_frame_id_,
        sensor_frame_id_,
        ros::Time(0), base_to_sensor);
  }
  else
  {
    ROS_INFO("The tf from '%s' to '%s' does not seem to be available, "
             "will assume it as identity!",
             base_link_frame_id_.c_str(),
             sensor_frame_id_.c_str());
    base_to_sensor.setIdentity();
  }
  tf::Transform base_transform = base_to_sensor * delta_transform * base_to_sensor.inverse();
  geometry_msgs::PoseWithCovarianceStamped transform_msg;
  transform_msg.header.stamp = ros::Time::now();
  transform_msg.header.frame_id = "/g2o_map";
  tf::poseTFToMsg(base_transform, transform_msg.pose.pose);
  tf::poseTFToMsg(base_transform, debug_msg.transform);
  transform_msg.pose.covariance = STANDARD_POSE_COVARIANCE;
  std::cout << "Translation is " << transform_msg.pose.pose.position.x << ", " << transform_msg.pose.pose.position.y << "\n";
  if (std::abs(transform_msg.pose.pose.position.z) > transz_)
  {
    ROS_INFO("Got a z transform > %fm, this is unlikely, validation failed.", transz_);
    continue;
  }
  if (std::abs(transform_msg.pose.pose.position.x) > transx_)