This article collects typical usage examples of the C++ tf::Transformer class. If you are wondering how the Transformer class is used in practice, the hand-picked examples below may help.
A total of 14 code examples of the Transformer class are shown, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better C++ code examples.
Example 1: transformVectorTo
bool transformVectorTo(const tf::Transformer& tf, const std::string& source_frame, const std::string& goal_frame, const ros::Time& time_source,
                       const geometry_msgs::Vector3& point_in, geometry_msgs::Vector3& point_out, const std::string& fixed_frame, const ros::Time& time_goal)
{
  ros::Duration timeout = ros::Duration().fromSec(2.0);
  if (!tf.waitForTransform(source_frame, time_source, goal_frame, time_goal, fixed_frame, timeout)) return false;
  tf::Stamped<tf::Point> pnt(tf::Vector3(point_in.x, point_in.y, point_in.z), time_source, source_frame);
  tf.transformVector(goal_frame, time_goal, pnt, fixed_frame, pnt);
  point_out.x = pnt[0];
  point_out.y = pnt[1];
  point_out.z = pnt[2];
  return true;
}
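A minimal caller sketch for reference (not part of the original source): it assumes the transformVectorTo helper above is visible in the translation unit, and the node name and frame names ("base_link", "map", "odom") are placeholders. A tf::TransformListener can be passed here because it derives from tf::Transformer.
#include <ros/ros.h>
#include <tf/transform_listener.h>
#include <geometry_msgs/Vector3.h>

int main(int argc, char** argv)
{
  ros::init(argc, argv, "transform_vector_demo");
  ros::NodeHandle nh;
  tf::TransformListener listener;  // fills the Transformer from the /tf topic
  geometry_msgs::Vector3 v_in, v_out;
  v_in.x = 1.0; v_in.y = 0.0; v_in.z = 0.0;
  ros::Time now = ros::Time::now();
  // Placeholder frames; use frames that actually exist in your TF tree.
  if (transformVectorTo(listener, "base_link", "map", now, v_in, v_out, "odom", now))
    ROS_INFO("vector in map frame: %f %f %f", v_out.x, v_out.y, v_out.z);
  return 0;
}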
Example 2: projectRayToGround
bool projectRayToGround(const tf::Transformer& listener,
                        const tf::Stamped<tf::Point> camera_ray,
                        Eigen::Vector4d ground_plane, std::string ground_frame,
                        tf::Stamped<tf::Point>* world_point)
{
  tf::StampedTransform camera_transform;
  // This is a static link, so Time(0) should be fine
  if (!listener.canTransform(ground_frame, camera_ray.frame_id_, camera_ray.stamp_)) {
    ROS_INFO("Couldn't transform %s to %s\n", camera_ray.frame_id_.c_str(),
             ground_frame.c_str());
    return false;
  }
  listener.lookupTransform(ground_frame, camera_ray.frame_id_, ros::Time(0), camera_transform);
  tf::Stamped<tf::Point> ground_frame_ray;
  listener.transformVector(ground_frame, camera_ray, ground_frame_ray);
  Eigen::Vector3d ray, ray_origin;
  tf::vectorTFToEigen(ground_frame_ray, ray);
  tf::vectorTFToEigen(camera_transform.getOrigin(), ray_origin);
  Eigen::Vector3d ground_v = intersectRayPlane(ray, ray_origin, ground_plane);
  tf::vectorEigenToTF(ground_v, *world_point);
  world_point->frame_id_ = ground_frame;
  world_point->stamp_ = camera_ray.stamp_;
  return true;
}
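The intersectRayPlane helper used above is not part of this excerpt. A possible implementation sketch, assuming the plane is given as (a, b, c, d) with a·x + b·y + c·z + d = 0 and the ray is not parallel to the plane; the signature is inferred from the call site and may differ from the original.
#include <Eigen/Dense>

// Hypothetical helper: intersect the ray p(t) = origin + t * direction with
// the plane n.dot(p) + d = 0, where plane = (n.x, n.y, n.z, d).
Eigen::Vector3d intersectRayPlane(const Eigen::Vector3d& direction,
                                  const Eigen::Vector3d& origin,
                                  const Eigen::Vector4d& plane)
{
  Eigen::Vector3d n = plane.head<3>();
  double denom = n.dot(direction);
  // Caller is expected to ensure the ray is not parallel to the plane (denom != 0).
  double t = -(n.dot(origin) + plane(3)) / denom;
  return origin + t * direction;
}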
Example 3: detectObject
// detected object = checkerboard, target = plug
bool RosDetector::detectObject(const sensor_msgs::ImageConstPtr& image_msg,
                               const sensor_msgs::CameraInfoConstPtr& info_msg,
                               const tf::Stamped<tf::Pose>& target_prior, const tf::Transformer& transformer,
                               tf::Stamped<tf::Pose>& target_pose)
{
  // Convert image message
  cv_bridge::CvImageConstPtr cv_image = cv_bridge::toCvShare(image_msg, "mono8");
  cv::Mat image = cv_image->image;
  // Detect the checkerboard
  std::vector<cv::Point2f> corners;
  if (!detector_.detect(image, corners)) {
    ROS_DEBUG("%s: Failed to detect checkerboard", name_.c_str());
    publishDisplayImage(image, corners, false);
    /// @todo Publish feedback?
    return false;
  }
  // Estimate its pose
  cam_model_.fromCameraInfo(info_msg);
  tf::Stamped<tf::Pose> target_prior_in_camera;
  try {
    transformer.transformPose(cam_model_.tfFrame(), target_prior, target_prior_in_camera);
  }
  catch (tf::TransformException& ex) {
    ROS_WARN("%s: TF exception\n%s", name_.c_str(), ex.what());
    return false;
  }
  tf::Pose detected_object_prior = target_prior_in_camera * target_in_detected_object_.inverse();
  tf::Pose detected_object_pose = pose_estimator_.solveWithPrior(corners, cam_model_, detected_object_prior);
  target_pose = tf::Stamped<tf::Pose>(detected_object_pose * target_in_detected_object_,
                                      image_msg->header.stamp, cam_model_.tfFrame());
  // Publish visualization messages
  tf_broadcaster_.sendTransform(tf::StampedTransform(detected_object_prior, image_msg->header.stamp,
                                                     cam_model_.tfFrame(), "checkerboard_prior_frame"));
  tf_broadcaster_.sendTransform(tf::StampedTransform(target_prior, target_prior.stamp_,
                                                     target_prior.frame_id_, "plug_prior_frame"));
  tf_broadcaster_.sendTransform(tf::StampedTransform(detected_object_pose, image_msg->header.stamp,
                                                     cam_model_.tfFrame(), "checkerboard_frame"));
  tf_broadcaster_.sendTransform(tf::StampedTransform(target_pose, image_msg->header.stamp,
                                                     cam_model_.tfFrame(), "plug_frame"));
  publishDisplayImage(image, corners, true);
  return true;
}
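The prior computation above relies on the fixed transform target_in_detected_object_ (the plug pose expressed in the checkerboard frame). The frame algebra it uses is plain pose composition; the fragment below illustrates it with placeholder values and is not part of the original source.
// camera_T_target = camera_T_checkerboard * checkerboard_T_target, so
// camera_T_checkerboard = camera_T_target * checkerboard_T_target.inverse().
tf::Pose checkerboard_T_target(tf::Quaternion::getIdentity(), tf::Vector3(0.05, 0.0, 0.02));  // placeholder
tf::Pose camera_T_target(tf::Quaternion(0.0, 0.0, 0.0, 1.0), tf::Vector3(0.0, 0.0, 0.5));     // placeholder
tf::Pose camera_T_checkerboard = camera_T_target * checkerboard_T_target.inverse();
tf::Pose recovered_target = camera_T_checkerboard * checkerboard_T_target;  // equals camera_T_target again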
Example 4: transformLaserScanToPointCloud_ (sensor_msgs::PointCloud2 overload)
void LaserProjection::transformLaserScanToPointCloud_ (const std::string &target_frame,
                                                       const sensor_msgs::LaserScan &scan_in,
                                                       sensor_msgs::PointCloud2 &cloud_out,
                                                       tf::Transformer &tf,
                                                       double range_cutoff,
                                                       int channel_options)
{
  //check if the user has requested the index field
  bool requested_index = false;
  if ((channel_options & channel_option::Index))
    requested_index = true;
  //we'll enforce that we get index values for the laser scan so that we
  //ensure that we use the correct timestamps
  channel_options |= channel_option::Index;
  projectLaser_(scan_in, cloud_out, -1.0, channel_options);
  //we'll assume no associated viewpoint by default
  bool has_viewpoint = false;
  uint32_t vp_x_offset = 0;
  //we need to find the offset of the index field in the point cloud
  //we also know that the index field is guaranteed to exist since we
  //set the channel option above. To be really safe, it might be worth
  //putting in a check at some point, but I'm just going to put in an
  //assert for now
  uint32_t index_offset = 0;
  for(unsigned int i = 0; i < cloud_out.fields.size(); ++i)
  {
    if(cloud_out.fields[i].name == "index")
    {
      index_offset = cloud_out.fields[i].offset;
    }
    //we want to check if the cloud has a viewpoint associated with it
    //checking vp_x should be sufficient since vp_x, vp_y, and vp_z all
    //get put in together
    if(cloud_out.fields[i].name == "vp_x")
    {
      has_viewpoint = true;
      vp_x_offset = cloud_out.fields[i].offset;
    }
  }
  ROS_ASSERT(index_offset > 0);
  cloud_out.header.frame_id = target_frame;
  // Extract transforms for the beginning and end of the laser scan
  ros::Time start_time = scan_in.header.stamp;
  ros::Time end_time = scan_in.header.stamp + ros::Duration ().fromSec (scan_in.ranges.size () * scan_in.time_increment);
  tf::StampedTransform start_transform, end_transform, cur_transform ;
  tf.lookupTransform (target_frame, scan_in.header.frame_id, start_time, start_transform);
  tf.lookupTransform (target_frame, scan_in.header.frame_id, end_time, end_transform);
  double ranges_norm = 1 / ((double) scan_in.ranges.size () - 1.0);
  //we want to loop through all the points in the cloud
  for(size_t i = 0; i < cloud_out.width; ++i)
  {
    // Apply the transform to the current point
    float *pstep = (float*)&cloud_out.data[i * cloud_out.point_step + 0];
    //find the index of the point
    uint32_t pt_index;
    memcpy(&pt_index, &cloud_out.data[i * cloud_out.point_step + index_offset], sizeof(uint32_t));
    // Assume constant motion during the laser-scan, and use slerp to compute intermediate transforms
    tfScalar ratio = pt_index * ranges_norm;
    //! \todo Make a function that performs both the slerp and linear interpolation needed to interpolate a Full Transform (Quaternion + Vector)
    // Interpolate translation
    tf::Vector3 v (0, 0, 0);
    v.setInterpolate3 (start_transform.getOrigin (), end_transform.getOrigin (), ratio);
    cur_transform.setOrigin (v);
    // Interpolate rotation
    tf::Quaternion q1, q2;
    start_transform.getBasis ().getRotation (q1);
    end_transform.getBasis ().getRotation (q2);
    // Compute the slerp-ed rotation
    cur_transform.setRotation (slerp (q1, q2 , ratio));
    tf::Vector3 point_in (pstep[0], pstep[1], pstep[2]);
    tf::Vector3 point_out = cur_transform * point_in;
    // Copy transformed point into cloud
    pstep[0] = point_out.x ();
    pstep[1] = point_out.y ();
    pstep[2] = point_out.z ();
    // Convert the viewpoint as well
    if(has_viewpoint)
    {
      float *vpstep = (float*)&cloud_out.data[i * cloud_out.point_step + vp_x_offset];
      point_in = tf::Vector3 (vpstep[0], vpstep[1], vpstep[2]);
//......... the rest of this example is omitted .........
Example 5: transformLaserScanToPointCloud_ (sensor_msgs::PointCloud overload)
void
LaserProjection::transformLaserScanToPointCloud_ (const std::string &target_frame, sensor_msgs::PointCloud &cloud_out, const sensor_msgs::LaserScan &scan_in,
                                                  tf::Transformer& tf, double range_cutoff, int mask)
{
  cloud_out.header = scan_in.header;
  tf::Stamped<tf::Point> pointIn;
  tf::Stamped<tf::Point> pointOut;
  //check if the user has requested the index field
  bool requested_index = false;
  if ((mask & channel_option::Index))
    requested_index = true;
  //we need to make sure that we include the index in our mask
  //in order to guarantee that we get our timestamps right
  mask |= channel_option::Index;
  pointIn.frame_id_ = scan_in.header.frame_id;
  projectLaser_ (scan_in, cloud_out, range_cutoff, false, mask);
  cloud_out.header.frame_id = target_frame;
  // Extract transforms for the beginning and end of the laser scan
  ros::Time start_time = scan_in.header.stamp ;
  ros::Time end_time = scan_in.header.stamp + ros::Duration().fromSec(scan_in.ranges.size()*scan_in.time_increment) ;
  tf::StampedTransform start_transform ;
  tf::StampedTransform end_transform ;
  tf::StampedTransform cur_transform ;
  tf.lookupTransform(target_frame, scan_in.header.frame_id, start_time, start_transform) ;
  tf.lookupTransform(target_frame, scan_in.header.frame_id, end_time, end_transform) ;
  //we need to find the index of the index channel
  int index_channel_idx = -1;
  for(unsigned int i = 0; i < cloud_out.channels.size(); ++i)
  {
    if(cloud_out.channels[i].name == "index")
    {
      index_channel_idx = i;
      break;
    }
  }
  //check just in case
  ROS_ASSERT(index_channel_idx >= 0);
  for(unsigned int i = 0; i < cloud_out.points.size(); ++i)
  {
    //get the index for this point
    uint32_t pt_index = cloud_out.channels[index_channel_idx].values[i];
    // Assume constant motion during the laser-scan, and use slerp to compute intermediate transforms
    tfScalar ratio = pt_index / ( (double) scan_in.ranges.size() - 1.0) ;
    //! \todo Make a function that performs both the slerp and linear interpolation needed to interpolate a Full Transform (Quaternion + Vector)
    //Interpolate translation
    tf::Vector3 v (0, 0, 0);
    v.setInterpolate3(start_transform.getOrigin(), end_transform.getOrigin(), ratio) ;
    cur_transform.setOrigin(v) ;
    //Interpolate rotation
    tf::Quaternion q1, q2 ;
    start_transform.getBasis().getRotation(q1) ;
    end_transform.getBasis().getRotation(q2) ;
    // Compute the slerp-ed rotation
    cur_transform.setRotation( slerp( q1, q2 , ratio) ) ;
    // Apply the transform to the current point
    tf::Vector3 pointIn(cloud_out.points[i].x, cloud_out.points[i].y, cloud_out.points[i].z) ;
    tf::Vector3 pointOut = cur_transform * pointIn ;
    // Copy transformed point into cloud
    cloud_out.points[i].x = pointOut.x();
    cloud_out.points[i].y = pointOut.y();
    cloud_out.points[i].z = pointOut.z();
  }
  //if the user didn't request the index, we want to remove it from the channels
  if(!requested_index)
    cloud_out.channels.erase(cloud_out.channels.begin() + index_channel_idx);
}
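Both overloads inline the same interpolation (linear for the translation, slerp for the rotation), and the TODO comment asks for a helper that does both at once. A minimal sketch of such a helper using tf's LinearMath API follows; the name interpolateTransform is not part of tf and is only an illustration.
#include <tf/transform_datatypes.h>

// Hypothetical helper: interpolate a full rigid-body transform between two
// sampled transforms, with ratio in [0, 1] (0 -> start, 1 -> end).
tf::Transform interpolateTransform(const tf::Transform& start,
                                   const tf::Transform& end,
                                   tfScalar ratio)
{
  tf::Transform result;
  // Linear interpolation of the translation component.
  result.setOrigin(start.getOrigin().lerp(end.getOrigin(), ratio));
  // Spherical linear interpolation of the rotation component.
  result.setRotation(tf::slerp(start.getRotation(), end.getRotation(), ratio));
  return result;
}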
Example 6: calculateStretch
srs_ui_but::COBStretch calculateStretch(std::vector<std::string> links)
{
  ros::Time time_stamp = ros::Time().now();
  float radius = 0;
  float height = 0;
  for (std::vector<std::string>::iterator i = links.begin(); i != links.end(); i++)
  {
    try
    {
      tfListener->waitForTransform(*i, srs_ui_but::DEFAULT_COB_BASE_LINK, time_stamp, ros::Duration(0.2));
      tfListener->lookupTransform(*i, srs_ui_but::DEFAULT_COB_BASE_LINK, time_stamp, linkToBaseTf);
      transformer.setTransform(linkToBaseTf);
      // Transform link to camera
      tf::Stamped<btVector3> p;
      p.setX(0);
      p.setY(0);
      p.setZ(0);
      p.frame_id_ = *i;
      transformer.transformPoint(srs_ui_but::DEFAULT_COB_BASE_LINK, p, p);
      //std::cout << p.getX() << ", " << p.getY() << ", " << p.getZ() << ", " << std::endl;
      float r = pow(p.getX(), 2) + pow(p.getY(), 2);
      if (r > radius)
      {
        radius = r;
        height = p.getZ();
      }
    }
    catch (tf::TransformException& ex)
    {
      ROS_WARN("Transform ERROR:\n %s", ex.what());
    }
  }
  srs_ui_but::COBStretch cob_stretch;
  cob_stretch.radius = sqrt(radius);
  cob_stretch.height = height;
  cob_stretch.time_stamp = time_stamp;
  return cob_stretch;
}
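A hypothetical call site could look as follows; the link names are placeholders, and tfListener, transformer and linkToBaseTf are the globals used by the function above.
// Placeholder link names; replace with links from the robot's URDF.
std::vector<std::string> links;
links.push_back("arm_7_link");
links.push_back("tray_link");
links.push_back("torso_upper_neck_tilt_link");
srs_ui_but::COBStretch stretch = calculateStretch(links);
ROS_INFO("COB stretch: radius %f, height %f", stretch.radius, stretch.height);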
Example 7: initialize_transformations
static void
initialize_transformations(void)
{
  tf::Time::init();
  tf::Transform velodyne_position_on_board;
  velodyne_position_on_board.setOrigin(carmen_vector3_to_tf_vector3(velodyne_pose.position));
  velodyne_position_on_board.setRotation(carmen_rotation_to_tf_quaternion(velodyne_pose.orientation));
  tf::StampedTransform board_to_velodyne_transform(velodyne_position_on_board, tf::Time(0), board_tf_name, velodyne_tf_name);
  transformer.setTransform(board_to_velodyne_transform, "board_to_velodyne_transform");
  tf::Transform board_position_on_car;
  board_position_on_car.setOrigin(carmen_vector3_to_tf_vector3(sensor_board_pose.position));
  board_position_on_car.setRotation(carmen_rotation_to_tf_quaternion(sensor_board_pose.orientation));
  tf::StampedTransform car_to_board_transform(board_position_on_car, tf::Time(0), car_tf_name, board_tf_name);
  transformer.setTransform(car_to_board_transform, "car_to_board_transform");
}
Example 8: initialize_transforms
void
initialize_transforms()
{
  tf::Transform board_to_camera_pose;
  tf::Transform car_to_board_pose;
  tf::Transform world_to_car_pose;
  tf::Transform ultrasonic_sensor_r1_to_car_pose;
  tf::Transform ultrasonic_sensor_r2_to_car_pose;
  tf::Transform ultrasonic_sensor_l1_to_car_pose;
  tf::Transform ultrasonic_sensor_l2_to_car_pose;
  tf::Time::init();
  // initial car pose with respect to the world
  world_to_car_pose.setOrigin(tf::Vector3(car_pose_g.position.x, car_pose_g.position.y, car_pose_g.position.z));
  world_to_car_pose.setRotation(tf::Quaternion(car_pose_g.orientation.yaw, car_pose_g.orientation.pitch, car_pose_g.orientation.roll));
  tf::StampedTransform world_to_car_transform(world_to_car_pose, tf::Time(0), "/world", "/car");
  tf_transformer.setTransform(world_to_car_transform, "world_to_car_transform");
  // board pose with respect to the car
  car_to_board_pose.setOrigin(tf::Vector3(sensor_board_pose_g.position.x, sensor_board_pose_g.position.y, sensor_board_pose_g.position.z));
  car_to_board_pose.setRotation(tf::Quaternion(sensor_board_pose_g.orientation.yaw, sensor_board_pose_g.orientation.pitch, sensor_board_pose_g.orientation.roll)); // yaw, pitch, roll
  tf::StampedTransform car_to_board_transform(car_to_board_pose, tf::Time(0), "/car", "/board");
  tf_transformer.setTransform(car_to_board_transform, "car_to_board_transform");
  // camera pose with respect to the board
  board_to_camera_pose.setOrigin(tf::Vector3(camera_pose_g.position.x, camera_pose_g.position.y, camera_pose_g.position.z));
  board_to_camera_pose.setRotation(tf::Quaternion(camera_pose_g.orientation.yaw, camera_pose_g.orientation.pitch, camera_pose_g.orientation.roll)); // yaw, pitch, roll
  tf::StampedTransform board_to_camera_transform(board_to_camera_pose, tf::Time(0), "/board", "/camera");
  tf_transformer.setTransform(board_to_camera_transform, "board_to_camera_transform");
  // initial ultrasonic sensor r1 pose with respect to the car
  ultrasonic_sensor_r1_to_car_pose.setOrigin(tf::Vector3(ultrasonic_sensor_r1_g.position.x, ultrasonic_sensor_r1_g.position.y, ultrasonic_sensor_r1_g.position.z));
  ultrasonic_sensor_r1_to_car_pose.setRotation(tf::Quaternion(ultrasonic_sensor_r1_g.orientation.yaw, ultrasonic_sensor_r1_g.orientation.pitch, ultrasonic_sensor_r1_g.orientation.roll));
  tf::StampedTransform ultrasonic_sensor_r1_to_car_transform(ultrasonic_sensor_r1_to_car_pose, tf::Time(0), "/car", "/ultrasonic_sensor_r1");
  tf_transformer.setTransform(ultrasonic_sensor_r1_to_car_transform, "ultrasonic_sensor_r1_to_car_transform");
  // initial ultrasonic sensor r2 pose with respect to the car
  ultrasonic_sensor_r2_to_car_pose.setOrigin(tf::Vector3(ultrasonic_sensor_r2_g.position.x, ultrasonic_sensor_r2_g.position.y, ultrasonic_sensor_r2_g.position.z));
  ultrasonic_sensor_r2_to_car_pose.setRotation(tf::Quaternion(ultrasonic_sensor_r2_g.orientation.yaw, ultrasonic_sensor_r2_g.orientation.pitch, ultrasonic_sensor_r2_g.orientation.roll));
  tf::StampedTransform ultrasonic_sensor_r2_to_car_transform(ultrasonic_sensor_r2_to_car_pose, tf::Time(0), "/car", "/ultrasonic_sensor_r2");
  tf_transformer.setTransform(ultrasonic_sensor_r2_to_car_transform, "ultrasonic_sensor_r2_to_car_transform");
  // initial ultrasonic sensor l2 pose with respect to the car
  ultrasonic_sensor_l2_to_car_pose.setOrigin(tf::Vector3(ultrasonic_sensor_l2_g.position.x, ultrasonic_sensor_l2_g.position.y, ultrasonic_sensor_l2_g.position.z));
  ultrasonic_sensor_l2_to_car_pose.setRotation(tf::Quaternion(ultrasonic_sensor_l2_g.orientation.yaw, ultrasonic_sensor_l2_g.orientation.pitch, ultrasonic_sensor_l2_g.orientation.roll));
  tf::StampedTransform ultrasonic_sensor_l2_to_car_transform(ultrasonic_sensor_l2_to_car_pose, tf::Time(0), "/car", "/ultrasonic_sensor_l2");
  tf_transformer.setTransform(ultrasonic_sensor_l2_to_car_transform, "ultrasonic_sensor_l2_to_car_transform");
  // initial ultrasonic sensor l1 pose with respect to the car
  ultrasonic_sensor_l1_to_car_pose.setOrigin(tf::Vector3(ultrasonic_sensor_l1_g.position.x, ultrasonic_sensor_l1_g.position.y, ultrasonic_sensor_l1_g.position.z));
  ultrasonic_sensor_l1_to_car_pose.setRotation(tf::Quaternion(ultrasonic_sensor_l1_g.orientation.yaw, ultrasonic_sensor_l1_g.orientation.pitch, ultrasonic_sensor_l1_g.orientation.roll));
  tf::StampedTransform ultrasonic_sensor_l1_to_car_transform(ultrasonic_sensor_l1_to_car_pose, tf::Time(0), "/car", "/ultrasonic_sensor_l1");
  tf_transformer.setTransform(ultrasonic_sensor_l1_to_car_transform, "ultrasonic_sensor_l1_to_car_transform");
}
Example 9: initialize_pose_6d_transformation_matrix
void
initialize_pose_6d_transformation_matrix()
{
  visual_odometry_pose_6d_transformation_matrix = Matrix::eye(4);
  tf::Transform board_to_camera_pose_g;
  tf::Transform car_to_board_pose;
  tf::Transform world_to_car_pose_g;
  tf::Transform camera_to_visual_odometry_transform;
  // initial car pose with respect to the world
  world_to_car_pose_g.setOrigin(tf::Vector3(car_pose_g.position.x, car_pose_g.position.y, car_pose_g.position.z));
  world_to_car_pose_g.setRotation(tf::Quaternion(car_pose_g.orientation.yaw, car_pose_g.orientation.pitch, car_pose_g.orientation.roll));
  tf::StampedTransform world_to_car_transform(world_to_car_pose_g, tf::Time(0), "/world", "/car");
  transformer.setTransform(world_to_car_transform, "world_to_car_transform");
  // board pose with respect to the car
  car_to_board_pose.setOrigin(tf::Vector3(sensor_board_pose_g.position.x, sensor_board_pose_g.position.y, sensor_board_pose_g.position.z));
  car_to_board_pose.setRotation(tf::Quaternion(sensor_board_pose_g.orientation.yaw, sensor_board_pose_g.orientation.pitch, sensor_board_pose_g.orientation.roll)); // yaw, pitch, roll
  tf::StampedTransform car_to_board_transform(car_to_board_pose, tf::Time(0), "/car", "/board");
  transformer.setTransform(car_to_board_transform, "car_to_board_transform");
  // camera pose with respect to the board
  board_to_camera_pose_g.setOrigin(tf::Vector3(camera_pose_g.position.x, camera_pose_g.position.y, camera_pose_g.position.z));
  board_to_camera_pose_g.setRotation(tf::Quaternion(camera_pose_g.orientation.yaw, camera_pose_g.orientation.pitch, camera_pose_g.orientation.roll)); // yaw, pitch, roll
  tf::StampedTransform board_to_camera_transform(board_to_camera_pose_g, tf::Time(0), "/board", "/camera");
  transformer.setTransform(board_to_camera_transform, "board_to_camera_transform");
  /**
   * Visual odometry pose with respect to the camera
   * (the rotation goes from the parent coordinate system (camera) to
   * the child coordinate system (visual_odometry), following the yaw,
   * pitch, roll angle order).
   */
  camera_to_visual_odometry_transform.setOrigin(tf::Vector3(0.0, 0.0, 0.0)); // x, y, z
  camera_to_visual_odometry_transform.setRotation(tf::Quaternion(-M_PI / 2.0, 0.0, -M_PI / 2.0)); // yaw, pitch, roll
  tf::StampedTransform camera_to_visual_odometry_stamped_transform(camera_to_visual_odometry_transform, tf::Time(0), "/camera", "/visual_odometry"); // time-stamped transform that defines the visual odometry position with respect to the camera
  transformer.setTransform(camera_to_visual_odometry_stamped_transform, "camera_to_visual_odometry_transform"); // add a link to the tf tree with the camera_to_visual_odometry_transform stamped transformation
  // get the transformation of the visual odometry coordinate system with respect to the carmen (car) coordinate system
  transformer.lookupTransform("/car", "/visual_odometry", tf::Time(0), g_car_to_visual_odometry_transform);
}
Example 10: initialize_transformations (xsens handler)
static void
initialize_transformations(xsens_xyz_handler *xsens_handler)
{
  tf::Time::init();
  tf::Transform xsens_position_on_board;
  xsens_position_on_board.setOrigin(carmen_vector3_to_tf_vector3(xsens_handler->xsens_pose.position));
  xsens_position_on_board.setRotation(carmen_rotation_to_tf_quaternion(xsens_handler->xsens_pose.orientation));
  tf::StampedTransform board_to_xsens_transform(xsens_position_on_board, tf::Time(0), board_tf_name, xsens_tf_name);
  transformer.setTransform(board_to_xsens_transform, "board_to_xsens_transform");
  tf::Transform gps_position_on_board;
  gps_position_on_board.setOrigin(carmen_vector3_to_tf_vector3(xsens_handler->gps_pose_in_the_car.position));
  gps_position_on_board.setRotation(carmen_rotation_to_tf_quaternion(xsens_handler->gps_pose_in_the_car.orientation));
  tf::StampedTransform board_to_gps_transform(gps_position_on_board, tf::Time(0), board_tf_name, gps_tf_name);
  transformer.setTransform(board_to_gps_transform, "board_to_gps_transform");
  tf::Transform board_position_on_car;
  board_position_on_car.setOrigin(carmen_vector3_to_tf_vector3(xsens_handler->sensor_board_pose.position));
  board_position_on_car.setRotation(carmen_rotation_to_tf_quaternion(xsens_handler->sensor_board_pose.orientation));
  tf::StampedTransform car_to_board_transform(board_position_on_car, tf::Time(0), car_tf_name, board_tf_name);
  transformer.setTransform(car_to_board_transform, "car_to_board_transform");
}
Example 11: initialize_transforms (velodyne chain)
void
initialize_transforms()
{
  tf::Time::init();
  // initial car pose with respect to the world
  world_to_car_pose.setOrigin(tf::Vector3(car_pose.position.x, car_pose.position.y, car_pose.position.z));
  world_to_car_pose.setRotation(tf::Quaternion(car_pose.orientation.yaw, car_pose.orientation.pitch, car_pose.orientation.roll));
  tf::StampedTransform world_to_car_transform(world_to_car_pose, tf::Time(0), "/world", "/car");
  transformer.setTransform(world_to_car_transform, "world_to_car_transform");
  // board pose with respect to the car
  car_to_board_pose.setOrigin(tf::Vector3(sensor_board_1_pose.position.x, sensor_board_1_pose.position.y, sensor_board_1_pose.position.z));
  car_to_board_pose.setRotation(tf::Quaternion(sensor_board_1_pose.orientation.yaw, sensor_board_1_pose.orientation.pitch, sensor_board_1_pose.orientation.roll)); // yaw, pitch, roll
  tf::StampedTransform car_to_board_transform(car_to_board_pose, tf::Time(0), "/car", "/board");
  transformer.setTransform(car_to_board_transform, "car_to_board_transform");
  // velodyne pose with respect to the board
  board_to_velodyne_pose.setOrigin(tf::Vector3(velodyne_pose.position.x, velodyne_pose.position.y, velodyne_pose.position.z));
  board_to_velodyne_pose.setRotation(tf::Quaternion(velodyne_pose.orientation.yaw, velodyne_pose.orientation.pitch, velodyne_pose.orientation.roll)); // yaw, pitch, roll
  tf::StampedTransform board_to_velodyne_transform(board_to_velodyne_pose, tf::Time(0), "/board", "/velodyne");
  transformer.setTransform(board_to_velodyne_transform, "board_to_velodyne_transform");
}
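Once the individual links have been registered with setTransform, the Transformer chains them on demand. The following fragment is a small sketch (not part of the original file) that queries the composed /world to /velodyne transform through the same global transformer:
// Query the chained transform /world -> /car -> /board -> /velodyne.
tf::StampedTransform world_to_velodyne;
transformer.lookupTransform("/world", "/velodyne", tf::Time(0), world_to_velodyne);
tf::Vector3 velodyne_position = world_to_velodyne.getOrigin();
printf("velodyne in world: %lf %lf %lf\n",
       velodyne_position.x(), velodyne_position.y(), velodyne_position.z());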
Example 12: get_orientation_car_reference_from_message
static carmen_orientation_3D_t
get_orientation_car_reference_from_message(tf::Quaternion xsens_reading)
{
  tf::StampedTransform xsens_to_car;
  transformer.lookupTransform(xsens_tf_name, car_tf_name, tf::Time(0), xsens_to_car);
  tf::Transform xsens_matrix;
  xsens_matrix.setOrigin(tf::Vector3(0.0, 0.0, 0.0));
  xsens_matrix.setRotation(xsens_reading);
  tf::Transform car_pose = xsens_matrix * xsens_to_car;
  carmen_orientation_3D_t orientation = get_carmen_orientation_from_tf_transform(car_pose);
  return orientation;
}
Example 13: get_velodyne_pose_in_relation_to_car_helper
carmen_pose_3D_t
get_velodyne_pose_in_relation_to_car_helper(int argc, char** argv)
{
  static int initialized = 0;
  if (initialized == 0)
  {
    initialize_carmen_parameters(argc, argv);
    initialize_transformations();
    initialized = 1;
  }
  tf::StampedTransform car_to_velodyne;
  transformer.lookupTransform(car_tf_name, velodyne_tf_name, tf::Time(0), car_to_velodyne);
  carmen_pose_3D_t velodyne_pose = tf_transform_to_carmen_pose_3D(car_to_velodyne);
  return velodyne_pose;
}
Example 14: get_car_position_from_message
static carmen_vector_3D_t
get_car_position_from_message(carmen_gps_xyz_message *gps_xyz_message)
{
  tf::StampedTransform gps_to_car;
  transformer.lookupTransform(gps_tf_name, car_tf_name, tf::Time(0), gps_to_car);
  carmen_vector_3D_t gps_position;
  gps_position.x = gps_xyz_message->x;
  gps_position.y = gps_xyz_message->y;
  gps_position.z = gps_xyz_message->z;
  tf::Transform global_to_gps;
  global_to_gps.setOrigin(carmen_vector3_to_tf_vector3(gps_position));
  tf::Quaternion zero_quat(0.0, 0.0, 0.0);
  global_to_gps.setRotation(zero_quat);
  tf::Transform global_to_car = global_to_gps * gps_to_car;
  carmen_vector_3D_t car_position = tf_vector3_to_carmen_vector3(global_to_car.getOrigin());
  return car_position;
}