This article collects typical usage examples of the C++ method tf::TransformListener::transformVector. If you are wondering what TransformListener::transformVector does, how to call it, or where to find real-world usage, the curated code samples below may help. You can also read more about the enclosing class tf::TransformListener.
The following shows 2 code examples of TransformListener::transformVector, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
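Before the project examples, here is a minimal, self-contained sketch of the basic call pattern. It is not taken from either example below; the node name, frame names ("sensor_link", "base_link"), and vector values are placeholders.

#include <ros/ros.h>
#include <tf/transform_listener.h>
#include <geometry_msgs/Vector3Stamped.h>

int main(int argc, char** argv)
{
    ros::init(argc, argv, "transform_vector_sketch");
    tf::TransformListener listener;

    geometry_msgs::Vector3Stamped v_in, v_out;
    v_in.header.frame_id = "sensor_link";  // placeholder source frame
    v_in.header.stamp = ros::Time(0);      // ask tf for the latest available transform
    v_in.vector.x = 1.0;

    try
    {
        // Wait until the transform is available, then rotate the vector into
        // the target frame. transformVector applies rotation only; a free
        // vector has no position, so the frames' translation is ignored.
        listener.waitForTransform("base_link", "sensor_link",
                                  ros::Time(0), ros::Duration(1.0));
        listener.transformVector("base_link", v_in, v_out);
    }
    catch (tf::TransformException& ex)
    {
        ROS_ERROR("%s", ex.what());
    }
    return 0;
}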
Example 1: wrench_callback
/*******************************************************************************
ROS node callback.
*******************************************************************************/
void wrench_callback(const geometry_msgs::WrenchStampedConstPtr& wrench)
{
    // Both force and torque are supplied in the same coordinate frame.
    geometry_msgs::Vector3Stamped f_in, f_out;
    geometry_msgs::Vector3Stamped t_in, t_out;
    std_msgs::Header h;
    h = wrench->header;
    h.stamp = ros::Time(0);  // use the latest available transform
    t_in.header = f_in.header = h;
    f_in.vector = wrench->wrench.force;
    t_in.vector = wrench->wrench.torque;
    try
    {
        ls_.transformVector(sensable_frame_name_, f_in, f_out);
        ls_.transformVector(sensable_frame_name_, t_in, t_out);
    }
    catch (tf::TransformException& ex)
    {
        // If the transform fails, f_out and t_out keep their default zero
        // values, so only the damping term below remains.
        ROS_ERROR("%s", ex.what());
    }
    // Some people might not like this extra damping, but it helps to
    // stabilize the overall force feedback. It isn't like we are getting
    // direct impedance matching from the omni anyway.
    state_->force[0] = f_out.vector.x - damping_k_ * state_->velocity[0];
    state_->force[1] = f_out.vector.y - damping_k_ * state_->velocity[1];
    state_->force[2] = f_out.vector.z - damping_k_ * state_->velocity[2];
    // TODO: torque should be split back to gimbal axes
    state_->torque[0] = t_out.vector.x;
    state_->torque[1] = t_out.vector.y;
    state_->torque[2] = t_out.vector.z;
}
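A note on this pattern (our reading, not a statement from the original project): transformVector applies only the rotation between the two frames, so the force and torque of the wrench can be transformed independently as free vectors; a position would instead go through transformPoint, which also applies the translation. A minimal contrast sketch, with a placeholder target frame:

#include <tf/transform_listener.h>
#include <geometry_msgs/Vector3Stamped.h>
#include <geometry_msgs/PointStamped.h>

// Hypothetical helper contrasting the two calls; "target_frame" is a placeholder.
// Both calls may throw tf::TransformException if the transform is unavailable.
void contrast_vector_and_point(tf::TransformListener& listener,
                               const geometry_msgs::Vector3Stamped& force,
                               const geometry_msgs::PointStamped& position)
{
    geometry_msgs::Vector3Stamped force_out;   // rotated only
    geometry_msgs::PointStamped position_out;  // rotated and translated
    listener.transformVector("target_frame", force, force_out);
    listener.transformPoint("target_frame", position, position_out);
}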
Example 2: positioned_patch
//......... part of the code is omitted here .........
        if ((dimension > config_.max_major_axis) ||
            (dimension < config_.min_major_axis)) {
            continue;
        }
    }
    // find bounding box of mask
    cv::Rect rect;
    samplereturn::computeBoundingBox(cv_ptr_mask->image, &rect);
    // turn image space bounding box into 4 3d rays
    cv::Point2d patch_origin(patch.image_roi.x_offset,
                             patch.image_roi.y_offset);
    std::vector<cv::Point2d> rpoints;
    rpoints.push_back(cv::Point2d(rect.x, rect.y) + patch_origin);
    rpoints.push_back(cv::Point2d(rect.x, rect.y + rect.height) + patch_origin);
    rpoints.push_back(cv::Point2d(rect.x + rect.width, rect.y + rect.height) + patch_origin);
    rpoints.push_back(cv::Point2d(rect.x + rect.width, rect.y) + patch_origin);
    std::vector<Eigen::Vector3d> rays;
    rays.resize(rpoints.size());
    std::transform(rpoints.begin(), rpoints.end(), rays.begin(),
        [model, patches_msg, this](cv::Point2d uv) -> Eigen::Vector3d
        {
            cv::Point3d xyz = model.projectPixelTo3dRay(uv);
            tf::Stamped<tf::Vector3> vect(tf::Vector3(xyz.x, xyz.y, xyz.z),
                                          patches_msg->header.stamp,
                                          patches_msg->header.frame_id);
            tf::Stamped<tf::Vector3> vect_t;
            listener_.transformVector(clipping_frame_id_, vect, vect_t);
            Eigen::Vector3d ray;
            tf::vectorTFToEigen(vect_t, ray);
            return ray;
        });
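    // Note: the lambda above uses the tf::Stamped<tf::Vector3> overload of
    // transformVector, so the pixel rays are only rotated into
    // clipping_frame_id_; the ray origin (camera_origin) is applied
    // separately when the clipping planes are built below.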
    // clip point cloud by the planes of the bounding volume
    // described by the rays above
    // Add one more clipping plane at z=0 in the clipping frame
    // to remove noise points below the ground
    pcl::PointIndices::Ptr clipped(new pcl::PointIndices);
    for (size_t i = 0; i < rays.size() + 1; i++)
    {
        Eigen::Vector4f plane;
        if (i < rays.size())
        {
            plane.segment<3>(0) = -rays[i].cross(rays[(i+1)%4]).cast<float>();
            plane[3] = -plane.segment<3>(0).dot(camera_origin.cast<float>());
        }
        else
        {
            plane << 0, 0, 1, config_.bottom_clipping_depth;
        }
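        // Side planes: the normal is the (negated) cross product of two
        // adjacent rays, and the offset places the plane through
        // camera_origin; the final plane clips along z to drop points
        // below the ground (see the comment above the loop).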
        pcl::PlaneClipper3D<pcl::PointXYZRGB> clip(plane);
        std::vector<int> newclipped;
        clip.clipPointCloud3D(*colorpoints, newclipped, clipped->indices);
        clipped->indices.resize(newclipped.size());
        std::copy(newclipped.begin(), newclipped.end(), clipped->indices.begin());
    }
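    // Note (assuming pcl::PlaneClipper3D reports the indices that pass each
    // plane): every iteration restricts the clip to the points that survived
    // the previous planes, so clipped->indices ends up holding only the
    // points inside the bounding volume and above the ground clip.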
    // bail if the clipped pointcloud is empty
    if (clipped->indices.size() == 0)