本文整理汇总了C++中tf::TransformListener::getFrameStrings方法的典型用法代码示例。如果您正苦于以下问题:C++ TransformListener::getFrameStrings方法的具体用法?C++ TransformListener::getFrameStrings怎么用?C++ TransformListener::getFrameStrings使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tf::TransformListener
的用法示例。
在下文中一共展示了TransformListener::getFrameStrings方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: atan
/// Periodic timer callback: classifies every tf frame named "obj_*" as being
/// left of, right of, or central to the human head frame ("head_origin1"),
/// based on the object's bearing in the head frame, and logs the result.
/// Objects whose transform cannot be obtained within 1 s are skipped.
void SpatialPerspectiveLevel2::callbackTimer(const ros::TimerEvent&) {
static tf::TransformListener listener; // static: keeps the tf buffer alive across callbacks
const std::string target_frame = "head_origin1";
std::vector<std::string> frames;
listener.getFrameStrings(frames);
for(size_t i=0; i<frames.size(); i++) {
const std::string& obj_name = frames.at(i);
// only frames published for tracked objects are of interest
if(obj_name.find("obj_")!=std::string::npos) {
geometry_msgs::PointStamped object_head_frame;
try {
geometry_msgs::PointStamped object_object_frame;
// get object coordinates in target_frame, needed because of offset;
// the object's own origin is (0,0,0) in its own frame
object_object_frame.header.frame_id = obj_name;
object_object_frame.point.x = 0;
object_object_frame.point.y = 0;
object_object_frame.point.z = 0;
listener.waitForTransform(obj_name, target_frame, ros::Time::now(), ros::Duration(1.0));
listener.transformPoint(target_frame, object_object_frame, object_head_frame);
// atan2 instead of atan(y/x): avoids division by zero when x == 0 and
// resolves the full quadrant, so objects behind the head (x < 0) are no
// longer folded onto the frontal half-plane and misreported as central.
double angle = atan2(object_head_frame.point.y, object_head_frame.point.x) * 180.0 / M_PI;
if(angle > 5.0) {
ROS_INFO_STREAM("Object " << obj_name << " is left of human.");
} else if(angle < -5.0){
ROS_INFO_STREAM("Object " << obj_name << " is right of human.");
} else {
ROS_INFO_STREAM("Object " << obj_name << " is central.");
}
} catch (const tf::TransformException& ex) { // by const ref: no copy, no slicing
ROS_WARN_STREAM("Skip object " << obj_name << " as point could not be transformed: " << ex.what());
continue;
}
}
}
}
示例2: doRaytrace
void Raytracer::doRaytrace(const pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr& cloud) {
static tf::TransformListener listener;
RayTracing<pcl::PointXYZRGB> sor;
sor.setInputCloud (cloud);
sor.setLeafSize (voxel_leaf_size, voxel_leaf_size, voxel_leaf_size);
sor.initializeVoxelGrid();
ROS_INFO_STREAM("Cloud size: " << sor.getFilteredPointCloud().size());
if(sor.getFilteredPointCloud().empty()) {
return;
}
viewer->spinOnce();
std::string target_frame_cloud=cloud->header.frame_id;
geometry_msgs::PointStamped head_cloud_frame;
try {
// get head position in target_frame, needed because of nose tip
std::string head_frame = "head_origin1";
geometry_msgs::PointStamped head_head_frame;
head_head_frame.header.frame_id = head_frame;
tf::pointTFToMsg(head_offset, head_head_frame.point);
listener.waitForTransform(head_frame, target_frame_cloud, ros::Time::now(), ros::Duration(0.1));
listener.transformPoint(target_frame_cloud, head_head_frame, head_cloud_frame);
} catch (tf::TransformException ex) {
ROS_WARN_STREAM("Head coordinate transformation could not be obtained, return!");
return;
}
// delete old shapes
for (size_t i=0; i<shape_list.size(); i++) {
viewer->removeShape(shape_list.at(i));
}
shape_list.clear();
// check all frames which are known to tf
std::vector<std::string> frames;
listener.getFrameStrings(frames);
for(size_t i=0; i<frames.size(); i++) {
std::string obj_name = frames.at(i);
if(obj_name.find("obj_")!=std::string::npos) {
geometry_msgs::PointStamped object_cloud_frame;
try {
geometry_msgs::PointStamped object_object_frame;
// get object coordinates in target_frame, needed because of offset
object_object_frame.header.frame_id = obj_name;
tf::pointTFToMsg(object_offset, object_object_frame.point);
listener.waitForTransform(obj_name, target_frame_cloud, ros::Time::now(), ros::Duration(0.1));
listener.transformPoint(target_frame_cloud, object_object_frame, object_cloud_frame);
} catch (tf::TransformException ex) {
ROS_WARN_STREAM("Skip object " << obj_name << " as point could not be transformed!");
continue;
}
// draw object in visualizer
// get random color, but consistent for one object
boost::hash<std::string> string_hash;
srand(string_hash(obj_name));
double obj_r = double(rand()) / double(RAND_MAX);
double obj_g = double(rand()) / double(RAND_MAX);
double obj_b = double(rand()) / double(RAND_MAX);
viewer->addSphere(vpt::ros_to_pcl(object_cloud_frame.point),
obj_size,
obj_r, obj_g, obj_b,
obj_name);
shape_list.push_back(obj_name);
// do level 1 perspective taking
int is_occluded=-1;
std::vector <Eigen::Vector3i> out_ray;
sor.rayTraceWrapper(is_occluded,
vpt::ros_to_eigen(object_cloud_frame.point),
vpt::ros_to_eigen(head_cloud_frame.point),
theta,
out_ray);
wysiwyd::wrdac::Relation r_occluded(obj_name, "occluded");
wysiwyd::wrdac::Relation r_visible(obj_name, "visible");
if(is_occluded) {
ROS_INFO_STREAM("Object " << obj_name << " is occluded from partner.");
if(opc->isConnected()) {
wysiwyd::wrdac::Agent* partner = opc->addOrRetrieveEntity<wysiwyd::wrdac::Agent>("partner");
partner->addBelief(r_occluded);
partner->removeBelief(r_visible);
opc->commit(partner);
}
} else {
ROS_INFO_STREAM("Object " << obj_name << " is visible to partner.");
if(opc->isConnected()) {
wysiwyd::wrdac::Agent* partner = opc->addOrRetrieveEntity<wysiwyd::wrdac::Agent>("partner");
partner->addBelief(r_visible);
partner->removeBelief(r_occluded);
opc->commit(partner);
}
//.........这里部分代码省略.........