This page collects typical usage examples of the C++ method KinfuTracker::getCameraPose. If you are unsure what KinfuTracker::getCameraPose does or how to use it, the hand-picked code examples below may help. You can also explore further usage examples of its containing class, KinfuTracker.
Five code examples of KinfuTracker::getCameraPose are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
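Before the full examples, here is a minimal sketch of the call itself. It is only an illustration, assuming `kinfu` is an initialized pcl::gpu::KinfuTracker (classic kinfu module header shown; the large-scale variant used by some examples below exposes the same accessor) that has already tracked at least one depth frame:

// Minimal sketch, assuming an initialized tracker that has processed a frame.
#include <pcl/gpu/kinfu/kinfu.h>
#include <Eigen/Geometry>
#include <iostream>

void
printCurrentPose (pcl::gpu::KinfuTracker& kinfu)
{
  // getCameraPose () returns the current camera pose as an Eigen::Affine3f
  // (rotation + translation) expressed in the KinFu volume coordinate frame.
  Eigen::Affine3f pose = kinfu.getCameraPose ();
  std::cout << "translation: " << pose.translation ().transpose () << std::endl;
  std::cout << "rotation:\n"   << pose.linear () << std::endl;
}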
Example 1: transform
void
publishPose (KinfuTracker& kinfu, std_msgs::Header header)
{
  // Split the current camera pose into its rotation and translation parts.
  Eigen::Matrix<float, 3, 3, Eigen::RowMajor> erreMats = kinfu.getCameraPose ().linear ();
  Eigen::Vector3f teVecs = kinfu.getCameraPose ().translation ();
  //TODO: start position: init_tcam_ = volume_size * 0.5f - Vector3f (0, 0, volume_size (2) / 2 * 1.2f);

  // Broadcast the pose as a tf transform from /odom to /kinfu_frame.
  tf::Transform transform (
      tf::Matrix3x3 (erreMats (0, 0), erreMats (0, 1), erreMats (0, 2),
                     erreMats (1, 0), erreMats (1, 1), erreMats (1, 2),
                     erreMats (2, 0), erreMats (2, 1), erreMats (2, 2)),
      tf::Vector3 (teVecs[0], teVecs[1], teVecs[2]));
  odom_broad.sendTransform (tf::StampedTransform (transform, header.stamp, "/odom", "/kinfu_frame"));
}
Example 2: generateCloud
void SceneCloudView::generateCloud (KinfuTracker& kinfu, bool integrate_colors)
{
  viewer_pose_ = kinfu.getCameraPose ();
  ScopeTimeT time ("PointCloud Extraction");
  cout << "\nGetting cloud... " << flush;

  valid_combined_ = false;
  bool valid_extracted_ = false;

  if (extraction_mode_ != GPU_Connected6)     // So use CPU
  {
    kinfu.volume ().fetchCloudHost (*cloud_ptr_, extraction_mode_ == CPU_Connected26);
  }
  else
  {
    DeviceArray<PointXYZ> extracted = kinfu.volume ().fetchCloud (cloud_buffer_device_);
    if (extracted.size () > 0)
    {
      valid_extracted_ = true;
      extracted.download (cloud_ptr_->points);
      cloud_ptr_->width = (int)cloud_ptr_->points.size ();
      cloud_ptr_->height = 1;

      if (integrate_colors)
      {
        kinfu.colorVolume ().fetchColors (extracted, point_colors_device_);
        point_colors_device_.download (point_colors_ptr_->points);
        point_colors_ptr_->width = (int)point_colors_ptr_->points.size ();
        point_colors_ptr_->height = 1;
        //pcl::gpu::mergePointRGB(extracted, point_colors_device_, combined_color_device_);
        //combined_color_device_.download (combined_color_ptr_->points);
      }
      else
        point_colors_ptr_->points.clear ();

      combined_color_ptr_->clear ();
      generateXYZRGB (cloud_ptr_, point_colors_ptr_, combined_color_ptr_);
    }
    else
    {
      valid_extracted_ = false;
      cout << "Failed to Extract Cloud " << endl;
    }
  }
  cout << "Done. Cloud size: " << cloud_ptr_->points.size () / 1000 << "K" << endl;
}
Example 3: msg
void
publishGeneratedDepth (KinfuTracker& kinfu)
{
  // Raycast the TSDF volume from the current camera pose.
  const Eigen::Affine3f& pose = kinfu.getCameraPose ();
  raycaster_ptr_->run (kinfu.volume (), pose, kinfu.getCyclicalBufferStructure ());
  raycaster_ptr_->generateDepthImage (generated_depth_);

  int c;
  vector<unsigned short> data;
  generated_depth_.download (data, c);

  // Note: the depth buffer downloaded into `data` is not used below; the
  // published message is filled from the colour view buffer (view_host_).
  sensor_msgs::ImagePtr msg (new sensor_msgs::Image);
  sensor_msgs::fillImage (*msg, "bgr8", view_device_.rows (), view_device_.cols (), view_device_.cols () * 3, &view_host_[0]);
  pubgen.publish (msg);
}
Example 4: if
void
displayICPState (KinfuTracker& kinfu, bool was_lost_)
{
  string name = "last_good_track";
  string name_estimate = "last_good_estimate";

  if (was_lost_ && !kinfu.icpIsLost ())   // execute only when ICP just recovered (i.e. was_lost_ == true && icpIsLost == false)
  {
    removeCamera (name);
    removeCamera (name_estimate);
    clearClouds (false);
    cloud_viewer_.updateText ("ICP State: OK", 450, 55, 20, 0.0, 1.0, 0.0, "icp");
    cloud_viewer_.updateText ("Press 'S' to save the current world", 450, 35, 10, 0.0, 1.0, 0.0, "icp_save");
    cloud_viewer_.updateText ("Press 'R' to reset the system", 450, 15, 10, 0.0, 1.0, 0.0, "icp_reset");
  }
  else if (!was_lost_ && kinfu.icpIsLost ())   // execute only when we just lost ourselves (i.e. was_lost_ == false && icpIsLost == true)
  {
    // draw position of the last good track
    Eigen::Affine3f last_pose = kinfu.getCameraPose ();
    drawCamera (last_pose, name, 0.0, 1.0, 0.0);
    cloud_viewer_.updateText ("ICP State: LOST", 450, 55, 20, 1.0, 0.0, 0.0, "icp");
    cloud_viewer_.updateText ("Press 'S' to save the current world", 450, 35, 10, 1.0, 0.0, 0.0, "icp_save");
    cloud_viewer_.updateText ("Press 'R' to reset the system", 450, 15, 10, 1.0, 0.0, 0.0, "icp_reset");
  }

  if (kinfu.icpIsLost ())
  {
    removeCamera (name_estimate);
    // draw current camera estimate
    Eigen::Affine3f last_pose_estimate = kinfu.getLastEstimatedPose ();
    drawCamera (last_pose_estimate, name_estimate, 1.0, 0.0, 0.0);
  }
  // cout << "current estimated pose: " << kinfu.getLastEstimatedPose().translation() << std::endl << kinfu.getLastEstimatedPose().linear() << std::endl;
}
Example 5: time
void
show (KinfuTracker& kinfu, bool integrate_colors)
{
  viewer_pose_ = kinfu.getCameraPose ();
  ScopeTimeT time ("PointCloud Extraction");
  cout << "\nGetting cloud... " << flush;

  valid_combined_ = false;

  if (extraction_mode_ != GPU_Connected6)     // So use CPU
  {
    kinfu.volume ().fetchCloudHost (*cloud_ptr_, extraction_mode_ == CPU_Connected26);
  }
  else
  {
    DeviceArray<PointXYZ> extracted = kinfu.volume ().fetchCloud (cloud_buffer_device_);

    if (compute_normals_)
    {
      kinfu.volume ().fetchNormals (extracted, normals_device_);
      pcl::gpu::mergePointNormal (extracted, normals_device_, combined_device_);
      combined_device_.download (combined_ptr_->points);
      combined_ptr_->width = (int)combined_ptr_->points.size ();
      combined_ptr_->height = 1;
      valid_combined_ = true;
    }
    else
    {
      extracted.download (cloud_ptr_->points);
      cloud_ptr_->width = (int)cloud_ptr_->points.size ();
      cloud_ptr_->height = 1;
    }

    if (integrate_colors)
    {
      kinfu.colorVolume ().fetchColors (extracted, point_colors_device_);
      point_colors_device_.download (point_colors_ptr_->points);
      point_colors_ptr_->width = (int)point_colors_ptr_->points.size ();
      point_colors_ptr_->height = 1;
    }
    else
      point_colors_ptr_->points.clear ();
  }
  size_t points_size = valid_combined_ ? combined_ptr_->points.size () : cloud_ptr_->points.size ();
  cout << "Done. Cloud size: " << points_size / 1000 << "K" << endl;

  cloud_viewer_.removeAllPointClouds ();
  if (valid_combined_)
  {
    visualization::PointCloudColorHandlerRGBHack<PointNormal> rgb (combined_ptr_, point_colors_ptr_);
    cloud_viewer_.addPointCloud<PointNormal> (combined_ptr_, rgb, "Cloud");
    cloud_viewer_.addPointCloudNormals<PointNormal> (combined_ptr_, 50);
  }
  else
  {
    visualization::PointCloudColorHandlerRGBHack<PointXYZ> rgb (cloud_ptr_, point_colors_ptr_);
    cloud_viewer_.addPointCloud<PointXYZ> (cloud_ptr_, rgb);
  }
}