本文整理汇总了C++中KinfuTracker类的典型用法代码示例。如果您正苦于以下问题:C++ KinfuTracker类的具体用法?C++ KinfuTracker怎么用?C++ KinfuTracker使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了KinfuTracker类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: showGeneratedDepth
void
showGeneratedDepth (KinfuTracker& kinfu, const Eigen::Affine3f& pose)
{
// Raycast the TSDF volume from the given pose and display the resulting
// synthetic depth map in the depth viewer.
raycaster_ptr_->run (kinfu.volume (), pose, kinfu.getCyclicalBufferStructure ());
raycaster_ptr_->generateDepthImage (generated_depth_);

// Pull the depth image back to the host.
vector<unsigned short> depth_host;
int cols;
generated_depth_.download (depth_host, cols);

// Display as a short image clamped to the [0, 5000] range.
viewerDepth_.showShortImage (&depth_host[0], generated_depth_.cols (), generated_depth_.rows (), 0, 5000, true);
}
示例2: generateCloud
// Extracts the global point cloud from the tracker's TSDF volume, either on
// the CPU or the GPU depending on extraction_mode_, optionally fetching
// per-point colors, and merges the result into combined_color_ptr_.
//
// FIX: removed the dead local `valid_extracted_` (assigned in both branches
// but never read). Also added braces at the former dangling-else so the code
// shape matches its actual semantics: combined_color_ptr_->clear() and
// generateXYZRGB() always run when extraction succeeded, regardless of
// integrate_colors (the original indentation suggested otherwise).
void SceneCloudView::generateCloud(KinfuTracker& kinfu, bool integrate_colors)
{
viewer_pose_ = kinfu.getCameraPose();
ScopeTimeT time ("PointCloud Extraction");
cout << "\nGetting cloud... " << flush;
valid_combined_ = false;
if (extraction_mode_ != GPU_Connected6) // So use CPU
{
kinfu.volume().fetchCloudHost (*cloud_ptr_, extraction_mode_ == CPU_Connected26);
}
else
{
// GPU extraction into a reusable device buffer.
DeviceArray<PointXYZ> extracted = kinfu.volume().fetchCloud (cloud_buffer_device_);
if (extracted.size() > 0)
{
extracted.download (cloud_ptr_->points);
cloud_ptr_->width = (int)cloud_ptr_->points.size ();
cloud_ptr_->height = 1; // unorganized cloud
if (integrate_colors)
{
// Fetch one color per extracted point from the color volume.
kinfu.colorVolume().fetchColors(extracted, point_colors_device_);
point_colors_device_.download(point_colors_ptr_->points);
point_colors_ptr_->width = (int)point_colors_ptr_->points.size ();
point_colors_ptr_->height = 1;
//pcl::gpu::mergePointRGB(extracted, point_colors_device_, combined_color_device_);
//combined_color_device_.download (combined_color_ptr_->points);
}
else
{
point_colors_ptr_->points.clear();
}
// Rebuild the combined XYZRGB cloud in all successful-extraction cases.
combined_color_ptr_->clear();
generateXYZRGB(cloud_ptr_, point_colors_ptr_, combined_color_ptr_);
}
else
{
cout << "Failed to Extract Cloud " << endl;
}
}
cout << "Done. Cloud size: " << cloud_ptr_->points.size () / 1000 << "K" << endl;
}
示例3: publishGeneratedDepth
// Raycasts the TSDF volume from the current camera pose and publishes the
// resulting synthetic depth image on pubgen.
//
// BUG FIX: the original downloaded the generated depth into `data` but then
// filled the outgoing message from `view_host_` (the color scene view) with
// "bgr8" encoding and view_device_ dimensions — the depth data was never
// published. We now publish the downloaded 16-bit depth buffer with the
// depth image's own dimensions and a 16-bit mono encoding.
void
publishGeneratedDepth (KinfuTracker& kinfu)
{
const Eigen::Affine3f& pose = kinfu.getCameraPose();
raycaster_ptr_->run(kinfu.volume(), pose, kinfu.getCyclicalBufferStructure());
raycaster_ptr_->generateDepthImage(generated_depth_);

int cols;
vector<unsigned short> data;
generated_depth_.download(data, cols);

sensor_msgs::ImagePtr msg(new sensor_msgs::Image);
// One unsigned short per pixel => step is cols * sizeof(unsigned short).
sensor_msgs::fillImage(*msg, "mono16",
                       generated_depth_.rows(), generated_depth_.cols(),
                       generated_depth_.cols() * sizeof(unsigned short),
                       &data[0]);
pubgen.publish(msg);
}
示例4: publishScene
// Renders the tracker's current scene view and publishes it as an RGB image.
// `registration` and `pose_ptr` are currently unused: raycasting from an
// arbitrary pose and paint-in of the live RGB stream are both disabled below.
void
publishScene (KinfuTracker& kinfu, std_msgs::Header header, bool registration, Eigen::Affine3f* pose_ptr = 0)
{
// Disabled: render from a caller-supplied pose instead of the tracker's.
// if (pose_ptr)
// {
//   raycaster_ptr_->run ( kinfu.volume (), *pose_ptr, kinfu.getCyclicalBufferStructure () ); //says in cmake it does not know it
//   raycaster_ptr_->generateSceneView(view_device_);
// }
// else
{
kinfu.getImage (view_device_);
}

/* Disabled: overlay of the registered color stream.
if (paint_image_ && registration && !pose_ptr)
{
  colors_device_.upload (rgb24.data, rgb24.step, rgb24.rows, rgb24.cols);
  paint3DView (colors_device_, view_device_);
}
*/

// Bring the rendered frame back to the host.
int host_cols;
view_device_.download (view_host_, host_cols);

// Wrap the RGB frame (3 bytes per pixel) in a sensor_msgs::Image.
sensor_msgs::ImagePtr msg (new sensor_msgs::Image);
sensor_msgs::fillImage ((*msg), "rgb8", view_device_.rows (), view_device_.cols (),
                        view_device_.cols () * 3, &view_host_[0]);
msg->header.frame_id = header.frame_id;
pubKinfu.publish (msg);
}
示例5: generateDepth
// Raycasts the TSDF volume from `pose` and writes the synthetic depth map
// into the caller-supplied buffer. No visualization is performed here.
void ImageView::generateDepth (KinfuTracker& kinfu, const Eigen::Affine3f& pose, KinfuTracker::DepthMap &generated_depth_)
{
raycaster_ptr_->run (kinfu.volume (), pose);
raycaster_ptr_->generateDepthImage (generated_depth_);

// Display of the generated depth is disabled:
//if (viz_)
//  viewerDepth_->showShortImage (&data[0], generated_depth_.cols(), generated_depth_.rows(), 0, 1000, true);
}
示例6: publishPose
// Broadcasts the tracker's current camera pose as a TF transform from
// "/odom" to "/kinfu_frame", stamped with the incoming header's time.
void
publishPose (KinfuTracker& kinfu, std_msgs::Header header)
{
const Eigen::Affine3f pose = kinfu.getCameraPose ();
Eigen::Matrix<float, 3, 3, Eigen::RowMajor> rot = pose.linear ();
Eigen::Vector3f trans = pose.translation ();
//TODO: start position: init_tcam_ = volume_size * 0.5f - Vector3f (0, 0, volume_size (2) / 2 * 1.2f);

// Copy the Eigen rotation/translation into TF types element by element.
tf::Matrix3x3 basis (rot (0, 0), rot (0, 1), rot (0, 2),
                     rot (1, 0), rot (1, 1), rot (1, 2),
                     rot (2, 0), rot (2, 1), rot (2, 2));
tf::Vector3 origin (trans[0], trans[1], trans[2]);

tf::Transform transform (basis, origin);
odom_broad.sendTransform (tf::StampedTransform (transform, header.stamp, "/odom", "/kinfu_frame"));
}
示例7: displayICPState
// Updates the viewer overlays that reflect the ICP tracking state.
//
// Handles the two state transitions between "tracking" and "lost":
//  - lost -> recovered: remove the saved camera glyphs, clear clouds, and
//    repaint the status text in green.
//  - tracking -> lost: draw the last good camera pose in green and repaint
//    the status text in red.
// Additionally, while ICP is lost the current pose *estimate* is redrawn
// (in red) on every call so it follows the estimator.
//
// kinfu      - tracker queried for icpIsLost() and poses.
// was_lost_  - ICP state observed on the previous call; comparing it with
//              the current state detects the transitions above.
void
displayICPState (KinfuTracker& kinfu, bool was_lost_)
{
string name = "last_good_track";
string name_estimate = "last_good_estimate";
if (was_lost_ && !kinfu.icpIsLost ()) //execute only when ICP just recovered (i.e. was_lost_ == true && icpIsLost == false)
{
removeCamera(name);
removeCamera(name_estimate);
clearClouds(false);
// Green status text: tracking is healthy again.
cloud_viewer_.updateText ("ICP State: OK", 450, 55, 20, 0.0, 1.0, 0.0, "icp");
cloud_viewer_.updateText ("Press 'S' to save the current world", 450, 35, 10, 0.0, 1.0, 0.0, "icp_save");
cloud_viewer_.updateText ("Press 'R' to reset the system", 450, 15, 10, 0.0, 1.0, 0.0, "icp_reset");
}
else if (!was_lost_ && kinfu.icpIsLost()) //execute only when we just lost ourselves (i.e. was_lost_ = false && icpIsLost == true)
{
// draw position of the last good track
Eigen::Affine3f last_pose = kinfu.getCameraPose();
drawCamera(last_pose, name, 0.0, 1.0, 0.0);
// Red status text: tracking lost.
cloud_viewer_.updateText ("ICP State: LOST", 450, 55, 20, 1.0, 0.0, 0.0, "icp");
cloud_viewer_.updateText ("Press 'S' to save the current world", 450, 35, 10, 1.0, 0.0, 0.0, "icp_save");
cloud_viewer_.updateText ("Press 'R' to reset the system", 450, 15, 10, 1.0, 0.0, 0.0, "icp_reset");
}
if( kinfu.icpIsLost() )
{
// While lost, refresh the estimated-pose glyph every call (remove + redraw).
removeCamera(name_estimate);
// draw current camera estimate
Eigen::Affine3f last_pose_estimate = kinfu.getLastEstimatedPose();
drawCamera(last_pose_estimate, name_estimate, 1.0, 0.0, 0.0);
}
// cout << "current estimated pose: " << kinfu.getLastEstimatedPose().translation() << std::endl << kinfu.getLastEstimatedPose().linear() << std::endl;
//
}
示例8: show
// Downloads the tracker's last-frame point cloud from the GPU and renders it
// in this view's cloud viewer.
void CurrentFrameCloudView::show (const KinfuTracker& kinfu)
{
kinfu.getLastFrameCloud (cloud_device_);

int cols;
cloud_device_.download (cloud_ptr_->points, cols);

// Mirror the organized layout of the device buffer; depth frames may
// contain invalid points, so the cloud is not dense.
cloud_ptr_->width = cloud_device_.cols ();
cloud_ptr_->height = cloud_device_.rows ();
cloud_ptr_->is_dense = false;

cloud_viewer_.removeAllPointClouds ();
cloud_viewer_.addPointCloud<PointXYZ> (cloud_ptr_);
cloud_viewer_.spinOnce ();
}
示例9: showCloudXYZ
// Downloads the last frame's XYZ point cloud from the device and displays it
// in the XYZ cloud viewer.
void KinectVisualization::showCloudXYZ(const KinfuTracker& kinfu)
{
//cout << cloud_device_xyz.cols() << endl;
kinfu.getLastFrameCloud (cloud_device_xyz);

int cols;
cloud_device_xyz.download (cloud_ptr_->points, cols);

// Preserve the organized (rows x cols) structure; invalid depth pixels mean
// the cloud is not dense.
cloud_ptr_->width = cloud_device_xyz.cols ();
cloud_ptr_->height = cloud_device_xyz.rows ();
cloud_ptr_->is_dense = false;
//cout << cloud_ptr_->width << " " << cloud_ptr_->height << endl;
//writeCloudFile(2, cloud_ptr_);

cloud_viewer_xyz_.removeAllPointClouds ();
cloud_viewer_xyz_.addPointCloud<PointXYZ> (cloud_ptr_);
}
示例10: showMesh
// Runs marching cubes over the TSDF volume and displays the extracted mesh.
// The color flag is accepted for interface parity but unused here.
void
showMesh (KinfuTracker& kinfu, bool /*integrate_colors*/)
{
ScopeTimeT time ("Mesh Extraction");
cout << "\nGetting mesh... " << flush;

// Lazily construct the marching-cubes extractor on first use.
if (!marching_cubes_)
marching_cubes_ = MarchingCubes::Ptr (new MarchingCubes ());

// Triangles are produced in a reusable GPU buffer, then converted to a
// host-side polygon mesh.
DeviceArray<PointXYZ> device_triangles = marching_cubes_->run (kinfu.volume (), triangles_buffer_device_);
mesh_ptr_ = convertToMesh (device_triangles);

cloud_viewer_.removeAllPointClouds ();
if (mesh_ptr_)
cloud_viewer_.addPolygonMesh (*mesh_ptr_);

cout << "Done. Triangles number: " << device_triangles.size () / MarchingCubes::POINTS_PER_TRIANGLE / 1000 << "K" << endl;
}
示例11: show
// Extracts the full cloud from the TSDF volume and renders it.
//
// Extraction path depends on extraction_mode_:
//  - anything other than GPU_Connected6: host-side extraction straight into
//    cloud_ptr_ (CPU_Connected26 selects the 26-connected variant).
//  - GPU_Connected6: device-side extraction; optionally also fetches normals
//    (merged into combined_ptr_) and per-point colors.
// Finally the result is added to cloud_viewer_, with normals drawn when a
// combined point+normal cloud was produced.
//
// kinfu            - tracker whose volume (and color volume) is read.
// integrate_colors - when true, fetch per-point colors from the color volume
//                    (GPU path only; requires color integration enabled).
void
show (KinfuTracker& kinfu, bool integrate_colors)
{
// Remember the camera pose the cloud was captured at.
viewer_pose_ = kinfu.getCameraPose();
ScopeTimeT time ("PointCloud Extraction");
cout << "\nGetting cloud... " << flush;
valid_combined_ = false;
if (extraction_mode_ != GPU_Connected6) // So use CPU
{
kinfu.volume().fetchCloudHost (*cloud_ptr_, extraction_mode_ == CPU_Connected26);
}
else
{
// GPU extraction into a reusable device buffer; `extracted` is consumed
// again below by fetchNormals/fetchColors, so its lifetime spans this block.
DeviceArray<PointXYZ> extracted = kinfu.volume().fetchCloud (cloud_buffer_device_);
if (compute_normals_)
{
kinfu.volume().fetchNormals (extracted, normals_device_);
// Merge points and normals into a single PointNormal cloud on the device,
// then download; width/height mark it as an unorganized cloud.
pcl::gpu::mergePointNormal (extracted, normals_device_, combined_device_);
combined_device_.download (combined_ptr_->points);
combined_ptr_->width = (int)combined_ptr_->points.size ();
combined_ptr_->height = 1;
valid_combined_ = true;
}
else
{
extracted.download (cloud_ptr_->points);
cloud_ptr_->width = (int)cloud_ptr_->points.size ();
cloud_ptr_->height = 1;
}
if (integrate_colors)
{
// One color per extracted point, pulled from the color volume.
kinfu.colorVolume().fetchColors(extracted, point_colors_device_);
point_colors_device_.download(point_colors_ptr_->points);
point_colors_ptr_->width = (int)point_colors_ptr_->points.size ();
point_colors_ptr_->height = 1;
}
else
point_colors_ptr_->points.clear();
}
size_t points_size = valid_combined_ ? combined_ptr_->points.size () : cloud_ptr_->points.size ();
cout << "Done. Cloud size: " << points_size / 1000 << "K" << endl;
cloud_viewer_.removeAllPointClouds ();
if (valid_combined_)
{
// Points + normals available: render with RGB handler and normal glyphs
// (one normal drawn per 50 points).
visualization::PointCloudColorHandlerRGBHack<PointNormal> rgb(combined_ptr_, point_colors_ptr_);
cloud_viewer_.addPointCloud<PointNormal> (combined_ptr_, rgb, "Cloud");
cloud_viewer_.addPointCloudNormals<PointNormal>(combined_ptr_, 50);
}
else
{
visualization::PointCloudColorHandlerRGBHack<PointXYZ> rgb(cloud_ptr_, point_colors_ptr_);
cloud_viewer_.addPointCloud<PointXYZ> (cloud_ptr_, rgb);
}
}