This article collects typical usage examples of the C++ method pcl::visualization::PCLVisualizer::setCameraPosition. If you are wondering what PCLVisualizer::setCameraPosition does, how to call it, or where to find real-world examples of it, the curated code samples below may help. You can also look further into the usage of its containing class, pcl::visualization::PCLVisualizer.
Below are 6 code examples of the PCLVisualizer::setCameraPosition method, sorted by popularity by default.
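Before the examples, here is a minimal, self-contained sketch of the two setCameraPosition overloads as they appear in PCL 1.7-era headers (a reference sketch only: the argument layout is assumed to be position / focal point / up vector for the 9-argument form and position / up vector for the 6-argument form, and may differ slightly between PCL versions):
#include <pcl/visualization/pcl_visualizer.h>

int main ()
{
  pcl::visualization::PCLVisualizer viewer ("setCameraPosition demo");
  viewer.addCoordinateSystem (1.0);

  // 9-argument overload: camera position, focal (look-at) point, view-up vector.
  viewer.setCameraPosition (0.0, 0.0, -2.0,    // camera position
                            0.0, 0.0,  1.0,    // focal point
                            0.0, -1.0, 0.0);   // up vector (Kinect-style frames often use -Y as up)

  // 6-argument overload: camera position and view-up vector only,
  // as used by several of the examples below (with a trailing viewport id of 0).
  viewer.setCameraPosition (0.0, 0.0, -2.0,    // camera position
                            0.0, -1.0, 0.0,    // up vector
                            0);                // viewport

  viewer.spin ();
  return 0;
}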
Example 1: initViewer
void initViewer(pcl::visualization::PCLVisualizer &viewer) {
  viewer.setBackgroundColor(0, 0, 0);
  viewer.addCoordinateSystem(1.0, "reference");
  viewer.initCameraParameters();
  viewer.setRepresentationToPointsForAllActors();
  viewer.setCameraPosition(0, 0, -1, 0, 0, 0, 0, -1, 0);
  viewer.registerKeyboardCallback(keyboardCallback);
}
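The keyboardCallback passed to registerKeyboardCallback above is defined outside this snippet. A minimal sketch of a free function matching the callback signature that registerKeyboardCallback accepts is shown below (the body is purely illustrative; only the signature is dictated by the PCL API):
#include <iostream>
#include <pcl/visualization/pcl_visualizer.h>

// Hypothetical callback; registerKeyboardCallback expects this signature
// (a KeyboardEvent plus an optional user-supplied cookie pointer).
void keyboardCallback (const pcl::visualization::KeyboardEvent& event, void* cookie)
{
  if (event.keyDown () && event.getKeySym () == "r")
    std::cout << "'r' was pressed" << std::endl;
}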
Example 2: setViewerPose
void setViewerPose (pcl::visualization::PCLVisualizer& viewer, const Eigen::Affine3f& viewer_pose) {
  Eigen::Vector3f pos_vector = viewer_pose * Eigen::Vector3f(10, 10, 10);
  Eigen::Vector3f look_at_vector = viewer_pose.rotation () * Eigen::Vector3f(0, 0, 0);
  Eigen::Vector3f up_vector = viewer_pose.rotation () * Eigen::Vector3f(0, -1, 0);
  viewer.setCameraPosition (pos_vector[0], pos_vector[1], pos_vector[2],
                            look_at_vector[0], look_at_vector[1], look_at_vector[2],
                            up_vector[0], up_vector[1], up_vector[2]);
}
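A common way to invoke a helper like setViewerPose is to pass the sensor pose of a range image, as the PCL range-image tutorials do (illustrative usage only; it assumes a pcl::RangeImage named range_image and the viewer already exist):
// Hypothetical usage: align the virtual camera with the sensor pose of a range image.
setViewerPose (viewer, range_image.getTransformationToWorldSystem ());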
Example 3: cloud
int
main (int argc, char** argv)
{
  //pcl::PointCloud<pcl::PointXYZ> cloud;
  // Read Kinect live stream:
  PointCloudT cloud_obj;
  PointCloudT::Ptr cloud (new PointCloudT);
  bool new_cloud_available_flag = false;
  pcl::Grabber* interface = new pcl::OpenNIGrabber();
  boost::function<void (const PointCloudT::ConstPtr&)> f =
      boost::bind (&cloud_cb_, _1, &cloud_obj, &new_cloud_available_flag);
  interface->registerCallback (f);
  interface->start ();

  // Wait for the first frame:
  while (!new_cloud_available_flag)
    boost::this_thread::sleep(boost::posix_time::milliseconds(1));
  pcl::copyPointCloud<PointT, PointT>(cloud_obj, *cloud);
  new_cloud_available_flag = false;

  // Display pointcloud:
  pcl::visualization::PointCloudColorHandlerRGBField<PointT> rgb(cloud);
  viewer.addPointCloud<PointT> (cloud, rgb, "input_cloud");
  viewer.setCameraPosition(0, 0, -2, 0, -1, 0, 0);

  /*
  // Add point picking callback to viewer:
  struct callback_args cb_args;
  PointCloudT::Ptr clicked_points_3d (new PointCloudT);
  cb_args.clicked_points_3d = clicked_points_3d;
  cb_args.viewerPtr = pcl::visualization::PCLVisualizer::Ptr(&viewer);
  viewer.registerPointPickingCallback (pp_callback, (void*)&cb_args);
  std::cout << "Shift+click on three floor points, then press 'Q'..." << std::endl;
  */

  // Spin until 'Q' is pressed (to allow ground manual initialization):
  viewer.spin();
  std::cout << "done." << std::endl;

  pcl::io::savePCDFileASCII ("/home/igor/pcds/pcd_grabber_out_1.pcd", *cloud);
  std::cerr << "Saved " << (*cloud).points.size () << " data points to pcd_grabber_out_1.pcd." << std::endl;
  /*
  for (size_t i = 0; i < cloud.points.size (); ++i)
    std::cerr << " " << cloud.points[i].x << " " << cloud.points[i].y << " " << cloud.points[i].z << std::endl;
  */

  while (!viewer.wasStopped ())
  {
    boost::this_thread::sleep (boost::posix_time::microseconds (100));
  }
  return (0);
}
Example 4: main
int main (int argc, char** argv)
{
  if (pcl::console::find_switch (argc, argv, "--help") || pcl::console::find_switch (argc, argv, "-h"))
    return print_help();

  // Algorithm parameters:
  std::string svm_filename = "../../people/data/trainedLinearSVMForPeopleDetectionWithHOG.yaml";
  float min_confidence = -1.5;
  float min_height = 1.3;
  float max_height = 2.3;
  float voxel_size = 0.06;
  Eigen::Matrix3f rgb_intrinsics_matrix;
  rgb_intrinsics_matrix << 525, 0.0, 319.5, 0.0, 525, 239.5, 0.0, 0.0, 1.0; // Kinect RGB camera intrinsics

  // Read if some parameters are passed from command line:
  pcl::console::parse_argument (argc, argv, "--svm", svm_filename);
  pcl::console::parse_argument (argc, argv, "--conf", min_confidence);
  pcl::console::parse_argument (argc, argv, "--min_h", min_height);
  pcl::console::parse_argument (argc, argv, "--max_h", max_height);

  // Read Kinect live stream:
  PointCloudT::Ptr cloud (new PointCloudT);
  bool new_cloud_available_flag = false;
  pcl::Grabber* interface = new pcl::OpenNIGrabber();
  boost::function<void (const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr&)> f =
      boost::bind (&cloud_cb_, _1, cloud, &new_cloud_available_flag);
  interface->registerCallback (f);
  interface->start ();

  // Wait for the first frame:
  while (!new_cloud_available_flag)
    boost::this_thread::sleep(boost::posix_time::milliseconds(1));
  new_cloud_available_flag = false;

  cloud_mutex.lock ();    // for not overwriting the point cloud

  // Display pointcloud:
  pcl::visualization::PointCloudColorHandlerRGBField<PointT> rgb(cloud);
  viewer.addPointCloud<PointT> (cloud, rgb, "input_cloud");
  viewer.setCameraPosition(0, 0, -2, 0, -1, 0, 0);

  // Add point picking callback to viewer:
  struct callback_args cb_args;
  PointCloudT::Ptr clicked_points_3d (new PointCloudT);
  cb_args.clicked_points_3d = clicked_points_3d;
  cb_args.viewerPtr = pcl::visualization::PCLVisualizer::Ptr(&viewer);
  viewer.registerPointPickingCallback (pp_callback, (void*)&cb_args);
  std::cout << "Shift+click on three floor points, then press 'Q'..." << std::endl;

  // Spin until 'Q' is pressed:
  viewer.spin();
  std::cout << "done." << std::endl;
  cloud_mutex.unlock ();

  // Ground plane estimation:
  Eigen::VectorXf ground_coeffs;
  ground_coeffs.resize(4);
  std::vector<int> clicked_points_indices;
  for (unsigned int i = 0; i < clicked_points_3d->points.size(); i++)
    clicked_points_indices.push_back(i);
  pcl::SampleConsensusModelPlane<PointT> model_plane(clicked_points_3d);
  model_plane.computeModelCoefficients(clicked_points_indices, ground_coeffs);
  std::cout << "Ground plane: " << ground_coeffs(0) << " " << ground_coeffs(1) << " " << ground_coeffs(2) << " " << ground_coeffs(3) << std::endl;

  // Initialize new viewer:
  pcl::visualization::PCLVisualizer viewer("PCL Viewer");                // viewer initialization
  viewer.setCameraPosition(0, 0, -2, 0, -1, 0, 0);

  // Create classifier for people detection:
  pcl::people::PersonClassifier<pcl::RGB> person_classifier;
  person_classifier.loadSVMFromFile(svm_filename);                       // load trained SVM

  // People detection app initialization:
  pcl::people::GroundBasedPeopleDetectionApp<PointT> people_detector;    // people detection object
  people_detector.setVoxelSize(voxel_size);                              // set the voxel size
  people_detector.setIntrinsics(rgb_intrinsics_matrix);                  // set RGB camera intrinsic parameters
  people_detector.setClassifier(person_classifier);                      // set person classifier
  people_detector.setHeightLimits(min_height, max_height);               // set person height limits
  // people_detector.setSensorPortraitOrientation(true);                 // set sensor orientation to vertical

  // For timing:
  static unsigned count = 0;
  static double last = pcl::getTime ();

  // Main loop:
  while (!viewer.wasStopped())
  {
    if (new_cloud_available_flag && cloud_mutex.try_lock ())    // if a new cloud is available
    {
      new_cloud_available_flag = false;

      // Perform people detection on the new cloud:
      std::vector<pcl::people::PersonCluster<PointT> > clusters;    // vector containing persons clusters
      people_detector.setInputCloud(cloud);
      people_detector.setGround(ground_coeffs);                     // set floor coefficients
      people_detector.compute(clusters);                            // perform people detection
      ground_coeffs = people_detector.getGround();                  // get updated floor coefficients
      //......... the rest of the code is omitted here .........
Example 5: main
int main (int argc, char** argv)
{
  // ROS initialization
  ros::init(argc, argv, "detecting_people");
  ros::NodeHandle nh;
  ros::Rate rate(13);
  ros::Subscriber state_sub = nh.subscribe("followme_state", 5, &stateCallback);
  ros::Publisher people_pub = nh.advertise<frmsg::people>("followme_people", 5);
  frmsg::people pub_people_;

  CloudConverter* cc_ = new CloudConverter();
  while (!cc_->ready_xyzrgb_)
  {
    ros::spinOnce();
    rate.sleep();
    if (!ros::ok())
    {
      printf("Terminated by Control-c.\n");
      return -1;
    }
  }

  // Input parameters from the .yaml file:
  std::string package_path_ = ros::package::getPath("detecting_people") + "/";
  cv::FileStorage* fs_ = new cv::FileStorage(package_path_ + "parameters.yml", cv::FileStorage::READ);

  // Algorithm parameters:
  std::string svm_filename = package_path_ + "trainedLinearSVMForPeopleDetectionWithHOG.yaml";
  std::cout << svm_filename << std::endl;
  float min_confidence = -1.5;
  float min_height = 1.3;
  float max_height = 2.3;
  float voxel_size = 0.06;
  Eigen::Matrix3f rgb_intrinsics_matrix;
  rgb_intrinsics_matrix << 525, 0.0, 319.5, 0.0, 525, 239.5, 0.0, 0.0, 1.0; // Kinect RGB camera intrinsics

  // Read if some parameters are passed from command line:
  pcl::console::parse_argument (argc, argv, "--svm", svm_filename);
  pcl::console::parse_argument (argc, argv, "--conf", min_confidence);
  pcl::console::parse_argument (argc, argv, "--min_h", min_height);
  pcl::console::parse_argument (argc, argv, "--max_h", max_height);

  // Read Kinect live stream:
  PointCloudT::Ptr cloud_people (new PointCloudT);
  cc_->ready_xyzrgb_ = false;
  while ( !cc_->ready_xyzrgb_ )
  {
    ros::spinOnce();
    rate.sleep();
  }
  pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr cloud = cc_->msg_xyzrgb_;

  // Display pointcloud:
  pcl::visualization::PointCloudColorHandlerRGBField<PointT> rgb(cloud);
  viewer.addPointCloud<PointT> (cloud, rgb, "input_cloud");
  viewer.setCameraPosition(0, 0, -2, 0, -1, 0, 0);

  // Add point picking callback to viewer:
  struct callback_args cb_args;
  PointCloudT::Ptr clicked_points_3d (new PointCloudT);
  cb_args.clicked_points_3d = clicked_points_3d;
  cb_args.viewerPtr = pcl::visualization::PCLVisualizer::Ptr(&viewer);
  viewer.registerPointPickingCallback (pp_callback, (void*)&cb_args);
  std::cout << "Shift+click on three floor points, then press 'Q'..." << std::endl;

  // Spin until 'Q' is pressed:
  viewer.spin();
  std::cout << "done." << std::endl;
  //cloud_mutex.unlock ();

  // Ground plane estimation:
  Eigen::VectorXf ground_coeffs;
  ground_coeffs.resize(4);
  std::vector<int> clicked_points_indices;
  for (unsigned int i = 0; i < clicked_points_3d->points.size(); i++)
    clicked_points_indices.push_back(i);
  pcl::SampleConsensusModelPlane<PointT> model_plane(clicked_points_3d);
  model_plane.computeModelCoefficients(clicked_points_indices, ground_coeffs);
  std::cout << "Ground plane: " << ground_coeffs(0) << " " << ground_coeffs(1) << " " << ground_coeffs(2) << " " << ground_coeffs(3) << std::endl;

  // Initialize new viewer:
  pcl::visualization::PCLVisualizer viewer("PCL Viewer");                // viewer initialization
  viewer.setCameraPosition(0, 0, -2, 0, -1, 0, 0);

  // Create classifier for people detection:
  pcl::people::PersonClassifier<pcl::RGB> person_classifier;
  person_classifier.loadSVMFromFile(svm_filename);                       // load trained SVM

  // People detection app initialization:
  pcl::people::GroundBasedPeopleDetectionApp<PointT> people_detector;    // people detection object
  people_detector.setVoxelSize(voxel_size);                              // set the voxel size
  people_detector.setIntrinsics(rgb_intrinsics_matrix);                  // set RGB camera intrinsic parameters
  people_detector.setClassifier(person_classifier);                      // set person classifier
  people_detector.setHeightLimits(min_height, max_height);               // set person height limits
  //......... the rest of the code is omitted here .........
Example 6: main
int main(int argc, char** argv)
{
  int total_frame = 0;
  ros::init(argc, argv, "pub_pcl");
  ros::NodeHandle nh;
  ros::Publisher pub = nh.advertise<PointCloudT>("/openni/points2", 1);

  // Read Kinect live stream:
  PointCloudT::Ptr cloud(new PointCloudT);
  bool new_cloud_available_flag = false;
  pcl::Grabber* interface = new pcl::OpenNIGrabber();
  boost::function<void (const PointCloudT::ConstPtr&)> f = boost::bind(&cloud_cb_, _1, cloud, &new_cloud_available_flag);
  interface->registerCallback(f);
  interface->start();

  // Wait for the first frame:
  while (!new_cloud_available_flag)
  {
    boost::this_thread::sleep(boost::posix_time::milliseconds(1));
  }
  cloud_mutex.lock();    // for not overwriting the point cloud

  // Display pointcloud:
  printf("frame %d\n", ++total_frame);
  pcl::visualization::PointCloudColorHandlerRGBField<PointT> rgb(cloud);
  viewer.addPointCloud<PointT>(cloud, rgb, "input_cloud");
  viewer.setCameraPosition(0, 0, -2, 0, -1, 0, 0);
  cloud_mutex.unlock();

  ros::Rate loop_rate(4);
  while (nh.ok())
  {
    ros::Time scan_time = ros::Time::now();
    // Get & publish the new cloud:
    if (new_cloud_available_flag && cloud_mutex.try_lock())
    {
      new_cloud_available_flag = false;
      pub.publish(cloud);
      printf("frame %d\n", ++total_frame);
      viewer.removeAllPointClouds();
      viewer.removeAllShapes();
      pcl::visualization::PointCloudColorHandlerRGBField<PointT> rgb(cloud);
      viewer.addPointCloud<PointT>(cloud, rgb, "input_cloud");
      cloud_mutex.unlock();
    }
    else
    {
      printf("no %d\n", total_frame);
    }
    ros::spinOnce();
    loop_rate.sleep();
  }
  return 0;
}