This article collects typical usage examples of the C++ class image_geometry::PinholeCameraModel. If you have been wondering what exactly the PinholeCameraModel class is for, how to use it, or what working examples look like, the hand-picked code samples below may help.
Fifteen code examples of the PinholeCameraModel class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ samples.
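In typical use, a PinholeCameraModel is populated once from a sensor_msgs/CameraInfo message and then queried for projections. The following is a minimal sketch; the callback name and wiring are illustrative, not taken from any example below:

#include <ros/ros.h>
#include <image_geometry/pinhole_camera_model.h>
#include <sensor_msgs/CameraInfo.h>

image_geometry::PinholeCameraModel cam_model_;

// Hypothetical callback: fill the model from a CameraInfo message, then query it.
void cameraInfoCallback(const sensor_msgs::CameraInfoConstPtr& info_msg)
{
  cam_model_.fromCameraInfo(info_msg);

  // 3D point (camera frame, meters) -> rectified pixel coordinates.
  cv::Point2d uv = cam_model_.project3dToPixel(cv::Point3d(0.1, 0.0, 1.0));

  // Rectified pixel -> viewing ray (scaled so that z == 1).
  cv::Point3d ray = cam_model_.projectPixelTo3dRay(uv);
  ROS_INFO("pixel (%.1f, %.1f) -> ray (%.3f, %.3f, %.3f)",
           uv.x, uv.y, ray.x, ray.y, ray.z);
}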
Example 1: getCameraRay
void getCameraRay(const image_geometry::PinholeCameraModel& model, const cv::Point2d& pt, cv::Point3d* ray)
{
  // projectPixelTo3dRay expects rectified pixel coordinates, so rectify first.
  cv::Point2d rect_point = model.rectifyPoint(pt);
  ROS_DEBUG("Rect Point: %f, %f", rect_point.x, rect_point.y);
  *ray = model.projectPixelTo3dRay(rect_point);
}
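A hypothetical call site for this helper, assuming cam_model_ has already been populated via fromCameraInfo and picking an arbitrary pixel:

cv::Point3d ray;
getCameraRay(cam_model_, cv::Point2d(320.0, 240.0), &ray);
// ray points through the rectified pixel; image_geometry scales it so z == 1.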
Example 2: cloud
pcl::PointCloud<pcl::PointXYZ>::Ptr Conversions::toPointCloud(const Eigen::Isometry3d &T, const image_geometry::PinholeCameraModel &cam, const cv::Mat &depth_img)
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>());
  cloud->header.frame_id = "cam";
  cloud->is_dense = false;
  cloud->height = depth_img.rows;
  cloud->width = depth_img.cols;
  cloud->sensor_origin_ = Eigen::Vector4f(T.translation()(0), T.translation()(1), T.translation()(2), 1.f);
  cloud->sensor_orientation_ = Eigen::Quaternionf(T.rotation().cast<float>());
  cloud->points.resize(cloud->height * cloud->width);

  // Assumes a continuous CV_32FC1 depth image with depth in meters.
  size_t idx = 0;
  const float* depthdata = depth_img.ptr<float>(0);
  for (int v = 0; v < depth_img.rows; ++v) {
    for (int u = 0; u < depth_img.cols; ++u) {
      pcl::PointXYZ& p = cloud->points[idx]; ++idx;
      float d = *depthdata++;
      if (d > 0 && !std::isnan(d)) {
        // Back-project the pixel through the pinhole model.
        p.z = d;
        p.x = (u - cam.cx()) * d / cam.fx();
        p.y = (v - cam.cy()) * d / cam.fy();
      } else {
        p.x = p.y = p.z = std::numeric_limits<float>::quiet_NaN();
      }
    }
  }
  return cloud;
}
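The depth_img argument is expected to be a continuous 32-bit float depth image in meters. One plausible way to obtain such a Mat from a ROS message is via cv_bridge; the callback below is an illustrative sketch, not part of the original example:

#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>

// Hypothetical wiring: convert an incoming depth image and build the cloud,
// assuming cam_model_ is already calibrated and identity camera pose.
void depthCallback(const sensor_msgs::ImageConstPtr& depth_msg)
{
  cv_bridge::CvImageConstPtr cv_ptr =
      cv_bridge::toCvShare(depth_msg, sensor_msgs::image_encodings::TYPE_32FC1);
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud =
      Conversions::toPointCloud(Eigen::Isometry3d::Identity(), cam_model_, cv_ptr->image);
}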
Example 3: generateStartPoints
void FindObjectOnPlane::generateStartPoints(
  const cv::Point2f& point_2d,
  const image_geometry::PinholeCameraModel& model,
  const pcl::ModelCoefficients::Ptr& coefficients,
  std::vector<cv::Point3f>& search_points_3d,
  std::vector<cv::Point2f>& search_points_2d)
{
  NODELET_INFO("generateStartPoints");
  jsk_recognition_utils::Plane::Ptr plane
    (new jsk_recognition_utils::Plane(coefficients->values));
  // Cast a ray through the input pixel and intersect it with the plane.
  cv::Point3d ray = model.projectPixelTo3dRay(point_2d);
  Eigen::Vector3f projected_point = rayPlaneInteersect(ray, plane);
  const double resolution_step = 0.01;
  const int resolution = 5;
  search_points_3d.clear();
  search_points_2d.clear();
  // Sample a grid of candidates around the intersection, re-project each one
  // onto the plane, and record both its 3D and 2D locations.
  for (int i = -resolution; i < resolution; ++i) {
    for (int j = -resolution; j < resolution; ++j) {
      double x_diff = resolution_step * i;
      double y_diff = resolution_step * j;
      Eigen::Vector3f moved_point = projected_point + Eigen::Vector3f(x_diff, y_diff, 0);
      Eigen::Vector3f projected_moved_point;
      plane->project(moved_point, projected_moved_point);
      cv::Point3f projected_moved_point_cv(projected_moved_point[0],
                                           projected_moved_point[1],
                                           projected_moved_point[2]);
      search_points_3d.push_back(projected_moved_point_cv);
      cv::Point2d p2d = model.project3dToPixel(projected_moved_point_cv);
      search_points_2d.push_back(p2d);
    }
  }
}
Example 4: projectPoints
void projectPoints(const image_geometry::PinholeCameraModel &cam_model,
                   const cv::Point3d &points3D,
                   cv::Point2d *points2D)
{
  *points2D = cam_model.project3dToPixel(points3D);
  // *points2D = cam_model.rectifyPoint(*points2D);
}
Example 5: PointFromPixel
pcl::PointXYZ PointFromPixel(const cv::Point& pixel, const tf::Transform& cameraFrameToWorldFrame, image_geometry::PinholeCameraModel cam) {
  // Ray through the pixel, expressed in the world frame.
  cv::Point3d cameraRay = cam.projectPixelTo3dRay(pixel);
  tf::Point worldCameraOrigin = cameraFrameToWorldFrame * tf::Vector3(0, 0, 0);
  tf::Point worldCameraStep = cameraFrameToWorldFrame * tf::Vector3(cameraRay.x, cameraRay.y, cameraRay.z) - worldCameraOrigin;
  // Scale the ray so that it hits the ground plane z = 0:
  // origin.z + zScale * step.z == 0.
  double zScale = -worldCameraOrigin.z() / worldCameraStep.z();
  tf::Point ret = worldCameraOrigin + zScale * worldCameraStep;
  return pcl::PointXYZ(ret.x(), ret.y(), 0);
}
Example 6: rayPlaneInteersect
cv::Point2d FindObjectOnPlane::getUyEnd(
  const cv::Point2d& ux_start,
  const cv::Point2d& ux_end,
  const image_geometry::PinholeCameraModel& model,
  const jsk_recognition_utils::Plane::Ptr& plane)
{
  // Lift both pixels onto the plane, build the in-plane x axis, and cross it
  // with the plane normal to get a perpendicular in-plane y axis.
  cv::Point3d start_ray = model.projectPixelTo3dRay(ux_start);
  cv::Point3d end_ray = model.projectPixelTo3dRay(ux_end);
  Eigen::Vector3f ux_start_3d = rayPlaneInteersect(start_ray, plane);
  Eigen::Vector3f ux_end_3d = rayPlaneInteersect(end_ray, plane);
  Eigen::Vector3f ux_3d = ux_end_3d - ux_start_3d;
  Eigen::Vector3f normal = plane->getNormal();
  Eigen::Vector3f uy_3d = normal.cross(ux_3d).normalized();
  Eigen::Vector3f uy_end_3d = ux_start_3d + uy_3d;
  // Project the 3D end of the y axis back into the image.
  return model.project3dToPixel(cv::Point3d(uy_end_3d[0],
                                            uy_end_3d[1],
                                            uy_end_3d[2]));
}
Example 7: round
cv::Point3d GraphGridMapper::to3D(cv::Point3d &p, const Eigen::Isometry3d &camera_transform, const image_geometry::PinholeCameraModel &camera_model)
{
  // Clamp the pixel coordinates to the image bounds.
  int width = camera_model.cameraInfo().width;
  int height = camera_model.cameraInfo().height;
  int u = round(p.x);
  if (u < 0) {
    u = 0;
  } else if (u >= width) {
    u = width - 1;
  }
  int v = round(p.y);
  if (v < 0) {
    v = 0;
  } else if (v >= height) {
    v = height - 1;
  }
  cv::Point3d p3D(-1.0, -1.0, std::numeric_limits<double>::infinity());
  if (p.z != 0 && !std::isnan(p.z))
  {
    // Back-project through the pinhole model, then move into the target frame.
    p3D.x = (u - camera_model.cx()) * p.z / camera_model.fx();
    p3D.y = (v - camera_model.cy()) * p.z / camera_model.fy();
    p3D.z = p.z;
    Eigen::Vector3d vec(p3D.x, p3D.y, p3D.z);
    vec = camera_transform * vec;  // Isometry3d applies rotation + translation
    p3D.x = vec(0);
    p3D.y = vec(1);
    p3D.z = vec(2);
  }
  return p3D;
}
Example 8: project_uv_to_cloud_index
size_t project_uv_to_cloud_index(const pcl::PointCloud<PointT>& cloud, const cv::Point2d& image_point,
                                 const image_geometry::PinholeCameraModel& camera_model, double& distance)
{
  // Assumes the camera is at the origin, pointed in the normal direction.
  cv::Point3d pt_cv = camera_model.projectPixelTo3dRay(image_point);
  Eigen::Vector3f direction_eig(pt_cv.x, pt_cv.y, pt_cv.z);
  Eigen::Vector3f origin_eig(0.0, 0.0, 0.0);
  // Find the cloud point closest to this ray (helper defined elsewhere).
  return closest_point_index_rayOMP(cloud, direction_eig, origin_eig, distance);
}
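closest_point_index_rayOMP is an external helper not shown in this listing. A minimal single-threaded sketch of what such a function presumably does (the non-OMP name and exact semantics are assumptions):

// Hypothetical sketch: return the index of the cloud point with the smallest
// perpendicular distance to the ray (origin, direction); write that distance out.
size_t closest_point_index_ray(const pcl::PointCloud<PointT>& cloud,
                               const Eigen::Vector3f& direction,
                               const Eigen::Vector3f& origin,
                               double& distance)
{
  const Eigen::Vector3f dir = direction.normalized();
  size_t best_index = 0;
  float best_dist = std::numeric_limits<float>::max();
  for (size_t i = 0; i < cloud.points.size(); ++i) {
    const Eigen::Vector3f offset = cloud.points[i].getVector3fMap() - origin;
    // Perpendicular distance from the point to the ray.
    const float d = offset.cross(dir).norm();
    if (d < best_dist) {
      best_dist = d;
      best_index = i;
    }
  }
  distance = best_dist;
  return best_index;
}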
Example 9: hit_same_point
inline void
hit_same_point(const image_geometry::PinholeCameraModel &camera_model,
               const pcl::PointCloud<PointT> &in,
               pcl::PointCloud<PointT> &out,
               int rows,
               int cols,
               float z_threshold = 0.3)
{
  // One bucket per pixel; each bucket collects the points that project there.
  std::vector<std::vector<std::vector<PointT> > > hit(cols, std::vector<std::vector<PointT> >(rows, std::vector<PointT>()));
  // Project points into image space
  for (unsigned int i = 0; i < in.points.size(); ++i) {
    const PointT* pt = &in.points.at(i);
    if (pt->z > 1) { // min distance from camera: 1 m
      // project3dToPixel returns cv::Point2d; assigning to cv::Point2i truncates.
      cv::Point2i point_image = camera_model.project3dToPixel(cv::Point3d(pt->x, pt->y, pt->z));
      if (between<int>(0, point_image.x, cols)
          && between<int>(0, point_image.y, rows))
      {
        // Sort the point into its pixel bucket
        hit[point_image.x][point_image.y].push_back(in.points.at(i));
      }
    }
  }
  // For every pixel hit more than once, keep the nearest point when the z
  // spread exceeds the threshold (the near point occludes the far ones).
  assert(out.empty());
  for (size_t x = 0; x < hit.size(); ++x) {
    for (size_t y = 0; y < hit[0].size(); ++y) {
      if (hit[x][y].size() > 1) {
        PointT min_z = hit[x][y][0];
        float max_z = min_z.z;
        for (size_t p = 1; p < hit[x][y].size(); ++p) {
          // track min and max z
          max_z = MAX(max_z, hit[x][y][p].z);
#ifdef DEBUG
          std::cout << hit[x][y].size() << "\t";
#endif
          if (hit[x][y][p].z < min_z.z)
          {
            min_z = hit[x][y][p];
          }
        }
#ifdef DEBUG
        std::cout << min_z.z << "\t" << max_z << "\t";
#endif
        if (max_z - min_z.z > z_threshold) {
#ifdef DEBUG
          std::cout << min_z << std::endl;
#endif
          out.push_back(min_z);
        }
      }
    }
  }
#ifdef DEBUG
  std::cout << "hit_same_point in: " << in.size() << "\t out: " << out.size() << "\n";
#endif
}
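The between helper is also external; presumably it is a simple range check along these lines (the exact bounds convention is an assumption):

// Hypothetical sketch of the range check used above.
template<typename T>
inline bool between(T lower, T value, T upper)
{
  return value >= lower && value < upper;  // assumed half-open: [lower, upper)
}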
Example 10: infoCallback
void infoCallback(const sensor_msgs::CameraInfoConstPtr& info_msg)
{
  if (calibrated)
    return;
  // Populate the camera model once, hand the intrinsics to the detector,
  // and only then start listening for images.
  cam_model_.fromCameraInfo(info_msg);
  pattern_detector_.setCameraMatrices(cam_model_.intrinsicMatrix(), cam_model_.distortionCoeffs());
  calibrated = true;
  image_sub_ = nh_.subscribe("/camera/rgb/image_mono", 1, &CalibrateKinectCheckerboard::imageCallback, this);
  ROS_INFO("[calibrate] Got image info!");
}
Example 11: camerainfoCb
void camerainfoCb(const sensor_msgs::CameraInfoConstPtr& info_msg)
{
  ROS_INFO("infocallback: shutting down camerainfosub");
  // The intrinsics are static, so one message is enough; unsubscribe after it.
  cam_model_.fromCameraInfo(info_msg);
  camera_topic = info_msg->header.frame_id;
  camerainfosub_.shutdown();
}
Example 12: overlayPoints
void overlayPoints(pcl::PointCloud<pcl::PointXYZ> detector_points, tf::Transform &transform, cv_bridge::CvImagePtr& image)
{
  // Overlay calibration points on the image
  pcl::PointCloud<pcl::PointXYZ> transformed_detector_points;
  pcl_ros::transformPointCloud(detector_points, transformed_detector_points, transform);

  int font_face = cv::FONT_HERSHEY_SCRIPT_SIMPLEX;
  double font_scale = 1;
  int thickness = 2;
  int radius = 5;

  for (unsigned int i = 0; i < transformed_detector_points.size(); i++)
  {
    // Project each 3D calibration point into the image and mark it.
    pcl::PointXYZ pt = transformed_detector_points[i];
    cv::Point3d pt_cv(pt.x, pt.y, pt.z);
    cv::Point2d uv = cam_model_.project3dToPixel(pt_cv);
    cv::circle(image->image, uv, radius, CV_RGB(255, 0, 0), -1);

    // Center the 1-based index label just above the circle.
    std::stringstream out;
    out << i + 1;
    int baseline = 0;
    cv::Size text_size = cv::getTextSize(out.str(), font_face, font_scale, thickness, &baseline);
    cv::Point origin = cv::Point(uv.x - text_size.width / 2,
                                 uv.y - radius - baseline/* - thickness*/);
    cv::putText(image->image, out.str(), origin, font_face, font_scale, CV_RGB(255, 0, 0), thickness);
  }
}
Example 13: cam_info_cb
void cam_info_cb(const sensor_msgs::CameraInfo::ConstPtr& msg)
{
  // fromCameraInfo returns true when the camera parameters were updated.
  if (cam_model_.fromCameraInfo(msg))
  {
    got_cam_info_ = true;
    ROS_INFO("[bk_skeletal_tracker] Got RGB camera info.");
  } else {
    ROS_ERROR("[bk_skeletal_tracker] Couldn't read camera info.");
  }
}
Example 14: doOverlay
void doOverlay(const sensor_msgs::ImageConstPtr& img_msg,
               const sensor_msgs::CameraInfoConstPtr& info_msg) {
  // convert camera image into opencv
  cam_model.fromCameraInfo(info_msg);
  cv_bridge::CvImagePtr cv_img = cv_bridge::toCvCopy(img_msg, sensor_msgs::image_encodings::RGB8);
  double alpha_mult;
  ros::param::param<double>("~alpha_mult", alpha_mult, 0.5);
  uint8_t r, g, b;
  if (aligned_pc) {
    // Bring the aligned point cloud into the camera frame at the image stamp.
    if (!tf_list->waitForTransform(img_msg->header.frame_id, "/base_link",
                                   img_msg->header.stamp, ros::Duration(3)))
      return;
    tf::StampedTransform transform;
    tf_list->lookupTransform(img_msg->header.frame_id, "/base_link",
                             img_msg->header.stamp, transform);
    PCRGB::Ptr tf_pc(new PCRGB());
    pcl_ros::transformPointCloud<PRGB>(*aligned_pc, *tf_pc, transform);
    for (uint32_t i = 0; i < tf_pc->size(); i++) {
      // Project each cloud point into the image and alpha-blend its color in.
      cv::Point3d proj_pt_cv(tf_pc->points[i].x, tf_pc->points[i].y,
                             tf_pc->points[i].z);
      cv::Point pt2d = cam_model.project3dToPixel(proj_pt_cv);
      extractRGB(tf_pc->points[i].rgb, r, g, b);
      // Bounds check: x against image width (cols), y against height (rows).
      if (pt2d.x >= 0 && pt2d.y >= 0 &&
          pt2d.x < cv_img->image.cols && pt2d.y < cv_img->image.rows) {
        double old_r = cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[0];
        double old_g = cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[1];
        double old_b = cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[2];
        cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[0] =
          (uint8_t) std::min(alpha_mult * old_r + r, 255.0);
        cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[1] =
          (uint8_t) std::min(alpha_mult * old_g + g, 255.0);
        cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[2] =
          (uint8_t) std::min(alpha_mult * old_b + b, 255.0);
      }
    }
  }
  overlay_pub.publish(cv_img->toImageMsg());
}
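extractRGB is an external helper; a plausible sketch, assuming PCL's usual packed-float RGB layout:

#include <cstring>

// Hypothetical sketch of extractRGB: PCL stores RGB packed into the bits of a
// float, so reinterpret those bits and mask out each 8-bit channel.
void extractRGB(float rgb, uint8_t& r, uint8_t& g, uint8_t& b)
{
  uint32_t packed;
  std::memcpy(&packed, &rgb, sizeof(packed));  // bit-exact reinterpretation
  r = (packed >> 16) & 0xFF;
  g = (packed >> 8) & 0xFF;
  b = packed & 0xFF;
}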
Example 15: convert
void convert(const sensor_msgs::ImageConstPtr& depth_msg, PointCloud::Ptr& cloud_msg)
{
  // Use correct principal point from calibration
  float center_x = cameraModel.cx();
  float center_y = cameraModel.cy();

  // Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y)
  double unit_scaling = DepthTraits<T>::toMeters(T(1));
  float constant_x = unit_scaling / cameraModel.fx();
  float constant_y = unit_scaling / cameraModel.fy();
  float bad_point = std::numeric_limits<float>::quiet_NaN();

  sensor_msgs::PointCloud2Iterator<float> iter_x(*cloud_msg, "x");
  sensor_msgs::PointCloud2Iterator<float> iter_y(*cloud_msg, "y");
  sensor_msgs::PointCloud2Iterator<float> iter_z(*cloud_msg, "z");
  const T* depth_row = reinterpret_cast<const T*>(&depth_msg->data[0]);
  int row_step = depth_msg->step / sizeof(T);
  for (int v = 0; v < (int)cloud_msg->height; ++v, depth_row += row_step)
  {
    for (int u = 0; u < (int)cloud_msg->width; ++u, ++iter_x, ++iter_y, ++iter_z)
    {
      T depth = depth_row[u];
      // Missing points denoted by NaNs
      if (!DepthTraits<T>::valid(depth))
      {
        *iter_x = *iter_y = *iter_z = bad_point;
        continue;
      }
      // Fill in XYZ
      *iter_x = (u - center_x) * depth * constant_x;
      *iter_y = (v - center_y) * depth * constant_y;
      *iter_z = DepthTraits<T>::toMeters(depth);
    }
  }
}
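DepthTraits is defined elsewhere; this convert function follows the pattern of ROS's depth_image_proc, whose depth traits look roughly like the sketch below (treat it as a reference, not the exact code used here):

#include <cmath>
#include <cstdint>

// Sketch of the DepthTraits abstraction assumed above: uint16_t depth images
// hold millimeters, float images hold meters; invalid pixels are 0 or non-finite.
template<typename T> struct DepthTraits {};

template<> struct DepthTraits<uint16_t>
{
  static inline bool valid(uint16_t depth) { return depth != 0; }
  static inline float toMeters(uint16_t depth) { return depth * 0.001f; }  // mm -> m
};

template<> struct DepthTraits<float>
{
  static inline bool valid(float depth) { return std::isfinite(depth); }
  static inline float toMeters(float depth) { return depth; }
};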