本文整理汇总了C++中Pose3D::focalX方法的典型用法代码示例。如果您正苦于以下问题:C++ Pose3D::focalX方法的具体用法?C++ Pose3D::focalX怎么用?C++ Pose3D::focalX使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Pose3D
的用法示例。
在下文中一共展示了Pose3D::focalX方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: setPose
// Configure the renderer's OpenGL modelview/projection matrices from a camera pose.
// If arg_near_plane/arg_far_plane are both non-null they are used directly;
// otherwise near/far clipping planes are estimated from the pose.
void MeshRenderer :: setPose(const Pose3D& pose, float* arg_near_plane, float* arg_far_plane)
{
VertexBufferObject& vbo = m_vertex_buffer_object;
// Cache the camera transform in the VBO for shader/VBO-based rendering paths.
pose.cvCameraTransform().copyTo(vbo.model_view_matrix);
// Transpose the matrix for OpenGL column-major.
vbo.model_view_matrix = vbo.model_view_matrix.t();
if (!(arg_near_plane && arg_far_plane))
{
// No explicit planes given: derive them from the pose (results cached in members).
estimateOptimalPlanes(pose, &m_last_near_plane, &m_last_far_plane);
}
else
{
m_last_near_plane = *arg_near_plane;
m_last_far_plane = *arg_far_plane;
}
// All subsequent GL calls target the offscreen pbuffer context.
m_pbuffer->makeCurrent();
glMatrixMode (GL_MODELVIEW);
glLoadIdentity ();
cv::Vec3f euler_angles = pose.cvEulerRotation();
// Rebuild the modelview from translation + Euler angles.
// NOTE(review): rotation is applied Z, then Y, then X (radians converted to degrees).
glTranslatef(pose.cvTranslation()[0], pose.cvTranslation()[1], pose.cvTranslation()[2]);
glRotatef(euler_angles[2]*180.0/M_PI, 0, 0, 1);
glRotatef(euler_angles[1]*180.0/M_PI, 0, 1, 0);
glRotatef(euler_angles[0]*180.0/M_PI, 1, 0, 0);
glMatrixMode (GL_PROJECTION);
glLoadIdentity ();
// Offset the viewport so the pose's principal point (image center) lines up
// with the pbuffer center — presumably compensating a non-centered optical axis.
double dx = pose.imageCenterX() - (m_pbuffer->width() / 2.0);
double dy = pose.imageCenterY() - (m_pbuffer->height() / 2.0);
glViewport(dx, -dy, m_pbuffer->width(), m_pbuffer->height());
if (pose.isOrthographic())
{
ntk_dbg_print(pose.focalX()/2, 0);
ntk_dbg_print(pose.focalY()/2, 0);
// Orthographic projection: focal lengths define the view volume extents.
glOrtho(-pose.focalX()/2, pose.focalX()/2, -pose.focalY()/2, pose.focalY()/2, m_last_near_plane, m_last_far_plane);
}
else
{
// Perspective projection: vertical field of view derived from focalY and
// the pbuffer height (fov = 2*atan(h / (2*fy)) in degrees).
double fov = (180.0/M_PI) * 2.0*atan(m_pbuffer->height()/(2.0*pose.focalY()));
// double fov2 = (180.0/M_PI) * 2.0*atan(image.cols/(2.0*pose.focalX()));
// ntk_dbg_print(fov2, 2);
// gluPerspective(fov2, double(image.rows)/image.cols, near_plane, far_plane);
gluPerspective(fov, double(m_pbuffer->width())/m_pbuffer->height(), m_last_near_plane, m_last_far_plane);
}
// Leave the matrix mode on MODELVIEW for subsequent draw calls.
glMatrixMode (GL_MODELVIEW);
}
示例2: color
// Build a surfel cloud from an RGB-D frame: one surfel per valid depth pixel,
// unprojected with depth_pose, optionally colored by reprojecting into the
// RGB image via rgb_pose. Result replaces the content of m_mesh.
// Fixes vs. previous version:
//  - guard against division by zero when the amplitude image is flat
//    (max_val == min_val) — previously produced NaN/inf gray levels;
//  - clamp the depth-based gray fallback to [0,255] — depths beyond 10m
//    previously wrapped modulo 256 when stored in a Vec3b channel.
void MeshGenerator :: generateSurfelsMesh(const RGBDImage& image,
const Pose3D& depth_pose,
const Pose3D& rgb_pose)
{
// Gray-level normalization range, taken from the amplitude image when present.
double min_val = 0, max_val = 0;
if (image.amplitude().data)
minMaxLoc(image.amplitude(), &min_val, &max_val);
m_mesh.clear();
const cv::Mat1f& depth_im = image.depth();
const cv::Mat1b& mask_im = image.depthMask();
for_all_rc(depth_im)
{
int i_r = r;
int i_c = c;
if (!is_yx_in_range(depth_im, i_r, i_c))
continue;
// Skip pixels masked out as invalid depth.
if (!mask_im(r,c))
continue;
double depth = depth_im(i_r,i_c);
// Back-project the pixel into 3D camera/world space.
cv::Point3f p = depth_pose.unprojectFromImage(Point2f(c,r), depth);
// Per-pixel normal if available, else assume the surfel faces the camera.
Point3f normal = image.normal().data ? image.normal()(i_r, i_c) : Vec3f(0,0,1);
Vec3b color (0,0,0);
if (m_use_color)
{
// Reproject the 3D point into the RGB camera to sample its color.
cv::Point3f prgb = rgb_pose.projectToImage(p);
int i_y = ntk::math::rnd(prgb.y);
int i_x = ntk::math::rnd(prgb.x);
if (is_yx_in_range(image.rgb(), i_y, i_x))
{
// Convert OpenCV BGR storage to RGB.
Vec3b bgr = image.rgb()(i_y, i_x);
color = Vec3b(bgr[2], bgr[1], bgr[0]);
}
}
else
{
// Grayscale fallback: normalized amplitude if available, else depth-derived.
int g = 0;
if (image.amplitude().data)
{
const double range = max_val - min_val;
// A flat amplitude image would make range == 0; map it to black
// instead of dividing by zero.
if (range > 0)
g = 255.0 * (image.amplitude()(i_r,i_c) - min_val) / range;
}
else
{
// Map depth in [0, 10m] onto [0, 255]; clamp so far points don't
// wrap around when stored in the unsigned 8-bit channel.
g = std::min(255, int(255 * depth / 10.0));
}
color = Vec3b(g,g,g);
}
Surfel s;
s.color = color;
s.confidence = 0;
s.location = p;
s.normal = normal;
s.n_views = 1;
// Surfel radius grows with depth and with obliquity of the surface;
// normal.z is floored at 0.5 to avoid huge radii on grazing surfaces.
double normal_z = std::max(normal.z, 0.5f);
s.radius = m_resolution_factor * ntk::math::sqrt1_2 * depth
/ (depth_pose.focalX() * normal_z);
m_mesh.addSurfel(s);
}
}