This page collects typical usage examples of the C++ method Pose3D::unprojectFromImage. If you are wondering what Pose3D::unprojectFromImage does in C++, how to call it, or what it is useful for, the hand-picked code examples below may help. You can also browse further usage examples of its containing class, Pose3D.
Three code examples of Pose3D::unprojectFromImage are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
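Before the examples, here is a minimal sketch of the two calling patterns that recur below: a per-pixel overload that takes an image point plus its metric depth and returns a 3D point, and a whole-image overload that fills a cv::Mat3f with one 3D point per masked pixel. The header paths, the ntk namespace for Pose3D and RGBDImage, and the helper function name are assumptions based on the nestk library these examples come from; the Pose3D instance is expected to carry the depth camera intrinsics.

#include <opencv2/core/core.hpp>
#include <ntk/camera/rgbd_image.h>    // assumed header locations in nestk
#include <ntk/geometry/pose_3d.h>

// Hypothetical helper illustrating the overloads used in the examples below.
void unproject_sketch(const ntk::RGBDImage& image, const ntk::Pose3D& depth_pose)
{
    const cv::Mat1f& depth_im = image.depth();

    // Per-pixel overload: (column, row) plus the depth value at that pixel.
    int r = depth_im.rows / 2, c = depth_im.cols / 2;
    cv::Point3f p3d = depth_pose.unprojectFromImage(cv::Point2f(c, r), depth_im(r, c));

    // Whole-image overload: one 3D point per pixel enabled in the depth mask.
    cv::Mat3f points3d(depth_im.size());
    depth_pose.unprojectFromImage(depth_im, image.depthMask(), points3d);

    // projectToImage is the inverse operation: 3D point -> (x, y, depth) in image space.
    cv::Point3f back = depth_pose.projectToImage(p3d);
    (void)back;
}

With a calibrated depth/RGB pair, the same projection applied with the RGB camera's pose maps those 3D points into the color image, which is how Example 1 colors its point cloud.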
Example 1: color
void MeshGenerator :: generatePointCloudMesh(const RGBDImage& image,
                                             const Pose3D& depth_pose,
                                             const Pose3D& rgb_pose)
{
  m_mesh.clear();
  m_mesh.vertices.reserve(image.depth().rows*image.depth().cols);
  m_mesh.colors.reserve(image.depth().rows*image.depth().cols);
  m_mesh.normals.reserve(image.depth().rows*image.depth().cols);

  const cv::Mat1f& depth_im = image.depth();
  const cv::Mat1b& mask_im = image.depthMask();
  cv::Mat3f voxels (depth_im.size());
  cv::Mat3f rgb_points (depth_im.size());

  // Subsample the depth map according to the resolution factor.
  cv::Mat1b subsample_mask(mask_im.size());
  subsample_mask = 0;
  for (float r = 0; r < subsample_mask.rows-1; r += 1.0/m_resolution_factor)
    for (float c = 0; c < subsample_mask.cols-1; c += 1.0/m_resolution_factor)
      subsample_mask(ntk::math::rnd(r), ntk::math::rnd(c)) = 1;
  subsample_mask = mask_im & subsample_mask;

  // Unproject all masked depth pixels to 3D points in a single call.
  depth_pose.unprojectFromImage(depth_im, subsample_mask, voxels);
  if (m_use_color && image.hasRgb())
    rgb_pose.projectToImage(voxels, subsample_mask, rgb_points);

  for (int r = 0; r < voxels.rows; ++r)
  {
    Vec3f* voxels_data = voxels.ptr<Vec3f>(r);
    const uchar* mask_data = subsample_mask.ptr<uchar>(r);
    for (int c = 0; c < voxels.cols; ++c)
    {
      if (!mask_data[c])
        continue;

      Vec3b color (0,0,0);
      if (m_use_color)
      {
        // Look up the color of the corresponding pixel in the RGB image.
        Point3f prgb = rgb_points(r,c);
        int i_y = ntk::math::rnd(prgb.y);
        int i_x = ntk::math::rnd(prgb.x);
        if (is_yx_in_range(image.rgb(), i_y, i_x))
        {
          Vec3b bgr = image.rgb()(i_y, i_x);
          color = Vec3b(bgr[2], bgr[1], bgr[0]);
        }
      }
      else
      {
        // No color requested: use the intensity image, or encode depth as gray.
        int g = 0;
        if (image.intensity().data)
          g = image.intensity()(r,c);
        else
          g = 255 * voxels_data[c][2] / 10.0;
        color = Vec3b(g,g,g);
      }

      m_mesh.vertices.push_back(voxels_data[c]);
      m_mesh.colors.push_back(color);
    }
  }
}
Example 2: if
void MeshGenerator :: generateTriangleMesh(const RGBDImage& image,
                                           const Pose3D& depth_pose,
                                           const Pose3D& rgb_pose)
{
  const Mat1f& depth_im = image.depth();
  const Mat1b& mask_im = image.depthMask();
  m_mesh.clear();

  // Pick a texture: RGB if requested, intensity if available, plain white otherwise.
  if (m_use_color)
    image.rgb().copyTo(m_mesh.texture);
  else if (image.intensity().data)
    toMat3b(normalize_toMat1b(image.intensity())).copyTo(m_mesh.texture);
  else
  {
    m_mesh.texture.create(depth_im.size());
    m_mesh.texture = Vec3b(255,255,255);
  }

  m_mesh.vertices.reserve(depth_im.cols*depth_im.rows);
  m_mesh.texcoords.reserve(depth_im.cols*depth_im.rows);
  m_mesh.colors.reserve(depth_im.cols*depth_im.rows);

  // Remember which vertex index each pixel produced (-1 means no vertex).
  Mat1i vertice_map(depth_im.size());
  vertice_map = -1;
  for_all_rc(depth_im)
  {
    if (!mask_im(r,c))
      continue;
    double depth = depth_im(r,c);
    // Unproject one pixel: (column, row, depth) -> 3D point.
    Point3f p3d = depth_pose.unprojectFromImage(Point3f(c,r,depth));

    Point3f p2d_rgb;
    Point2f texcoords;
    if (m_use_color)
    {
      p2d_rgb = rgb_pose.projectToImage(p3d);
      texcoords = Point2f(p2d_rgb.x/image.rgb().cols, p2d_rgb.y/image.rgb().rows);
    }
    else
    {
      p2d_rgb = Point3f(c,r,depth);
      texcoords = Point2f(p2d_rgb.x/image.intensity().cols, p2d_rgb.y/image.intensity().rows);
    }

    vertice_map(r,c) = m_mesh.vertices.size();
    m_mesh.vertices.push_back(p3d);
    // m_mesh.colors.push_back(bgr_to_rgb(im.rgb()(p2d_rgb.y, p2d_rgb.x)));
    m_mesh.texcoords.push_back(texcoords);
  }

  // Build up to two triangles per pixel quad, skipping edges with large depth jumps.
  for_all_rc(vertice_map)
  {
    if (vertice_map(r,c) < 0)
      continue;

    if ((c < vertice_map.cols - 1) && (r < vertice_map.rows - 1) &&
        (vertice_map(r+1,c) >= 0) && (vertice_map(r,c+1) >= 0) &&
        (std::abs(depth_im(r,c) - depth_im(r+1, c)) < m_max_delta_depth) &&
        (std::abs(depth_im(r,c) - depth_im(r, c+1)) < m_max_delta_depth))
    {
      Face f;
      f.indices[2] = vertice_map(r,c);
      f.indices[1] = vertice_map(r,c+1);
      f.indices[0] = vertice_map(r+1,c);
      m_mesh.faces.push_back(f);
    }

    if ((c > 0) && (r < vertice_map.rows - 1) &&
        (vertice_map(r+1,c) >= 0) && (vertice_map(r+1,c-1) >= 0) &&
        (std::abs(depth_im(r,c) - depth_im(r+1, c)) < m_max_delta_depth) &&
        (std::abs(depth_im(r,c) - depth_im(r+1, c-1)) < m_max_delta_depth))
    {
      Face f;
      f.indices[2] = vertice_map(r,c);
      f.indices[1] = vertice_map(r+1,c);
      f.indices[0] = vertice_map(r+1,c-1);
      m_mesh.faces.push_back(f);
    }
  }

  m_mesh.computeNormalsFromFaces();
}
Example 3: tc
bool SurfelsRGBDModeler :: addNewView(const RGBDImage& image_, Pose3D& depth_pose)
{
  ntk::TimeCount tc("SurfelsRGBDModeler::addNewView", 1);
  const float max_camera_normal_angle = ntk::deg_to_rad(90);

  RGBDImage image;
  image_.copyTo(image);
  if (!image_.normal().data)
  {
    OpenniRGBDProcessor processor;
    processor.computeNormalsPCL(image);
  }

  Pose3D rgb_pose = depth_pose;
  rgb_pose.toRightCamera(image.calibration()->rgb_intrinsics, image.calibration()->R, image.calibration()->T);

  // Rotation-only poses used to transform normals between camera and world frames.
  Pose3D world_to_camera_normal_pose;
  world_to_camera_normal_pose.applyTransformBefore(cv::Vec3f(0,0,0), depth_pose.cvEulerRotation());
  Pose3D camera_to_world_normal_pose = world_to_camera_normal_pose;
  camera_to_world_normal_pose.invert();

  const Mat1f& depth_im = image.depth();
  Mat1b covered_pixels (depth_im.size());
  covered_pixels = 0;
  std::list<Surfel> surfels_to_reinsert;

  // Surfel updating.
  for (SurfelMap::iterator next_it = m_surfels.begin(); next_it != m_surfels.end(); )
  {
    SurfelMap::iterator surfel_it = next_it;
    ++next_it;

    Surfel& surfel = surfel_it->second;
    if (!surfel.enabled())
      continue;

    // Project the existing surfel into the current depth image.
    Point3f surfel_2d = depth_pose.projectToImage(surfel.location);
    bool surfel_deleted = false;
    int r = ntk::math::rnd(surfel_2d.y);
    int c = ntk::math::rnd(surfel_2d.x);
    int d = ntk::math::rnd(surfel_2d.z);
    if (!is_yx_in_range(depth_im, r, c)
        || !image.depthMask()(r, c)
        || !image.isValidNormal(r,c))
      continue;

    const float update_max_dist = getCompatibilityDistance(depth_im(r,c));

    Vec3f camera_normal = image.normal()(r, c);
    normalize(camera_normal);

    Vec3f world_normal = camera_to_world_normal_pose.cameraTransform(camera_normal);
    normalize(world_normal);

    Vec3f eyev = camera_eye_vector(depth_pose, r, c);
    double camera_angle = acos(camera_normal.dot(-eyev));
    if (camera_angle > max_camera_normal_angle)
      continue;

    float normal_angle = acos(world_normal.dot(surfel.normal));
    // Surfels have different normals, maybe two different faces of the same object.
    if (normal_angle > (m_update_max_normal_angle*M_PI/180.0))
    {
      // Removal check. If a surfel has a different normal and is closer to the camera
      // than the new scan, remove it.
      if ((-surfel_2d.z) < depth_im(r,c) && surfel.n_views < 3)
      {
        m_surfels.erase(surfel_it);
        surfel_deleted = true;
      }
      continue;
    }

    // If the existing surfel is far from the new depth value:
    // - If the existing one had a worse point of view and was seen only a few times, remove it.
    // - Otherwise do not include the new one.
    if (std::abs(surfel_2d.z - depth_im(r,c)) > update_max_dist)
    {
      if (surfel.min_camera_angle > camera_angle && surfel.n_views < 3)
      {
        m_surfels.erase(surfel_it);
        surfel_deleted = true;
      }
      else
        covered_pixels(r,c) = 1;
      continue;
    }

    // Compatible surfel found: unproject the pixel and merge it into the surfel.
    const float depth = depth_im(r,c) + m_global_depth_offset;
    Point3f p3d = depth_pose.unprojectFromImage(Point2f(c,r), depth);
    cv::Vec3b rgb_color = bgr_to_rgb(image.mappedRgb()(r, c));

    Surfel image_surfel;
    image_surfel.location = p3d;
    image_surfel.normal = world_normal;
    image_surfel.color = rgb_color;
//......... remainder of the code omitted .........