This page collects typical usage examples of the C++ Pose3D class. If you are wondering what Pose3D is for and how to use it, the selected examples below may help.
Fifteen code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.

Example 1: estimateOptimalPlanes

void MeshRenderer :: estimateOptimalPlanes(const Pose3D& pose, float* near_plane, float* far_plane)
{
    float min_z = std::numeric_limits<float>::max();
    float max_z = 0.01;
    for (int i = 0; i < m_mesh->faces.size(); ++i)
    {
        const Point3f& v1 = m_mesh->vertices[m_mesh->faces[i].indices[0]];
        const Point3f& v2 = m_mesh->vertices[m_mesh->faces[i].indices[1]];
        const Point3f& v3 = m_mesh->vertices[m_mesh->faces[i].indices[2]];
        Point3f pv1 = pose.cameraTransform(v1);
        Point3f pv2 = pose.cameraTransform(v2);
        Point3f pv3 = pose.cameraTransform(v3);
        // The camera looks down the negative z axis: negate z to get positive depths.
        min_z = ntk::math::min(min_z, -pv1.z);
        min_z = ntk::math::min(min_z, -pv2.z);
        min_z = ntk::math::min(min_z, -pv3.z);
        max_z = ntk::math::max(max_z, -pv1.z);
        max_z = ntk::math::max(max_z, -pv2.z);
        max_z = ntk::math::max(max_z, -pv3.z);
    }
    ntk_dbg_print(min_z, 2);
    ntk_dbg_print(max_z, 2);
    if (min_z < 0)
        min_z = 0.01;
    if (max_z < min_z)
        max_z = (min_z*2);
    // Keep a 10% safety margin on both planes.
    *near_plane = min_z*0.9;
    *far_plane = max_z*1.1;
}
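
A minimal sketch of the call pattern (the renderer and camera pose are assumed to be set up elsewhere; Example 10 below shows the member setPose() doing exactly this internally):

    float near_plane = 0.f, far_plane = 0.f;
    // `renderer` is a MeshRenderer whose m_mesh is already assigned (assumed setup).
    renderer.estimateOptimalPlanes(camera_pose, &near_plane, &far_plane);
    // The planes then parameterize the projection, e.g.:
    // gluPerspective(fov_degrees, aspect_ratio, near_plane, far_plane);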

Example 2: addMeshToVertexBufferObject

void MeshViewer :: addMeshToVertexBufferObject(const ntk::Mesh& mesh, const Pose3D& pose, MeshViewerMode mode)
{
#if defined(NESTK_USE_GLEW) || defined(USE_GLEW)
    GLuint vbo_id = -1, vbo_faces_id = -1;
    glGenBuffersARB(1, &vbo_id);
    if (mesh.hasFaces())
        glGenBuffersARB(1, &vbo_faces_id);
    VertexBufferObject vbo;
    pose.cvCameraTransform().copyTo(vbo.model_view_matrix);
    // Transpose the matrix for OpenGL column-major.
    vbo.model_view_matrix = vbo.model_view_matrix.t();
    vbo.nb_faces = 0;
    vbo.vertex_id = vbo_id;
    vbo.faces_id = vbo_faces_id;
    vbo.has_faces = mesh.hasFaces();
    vbo.has_color = mesh.hasColors();
    vbo.has_texcoords = mesh.hasTexcoords();
    vbo.color_offset = mesh.vertices.size()*sizeof(Vec3f);
    vbo.texture_offset = mesh.vertices.size()*sizeof(Vec3f) + mesh.colors.size()*sizeof(Vec3b);
    vbo.nb_vertices = mesh.vertices.size();
    glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo.vertex_id);
    glBufferDataARB(GL_ARRAY_BUFFER_ARB,
                    mesh.colors.size()*sizeof(Vec3b)
                    + mesh.vertices.size()*sizeof(Vec3f)
                    + mesh.texcoords.size()*sizeof(Point2f), // size
                    0, // null pointer: just allocate memory
                    GL_STATIC_DRAW_ARB);
    glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, mesh.vertices.size()*sizeof(Vec3f), &mesh.vertices[0]);
    if (vbo.has_color)
        glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, vbo.color_offset, mesh.colors.size()*sizeof(Vec3b), &mesh.colors[0]);
    if (vbo.has_texcoords)
        glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, vbo.texture_offset, mesh.texcoords.size()*sizeof(Point2f), &mesh.texcoords[0]);
    if (vbo.has_faces)
    {
        vbo.nb_faces = mesh.faces.size();
        glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, vbo.faces_id);
        glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,
                        mesh.faces.size() * 3 * sizeof(GLuint), // size
                        (GLuint*)&mesh.faces[0],
                        GL_STATIC_DRAW_ARB);
    }
    m_upcoming_vertex_buffer_objects.push_back(vbo);
#endif
}
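
The buffer built above is packed rather than interleaved: all vertices first, then colors starting at color_offset, then texture coordinates at texture_offset. A hedged sketch of the matching client-state binding on the draw side (assumed for illustration; the project's actual rendering code is not part of this example):

    glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo.vertex_id);
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_FLOAT, 0, 0);  // vertices start at byte 0
    if (vbo.has_color)
    {
        glEnableClientState(GL_COLOR_ARRAY);
        glColorPointer(3, GL_UNSIGNED_BYTE, 0, (const GLvoid*)(size_t)vbo.color_offset);
    }
    if (vbo.has_texcoords)
    {
        glEnableClientState(GL_TEXTURE_COORD_ARRAY);
        glTexCoordPointer(2, GL_FLOAT, 0, (const GLvoid*)(size_t)vbo.texture_offset);
    }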

Example 3: calculateHandPos

Pose3D ViewBikeMath::calculateHandPos(const JointData& jointData, const JointData::Joint& joint, const RobotDimensions& robotDimensions)
{
    Pose3D handPos;
    // sign mirrors the y offset and the roll joints: +1 for the left arm, -1 for the right.
    float sign = joint == JointData::LShoulderPitch ? 1.f : -1.f;
    handPos.translate(robotDimensions.armOffset.x, sign * robotDimensions.armOffset.y, robotDimensions.armOffset.z);
    handPos.rotateY(-jointData.angles[joint]);
    handPos.rotateZ(sign * jointData.angles[joint + 1]);
    handPos.translate(robotDimensions.upperArmLength, 0, 0);
    handPos.rotateX(jointData.angles[joint + 2] * sign);
    handPos.rotateZ(sign * jointData.angles[joint + 3]);
    return handPos;
}
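
A hedged usage sketch (the surrounding framework types JointData and RobotDimensions are assumed to be filled elsewhere; only the call itself comes from the example):

    JointData joints;      // sensor joint angles, filled elsewhere (assumed)
    RobotDimensions dims;  // robot-specific link lengths and offsets (assumed)
    // Forward kinematics of the left arm, relative to the torso frame:
    Pose3D leftHand = ViewBikeMath::calculateHandPos(joints, JointData::LShoulderPitch, dims);
    // leftHand.translation now holds the hand position, leftHand.rotation its orientation.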

Example 4: rms_optimize_3d

// Stand-alone root-mean-square 3D pose optimization.
double rms_optimize_3d(Pose3D& pose3d,
                       const std::vector<Point3f>& ref_points,
                       const std::vector<Point3f>& img_points)
{
    std::vector<double> fx;
    std::vector<double> initial(6);
    reprojection_error_3d f(pose3d, ref_points, img_points);
    LevenbergMarquartMinimizer optimizer;
    std::fill(stl_bounds(initial), 0);
    fx.resize(ref_points.size()*3);
    optimizer.minimize(f, initial);
    optimizer.diagnoseOutcome();
    f.evaluate(initial, fx);
    double error = f.outputNorm(initial);
    // Compose the optimized increment with the initial pose:
    // translation from initial[3..5], Euler rotation from initial[0..2].
    pose3d.applyTransformAfter(Vec3f(initial[3],initial[4],initial[5]), cv::Vec3f(initial[0], initial[1], initial[2]));
    return error;
}
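
A minimal usage sketch (the point data is assumed; the optimizer refines the pose in place and returns the residual norm):

    std::vector<Point3f> ref_points;  // 3D points in the reference frame
    std::vector<Point3f> img_points;  // the same points observed in the camera frame
    // ... fill both vectors with matched correspondences ...
    Pose3D pose;  // initial guess, e.g. identity or a prior estimate
    double residual = rms_optimize_3d(pose, ref_points, img_points);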

Example 5: computeRegistration

template <class PointT>
bool RelativePoseEstimatorICP<PointT> ::
computeRegistration(Pose3D& relative_pose,
                    PointCloudConstPtr source_cloud,
                    PointCloudConstPtr target_cloud,
                    PointCloudType& aligned_cloud)
{
    pcl::IterativeClosestPoint<PointT, PointT> reg;
    reg.setMaximumIterations (m_max_iterations);
    reg.setTransformationEpsilon (1e-10);
    reg.setRANSACOutlierRejectionThreshold (m_ransac_outlier_threshold);
    reg.setMaxCorrespondenceDistance (m_distance_threshold);
    reg.setInputCloud (source_cloud);
    reg.setInputTarget (target_cloud);
    reg.align (aligned_cloud);
    if (0) // debug mesh dump, disabled
    {
        ntk::Mesh mesh1, mesh2;
        pointCloudToMesh(mesh1, aligned_cloud);
        pointCloudToMesh(mesh2, *target_cloud);
        mesh1.saveToPlyFile("debug_icp_1.ply");
        mesh2.saveToPlyFile("debug_icp_2.ply");
    }
    if (!reg.hasConverged())
    {
        ntk_dbg(1) << "ICP did not converge, ignoring.";
        return false;
    }
    ntk_dbg_print(reg.getFitnessScore(), 1);
    Eigen::Matrix4f t = reg.getFinalTransformation ();
    cv::Mat1f T(4,4);
    //toOpencv(t,T);
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            T(r,c) = t(r,c);
    relative_pose.setCameraTransform(T);
    return true;
}
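
A hedged usage sketch (the cloud contents and the member thresholds are assumed; in the real class this method may sit behind a higher-level estimation entry point):

    pcl::PointCloud<pcl::PointXYZ>::Ptr source (new pcl::PointCloud<pcl::PointXYZ>);
    pcl::PointCloud<pcl::PointXYZ>::Ptr target (new pcl::PointCloud<pcl::PointXYZ>);
    // ... fill the clouds from two depth frames ...
    pcl::PointCloud<pcl::PointXYZ> aligned;
    Pose3D relative_pose;
    RelativePoseEstimatorICP<pcl::PointXYZ> estimator;
    if (estimator.computeRegistration(relative_pose, source, target, aligned))
        ntk_dbg_print(relative_pose.cvTranslation(), 1);  // source-to-target transform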

Example 6: generatePointCloudMesh

void MeshGenerator :: generatePointCloudMesh(const RGBDImage& image,
                                             const Pose3D& depth_pose,
                                             const Pose3D& rgb_pose)
{
    m_mesh.clear();
    m_mesh.vertices.reserve(image.depth().rows*image.depth().cols);
    m_mesh.colors.reserve(image.depth().rows*image.depth().cols);
    m_mesh.normals.reserve(image.depth().rows*image.depth().cols);
    const cv::Mat1f& depth_im = image.depth();
    const cv::Mat1b& mask_im = image.depthMask();
    cv::Mat3f voxels (depth_im.size());
    cv::Mat3f rgb_points (depth_im.size());
    cv::Mat1b subsample_mask(mask_im.size());
    subsample_mask = 0;
    for (float r = 0; r < subsample_mask.rows-1; r += 1.0/m_resolution_factor)
        for (float c = 0; c < subsample_mask.cols-1; c += 1.0/m_resolution_factor)
            subsample_mask(ntk::math::rnd(r),ntk::math::rnd(c)) = 1;
    subsample_mask = mask_im & subsample_mask;
    depth_pose.unprojectFromImage(depth_im, subsample_mask, voxels);
    if (m_use_color && image.hasRgb())
        rgb_pose.projectToImage(voxels, subsample_mask, rgb_points);
    for (int r = 0; r < voxels.rows; ++r)
    {
        Vec3f* voxels_data = voxels.ptr<Vec3f>(r);
        const uchar* mask_data = subsample_mask.ptr<uchar>(r);
        for (int c = 0; c < voxels.cols; ++c)
        {
            if (!mask_data[c])
                continue;
            Vec3b color (0,0,0);
            if (m_use_color)
            {
                Point3f prgb = rgb_points(r,c);
                int i_y = ntk::math::rnd(prgb.y);
                int i_x = ntk::math::rnd(prgb.x);
                if (is_yx_in_range(image.rgb(), i_y, i_x))
                {
                    Vec3b bgr = image.rgb()(i_y, i_x);
                    color = Vec3b(bgr[2], bgr[1], bgr[0]);
                }
            }
            else
            {
                int g = 0;
                if (image.intensity().data)
                    g = image.intensity()(r,c);
                else
                    g = 255 * voxels_data[c][2] / 10.0;
                color = Vec3b(g,g,g);
            }
            m_mesh.vertices.push_back(voxels_data[c]);
            m_mesh.colors.push_back(color);
        }
    }
}
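
The subsampling loop above keeps one pixel every 1/m_resolution_factor rows and columns. A worked instance (the value is assumed for illustration):

    float resolution_factor = 0.5f;         // hypothetical setting
    float step = 1.0f / resolution_factor;  // = 2.0: one kept pixel per 2x2 block,
                                            // i.e. roughly 25% of the valid depth pixels.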

Example 7: generateTriangleMesh

void MeshGenerator :: generateTriangleMesh(const RGBDImage& image,
                                           const Pose3D& depth_pose,
                                           const Pose3D& rgb_pose)
{
    const Mat1f& depth_im = image.depth();
    const Mat1b& mask_im = image.depthMask();
    m_mesh.clear();
    if (m_use_color)
        image.rgb().copyTo(m_mesh.texture);
    else if (image.intensity().data)
        toMat3b(normalize_toMat1b(image.intensity())).copyTo(m_mesh.texture);
    else
    {
        m_mesh.texture.create(depth_im.size());
        m_mesh.texture = Vec3b(255,255,255);
    }
    m_mesh.vertices.reserve(depth_im.cols*depth_im.rows);
    m_mesh.texcoords.reserve(depth_im.cols*depth_im.rows);
    m_mesh.colors.reserve(depth_im.cols*depth_im.rows);
    Mat1i vertice_map(depth_im.size());
    vertice_map = -1;
    for_all_rc(depth_im)
    {
        if (!mask_im(r,c))
            continue;
        double depth = depth_im(r,c);
        Point3f p3d = depth_pose.unprojectFromImage(Point3f(c,r,depth));
        Point3f p2d_rgb;
        Point2f texcoords;
        if (m_use_color)
        {
            p2d_rgb = rgb_pose.projectToImage(p3d);
            texcoords = Point2f(p2d_rgb.x/image.rgb().cols, p2d_rgb.y/image.rgb().rows);
        }
        else
        {
            p2d_rgb = Point3f(c,r,depth);
            texcoords = Point2f(p2d_rgb.x/image.intensity().cols, p2d_rgb.y/image.intensity().rows);
        }
        vertice_map(r,c) = m_mesh.vertices.size();
        m_mesh.vertices.push_back(p3d);
        // m_mesh.colors.push_back(bgr_to_rgb(im.rgb()(p2d_rgb.y, p2d_rgb.x)));
        m_mesh.texcoords.push_back(texcoords);
    }
    // Build two triangles per 2x2 pixel block, skipping edges with large depth jumps.
    for_all_rc(vertice_map)
    {
        if (vertice_map(r,c) < 0)
            continue;
        if ((c < vertice_map.cols - 1) && (r < vertice_map.rows - 1) &&
            (vertice_map(r+1,c) >= 0) && (vertice_map(r,c+1) >= 0) &&
            (std::abs(depth_im(r,c) - depth_im(r+1, c)) < m_max_delta_depth) &&
            (std::abs(depth_im(r,c) - depth_im(r, c+1)) < m_max_delta_depth))
        {
            Face f;
            f.indices[2] = vertice_map(r,c);
            f.indices[1] = vertice_map(r,c+1);
            f.indices[0] = vertice_map(r+1,c);
            m_mesh.faces.push_back(f);
        }
        if ((c > 0) && (r < vertice_map.rows - 1) &&
            (vertice_map(r+1,c) >= 0) && (vertice_map(r+1,c-1) >= 0) &&
            (std::abs(depth_im(r,c) - depth_im(r+1, c)) < m_max_delta_depth) &&
            (std::abs(depth_im(r,c) - depth_im(r+1, c-1)) < m_max_delta_depth))
        {
            Face f;
            f.indices[2] = vertice_map(r,c);
            f.indices[1] = vertice_map(r+1,c);
            f.indices[0] = vertice_map(r+1,c-1);
            m_mesh.faces.push_back(f);
        }
    }
    m_mesh.computeNormalsFromFaces();
}
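
A hedged sketch of how these MeshGenerator entry points are typically driven (setUseColor(), mesh() and the calibration pose accessors are assumptions inferred from the member names above; only the generate* signatures come from the examples):

    MeshGenerator generator;
    generator.setUseColor(true);  // assumed setter behind m_use_color
    RGBDImage image;              // grabbed and processed elsewhere (assumed)
    // The calibration object is assumed to expose the two poses used above:
    Pose3D depth_pose = *image.calibration()->depth_pose;
    Pose3D rgb_pose = *image.calibration()->rgb_pose;
    generator.generateTriangleMesh(image, depth_pose, rgb_pose);
    const ntk::Mesh& mesh = generator.mesh();  // assumed accessor for m_mesh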

Example 8: generateSurfelsMesh

void MeshGenerator :: generateSurfelsMesh(const RGBDImage& image,
                                          const Pose3D& depth_pose,
                                          const Pose3D& rgb_pose)
{
    double min_val = 0, max_val = 0;
    if (image.amplitude().data)
        minMaxLoc(image.amplitude(), &min_val, &max_val);
    m_mesh.clear();
    const cv::Mat1f& depth_im = image.depth();
    const cv::Mat1b& mask_im = image.depthMask();
    for_all_rc(depth_im)
    {
        int i_r = r;
        int i_c = c;
        if (!is_yx_in_range(depth_im, i_r, i_c))
            continue;
        if (!mask_im(r,c))
            continue;
        double depth = depth_im(i_r,i_c);
        cv::Point3f p = depth_pose.unprojectFromImage(Point2f(c,r), depth);
        Point3f normal = image.normal().data ? image.normal()(i_r, i_c) : Vec3f(0,0,1);
        Vec3b color (0,0,0);
        if (m_use_color)
        {
            cv::Point3f prgb = rgb_pose.projectToImage(p);
            int i_y = ntk::math::rnd(prgb.y);
            int i_x = ntk::math::rnd(prgb.x);
            if (is_yx_in_range(image.rgb(), i_y, i_x))
            {
                Vec3b bgr = image.rgb()(i_y, i_x);
                color = Vec3b(bgr[2], bgr[1], bgr[0]);
            }
        }
        else
        {
            int g = 0;
            if (image.amplitude().data)
                g = 255.0 * (image.amplitude()(i_r,i_c) - min_val) / (max_val-min_val);
            else
                g = 255 * depth / 10.0;
            color = Vec3b(g,g,g);
        }
        Surfel s;
        s.color = color;
        s.confidence = 0;
        s.location = p;
        s.normal = normal;
        s.n_views = 1;
        double normal_z = std::max(normal.z, 0.5f);
        s.radius = m_resolution_factor * ntk::math::sqrt1_2 * depth
                   / (depth_pose.focalX() * normal_z);
        m_mesh.addSurfel(s);
    }
}
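
The radius formula sizes each surfel to roughly the metric footprint of one depth pixel: depth/focalX is the width a pixel covers at that distance, the sqrt(1/2) factor turns it into a covering disc radius, and dividing by normal.z (clamped at 0.5) enlarges surfels seen at grazing angles. A minimal restatement as a free function (the function name and explicit constant are assumptions for illustration):

    #include <algorithm>  // std::max

    float surfelRadius(float depth, float focal_x, float normal_z, float resolution_factor)
    {
        const float kSqrt1_2 = 0.70710678f;   // same value as ntk::math::sqrt1_2
        float nz = std::max(normal_z, 0.5f);  // clamp grazing-angle normals
        return resolution_factor * kSqrt1_2 * depth / (focal_x * nz);
    }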

Example 9: calibrate_kinect_depth

//......... part of the code omitted .........
    std::vector<Mat> rvecs, tvecs;
    int flags = 0;
    if (global::opt_ignore_distortions())
        flags = CV_CALIB_ZERO_TANGENT_DIST;
    double error = calibrateCamera(pattern_points, good_corners, global::image_size,
                                   global::depth_intrinsics, global::depth_distortion,
                                   rvecs, tvecs, flags);
    if (global::opt_ignore_distortions())
        global::depth_distortion = 0.f;
    ntk_dbg_print(error, 1);
    int good_i = 0;
    foreach_idx(stereo_i, stereo_corners)
    {
        if (stereo_corners[stereo_i].size() > 0)
        {
            QString filename = global::images_list[stereo_i];
            QDir cur_image_dir (global::images_dir.absoluteFilePath(filename));
            std::string full_filename;
            cv::Mat3b image;
            load_intensity_file(cur_image_dir.path().toStdString(), full_filename, image);
            ntk_ensure(image.data, "Could not load intensity image");
            kinect_shift_ir_to_depth(image);
            cv::Mat1f depth_image;
            if (is_file(cur_image_dir.absoluteFilePath("raw/depth.yml").toStdString()))
            {
                full_filename = cur_image_dir.absoluteFilePath("raw/depth.yml").toStdString();
                depth_image = imread_yml(full_filename);
            }
            else if (is_file(cur_image_dir.absoluteFilePath("raw/depth.raw").toStdString()))
            {
                full_filename = cur_image_dir.absoluteFilePath("raw/depth.raw").toStdString();
                depth_image = imread_Mat1f_raw(full_filename);
            }
            ntk_ensure(depth_image.data, "Could not load depth image");
            cv::Mat3b undistorted_image;
            if (global::opt_ignore_distortions())
                image.copyTo(undistorted_image);
            else
                undistort(image, undistorted_image, global::depth_intrinsics, global::depth_distortion);
            std::vector<Point2f> current_view_corners;
            calibrationCorners(full_filename, "corners",
                               global::opt_pattern_width(), global::opt_pattern_height(),
                               current_view_corners, undistorted_image, 1);
            if (current_view_corners.size() == (global::opt_pattern_width()*global::opt_pattern_height()))
            {
                stereo_corners[stereo_i] = current_view_corners;
                showCheckerboardCorners(undistorted_image, stereo_corners[stereo_i], 200);
            }
            else
            {
                stereo_corners[stereo_i].resize(0);
                continue;
            }
            // Generate calibration points
            {
                // FIXME: why do rvecs and tvecs from calibrateCamera seem to be nonsense?
                // Call findExtrinsics another time to get good chessboard estimates.
                cv::Mat1f H;
                estimate_checkerboard_pose(pattern_points[0],
                                           current_view_corners,
                                           global::depth_intrinsics,
                                           H);
                Pose3D pose;
                pose.setCameraParametersFromOpencv(global::depth_intrinsics);
                ntk_dbg_print(pose, 1);
                pose.setCameraTransform(H);
                foreach_idx(pattern_i, pattern_points[0])
                {
                    ntk_dbg_print(pattern_points[0][pattern_i], 1);
                    Point3f p = pose.projectToImage(pattern_points[0][pattern_i]);
                    ntk_dbg_print(p, 1);
                    double kinect_raw = depth_image(p.y, p.x);
                    if (!(kinect_raw < 2047)) continue;
                    ntk_dbg_print(kinect_raw, 1);
                    double linear_depth = 1.0 / (kinect_raw * -0.0030711016 + 3.3309495161);
                    const float k1 = 1.1863;
                    const float k2 = 2842.5;
                    const float k3 = 0.1236;
                    double tan_depth = k3 * tanf(kinect_raw/k2 + k1);
                    ntk_dbg_print(linear_depth, 1);
                    ntk_dbg_print(tan_depth, 1);
                    depth_values.push_back(DepthCalibrationPoint(kinect_raw, p.z));
                }
            }
            ++good_i;
        }
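
The two conversions probed in the inner loop are the well-known approximations for turning 11-bit Kinect raw disparity into meters. As a standalone sketch (constants copied from the loop above; the function names are assumptions, not an API of the project):

    #include <cmath>

    // Linear-in-inverse-depth model:
    double rawToMetersLinear(int kinect_raw)
    {
        return 1.0 / (kinect_raw * -0.0030711016 + 3.3309495161);
    }

    // Tangent model, usually a bit more accurate over the full range:
    double rawToMetersTan(int kinect_raw)
    {
        const double k1 = 1.1863, k2 = 2842.5, k3 = 0.1236;
        return k3 * std::tan(kinect_raw / k2 + k1);
    }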

Example 10: setPose

void MeshRenderer :: setPose(const Pose3D& pose, float* arg_near_plane, float* arg_far_plane)
{
    VertexBufferObject& vbo = m_vertex_buffer_object;
    pose.cvCameraTransform().copyTo(vbo.model_view_matrix);
    // Transpose the matrix for OpenGL column-major.
    vbo.model_view_matrix = vbo.model_view_matrix.t();
    if (!(arg_near_plane && arg_far_plane))
    {
        estimateOptimalPlanes(pose, &m_last_near_plane, &m_last_far_plane);
    }
    else
    {
        m_last_near_plane = *arg_near_plane;
        m_last_far_plane = *arg_far_plane;
    }
    m_pbuffer->makeCurrent();
    glMatrixMode (GL_MODELVIEW);
    glLoadIdentity ();
    cv::Vec3f euler_angles = pose.cvEulerRotation();
    glTranslatef(pose.cvTranslation()[0], pose.cvTranslation()[1], pose.cvTranslation()[2]);
    glRotatef(euler_angles[2]*180.0/M_PI, 0, 0, 1);
    glRotatef(euler_angles[1]*180.0/M_PI, 0, 1, 0);
    glRotatef(euler_angles[0]*180.0/M_PI, 1, 0, 0);
    glMatrixMode (GL_PROJECTION);
    glLoadIdentity ();
    // Shift the viewport so the principal point lands at the pbuffer center.
    double dx = pose.imageCenterX() - (m_pbuffer->width() / 2.0);
    double dy = pose.imageCenterY() - (m_pbuffer->height() / 2.0);
    glViewport(dx, -dy, m_pbuffer->width(), m_pbuffer->height());
    if (pose.isOrthographic())
    {
        ntk_dbg_print(pose.focalX()/2, 0);
        ntk_dbg_print(pose.focalY()/2, 0);
        glOrtho(-pose.focalX()/2, pose.focalX()/2, -pose.focalY()/2, pose.focalY()/2, m_last_near_plane, m_last_far_plane);
    }
    else
    {
        // Vertical field of view derived from the intrinsic focal length.
        double fov = (180.0/M_PI) * 2.0*atan(m_pbuffer->height()/(2.0*pose.focalY()));
        // double fov2 = (180.0/M_PI) * 2.0*atan(image.cols/(2.0*pose.focalX()));
        // ntk_dbg_print(fov2, 2);
        // gluPerspective(fov2, double(image.rows)/image.cols, near_plane, far_plane);
        gluPerspective(fov, double(m_pbuffer->width())/m_pbuffer->height(), m_last_near_plane, m_last_far_plane);
    }
    glMatrixMode (GL_MODELVIEW);
}
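
The perspective branch recovers the vertical field of view from the focal length via fov = 2*atan(h/(2*fy)). A worked instance with typical Kinect-like values (assumed for illustration only):

    // h = 480 px, focalY = 600 px:
    double fov = (180.0/M_PI) * 2.0 * atan(480.0 / (2.0 * 600.0));  // ~43.6 degrees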

Example 11: addNewView

bool SurfelsRGBDModeler :: addNewView(const RGBDImage& image_, Pose3D& depth_pose)
{
    ntk::TimeCount tc("SurfelsRGBDModeler::addNewView", 1);
    const float max_camera_normal_angle = ntk::deg_to_rad(90);
    RGBDImage image;
    image_.copyTo(image);
    if (!image_.normal().data)
    {
        OpenniRGBDProcessor processor;
        processor.computeNormalsPCL(image);
    }
    Pose3D rgb_pose = depth_pose;
    rgb_pose.toRightCamera(image.calibration()->rgb_intrinsics, image.calibration()->R, image.calibration()->T);
    Pose3D world_to_camera_normal_pose;
    world_to_camera_normal_pose.applyTransformBefore(cv::Vec3f(0,0,0), depth_pose.cvEulerRotation());
    Pose3D camera_to_world_normal_pose = world_to_camera_normal_pose;
    camera_to_world_normal_pose.invert();
    const Mat1f& depth_im = image.depth();
    Mat1b covered_pixels (depth_im.size());
    covered_pixels = 0;
    std::list<Surfel> surfels_to_reinsert;
    // Surfel updating.
    for (SurfelMap::iterator next_it = m_surfels.begin(); next_it != m_surfels.end(); )
    {
        SurfelMap::iterator surfel_it = next_it;
        ++next_it;
        Surfel& surfel = surfel_it->second;
        if (!surfel.enabled())
            continue;
        Point3f surfel_2d = depth_pose.projectToImage(surfel.location);
        bool surfel_deleted = false;
        int r = ntk::math::rnd(surfel_2d.y);
        int c = ntk::math::rnd(surfel_2d.x);
        int d = ntk::math::rnd(surfel_2d.z);
        if (!is_yx_in_range(depth_im, r, c)
            || !image.depthMask()(r, c)
            || !image.isValidNormal(r,c))
            continue;
        const float update_max_dist = getCompatibilityDistance(depth_im(r,c));
        Vec3f camera_normal = image.normal()(r, c);
        normalize(camera_normal);
        Vec3f world_normal = camera_to_world_normal_pose.cameraTransform(camera_normal);
        normalize(world_normal);
        Vec3f eyev = camera_eye_vector(depth_pose, r, c);
        double camera_angle = acos(camera_normal.dot(-eyev));
        if (camera_angle > max_camera_normal_angle)
            continue;
        float normal_angle = acos(world_normal.dot(surfel.normal));
        // Surfels have different normals, maybe two different faces of the same object.
        if (normal_angle > (m_update_max_normal_angle*M_PI/180.0))
        {
            // Removal check. If a surfel has a different normal and is closer to the camera
            // than the new scan, remove it.
            if ((-surfel_2d.z) < depth_im(r,c) && surfel.n_views < 3)
            {
                m_surfels.erase(surfel_it);
                surfel_deleted = true;
            }
            continue;
        }
        // If the existing surfel is far from the new depth value:
        // - If the existing one had a worse viewpoint and was seen only once, remove it.
        // - Otherwise do not include the new one.
        if (std::abs(surfel_2d.z - depth_im(r,c)) > update_max_dist)
        {
            if (surfel.min_camera_angle > camera_angle && surfel.n_views < 3)
            {
                m_surfels.erase(surfel_it);
                surfel_deleted = true;
            }
            else
                covered_pixels(r,c) = 1;
            continue;
        }
        // Compatible surfel found.
        const float depth = depth_im(r,c) + m_global_depth_offset;
        Point3f p3d = depth_pose.unprojectFromImage(Point2f(c,r), depth);
        cv::Vec3b rgb_color = bgr_to_rgb(image.mappedRgb()(r, c));
        Surfel image_surfel;
        image_surfel.location = p3d;
        image_surfel.normal = world_normal;
        image_surfel.color = rgb_color;
//......... part of the code omitted .........

Example 12: addMeshToDisplayList

void MeshViewer :: addMeshToDisplayList(const ntk::Mesh& mesh, const Pose3D& pose, MeshViewerMode mode)
{
    int new_list_index = glGenLists(1);
    glNewList(new_list_index, GL_COMPILE);
    if (mesh.texture.data)
    {
        // Last texture id was just created
        GLuint texture = m_upcoming_textures[m_upcoming_textures.size()-1];
        glEnable(GL_TEXTURE_2D);
        glBindTexture( GL_TEXTURE_2D, texture );
    }
    else
    {
        glDisable(GL_TEXTURE_2D);
    }
    glMatrixMode(GL_MODELVIEW);
    glPushMatrix();
    glTranslatef(pose.cvTranslation()[0], pose.cvTranslation()[1], pose.cvTranslation()[2]);
    Vec3f euler_angles = pose.cvEulerRotation();
    glRotatef(rad_to_deg(euler_angles[0]), 1, 0, 0);
    glRotatef(rad_to_deg(euler_angles[1]), 0, 1, 0);
    glRotatef(rad_to_deg(euler_angles[2]), 0, 0, 1);
    if (mode & WIREFRAME)
    {
        glPolygonMode( GL_FRONT_AND_BACK, GL_LINE );
    }
    else
    {
        glPolygonMode( GL_FRONT_AND_BACK, GL_FILL );
    }
    int64 point_start_time = ntk::Time::getMillisecondCounter();
    if (mesh.faces.size() == 0)
    {
        glBegin(GL_POINTS);
        for (int i = 0; i < mesh.vertices.size(); ++i)
        {
            const Point3f& v = mesh.vertices[i];
            // if (i % 1000 == 0)
            //     ntk_dbg_print(v, 1);
            if (mesh.hasColors())
                glColor3f(mesh.colors[i][0]/255.0, mesh.colors[i][1]/255.0, mesh.colors[i][2]/255.0);
            glVertex3f(v.x, v.y, v.z);
        }
        glEnd();
    }
    int64 point_end_time = ntk::Time::getMillisecondCounter();
    ntk_dbg_print(point_end_time-point_start_time, 1);
    {
        glBegin(GL_TRIANGLES);
        for (int i = 0; i < mesh.faces.size(); ++i)
        {
            int i1 = mesh.faces[i].indices[0];
            int i2 = mesh.faces[i].indices[1];
            int i3 = mesh.faces[i].indices[2];
            const Point3f& v1 = mesh.vertices[i1];
            const Point3f& v2 = mesh.vertices[i2];
            const Point3f& v3 = mesh.vertices[i3];
            // Flat face normal from the triangle edges.
            Vec3f nm = (Vec3f(v2-v1).cross(v3-v2));
            normalize(nm);
            // Set the normal before emitting vertices: glNormal3f applies to
            // the glVertex3f calls that follow it.
            glNormal3f(nm[0], nm[1], nm[2]);
            if (!mesh.hasColors())
                glColor3f(1.0f, 0.0f, 0.0f);
            if (mesh.hasColors())
                glColor3f(mesh.colors[i1][0]/255.0, mesh.colors[i1][1]/255.0, mesh.colors[i1][2]/255.0);
            if (mesh.hasTexcoords())
                glTexCoord2f(mesh.texcoords[i1].x, mesh.texcoords[i1].y);
            glVertex3f(v1.x, v1.y, v1.z);
            if (mesh.hasColors())
                glColor3f(mesh.colors[i2][0]/255.0, mesh.colors[i2][1]/255.0, mesh.colors[i2][2]/255.0);
            if (mesh.hasTexcoords())
                glTexCoord2f(mesh.texcoords[i2].x, mesh.texcoords[i2].y);
            glVertex3f(v2.x, v2.y, v2.z);
            if (mesh.hasColors())
                glColor3f(mesh.colors[i3][0]/255.0, mesh.colors[i3][1]/255.0, mesh.colors[i3][2]/255.0);
            if (mesh.hasTexcoords())
                glTexCoord2f(mesh.texcoords[i3].x, mesh.texcoords[i3].y);
            glVertex3f(v3.x, v3.y, v3.z);
        }
        glEnd();
    }
    glMatrixMode(GL_MODELVIEW);
    glPopMatrix();
    glEndList();
    m_upcoming_display_lists.push_back(new_list_index);
}

Example 13: main

int main(void)
{
    Pose3D pz;
    pz.setAxisAngle(0,1,0, M_PI/3);
    pz.setPosition(0,2,0);
    Pose3D pz2;
    pz2.setAxisAngle(0,1,0, M_PI/2 - M_PI/3);
    pz2.setPosition(0,1,0);
    pz.multiplyPose(pz2);
    cout << pz;
    NEWMAT::Matrix m = pz.asMatrix();
    cout << m;
    Position p;
    p.x = 0;
    p.y = 0;
    p.z = 1;
    pz.applyToPosition(p);
    printf("After rotate: %f %f %f\n", p.x, p.y, p.z);
    cout << endl;
    for (int ind = 0; ind < 2; ind++)
    {
        bool caching;
        if (ind == 0) {
            caching = true;
            std::cout << "Caching Mode" << std::endl;
        }
        else {
            caching = false;
            std::cout << std::endl << std::endl << std::endl << "Not Caching Mode" << std::endl;
        }
        double dx,dy,dz,dyaw,dp,dr;
        std::cout << "Creating TransformReference" << std::endl;
        TransformReference mTR(caching);
        dx = dy = dz = 0;
        dyaw = dp = dr = 0.1;
        uint64_t atime;
        timeval temp_time_struct;
        gettimeofday(&temp_time_struct,NULL);
        atime = temp_time_struct.tv_sec * 1000000000ULL + (uint64_t)temp_time_struct.tv_usec * 1000ULL;
        std::cout << "Setting values" << std::endl;
        // Fill in some transforms
        // mTR.setWithEulers(10,2,1,1,1,dyaw,dp,dr,atime); // Switching out for DH params below
        mTR.setWithDH("10","2",1.0,1.0,1.0,dyaw,atime);
        //mTR.setWithEulers("2","3",1-1,1,1,dyaw,dp,dr,atime-1000);
        mTR.setWithEulers("2","3",1,1,1,dyaw,dp,dr,atime-100);
        mTR.setWithEulers("2","3",1,1,1,dyaw,dp,dr,atime-50);
        mTR.setWithEulers("2","3",1,1,1,dyaw,dp,dr,atime-1000);
        mTR.setWithEulers("2","3",1,1,1,dyaw,dp,dr,atime+500);
        mTR.setWithEulers("2","3",1+100,1,1,dyaw,dp,dr,atime+1000);
        mTR.setWithEulers("2","3",1,1,1,dyaw,dp,dr,atime+1100);
        mTR.setWithEulers("3","5",dx,dy,dz,dyaw,dp,dr,atime);
        mTR.setWithEulers("5","1",dx,dy,dz,dyaw,dp,dr,atime);
        mTR.setWithEulers("6","5",dx,dy,dz,dyaw,dp,dr,atime);
        mTR.setWithEulers("6","5",dx,dy,dz,dyaw,dp,dr,atime);
        mTR.setWithEulers("7","6",1,1,1,dyaw,dp,dr,atime);
        mTR.setWithDH("8","7",1.0,1.0,1.0,dyaw,atime);
        //mTR.setWithEulers("8","7",1,1,1,dyaw,dp,dr,atime); // Switching out for DH params above
        std::cout << "Trying some tests" << std::endl;
        // Demonstrate the InvalidFrame LookupException
        try
        {
            std::cout << mTR.viewChain("10","9");
        }
        catch (TransformReference::LookupException &ex)
        {
            std::cout << "Caught " << ex.what() << std::endl;
        }
        // See the list of transforms to get between the frames
        std::cout << "Viewing (10,8):" << std::endl;
        std::cout << mTR.viewChain("10","8");
        // See the resultant transform
        std::cout << "Calling getMatrix(10,8)" << std::endl;
        // NEWMAT::Matrix mat = mTR.getMatrix(1,1);
        NEWMAT::Matrix mat = mTR.getMatrix("10","8",atime);
//......... part of the code omitted .........
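
A worked check of the rotation composition at the top of this example (arithmetic only; applyToPosition is assumed to apply the full pose, translation included):

    // pz rotates by pi/3 about y and pz2 by (pi/2 - pi/3), so after multiplyPose
    // the combined rotation is pi/2 about y. For a pure y rotation by angle a:
    //   x' =  x*cos(a) + z*sin(a)
    //   z' = -x*sin(a) + z*cos(a)
    // Applied to (0, 0, 1) with a = pi/2: x' = 1, z' = 0, so the rotation part
    // maps (0, 0, 1) onto (1, 0, 0); the printed coordinates additionally
    // reflect the composed translation of the two poses.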

Example 14: setBehaviorData

void ODEOutBehaviorProvider::setBehaviorData()
{
    int i = 0;
    int numSet = 0;
    int fallState = FallDownState::upright;
    bool bumperRight = false, bumperLeft = false, chestButton = false;
    // if(theKeyStates != NULL){
    //     bumperRight = theKeyStates->pressed[KeyStates::RBumperRight] || theKeyStates->pressed[KeyStates::RBumperLeft];
    //     bumperLeft = theKeyStates->pressed[KeyStates::LBumperRight] || theKeyStates->pressed[KeyStates::LBumperLeft];
    //     chestButton = theKeyStates->pressed[KeyStates::ChestButton];
    // }
    if (theFallDownState != NULL) {
        fallState = theFallDownState->state;
    }
    i = 0;
    outBehaviorData.intData.clear();
    outBehaviorData.intData.resize(OutBehaviorData::NUM_INT_DATA);
    outBehaviorData.intData[i++] = fallState;    // fallen state
    outBehaviorData.intData[i++] = bumperRight;  // bumper right
    outBehaviorData.intData[i++] = bumperLeft;   // bumper left
    outBehaviorData.intData[i++] = chestButton;  // chest button
    // numSet = i;
    // if(numSet == OutBehaviorData::NUM_INT_DATA)
    //     std::cout<<"Num of Data Set missmatch with NUM_INT_DATA: NumSet: "<<numSet<<", NUM_INT_DATA: "<<OutBehaviorData::NUM_INT_DATA<<std::endl;
    // printf("%d Fallen and Button Data Set to :\n", outBehaviorData.intData.size());
    // for(i = 0; i < numSet; i++)
    // {
    //     printf("the %d number is ", i);
    //     std::cout<<outBehaviorData.intData[i]<<std::endl;
    // }
    //======== for joint data
    i = 0;
    outBehaviorData.jointData.clear();
    outBehaviorData.jointData.resize(OutBehaviorData::NUM_JOINT_DATA);
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::HeadYaw];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::HeadPitch];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::RShoulderPitch];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::RShoulderRoll];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::RElbowYaw];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::RElbowRoll];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::LShoulderPitch];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::LShoulderRoll];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::LElbowYaw];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::LElbowRoll];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::RHipYawPitch];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::RHipRoll];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::RHipPitch];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::RKneePitch];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::RAnklePitch];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::RAnkleRoll];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::LHipYawPitch];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::LHipRoll];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::LHipPitch];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::LKneePitch];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::LAnklePitch];
    outBehaviorData.jointData[i++] = (float)theSensoryJointData->angles[JointData::LAnkleRoll];
    // numSet = i;
    // if(numSet == OutBehaviorData::NUM_JOINT_DATA)
    //     std::cout<<"Num of Data Set missmatch with NUM_JOINT_DATA: NumSet: "<<numSet<<", NUM_JOINT_DATA: "<<OutBehaviorData::NUM_JOINT_DATA<<std::endl;
    // printf("%d joint data Set to :\n", outBehaviorData.jointData.size());
    // for(i = 0; i < numSet; i++)
    // {
    //     printf("the %d number is ", i);
    //     std::cout<<outBehaviorData.jointData[i]<<std::endl;
    // }
    RotationMatrix Rsupport;
    Rsupport = theNaoStructure->uLink[NaoStructure::bodyLink].R.invert();
    Pose3D Tsupport;
    Tsupport.rotation = Rsupport;
    Tsupport.translate(theNaoStructure->uLink[NaoStructure::bodyLink].p*(-1));
    if (theNaoStructure->supportingMode == NaoConfig::SUPPORT_MODE_LEFT || theNaoStructure->supportingMode == NaoConfig::SUPPORT_MODE_DOUBLE_LEFT)
    {
        outBehaviorData.leftx = Tsupport.translation.x*1000;
        outBehaviorData.lefty = Tsupport.translation.y*1000;
        Pose3D Tswing;
        Tswing.rotation = Rsupport;
        Tswing.translate(theNaoStructure->uLink[NaoStructure::rFootLink].p-theNaoStructure->uLink[NaoStructure::bodyLink].p);
        outBehaviorData.rightx = Tswing.translation.x*1000;
        outBehaviorData.righty = Tswing.translation.y*1000;
    }
    else
    {
        outBehaviorData.rightx = Tsupport.translation.x*1000;
        outBehaviorData.righty = Tsupport.translation.y*1000;
        Pose3D Tswing;
        Tswing.rotation = Rsupport;
        Tswing.translate(theNaoStructure->uLink[NaoStructure::lFootLink].p-theNaoStructure->uLink[NaoStructure::bodyLink].p);
        outBehaviorData.leftx = Tswing.translation.x*1000;
        outBehaviorData.lefty = Tswing.translation.y*1000;
    }
}

Example 15: calibrate_kinect_depth

void calibrate_kinect_depth(std::vector< DepthCalibrationPoint >& depth_values)
{
    std::vector< std::vector<Point3f> > pattern_points;
    calibrationPattern(pattern_points,
                       global::opt_pattern_width(), global::opt_pattern_height(), global::opt_square_size(),
                       global::images_list.size());
    for (int i_image = 0; i_image < global::images_list.size(); ++i_image)
    {
        // Generate depth calibration points
        QString filename = global::images_list[i_image];
        QDir cur_image_dir (global::images_dir.absoluteFilePath(filename));
        std::string full_filename = cur_image_dir.absoluteFilePath("raw/color.png").toStdString();
        RGBDImage image;
        OpenniRGBDProcessor processor;
        processor.setFilterFlag(RGBDProcessorFlags::ComputeMapping, true);
        image.loadFromDir(cur_image_dir.absolutePath().toStdString(), &global::calibration, &processor);
        imshow_normalized("mapped depth", image.mappedDepth());
        imshow("color", image.rgb());
        std::vector<Point2f> current_view_corners;
        calibrationCorners(full_filename, "corners",
                           global::opt_pattern_width(), global::opt_pattern_height(),
                           current_view_corners, image.rgb(), 1, global::pattern_type);
        if (current_view_corners.size() != (global::opt_pattern_width()*global::opt_pattern_height()))
        {
            ntk_dbg(1) << "Corners not detected in " << cur_image_dir.absolutePath().toStdString();
            continue;
        }
        // FIXME: why do rvecs and tvecs from calibrateCamera seem to be nonsense?
        // Call findExtrinsics another time to get good chessboard estimates.
        cv::Mat1f H;
        estimate_checkerboard_pose(pattern_points[0],
                                   current_view_corners,
                                   global::calibration.rgb_intrinsics,
                                   H);
        Pose3D pose;
        pose.setCameraParametersFromOpencv(global::calibration.rgb_intrinsics);
        pose.setCameraTransform(H);
        ntk_dbg_print(pose, 1);
        cv::Mat3b debug_img;
        image.rgb().copyTo(debug_img);
        foreach_idx(pattern_i, pattern_points[0])
        {
            Point3f p = pose.projectToImage(pattern_points[0][pattern_i]);
            ntk_dbg_print(p, 1);
            float kinect_raw = image.mappedDepth()(p.y, p.x);
            ntk_dbg_print(kinect_raw, 1);
            if (kinect_raw < 1e-5) continue;
            float err = kinect_raw - p.z;
            cv::putText(debug_img, format("%d", (int)(err*1000)), Point(p.x, p.y), CV_FONT_NORMAL, 0.4, Scalar(255,0,0));
            ntk_dbg_print(pattern_points[0][pattern_i], 1);
            ntk_dbg_print(p, 1);
            ntk_dbg_print(kinect_raw, 1);
            depth_values.push_back(DepthCalibrationPoint(kinect_raw, p.z));
        }
        imshow("errors", debug_img);
        cv::waitKey(0);
    }