本文整理汇总了C++中Quaterniond::setIdentity方法的典型用法代码示例。如果您正苦于以下问题:C++ Quaterniond::setIdentity方法的具体用法?C++ Quaterniond::setIdentity怎么用?C++ Quaterniond::setIdentity使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Quaterniond的用法示例。
在下文中一共展示了Quaterniond::setIdentity方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main
// g2o demo: builds a two-pose graph with VertexSCam stereo-camera vertices
// and GICP point matches, using a CSparse-backed variable-size block solver
// under Levenberg-Marquardt.
// NOTE(review): this example is truncated ("code omitted" marker at the end);
// the remainder of main() is not visible here.
int main(int argc, char **argv)
{
int num_points = 0;
// check for arg, # of points to use in projection SBA
if (argc > 1)
num_points = atoi(argv[1]);
double euc_noise = 0.1; // noise in position, m
double pix_noise = 1.0; // pixel noise
// double outlier_ratio = 0.1;
SparseOptimizer optimizer;
optimizer.setVerbose(false);
// variable-size block solver
BlockSolverX::LinearSolverType * linearSolver
= new LinearSolverCSparse<g2o
::BlockSolverX::PoseMatrixType>();
BlockSolverX * solver_ptr
= new BlockSolverX(linearSolver);
g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);
optimizer.setAlgorithm(solver);
// Generate 1000 random ground-truth 3D points in front of the cameras
// (assumes Sample::uniform() returns values in [0,1) — TODO confirm).
vector<Vector3d> true_points;
for (size_t i=0;i<1000; ++i)
{
true_points.push_back(Vector3d((Sample::uniform()-0.5)*3,
Sample::uniform()-0.5,
Sample::uniform()+10));
}
// set up camera params
Vector2d focal_length(500,500); // pixels
Vector2d principal_point(320,240); // 640x480 image
double baseline = 0.075; // 7.5 cm baseline
// set up camera params and projection matrices on vertices
g2o::VertexSCam::setKcam(focal_length[0],focal_length[1],
principal_point[0],principal_point[1],
baseline);
// set up two poses
int vertex_id = 0;
for (size_t i=0; i<2; ++i)
{
// set up rotation and translation for this node
Vector3d t(0,0,i);
Quaterniond q;
q.setIdentity(); // identity rotation: the two poses differ only by z-translation
Eigen::Isometry3d cam; // camera pose
cam = q;
cam.translation() = t;
// set up node
VertexSCam *vc = new VertexSCam();
vc->setEstimate(cam);
vc->setId(vertex_id); // vertex id
cerr << t.transpose() << " | " << q.coeffs().transpose() << endl;
// set first cam pose fixed
if (i==0)
vc->setFixed(true);
// make sure projection matrices are set
vc->setAll();
// add to optimizer
optimizer.addVertex(vc);
vertex_id++;
}
// set up point matches for GICP
for (size_t i=0; i<true_points.size(); ++i)
{
// get two poses
VertexSE3* vp0 =
dynamic_cast<VertexSE3*>(optimizer.vertices().find(0)->second);
VertexSE3* vp1 =
dynamic_cast<VertexSE3*>(optimizer.vertices().find(1)->second);
// calculate the relative 3D position of the point
Vector3d pt0,pt1;
pt0 = vp0->estimate().inverse() * true_points[i];
pt1 = vp1->estimate().inverse() * true_points[i];
// add in noise
pt0 += Vector3d(Sample::gaussian(euc_noise ),
Sample::gaussian(euc_noise ),
Sample::gaussian(euc_noise ));
//......... part of the code is omitted here .........
示例2: main
// g2o demo: two VertexSE3 poses connected by Edge_V_V_GICP point-to-point
// (with normals) constraints, solved with a dense linear solver under
// Levenberg-Marquardt.
// NOTE(review): this example is truncated ("code omitted" marker at the end);
// the remainder of main() is not visible here.
int main()
{
double euc_noise = 0.01; // noise in position, m
// double outlier_ratio = 0.1;
SparseOptimizer optimizer;
optimizer.setVerbose(false);
// variable-size block solver
BlockSolverX::LinearSolverType * linearSolver = new LinearSolverDense<g2o::BlockSolverX::PoseMatrixType>();
BlockSolverX * solver_ptr = new BlockSolverX(linearSolver);
g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);
optimizer.setAlgorithm(solver);
// Generate 1000 random ground-truth 3D points in front of the cameras
// (assumes Sample::uniform() returns values in [0,1) — TODO confirm).
vector<Vector3d> true_points;
for (size_t i=0;i<1000; ++i)
{
true_points.push_back(Vector3d((Sample::uniform()-0.5)*3,
Sample::uniform()-0.5,
Sample::uniform()+10));
}
// set up two poses
int vertex_id = 0;
for (size_t i=0; i<2; ++i)
{
// set up rotation and translation for this node
Vector3d t(0,0,i);
Quaterniond q;
q.setIdentity(); // identity rotation: the two poses differ only by z-translation
Eigen::Isometry3d cam; // camera pose
cam = q;
cam.translation() = t;
// set up node
VertexSE3 *vc = new VertexSE3();
vc->setEstimate(cam);
vc->setId(vertex_id); // vertex id
cerr << t.transpose() << " | " << q.coeffs().transpose() << endl;
// set first cam pose fixed
if (i==0)
vc->setFixed(true);
// add to optimizer
optimizer.addVertex(vc);
vertex_id++;
}
// set up point matches
for (size_t i=0; i<true_points.size(); ++i)
{
// get two poses
VertexSE3* vp0 =
dynamic_cast<VertexSE3*>(optimizer.vertices().find(0)->second);
VertexSE3* vp1 =
dynamic_cast<VertexSE3*>(optimizer.vertices().find(1)->second);
// calculate the relative 3D position of the point
Vector3d pt0,pt1;
pt0 = vp0->estimate().inverse() * true_points[i];
pt1 = vp1->estimate().inverse() * true_points[i];
// add in noise
pt0 += Vector3d(Sample::gaussian(euc_noise ),
Sample::gaussian(euc_noise ),
Sample::gaussian(euc_noise ));
pt1 += Vector3d(Sample::gaussian(euc_noise ),
Sample::gaussian(euc_noise ),
Sample::gaussian(euc_noise ));
// form edge, with normals in various positions
Vector3d nm0, nm1;
nm0 << 0, i, 1;
nm1 << 0, i, 1;
nm0.normalize();
nm1.normalize();
Edge_V_V_GICP * e // new edge with correct cohort for caching
= new Edge_V_V_GICP();
e->setVertex(0, vp0); // first viewpoint
e->setVertex(1, vp1); // second viewpoint
// The measurement carries both matched points and their surface normals.
EdgeGICP meas;
meas.pos0 = pt0;
meas.pos1 = pt1;
meas.normal0 = nm0;
meas.normal1 = nm1;
e->setMeasurement(meas);
//......... part of the code is omitted here .........
示例3: imgCb
/// Image callback for the visual-odometry node.
///
/// Converts the incoming ROS image to an 8-bit mono OpenCV matrix,
/// optionally looks up the camera pose from TF (when USE_ASE_IMU is
/// defined) to use as an orientation/position prior, and feeds the frame
/// into the VO pipeline. Afterwards it publishes visualization output and
/// pauses briefly when the frame handler is in the PAUSED stage.
///
/// @param msg  incoming camera image (expected encoding: mono8)
void VoNode::imgCb(const sensor_msgs::ImageConstPtr& msg)
{
    // Dummy data as the real position for the system.
    // static int old = msg->header.seq;
    // if(msg->header.seq == old)
    // filtered_pose.pose.position.z = 0;
    // else{
    // filtered_pose.pose.position.z += 10.0/70.0 * (msg->header.seq-old);
    // }
    // old = msg->header.seq;
    ROS_INFO("Frame seq: %i", msg->header.seq);
#ifdef USE_ASE_IMU
    // Look up the latest camera pose in the NED world frame; skip this
    // frame entirely if TF cannot provide the transform yet.
    tf::StampedTransform transform;
    try {
        listener_.lookupTransform("/camera", "/worldNED",
                                  ros::Time(0), transform);
    }
    catch (tf::TransformException ex) {
        ROS_ERROR("%s", ex.what());
        return;
    }
#endif
    cv::Mat img;
    try {
        img = cv_bridge::toCvShare(msg, "mono8")->image;
    } catch (cv_bridge::Exception& e) {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        // FIX: previously execution fell through here and the (empty)
        // image was still handed to the VO pipeline below. Abort the
        // callback instead — there is no valid frame to process.
        return;
    }
    processUserActions();
    // Quaterniond quat(filtered_pose.pose.orientation.w,filtered_pose.pose.orientation.x,filtered_pose.pose.orientation.y, filtered_pose.pose.orientation.z);
    // Matrix3d orient = quat.toRotationMatrix();
    // Vector3d pos(filtered_pose.pose.position.x,filtered_pose.pose.position.y, filtered_pose.pose.position.z);
    // Default prior: identity orientation, zero position (overridden below
    // when the TF-based pose is available).
    Quaterniond quat;
    quat.setIdentity();
    Vector3d pos;
    pos.setZero();
    Matrix3d orient = quat.toRotationMatrix();
#ifdef USE_ASE_IMU
    tf::quaternionTFToEigen(transform.getRotation(), quat);
    orient = quat.toRotationMatrix();
    tf::vectorTFToEigen(transform.getOrigin(), pos);
#endif
    vo_->addImage(img, msg->header.stamp.toSec(), orient, pos);
    visualizer_.publishMinimal(img, vo_->lastFrame(), *vo_, msg->header.stamp.toSec());
    if(publish_markers_ && vo_->stage() != FrameHandlerBase::STAGE_PAUSED)
        visualizer_.visualizeMarkers(vo_->lastFrame(), vo_->coreKeyframes(), vo_->map());
    if(publish_dense_input_)
        visualizer_.exportToDense(vo_->lastFrame());
    // When paused, throttle the callback so we don't spin at full rate.
    if(vo_->stage() == FrameHandlerMono::STAGE_PAUSED)
        usleep(100000);
}