This article collects typical usage examples of the C++ method SE3::inverse. If you have been wondering what SE3::inverse does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage of the SE3 class that this method belongs to.
The following shows 8 code examples of SE3::inverse, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: updateDepthFilter
bool updateDepthFilter(
    const Vector2d& pt_ref,
    const Vector2d& pt_curr,
    const SE3& T_C_R,
    Mat& depth,
    Mat& depth_cov
)
{
    // Compute depth by triangulation.
    SE3 T_R_C = T_C_R.inverse();
    Vector3d f_ref = px2cam( pt_ref );
    f_ref.normalize();
    Vector3d f_curr = px2cam( pt_curr );
    f_curr.normalize();
    // For the equations, see the triangulation section in Chapter 7 of the book.
    Vector3d t = T_R_C.translation();
    Vector3d f2 = T_R_C.rotation_matrix() * f_curr;
    Vector2d b = Vector2d( t.dot(f_ref), t.dot(f2) );
    // Build the 2x2 system A * [d_ref, d_cur]^T = b and solve it in closed form.
    double A[4];
    A[0] = f_ref.dot( f_ref );
    A[2] = f_ref.dot( f2 );
    A[1] = -A[2];
    A[3] = -f2.dot( f2 );
    double d = A[0]*A[3] - A[1]*A[2];  // determinant
    Vector2d lambdavec =
        Vector2d( A[3] * b(0,0) - A[1] * b(1,0),
                 -A[2] * b(0,0) + A[0] * b(1,0) ) / d;
    Vector3d xm = lambdavec(0,0) * f_ref;      // point on the reference ray
    Vector3d xn = t + lambdavec(1,0) * f2;     // point on the current ray
    Vector3d d_esti = ( xm + xn ) / 2.0;       // triangulated point (midpoint of the two rays)
    double depth_estimation = d_esti.norm();   // depth value
    // Compute the uncertainty (assuming an error of one pixel).
    Vector3d p = f_ref * depth_estimation;
    Vector3d a = p - t;
    double t_norm = t.norm();
    double a_norm = a.norm();
    double alpha = acos( f_ref.dot(t) / t_norm );            // angle between the reference ray and the baseline
    double beta = acos( -a.dot(t) / (a_norm * t_norm) );     // angle at the current camera
    double beta_prime = beta + atan(1/fx);                   // perturb beta by one pixel
    double gamma = M_PI - alpha - beta_prime;
    double p_prime = t_norm * sin(beta_prime) / sin(gamma);  // law of sines
    double d_cov = p_prime - depth_estimation;
    double d_cov2 = d_cov * d_cov;
    // Gaussian fusion of the new measurement with the current estimate.
    double mu = depth.ptr<double>( int(pt_ref(1,0)) )[ int(pt_ref(0,0)) ];
    double sigma2 = depth_cov.ptr<double>( int(pt_ref(1,0)) )[ int(pt_ref(0,0)) ];
    double mu_fuse = ( d_cov2*mu + sigma2*depth_estimation ) / ( sigma2 + d_cov2 );
    double sigma_fuse2 = ( sigma2 * d_cov2 ) / ( sigma2 + d_cov2 );
    depth.ptr<double>( int(pt_ref(1,0)) )[ int(pt_ref(0,0)) ] = mu_fuse;
    depth_cov.ptr<double>( int(pt_ref(1,0)) )[ int(pt_ref(0,0)) ] = sigma_fuse2;
    return true;
}
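As a sanity check on the closed-form 2x2 solve above, the same system can be written directly with Eigen types. This is a standalone sketch, not code from the listing: the ray directions and baseline are made-up values, and the helper uses a generic LU solve instead of Cramer's rule.

#include <Eigen/Dense>
#include <iostream>

int main()
{
    using Eigen::Vector3d; using Eigen::Vector2d; using Eigen::Matrix2d;
    // Made-up unit rays and baseline, standing in for f_ref, f2 and t above.
    Vector3d f_ref = Vector3d(0.1, 0.0, 1.0).normalized();
    Vector3d f2    = Vector3d(-0.1, 0.0, 1.0).normalized();
    Vector3d t(0.5, 0.0, 0.0);
    // Same system as in updateDepthFilter: A * [d_ref, d_cur]^T = b.
    Matrix2d A;
    A << f_ref.dot(f_ref), -f_ref.dot(f2),
         f_ref.dot(f2),    -f2.dot(f2);
    Vector2d b(t.dot(f_ref), t.dot(f2));
    Vector2d lambdavec = A.fullPivLu().solve(b);
    // Midpoint of the two closest points on the rays, as in the listing.
    Vector3d d_esti = (lambdavec(0) * f_ref + t + lambdavec(1) * f2) / 2.0;
    std::cout << "depth = " << d_esti.norm() << std::endl;
    return 0;
}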
Example 2: publishPosewithCovariance
// For the EKF, we publish Tcw.
void publishPosewithCovariance(ros::Time stamp)
{
    // SE3<> camPose = tracker_->GetCurrentPose(); // Tcw
    SE3<> camPose = camPose4pub;  // Tcw
    if (true) {
        geometry_msgs::PoseWithCovarianceStampedPtr pose(new geometry_msgs::PoseWithCovarianceStamped);  // Tcw
        pose->header.stamp = stamp;
        pose->header.frame_id = "/ptam_world_cov";
        pose->pose.pose.position.x = camPose.get_translation()[0];
        pose->pose.pose.position.y = camPose.get_translation()[1];
        pose->pose.pose.position.z = camPose.get_translation()[2];
        quat_from_so3(pose->pose.pose.orientation, camPose.get_rotation());
        TooN::Matrix<6> covar = tracker_->GetPoseCovariance();
        // Note: this publishes sqrt(|covar|) element-wise (standard-deviation-like
        // magnitudes, not the covariance itself), and covar is read transposed
        // ([i % 6][i / 6]); for a symmetric matrix the transpose is harmless.
        for (unsigned int i = 0; i < pose->pose.covariance.size(); i++){
            pose->pose.covariance[i] = sqrt(fabs(covar[i % 6][i / 6]));
            // cout << pose->pose.covariance[i] << ", ";
        }
        // Hack: stash the keyframe's median scene depth in covariance[1].
        pose->pose.covariance[1] = tracker_->GetCurrentKeyFrame().dSceneDepthMedian;
        // cout << "PTAM pose, Tcw: " << endl;
        // cout << pose->pose.pose.position.x << ", " << pose->pose.pose.position.y << ", " << pose->pose.pose.position.z << endl;
        // cout << pose->pose.pose.orientation.w << ", " << pose->pose.pose.orientation.x << ", "
        //      << pose->pose.pose.orientation.y << ", " << pose->pose.pose.orientation.z << endl;
        SE3<> roboPose = se3IMUfromcam * camPose;  // Tiw = Tic * Tcw
        roboPose = roboPose.inverse();             // Twi = (Tic * Tcw)^{-1}
        cout << "POSITION DIFF: " << PoseEKF_wi.get_translation()[0] - roboPose.get_translation()[0] << ", "
             << PoseEKF_wi.get_translation()[1] - roboPose.get_translation()[1] << ", "
             << PoseEKF_wi.get_translation()[2] - roboPose.get_translation()[2] << endl;
        geometry_msgs::Quaternion quatRobo;
        quat_from_so3(quatRobo, roboPose.get_rotation());  // Riw
        // cout << "PTAM pose, Twi: " << endl;
        // cout << roboPose.get_translation()[0] << ", " << roboPose.get_translation()[1] << ", " << roboPose.get_translation()[2] << endl;
        // cout << quatRobo.w << ", " << quatRobo.x << ", "
        //      << quatRobo.y << ", " << quatRobo.z << endl;
        // cout << "Pose Covariance: " << endl;
        // cout << pose->pose.covariance[0] << ", " << pose->pose.covariance[1] << ", " << pose->pose.covariance[2] << endl;
        // cout << pose->pose.covariance[3] << ", " << pose->pose.covariance[4] << ", " << pose->pose.covariance[5] << endl;
        // cout << pose->pose.covariance[6] << ", " << pose->pose.covariance[7] << ", " << pose->pose.covariance[8] << endl;
        cam_posewithcov_pub_.publish(pose);
        geometry_msgs::TransformStampedPtr quadpose(new geometry_msgs::TransformStamped);  // for ROS nodelets, publish a Ptr
        quadpose->header.stamp = stamp;
        quadpose->header.frame_id = "/ptam_world";
        quadpose->transform.translation.x = roboPose.get_translation()[0];
        quadpose->transform.translation.y = roboPose.get_translation()[1];
        quadpose->transform.translation.z = roboPose.get_translation()[2];
        quat_from_so3(quadpose->transform.rotation, roboPose.get_rotation());
        quad_pose_pub_.publish(quadpose);
        logPose(stamp, roboPose);
    }
}
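The frame bookkeeping in this example is compact and easy to misread, so here is a minimal restatement of just the pose chain using only the TooN operations already present in the listing. The function name is illustrative; se3IMUfromcam and camPose are the listing's own variables.

#include <TooN/se3.h>

// Twi = (Tic * Tcw)^{-1}: world-from-IMU, the robot pose published above.
TooN::SE3<> robotPoseInWorld(const TooN::SE3<>& se3IMUfromcam /* Tic */,
                             const TooN::SE3<>& camPose /* Tcw */)
{
    TooN::SE3<> Tiw = se3IMUfromcam * camPose;  // IMU-from-world
    return Tiw.inverse();                       // world-from-IMU
}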
Example 3: Data
// Get Tcw from the IMU attitude.
SE3<> imu2camWorld(pximu::AttitudeData attitude_data){
    // Rotation about x by the roll angle; note Rot(-roll)^T = Rot(roll).
    Matrix<3> Rroll = Data(1.0, 0, 0,
                           0, cos(attitude_data.roll), -sin(attitude_data.roll),
                           0, sin(attitude_data.roll), cos(attitude_data.roll));
    // Rotation about y by the pitch angle; note Rot(-pitch)^T = Rot(pitch).
    Matrix<3> Rpitch = Data(cos(attitude_data.pitch), 0, -sin(attitude_data.pitch),
                            0, 1.0, 0,
                            sin(attitude_data.pitch), 0, cos(attitude_data.pitch));
    // Rww1: the roll and pitch angles are given in a world frame whose z-axis
    // points downward, so flip the y and z axes.
    Matrix<3> roll = Data(1.0, 0, 0,
                          0, -1, 0,
                          0, 0, -1);
    SE3<> camWorld = SE3<>();
    Matrix<3> rotation = roll * Rpitch * Rroll;  // Rwr (world from robot)
    camWorld.get_rotation() = rotation * se3IMUfromcam.get_rotation().get_matrix();  // Rwc = Rwr * Rrc
    Vector<3> twr = makeVector(0.0, 0.0, 0.198);  // twc = twr + Rwr * trc
    Vector<3> twc = twr + rotation * se3IMUfromcam.get_translation();
    camWorld.get_translation()[0] = 0.0;  // twc[0]
    camWorld.get_translation()[1] = 0.0;  // twc[1]; TODO: find out the bug -- why 0 must be used here
    camWorld.get_translation()[2] = twc[2];
    camWorld = camWorld.inverse();  // Tcw
    cout << "TCW INITIALIZED FROM IMU ATT. TWC: " << twc[0] << ", " << twc[1] << ", " << twc[2] << endl;
    return camWorld;
}
Example 4: makeVector
SE3<> IMU2camWorldfromQuat(Eigen::Quaternion<double> atti){
    // Use the initial attitude estimate from the EKF node to initialise the PTAM pose.
    Vector<4> attivec = makeVector(atti.w(), atti.x(), atti.y(), atti.z());  // Rw1i
    Matrix<3> iniOrientationEKF;
    iniOrientationEKF = tag::quaternionToMatrix(attivec);
    // Rww1: the attitude is given in a world frame whose z-axis points
    // downward, so flip the y and z axes.
    Matrix<3> roll = TooN::Data(1.0, 0, 0,
                                0, -1, 0,
                                0, 0, -1);
    SE3<> camWorld = SE3<>();
    Matrix<3> rotation;
    if (tracker_->attitude_get)
        rotation = iniOrientationEKF;
    else
        rotation = roll * iniOrientationEKF;  // Rwi = Rww1 * Rw1i
    camWorld.get_rotation() = rotation * se3IMUfromcam.get_rotation().get_matrix();  // Rwc = Rwi * Ric
    Vector<3> twr = makeVector(0.0, 0.0, 0.198);  // twc = twr + Rwr * trc
    Vector<3> twc = twr + rotation * se3IMUfromcam.get_translation();
    camWorld.get_translation()[0] = 0.0;  // twc[0]
    camWorld.get_translation()[1] = 0.0;  // twc[1]
    camWorld.get_translation()[2] = twc[2];
    camWorld = camWorld.inverse();  // Tcw
    cout << "TCW INITIALIZED. TWC: " << twc[0] << ", " << twc[1] << ", " << twc[2] << endl;
    // cout << camWorld.get_rotation().get_matrix()(2,2) << endl;
    return camWorld;
}
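Examples 3 and 4 above share one pattern: compose the world-from-camera pose from an IMU attitude plus the fixed IMU-to-camera extrinsics, then invert once at the end. A condensed sketch of that pattern follows; this is a hypothetical helper for illustration, not code from either listing.

#include <TooN/se3.h>

// Builds Twc from Rwi (world-from-IMU rotation), twr (robot position in the
// world) and Tic (IMU-from-camera extrinsics); returns Tcw = Twc^{-1}.
TooN::SE3<> camFromWorld(const TooN::Matrix<3>& Rwi,
                         const TooN::Vector<3>& twr,
                         const TooN::SE3<>& se3IMUfromcam)
{
    TooN::SE3<> Twc;
    Twc.get_rotation() = Rwi * se3IMUfromcam.get_rotation().get_matrix();  // Rwc = Rwi * Ric
    Twc.get_translation() = twr + Rwi * se3IMUfromcam.get_translation();  // twc = twr + Rwi * tic
    return Twc.inverse();  // Tcw
}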
Example 5:
void SlamGraph<SE3, StereoCamera, SE3XYZ_STEREO, 3>
::addConstraintToG2o(const SE3 & T_2_from_1,
                     const Matrix6d & Lambda_2_from_1,
                     int pose_id_1,
                     int pose_id_2,
                     g2o::SparseOptimizer * optimizer)
{
    G2oEdgeSE3 * e = new G2oEdgeSE3();
    g2o::HyperGraph::Vertex * pose1_vertex
        = GET_MAP_ELEM(pose_id_1, optimizer->vertices());
    e->vertices()[0]
        = dynamic_cast<g2o::OptimizableGraph::Vertex*>(pose1_vertex);
    g2o::HyperGraph::Vertex * pose2_vertex
        = GET_MAP_ELEM(pose_id_2, optimizer->vertices());
    e->vertices()[1]
        = dynamic_cast<g2o::OptimizableGraph::Vertex*>(pose2_vertex);
    e->measurement() = T_2_from_1;
    e->inverseMeasurement() = T_2_from_1.inverse();  // cached so the error term need not invert on the fly
    e->information() = Lambda_2_from_1;              // information matrix = inverse covariance of the constraint
    optimizer->addEdge(e);                           // the optimizer takes ownership of the edge
}
Example 6: Render
void ARDriver::Render(Image<Rgb<byte> >& imFrame, SE3<> se3CfromW) {
    if (!mbInitialised) {
        Init();
        Reset();
    }
    mnCounter++;
    // Upload the image to our frame texture.
    glBindTexture(GL_TEXTURE_RECTANGLE_ARB, mnFrameTex);
    glTexSubImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, 0, 0, mirFrameSize.x,
                    mirFrameSize.y, GL_RGB, GL_UNSIGNED_BYTE, imFrame.data());
    // Set up rendering to go to the FBO; draw the undistorted video frame into the background.
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, mnFrameBuffer);
    CheckFramebufferStatus();
    glViewport(0, 0, mirFBSize.x, mirFBSize.y);
    DrawFBBackGround();
    glClearDepth(1);
    glClear(GL_DEPTH_BUFFER_BIT);
    // Set up the 3D projection.
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glMultMatrix(mCamera.MakeUFBLinearFrustumMatrix(0.005, 100));
    glMultMatrix(se3CfromW);
    DrawFadingGrid();
    mGame.DrawStuff(se3CfromW.inverse().get_translation());  // camera centre in world coordinates
    glDisable(GL_DEPTH_TEST);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glDisable(GL_BLEND);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    // Set up for drawing 2D stuff:
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
    DrawDistortedFB();
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    mGLWindow.SetupViewport();
    mGLWindow.SetupVideoOrtho();
    mGLWindow.SetupVideoRasterPosAndZoom();
}
Example 7:
Vector3d Camera::camera2world ( const Vector3d& p_c, const SE3& T_c_w )
{
    return T_c_w.inverse() * p_c;  // p_w = T_w_c * p_c
}
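For context, the forward mapping is the same multiplication without the inverse. A minimal companion sketch follows; world2camera is our illustrative name, assuming the same Sophus-style SE3 type that acts on 3D points, as in the listing.

Vector3d world2camera ( const Vector3d& p_w, const SE3& T_c_w )
{
    // p_c = T_c_w * p_w, so camera2world(world2camera(p, T), T) == p
    // up to floating-point error.
    return T_c_w * p_w;
}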
Example 8: threadLoop
void Relocalizer::threadLoop(int idx)
{
    if(!multiThreading && idx != 0) return;
    SE3Tracker* tracker = new SE3Tracker(w,h,K);
    boost::unique_lock<boost::mutex> lock(exMutex);
    while(continueRunning)
    {
        // If there is something to do, do it (unlocking in the meantime).
        if(nextRelocIDX < maxRelocIDX && CurrentRelocFrame)
        {
            Frame* todo = KFForReloc[nextRelocIDX%KFForReloc.size()];
            nextRelocIDX++;
            if(todo->neighbors.size() <= 2) continue;
            std::shared_ptr<Frame> myRelocFrame = CurrentRelocFrame;
            lock.unlock();
            // Initial alignment against the keyframe itself.
            SE3 todoToFrame = tracker->trackFrameOnPermaref(todo, myRelocFrame.get(), SE3());
            float todoGoodVal = tracker->pointUsage * tracker->lastGoodCount / (tracker->lastGoodCount+tracker->lastBadCount);
            // If the initial track looks promising, try the neighbours too.
            if(todoGoodVal > relocalizationTH)
            {
                int numGoodNeighbours = 0;
                int numBadNeighbours = 0;
                float bestNeighbourGoodVal = todoGoodVal;
                float bestNeighbourUsage = tracker->pointUsage;
                Frame* bestKF = todo;
                SE3 bestKFToFrame = todoToFrame;
                for(Frame* nkf : todo->neighbors)
                {
                    // Seed the neighbour's alignment from the keyframe graph:
                    // T_frame_from_nkf = inv(se3(S_nkf_from_w * S_w_from_todo * T_todo_from_frame)).
                    SE3 nkfToFrame_init = se3FromSim3((nkf->getScaledCamToWorld().inverse() * todo->getScaledCamToWorld() * sim3FromSE3(todoToFrame.inverse(), 1))).inverse();
                    SE3 nkfToFrame = tracker->trackFrameOnPermaref(nkf, myRelocFrame.get(), nkfToFrame_init);
                    float goodVal = tracker->pointUsage * tracker->lastGoodCount / (tracker->lastGoodCount+tracker->lastBadCount);
                    // Count as good if the score clears 80% of the threshold and the
                    // track stayed close (in se(3)) to the graph-predicted seed.
                    if(goodVal > relocalizationTH*0.8 && (nkfToFrame * nkfToFrame_init.inverse()).log().norm() < 0.1)
                        numGoodNeighbours++;
                    else
                        numBadNeighbours++;
                    if(goodVal > bestNeighbourGoodVal)
                    {
                        bestNeighbourGoodVal = goodVal;
                        bestKF = nkf;
                        bestKFToFrame = nkfToFrame;
                        bestNeighbourUsage = tracker->pointUsage;
                    }
                }
                if(numGoodNeighbours > numBadNeighbours || numGoodNeighbours >= 5)
                {
                    if(enablePrintDebugInfo && printRelocalizationInfo)
                        printf("RELOCALIZED! frame %d on %d (bestNeighbour %d): good %2.1f%%, usage %2.1f%%, GoodNeighbours %d / %d\n",
                               myRelocFrame->id(), todo->id(), bestKF->id(),
                               100*bestNeighbourGoodVal, 100*bestNeighbourUsage,
                               numGoodNeighbours, numGoodNeighbours+numBadNeighbours);
                    // Set everything to stop!
                    continueRunning = false;
                    lock.lock();
                    resultRelocFrame = myRelocFrame;
                    resultFrameID = myRelocFrame->id();
                    resultKF = bestKF;
                    resultFrameToKeyframe = bestKFToFrame.inverse();
                    resultReadySignal.notify_all();
                    hasResult = true;
                    lock.unlock();
                }
                else
                {
                    if(enablePrintDebugInfo && printRelocalizationInfo)
                        printf("FAILED RELOCALIZE! frame %d on %d (bestNeighbour %d): good %2.1f%%, usage %2.1f%%, GoodNeighbours %d / %d\n",
                               myRelocFrame->id(), todo->id(), bestKF->id(),
                               100*bestNeighbourGoodVal, 100*bestNeighbourUsage,
                               numGoodNeighbours, numGoodNeighbours+numBadNeighbours);
                }
            }
            lock.lock();
        }
        else
        {
            newCurrentFrameSignal.wait(lock);
        }
    }
    delete tracker;
}
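The neighbour acceptance test inside the loop is worth isolating: it combines a quality score with a drift check in the SE3 tangent space. The following standalone restatement is a hypothetical helper, not code from the listing; SE3 is the Sophus-style type used above, and 0.8 and 0.1 are the listing's own constants.

// Accept a neighbour track if its score clears 80% of the relocalization
// threshold AND the tracked pose stayed within 0.1 (se(3) tangent-space norm)
// of the pose predicted from the keyframe graph.
bool acceptNeighbourTrack(const SE3& nkfToFrame, const SE3& nkfToFrame_init,
                          float goodVal, float relocalizationTH)
{
    double drift = (nkfToFrame * nkfToFrame_init.inverse()).log().norm();
    return goodVal > relocalizationTH * 0.8 && drift < 0.1;
}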