This article collects typical usage examples of the C++ method Transformation::applyInv. If you are wondering what exactly Transformation::applyInv does, how to use it, or what calling it looks like in practice, the curated code example below may help. You can also explore further usage examples of the enclosing class Transformation.
Below, 1 code example of Transformation::applyInv is shown; examples are sorted by popularity by default. You can upvote the examples you like or find useful, and your ratings help the site recommend better C++ code examples.
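Before the example, it may help to see what the apply/applyInv convention of such a rigid-body Transformation class usually looks like. The sketch below is a hypothetical, minimal illustration only: the struct name, the members, and the convention apply(p) = R*p + t with applyInv(p) = Rᵀ*(p − t) are assumptions made for illustration, not the actual API of the Transformation class used in the example.

// Hypothetical sketch (not the library's real class): a rigid transform
// stored as a translation t and a rotation R, where apply() maps a point
// from the local (camera) frame to the world frame and applyInv() maps it back.
#include <array>

struct Vec3 { double x, y, z; };

struct TransformationSketch {
    Vec3 t;                                   // translation
    std::array<std::array<double, 3>, 3> R;   // 3x3 rotation matrix

    // world = R * local + t
    Vec3 apply(const Vec3& p) const {
        return { R[0][0]*p.x + R[0][1]*p.y + R[0][2]*p.z + t.x,
                 R[1][0]*p.x + R[1][1]*p.y + R[1][2]*p.z + t.y,
                 R[2][0]*p.x + R[2][1]*p.y + R[2][2]*p.z + t.z };
    }
    // local = R^T * (world - t), i.e. the exact inverse of apply()
    Vec3 applyInv(const Vec3& p) const {
        const Vec3 d{ p.x - t.x, p.y - t.y, p.z - t.z };
        return { R[0][0]*d.x + R[1][0]*d.y + R[2][0]*d.z,
                 R[0][1]*d.x + R[1][1]*d.y + R[2][1]*d.z,
                 R[0][2]*d.x + R[1][2]*d.y + R[2][2]*d.z };
    }
};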
Example 1: initPoseMono
void MultiCamInitIncrementalPose::initPoseMono(unsigned int camera, size_t frame, unsigned int maxIter,
                                               unsigned int& inliers, RealPoint3D<double>& t, Matrix<double>& R) const {
    // Selecting the points appearing in this camera that are well constrained
    vector<size_t> inCameraPoints;
    inCameraPoints.reserve(bundle_.frame(frame).size());
    for(size_t i = 0; i < bundle_.frame(frame).size(); ++i) {
        if(bundle_.frame(frame)[i].inTrackNumber() > 2
           && bundle_.frame(frame)[i].inCamera(camera)
           && bundle_.frame(frame)[i].track()->point()->numInliers() >= 3)
            inCameraPoints.push_back(i);
    }
    // Checking the number of points
    if(inCameraPoints.size() < 3) {
        // Not enough points, stop here!
        inliers = 0;
        return;
    }
    // Main RANSAC loop
    unsigned int maxInliers = 0;
    RealPoint3D<double> maxT;
    Matrix<double> maxR(3, 3);
    // Calibration transform of the selected camera: world <-> camera n0
    const Transformation P_W_Cn0(calibration_.translation(camera), calibration_.rotation(camera));
    #pragma omp parallel for
    for(unsigned int iter = 0; iter < maxIter; ++iter) {
        // Selecting 3 random points
        size_t index[3];
        select3Index(inCameraPoints, index);
        // Extracting points: 3D coordinates expressed in the camera frame (via applyInv)
        // and the corresponding observation ray directions
        double Xw[3], Yw[3], Zw[3], TabXi[3], TabYi[3], TabZi[3];
        for(size_t i = 0; i < 3; ++i) {
            Track& track = *(bundle_.frame(frame)[inCameraPoints[index[i]]].track());
            RealPoint3D<double> tmp = P_W_Cn0.applyInv(track.point()->coords());
            Xw[i] = tmp.x();
            Yw[i] = tmp.y();
            Zw[i] = tmp.z();
            mulTransMat33Vect3(P_W_Cn0.R(), bundle_.frame(frame)[inCameraPoints[index[i]]].ray(camera).direction(), tmp);
            TabXi[i] = tmp.x();
            TabYi[i] = tmp.y();
            TabZi[i] = tmp.z();
        }
        // Calculating the pose (the 3-point problem yields up to 4 solutions)
        Matrix33 R3Pose[4];
        Vector31 T3Pose[4];
        MPoseFinsterwalder poseEstimator;
        const int nbSol = poseEstimator.CalculePose(Xw, Yw, Zw, TabXi, TabYi, TabZi, R3Pose, T3Pose);
        // Checking each solution
        for(int sol = 0; sol < nbSol; ++sol) {
            // Calculating the temporary pose (invert the solution: R^T, -R^T * T)
            Vector31 tmpT;
            Matrix33 tmpR = R3Pose[sol].transpose();
            tmpT = -1.0*(tmpR*T3Pose[sol]);
            Matrix<double> tmp_mat(3, 3);
            tmp_mat[0][0] = tmpR.Get00(); tmp_mat[1][0] = tmpR.Get01(); tmp_mat[2][0] = tmpR.Get02();
            tmp_mat[0][1] = tmpR.Get10(); tmp_mat[1][1] = tmpR.Get11(); tmp_mat[2][1] = tmpR.Get12();
            tmp_mat[0][2] = tmpR.Get20(); tmp_mat[1][2] = tmpR.Get21(); tmp_mat[2][2] = tmpR.Get22();
            Transformation P_Cn0_Cni(RealPoint3D<double>(tmpT.GetX(), tmpT.GetY(), tmpT.GetZ()), tmp_mat);
            Transformation P_W_MCSi = P_W_Cn0*P_Cn0_Cni*P_W_Cn0.inv();
            // Calculating angular errors to count inliers over all cameras of the rig
            unsigned int numInliers = 0;
            for(size_t i = 0; i < bundle_.frame(frame).size(); ++i) {
                if(bundle_.frame(frame)[i].inTrackNumber() > 2) {
                    // Note: this loop variable shadows the function parameter 'camera';
                    // the consensus test runs over every camera, not just the selected one
                    for(unsigned int camera = 0; camera < bundle_.numCameras(); ++camera) {
                        if(bundle_.frame(frame)[i].inCamera(camera) && bundle_.frame(frame)[i].track()->point()->numInliers() >= 3) {
                            Transformation P_MCS_Cn(calibration_.translation(camera), calibration_.rotation(camera));
                            Transformation P_W_Cni = P_W_MCSi*P_MCS_Cn;
                            RealPoint3D<double> point3D = P_W_Cni.applyInv(bundle_.frame(frame)[i].track()->point()->coords());
                            RealPoint3D<double> rayDir;
                            mulTransMat33Vect3(P_MCS_Cn.R(), bundle_.frame(frame)[i].ray(camera).direction(), rayDir);
                            // 1 - cos^2 = sin^2 of the angle between the back-projected point and the observed ray
                            const double error = 1.0 - syuSqr(point3D.dot(rayDir))/(point3D.norm2()*rayDir.norm2());
                            if(error < maxSqrAngularError_) ++numInliers;
                        }
                    }
                }
            }
            // Checking if this solution is better than the best one so far
            #pragma omp critical
            {
                if(maxInliers < numInliers) {
                    maxInliers = numInliers;
                    maxT = P_Cn0_Cni.t();
                    maxR = P_Cn0_Cni.R();
                }
            }
        }
//......... part of the code is omitted here .........
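A note on the inlier test in the example: the quantity 1.0 - syuSqr(point3D.dot(rayDir))/(point3D.norm2()*rayDir.norm2()) equals 1 − cos², i.e. the squared sine of the angle between the back-projected point direction and the observed ray, and it is thresholded against maxSqrAngularError_. The snippet below is a small, self-contained sketch of that same test; the struct and helper names are illustrative, not part of the library.

// Illustrative only: squared sine of the angle between two 3D directions,
// matching the "1 - cos^2" inlier score used in the example above.
struct V3 { double x, y, z; };

inline double dot(const V3& a, const V3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; }
inline double norm2(const V3& a)            { return dot(a, a); }

inline double squaredAngularError(const V3& point3D, const V3& rayDir) {
    const double cos2 = (dot(point3D, rayDir) * dot(point3D, rayDir))
                      / (norm2(point3D) * norm2(rayDir));
    return 1.0 - cos2;  // sin^2 of the angle between the two directions
}

// A point counts as an inlier when this squared angular error stays below
// the threshold (maxSqrAngularError_ in the example):
//   bool isInlier = squaredAngularError(point3D, rayDir) < maxSqrAngularError_;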