This article collects and organizes typical code examples of the C++ Mat_::inv method. If you are wondering what C++ Mat_::inv is for, how to call it, or what real uses of Mat_::inv look like, the hand-picked examples here may help. You can also look further into usage examples of the class the method belongs to, Mat_.
Below, 2 code examples of the Mat_::inv method are shown, ordered by popularity by default.
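Before the full examples, here is a minimal, self-contained sketch of the method itself. It is not taken from either example below, and the matrix values are purely illustrative:

#include <opencv2/core/core.hpp>
#include <iostream>
using namespace cv;

int main() {
    // A hand-built 3x3 camera matrix (values are only for illustration).
    Mat_<double> K = (Mat_<double>(3, 3) << 800,   0, 320,
                                              0, 800, 240,
                                              0,   0,   1);
    Mat_<double> Kinv  = K.inv();             // inverse via the default LU decomposition
    Mat_<double> Kpinv = K.inv(DECOMP_SVD);   // pseudo-inverse; also handles singular matrices
    Mat_<double> check = Kinv * K;            // should be (approximately) the identity
    std::cout << check << std::endl;
    return 0;
}

inv() returns a cv::MatExpr, so the result can be assigned to a Mat/Mat_ or used directly inside a larger matrix expression, which is exactly how the examples below use K.inv() and K_init.inv().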
Example 1: findCorrectPose
bool findCorrectPose(Mat_<double> E, vector<KeyPoint> key1, vector<KeyPoint> key2,
                     Mat_<double> K, Mat_<double> Kinv,
                     Matx34d& out, vector<Point3d>& cloudOut)
{
    Matx34d Peye(1.0, 0,   0,   0,
                 0,   1.0, 0,   0,
                 0,   0,   1.0, 0);

    // Decompose E into P' = [R|t], HZ (9.19).
    cv::SVD svd(E, SVD::MODIFY_A);
    Mat svd_u  = svd.u;
    Mat svd_vt = svd.vt;
    Mat svd_w  = svd.w;

    Matx33d W(0, -1, 0,   // HZ 9.13
              1,  0, 0,
              0,  0, 1);

    // Newly added, must check if correct.
    Matx33d Z(0,  1, 0,   // HZ 9.13
             -1,  0, 0,
              0,  0, 0);
    Mat_<double> S = svd_u * Mat(Z) * svd_u.t();

    Mat_<double> R  = svd_u * Mat(W)     * svd_vt;  // HZ 9.19
    Mat_<double> R1 = svd_u * Mat(W).t() * svd_vt;  // HZ 9.19

    // Newly added according to the MIT implementation: enforce det(R) = +1.
    if (determinant(R) < 0)
        R = -R;
    if (determinant(R1) < 0)
        R1 = -R1;

    Mat_<double> t    = svd_u.col(2);       //  u3
    Mat_<double> tneg = -1 * svd_u.col(2);  // -u3

    // Project both rotation candidates onto the closest orthonormal matrix (U * I * V^T)
    // and rescale the corresponding translations.
    cv::SVD rSVD(R);
    R = rSVD.u * Mat::eye(3, 3, CV_64F) * rSVD.vt;
    double myscale = trace(rSVD.w)[0] / 3;
    t = t / myscale;

    cv::SVD r1SVD(R1);
    R1 = r1SVD.u * Mat::eye(3, 3, CV_64F) * r1SVD.vt;
    double myscale1 = trace(r1SVD.w)[0] / 3;
    tneg = tneg / myscale1;

    if (!CheckCoherentRotation(R) || !CheckCoherentRotation(R1)) {
        out = 0;
        return false;
    }

    // Test the four candidate poses [R|t], [R|-t], [R1|t], [R1|-t] and keep the one that
    // places the most triangulated points in front of both cameras, together with its cloud.
    Matx34d TempPose, poseHolder;
    vector<Point3d> pointCloud, pointCloudMax;

    createPoseFromRotationTranslation(R, t, out);
    double max = -1, temp = 0.0;
    max = getPointCloudAndPointsInFront(Peye, out, key1, key2, K, K.inv(), pointCloud);
    pointCloudMax = pointCloud;
    poseHolder = Matx34d(out);

    pointCloud.clear();
    out = TempPose;
    createPoseFromRotationTranslation(R, tneg, out);
    temp = getPointCloudAndPointsInFront(Peye, out, key1, key2, K, K.inv(), pointCloud);
    if (temp > max) {
        pointCloudMax = pointCloud;
        poseHolder = Matx34d(out);
        max = temp;
    }

    pointCloud.clear();
    out = TempPose;
    createPoseFromRotationTranslation(R1, t, out);
    temp = getPointCloudAndPointsInFront(Peye, out, key1, key2, K, K.inv(), pointCloud);
    if (temp > max) {
        pointCloudMax = pointCloud;
        poseHolder = Matx34d(out);
        max = temp;
    }

    pointCloud.clear();
    out = TempPose;
    createPoseFromRotationTranslation(R1, tneg, out);
    temp = getPointCloudAndPointsInFront(Peye, out, key1, key2, K, K.inv(), pointCloud);
    if (temp > max) {
        pointCloudMax = pointCloud;
        poseHolder = Matx34d(out);
        max = temp;
    }

    out = poseHolder;
    cloudOut = pointCloudMax;
    return true;
}
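A possible call site for Example 1 (only a sketch, not part of the original sample: the fundamental matrix F and the matched keypoints key1/key2 are assumed to come from an earlier matching step, and recoverRelativePose is a made-up helper name):

// Hypothetical wrapper around findCorrectPose; F, key1/key2 and K are assumed to exist already.
bool recoverRelativePose(const Mat& F, const vector<KeyPoint>& key1, const vector<KeyPoint>& key2,
                         const Mat_<double>& K, Matx34d& P1, vector<Point3d>& cloud)
{
    Mat_<double> E = Mat_<double>(K.t() * F * K);   // E = K^T * F * K
    return findCorrectPose(E, key1, key2, K, K.inv(), P1, cloud);
}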
Example 2: main
//......... part of this function's code is omitted here .........
cout << ", not enough matches\n";
continue;
}
ExtractMatchedKeypoints(*(features_collection.find(from)->second),
*(features_collection.find(to)->second),
matches, keypoints1, keypoints2);
vector<uchar> inliers_mask;
Mat_<double> H = findHomography(keypoints1.reshape(2), keypoints2.reshape(2),
inliers_mask, RANSAC, H_est_thresh);
if (H.empty()) {
cout << ", can't estimate H\n";
continue;
}
Ptr<vector<DMatch> > inliers = new vector<DMatch>();
for (size_t i = 0; i < matches.size(); ++i)
if (inliers_mask[i])
inliers->push_back(matches[i]);
cout << ", #inliers = " << inliers->size();
double rms_err = 0;
for (size_t i = 0; i < matches.size(); ++i) {
const Point2d &kp1 = keypoints1.at<Point2d>(0, i);
const Point2d &kp2 = keypoints2.at<Point2d>(0, i);
double x = H(0, 0) * kp1.x + H(0, 1) * kp1.y + H(0, 2);
double y = H(1, 0) * kp1.x + H(1, 1) * kp1.y + H(1, 2);
double z = H(2, 0) * kp1.x + H(2, 1) * kp1.y + H(2, 2);
x /= z; y /= z;
rms_err += (kp2.x - x) * (kp2.x - x) + (kp2.y - y) * (kp2.y - y);
}
rms_err = sqrt(rms_err / matches.size());
cout << ", RMS err = " << rms_err;
// See "Automatic Panoramic Image Stitching using Invariant Features"
// by Matthew Brown and David G. Lowe, IJCV 2007 for the explanation
double confidence = inliers->size() / (8 + 0.3 * matches.size()) - 1;
rel_confs[make_pair(from, to)] = confidence;
cout << ", conf = " << confidence;
cout << endl;
Hs[make_pair(from, to)] = H;
matches_collection[make_pair(from, to)] = inliers;
if (confidence > 0)
good_Hs[make_pair(from, to)] = H;
if (from == 0)
Hs_from_0.push_back(H);
}
}
// Linear calibration
if (K_init.empty()) {
cout << "Linear calibrating...\n";
if (lin_est_skew)
K_init = CalibRotationalCameraLinear(good_Hs);
else
K_init = CalibRotationalCameraLinearNoSkew(good_Hs);
cout << "K_init =\n" << K_init << endl;
}
// Non-linear refinement
cout << "Refining camera...\n";
if (Hs_from_0.size() != num_cameras - 1) {
stringstream msg;
msg << "Refinement requires Hs between first and all other images, "
<< "but only " << Hs_from_0.size() << " were/was found";
throw runtime_error(msg.str());
}
map<int, Mat> Rs;
Rs[0] = Mat::eye(3, 3, CV_64F);
for (int i = 1; i < num_cameras; ++i)
Rs[i] = K_init.inv() * Hs_from_0[i - 1] * K_init;
Mat_<double> K_refined = K_init.clone();
if (refine_skew)
RefineRigidCamera(K_refined, Rs, features_collection, matches_collection);
else {
K_refined(0, 1) = 0;
RefineRigidCamera(K_refined, Rs, features_collection, matches_collection,
REFINE_FLAG_K_ALL & ~REFINE_FLAG_K_SKEW);
}
cout << "K_refined =\n" << K_refined << endl;
cout << "SUMMARY\n";
cout << "K_init =\n" << K_init << endl;
cout << "K_refined =\n" << K_refined << endl;
}
catch (const exception &e) {
cout << "Error: " << e.what() << endl;
}
return 0;
}