This article compiles typical usage examples of the C++ NViewDataSet class. If you are wondering what NViewDataSet is for, how to use it, or want to see it in action, the curated class code examples below may help.
The following presents 9 code examples of the NViewDataSet class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
Example 1: TEST
TEST(Resection_Kernel, Multiview) {
  const int views_num = 3;
  const int points_num = 10;
  const NViewDataSet d = NRealisticCamerasRing(views_num, points_num,
    NViewDatasetConfigurator(1,1,0,0,5,0)); // Assume a camera with the identity matrix as K
  const int nResectionCameraIndex = 2;
  // Solve the problem and check that the fitted values are good enough
  {
    Mat x = d.projected_points_[nResectionCameraIndex];
    Mat X = d.point_3d_;
    mvg::multiview::resection::PoseResectionKernel kernel(x, X);
    size_t samples_[6] = {0, 1, 2, 3, 4, 5};
    std::vector<size_t> samples(samples_, samples_ + 6);
    std::vector<Mat34> Ps;
    kernel.Fit(samples, &Ps);
    for (size_t i = 0; i < x.cols(); ++i) {
      EXPECT_NEAR(0.0, kernel.Error(i, Ps[0]), 1e-8);
    }
    EXPECT_EQ(1, Ps.size());
    // Check that the projection matrix is close to the GT:
    Mat34 GT_ProjectionMatrix = d.P(nResectionCameraIndex).array()
      / d.P(nResectionCameraIndex).norm();
    Mat34 COMPUTED_ProjectionMatrix = Ps[0].array() / Ps[0].norm();
    EXPECT_MATRIX_NEAR(GT_ProjectionMatrix, COMPUTED_ProjectionMatrix, 1e-8);
  }
}
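Note: the kernel's Fit call above wraps a camera resection solver. As a reference for what such a solver computes, here is a minimal sketch of the classic DLT resection (Eigen-only, with illustrative names, not the library's actual implementation): each 2D-3D correspondence contributes two rows to a 2n x 12 homogeneous system whose null vector stacks the rows of P.

#include <Eigen/Dense>

Eigen::Matrix<double, 3, 4> ResectionDLT(const Eigen::Matrix2Xd &x2d,
                                         const Eigen::Matrix3Xd &x3d)
{
  const int n = static_cast<int>(x2d.cols());
  Eigen::MatrixXd A(2 * n, 12);
  for (int i = 0; i < n; ++i) {
    const Eigen::Vector4d X = x3d.col(i).homogeneous();
    const double u = x2d(0, i), v = x2d(1, i);
    // Each correspondence gives two linear equations in the 12 entries of P.
    A.row(2 * i)     << X.transpose(), Eigen::RowVector4d::Zero(), -u * X.transpose();
    A.row(2 * i + 1) << Eigen::RowVector4d::Zero(), X.transpose(), -v * X.transpose();
  }
  // The stacked rows of P form the right singular vector associated with
  // the smallest singular value of A.
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeFullV);
  const Eigen::VectorXd p = svd.matrixV().col(11);
  Eigen::Matrix<double, 3, 4> P;
  P.row(0) = p.segment<4>(0).transpose();
  P.row(1) = p.segment<4>(4).transpose();
  P.row(2) = p.segment<4>(8).transpose();
  return P;
}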
Example 2: TEST
TEST(Resection_L_Infinity, Robust_OneOutlier) {
  const int nViews = 3;
  const int nbPoints = 20;
  const NViewDataSet d = NRealisticCamerasRing(nViews, nbPoints,
    nViewDatasetConfigurator(1,1,0,0,5,0)); // Assume a camera with the identity matrix as K
  d.ExportToPLY("test_Before_Infinity.ply");
  //-- Modify a dataset (reset values and compute new ones) (assert good values)
  NViewDataSet d2 = d;
  const int nResectionCameraIndex = 2;
  //-- Set the data that will be computed to 0 to be sure of the computation results:
  d2._R[nResectionCameraIndex] = Mat3::Zero();
  d2._t[nResectionCameraIndex] = Vec3::Zero();
  // Set 20% of the 3D points as outliers
  const int nbOutlier = nbPoints * 0.2;
  for (int i = 0; i < nbOutlier; ++i)
  {
    d2._X.col(i)(0) += 120.0;
    d2._X.col(i)(1) += -60.0;
    d2._X.col(i)(2) += 80.0;
  }
  // Solve the problem and check that the fitted values are good enough
  {
    typedef lInfinityCV::kernel::l1PoseResectionKernel KernelType;
    const Mat & pt2D = d2._x[nResectionCameraIndex];
    const Mat & pt3D = d2._X;
    KernelType kernel(pt2D, pt3D);
    ScorerEvaluator<KernelType> scorer(Square(0.1)); // Highly intolerant threshold for the test
    Mat34 P = MaxConsensus(kernel, scorer, NULL, 128);
    // Check that the projection matrix is close to the GT:
    Mat34 GT_ProjectionMatrix = d.P(nResectionCameraIndex).array()
      / d.P(nResectionCameraIndex).norm();
    Mat34 COMPUTED_ProjectionMatrix = P.array() / P.norm();
    // Extract K[R|t]
    Mat3 R, K;
    Vec3 t;
    KRt_From_P(P, &K, &R, &t);
    d2._R[nResectionCameraIndex] = R;
    d2._t[nResectionCameraIndex] = t;
    // Check the matrix against the GT, and the residual
    EXPECT_NEAR( 0.0, FrobeniusDistance(GT_ProjectionMatrix, COMPUTED_ProjectionMatrix), 1e-3 );
    Mat pt4D = VStack(pt3D, Mat(Vec::Ones(pt3D.cols()).transpose()));
    EXPECT_NEAR( 0.0, RootMeanSquareError(pt2D, pt4D, COMPUTED_ProjectionMatrix), 1e-1);
  }
  d2.ExportToPLY("test_After_Infinity.ply");
}
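Note: MaxConsensus above is a RANSAC-style robust estimator. A minimal sketch of the max-consensus loop follows, with hypothetical Kernel/Scorer interfaces that mirror the usage in the test (the real implementation samples without replacement and can also return the inlier set):

#include <cstdlib>
#include <limits>
#include <vector>

template <typename Kernel, typename Scorer>
typename Kernel::Model MaxConsensusSketch(const Kernel &kernel,
                                          const Scorer &scorer,
                                          int max_iterations)
{
  typename Kernel::Model best_model;
  double best_cost = std::numeric_limits<double>::max();
  for (int iter = 0; iter < max_iterations; ++iter) {
    // 1. Draw a random minimal sample of data indices.
    std::vector<size_t> sample;
    while (sample.size() < Kernel::MINIMUM_SAMPLES)
      sample.push_back(std::rand() % kernel.NumSamples());
    // 2. Fit candidate models on the sample.
    std::vector<typename Kernel::Model> models;
    kernel.Fit(sample, &models);
    // 3. Score each candidate over all the data; keep the best one.
    for (size_t m = 0; m < models.size(); ++m) {
      std::vector<size_t> inliers;
      const double cost = scorer.Score(kernel, models[m], &inliers);
      if (cost < best_cost) {
        best_cost = cost;
        best_model = models[m];
      }
    }
  }
  return best_model;
}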
Example 3: TEST
TEST(Triangulation, TriangulateDLT) {
  const NViewDataSet d = NRealisticCamerasRing(2, 12);
  for (int i = 0; i < d._X.cols(); ++i) {
    const Vec3 X_gt = d._X.col(i);
    Vec3 X_estimated;
    const Vec2 x1 = d._x[0].col(i);
    const Vec2 x2 = d._x[1].col(i);
    TriangulateDLT(d.P(0), x1.homogeneous(), d.P(1), x2.homogeneous(), &X_estimated);
    EXPECT_NEAR(0, DistanceLInfinity(X_estimated, X_gt), 1e-8);
  }
}
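Note: for reference, here is a minimal Eigen sketch of the standard two-view DLT construction behind a TriangulateDLT-style function (the library's version may differ in details). Each view contributes two rows built from the homogeneous image point and its projection matrix, and the homogeneous 3D point is the null vector of the resulting 4x4 system.

#include <Eigen/Dense>

Eigen::Vector3d TriangulateDLTSketch(
    const Eigen::Matrix<double, 3, 4> &P1, const Eigen::Vector3d &x1_hom,
    const Eigen::Matrix<double, 3, 4> &P2, const Eigen::Vector3d &x2_hom)
{
  Eigen::Matrix4d A;
  // For each view: rows u*P.row(2) - w*P.row(0) and v*P.row(2) - w*P.row(1).
  A.row(0) = x1_hom(0) * P1.row(2) - x1_hom(2) * P1.row(0);
  A.row(1) = x1_hom(1) * P1.row(2) - x1_hom(2) * P1.row(1);
  A.row(2) = x2_hom(0) * P2.row(2) - x2_hom(2) * P2.row(0);
  A.row(3) = x2_hom(1) * P2.row(2) - x2_hom(2) * P2.row(1);
  // The null vector of A is the homogeneous 3D point.
  Eigen::JacobiSVD<Eigen::Matrix4d> svd(A, Eigen::ComputeFullV);
  const Eigen::Vector4d X_hom = svd.matrixV().col(3);
  return X_hom.hnormalized();  // divide by the last coordinate
}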
Example 4: TEST
TEST(translation_averaging, globalTi_from_tijs_softl1_Ceres) {
  const int focal = 1000;
  const int principal_Point = 500;
  //-- Setup a circular camera rig or "cardioid".
  const int iNviews = 12;
  const int iNbPoints = 6;
  const bool bCardiod = true;
  const bool bRelative_Translation_PerTriplet = false;
  std::vector<openMVG::relativeInfo > vec_relative_estimates;
  const NViewDataSet d =
    Setup_RelativeTranslations_AndNviewDataset
    (
      vec_relative_estimates,
      focal, principal_Point, iNviews, iNbPoints,
      bCardiod, bRelative_Translation_PerTriplet
    );
  d.ExportToPLY("global_translations_from_Tij_GT.ply");
  visibleCamPosToSVGSurface(d._C, "global_translations_from_Tij_GT.svg");
  // Solve the translation averaging problem:
  std::vector<Vec3> vec_translations;
  EXPECT_TRUE(solve_translations_problem_softl1(
    vec_relative_estimates, bRelative_Translation_PerTriplet, iNviews, vec_translations));
  EXPECT_EQ(iNviews, vec_translations.size());
  // Check the accuracy of the found translations
  for (unsigned i = 0; i < iNviews; ++i)
  {
    const Vec3 t = vec_translations[i];
    const Mat3 & Ri = d._R[i];
    const Vec3 C_computed = - Ri.transpose() * t;
    const Vec3 C_GT = d._C[i] - d._C[0];
    //-- Check that the found camera position equals the GT value
    if (i == 0) {
      EXPECT_MATRIX_NEAR(C_computed, C_GT, 1e-6);
    }
    else {
      EXPECT_NEAR(0.0, DistanceLInfinity(C_computed.normalized(), C_GT.normalized()), 1e-6);
    }
  }
}
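Note: the check above relies on the pinhole convention t = -R*C, hence C = -R^T * t. Two tiny helpers make the conversion explicit (a sketch assuming Eigen-style Mat3/Vec3 types):

#include <Eigen/Dense>

// t = -R*C : world-to-camera translation from the camera center.
inline Eigen::Vector3d TranslationFromCenter(const Eigen::Matrix3d &R,
                                             const Eigen::Vector3d &C) {
  return -R * C;
}

// C = -R^T*t : camera center recovered from the world-to-camera pose.
inline Eigen::Vector3d CenterFromTranslation(const Eigen::Matrix3d &R,
                                             const Eigen::Vector3d &t) {
  return -R.transpose() * t;
}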
Example 5: NRealisticCamerasCardioid
NViewDataSet NRealisticCamerasCardioid(size_t nviews, size_t npoints,
  const nViewDatasetConfigurator config)
{
  //-- Setup a cardioid camera rig.
  NViewDataSet d;
  d._n = nviews;
  d._K.resize(nviews);
  d._R.resize(nviews);
  d._t.resize(nviews);
  d._C.resize(nviews);
  d._x.resize(nviews);
  d._x_ids.resize(nviews);
  d._X.resize(3, npoints);
  d._X.setRandom();
  d._X *= 0.6;
  Vecu all_point_ids(npoints);
  for (size_t j = 0; j < npoints; ++j)
    all_point_ids[j] = j;
  for (size_t i = 0; i < nviews; ++i) {
    Vec3 camera_center, t, jitter, lookdir;
    const double theta = i * 2 * M_PI / nviews;
    //-- Cardioid
    camera_center <<
      2 * sin(theta) - sin(2 * theta),
      0.0,
      2 * cos(theta) - cos(2 * theta); // Y axis UP
    camera_center *= config._dist;
    d._C[i] = camera_center;
    jitter.setRandom();
    jitter *= config._jitter_amount / camera_center.norm();
    lookdir = -camera_center + jitter;
    d._K[i] << config._fx, 0, config._cx,
               0, config._fy, config._cy,
               0, 0, 1;
    d._R[i] = LookAt(lookdir); // Y axis UP
    d._t[i] = -d._R[i] * camera_center; // t = -R*C, cf. HZ.
    d._x[i] = Project(d.P(i), d._X);
    d._x_ids[i] = all_point_ids;
  }
  return d;
}
Example 6: NRealisticCamerasCardioid
NViewDataSet NRealisticCamerasCardioid(size_t nviews, size_t npoints,
  const NViewDatasetConfigurator config)
{
  // Set up the camera parameters
  NViewDataSet d;
  d.actual_camera_num_ = nviews;
  d.camera_matrix_.resize(nviews);
  d.rotation_matrix_.resize(nviews);
  d.translation_vector_.resize(nviews);
  d.camera_center_.resize(nviews);
  d.projected_points_.resize(nviews);
  d.projected_point_ids_.resize(nviews);
  d.point_3d_.resize(3, npoints);
  d.point_3d_.setRandom();
  d.point_3d_ *= 0.6;
  Vecu all_point_ids(npoints);
  for (size_t j = 0; j < npoints; ++j)
    all_point_ids[j] = j;
  for (size_t i = 0; i < nviews; ++i) {
    Vec3 camera_center, t, jitter, lookdir;
    const double theta = i * 2 * M_PI / nviews;
    // Cardioid equation: place the camera center on the curve
    camera_center <<
      2 * sin(theta) - sin(2 * theta),
      0.0,
      2 * cos(theta) - cos(2 * theta); // Y axis UP
    camera_center *= config._dist;
    d.camera_center_[i] = camera_center;
    jitter.setRandom();
    jitter *= config._jitter_amount / camera_center.norm();
    lookdir = -camera_center + jitter;
    d.camera_matrix_[i] << config._fx, 0, config._cx,
                           0, config._fy, config._cy,
                           0, 0, 1;
    d.rotation_matrix_[i] = LookAt(lookdir); // Y axis UP
    d.translation_vector_[i] = -d.rotation_matrix_[i] * camera_center; // t = -R*C, cf. HZ.
    d.projected_points_[i] = Project(d.P(i), d.point_3d_);
    d.projected_point_ids_[i] = all_point_ids;
  }
  return d;
}
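Note: the Project helper used by both variants applies the projection matrix to homogeneous 3D points and dehomogenizes. A minimal Eigen sketch (not the library's exact implementation):

#include <Eigen/Dense>

Eigen::Matrix2Xd ProjectSketch(const Eigen::Matrix<double, 3, 4> &P,
                               const Eigen::Matrix3Xd &X)
{
  // Apply P to the homogeneous points, then divide by the last row.
  const Eigen::Matrix3Xd x_homogeneous = P * X.colwise().homogeneous();
  return x_homogeneous.colwise().hnormalized();
}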
Example 7: TEST
TEST(translation_averaging, globalTi_from_tijs_Triplets_ECCV14) {
  int focal = 1000;
  int principal_Point = 500;
  //-- Setup a circular camera rig or "cardioid".
  const int iNviews = 12;
  const int iNbPoints = 6;
  NViewDataSet d =
    //NRealisticCamerasRing(
    NRealisticCamerasCardioid(
      iNviews, iNbPoints,
      nViewDatasetConfigurator(focal,focal,principal_Point,principal_Point,5,0));
  d.ExportToPLY("global_translations_from_triplets_GT.ply");
  visibleCamPosToSVGSurface(d._C, "global_translations_from_triplets_GT.svg");
  // List the successive triplets of the large camera loop
  std::vector< graph::Triplet > vec_triplets;
  for (size_t i = 0; i < iNviews; ++i)
  {
    const size_t iPlus1 = modifiedMod(i+1, iNviews);
    const size_t iPlus2 = modifiedMod(i+2, iNviews);
    //-- Sort the triplet indices to get a monotonically ascending series of values
    size_t triplet[3] = {i, iPlus1, iPlus2};
    std::sort(&triplet[0], &triplet[3]);
    vec_triplets.push_back(Triplet(triplet[0], triplet[1], triplet[2]));
  }
  //-- For each triplet, compute the relative translation and rotation motions
  std::vector<openMVG::relativeInfo > vec_initialEstimates;
  for (size_t i = 0; i < vec_triplets.size(); ++i)
  {
    const graph::Triplet & triplet = vec_triplets[i];
    size_t I = triplet.i, J = triplet.j, K = triplet.k;
    //-- Build camera aliases over the GT translations and rotations:
    const Mat3 & RI = d._R[I];
    const Mat3 & RJ = d._R[J];
    const Mat3 & RK = d._R[K];
    const Vec3 & ti = d._t[I];
    const Vec3 & tj = d._t[J];
    const Vec3 & tk = d._t[K];
    //-- Build the relative motions (that feed the linear program formulation)
    {
      Mat3 RijGt;
      Vec3 tij;
      RelativeCameraMotion(RI, ti, RJ, tj, &RijGt, &tij);
      vec_initialEstimates.push_back(
        std::make_pair(std::make_pair(I, J), std::make_pair(RijGt, tij)));
      Mat3 RjkGt;
      Vec3 tjk;
      RelativeCameraMotion(RJ, tj, RK, tk, &RjkGt, &tjk);
      vec_initialEstimates.push_back(
        std::make_pair(std::make_pair(J, K), std::make_pair(RjkGt, tjk)));
      Mat3 RikGt;
      Vec3 tik;
      RelativeCameraMotion(RI, ti, RK, tk, &RikGt, &tik);
      vec_initialEstimates.push_back(
        std::make_pair(std::make_pair(I, K), std::make_pair(RikGt, tik)));
    }
  }
  //-- Compute the global translations from the triplets of heading directions
  //   with the Kyle method
  std::vector<int> vec_edges;
  vec_edges.reserve(vec_initialEstimates.size() * 2);
  std::vector<double> vec_poses;
  vec_poses.reserve(vec_initialEstimates.size() * 3);
  std::vector<double> vec_weights;
  vec_weights.reserve(vec_initialEstimates.size());
  for (int i = 0; i < vec_initialEstimates.size(); ++i)
  {
    const openMVG::relativeInfo & rel = vec_initialEstimates[i];
    vec_edges.push_back(rel.first.first);
    vec_edges.push_back(rel.first.second);
    const Vec3 EdgeDirection = -(d._R[rel.first.second].transpose() * rel.second.second.normalized());
    vec_poses.push_back(EdgeDirection(0));
    vec_poses.push_back(EdgeDirection(1));
    vec_poses.push_back(EdgeDirection(2));
    vec_weights.push_back(1.0);
  }
  const double function_tolerance = 1e-7, parameter_tolerance = 1e-8;
  const int max_iterations = 500;
  const double loss_width = 0.0;
  std::vector<double> X(iNviews*3);
  EXPECT_TRUE(
    solve_translations_problem(
//......... part of the code is omitted here .........
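Note: the relative motions fed to the solver follow directly from the absolute world-to-camera poses. A minimal sketch consistent with openMVG's RelativeCameraMotion convention:

#include <Eigen/Dense>

void RelativeCameraMotionSketch(
    const Eigen::Matrix3d &Ri, const Eigen::Vector3d &ti,
    const Eigen::Matrix3d &Rj, const Eigen::Vector3d &tj,
    Eigen::Matrix3d *Rij, Eigen::Vector3d *tij)
{
  *Rij = Rj * Ri.transpose();   // rotation from frame i to frame j
  *tij = tj - (*Rij) * ti;      // translation of frame i expressed in frame j
}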
Example 8: TEST
TEST(translation_averaging, globalTi_from_tijs_Triplets) {
  int focal = 1000;
  int principal_Point = 500;
  //-- Setup a circular camera rig or "cardioid".
  const int iNviews = 12;
  const int iNbPoints = 6;
  NViewDataSet d =
    //NRealisticCamerasRing(
    NRealisticCamerasCardioid(
      iNviews, iNbPoints,
      nViewDatasetConfigurator(focal,focal,principal_Point,principal_Point,5,0));
  d.ExportToPLY("global_translations_from_triplets_GT.ply");
  visibleCamPosToSVGSurface(d._C, "global_translations_from_triplets_GT.svg");
  // List the successive triplets of the large camera loop
  std::vector< graphUtils::Triplet > vec_triplets;
  for (size_t i = 0; i < iNviews; ++i)
  {
    const size_t iPlus1 = modifiedMod(i+1, iNviews);
    const size_t iPlus2 = modifiedMod(i+2, iNviews);
    //-- Sort the triplet indices to get a monotonically ascending series of values
    size_t triplet[3] = {i, iPlus1, iPlus2};
    std::sort(&triplet[0], &triplet[3]);
    vec_triplets.push_back(Triplet(triplet[0], triplet[1], triplet[2]));
  }
  //-- For each triplet, compute the relative translation and rotation motions
  std::vector<openMVG::lInfinityCV::relativeInfo > vec_initialEstimates;
  for (size_t i = 0; i < vec_triplets.size(); ++i)
  {
    const graphUtils::Triplet & triplet = vec_triplets[i];
    size_t I = triplet.i, J = triplet.j, K = triplet.k;
    //-- Build camera aliases over the GT translations and rotations:
    const Mat3 & RI = d._R[I];
    const Mat3 & RJ = d._R[J];
    const Mat3 & RK = d._R[K];
    const Vec3 & ti = d._t[I];
    const Vec3 & tj = d._t[J];
    const Vec3 & tk = d._t[K];
    //-- Build the relative motions (that feed the linear program formulation)
    {
      Mat3 RijGt;
      Vec3 tij;
      RelativeCameraMotion(RI, ti, RJ, tj, &RijGt, &tij);
      vec_initialEstimates.push_back(
        std::make_pair(std::make_pair(I, J), std::make_pair(RijGt, tij)));
      Mat3 RjkGt;
      Vec3 tjk;
      RelativeCameraMotion(RJ, tj, RK, tk, &RjkGt, &tjk);
      vec_initialEstimates.push_back(
        std::make_pair(std::make_pair(J, K), std::make_pair(RjkGt, tjk)));
      Mat3 RikGt;
      Vec3 tik;
      RelativeCameraMotion(RI, ti, RK, tk, &RikGt, &tik);
      vec_initialEstimates.push_back(
        std::make_pair(std::make_pair(I, K), std::make_pair(RikGt, tik)));
    }
  }
  //-- Compute the global translations from the triplets of heading directions
  //   with the L_infinity optimization
  std::vector<double> vec_solution(iNviews*3 + vec_initialEstimates.size()/3 + 1);
  double gamma = -1.0;
  //- a. Setup the LP solver,
  //- b. Setup the constraints generator (for the dedicated L_inf problem),
  //- c. Build the constraints and solve the problem,
  //- d. Get back the estimated parameters.
  //- a. Setup the LP solver,
  OSI_CLP_SolverWrapper solverLP(vec_solution.size());
  //- b. Setup the constraints generator (for the dedicated L_inf problem),
  Tifromtij_ConstraintBuilder_OneLambdaPerTrif cstBuilder(vec_initialEstimates);
  //- c. Build the constraints and solve the problem (setup the constraints and solver)
  LP_Constraints_Sparse constraint;
  cstBuilder.Build(constraint);
  solverLP.setup(constraint);
  //-- Solving
  EXPECT_TRUE(solverLP.solve()); // the linear program must have a solution
  //- d. Get back the estimated parameters.
  solverLP.getSolution(vec_solution);
  gamma = vec_solution[vec_solution.size()-1];
  //--
  //-- Unit test checks on the found solution
  //--
  EXPECT_NEAR(0.0, gamma, 1e-6); // Gamma must be 0: no noise, perfect data was provided
//......... part of the code is omitted here .........
Example 9: TEST
TEST(Translation_Structure_L_Infinity, OSICLP_SOLVER) {
  const size_t kViewNum = 3;
  const size_t kPointsNum = 6;
  const NViewDataSet d = NRealisticCamerasRing(kViewNum, kPointsNum,
    NViewDatasetConfigurator(1,1,0,0,5,0)); // Assume the camera intrinsics are the identity matrix
  d.ExportToPLY("test_Before_Infinity.ply");
  // Triangulate all the points for the test
  NViewDataSet d2 = d;
  //-- Set the data that will be computed to 0 to be sure of the computation results:
  d2.point_3d_.fill(0); // Set the 3D points of dataset 2 to 0 to be sure new data is computed
  std::fill(d2.translation_vector_.begin(), d2.translation_vector_.end(), Vec3(0.0, 0.0, 0.0));
  // Create the mega matrix
  Mat megaMat(4, d.actual_camera_num_ * d.projected_points_[0].cols());
  {
    size_t cpt = 0;
    for (size_t i = 0; i < d.actual_camera_num_; ++i)
    {
      const size_t camera_index = i;
      for (size_t j = 0; j < d.projected_points_[0].cols(); ++j)
      {
        megaMat(0, cpt) = d.projected_points_[camera_index].col(j)(0);
        megaMat(1, cpt) = d.projected_points_[camera_index].col(j)(1);
        megaMat(2, cpt) = j;
        megaMat(3, cpt) = camera_index;
        cpt++;
      }
    }
  }
  // Solve the problem and check that the fitted values are good enough
  {
    std::vector<double> vec_solution((kViewNum + kPointsNum) * 3);
    OSI_CLP_SolverWrapper wrapper_osiclp_solver(vec_solution.size());
    Translation_Structure_L1_ConstraintBuilder constraint_builder(d.rotation_matrix_, megaMat);
    EXPECT_TRUE(
      (BisectionLinearProgramming<Translation_Structure_L1_ConstraintBuilder, LP_Constraints_Sparse>(
        wrapper_osiclp_solver,
        constraint_builder,
        &vec_solution,
        1.0,
        0.0))
    );
    // Move the computed values to the dataset for residual estimation.
    {
      //-- Fill the ti
      for (size_t i = 0; i < kViewNum; ++i)
      {
        size_t index = i * 3;
        d2.translation_vector_[i] = Vec3(vec_solution[index], vec_solution[index+1], vec_solution[index+2]);
        // Change Ci to -Ri*Ci
        d2.camera_center_[i] = -d2.rotation_matrix_[i] * d2.translation_vector_[i];
      }
      //-- Now the Xi:
      for (size_t i = 0; i < kPointsNum; ++i) {
        size_t index = 3 * kViewNum;
        d2.point_3d_.col(i) = Vec3(vec_solution[index+i*3], vec_solution[index+i*3+1], vec_solution[index+i*3+2]);
      }
    }
    // Compute the L2 residuals from the estimated parameter values:
    Vec2 xk, xsum(0.0, 0.0);
    for (size_t i = 0; i < d2.actual_camera_num_; ++i) {
      for (size_t k = 0; k < d.projected_points_[0].cols(); ++k)
      {
        xk = Project(d2.P(i), Vec3(d2.point_3d_.col(k)));
        xsum += Vec2((xk - d2.projected_points_[i].col(k)).array().pow(2));
      }
    }
    double dResidual2D = xsum.array().sqrt().sum();
    // Check that the 2D re-projections and 3D points are close to the GT.
    EXPECT_NEAR(0.0, dResidual2D, 1e-4);
  }
  d2.ExportToPLY("test_After_Infinity.ply");
}