This page collects typical usage examples of the C++ method Mat::cols. If you are unsure what Mat::cols does in C++, how to call it, or simply want to see it used in context, the curated code samples below may help; you can also read further about the Mat class itself.
A total of 15 code examples of Mat::cols are shown below, drawn from open-source projects and ordered by popularity.
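As a minimal, self-contained sketch before the real-world examples (an assumption made here for illustration: Mat stands in for Eigen::MatrixXd, as in the openMVG-style examples below; the Armadillo examples instead use n_cols for the count and .cols(...) for column views):

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Hypothetical example: Mat is a local alias for Eigen::MatrixXd.
  using Mat = Eigen::MatrixXd;
  Mat X = Mat::Random(3, 8);
  std::cout << "X has " << X.cols() << " columns and "
            << X.rows() << " rows\n";   // prints 8 columns, 3 rows
  return 0;
}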
Example 1: slice_into
IGL_INLINE void igl::slice_into(
  const Mat& X,
  const Eigen::Matrix<int,Eigen::Dynamic,1> & R,
  const int dim,
  Mat& Y)
{
  Eigen::VectorXi C;
  switch(dim)
  {
    case 1:
      assert(R.size() == X.rows());
      // boring base case
      if(X.cols() == 0)
      {
        return;
      }
      igl::colon(0,X.cols()-1,C);
      return slice_into(X,R,C,Y);
    case 2:
      assert(R.size() == X.cols());
      // boring base case
      if(X.rows() == 0)
      {
        return;
      }
      igl::colon(0,X.rows()-1,C);
      return slice_into(X,C,R,Y);
    default:
      assert(false && "Unsupported dimension");
      return;
  }
}
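A hypothetical usage sketch of the libigl-style overload above (the sizes and values here are invented for illustration): with dim == 1, R selects the destination rows of a pre-sized output Y, so that Y(R(i), j) = X(i, j).

#include <igl/slice_into.h>
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd X(2, 3);                       // 2x3 block to scatter
  X << 1, 2, 3,
       4, 5, 6;
  Eigen::VectorXi R(2);
  R << 0, 4;                                     // destination row indices in Y
  Eigen::MatrixXd Y = Eigen::MatrixXd::Zero(5, 3);
  igl::slice_into(X, R, 1, Y);                   // Y.row(0) = X.row(0), Y.row(4) = X.row(1)
  return 0;
}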
Example 2: ACKernelAdaptor
ACKernelAdaptor(
  const Mat &x1, int w1, int h1,
  const Mat &x2, int w2, int h2, bool bPointToLine = true)
  : x1_(x1.rows(), x1.cols()), x2_(x2.rows(), x2.cols()),
    N1_(3,3), N2_(3,3), logalpha0_(0.0), bPointToLine_(bPointToLine)
{
  assert(2 == x1_.rows());
  assert(x1_.rows() == x2_.rows());
  assert(x1_.cols() == x2_.cols());

  NormalizePoints(x1, &x1_, &N1_, w1, h1);
  NormalizePoints(x2, &x2_, &N2_, w2, h2);

  // LogAlpha0 is used to make error data scale invariant
  if(bPointToLine) {
    // Ratio of containing diagonal image rectangle over image area
    double D = sqrt(w2*(double)w2 + h2*(double)h2); // diameter
    double A = w2*(double)h2; // area
    logalpha0_ = log10(2.0*D/A /N2_(0,0));
  }
  else {
    // ratio of area : unit circle over image area
    logalpha0_ = log10(M_PI/(w2*(double)h2) /(N2_(0,0)*N2_(0,0)));
  }
}
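For reference, this only transcribes the two branches above into formulas (no extra documentation from the source is assumed): with D the image diagonal, A = w2*h2 the image area, and n = N2_(0,0) the normalization scale, the stored value is

\[
\log_{10}\alpha_0 =
\begin{cases}
\log_{10}\dfrac{2D}{A\,n}, & D=\sqrt{w_2^2+h_2^2} & \text{(point-to-line error)}\\[1.5ex]
\log_{10}\dfrac{\pi}{A\,n^2}, & & \text{(point-to-point error)}
\end{cases}
\]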
Example 3: op_trapz::apply_noalias
inline
void
op_trapz::apply_noalias(Mat<eT>& out, const Mat<eT>& Y, const uword dim)
{
  arma_extra_debug_sigprint();

  arma_debug_check( (dim > 1), "trapz(): argument 'dim' must be 0 or 1" );

  uword N = 0;
  if(dim == 0)      { N = Y.n_rows; }
  else if(dim == 1) { N = Y.n_cols; }

  if(N <= 1)
  {
    if(dim == 0)      { out.zeros(1, Y.n_cols); }
    else if(dim == 1) { out.zeros(Y.n_rows, 1); }
    return;
  }

  if(dim == 0)
  {
    out = sum( (0.5 * (Y.rows(0, N-2) + Y.rows(1, N-1))), 0 );
  }
  else if(dim == 1)
  {
    out = sum( (0.5 * (Y.cols(0, N-2) + Y.cols(1, N-1))), 1 );
  }
}
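The two sums above implement the composite trapezoidal rule with unit spacing along the chosen dimension. For dim == 1 (integration across columns), each entry of the output is

\[
\text{out}(i) \;=\; \sum_{j=0}^{N-2} \frac{Y(i,j) + Y(i,j+1)}{2}
\]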
Example 4: Solve
static void Solve(const Mat &x1, const Mat &x2, std::vector<Mat3> *pvec_E)
{
  assert(3 == x1.rows());
  assert(8 <= x1.cols());
  assert(x1.rows() == x2.rows());
  assert(x1.cols() == x2.cols());

  MatX9 A(x1.cols(), 9);
  EncodeEpipolarEquation(x1, x2, &A);

  Vec9 e;
  Nullspace(&A, &e);
  Mat3 E = Map<RMat3>(e.data());

  // Find the closest essential matrix to E in Frobenius norm
  // E = UD'VT
  if (x1.cols() > 8) {
    Eigen::JacobiSVD<Mat3> USV(E, Eigen::ComputeFullU | Eigen::ComputeFullV);
    Vec3 d = USV.singularValues();
    double a = d[0];
    double b = d[1];
    d << (a+b)/2., (a+b)/2., 0.0;
    E = USV.matrixU() * d.asDiagonal() * USV.matrixV().transpose();
  }
  pvec_E->push_back(E);
}
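Written out, the projection in the branch above does the following: if E = U diag(a, b, c) V^T with a >= b >= c, the matrix pushed back is the rank-2 matrix with two equal singular values (the essential-matrix constraint),

\[
E' \;=\; U \,\operatorname{diag}\!\left(\tfrac{a+b}{2},\ \tfrac{a+b}{2},\ 0\right) V^{\top}
\]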
Example 5: Solve
void FourPointSolver::Solve(const Mat &x, const Mat &y, vector<Mat3> *Hs) {
  assert(2 == x.rows());
  assert(4 <= x.cols());
  assert(x.rows() == y.rows());
  assert(x.cols() == y.cols());

  Mat::Index n = x.cols();

  Vec9 h;
  if (n == 4) {
    // In the minimal configuration we use a fixed-size matrix to let
    // Eigen and the compiler do the maximum amount of optimization.
    typedef Eigen::Matrix<double, 16, 9> Mat16_9;
    Mat16_9 L = Mat::Zero(16, 9);
    BuildActionMatrix(L, x, y);
    Nullspace(&L, &h);
  }
  else {
    MatX9 L = Mat::Zero(n * 2, 9);
    BuildActionMatrix(L, x, y);
    Nullspace(&L, &h);
  }
  Mat3 H = Map<RMat3>(h.data()); // map the linear vector as the H matrix
  Hs->push_back(H);
}
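BuildActionMatrix is not shown on this page, but the surrounding code matches the standard DLT (direct linear transform) formulation: each correspondence (x, y) with y ~ Hx contributes linear constraints on the nine entries of H, the stacked system is solved for its null vector, and that vector is reshaped row-wise into H:

\[
L\,h = 0,\qquad h \in \mathbb{R}^9,\qquad
H = \begin{bmatrix} h_1 & h_2 & h_3\\ h_4 & h_5 & h_6\\ h_7 & h_8 & h_9 \end{bmatrix}
\]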
Example 6: Solve
void FivePointSolver::Solve(const Mat &x1, const Mat &x2, vector<Mat3> *E) {
  assert(2 == x1.rows());
  assert(5 <= x1.cols());
  assert(x1.rows() == x2.rows());
  assert(x1.cols() == x2.cols());
  FivePointsRelativePose(x1, x2, E);
}
Example 7: Refine_RTS
/** 3D rigid transformation refinement using LM
 * Refine the scale, rotation and translation of a rigid transformation
 * using a Levenberg-Marquardt optimization.
 *
 * \param[in]     x1 The first 3xN matrix of Euclidean points
 * \param[in]     x2 The second 3xN matrix of Euclidean points
 * \param[in,out] S  The scale factor (initial value, refined in place)
 * \param[in,out] t  The 3x1 translation (initial value, refined in place)
 * \param[in,out] R  The 3x3 rotation (initial value, refined in place)
 *
 * \return none
 */
static void Refine_RTS( const Mat &x1,
                        const Mat &x2,
                        double * S,
                        Vec3 * t,
                        Mat3 * R )
{
  {
    lm_SRTRefine_functor functor( 7, 3 * x1.cols(), x1, x2, *S, *R, *t );

    Eigen::NumericalDiff<lm_SRTRefine_functor> numDiff( functor );
    Eigen::LevenbergMarquardt<Eigen::NumericalDiff<lm_SRTRefine_functor> > lm( numDiff );
    lm.parameters.maxfev = 1000;

    Vec xlm = Vec::Zero( 7 ); // The deviation vector {tx,ty,tz,rotX,rotY,rotZ,S}
    lm.minimize( xlm );

    Vec3 transAdd = xlm.block<3, 1>( 0, 0 );
    Vec3 rot = xlm.block<3, 1>( 3, 0 );
    double SAdd = xlm( 6 );

    // Build the rotation matrix
    Mat3 Rcor =
      ( Eigen::AngleAxis<double>( rot( 0 ), Vec3::UnitX() )
      * Eigen::AngleAxis<double>( rot( 1 ), Vec3::UnitY() )
      * Eigen::AngleAxis<double>( rot( 2 ), Vec3::UnitZ() ) ).toRotationMatrix();

    *R = ( *R ) * Rcor;
    *t = ( *t ) + transAdd;
    *S = ( *S ) + SAdd;
  }

  // Refine rotation
  {
    lm_RRefine_functor functor( 3, 3 * x1.cols(), x1, x2, *S, *R, *t );

    Eigen::NumericalDiff<lm_RRefine_functor> numDiff( functor );
    Eigen::LevenbergMarquardt<Eigen::NumericalDiff<lm_RRefine_functor> > lm( numDiff );
    lm.parameters.maxfev = 1000;

    Vec xlm = Vec::Zero( 3 ); // The deviation vector {rotX,rotY,rotZ}
    lm.minimize( xlm );

    Vec3 rot = xlm.block<3, 1>( 0, 0 );

    // Build the rotation matrix
    Mat3 Rcor =
      ( Eigen::AngleAxis<double>( rot( 0 ), Vec3::UnitX() )
      * Eigen::AngleAxis<double>( rot( 1 ), Vec3::UnitY() )
      * Eigen::AngleAxis<double>( rot( 2 ), Vec3::UnitZ() ) ).toRotationMatrix();

    *R = ( *R ) * Rcor;
  }
}
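The functors lm_SRTRefine_functor and lm_RRefine_functor are not shown on this page, so the exact residual is an assumption, but given the doc comment and the 3*x1.cols() residual count, the quantity presumably being minimized is the per-point alignment error of the scaled rigid transform, first over all seven parameters and then over the three rotation parameters only:

\[
\min_{s,\,R,\,t}\ \sum_{i} \bigl\lVert x_{2,i} - \left(s\,R\,x_{1,i} + t\right)\bigr\rVert^2
\]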
Example 8: Fit
void Fit(const vector<size_t> &samples, std::vector<Model> *models) const {
  const Mat x1 = ExtractColumns(x1_, samples);
  const Mat x2 = ExtractColumns(x2_, samples);
  assert(3 == x1.rows());
  assert(MINIMUM_SAMPLES <= x1.cols());
  assert(x1.rows() == x2.rows());
  assert(x1.cols() == x2.cols());
  EightPointRelativePoseSolver::Solve(x1, x2, models);
}
Example 9: DataProps
List DataProps(const Mat<T>& dat, IntegerVector subsetIndices) {
  Mat<T> U, V;
  Col<T> S;
  uvec subsetCols = sort(as<uvec>(subsetIndices) - 1);

  // Get the summary profile for the network subset from the SVD.
  bool success = svd_econ(U, S, V, dat.cols(subsetCols), "left", "dc");
  if (!success) {
    Function warning("warning");
    warning("SVD failed to converge, does your data contain missing or"
            " infinite values?");
    return List::create(
      Named("moduleSummary") = NA_REAL,
      Named("nodeContribution") = NA_REAL,
      Named("moduleCoherence") = NA_REAL
    );
  }
  Mat<T> summary(U.col(0));

  // Flip the sign of the summary profile so that the eigenvector is
  // positively correlated with the average scaled value of the underlying
  // data for the network module.
  Mat<T> ap = cor(mean(dat.cols(subsetCols), 1), summary);
  if (ap(0,0) < 0) {
    summary *= -1;
  }

  // We want the correlation between each variable (node) in the underlying
  // data and the summary profile for that network subset.
  Mat<T> p = cor(summary, dat.cols(subsetCols));
  Mat<T> MM(p);

  // To make sure the resulting KIM vector is in the correct order,
  // order the results to match the original ordering of subsetIndices.
  Function rank("rank"); // Rank only works on R objects like IntegerVector.
  uvec idxRank = as<uvec>(rank(subsetIndices)) - 1;
  Col<T> oMM = MM(idxRank);

  // The proportion of variance explained is the average of the squared
  // correlations between the network module summary profile and each of the
  // variables in the data that correspond to nodes in the network subset.
  Mat<T> pve(mean(square(p), 1));

  return List::create(
    Named("moduleSummary") = summary,
    Named("nodeContribution") = oMM,
    Named("moduleCoherence") = pve
  );
}
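In symbols, restating only what the code above computes: with u1 the sign-corrected first left singular vector of the m subset columns x_1, ..., x_m,

\[
\text{nodeContribution}_j = \operatorname{cor}(u_1, x_j),\qquad
\text{moduleCoherence} = \frac{1}{m}\sum_{j=1}^{m}\operatorname{cor}(u_1, x_j)^2
\]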
Example 10: ExpectFundamentalProperties
// Check the properties of a fundamental matrix:
//
//   1. The determinant is 0 (rank deficient).
//   2. The condition x'^T * F * x = 0 is satisfied to precision.
//
bool ExpectFundamentalProperties(const Mat3 &F,
                                 const Mat &ptsA,
                                 const Mat &ptsB,
                                 double precision) {
  bool bOk = true;
  bOk &= F.determinant() < precision;
  assert(ptsA.cols() == ptsB.cols());
  const Mat hptsA = ptsA.colwise().homogeneous();
  const Mat hptsB = ptsB.colwise().homogeneous();
  for (int i = 0; i < ptsA.cols(); ++i) {
    const double residual = hptsB.col(i).dot(F * hptsA.col(i));
    bOk &= residual < precision;
  }
  return bOk;
}
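The two properties checked above, written out for each correspondence (x_A, x_B) in homogeneous coordinates:

\[
\det F \approx 0, \qquad x_B^{\top}\, F\, x_A \approx 0
\]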
Example 11: NRealisticCamerasRing
TEST(Resection_Kernel, Multiview) {
  const int views_num = 3;
  const int points_num = 10;
  const NViewDataSet d = NRealisticCamerasRing(views_num, points_num,
    NViewDatasetConfigurator(1,1,0,0,5,0)); // Assume a camera with the identity matrix as K

  const int nResectionCameraIndex = 2;

  // Solve the problem and check that the fitted values are good enough
  {
    Mat x = d.projected_points_[nResectionCameraIndex];
    Mat X = d.point_3d_;
    mvg::multiview::resection::PoseResectionKernel kernel(x, X);

    size_t samples_[6] = {0,1,2,3,4,5};
    std::vector<size_t> samples(samples_, samples_ + 6);
    std::vector<Mat34> Ps;
    kernel.Fit(samples, &Ps);
    for (size_t i = 0; i < x.cols(); ++i) {
      EXPECT_NEAR(0.0, kernel.Error(i, Ps[0]), 1e-8);
    }

    EXPECT_EQ(1, Ps.size());

    // Check that the projection matrix is close to the ground truth:
    Mat34 GT_ProjectionMatrix = d.P(nResectionCameraIndex).array()
                                / d.P(nResectionCameraIndex).norm();
    Mat34 COMPUTED_ProjectionMatrix = Ps[0].array() / Ps[0].norm();
    EXPECT_MATRIX_NEAR(GT_ProjectionMatrix, COMPUTED_ProjectionMatrix, 1e-8);
  }
}
Example 12: sampleGoodBasis
/**
 * sampleGoodBasis - Creates and returns a good basis for the input data.
 * Uses the left eigenvectors of a subset of nSample columns from data.
 */
static fmat sampleGoodBasis(const Mat<u16> &data, const int nSample) {
  const int L = data.n_rows;

  // subset of nSample evenly spaced columns:
  fmat sampleData = conv_to<fmat>::from( data.cols( linspace<uvec>(0, data.n_cols-1, nSample) ) );

  // make a transformation matrix that represents taking adjacent differences
  // between each row (except the first row):
  mat P = eye(L, L);
  P.diag(-1) = -ones(L-1);

  // same as sampleData = P * sampleData (adjacent differences between rows)
  sampleData.rows(1, L-1) -= sampleData.rows(0, L-2);

  // this is the time-consuming step;
  // numerically stable with floats because the values are much smaller now,
  // convert the resulting small matrix to double when done:
  mat M = conv_to<mat>::from(sampleData * sampleData.t());

  // undo the effect of P before calculating eigenvectors
  M = P.i() * M * P.t().i(); // M is now the original sampleData * sampleData.t()

  vec eigenvalues;
  mat eigenvectors;
  eig_sym(eigenvalues, eigenvectors, M);

  // flip because eigenvectors are returned in ascending order:
  return conv_to<fmat>::from( fliplr(eigenvectors) );
}
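The P.i() * M * P.t().i() step relies on a simple identity. Writing S for the original sampleData and D = P S for the row-differenced version actually held in memory, the comment "M is now the original sampleData * sampleData.t()" follows from

\[
P^{-1}\,(D D^{\top})\,P^{-\top} \;=\; P^{-1} P\, S S^{\top} P^{\top} P^{-\top} \;=\; S S^{\top}
\]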
Example 13: EuclideanToHomogeneous
void EuclideanToHomogeneous(const Mat &X, Mat *H) {
  Mat::Index d = X.rows();
  Mat::Index n = X.cols();
  H->resize(d + 1, n);
  H->block(0, 0, d, n) = X;
  H->row(d).setOnes();
}
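That is, each of the n columns gets a homogeneous coordinate of 1 appended:

\[
H \;=\; \begin{bmatrix} X \\ \mathbf{1}^{\top} \end{bmatrix} \in \mathbb{R}^{(d+1)\times n}
\]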
Example 14: ExpectFundamentalProperties
bool ExpectFundamentalProperties(const TMat &F,
                                 const Mat &ptsA,
                                 const Mat &ptsB,
                                 double precision) {
  bool bOk = true;
  bOk &= F.determinant() < precision;
  assert(ptsA.cols() == ptsB.cols());
  Mat hptsA, hptsB;
  EuclideanToHomogeneous(ptsA, &hptsA);
  EuclideanToHomogeneous(ptsB, &hptsB);
  for (int i = 0; i < ptsA.cols(); ++i) {
    double residual = hptsB.col(i).dot(F * hptsA.col(i));
    bOk &= residual < precision;
  }
  return bOk;
}
Example 15: direct_princomp
inline
bool
op_princomp::direct_princomp
  (
  Mat<typename T1::elem_type>& coeff_out,
  Mat<typename T1::elem_type>& score_out,
  const Base<typename T1::elem_type, T1>& X,
  const typename arma_not_cx<typename T1::elem_type>::result* junk
  )
{
  arma_extra_debug_sigprint();
  arma_ignore(junk);

  typedef typename T1::elem_type eT;

  const unwrap_check<T1> Y( X.get_ref(), score_out );
  const Mat<eT>& in = Y.M;

  const uword n_rows = in.n_rows;
  const uword n_cols = in.n_cols;

  if(n_rows > 1) // more than one sample
  {
    // subtract the mean - use score_out as a temporary matrix
    score_out = in;
    score_out.each_row() -= mean(in);

    // singular value decomposition
    Mat<eT> U;
    Col<eT> s;

    const bool svd_ok = svd(U, s, coeff_out, score_out);

    if(svd_ok == false) { return false; }

    // normalize the eigenvalues
    s /= std::sqrt( double(n_rows - 1) );

    // project the samples to the principals
    score_out *= coeff_out;

    if(n_rows <= n_cols) // number of samples is less than their dimensionality
    {
      score_out.cols(n_rows-1, n_cols-1).zeros();

      Col<eT> s_tmp = zeros< Col<eT> >(n_cols);
      s_tmp.rows(0, n_rows-2) = s.rows(0, n_rows-2);
      s = s_tmp;
    }
  }
  else // 0 or 1 samples
  {
    coeff_out.eye(n_cols, n_cols);

    score_out.copy_size(in);
    score_out.zeros();
  }

  return true;
}
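In matrix form, restating what the main branch above computes for the mean-centered data (score_out before the SVD):

\[
\tilde{X} = U\,\Sigma\,V^{\top},\qquad
\text{coeff\_out} = V,\qquad
\text{score\_out} = \tilde{X}\,V = U\,\Sigma,\qquad
s_i = \frac{\sigma_i}{\sqrt{n_{\text{rows}}-1}}
\]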