This article collects typical usage examples of the C++ method Eigen::MatrixBase::rows. If you are wondering what MatrixBase::rows does, how to call it, or what real code that uses it looks like, the curated examples below may help; you can also look further into usage examples of the enclosing class, Eigen::MatrixBase.
The following 15 code examples all use MatrixBase::rows; they are taken from open-source projects and are ordered roughly by popularity.
Example 1: cat
IGL_INLINE void igl::cat(
const int dim,
const Eigen::MatrixBase<Derived> & A,
const Eigen::MatrixBase<Derived> & B,
MatC & C)
{
assert(dim == 1 || dim == 2);
// Special case if B or A is empty
if(A.size() == 0)
{
C = B;
return;
}
if(B.size() == 0)
{
C = A;
return;
}
if(dim == 1)
{
assert(A.cols() == B.cols());
C.resize(A.rows()+B.rows(),A.cols());
C << A,B;
}else if(dim == 2)
{
assert(A.rows() == B.rows());
C.resize(A.rows(),A.cols()+B.cols());
C << A,B;
}else
{
fprintf(stderr,"cat.h: Error: Unsupported dimension %d\n",dim);
}
}
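A minimal usage sketch for the overload above, assuming libigl's <igl/cat.h> header and Eigen are available; the matrix sizes are purely illustrative.

#include <igl/cat.h>
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A(2, 2), B(1, 2), C;
  A << 1, 2,
       3, 4;
  B << 5, 6;
  igl::cat(1, A, B, C);   // dim == 1: stack A on top of B, so C is 3 x 2
  std::cout << C << "\n";
  igl::cat(2, A, A, C);   // dim == 2: place A beside A, so C is 2 x 4
  std::cout << C << "\n";
}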
Example 2: forward_propagation
void forward_propagation(Eigen::MatrixBase<Derived1> const &input,
Eigen::MatrixBase<Derived2> const &weight,
Eigen::MatrixBase<Derived3> const &bias,
Eigen::MatrixBase<Derived4> &output,
bool no_overlap = true,
UnaryFunc func = UnaryFunc())
{
static_assert(std::is_same<typename Derived1::Scalar, typename Derived2::Scalar>::value &&
std::is_same<typename Derived2::Scalar, typename Derived3::Scalar>::value &&
std::is_same<typename Derived3::Scalar, typename Derived4::Scalar>::value,
"Data type of matrix input, weight, bias and output should be the same");
if(input.rows() != 0 && weight.rows() != 0 &&
bias.rows() != 0){
if(no_overlap){
output.noalias() = weight * input;
}else{
output = weight * input;
}
using Scalar = typename Derived3::Scalar;
using MatType = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using Mapper = Eigen::Map<const MatType, Eigen::Aligned>;
Mapper Map(&bias(0, 0), bias.size());
output.colwise() += Map;
func(output);
}
}
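The snippet above is an excerpt and its template-parameter list is not shown, so calling it directly is awkward. Below is a standalone sketch of the same forward pass with plain Eigen types; tanh stands in for the unspecified UnaryFunc.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd input  = Eigen::MatrixXd::Random(4, 10);  // 4 features, 10 samples
  Eigen::MatrixXd weight = Eigen::MatrixXd::Random(3, 4);   // 3 output units
  Eigen::VectorXd bias   = Eigen::VectorXd::Random(3);

  Eigen::MatrixXd output(3, 10);
  output.noalias() = weight * input;          // safe: output does not alias the operands
  output.colwise() += bias;                   // broadcast the bias over every column
  output = output.array().tanh().matrix();    // activation, in place of func(output)
  std::cout << output.rows() << " x " << output.cols() << "\n";
}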
Example 3: crossMx
inline Eigen::Matrix<typename Eigen::internal::traits<Derived_T>::Scalar, 3, 3> crossMx(
Eigen::MatrixBase<Derived_T> const & v)
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Eigen::MatrixBase<Derived_T>, 3);
assert((v.cols()==3 && v.rows()==1)||(v.rows()==3 && v.cols()==1));
return crossMx(v(0, 0), v(1, 0), v(2, 0));
}
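The scalar overload this wrapper forwards to is not part of the excerpt. A plausible sketch of it is given below under that assumption: it builds the 3x3 skew-symmetric "cross-product matrix" of v, so that crossMx(v) * w equals v.cross(w).

#include <Eigen/Dense>

// Hypothetical scalar overload (the library's actual definition is not shown above).
template <typename Scalar>
Eigen::Matrix<Scalar, 3, 3> crossMx(Scalar x, Scalar y, Scalar z)
{
  Eigen::Matrix<Scalar, 3, 3> C;
  C << Scalar(0),        -z,         y,
               z, Scalar(0),        -x,
              -y,         x, Scalar(0);
  return C;
}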
Example 4: vertex_triangle_adjacency
IGL_INLINE void igl::vertex_triangle_adjacency(
const Eigen::MatrixBase<DerivedF> & F,
const int n,
Eigen::PlainObjectBase<DerivedVF> & VF,
Eigen::PlainObjectBase<DerivedNI> & NI)
{
typedef Eigen::Matrix<typename DerivedVF::Scalar,Eigen::Dynamic,1> VectorXI;
// vfd #V list so that vfd(i) contains the vertex-face degree (number of
// faces incident on vertex i)
VectorXI vfd = VectorXI::Zero(n);
for (int i = 0; i < F.rows(); i++)
{
for (int j = 0; j < 3; j++)
{
vfd[F(i,j)]++;
}
}
igl::cumsum(vfd,1,NI);
// Prepend a zero
NI = (DerivedNI(n+1)<<0,NI).finished();
// vfd now acts as a counter
vfd = NI;
VF.derived()= Eigen::VectorXi(3*F.rows());
for (int i = 0; i < F.rows(); i++)
{
for (int j = 0; j < 3; j++)
{
VF[vfd[F(i,j)]] = i;
vfd[F(i,j)]++;
}
}
}
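A minimal usage sketch, assuming libigl's <igl/vertex_triangle_adjacency.h> is available. VF lists face indices grouped by vertex, and NI holds the cumulative offsets into VF.

#include <igl/vertex_triangle_adjacency.h>
#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::MatrixXi F(2, 3);          // two triangles sharing an edge
  F << 0, 1, 2,
       0, 2, 3;
  const int n = 4;                  // number of vertices
  Eigen::VectorXi VF, NI;
  igl::vertex_triangle_adjacency(F, n, VF, NI);
  // Faces incident on vertex v are VF[NI[v]] .. VF[NI[v+1]-1].
  for (int v = 0; v < n; v++)
  {
    std::cout << "vertex " << v << ":";
    for (int k = NI[v]; k < NI[v + 1]; k++) std::cout << " " << VF[k];
    std::cout << "\n";
  }
}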
Example 5: per_face_normals
IGL_INLINE void igl::per_face_normals(
const Eigen::MatrixBase<DerivedV>& V,
const Eigen::MatrixBase<DerivedF>& F,
const Eigen::MatrixBase<DerivedZ> & Z,
Eigen::PlainObjectBase<DerivedN> & N)
{
N.resize(F.rows(),3);
// loop over faces
int Frows = F.rows();
#pragma omp parallel for if (Frows>10000)
for(int i = 0; i < Frows;i++)
{
const Eigen::Matrix<typename DerivedV::Scalar, 1, 3> v1 = V.row(F(i,1)) - V.row(F(i,0));
const Eigen::Matrix<typename DerivedV::Scalar, 1, 3> v2 = V.row(F(i,2)) - V.row(F(i,0));
N.row(i) = v1.cross(v2);//.normalized();
typename DerivedV::Scalar r = N.row(i).norm();
if(r == 0)
{
N.row(i) = Z;
}else
{
N.row(i) /= r;
}
}
}
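A minimal usage sketch, assuming libigl's <igl/per_face_normals.h> is available; Z is the fallback normal assigned to degenerate (zero-area) faces.

#include <igl/per_face_normals.h>
#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::MatrixXd V(4, 3);
  V << 0, 0, 0,
       1, 0, 0,
       0, 1, 0,
       1, 1, 0;
  Eigen::MatrixXi F(2, 3);
  F << 0, 1, 2,
       1, 3, 2;
  Eigen::RowVector3d Z(0, 0, 1);    // fallback for degenerate faces
  Eigen::MatrixXd N;
  igl::per_face_normals(V, F, Z, N);
  std::cout << N << "\n";           // both rows should be (0, 0, 1)
}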
Example 6: matGradMultMat
Eigen::Matrix<typename DerivedA::Scalar, matGradMultMatNumRows(DerivedA::RowsAtCompileTime, DerivedB::ColsAtCompileTime), DerivedDA::ColsAtCompileTime>
matGradMultMat(
const Eigen::MatrixBase<DerivedA>& A,
const Eigen::MatrixBase<DerivedB>& B,
const Eigen::MatrixBase<DerivedDA>& dA,
const Eigen::MatrixBase<DerivedDB>& dB) {
assert(dA.cols() == dB.cols());
const int nq = dA.cols();
const int nq_at_compile_time = DerivedDA::ColsAtCompileTime;
Eigen::Matrix<typename DerivedA::Scalar,
matGradMultMatNumRows(DerivedA::RowsAtCompileTime, DerivedB::ColsAtCompileTime),
DerivedDA::ColsAtCompileTime> ret(A.rows() * B.cols(), nq);
for (int col = 0; col < B.cols(); col++) {
auto block = ret.template block<DerivedA::RowsAtCompileTime, nq_at_compile_time>(col * A.rows(), 0, A.rows(), nq);
// A * dB part:
block.noalias() = A * dB.template block<DerivedA::ColsAtCompileTime, nq_at_compile_time>(col * A.cols(), 0, A.cols(), nq);
for (int row = 0; row < B.rows(); row++) {
// B * dA part:
block.noalias() += B(row, col) * dA.template block<DerivedA::RowsAtCompileTime, nq_at_compile_time>(row * A.rows(), 0, A.rows(), nq);
}
}
return ret;
// much slower and requires eigen/unsupported:
// return Eigen::kroneckerProduct(Eigen::MatrixXd::Identity(B.cols(), B.cols()), A) * dB + Eigen::kroneckerProduct(B.transpose(), Eigen::MatrixXd::Identity(A.rows(), A.rows())) * dA;
}
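The commented-out line above states the same product rule via Kronecker products: with dA = d vec(A)/dq and dB = d vec(B)/dq (column-major vec), d vec(A*B)/dq = kron(I, A) * dB + kron(B^T, I) * dA. A standalone sketch of that identity with dynamically sized matrices, using Eigen's unsupported KroneckerProduct module, could look like this:

#include <Eigen/Dense>
#include <unsupported/Eigen/KroneckerProduct>
#include <iostream>

int main()
{
  const int m = 2, k = 3, p = 2, nq = 4;
  Eigen::MatrixXd A  = Eigen::MatrixXd::Random(m, k);
  Eigen::MatrixXd B  = Eigen::MatrixXd::Random(k, p);
  Eigen::MatrixXd dA = Eigen::MatrixXd::Random(m * k, nq);   // d vec(A)/dq
  Eigen::MatrixXd dB = Eigen::MatrixXd::Random(k * p, nq);   // d vec(B)/dq

  Eigen::MatrixXd K1 = Eigen::kroneckerProduct(Eigen::MatrixXd::Identity(p, p), A);
  Eigen::MatrixXd K2 = Eigen::kroneckerProduct(B.transpose(), Eigen::MatrixXd::Identity(m, m));
  Eigen::MatrixXd dAB = K1 * dB + K2 * dA;                   // d vec(A*B)/dq

  std::cout << dAB.rows() << " x " << dAB.cols() << "\n";    // (m*p) x nq
}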
Example 7: LUPartialPivotDecompositionSuccessful
MatrixSuccessCode LUPartialPivotDecompositionSuccessful(Eigen::MatrixBase<Derived> const& LU)
{
#ifndef BERTINI_DISABLE_ASSERTS
assert(LU.rows()==LU.cols() && "non-square matrix in LUPartialPivotDecompositionSuccessful");
assert(LU.rows()>0 && "empty matrix in LUPartialPivotDecompositionSuccessful");
#endif
// this loop won't test entry (0,0). it's tested separately after.
for (unsigned int ii = LU.rows()-1; ii > 0; ii--)
{
if (IsSmallValue(LU(ii,ii)))
{
return MatrixSuccessCode::SmallValue;
}
if (IsLargeChange(LU(ii-1,ii-1),LU(ii,ii)))
{
return MatrixSuccessCode::LargeChange;
}
}
// this line is the reason for the above assert on non-empty matrix.
if (IsSmallValue(LU(0,0)))
{
return MatrixSuccessCode::SmallValue;
}
return MatrixSuccessCode::Success;
}
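IsSmallValue, IsLargeChange and MatrixSuccessCode are project-specific, so the snippet is not runnable on its own. A plain-Eigen sketch of the quantity it inspects, the diagonal of a partial-pivot LU factorization, could look like this:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d M;
  M << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;                                     // deliberately rank-deficient
  Eigen::PartialPivLU<Eigen::Matrix3d> lu(M);
  Eigen::Vector3d diag = lu.matrixLU().diagonal();  // diagonal of U
  std::cout << "smallest |U(i,i)| = " << diag.cwiseAbs().minCoeff() << "\n";
}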
Example 8: normalizeVec
void normalizeVec(
const Eigen::MatrixBase<Derived>& x,
typename Derived::PlainObject& x_norm,
typename Gradient<Derived, Derived::RowsAtCompileTime, 1>::type* dx_norm,
typename Gradient<Derived, Derived::RowsAtCompileTime, 2>::type* ddx_norm) {
typename Derived::Scalar xdotx = x.squaredNorm();
typename Derived::Scalar norm_x = std::sqrt(xdotx);
x_norm = x / norm_x;
if (dx_norm) {
dx_norm->setIdentity(x.rows(), x.rows());
(*dx_norm) -= x * x.transpose() / xdotx;
(*dx_norm) /= norm_x;
if (ddx_norm) {
auto dx_norm_transpose = transposeGrad(*dx_norm, x.rows());
auto ddx_norm_times_norm = -matGradMultMat(x_norm, x_norm.transpose(), (*dx_norm), dx_norm_transpose);
auto dnorm_inv = -x.transpose() / (xdotx * norm_x);
(*ddx_norm) = ddx_norm_times_norm / norm_x;
auto temp = (*dx_norm) * norm_x;
int n = x.rows();
for (int col = 0; col < n; col++) {
auto column_as_matrix = (dnorm_inv(0, col) * temp);
for (int row_block = 0; row_block < n; row_block++) {
ddx_norm->block(row_block * n, col, n, 1) += column_as_matrix.col(row_block);
}
}
}
}
}
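The Gradient and transposeGrad helpers are project-specific, but the first gradient has the closed form d(x/||x||)/dx = (I - x_hat * x_hat^T) / ||x||, which is exactly what the code above assembles. A plain-Eigen sketch of just that part:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector3d x(1.0, 2.0, 2.0);
  double norm_x = x.norm();
  Eigen::Vector3d x_hat = x / norm_x;
  Eigen::Matrix3d dx_norm =
      (Eigen::Matrix3d::Identity() - x_hat * x_hat.transpose()) / norm_x;
  std::cout << dx_norm << "\n";
}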
Example 9: hessian
IGL_INLINE void igl::hessian(
const Eigen::MatrixBase<DerivedV> & V,
const Eigen::MatrixBase<DerivedF> & F,
Eigen::SparseMatrix<Scalar>& H)
{
typedef typename DerivedV::Scalar denseScalar;
typedef typename Eigen::Matrix<denseScalar, Eigen::Dynamic, 1> VecXd;
typedef typename Eigen::SparseMatrix<Scalar> SparseMat;
typedef typename Eigen::DiagonalMatrix
<Scalar, Eigen::Dynamic, Eigen::Dynamic> DiagMat;
int dim = V.cols();
assert((dim==2 || dim==3) &&
"The dimension of the vertices should be 2 or 3");
//Construct the combined gradient matrix
SparseMat G;
igl::grad(Eigen::PlainObjectBase<DerivedV>(V),
Eigen::PlainObjectBase<DerivedF>(F),
G, false);
SparseMat GG(F.rows(), dim*V.rows());
GG.reserve(G.nonZeros());
for(int i=0; i<dim; ++i)
GG.middleCols(i*G.cols(),G.cols()) = G.middleRows(i*F.rows(),F.rows());
SparseMat D;
igl::repdiag(GG,dim,D);
//Compute area matrix
VecXd areas;
igl::doublearea(V, F, areas);
DiagMat A = (0.5*areas).replicate(dim,1).asDiagonal();
//Compute FEM Hessian
H = D.transpose()*A*G;
}
Example 10: drotmat2rpy
typename Gradient<Eigen::Matrix<typename DerivedR::Scalar, RPY_SIZE, 1>, DerivedDR::ColsAtCompileTime>::type drotmat2rpy(
const Eigen::MatrixBase<DerivedR>& R,
const Eigen::MatrixBase<DerivedDR>& dR)
{
EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Eigen::MatrixBase<DerivedR>, SPACE_DIMENSION, SPACE_DIMENSION);
EIGEN_STATIC_ASSERT(Eigen::MatrixBase<DerivedDR>::RowsAtCompileTime == RotmatSize, THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE);
const int nq = dR.cols();
typedef typename DerivedR::Scalar Scalar;
typedef typename Gradient<Eigen::Matrix<Scalar, RPY_SIZE, 1>, DerivedDR::ColsAtCompileTime>::type ReturnType;
ReturnType drpy(RPY_SIZE, nq);
auto dR11_dq = getSubMatrixGradient(dR, 0, 0, R.rows());
auto dR21_dq = getSubMatrixGradient(dR, 1, 0, R.rows());
auto dR31_dq = getSubMatrixGradient(dR, 2, 0, R.rows());
auto dR32_dq = getSubMatrixGradient(dR, 2, 1, R.rows());
auto dR33_dq = getSubMatrixGradient(dR, 2, 2, R.rows());
Scalar sqterm = R(2,1) * R(2,1) + R(2,2) * R(2,2);
using namespace std;
// droll_dq
drpy.row(0) = (R(2, 2) * dR32_dq - R(2, 1) * dR33_dq) / sqterm;
// dpitch_dq
Scalar sqrt_sqterm = sqrt(sqterm);
drpy.row(1) = (-sqrt_sqterm * dR31_dq + R(2, 0) / sqrt_sqterm * (R(2, 1) * dR32_dq + R(2, 2) * dR33_dq)) / (R(2, 0) * R(2, 0) + R(2, 1) * R(2, 1) + R(2, 2) * R(2, 2));
// dyaw_dq
sqterm = R(0, 0) * R(0, 0) + R(1, 0) * R(1, 0);
drpy.row(2) = (R(0, 0) * dR21_dq - R(1, 0) * dR11_dq) / sqterm;
return drpy;
}
Example 11: allGreaterEquals
bool allGreaterEquals(const Eigen::MatrixBase<DerivedA> & A,
const Eigen::MatrixBase<DerivedB> & B,
double tolerance = math::NUMERICAL_ZERO_DIFFERENCE)
{
assertion(A.rows() == B.rows(), "Matrices with different number of rows can't be compared.");
assertion(A.cols() == B.cols(), "Matrices with different number of cols can't be compared.");
return ((A-B).array() >= tolerance).all();
}
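assertion and math::NUMERICAL_ZERO_DIFFERENCE are project-specific; the element-wise comparison itself is plain Eigen and can be tried standalone:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2d A, B;
  A << 1.0, 2.0,
       3.0, 4.0;
  B << 0.5, 1.5,
       2.5, 3.5;
  const double tolerance = 1e-14;                  // stand-in for NUMERICAL_ZERO_DIFFERENCE
  bool allGE = ((A - B).array() >= tolerance).all();
  std::cout << std::boolalpha << allGE << "\n";    // prints: true
}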
Example 12: assert_size
void assert_size(const Eigen::MatrixBase<Derived> &X, int rows_expected, int cols_expected)
{
common_assert_msg_ex(
X.rows() == rows_expected && X.cols() == cols_expected,
"matrix [row, col] mismatch" << std::endl <<
"actual: [" << X.rows() << ", " << X.cols() << "]" << std::endl <<
"expected: [" << rows_expected << ", " << cols_expected << "]",
eigen_utilities::assert_error);
}
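common_assert_msg_ex and eigen_utilities::assert_error are project-specific. A hypothetical stand-in with the same interface, which throws a standard exception instead, could look like this:

#include <Eigen/Dense>
#include <sstream>
#include <stdexcept>

template <typename Derived>
void assert_size(const Eigen::MatrixBase<Derived> &X, int rows_expected, int cols_expected)
{
  if (X.rows() != rows_expected || X.cols() != cols_expected)
  {
    std::ostringstream msg;
    msg << "matrix [row, col] mismatch\n"
        << "actual: [" << X.rows() << ", " << X.cols() << "]\n"
        << "expected: [" << rows_expected << ", " << cols_expected << "]";
    throw std::runtime_error(msg.str());
  }
}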
Example 13: detectTwoTier
void ba81NormalQuad::layer::detectTwoTier(Eigen::ArrayBase<T1> &param,
Eigen::MatrixBase<T2> &mean, Eigen::MatrixBase<T3> &cov)
{
if (mean.rows() < 3) return;
std::vector<int> orthogonal;
Eigen::Matrix<Eigen::DenseIndex, Eigen::Dynamic, 1>
numCov((cov.array() != 0.0).matrix().colwise().count());
std::vector<int> candidate;
for (int fx=0; fx < numCov.rows(); ++fx) {
if (numCov(fx) == 1) candidate.push_back(fx);
}
if (candidate.size() > 1) {
std::vector<bool> mask(numItems());
for (int cx=candidate.size() - 1; cx >= 0; --cx) {
std::vector<bool> loading(numItems());
for (int ix=0; ix < numItems(); ++ix) {
loading[ix] = param(candidate[cx], itemsMap[ix]) != 0;
}
std::vector<bool> overlap(loading.size());
std::transform(loading.begin(), loading.end(),
mask.begin(), overlap.begin(),
std::logical_and<bool>());
if (std::find(overlap.begin(), overlap.end(), true) == overlap.end()) {
std::transform(loading.begin(), loading.end(),
mask.begin(), mask.begin(),
std::logical_or<bool>());
orthogonal.push_back(candidate[cx]);
}
}
}
std::reverse(orthogonal.begin(), orthogonal.end());
if (orthogonal.size() == 1) orthogonal.clear();
if (orthogonal.size() && orthogonal[0] != mean.rows() - int(orthogonal.size())) {
mxThrow("Independent specific factors must be given after general dense factors");
}
numSpecific = orthogonal.size();
if (numSpecific) {
Sgroup.assign(numItems(), 0);
for (int ix=0; ix < numItems(); ix++) {
for (int dx=orthogonal[0]; dx < mean.rows(); ++dx) {
if (param(dx, itemsMap[ix]) != 0) {
Sgroup[ix] = dx - orthogonal[0];
continue;
}
}
}
//Eigen::Map< Eigen::ArrayXi > foo(Sgroup.data(), param.cols());
//mxPrintMat("sgroup", foo);
}
}
Example 14: per_face_normals_stable
IGL_INLINE void igl::per_face_normals_stable(
const Eigen::MatrixBase<DerivedV>& V,
const Eigen::MatrixBase<DerivedF>& F,
Eigen::PlainObjectBase<DerivedN> & N)
{
using namespace Eigen;
typedef Matrix<typename DerivedV::Scalar,1,3> RowVectorV3;
typedef typename DerivedV::Scalar Scalar;
const size_t m = F.rows();
N.resize(F.rows(),3);
// Grad all points
for(size_t f = 0;f<m;f++)
{
const RowVectorV3 p0 = V.row(F(f,0));
const RowVectorV3 p1 = V.row(F(f,1));
const RowVectorV3 p2 = V.row(F(f,2));
const RowVectorV3 n0 = (p1 - p0).cross(p2 - p0);
const RowVectorV3 n1 = (p2 - p1).cross(p0 - p1);
const RowVectorV3 n2 = (p0 - p2).cross(p1 - p2);
// careful sum
for(int d = 0;d<3;d++)
{
// This is a little _silly_ in terms of complexity, but its recursive
// implementation is clean looking...
const std::function<Scalar(Scalar,Scalar,Scalar)> sum3 =
[&sum3](Scalar a, Scalar b, Scalar c)->Scalar
{
if(fabs(c)>fabs(a))
{
return sum3(c,b,a);
}
// c < a
if(fabs(c)>fabs(b))
{
return sum3(a,c,b);
}
// c < a, c < b
if(fabs(b)>fabs(a))
{
return sum3(b,a,c);
}
return (a+b)+c;
};
N(f,d) = sum3(n0(d),n1(d),n2(d));
}
// sum had better not be zero, or else NaN
N.row(f) /= N.row(f).norm();
}
}
Example 15: run
static void run( float a, const Eigen::MatrixBase<Derived2> & A, const Eigen::MatrixBase<Derived1> & x, float b, Eigen::MatrixBase<Derived1> &y) {
EIGEN_STATIC_ASSERT(sizeof(PREC) == sizeof(typename Derived1::Scalar), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
ASSERTMSG(A.cols() == x.rows() && A.rows() == y.rows() ,"ERROR: Vector/Matrix wrong dimension");
// b_dev = alpha * A_dev * x_old_dev + beta *b_dev
//Derived2 C = A;
#if USE_INTEL_BLAS == 1
CBLAS_ORDER order;
CBLAS_TRANSPOSE trans;
if(Derived1::Flags & Eigen::RowMajorBit) {
order = CblasRowMajor;
} else {
order = CblasColMajor;
}
trans = CblasNoTrans;
mkl_set_dynamic(false);
mkl_set_num_threads(BLAS_NUM_THREADS);
//cout << "Threads:" << mkl_get_max_threads();
// sgemv is single precision, so the const_casts must produce float*, not double*
cblas_sgemv(order, trans, A.rows(), A.cols(), a, const_cast<float*>(&(A.operator()(0,0))), A.outerStride(), const_cast<float*>(&(x.operator()(0,0))), 1, b, &(y.operator()(0,0)), 1);
//cblas_dgemm(order,trans,trans, A.rows(), A.cols(), A.cols(), 1.0, const_cast<double*>(&(A.operator()(0,0))), A.rows(), const_cast<double*>(&(A.operator()(0,0))), A.rows(), 1.0 , &(C.operator()(0,0)), C.rows() );
#else
#if USE_GOTO_BLAS == 1
/* static DGEMVFunc DGEMV = NULL;
if (DGEMV == NULL) {
HINSTANCE hInstLibrary = LoadLibrary("libopenblasp-r0.1alpha2.2.dll");
DGEMV = (DGEMVFunc)GetProcAddress(hInstLibrary, "DGEMV");
}*/
char trans = 'N';
BLAS_INT idx = 1;
BLAS_INT m = A.rows();
BLAS_INT n = A.cols();
sgemv(&trans, &m, &n, &a, &(A.operator()(0,0)), &m, &(x.operator()(0,0)), &idx, &b, &(y.operator()(0,0)), &idx);
// FreeLibrary(hInstLibrary);
#else
ASSERTMSG(false,"No implementation for BLAS defined!");
#endif
#endif
}
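For reference, the operation dispatched to BLAS above is just y = a*A*x + b*y, which in pure Eigen (single precision, matching sgemv) reads:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXf A = Eigen::MatrixXf::Random(3, 4);
  Eigen::VectorXf x = Eigen::VectorXf::Random(4);
  Eigen::VectorXf y = Eigen::VectorXf::Zero(3);
  float a = 2.0f, b = 0.5f;
  y = a * A * x + b * y;              // what cblas_sgemv / sgemv compute
  std::cout << y.transpose() << "\n";
}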