This article collects typical usage examples of the C++ method Eigen::SparseMatrix::coeffRef. If you are wondering how SparseMatrix::coeffRef is used in practice, the curated code examples below may help. You can also explore further usage examples of the enclosing class Eigen::SparseMatrix.
Six code examples of SparseMatrix::coeffRef are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better C++ code examples.
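Before the examples, a minimal self-contained sketch (not taken from the examples below) of what coeffRef does: it returns a writable reference to entry (i,j), inserting an explicit zero first if that entry does not yet exist, which is why makeCompressed() is often called after a batch of insertions.
#include <Eigen/Sparse>
#include <iostream>

int main()
{
    Eigen::SparseMatrix<double> M(4, 4);
    M.coeffRef(0, 0) = 1.0;   // inserts entry (0,0) and assigns it
    M.coeffRef(2, 3) += 5.0;  // inserts (2,3) as zero, then adds 5
    M.makeCompressed();       // return to compressed (CSC) storage
    std::cout << M << std::endl;
    return 0;
}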
Example 1: insertSparseBlock
// inserts the sparse matrix 'ins' into the sparse matrix 'original' at the position given by the 'row' and 'col' offsets
void insertSparseBlock(const Eigen::SparseMatrix<Scalar>& ins, Eigen::SparseMatrix<Scalar>& original, const unsigned int& row, const unsigned int& col)
{
    for (int k=0; k<ins.outerSize(); ++k)
        for (Eigen::SparseMatrix<Scalar>::InnerIterator iti(ins,k); iti; ++iti)
            original.coeffRef(iti.row() + row, iti.col() + col) = iti.value();

    original.makeCompressed();
}
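A hypothetical call site (matrix names made up for illustration, with Scalar assumed to be double). Note that repeated coeffRef insertions can be slow on large matrices; collecting Eigen::Triplet entries and calling setFromTriplets is usually preferred for bulk assembly.
Eigen::SparseMatrix<double> big(10, 10), small(3, 3);
small.coeffRef(0, 0) = 1.0;
small.coeffRef(2, 1) = -2.0;
insertSparseBlock(small, big, 4, 4);  // copy 'small' into 'big' with its top-left corner at (4,4)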
Example 2: adjacency_matrix
template <typename DerivedF, typename T>
IGL_INLINE void igl::adjacency_matrix(
  const Eigen::PlainObjectBase<DerivedF> & F,
  Eigen::SparseMatrix<T>& A)
{
  using namespace std;
  using namespace Eigen;
  typedef typename DerivedF::Scalar Index;
  typedef Triplet<T> IJV;
  vector<IJV > ijv;
  ijv.reserve(F.size()*2);
  // Loop over faces
  for(int i = 0;i<F.rows();i++)
  {
    // Loop over this face
    for(int j = 0;j<F.cols();j++)
    {
      // Get indices of edge: s --> d
      Index s = F(i,j);
      Index d = F(i,(j+1)%F.cols());
      ijv.push_back(IJV(s,d,1));
      ijv.push_back(IJV(d,s,1));
    }
  }
  const Index n = F.maxCoeff()+1;
  A.resize(n,n);
  switch(F.cols())
  {
    case 3:
      A.reserve(6*(F.maxCoeff()+1));
      break;
    case 4:
      A.reserve(26*(F.maxCoeff()+1));
      break;
  }
  A.setFromTriplets(ijv.begin(),ijv.end());
  // Force all non-zeros to be one
  // Iterate over outside
  for(int k=0; k<A.outerSize(); ++k)
  {
    // Iterate over inside
    for(typename Eigen::SparseMatrix<T>::InnerIterator it (A,k); it; ++it)
    {
      assert(it.value() != 0);
      A.coeffRef(it.row(),it.col()) = 1;
    }
  }
}
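A hypothetical call site, assuming libigl conventions (F is a #F x 3 matrix of triangle vertex indices):
Eigen::MatrixXi F;                 // #F x 3 triangle list, filled elsewhere
Eigen::SparseMatrix<int> A;
igl::adjacency_matrix(F, A);       // A(i,j) == 1 iff vertices i and j share an edge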
Example 3: Condi2Joint
// The second dimension (columns) of Condi indexes the parent variable.
Eigen::SparseMatrix<double> Condi2Joint(Eigen::SparseMatrix<double> Condi, Eigen::SparseVector<double> Pa)
{
    Eigen::SparseMatrix<double> Joint;
    Joint.resize(Condi.rows(), Condi.cols());

    for (int cols = 0; cols < Condi.cols(); cols++)
    {
        Eigen::SparseVector<double> tmp_vec = Condi.block(0, cols, Condi.rows(), 1)*Pa.coeff(cols);
        for (int id_rows = 0; id_rows < tmp_vec.size(); id_rows++)
        {
            Joint.coeffRef(id_rows, cols) = tmp_vec.coeff(id_rows);
        }
    }
    Joint.prune(TOLERANCE);  // TOLERANCE is assumed to be defined elsewhere in the project
    return Joint;
}
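A hypothetical usage sketch (values made up, TOLERANCE assumed defined): given a conditional distribution P(X|Pa) whose columns are indexed by the parent, and the parent marginal P(Pa), the function returns the joint P(X, Pa).
Eigen::SparseMatrix<double> Condi(3, 2);   // P(X | Pa): 3 states of X, 2 states of Pa
Eigen::SparseVector<double> Pa(2);         // P(Pa)
Condi.coeffRef(0, 0) = 0.5; Condi.coeffRef(1, 0) = 0.5;
Condi.coeffRef(1, 1) = 0.2; Condi.coeffRef(2, 1) = 0.8;
Pa.coeffRef(0) = 0.3;       Pa.coeffRef(1) = 0.7;
Eigen::SparseMatrix<double> Joint = Condi2Joint(Condi, Pa);  // Joint(i, j) = P(X = i, Pa = j)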
Example 4: TsProductTimings benchmark
TEST_F(TsProductTimings, benchmark)
{
    std::cerr<<"=== Matrix-Matrix Products:"<<std::endl;

    double start, stop;

    matMultiplication.clear();
    start = get_time();
    matMultiplication = mat * matMultiplier;
    stop = get_time();
    std::cerr<<"Mat:\t\t"<<stop-start<<" (ms)"<<std::endl;

    fullMultiplication.clear();
    start = get_time();
    fullMat.mul( fullMultiplication, fullMultiplier );
    stop = get_time();
    std::cerr<<"Full:\t\t"<<stop-start<<" (ms)"<<std::endl;

    crsMultiplication.clear();
    start = get_time();
    crs1.mul( crsMultiplication, crsMultiplier );
    stop = get_time();
    std::cerr<<"CRS:\t\t"<<stop-start<<" (ms)"<<std::endl;

    eiBaseMultiplication.clear();
    start = get_time();
    eiBase.mul( eiBaseMultiplication, eiBaseMultiplier );
    stop = get_time();
    std::cerr<<"Eigen Base ST:\t\t"<<stop-start<<" (ms)"<<std::endl;

#ifdef USING_OMP_PRAGMAS
    eiBaseMultiplication.clear();
    start = get_time();
    eiBase.mul_MT( eiBaseMultiplication, eiBaseMultiplier );
    stop = get_time();
    std::cerr<<"Eigen Base MT:\t\t"<<stop-start<<" (ms)"<<std::endl;
#endif

    start = get_time();
    eiDenseMultiplication = eiBase.compressedMatrix * eiDenseMultiplier;
    stop = get_time();
    std::cerr<<"Eigen Sparse*Dense:\t\t"<<stop-start<<" (ms)"<<std::endl;

#ifdef USING_OMP_PRAGMAS
    start = get_time();
    eiDenseMultiplication.noalias() = component::linearsolver::mul_EigenSparseDenseMatrix_MT( eiBase.compressedMatrix, eiDenseMultiplier, omp_get_max_threads()/2 );
    stop = get_time();
    std::cerr<<"Eigen Sparse*Dense MT:\t\t"<<stop-start<<" (ms)"<<std::endl;
#endif

    std::cerr<<"=== Eigen Matrix-Vector Products:"<<std::endl;

    unsigned nbrows = 100, nbcols;
    std::cerr<<"=== nb rows:"<<nbrows<<std::endl;

    for( int j=1; j<300 ; j+=30 )
    {
        nbcols = 100 * j;
        std::cerr<<"=== nb cols:"<<nbcols<<std::endl;

        Eigen::SparseMatrix<SReal,Eigen::RowMajor> A;
        A.resize(nbrows,nbcols);

#define NBCOLSRHS 1
        Eigen::Matrix<SReal, Eigen::Dynamic, NBCOLSRHS> res, rhs;
        rhs.resize(nbcols,NBCOLSRHS);
        res.resize(nbrows,NBCOLSRHS);

        sofa::helper::RandomGenerator randomGenerator;
        randomGenerator.initSeed( (long)time(0) );

        for( unsigned j=0; j<nbcols; j++)
        {
            Real random = randomGenerator.random<Real>( (Real) -1, (Real) 1 );
            for( unsigned i=0; i<NBCOLSRHS; i++)
                rhs.coeffRef(j,i) = random;
            for( unsigned i=0; i<nbrows; i++)
            {
                if( random > -0.5 && random < 0.5 ) A.coeffRef(i,j)=random;
            }
        }

        double min=std::numeric_limits<double>::max(), max=0, sum=0;
        for( int i=0; i<100 ; ++i )
        {
            start = get_time();
            res.noalias() = A * rhs;
            stop = get_time();
            double current = stop-start;
            sum+=current;
            if( current<min ) min=current;
            if( current>max ) max=current;
        }
        std::cerr<<"ST: "<<sum/100.0<<" "<<min<<" "<<max<<std::endl;

#ifdef USING_OMP_PRAGMAS
        min=std::numeric_limits<double>::max(), max=0, sum=0;
//......... the rest of this example is omitted here .........
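The benchmark above fills A one entry at a time with coeffRef, which is convenient but triggers per-insert bookkeeping in the sparse storage. A self-contained sketch of the usual triplet-based alternative (using double instead of SReal and std::rand() instead of sofa::helper::RandomGenerator, so the names differ from the test fixture):
#include <Eigen/Sparse>
#include <cstdlib>
#include <vector>

// Build an nbrows x nbcols row-major sparse matrix; columns whose random value
// passes the same acceptance test as above are filled across all rows.
Eigen::SparseMatrix<double, Eigen::RowMajor> buildRandomSparse(unsigned nbrows, unsigned nbcols)
{
    std::vector<Eigen::Triplet<double> > entries;
    for (unsigned j = 0; j < nbcols; ++j)
    {
        const double v = 2.0 * double(std::rand()) / RAND_MAX - 1.0;  // value in [-1, 1]
        if (v > -0.5 && v < 0.5)
            for (unsigned i = 0; i < nbrows; ++i)
                entries.push_back(Eigen::Triplet<double>(i, j, v));
    }
    Eigen::SparseMatrix<double, Eigen::RowMajor> A(nbrows, nbcols);
    A.setFromTriplets(entries.begin(), entries.end());
    return A;
}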
Example 5: orientable_patches
template <typename DerivedF, typename DerivedC, typename AScalar>
IGL_INLINE void igl::orientable_patches(
  const Eigen::PlainObjectBase<DerivedF> & F,
  Eigen::PlainObjectBase<DerivedC> & C,
  Eigen::SparseMatrix<AScalar> & A)
{
  using namespace Eigen;
  using namespace std;
  // simplex size
  assert(F.cols() == 3);
  // List of all "half"-edges: 3*#F by 2
  Matrix<typename DerivedF::Scalar, Dynamic, 2> allE,sortallE,uE;
  allE.resize(F.rows()*3,2);
  Matrix<int,Dynamic,2> IX;
  VectorXi IA,IC;
  allE.block(0*F.rows(),0,F.rows(),1) = F.col(1);
  allE.block(0*F.rows(),1,F.rows(),1) = F.col(2);
  allE.block(1*F.rows(),0,F.rows(),1) = F.col(2);
  allE.block(1*F.rows(),1,F.rows(),1) = F.col(0);
  allE.block(2*F.rows(),0,F.rows(),1) = F.col(0);
  allE.block(2*F.rows(),1,F.rows(),1) = F.col(1);
  // Sort each row
  sort(allE,2,true,sortallE,IX);
  // IC(i) tells us where to find sortallE(i,:) in uE:
  //   sortallE(i,:) = uE(IC(i),:)
  unique_rows(sortallE,uE,IA,IC);
  // uE2FT(e,f) = 1 means face f is adjacent to unique edge e
  vector<Triplet<AScalar> > uE2FTijv(IC.rows());
  for(int e = 0;e<IC.rows();e++)
  {
    uE2FTijv[e] = Triplet<AScalar>(e%F.rows(),IC(e),1);
  }
  SparseMatrix<AScalar> uE2FT(F.rows(),uE.rows());
  uE2FT.setFromTriplets(uE2FTijv.begin(),uE2FTijv.end());
  // kill non-manifold edges
  for(int j=0; j<(int)uE2FT.outerSize();j++)
  {
    int degree = 0;
    for(typename SparseMatrix<AScalar>::InnerIterator it (uE2FT,j); it; ++it)
    {
      degree++;
    }
    // Iterate over inside
    if(degree > 2)
    {
      for(typename SparseMatrix<AScalar>::InnerIterator it (uE2FT,j); it; ++it)
      {
        uE2FT.coeffRef(it.row(),it.col()) = 0;
      }
    }
  }
  // Face-face Adjacency matrix
  SparseMatrix<AScalar> uE2F;
  uE2F = uE2FT.transpose().eval();
  A = uE2FT*uE2F;
  // All ones
  for(int j=0; j<A.outerSize();j++)
  {
    // Iterate over inside
    for(typename SparseMatrix<AScalar>::InnerIterator it (A,j); it; ++it)
    {
      if(it.value() > 1)
      {
        A.coeffRef(it.row(),it.col()) = 1;
      }
    }
  }
  //% Connected components are patches
  //% C = components(A); % alternative to graphconncomp from matlab_bgl
  //% [~,C] = graphconncomp(A);
  // graph connected components using boost
  components(A,C);
}
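A hypothetical call site (libigl conventions): label each orientable patch of a triangle mesh.
Eigen::MatrixXi F;               // #F x 3 triangle list, filled elsewhere
Eigen::VectorXi C;               // output: per-face patch id
Eigen::SparseMatrix<int> A;      // output: face-face adjacency over manifold edges
igl::orientable_patches(F, C, A);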
Example 6: computeGNParam
void dart::ReportedJointsPrior::computeContribution(Eigen::SparseMatrix<float> & fullJTJ,
                                                    Eigen::VectorXf & fullJTe,
                                                    const int * modelOffsets,
                                                    const int priorParamOffset,
                                                    const std::vector<MirroredModel *> & models,
                                                    const std::vector<Pose> & poses,
                                                    const OptimizationOptions & opts)
{
    // get mapping of reported joint names and values
    std::map<std::string, float> rep_map;
    for(unsigned int i=0; i<_reported.getReducedArticulatedDimensions(); i++) {
        // apply lower and upper joint limits
        rep_map[_reported.getReducedName(i)] =
            std::min(std::max(_reported.getReducedArticulation()[i], _reported.getReducedMin(i)), _reported.getReducedMax(i));
    }

#ifdef LCM_DEBUG_GRADIENT
    std::vector<std::string> names;
#if FILTER_FIXED_JOINTS
    const bool pub_grad = (_skipped==GRADIENT_SKIP);
#endif
#endif

    // compute difference of reported to estimated joint values
    Eigen::VectorXf diff = Eigen::VectorXf::Zero(_estimated.getReducedArticulatedDimensions());
    for(unsigned int i=0; i<_estimated.getReducedArticulatedDimensions(); i++) {
        const std::string jname = _estimated.getReducedName(i);
#ifdef LCM_DEBUG_GRADIENT
#if FILTER_FIXED_JOINTS
        if(pub_grad)
            if( !(_estimated.getReducedMin(i)==0 && _estimated.getReducedMax(i)==0) )
#endif
                names.push_back(jname);
#endif
        const float rep = rep_map.at(jname);
        const float est = _estimated.getReducedArticulation()[i];
        diff[i] = rep - est;
    }
    // set NaN values to 0: NaN != NaN evaluates to true, so select() picks 0 exactly at NaN entries
    diff = (diff.array()!=diff.array()).select(0,diff);

    // get Gauss-Newton parameters for the specific objective function
    Eigen::MatrixXf J = Eigen::MatrixXf::Zero(_estimated.getReducedArticulatedDimensions(), 1);
    Eigen::VectorXf JTe = Eigen::VectorXf::Zero(_estimated.getReducedArticulatedDimensions());
    std::tie(J,JTe) = computeGNParam(diff);
    const Eigen::MatrixXf JTJ = J.transpose()*J;

#ifdef LCM_DEBUG_GRADIENT
#if FILTER_FIXED_JOINTS
    if(pub_grad) {
#endif
        // publish gradient (JTe)
        bot_core::joint_angles_t grad;
        grad.num_joints = names.size();
        grad.joint_name = names;
        for(unsigned int i = 0; i<JTe.size(); i++) {
#if FILTER_FIXED_JOINTS
            if(!(_estimated.getReducedMin(i)==0 && _estimated.getReducedMax(i)==0))
#endif
                grad.joint_position.push_back(JTe[i]);
        }
        LCM_CommonBase::publish("DART_GRADIENT", &grad);
#if FILTER_FIXED_JOINTS
        _skipped=0;
    }
    else {
        _skipped++;
    }
#endif
#endif // LCM_DEBUG_GRADIENT

    // scatter the per-model JTJ block and JTe segment into the full sparse system
    for(unsigned int r=0; r<JTJ.rows(); r++)
        for(unsigned int c=0; c<JTJ.cols(); c++)
            if(JTJ(r,c)!=0)
                fullJTJ.coeffRef(modelOffsets[_modelID]+6+r, modelOffsets[_modelID]+6+c) += JTJ(r,c);
    for(unsigned int r=0; r<JTe.rows(); r++)
        if(JTe[r]!=0)
            fullJTe[modelOffsets[_modelID]+6+r] += JTe[r];
}
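One detail worth isolating from Example 6 is the NaN-filtering line: since NaN compares unequal to itself, (x.array() != x.array()) is true exactly at NaN entries, and select() replaces just those with 0. A minimal standalone sketch:
#include <Eigen/Dense>
#include <limits>

int main()
{
    Eigen::VectorXf v(3);
    v << 1.0f, std::numeric_limits<float>::quiet_NaN(), -2.0f;
    v = (v.array() != v.array()).select(0, v);   // v becomes [1, 0, -2]
    return 0;
}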