This article collects typical usage examples of the C++ method Eigen::SparseMatrix::nonZeros. If you have been wondering what SparseMatrix::nonZeros does, how to call it, or what real-world code using it looks like, the curated examples below may help. You can also explore further usage of its containing class, Eigen::SparseMatrix.
Shown below are 14 code examples of SparseMatrix::nonZeros, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ examples.
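Before the examples, a quick reminder of the method's semantics: nonZeros() returns the number of explicitly stored entries, which can include explicit zeros until they are pruned. A minimal self-contained sketch (plain Eigen, no external assumptions):

#include <Eigen/Sparse>
#include <iostream>
#include <vector>

int main()
{
  // Build a 4x4 sparse matrix from triplets.
  std::vector<Eigen::Triplet<double>> T = {
    {0, 0, 1.0}, {1, 2, 2.0}, {3, 1, 3.0}
  };
  Eigen::SparseMatrix<double> A(4, 4);
  A.setFromTriplets(T.begin(), T.end());
  std::cout << A.nonZeros() << "\n"; // prints 3: number of stored entries
  A.coeffRef(2, 2) = 0.0;            // inserting an explicit zero...
  std::cout << A.nonZeros() << "\n"; // ...prints 4: explicit zeros are stored too
  A.prune(0.0);                      // drop stored zeros
  std::cout << A.nonZeros() << "\n"; // prints 3 again
  return 0;
}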
Example 1: cat
template <typename Scalar>
IGL_INLINE void igl::cat(
  const int dim,
  const Eigen::SparseMatrix<Scalar> & A,
  const Eigen::SparseMatrix<Scalar> & B,
  Eigen::SparseMatrix<Scalar> & C)
{
  assert(dim == 1 || dim == 2);
  using namespace Eigen;
  // Special case if B or A is empty
  if(A.size() == 0)
  {
    C = B;
    return;
  }
  if(B.size() == 0)
  {
    C = A;
    return;
  }
  DynamicSparseMatrix<Scalar, RowMajor> dyn_C;
  if(dim == 1)
  {
    assert(A.cols() == B.cols());
    dyn_C.resize(A.rows()+B.rows(),A.cols());
  }else if(dim == 2)
  {
    assert(A.rows() == B.rows());
    dyn_C.resize(A.rows(),A.cols()+B.cols());
  }else
  {
    fprintf(stderr,"cat.h: Error: Unsupported dimension %d\n",dim);
  }
  dyn_C.reserve(A.nonZeros()+B.nonZeros());
  // Iterate over outside of A
  for(int k=0; k<A.outerSize(); ++k)
  {
    // Iterate over inside
    for(typename SparseMatrix<Scalar>::InnerIterator it (A,k); it; ++it)
    {
      dyn_C.coeffRef(it.row(),it.col()) += it.value();
    }
  }
  // Iterate over outside of B
  for(int k=0; k<B.outerSize(); ++k)
  {
    // Iterate over inside
    for(typename SparseMatrix<Scalar>::InnerIterator it (B,k); it; ++it)
    {
      int r = (dim == 1 ? A.rows()+it.row() : it.row());
      int c = (dim == 2 ? A.cols()+it.col() : it.col());
      dyn_C.coeffRef(r,c) += it.value();
    }
  }
  C = SparseMatrix<Scalar>(dyn_C);
}
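Usage sketch for the function above (assuming libigl's <igl/cat.h> is on the include path): vertical concatenation, where the stacked blocks occupy disjoint rows, so nonZeros() of the result is the sum of the inputs':

// Sketch only; assumes libigl's igl::cat from <igl/cat.h>.
#include <igl/cat.h>
#include <Eigen/Sparse>
#include <cassert>

void demo_cat(const Eigen::SparseMatrix<double> & A,
              const Eigen::SparseMatrix<double> & B)
{
  assert(A.cols() == B.cols());
  Eigen::SparseMatrix<double> C;
  igl::cat(1, A, B, C); // stack A on top of B
  // The blocks occupy disjoint rows, so entry counts simply add up.
  assert(C.nonZeros() == A.nonZeros() + B.nonZeros());
}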
Example 2: find
template <typename T, typename DerivedI, typename DerivedJ, typename DerivedV>
IGL_INLINE void igl::find(
  const Eigen::SparseMatrix<T>& X,
  Eigen::DenseBase<DerivedI> & I,
  Eigen::DenseBase<DerivedJ> & J,
  Eigen::DenseBase<DerivedV> & V)
{
  // Resize outputs to fit nonzeros
  I.derived().resize(X.nonZeros(),1);
  J.derived().resize(X.nonZeros(),1);
  V.derived().resize(X.nonZeros(),1);
  int i = 0;
  // Iterate over outside
  for(int k=0; k<X.outerSize(); ++k)
  {
    // Iterate over inside
    for(typename Eigen::SparseMatrix<T>::InnerIterator it (X,k); it; ++it)
    {
      V(i) = it.value();
      I(i) = it.row();
      J(i) = it.col();
      i++;
    }
  }
}
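Usage sketch (assuming libigl's <igl/find.h>): extract the COO triplet form of a sparse matrix, in the spirit of MATLAB's find; all three outputs end up with X.nonZeros() entries:

// Sketch only; assumes libigl's igl::find from <igl/find.h>.
#include <igl/find.h>
#include <Eigen/Sparse>

void demo_find(const Eigen::SparseMatrix<double> & X)
{
  Eigen::VectorXi I, J;  // row and column indices of stored entries
  Eigen::VectorXd V;     // the corresponding values
  igl::find(X, I, J, V); // each output has X.nonZeros() entries
}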
Example 3: convert_from_Eigen
ColCompressedMatrix convert_from_Eigen(const Eigen::SparseMatrix<double> &m)
{
    assert(m.rows() == m.cols());
    assert(m.isCompressed());
    return ColCompressedMatrix(m.rows(), m.nonZeros(),
        m.valuePtr(), m.outerIndexPtr(), m.innerIndexPtr());
}
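ColCompressedMatrix is project-specific, but the raw-pointer accessors above are standard Eigen API. A minimal sketch of the invariants they satisfy for a compressed column-major matrix:

#include <Eigen/Sparse>
#include <cassert>

void inspect_csc(Eigen::SparseMatrix<double> & m)
{
  m.makeCompressed(); // raw arrays are only contiguous in compressed mode
  const int n = m.cols();
  // outerIndexPtr() has n+1 entries; the last one equals nonZeros().
  assert(m.outerIndexPtr()[n] == m.nonZeros());
  // Column j's values live in valuePtr()[outerIndexPtr()[j] .. outerIndexPtr()[j+1])
  // with matching row indices in innerIndexPtr().
  for (int j = 0; j < n; ++j)
    assert(m.outerIndexPtr()[j] <= m.outerIndexPtr()[j+1]);
}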
Example 4: dx
/// Solve the linear system Ax = b, with A being the
/// combined derivative matrix of the residual and b
/// being the residual itself.
/// \param[in] residual residual object containing A and b.
/// \return the solution x
NewtonIterationBlackoilSimple::SolutionVector
NewtonIterationBlackoilSimple::computeNewtonIncrement(const LinearisedBlackoilResidual& residual) const
{
    typedef LinearisedBlackoilResidual::ADB ADB;
    const int np = residual.material_balance_eq.size();
    ADB mass_res = residual.material_balance_eq[0];
    for (int phase = 1; phase < np; ++phase) {
        mass_res = vertcat(mass_res, residual.material_balance_eq[phase]);
    }
    const ADB well_res = vertcat(residual.well_flux_eq, residual.well_eq);
    const ADB total_residual = collapseJacs(vertcat(mass_res, well_res));
    Eigen::SparseMatrix<double, Eigen::RowMajor> matr;
    total_residual.derivative()[0].toSparse(matr);
    SolutionVector dx(SolutionVector::Zero(total_residual.size()));
    Opm::LinearSolverInterface::LinearSolverReport rep
        = linsolver_->solve(matr.rows(), matr.nonZeros(),
                            matr.outerIndexPtr(), matr.innerIndexPtr(), matr.valuePtr(),
                            total_residual.value().data(), dx.data(), parallelInformation_);
    // store iterations
    iterations_ = rep.iterations;
    if (!rep.converged) {
        OPM_THROW(LinearSolverProblem,
                  "FullyImplicitBlackoilSolver::solveJacobianSystem(): "
                  "Linear solver convergence failure.");
    }
    return dx;
}
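The solver call above hands over the matrix as raw CSR arrays; with Eigen::RowMajor storage, outerIndexPtr() indexes rows. A minimal sketch of that mapping, where solve_csr is a hypothetical stand-in for any C-style solver API:

#include <Eigen/Sparse>

// Hypothetical C-style solver signature, for illustration only.
void solve_csr(int nrows, int nnz,
               const int* row_ptr, const int* col_ind, const double* vals,
               const double* rhs, double* x);

void demo_csr(Eigen::SparseMatrix<double, Eigen::RowMajor> & A,
              const Eigen::VectorXd & b, Eigen::VectorXd & x)
{
  A.makeCompressed(); // raw arrays are only valid in compressed mode
  x = Eigen::VectorXd::Zero(A.rows());
  solve_csr((int)A.rows(), (int)A.nonZeros(),
            A.outerIndexPtr(), A.innerIndexPtr(), A.valuePtr(),
            b.data(), x.data());
}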
Example 5: evalModel
void ProbitNoise::evalModel(Eigen::SparseMatrix<double> & Ytest, const int n, Eigen::VectorXd & predictions, Eigen::VectorXd & predictions_var, const Eigen::MatrixXd &cols, const Eigen::MatrixXd &rows, double mean_rating) {
  const unsigned N = Ytest.nonZeros();
  Eigen::VectorXd pred(N);
  Eigen::VectorXd test(N);
  // #pragma omp parallel for schedule(dynamic,8) reduction(+:se, se_avg) <- dark magic :)
  for (int k = 0; k < Ytest.outerSize(); ++k) {
    int idx = Ytest.outerIndexPtr()[k];
    for (Eigen::SparseMatrix<double>::InnerIterator it(Ytest,k); it; ++it) {
      pred[idx] = nCDF(cols.col(it.col()).dot(rows.col(it.row())));
      test[idx] = it.value();
      // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
      double pred_avg;
      if (n == 0) {
        pred_avg = pred[idx];
      } else {
        double delta = pred[idx] - predictions[idx];
        pred_avg = (predictions[idx] + delta / (n + 1));
        predictions_var[idx] += delta * (pred[idx] - pred_avg);
      }
      predictions[idx++] = pred_avg;
    }
  }
  auc_test_onesample = auc(pred,test);
  auc_test = auc(predictions, test);
}
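The indexing trick above, sizing buffers with nonZeros() and seeding each column's write position from outerIndexPtr()[k], lets the outer loop be parallelized because every column writes a disjoint slice. A stripped-down sketch of just that pattern:

#include <Eigen/Sparse>
#include <cassert>

// Flatten the stored values of Y into a dense vector, column by column.
Eigen::VectorXd flatten_values(const Eigen::SparseMatrix<double> & Y)
{
  assert(Y.isCompressed()); // outerIndexPtr offsets require compressed storage
  Eigen::VectorXd out(Y.nonZeros());
  #pragma omp parallel for
  for (int k = 0; k < Y.outerSize(); ++k) {
    int idx = Y.outerIndexPtr()[k]; // offset of column k's first stored entry
    for (Eigen::SparseMatrix<double>::InnerIterator it(Y, k); it; ++it)
      out[idx++] = it.value();
  }
  return out;
}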
Example 6: assert
template <typename Vtype>
IGL_INLINE void igl::matlab::prepare_lhs_double(
  const Eigen::SparseMatrix<Vtype> & M,
  mxArray *plhs[])
{
  using namespace std;
  const int m = M.rows();
  const int n = M.cols();
  // THIS WILL NOT WORK FOR ROW-MAJOR
  assert(n==M.outerSize());
  const int nzmax = M.nonZeros();
  plhs[0] = mxCreateSparse(m, n, nzmax, mxREAL);
  mxArray * mx_data = plhs[0];
  // Copy data immediately
  double * pr = mxGetPr(mx_data);
  mwIndex * ir = mxGetIr(mx_data);
  mwIndex * jc = mxGetJc(mx_data);
  // Iterate over outside
  int k = 0;
  for(int j=0; j<M.outerSize(); j++)
  {
    jc[j] = k;
    // Iterate over inside
    for(typename Eigen::SparseMatrix<Vtype>::InnerIterator it (M,j); it; ++it)
    {
      // copy (cast to double)
      pr[k] = it.value();
      ir[k] = it.row();
      k++;
    }
  }
  jc[M.outerSize()] = k;
}
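A hedged sketch of how such a helper is typically driven from a MEX entry point (mexFunction is MATLAB's standard entry symbol; the matrix built here is only illustrative):

// Illustrative MEX entry point; the build setup (mex, libigl includes) is assumed.
#include <mex.h>
#include <Eigen/Sparse>
#include <igl/matlab/prepare_lhs.h>

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
  // Build (or receive) some sparse matrix M, then hand it back to MATLAB.
  Eigen::SparseMatrix<double> M(3, 3);
  M.insert(0, 0) = 1.0;
  M.insert(2, 1) = 2.0;
  M.makeCompressed();
  igl::matlab::prepare_lhs_double(M, plhs); // fills plhs[0]
}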
Example 7: repdiag
template <typename T>
IGL_INLINE void igl::repdiag(
  const Eigen::SparseMatrix<T>& A,
  const int d,
  Eigen::SparseMatrix<T>& B)
{
  using namespace std;
  using namespace Eigen;
  int m = A.rows();
  int n = A.cols();
  vector<Triplet<T> > IJV;
  IJV.reserve(A.nonZeros()*d);
  // Loop outer level
  for (int k=0; k<A.outerSize(); ++k)
  {
    // loop inner level
    for (typename Eigen::SparseMatrix<T>::InnerIterator it(A,k); it; ++it)
    {
      for(int i = 0;i<d;i++)
      {
        IJV.push_back(Triplet<T>(i*m+it.row(),i*n+it.col(),it.value()));
      }
    }
  }
  B.resize(m*d,n*d);
  B.setFromTriplets(IJV.begin(),IJV.end());
  // Q: Why is this **Very** slow?
  //int m = A.rows();
  //int n = A.cols();
  //B.resize(m*d,n*d);
  //// Reserve enough space for new non zeros
  //B.reserve(d*A.nonZeros());
  //// loop over reps
  //for(int i=0;i<d;i++)
  //{
  //  // Loop outer level
  //  for (int k=0; k<A.outerSize(); ++k)
  //  {
  //    // loop inner level
  //    for (typename Eigen::SparseMatrix<T>::InnerIterator it(A,k); it; ++it)
  //    {
  //      B.insert(i*m+it.row(),i*n+it.col()) = it.value();
  //    }
  //  }
  //}
  //B.makeCompressed();
}
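Usage sketch (assuming libigl's <igl/repdiag.h>): repeating A three times along the diagonal triples both the dimensions and the stored-entry count:

// Sketch only; assumes libigl's igl::repdiag from <igl/repdiag.h>.
#include <igl/repdiag.h>
#include <Eigen/Sparse>
#include <cassert>

void demo_repdiag(const Eigen::SparseMatrix<double> & A)
{
  Eigen::SparseMatrix<double> B;
  igl::repdiag(A, 3, B); // B = blkdiag(A, A, A)
  assert(B.rows() == 3 * A.rows() && B.cols() == 3 * A.cols());
  assert(B.nonZeros() == 3 * A.nonZeros());
}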
Example 8: VectorSparseMatrix
inline std::unique_ptr<giss::VectorSparseMatrix> Eigen_to_giss(
    Eigen::SparseMatrix<double> const &mat)
{
    std::unique_ptr<giss::VectorSparseMatrix> ret(
        new giss::VectorSparseMatrix(giss::SparseDescr(
            mat.rows(), mat.cols())));
    int nz = mat.nonZeros();
    ret->reserve(nz);
    for (int k=0; k<mat.outerSize(); ++k) {
        for (Eigen::SparseMatrix<double>::InnerIterator it(mat,k); it; ++it) {
            ret->set(it.row(), it.col(), it.value(), SparseMatrix::DuplicatePolicy::ADD);
        }
    }
    return ret;
}
Example 9: formInterleavedSystem
void NewtonIterationBlackoilInterleaved::formInterleavedSystem(const std::vector<ADB>& eqs,
                                                               const Eigen::SparseMatrix<double, Eigen::RowMajor>& A,
                                                               Mat& istlA) const
{
    const int np = eqs.size();
    // Find sparsity structure as union of basic block sparsity structures,
    // corresponding to the jacobians with respect to pressure.
    // Use addition to get to the union structure.
    Eigen::SparseMatrix<double> structure = eqs[0].derivative()[0];
    for (int phase = 0; phase < np; ++phase) {
        structure += eqs[phase].derivative()[0];
    }
    Eigen::SparseMatrix<double, Eigen::RowMajor> s = structure;
    // Create ISTL matrix with interleaved rows and columns (block structured).
    assert(np == 3);
    istlA.setSize(s.rows(), s.cols(), s.nonZeros());
    istlA.setBuildMode(Mat::row_wise);
    const int* ia = s.outerIndexPtr();
    const int* ja = s.innerIndexPtr();
    for (Mat::CreateIterator row = istlA.createbegin(); row != istlA.createend(); ++row) {
        int ri = row.index();
        for (int i = ia[ri]; i < ia[ri + 1]; ++i) {
            row.insert(ja[i]);
        }
    }
    const int size = s.rows();
    Span span[3] = { Span(size, 1, 0),
                     Span(size, 1, size),
                     Span(size, 1, 2*size) };
    for (int row = 0; row < size; ++row) {
        for (int col_ix = ia[row]; col_ix < ia[row + 1]; ++col_ix) {
            const int col = ja[col_ix];
            MatrixBlockType block;
            for (int p1 = 0; p1 < np; ++p1) {
                for (int p2 = 0; p2 < np; ++p2) {
                    block[p1][p2] = A.coeff(span[p1][row], span[p2][col]);
                }
            }
            istlA[row][col] = block;
        }
    }
}
Example 10: init
//// AdaptiveGaussianNoise ////
void AdaptiveGaussianNoise::init(const Eigen::SparseMatrix<double> &train, double mean_value) {
  double se = 0.0;
#pragma omp parallel for schedule(dynamic, 4) reduction(+:se)
  for (int k = 0; k < train.outerSize(); ++k) {
    for (Eigen::SparseMatrix<double>::InnerIterator it(train,k); it; ++it) {
      se += square(it.value() - mean_value);
    }
  }
  var_total = se / train.nonZeros();
  if (var_total <= 0.0 || std::isnan(var_total)) {
    // if the variance cannot be computed, fall back to 1.0
    var_total = 1.0;
  }
  // Var(noise) = Var(total) / (SN + 1)
  alpha = (sn_init + 1.0) / var_total;
  alpha_max = (sn_max + 1.0) / var_total;
}
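The same reduction pattern in isolation, with nonZeros() as the sample count (square() above is the project's own helper, so this sketch avoids it):

#include <Eigen/Sparse>

// Mean of the stored entries of Y; Y.nonZeros() is the sample count.
double stored_mean(const Eigen::SparseMatrix<double> & Y)
{
  double sum = 0.0;
  for (int k = 0; k < Y.outerSize(); ++k)
    for (Eigen::SparseMatrix<double>::InnerIterator it(Y, k); it; ++it)
      sum += it.value();
  return Y.nonZeros() > 0 ? sum / Y.nonZeros() : 0.0;
}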
Example 11: update
void AdaptiveGaussianNoise::update(const Eigen::SparseMatrix<double> &train, double mean_value, std::vector< std::unique_ptr<Eigen::MatrixXd> > & samples)
{
  double sumsq = 0.0;
  Eigen::MatrixXd & U = *samples[0];
  Eigen::MatrixXd & V = *samples[1];
#pragma omp parallel for schedule(dynamic, 4) reduction(+:sumsq)
  for (int j = 0; j < train.outerSize(); j++) {
    auto Vj = V.col(j);
    for (Eigen::SparseMatrix<double>::InnerIterator it(train, j); it; ++it) {
      double Yhat = Vj.dot( U.col(it.row()) ) + mean_value;
      sumsq += square(Yhat - it.value());
    }
  }
  // (a0, b0) correspond to a prior of 1 sample of noise with full variance
  double a0 = 0.5;
  double b0 = 0.5 * var_total;
  double aN = a0 + train.nonZeros() / 2.0;
  double bN = b0 + sumsq / 2.0;
  alpha = rgamma(aN, 1.0 / bN);
  if (alpha > alpha_max) {
    alpha = alpha_max;
  }
}
Example 12: harwell_boeing
template <typename Scalar, typename Index>
IGL_INLINE void igl::harwell_boeing(
  const Eigen::SparseMatrix<Scalar> & A,
  int & num_rows,
  std::vector<Scalar> & V,
  std::vector<Index> & R,
  std::vector<Index> & C)
{
  num_rows = A.rows();
  int num_cols = A.cols();
  int nnz = A.nonZeros();
  V.resize(nnz);
  R.resize(nnz);
  C.resize(num_cols+1);
  // Assumes outersize is columns
  assert(A.cols() == A.outerSize());
  int column_pointer = 0;
  int i = 0;
  int k = 0;
  // Iterate over outside
  for(; k<A.outerSize(); ++k)
  {
    C[k] = column_pointer;
    // Iterate over inside
    for(typename Eigen::SparseMatrix<Scalar>::InnerIterator it (A,k); it; ++it)
    {
      V[i] = it.value();
      R[i] = it.row();
      i++;
      // Also increment column pointer
      column_pointer++;
    }
  }
  // by convention C[num_cols] = nnz
  C[k] = column_pointer;
}
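Usage sketch (assuming libigl's <igl/harwell_boeing.h>): the outputs form a standard compressed-column triple, so the final column pointer equals nonZeros():

// Sketch only; assumes libigl's igl::harwell_boeing from <igl/harwell_boeing.h>.
#include <igl/harwell_boeing.h>
#include <Eigen/Sparse>
#include <vector>
#include <cassert>

void demo_hb(const Eigen::SparseMatrix<double> & A)
{
  int num_rows;
  std::vector<double> V; // values
  std::vector<int> R;    // row indices
  std::vector<int> C;    // column pointers, C.size() == A.cols()+1
  igl::harwell_boeing(A, num_rows, V, R, C);
  assert((int)V.size() == A.nonZeros());
  assert(C.back() == A.nonZeros());
}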
Example 13: slice
template <typename TX, typename TY>
IGL_INLINE void igl::slice(
  const Eigen::SparseMatrix<TX>& X,
  const Eigen::Matrix<int,Eigen::Dynamic,1> & R,
  const Eigen::Matrix<int,Eigen::Dynamic,1> & C,
  Eigen::SparseMatrix<TY>& Y)
{
#if 1
  int xm = X.rows();
  int xn = X.cols();
  int ym = R.size();
  int yn = C.size();
  // special case when R or C is empty
  if(ym == 0 || yn == 0)
  {
    Y.resize(ym,yn);
    return;
  }
  assert(R.minCoeff() >= 0);
  assert(R.maxCoeff() < xm);
  assert(C.minCoeff() >= 0);
  assert(C.maxCoeff() < xn);
  // Build reindexing maps for rows and columns: RI[i] lists every output row
  // that input row i should be copied to (empty if i is not selected)
  std::vector<std::vector<int> > RI;
  RI.resize(xm);
  for(int i = 0;i<ym;i++)
  {
    RI[R(i)].push_back(i);
  }
  std::vector<std::vector<int> > CI;
  CI.resize(xn);
  // Same map for columns
  for(int i = 0;i<yn;i++)
  {
    CI[C(i)].push_back(i);
  }
  // Resize output
  Eigen::DynamicSparseMatrix<TY, Eigen::RowMajor> dyn_Y(ym,yn);
  // Take a guess at the number of nonzeros (this assumes uniform distribution
  // not banded or heavily diagonal)
  dyn_Y.reserve((X.nonZeros()/(X.rows()*X.cols())) * (ym*yn));
  // Iterate over outside
  for(int k=0; k<X.outerSize(); ++k)
  {
    // Iterate over inside
    for(typename Eigen::SparseMatrix<TX>::InnerIterator it (X,k); it; ++it)
    {
      std::vector<int>::iterator rit, cit;
      for(rit = RI[it.row()].begin();rit != RI[it.row()].end(); rit++)
      {
        for(cit = CI[it.col()].begin();cit != CI[it.col()].end(); cit++)
        {
          dyn_Y.coeffRef(*rit,*cit) = it.value();
        }
      }
    }
  }
  Y = Eigen::SparseMatrix<TY>(dyn_Y);
#else
  // Alec: This is _not_ valid for arbitrary R,C since they don't necessarily
  // represent a strict permutation of the rows and columns: rows or
  // columns could be removed or replicated. The removal of rows seems to be
  // handled here (although it's not clear if there is a performance gain when
  // the #removals >> #remains). If this is sufficiently faster than the
  // correct code above, one could test whether all entries in R and C are
  // unique and apply the permutation version if appropriate.
  //
  int xm = X.rows();
  int xn = X.cols();
  int ym = R.size();
  int yn = C.size();
  // special case when R or C is empty
  if(ym == 0 || yn == 0)
  {
    Y.resize(ym,yn);
    return;
  }
  assert(R.minCoeff() >= 0);
  assert(R.maxCoeff() < xm);
  assert(C.minCoeff() >= 0);
  assert(C.maxCoeff() < xn);
  // initialize row and col permutation vectors
  Eigen::VectorXi rowIndexVec = Eigen::VectorXi::LinSpaced(xm,0,xm-1);
  Eigen::VectorXi rowPermVec = Eigen::VectorXi::LinSpaced(xm,0,xm-1);
  for(int i=0;i<ym;i++)
  {
    int pos = rowIndexVec.coeffRef(R(i));
    if(pos != i)
    {
      int& val = rowPermVec.coeffRef(i);
      std::swap(rowIndexVec.coeffRef(val),rowIndexVec.coeffRef(R(i)));
      std::swap(rowPermVec.coeffRef(i),rowPermVec.coeffRef(pos));
    }
//......... part of the code omitted here .........
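Usage sketch (assuming libigl's <igl/slice.h>): Y = X(R,C) in MATLAB notation; indices may repeat or omit rows and columns:

// Sketch only; assumes libigl's igl::slice from <igl/slice.h>.
#include <igl/slice.h>
#include <Eigen/Sparse>

void demo_slice(const Eigen::SparseMatrix<double> & X)
{
  // Take the first two rows and columns 0 and 2 (indices may repeat).
  Eigen::VectorXi R(2); R << 0, 1;
  Eigen::VectorXi C(2); C << 0, 2;
  Eigen::SparseMatrix<double> Y;
  igl::slice(X, R, C, Y); // Y = X(R, C)
}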
Example 14: lu_lagrange
template <typename T>
IGL_INLINE bool igl::lu_lagrange(
  const Eigen::SparseMatrix<T> & ATA,
  const Eigen::SparseMatrix<T> & C,
  Eigen::SparseMatrix<T> & L,
  Eigen::SparseMatrix<T> & U)
{
#if EIGEN_VERSION_AT_LEAST(3,0,92)
#if defined(_WIN32)
  #pragma message("lu_lagrange has not yet been implemented for your Eigen Version")
#else
  #warning lu_lagrange has not yet been implemented for your Eigen Version
#endif
  return false;
#else
  // number of unknowns
  int n = ATA.rows();
  // number of lagrange multipliers
  int m = C.cols();
  assert(ATA.cols() == n);
  if(m != 0)
  {
    assert(C.rows() == n);
    if(C.nonZeros() == 0)
    {
      // See note above about empty columns in C
      fprintf(stderr,"Error: lu_lagrange() C has columns but no entries\n");
      return false;
    }
  }
  // Check that each column of C has at least one entry
  std::vector<bool> has_entry; has_entry.resize(C.cols(),false);
  // Iterate over outside
  for(int k=0; k<C.outerSize(); ++k)
  {
    // Iterate over inside
    for(typename Eigen::SparseMatrix<T>::InnerIterator it (C,k); it; ++it)
    {
      has_entry[it.col()] = true;
    }
  }
  for(int i=0;i<(int)has_entry.size();i++)
  {
    if(!has_entry[i])
    {
      // See note above about empty columns in C
      fprintf(stderr,"Error: lu_lagrange() C(:,%d) has no entries\n",i);
      return false;
    }
  }
  // Cholesky factorization of ATA
  //// Eigen fails if you give a full view of the matrix like this:
  //Eigen::SparseLLT<SparseMatrix<T> > ATA_LLT(ATA);
  Eigen::SparseMatrix<T> ATA_LT = ATA.template triangularView<Eigen::Lower>();
  Eigen::SparseLLT<Eigen::SparseMatrix<T> > ATA_LLT(ATA_LT);
  Eigen::SparseMatrix<T> J = ATA_LLT.matrixL();
  //if(!ATA_LLT.succeeded())
  if(!((J*0).eval().nonZeros() == 0))
  {
    fprintf(stderr,"Error: lu_lagrange() failed to factor ATA\n");
    return false;
  }
  if(m == 0)
  {
    // If there are no constraints (C is empty) then LU decomposition is just L
    // and L' from cholesky decomposition
    L = J;
    U = J.transpose();
  }else
  {
    // Construct helper matrix M
    Eigen::SparseMatrix<T> M = C;
    J.template triangularView<Eigen::Lower>().solveInPlace(M);
    // Compute cholesky factorization of M'*M
    Eigen::SparseMatrix<T> MTM = M.transpose() * M;
    Eigen::SparseLLT<Eigen::SparseMatrix<T> > MTM_LLT(MTM.template triangularView<Eigen::Lower>());
    Eigen::SparseMatrix<T> K = MTM_LLT.matrixL();
    //if(!MTM_LLT.succeeded())
    if(!((K*0).eval().nonZeros() == 0))
    {
      fprintf(stderr,"Error: lu_lagrange() failed to factor MTM\n");
      return false;
    }
    // assemble LU decomposition of Q
    Eigen::Matrix<int,Eigen::Dynamic,1> MI;
    Eigen::Matrix<int,Eigen::Dynamic,1> MJ;
    Eigen::Matrix<T,Eigen::Dynamic,1> MV;
//......... part of the code omitted here .........
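A closing note on the version guard above: SparseLLT is pre-3.1 Eigen API. A hedged sketch of the equivalent Cholesky step with the modern SimplicialLLT (a substitution for illustration, not the libigl implementation):

#include <Eigen/Sparse>

// Factor ATA = L * L^T with modern Eigen; returns false on failure.
// NaturalOrdering keeps L a factor of ATA itself (the default AMD
// ordering would produce a factor of a permuted matrix instead).
bool cholesky_L(const Eigen::SparseMatrix<double> & ATA,
                Eigen::SparseMatrix<double> & L)
{
  Eigen::SimplicialLLT<Eigen::SparseMatrix<double>, Eigen::Lower,
                       Eigen::NaturalOrdering<int>> llt(ATA);
  if (llt.info() != Eigen::Success) return false;
  L = llt.matrixL(); // lower-triangular Cholesky factor
  return true;
}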