This page collects typical C++ usage examples of Eigen::Matrix::minCoeff. If you are wondering what Matrix::minCoeff does, how to call it, or where it is typically used, the curated snippets below should help; they also show the surrounding Eigen::Matrix API in context.
Seven code examples of Matrix::minCoeff follow.
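Before the project examples, here is a minimal standalone sketch of the two common minCoeff overloads (this snippet is illustrative and not taken from any of the projects below): the value-only form, and the form that also reports where the minimum sits.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::VectorXd v(4);
  v << 3.0, -1.0, 2.5, 0.0;
  double m = v.minCoeff();        // value of the smallest coefficient: -1.0
  Eigen::Index i;
  double m2 = v.minCoeff(&i);     // also reports the index: m2 == -1.0, i == 1
  std::cout << m << " == " << m2 << " at index " << i << std::endl;
  return 0;
}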
Example 1: slice_into
IGL_INLINE void igl::slice_into(
const Eigen::PlainObjectBase<DerivedX> & X,
const Eigen::Matrix<int,Eigen::Dynamic,1> & R,
const Eigen::Matrix<int,Eigen::Dynamic,1> & C,
Eigen::PlainObjectBase<DerivedX> & Y)
{
int xm = X.rows();
int xn = X.cols();
assert(R.size() == xm);
assert(C.size() == xn);
#ifndef NDEBUG
int ym = Y.rows();
int yn = Y.cols();
assert(R.minCoeff() >= 0);
assert(R.maxCoeff() < ym);
assert(C.minCoeff() >= 0);
assert(C.maxCoeff() < yn);
#endif
// Scatter each entry of X into Y at (R(i),C(j))
for(int i = 0;i<xm;i++)
{
for(int j = 0;j<xn;j++)
{
Y(R(i),C(j)) = X(i,j);
}
}
}
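A hedged usage sketch for slice_into (variable names and sizes are illustrative, not from libigl's own tests; igl/slice_into.h is assumed to be on the include path): Y must already be allocated large enough for every index in R and C, which is exactly what the minCoeff/maxCoeff assertions above verify.
// Scatter a 2x2 block X into rows {0,2} and columns {1,3} of a 4x4 matrix Y
Eigen::MatrixXd X(2,2);
Eigen::MatrixXd Y = Eigen::MatrixXd::Zero(4,4);
X << 1, 2,
     3, 4;
Eigen::VectorXi R(2), C(2);
R << 0, 2;
C << 1, 3;
igl::slice_into(X, R, C, Y);  // Y(0,1)=1, Y(0,3)=2, Y(2,1)=3, Y(2,3)=4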
Example 2: childcloud
void k_means_tree<Point, K, Data, Lp>::assign_extra(CloudPtrT& subcloud, node* n, const vector<int>& subinds)
{
std::cout << subcloud->size() << std::endl;
Eigen::Matrix<float, rows, dim> centroids;
Eigen::Matrix<float, 1, dim> distances;
for (size_t i = 0; i < dim; ++i) {
centroids.col(i) = eig(n->children[i]->centroid);
}
std::vector<int> clusters[dim];
PointT p;
for (int ind = 0; ind < subcloud->size(); ++ind) {
p = subcloud->at(ind);
int closest;
// L1 (Manhattan) distance from p to every centroid; the commented-out
// lines are dot-product and squared-Euclidean alternatives
//distances = eig(p).transpose()*centroids;
distances = (centroids.colwise()-eig(p)).array().abs().colwise().sum();
//distances = (centroids.colwise()-eig(p)).colwise().squaredNorm();
distances.minCoeff(&closest);
clusters[closest].push_back(ind);
}
for (size_t i = 0; i < dim; ++i) {
node* c = n->children[i];
if (c->is_leaf) {
leaf* l = static_cast<leaf*>(c);
l->inds.reserve(l->inds.size() + clusters[i].size());
for (size_t j = 0; j < clusters[i].size(); ++j) {
l->inds.push_back(subinds[clusters[i][j]]);
}
continue;
}
CloudPtrT childcloud(new CloudT);
childcloud->resize(clusters[i].size());
vector<int> childinds(clusters[i].size());
for (size_t j = 0; j < clusters[i].size(); ++j) {
childcloud->at(j) = subcloud->at(clusters[i][j]);
childinds[j] = subinds[clusters[i][j]];
}
assign_extra(childcloud, c, childinds);
}
}
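The core pattern in this example, and again in Example 7 below, is: build a row vector of per-centroid distances and let minCoeff report the index of the closest centroid. A minimal standalone sketch of that pattern, with fixed sizes chosen purely for illustration:
#include <Eigen/Dense>

// Index of the column of `centroids` closest to `p` in L1 (Manhattan) distance
int closest_centroid(const Eigen::Matrix<float, 3, 4>& centroids,
                     const Eigen::Vector3f& p)
{
  Eigen::Matrix<float, 1, 4> distances;
  distances = (centroids.colwise() - p).array().abs().colwise().sum();
  int closest;
  distances.minCoeff(&closest);  // index of the smallest distance
  return closest;
}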
Example 3: slice
IGL_INLINE void igl::slice(
const Eigen::SparseMatrix<TX>& X,
const Eigen::Matrix<int,Eigen::Dynamic,1> & R,
const Eigen::Matrix<int,Eigen::Dynamic,1> & C,
Eigen::SparseMatrix<TY>& Y)
{
#if 1
int xm = X.rows();
int xn = X.cols();
int ym = R.size();
int yn = C.size();
// special case when R or C is empty
if(ym == 0 || yn == 0)
{
Y.resize(ym,yn);
return;
}
assert(R.minCoeff() >= 0);
assert(R.maxCoeff() < xm);
assert(C.minCoeff() >= 0);
assert(C.maxCoeff() < xn);
// Build reindexing maps: RI[i] lists the output rows that come from row i of X
std::vector<std::vector<int> > RI;
RI.resize(xm);
for(int i = 0;i<ym;i++)
{
RI[R(i)].push_back(i);
}
std::vector<std::vector<int> > CI;
CI.resize(xn);
// record, for each column of X, which output columns it maps to
for(int i = 0;i<yn;i++)
{
CI[C(i)].push_back(i);
}
// Resize output
Eigen::DynamicSparseMatrix<TY, Eigen::RowMajor> dyn_Y(ym,yn);
// Take a guess at the number of nonzeros (this assumes a uniform distribution,
// not banded or heavily diagonal); compute the density in floating point so the
// estimate does not truncate to zero
const double density = double(X.nonZeros())/(double(X.rows())*double(X.cols()));
dyn_Y.reserve(int(density * double(ym) * double(yn)));
// Iterate over outside
for(int k=0; k<X.outerSize(); ++k)
{
// Iterate over inside
for(typename Eigen::SparseMatrix<TX>::InnerIterator it (X,k); it; ++it)
{
std::vector<int>::iterator rit, cit;
for(rit = RI[it.row()].begin();rit != RI[it.row()].end(); rit++)
{
for(cit = CI[it.col()].begin();cit != CI[it.col()].end(); cit++)
{
dyn_Y.coeffRef(*rit,*cit) = it.value();
}
}
}
}
Y = Eigen::SparseMatrix<TY>(dyn_Y);
#else
// Alec: This is _not_ valid for arbitrary R,C since they don't necessarily
// represent a strict permutation of the rows and columns: rows or
// columns could be removed or replicated. The removal of rows seems to be
// handled here (although it's not clear if there is a performance gain when
// the #removals >> #remains). If this is sufficiently faster than the
// correct code above, one could test whether all entries in R and C are
// unique and apply the permutation version if appropriate.
//
int xm = X.rows();
int xn = X.cols();
int ym = R.size();
int yn = C.size();
// special case when R or C is empty
if(ym == 0 || yn == 0)
{
Y.resize(ym,yn);
return;
}
assert(R.minCoeff() >= 0);
assert(R.maxCoeff() < xm);
assert(C.minCoeff() >= 0);
assert(C.maxCoeff() < xn);
// initialize row and col permutation vectors
Eigen::VectorXi rowIndexVec = Eigen::VectorXi::LinSpaced(xm,0,xm-1);
Eigen::VectorXi rowPermVec = Eigen::VectorXi::LinSpaced(xm,0,xm-1);
for(int i=0;i<ym;i++)
{
int pos = rowIndexVec.coeffRef(R(i));
if(pos != i)
{
int& val = rowPermVec.coeffRef(i);
std::swap(rowIndexVec.coeffRef(val),rowIndexVec.coeffRef(R(i)));
std::swap(rowPermVec.coeffRef(i),rowPermVec.coeffRef(pos));
}
//......... part of the code omitted here .........
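A hedged usage sketch for the sparse slice above (sizes and indices are illustrative): R and C pick rows and columns of X, possibly repeated or out of order, and Y comes out with size R.size() by C.size().
Eigen::SparseMatrix<double> X(100, 100), Y;
// ... fill X ...
Eigen::VectorXi R(3), C(2);
R << 5, 5, 7;   // row indices may repeat
C << 0, 99;
igl::slice(X, R, C, Y);  // Y is 3 x 2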
Example 4: min
inline T min(const Eigen::Matrix<T, R, C>& m) {
if (m.size() == 0)
return std::numeric_limits<double>::infinity();
return m.minCoeff();
}
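A usage note on the wrapper above: returning +infinity for an empty matrix makes min behave like a fold over no elements, so combining minima of several blocks stays well defined even when one block is empty. A hedged illustration (assuming the template above is visible as min in the current namespace, and <algorithm> is included for std::min):
Eigen::MatrixXd a(0, 0);  // empty
Eigen::VectorXd b(3);
b << 4.0, -2.0, 7.0;
double m = std::min(min(a), min(b));  // min(a) == +inf, so m == -2.0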
Example 5: getMinIndexOfMinRatioVector
static int getMinIndexOfMinRatioVector(const Eigen::Matrix<Type, Eigen::Dynamic, 1> &vector) {
int index;
vector.minCoeff(&index);
return index;
}
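This helper reads like the ratio test of a simplex-style pivot step (an assumption; only the function body is shown). Note that when the minimum is attained more than once, Eigen's minCoeff reports the first occurrence. A minimal usage sketch (class qualification omitted for brevity):
Eigen::VectorXd ratios(4);
ratios << 2.0, 0.5, 3.0, 0.5;
int pivot = getMinIndexOfMinRatioVector(ratios);  // pivot == 1, the first minimum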
Example 6: arap_dof_recomputation
IGL_INLINE bool igl::arap_dof_recomputation(
const Eigen::Matrix<int,Eigen::Dynamic,1> & fixed_dim,
const Eigen::SparseMatrix<double> & A_eq,
ArapDOFData<LbsMatrixType, SSCALAR> & data)
{
using namespace Eigen;
typedef Matrix<SSCALAR, Dynamic, Dynamic> MatrixXS;
LbsMatrixType * Q;
LbsMatrixType Qdyn;
if(data.with_dynamics)
{
// multiply Mass_tilde by 1/timestep^2 and add it to the quadratic coefficients matrix
// Might be missing a 0.5 here
LbsMatrixType Q_copy = data.Q;
Qdyn = Q_copy + (1.0/(data.h*data.h))*data.Mass_tilde;
Q = &Qdyn;
// This may/should be superfluous
//printf("is_symmetric()\n");
if(!is_symmetric(*Q))
{
//printf("Fixing symmetry...\n");
// "Fix" symmetry
LbsMatrixType QT = (*Q).transpose();
LbsMatrixType Q_copy = *Q;
*Q = 0.5*(Q_copy+QT);
// Check that ^^^ this really worked. It doesn't always
//assert(is_symmetric(*Q));
}
}else
{
Q = &data.Q;
}
assert((int)data.CSM_M.size() == data.dim);
assert(A_eq.cols() == data.m*data.dim*(data.dim+1));
data.fixed_dim = fixed_dim;
if(fixed_dim.size() > 0)
{
assert(fixed_dim.maxCoeff() < data.m*data.dim*(data.dim+1));
assert(fixed_dim.minCoeff() >= 0);
}
#ifdef EXTREME_VERBOSE
cout<<"data.fixed_dim=["<<endl<<data.fixed_dim<<endl<<"]+1;"<<endl;
#endif
// Compute dense solve matrix (alternative of matrix factorization)
//printf("min_quad_dense_precompute()\n");
MatrixXd Qfull; full(*Q, Qfull);
MatrixXd A_eqfull; full(A_eq, A_eqfull);
MatrixXd M_Solve;
double timer0_start = get_seconds_hires();
bool use_lu = data.effective_dim != 2;
//use_lu = false;
//printf("use_lu: %s\n",(use_lu?"TRUE":"FALSE"));
min_quad_dense_precompute(Qfull, A_eqfull, use_lu,M_Solve);
double timer0_end = get_seconds_hires();
verbose("Bob timing: %.20f\n", (timer0_end - timer0_start)*1000.0);
// Precompute full solve matrix:
const int fsRows = data.m * data.dim * (data.dim + 1); // 12 * number_of_bones
const int fsCols1 = data.M_KG.cols(); // 9 * number_of_posConstraints
const int fsCols2 = A_eq.rows(); // number_of_posConstraints
data.M_FullSolve.resize(fsRows, fsCols1 + fsCols2);
// note the magical multiplicative constant "-0.5", I've no idea why it has
// to be there :)
data.M_FullSolve <<
(-0.5 * M_Solve.block(0, 0, fsRows, fsRows) * data.M_KG).template cast<SSCALAR>(),
M_Solve.block(0, fsRows, fsRows, fsCols2).template cast<SSCALAR>();
if(data.with_dynamics)
{
printf(
"---------------------------------------------------------------------\n"
"\n\n\nWITH DYNAMICS recomputation\n\n\n"
"---------------------------------------------------------------------\n"
);
// Also need to save Π1 before it gets multiplied by Ktilde (aka M_KG)
data.Pi_1 = M_Solve.block(0, 0, fsRows, fsRows).template cast<SSCALAR>();
}
// Precompute condensed matrices,
// first CSM:
std::vector<MatrixXS> CSM_M_SSCALAR;
CSM_M_SSCALAR.resize(data.dim);
for (int i=0; i<data.dim; i++) CSM_M_SSCALAR[i] = data.CSM_M[i].template cast<SSCALAR>();
SSCALAR maxErr1 = condense_CSM(CSM_M_SSCALAR, data.m, data.dim, data.CSM);
verbose("condense_CSM maxErr = %.15f (this should be close to zero)\n", maxErr1);
assert(fabs(maxErr1) < 1e-5);
// and then solveBlock1:
// number of groups
const int k = data.CSM_M[0].rows()/data.dim;
MatrixXS SolveBlock1 = data.M_FullSolve.block(0, 0, data.M_FullSolve.rows(), data.dim * data.dim * k);
SSCALAR maxErr2 = condense_Solve1(SolveBlock1, data.m, k, data.dim, data.CSolveBlock1);
verbose("condense_Solve1 maxErr = %.15f (this should be close to zero)\n", maxErr2);
//......... part of the code omitted here .........
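Examples 1, 3, and 6 all rely on the same idiom: minCoeff()/maxCoeff() on an index vector as a cheap bounds check before the indices are used. A standalone sketch of that idiom (the function name is illustrative):
#include <cassert>
#include <Eigen/Dense>

// Assert that every entry of `idx` is a valid row of a matrix with `n` rows
void check_indices(const Eigen::VectorXi& idx, int n)
{
  if (idx.size() > 0)
  {
    assert(idx.minCoeff() >= 0);
    assert(idx.maxCoeff() < n);
  }
}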
Example 7: range
typename k_means_tree<Point, K, Data, Lp>::leaf_range k_means_tree<Point, K, Data, Lp>::assign_nodes(CloudPtrT& subcloud, node** nodes, size_t current_depth, const vector<int>& subinds)
{
std::cout << "Now doing level " << current_depth << std::endl;
std::cout << subcloud->size() << std::endl;
// do k-means of the points, iteratively call this again?
//PointT centroids[dim];
Eigen::Matrix<float, rows, dim> centroids;
Eigen::Matrix<float, rows, dim> last_centroids;
Eigen::Matrix<float, 1, dim> distances;
// first, pick centroids at random
vector<size_t> inds = sample_with_replacement(subcloud->size());
for (size_t i = 0; i < dim; ++i) {
//centroids[i] = subcloud->points[inds[i]];
centroids.col(i) = eig(subcloud->points[inds[i]]);
}
last_centroids.setZero();
// if there are no more than 1000 points, use them all;
// otherwise subsample down to roughly 1000 points and double the sample size every iteration
int skip = std::max(1 << int(log2(double(subcloud->size())/1000.0)), 1);
std::vector<int> clusters[dim];
//float cluster_distances[dim];
size_t min_iter = std::max(50, int(subcloud->size()/100)); // 50 100
size_t counter = 0;
PointT p;
while (true) {
// compute closest centroids
for (std::vector<int>& c : clusters) {
c.clear();
}
//int ind = 0;
//for (const PointT& p : subcloud->points) {
int _subcloud_size = subcloud->size();
for (int ind = 0; ind < _subcloud_size; ind += skip) {
p = subcloud->at(ind);
int closest;
// Wrap these two calls with some nice inlining
//distances = eig(p).transpose()*centroids;
distances = (centroids.colwise()-eig(p)).array().abs().colwise().sum();
//distances = (centroids.colwise()-eig(p)).colwise().squaredNorm();
distances.minCoeff(&closest);
clusters[closest].push_back(ind);
}
if (skip == 1 && (counter >= min_iter || compare_centroids(centroids, last_centroids))) {
break;
}
last_centroids = centroids;
// compute new centroids
for (size_t i = 0; i < dim; ++i) {
Eigen::Matrix<double, rows, 1> acc;
acc.setZero();
size_t nbr = 0;
for (size_t ind : clusters[i]) {
acc += eig(subcloud->at(ind)).template cast<double>();
++nbr;
}
if (nbr == 0) {
vector<size_t> temp = sample_with_replacement(subcloud->size());
//centroids[i] = subcloud->at(temp.back());
centroids.col(i) = eig(subcloud->at(temp.back()));
}
else {
acc *= 1.0/double(nbr);
//eig(centroids[i]) = acc.template cast<float>();
centroids.col(i) = acc.template cast<float>();
}
}
skip = std::max(skip/2, 1);
++counter;
}
leaf_range range(cloud->size(), 0);
for (size_t i = 0; i < dim; ++i) {
//std::cout << i << " size: " << clusters[i].size() << std::endl;
if (current_depth == depth || clusters[i].size() <= 1) {
leaf* l = new leaf;
l->inds.resize(clusters[i].size());
for (size_t j = 0; j < clusters[i].size(); ++j) {
l->inds[j] = subinds[clusters[i][j]];
}
//l->centroid = centroids[i];
eig(l->centroid) = centroids.col(i);
/*if (clusters[i].empty()) {
eig(l->centroid).setZeros();
}*/
l->range.first = leaves.size();
l->range.second = leaves.size()+1;
leaves.push_back(l);
nodes[i] = l;
range.first = std::min(range.first, l->range.first);
range.second = std::max(range.second, l->range.second);
continue;
}
//......... part of the code omitted here .........