本文整理汇总了C++中OptimizableGraph::Edge::computeError方法的典型用法代码示例。如果您正苦于以下问题：C++ Edge::computeError方法的具体用法？C++ Edge::computeError怎么用？C++ Edge::computeError使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类OptimizableGraph::Edge
的用法示例。
在下文中一共展示了Edge::computeError方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: computeActiveErrors
void SparseOptimizer::computeActiveErrors()
{
// call the callbacks in case there is something registered
HyperGraphActionSet& actions = _graphActions[AT_COMPUTEACTIVERROR];
if (actions.size() > 0) {
for (HyperGraphActionSet::iterator it = actions.begin(); it != actions.end(); ++it)
(*(*it))(this);
}
# ifdef G2O_OPENMP
# pragma omp parallel for default (shared) if (_activeEdges.size() > 50)
# endif
for (int k = 0; k < static_cast<int>(_activeEdges.size()); ++k) {
OptimizableGraph::Edge* e = _activeEdges[k];
e->computeError();
}
# ifndef NDEBUG
for (int k = 0; k < static_cast<int>(_activeEdges.size()); ++k) {
OptimizableGraph::Edge* e = _activeEdges[k];
bool hasNan = arrayHasNaN(e->errorData(), e->dimension());
if (hasNan) {
cerr << "computeActiveErrors(): found NaN in error for edge " << e << endl;
}
}
# endif
}
示例2: computeActiveErrors
void SparseOptimizer::computeActiveErrors()
{
  // Recompute the error vector of every active edge and, for edges that carry
  // a robust kernel, apply it to down-weight outlier measurements.
  // Fix: use pre-increment on the iterator; the original post-increment
  // created a useless iterator copy on every loop step.
  for (EdgeContainer::const_iterator it = _activeEdges.begin();
       it != _activeEdges.end(); ++it) {
    OptimizableGraph::Edge* e = *it;
    e->computeError();
    if (e->robustKernel())
      e->robustifyError();
  }
}
示例3: updateInitialization
//.........这里部分代码省略.........
int ind = v->hessianIndex();
//cerr << PVAR(ind) << endl;
if (ind >= 0) {
MatrixXd* m = _updateMat.block(ind, ind, true);
v->mapHessianMemory(m->data());
lastBlock = m;
}
}
lastBlock->diagonal().array() += 1e-6; // HACK to get Eigen value > 0
for (HyperGraph::EdgeSet::const_iterator it = eset.begin(); it != eset.end(); ++it) {
OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertices()[0];
OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertices()[1];
int ind1 = v1->hessianIndex();
if (ind1 == -1)
continue;
int ind2 = v2->hessianIndex();
if (ind2 == -1)
continue;
bool transposedBlock = ind1 > ind2;
if (transposedBlock) // make sure, we allocate the upper triangular block
swap(ind1, ind2);
MatrixXd* m = _updateMat.block(ind1, ind2, true);
e->mapHessianMemory(m->data(), 0, 1, transposedBlock);
}
// build the system into _updateMat
for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
OptimizableGraph::Edge * e = static_cast<OptimizableGraph::Edge*>(*it);
e->computeError();
}
for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
e->linearizeOplus();
}
for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
e->constructQuadraticForm();
}
// restore the original data for the vertex
for (int i = 0; i < idx; ++i) {
backupIdx[i].vertex->setHessianIndex(backupIdx[i].hessianIndex);
if (backupIdx[i].hessianData)
backupIdx[i].vertex->mapHessianMemory(backupIdx[i].hessianData);
}
// update the structure of the real block matrix
bool solverStatus = _algorithm->updateStructure(newVertices, eset);
bool updateStatus = computeCholeskyUpdate();
if (! updateStatus) {
cerr << "Error while computing update" << endl;
}
cholmod_sparse* updateAsSparseFactor = cholmod_factor_to_sparse(_cholmodFactor, &_cholmodCommon);
// convert CCS update by permuting back to the permutation of L
if (updateAsSparseFactor->nzmax > _permutedUpdate->nzmax) {
//cerr << "realloc _permutedUpdate" << endl;
cholmod_reallocate_triplet(updateAsSparseFactor->nzmax, _permutedUpdate, &_cholmodCommon);
}