This page collects typical C++ usage examples of the OptimizableGraph::Edge class from the g2o graph optimization library. If you are unsure what OptimizableGraph::Edge is for or how to use it, the examples below should help.
The 15 code examples shown here are ordered by popularity.
Example 1: gaugeFreedom
bool SparseOptimizer::gaugeFreedom()
{
  if (vertices().empty())
    return false;

  int maxDim = 0;
  for (HyperGraph::VertexIDMap::iterator it = vertices().begin(); it != vertices().end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
    maxDim = std::max(maxDim, v->dimension());
  }

  for (HyperGraph::VertexIDMap::iterator it = vertices().begin(); it != vertices().end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
    if (v->dimension() == maxDim) {
      // test for fixed vertex
      if (v->fixed()) {
        return false;
      }
      // test for full dimension prior
      for (HyperGraph::EdgeSet::const_iterator eit = v->edges().begin(); eit != v->edges().end(); ++eit) {
        OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*eit);
        if (e->vertices().size() == 1 && e->dimension() == maxDim)
          return false;
      }
    }
  }
  return true;
}
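In practice, gaugeFreedom() is paired with findGauge(): when no vertex of maximal dimension is fixed and no full-dimension prior exists, one vertex must be anchored before optimizing. A minimal sketch, assuming a populated g2o::SparseOptimizer; the helper name fixGaugeIfNeeded is ours:

#include <g2o/core/sparse_optimizer.h>

// Sketch: anchor the gauge before optimization if the graph leaves it free.
void fixGaugeIfNeeded(g2o::SparseOptimizer& optimizer)
{
  if (optimizer.gaugeFreedom()) {
    // findGauge() returns a vertex of maximal dimension suitable as an anchor
    g2o::OptimizableGraph::Vertex* gauge = optimizer.findGauge();
    if (gauge)
      gauge->setFixed(true);
  }
}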
Example 2: addEdge
bool OptimizableGraph::addEdge(HyperGraph::Edge* e_)
{
  OptimizableGraph::Edge* e = dynamic_cast<OptimizableGraph::Edge*>(e_);
  assert(e && "Edge does not inherit from OptimizableGraph::Edge");
  if (! e)
    return false;
  bool eresult = HyperGraph::addEdge(e);
  if (! eresult)
    return false;
  e->_internalId = _nextEdgeId++;
  if (e->numUndefinedVertices())
    return true;
  if (! e->resolveParameters()) {
    cerr << __FUNCTION__ << ": FATAL, cannot resolve parameters for edge " << e << endl;
    return false;
  }
  if (! e->resolveCaches()) {
    cerr << __FUNCTION__ << ": FATAL, cannot resolve caches for edge " << e << endl;
    return false;
  }
  _jacobianWorkspace.updateSize(e);
  return true;
}
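For context, here is a hedged sketch of the calling side, using the stock 2D SLAM types EdgeSE2/VertexSE2; addOdometryEdge is a made-up helper name. The graph only takes ownership of the edge on success, so the caller frees it when addEdge() returns false:

#include <g2o/core/sparse_optimizer.h>
#include <g2o/types/slam2d/edge_se2.h>

// Hypothetical helper showing the addEdge() contract from the caller's side.
bool addOdometryEdge(g2o::SparseOptimizer& optimizer,
                     g2o::VertexSE2* from, g2o::VertexSE2* to,
                     const g2o::SE2& measurement, const Eigen::Matrix3d& information)
{
  g2o::EdgeSE2* e = new g2o::EdgeSE2;
  e->setVertex(0, from);
  e->setVertex(1, to);
  e->setMeasurement(measurement);
  e->setInformation(information);
  if (!optimizer.addEdge(e)) { // false if parameters or caches cannot be resolved
    delete e;                  // the graph did not take ownership
    return false;
  }
  return true;
}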
Example 3: setRobustKernel
void MainWindow::setRobustKernel()
{
  SparseOptimizer* optimizer = viewer->graph;
  bool robustKernel = cbRobustKernel->isChecked();
  double huberWidth = leKernelWidth->text().toDouble();
  // odometry edges are those whose node ids differ by 1
  bool onlyLoop = cbOnlyLoop->isChecked();

  if (robustKernel) {
    QString strRobustKernel = coRobustKernel->currentText();
    AbstractRobustKernelCreator* creator = RobustKernelFactory::instance()->creator(strRobustKernel.toStdString());
    if (! creator) {
      cerr << strRobustKernel.toStdString() << " is not a valid robust kernel" << endl;
      return;
    }
    for (SparseOptimizer::EdgeSet::const_iterator it = optimizer->edges().begin(); it != optimizer->edges().end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      if (onlyLoop) {
        if (e->vertices().size() >= 2 && std::abs(e->vertex(0)->id() - e->vertex(1)->id()) != 1) {
          e->setRobustKernel(creator->construct());
          e->robustKernel()->setDelta(huberWidth);
        }
      } else {
        e->setRobustKernel(creator->construct());
        e->robustKernel()->setDelta(huberWidth);
      }
    }
  } else {
    for (SparseOptimizer::EdgeSet::const_iterator it = optimizer->edges().begin(); it != optimizer->edges().end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      e->setRobustKernel(0);
    }
  }
}
Example 4: setRobustKernel
void MainWindow::setRobustKernel()
{
  SparseOptimizer* optimizer = viewer->graph;
  bool robustKernel = cbRobustKernel->isChecked();
  double huberWidth = leKernelWidth->text().toDouble();

  if (robustKernel) {
    QString strRobustKernel = coRobustKernel->currentText();
    AbstractRobustKernelCreator* creator = RobustKernelFactory::instance()->creator(strRobustKernel.toStdString());
    if (! creator) {
      cerr << strRobustKernel.toStdString() << " is not a valid robust kernel" << endl;
      return;
    }
    for (SparseOptimizer::EdgeSet::const_iterator it = optimizer->edges().begin(); it != optimizer->edges().end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      e->setRobustKernel(creator->construct());
      e->robustKernel()->setDelta(huberWidth);
    }
  } else {
    for (SparseOptimizer::EdgeSet::const_iterator it = optimizer->edges().begin(); it != optimizer->edges().end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      e->setRobustKernel(0);
    }
  }
}
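Both variants above depend on Qt widgets, but the core pattern is just the loop over optimizer->edges(). Here is a GUI-free sketch using the Huber kernel directly; setHuberOnAllEdges is our name, and note that the edge takes ownership of the kernel:

#include <g2o/core/sparse_optimizer.h>
#include <g2o/core/robust_kernel_impl.h>

// Sketch: attach a Huber kernel with the given width to every edge.
void setHuberOnAllEdges(g2o::SparseOptimizer& optimizer, double huberWidth)
{
  for (g2o::SparseOptimizer::EdgeSet::const_iterator it = optimizer.edges().begin();
       it != optimizer.edges().end(); ++it) {
    g2o::OptimizableGraph::Edge* e = static_cast<g2o::OptimizableGraph::Edge*>(*it);
    g2o::RobustKernelHuber* kernel = new g2o::RobustKernelHuber;
    kernel->setDelta(huberWidth);
    e->setRobustKernel(kernel); // the edge owns the kernel and frees it
  }
}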
Example 5: computeActiveErrors
void SparseOptimizer::computeActiveErrors()
{
  // call the callbacks in case there is something registered
  HyperGraphActionSet& actions = _graphActions[AT_COMPUTEACTIVERROR];
  if (actions.size() > 0) {
    for (HyperGraphActionSet::iterator it = actions.begin(); it != actions.end(); ++it)
      (*(*it))(this);
  }

# ifdef G2O_OPENMP
# pragma omp parallel for default (shared) if (_activeEdges.size() > 50)
# endif
  for (int k = 0; k < static_cast<int>(_activeEdges.size()); ++k) {
    OptimizableGraph::Edge* e = _activeEdges[k];
    e->computeError();
  }

# ifndef NDEBUG
  for (int k = 0; k < static_cast<int>(_activeEdges.size()); ++k) {
    OptimizableGraph::Edge* e = _activeEdges[k];
    bool hasNan = arrayHasNaN(e->errorData(), e->dimension());
    if (hasNan) {
      cerr << "computeActiveErrors(): found NaN in error for edge " << e << endl;
    }
  }
# endif
}
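computeActiveErrors() only refreshes the cached per-edge error vectors; it is usually followed by activeChi2() to read the total. A minimal sketch, where currentChi2 is our name:

#include <g2o/core/sparse_optimizer.h>

// Sketch: refresh the per-edge errors, then read the summed chi2.
double currentChi2(g2o::SparseOptimizer& optimizer)
{
  optimizer.computeActiveErrors(); // recompute e->error() for every active edge
  return optimizer.activeChi2();   // sum of chi2 over the active edges
}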
Example 6: updateInitialization
bool SparseOptimizer::updateInitialization(HyperGraph::VertexSet& vset, HyperGraph::EdgeSet& eset)
{
  std::vector<HyperGraph::Vertex*> newVertices;
  newVertices.reserve(vset.size());
  _activeVertices.reserve(_activeVertices.size() + vset.size());
  _activeEdges.reserve(_activeEdges.size() + eset.size());
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    if (!e->allVerticesFixed())
      _activeEdges.push_back(e);
  }

  // update the index mapping
  size_t next = _ivMap.size();
  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    if (! v->fixed()) {
      if (! v->marginalized()) {
        v->setHessianIndex(next);
        _ivMap.push_back(v);
        newVertices.push_back(v);
        _activeVertices.push_back(v);
        next++;
      } else { // not supported right now
        abort();
      }
    } else {
      v->setHessianIndex(-1);
    }
  }

  //if (newVertices.size() != vset.size())
  //  cerr << __PRETTY_FUNCTION__ << ": something went wrong " << PVAR(vset.size()) << " " << PVAR(newVertices.size()) << endl;
  return _algorithm->updateStructure(newVertices, eset);
}
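updateInitialization() is the incremental counterpart of initializeOptimization(): it extends the index mapping instead of rebuilding it. A hedged sketch of feeding it one new vertex and one new edge; incrementalStep is our name, and both objects are assumed to have already been added to the graph:

#include <g2o/core/sparse_optimizer.h>

// Sketch: grow the active set by one vertex and one edge.
bool incrementalStep(g2o::SparseOptimizer& optimizer,
                     g2o::OptimizableGraph::Vertex* newVertex,
                     g2o::OptimizableGraph::Edge* newEdge)
{
  g2o::HyperGraph::VertexSet vset;
  g2o::HyperGraph::EdgeSet eset;
  vset.insert(newVertex);
  eset.insert(newEdge);
  return optimizer.updateInitialization(vset, eset);
}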
示例7: default
template <typename Traits>
bool BlockSolver<Traits>::buildSystem()
{
  // clear b vector
# ifdef G2O_OPENMP
# pragma omp parallel for default (shared) if (_optimizer->indexMapping().size() > 1000)
# endif
  for (int i = 0; i < static_cast<int>(_optimizer->indexMapping().size()); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    assert(v);
    v->clearQuadraticForm();
  }
  _Hpp->clear();
  if (_doSchur) {
    _Hll->clear();
    _Hpl->clear();
  }

  // resetting the terms for the pairwise constraints
  // build up the current system by storing the Hessian blocks in the edges and vertices
# ifndef G2O_OPENMP
  // no threading, we do not need to copy the workspace
  JacobianWorkspace& jacobianWorkspace = _optimizer->jacobianWorkspace();
# else
  // if running with threads, we need a copy of the workspace for each thread
  JacobianWorkspace jacobianWorkspace = _optimizer->jacobianWorkspace();
# pragma omp parallel for default (shared) firstprivate(jacobianWorkspace) if (_optimizer->activeEdges().size() > 100)
# endif
  for (int k = 0; k < static_cast<int>(_optimizer->activeEdges().size()); ++k) {
    OptimizableGraph::Edge* e = _optimizer->activeEdges()[k];
    e->linearizeOplus(jacobianWorkspace); // jacobian of the nodes' oplus (manifold)
    e->constructQuadraticForm();
# ifndef NDEBUG
    for (size_t i = 0; i < e->vertices().size(); ++i) {
      const OptimizableGraph::Vertex* v = static_cast<const OptimizableGraph::Vertex*>(e->vertex(i));
      if (! v->fixed()) {
        bool hasANan = arrayHasNaN(jacobianWorkspace.workspaceForVertex(i), e->dimension() * v->dimension());
        if (hasANan) {
          std::cerr << "buildSystem(): NaN within Jacobian for edge " << e << " for vertex " << i << std::endl;
          break;
        }
      }
    }
# endif
  }

  // flush the current system into the sparse block matrix
# ifdef G2O_OPENMP
# pragma omp parallel for default (shared) if (_optimizer->indexMapping().size() > 1000)
# endif
  for (int i = 0; i < static_cast<int>(_optimizer->indexMapping().size()); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    int iBase = v->colInHessian();
    if (v->marginalized())
      iBase += _sizePoses;
    v->copyB(_b + iBase);
  }
  return true;
}
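buildSystem() is invoked for you on each iteration once a solver is attached to the optimizer. A typical wiring, sketched with the classic raw-pointer g2o API of the same vintage as this code; BlockSolver_6_3 suits SE3 poses with 3D landmarks, and other block sizes follow the same pattern:

#include <g2o/core/sparse_optimizer.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/optimization_algorithm_levenberg.h>
#include <g2o/solvers/eigen/linear_solver_eigen.h>

// Sketch: attach a Levenberg-Marquardt algorithm backed by a BlockSolver.
void configureOptimizer(g2o::SparseOptimizer& optimizer)
{
  typedef g2o::BlockSolver_6_3 BlockSolverType;
  typedef g2o::LinearSolverEigen<BlockSolverType::PoseMatrixType> LinearSolverType;
  LinearSolverType* linearSolver = new LinearSolverType();
  BlockSolverType* blockSolver = new BlockSolverType(linearSolver);
  optimizer.setAlgorithm(new g2o::OptimizationAlgorithmLevenberg(blockSolver));
}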
Example 8: operator()
double EstimatePropagatorCost::operator()(OptimizableGraph::Edge* edge,
                                          const OptimizableGraph::VertexSet& from,
                                          OptimizableGraph::Vertex* to_) const
{
  OptimizableGraph::Edge* e = dynamic_cast<OptimizableGraph::Edge*>(edge);
  OptimizableGraph::Vertex* to = dynamic_cast<OptimizableGraph::Vertex*>(to_);
  SparseOptimizer::EdgeContainer::const_iterator it = _graph->findActiveEdge(e);
  if (it == _graph->activeEdges().end()) // it has to be an active edge
    return std::numeric_limits<double>::max();
  return e->initialEstimatePossible(from, to);
}
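The functor is rarely called directly; it is handed to computeInitialGuess(), which uses it to rank edges while propagating estimates outward from the fixed vertices (see Example 15 below). Sketch, with initialGuess as our name:

#include <g2o/core/sparse_optimizer.h>
#include <g2o/core/estimate_propagator.h>

// Sketch: spanning-tree style initialization driven by the cost functor.
void initialGuess(g2o::SparseOptimizer& optimizer)
{
  g2o::EstimatePropagatorCost costFunction(&optimizer);
  optimizer.computeInitialGuess(costFunction);
}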
Example 9: computeActiveErrors
void SparseOptimizer::computeActiveErrors()
{
  for (EdgeContainer::const_iterator it = _activeEdges.begin(); it != _activeEdges.end(); ++it) {
    OptimizableGraph::Edge* e = *it;
    e->computeError();
    if (e->robustKernel())
      e->robustifyError();
  }
}
Example 10: linearizeSystem
void SparseOptimizer::linearizeSystem()
{
# ifdef G2O_OPENMP
# pragma omp parallel for default (shared) if (_activeEdges.size() > 50)
# endif
  for (size_t k = 0; k < _activeEdges.size(); ++k) {
    OptimizableGraph::Edge* e = _activeEdges[k];
    // jacobian of the nodes' oplus (manifold)
    e->linearizeOplus();
  }
}
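linearizeOplus() is the hook a custom edge may override; when it is not overridden, g2o falls back to numeric differentiation. A hedged sketch of a minimal unary edge: EdgePriorXY is a made-up, GPS-like prior on the position of a VertexSE2, with read/write stubbed out:

#include <g2o/core/base_unary_edge.h>
#include <g2o/types/slam2d/vertex_se2.h>

// Hypothetical unary edge: penalizes the distance between the vertex
// translation and a measured 2D position.
class EdgePriorXY : public g2o::BaseUnaryEdge<2, Eigen::Vector2d, g2o::VertexSE2>
{
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  void computeError()
  {
    const g2o::VertexSE2* v = static_cast<const g2o::VertexSE2*>(_vertices[0]);
    _error = v->estimate().translation() - _measurement;
  }
  // no analytic Jacobian given: the base class evaluates linearizeOplus() numerically
  virtual bool read(std::istream&) { return false; }
  virtual bool write(std::ostream&) const { return false; }
};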
Example 11: activeVertexChi
double activeVertexChi(const OptimizableGraph::Vertex* v)
{
  const SparseOptimizer* s = dynamic_cast<const SparseOptimizer*>(v->graph());
  if (!s) // the vertex does not belong to a SparseOptimizer
    return -1;
  const OptimizableGraph::EdgeContainer& av = s->activeEdges();
  double chi = 0;
  int ne = 0;
  for (HyperGraph::EdgeSet::const_iterator it = v->edges().begin(); it != v->edges().end(); ++it) {
    OptimizableGraph::Edge* e = dynamic_cast<OptimizableGraph::Edge*>(*it);
    if (!e)
      continue;
    if (s->findActiveEdge(e) != av.end()) {
      chi += e->chi2();
      ne++;
    }
  }
  if (! ne)
    return -1;
  return chi / ne;
}
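A usage sketch that builds on the function above, dumping the per-vertex average after an optimization run; reportVertexChi is our name:

#include <iostream>
#include <g2o/core/sparse_optimizer.h>

// Sketch: print the mean chi2 of the active edges incident to each vertex.
void reportVertexChi(const g2o::SparseOptimizer& optimizer)
{
  for (g2o::HyperGraph::VertexIDMap::const_iterator it = optimizer.vertices().begin();
       it != optimizer.vertices().end(); ++it) {
    const g2o::OptimizableGraph::Vertex* v =
        static_cast<const g2o::OptimizableGraph::Vertex*>(it->second);
    std::cerr << "vertex " << v->id() << ": mean chi2 = " << activeVertexChi(v) << std::endl;
  }
}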
Example 12: addEdge
bool OptimizableGraph::addEdge(HyperGraph::Edge* e_)
{
  OptimizableGraph::Edge* e = dynamic_cast<OptimizableGraph::Edge*>(e_);
  assert(e && "Edge does not inherit from OptimizableGraph::Edge");
  if (! e)
    return false;
  bool eresult = HyperGraph::addEdge(e);
  if (! eresult)
    return false;
  e->_internalId = _nextEdgeId++;
  if (! e->resolveParameters()) {
    cerr << __FUNCTION__ << ": FATAL, cannot resolve parameters for edge " << e << endl;
    return false;
  }
  if (! e->resolveCaches()) {
    cerr << __FUNCTION__ << ": FATAL, cannot resolve caches for edge " << e << endl;
    return false;
  }
  _jacobianWorkspace.updateSize(e);
  return true;
}
Example 13: setEdgeVertex
bool OptimizableGraph::setEdgeVertex(HyperGraph::Edge* e, int pos, HyperGraph::Vertex* v)
{
  if (! HyperGraph::setEdgeVertex(e, pos, v)) {
    return false;
  }
  if (!e->numUndefinedVertices()) {
#ifndef NDEBUG
    OptimizableGraph::Edge* ee = dynamic_cast<OptimizableGraph::Edge*>(e);
    assert(ee && "Edge is not a OptimizableGraph::Edge");
#else
    OptimizableGraph::Edge* ee = static_cast<OptimizableGraph::Edge*>(e);
#endif
    if (! ee->resolveParameters()) {
      cerr << __FUNCTION__ << ": FATAL, cannot resolve parameters for edge " << e << endl;
      return false;
    }
    if (! ee->resolveCaches()) {
      cerr << __FUNCTION__ << ": FATAL, cannot resolve caches for edge " << e << endl;
      return false;
    }
    _jacobianWorkspace.updateSize(e);
  }
  return true;
}
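Together with Example 2, this enables a deferred-binding flow: an edge can be added before its endpoints are known, and parameter/cache resolution runs once the last vertex is bound. A sketch under those assumptions, with deferredBinding as our name:

#include <g2o/core/sparse_optimizer.h>
#include <g2o/types/slam2d/edge_se2.h>

// Sketch: add the edge first, bind its endpoints afterwards.
void deferredBinding(g2o::SparseOptimizer& optimizer,
                     g2o::VertexSE2* v0, g2o::VertexSE2* v1)
{
  g2o::EdgeSE2* e = new g2o::EdgeSE2;
  optimizer.addEdge(e);              // vertices undefined: resolution is deferred
  optimizer.setEdgeVertex(e, 0, v0); // one endpoint still missing
  optimizer.setEdgeVertex(e, 1, v1); // last binding triggers the resolution above
}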
示例14: abort
template <typename Traits>
bool BlockSolver<Traits>::updateStructure(const std::vector<HyperGraph::Vertex*>& vset, const HyperGraph::EdgeSet& edges)
{
  for (std::vector<HyperGraph::Vertex*>::const_iterator vit = vset.begin(); vit != vset.end(); ++vit) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*vit);
    int dim = v->dimension();
    if (! v->marginalized()) {
      v->setColInHessian(_sizePoses);
      _sizePoses += dim;
      _Hpp->rowBlockIndices().push_back(_sizePoses);
      _Hpp->colBlockIndices().push_back(_sizePoses);
      _Hpp->blockCols().push_back(typename SparseBlockMatrix<PoseMatrixType>::IntBlockMap());
      ++_numPoses;
      int ind = v->hessianIndex();
      PoseMatrixType* m = _Hpp->block(ind, ind, true);
      v->mapHessianMemory(m->data());
    } else {
      std::cerr << "updateStructure(): Schur not supported" << std::endl;
      abort();
    }
  }
  resizeVector(_sizePoses + _sizeLandmarks);

  for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
      OptimizableGraph::Vertex* v1 = static_cast<OptimizableGraph::Vertex*>(e->vertex(viIdx));
      int ind1 = v1->hessianIndex();
      int indexV1Bak = ind1;
      if (ind1 == -1)
        continue;
      for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
        OptimizableGraph::Vertex* v2 = static_cast<OptimizableGraph::Vertex*>(e->vertex(vjIdx));
        int ind2 = v2->hessianIndex();
        if (ind2 == -1)
          continue;
        ind1 = indexV1Bak;
        bool transposedBlock = ind1 > ind2;
        if (transposedBlock) // make sure we allocate the upper triangular block
          std::swap(ind1, ind2);
        if (! v1->marginalized() && ! v2->marginalized()) {
          PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
          e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
        } else {
          std::cerr << __PRETTY_FUNCTION__ << ": not supported" << std::endl;
        }
      }
    }
  }
  return true;
}
Example 15: computeInitialGuess
void SparseOptimizer::computeInitialGuess(EstimatePropagatorCost& costFunction)
{
  OptimizableGraph::VertexSet emptySet;
  std::set<Vertex*> backupVertices;
  HyperGraph::VertexSet fixedVertices; // these are the root nodes where to start the initialization
  for (EdgeContainer::iterator it = _activeEdges.begin(); it != _activeEdges.end(); ++it) {
    OptimizableGraph::Edge* e = *it;
    for (size_t i = 0; i < e->vertices().size(); ++i) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(e->vertex(i));
      if (!v)
        continue;
      if (v->fixed())
        fixedVertices.insert(v);
      else { // check for having a prior which is able to fully initialize a vertex
        for (EdgeSet::const_iterator vedgeIt = v->edges().begin(); vedgeIt != v->edges().end(); ++vedgeIt) {
          OptimizableGraph::Edge* vedge = static_cast<OptimizableGraph::Edge*>(*vedgeIt);
          if (vedge->vertices().size() == 1 && vedge->initialEstimatePossible(emptySet, v) > 0.) {
            //cerr << "Initialize with prior for " << v->id() << endl;
            vedge->initialEstimate(emptySet, v);
            fixedVertices.insert(v);
          }
        }
      }
      if (v->hessianIndex() == -1) {
        std::set<Vertex*>::const_iterator foundIt = backupVertices.find(v);
        if (foundIt == backupVertices.end()) {
          v->push();
          backupVertices.insert(v);
        }
      }
    }
  }

  EstimatePropagator estimatePropagator(this);
  estimatePropagator.propagate(fixedVertices, costFunction);

  // restoring the vertices that should not be initialized
  for (std::set<Vertex*>::iterator it = backupVertices.begin(); it != backupVertices.end(); ++it) {
    Vertex* v = *it;
    v->pop();
  }
  if (verbose()) {
    computeActiveErrors();
    cerr << "iteration= -1\t chi2= " << activeChi2()
         << "\t time= 0.0"
         << "\t cumTime= 0.0"
         << "\t (using initial guess from " << costFunction.name() << ")" << endl;
  }
}
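To close, a sketch of how these pieces combine in a typical run; runOptimization is our name, and the zero-argument computeInitialGuess() overload uses a default EstimatePropagatorCost:

#include <g2o/core/sparse_optimizer.h>

// Sketch: initialize the active subgraph, seed the estimates, optimize.
bool runOptimization(g2o::SparseOptimizer& optimizer, int iterations)
{
  if (!optimizer.initializeOptimization())
    return false;
  optimizer.computeInitialGuess();           // default EstimatePropagatorCost
  return optimizer.optimize(iterations) > 0; // returns the iterations performed
}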