This page collects typical usage examples of the C++ method OptimizableGraph::Vertex::hessianIndex from the g2o graph-optimization library. If you are unsure what Vertex::hessianIndex does or how to call it, the examples below may help; they also illustrate the enclosing class OptimizableGraph::Vertex. In g2o, hessianIndex() reports the position of a vertex inside the optimizer's index mapping, while vertices that are fixed or not part of the active system report -1.
The following shows 6 code examples of Vertex::hessianIndex, each excerpted from a real project.
Example 1: removeVertex
bool SparseOptimizer::removeVertex(HyperGraph::Vertex* v)
{
  OptimizableGraph::Vertex* vv = static_cast<OptimizableGraph::Vertex*>(v);
  if (vv->hessianIndex() >= 0) {
    clearIndexMapping();
    _ivMap.clear();
  }
  return HyperGraph::removeVertex(v);
}
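The guard above relies on the g2o convention that hessianIndex() returns -1 for any vertex without a slot in the current index mapping, so only a non-negative index forces the mapping to be rebuilt. A minimal sketch of that check in isolation (the helper name isInActiveSystem is hypothetical, not part of g2o):

#include <g2o/core/optimizable_graph.h>

// Sketch: query whether a vertex currently occupies a Hessian slot.
bool isInActiveSystem(const g2o::OptimizableGraph::Vertex* v)
{
  // hessianIndex() is -1 while the vertex has no Hessian slot, e.g.
  // because it is fixed or the index mapping has been cleared.
  return v && v->hessianIndex() >= 0;
}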
Example 2: updateStructure
template <typename Traits>
bool BlockSolver<Traits>::updateStructure(const std::vector<HyperGraph::Vertex*>& vset, const HyperGraph::EdgeSet& edges)
{
  for (std::vector<HyperGraph::Vertex*>::const_iterator vit = vset.begin(); vit != vset.end(); ++vit) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*vit);
    int dim = v->dimension();
    if (! v->marginalized()){
      v->setColInHessian(_sizePoses);
      _sizePoses += dim;
      _Hpp->rowBlockIndices().push_back(_sizePoses);
      _Hpp->colBlockIndices().push_back(_sizePoses);
      _Hpp->blockCols().push_back(typename SparseBlockMatrix<PoseMatrixType>::IntBlockMap());
      ++_numPoses;
      int ind = v->hessianIndex();
      PoseMatrixType* m = _Hpp->block(ind, ind, true);
      v->mapHessianMemory(m->data());
    } else {
      std::cerr << "updateStructure(): Schur not supported" << std::endl;
      abort();
    }
  }
  resizeVector(_sizePoses + _sizeLandmarks);

  for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);

    for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
      OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertex(viIdx);
      int ind1 = v1->hessianIndex();
      int indexV1Bak = ind1;
      if (ind1 == -1)
        continue;
      for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
        OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertex(vjIdx);
        int ind2 = v2->hessianIndex();
        if (ind2 == -1)
          continue;
        ind1 = indexV1Bak;
        bool transposedBlock = ind1 > ind2;
        if (transposedBlock) // make sure we allocate the upper triangular block
          std::swap(ind1, ind2);
        if (! v1->marginalized() && !v2->marginalized()) {
          PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
          e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
        } else {
          std::cerr << __PRETTY_FUNCTION__ << ": not supported" << std::endl;
        }
      }
    }
  }
  return true;
}
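The swap before allocating the pairwise block is what keeps _Hpp upper triangular: the block of a vertex pair always lives at (min, max) of the two Hessian indices, and the edge is told whether it sees the block transposed. A condensed sketch of just that indexing rule as a self-contained helper (the function name is hypothetical; both indices are assumed non-negative):

#include <utility>

// Sketch: map two Hessian indices to the (row, col) of the stored block.
// Only the upper triangle is stored, so the smaller index becomes the row.
std::pair<int, int> upperTriangularSlot(int ind1, int ind2, bool& transposed)
{
  transposed = ind1 > ind2;   // caller must then access the block transposed
  if (transposed)
    std::swap(ind1, ind2);
  return std::make_pair(ind1, ind2);
}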
Example 3: computeInitialGuess
void SparseOptimizer::computeInitialGuess(EstimatePropagatorCost& costFunction)
{
  OptimizableGraph::VertexSet emptySet;
  std::set<Vertex*> backupVertices;
  HyperGraph::VertexSet fixedVertices; // these are the root nodes where to start the initialization
  for (EdgeContainer::iterator it = _activeEdges.begin(); it != _activeEdges.end(); ++it) {
    OptimizableGraph::Edge* e = *it;
    for (size_t i = 0; i < e->vertices().size(); ++i) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(e->vertex(i));
      if (!v)
        continue;
      if (v->fixed())
        fixedVertices.insert(v);
      else { // check for a prior edge that can fully initialize the vertex
        for (EdgeSet::const_iterator vedgeIt = v->edges().begin(); vedgeIt != v->edges().end(); ++vedgeIt) {
          OptimizableGraph::Edge* vedge = static_cast<OptimizableGraph::Edge*>(*vedgeIt);
          if (vedge->vertices().size() == 1 && vedge->initialEstimatePossible(emptySet, v) > 0.) {
            //cerr << "Initialize with prior for " << v->id() << endl;
            vedge->initialEstimate(emptySet, v);
            fixedVertices.insert(v);
          }
        }
      }
      if (v->hessianIndex() == -1) {
        std::set<Vertex*>::const_iterator foundIt = backupVertices.find(v);
        if (foundIt == backupVertices.end()) {
          v->push();
          backupVertices.insert(v);
        }
      }
    }
  }

  EstimatePropagator estimatePropagator(this);
  estimatePropagator.propagate(fixedVertices, costFunction);

  // restore the vertices that should not be initialized
  for (std::set<Vertex*>::iterator it = backupVertices.begin(); it != backupVertices.end(); ++it) {
    Vertex* v = *it;
    v->pop();
  }
  if (verbose()) {
    computeActiveErrors();
    cerr << "iteration= -1\t chi2= " << activeChi2()
         << "\t time= 0.0"
         << "\t cumTime= 0.0"
         << "\t (using initial guess from " << costFunction.name() << ")" << endl;
  }
}
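The push()/pop() pairing is g2o's standard backup mechanism: push() saves the current estimate on a per-vertex stack and pop() restores it, so vertices with hessianIndex() == -1 leave the initial-guess propagation with their estimates untouched. A minimal sketch of that pattern in isolation (touchEstimate stands for any code that may overwrite the estimate and is hypothetical):

#include <g2o/core/optimizable_graph.h>

void touchEstimate(g2o::OptimizableGraph::Vertex* v); // hypothetical mutation

// Sketch: preserve the estimate of a vertex outside the active system.
void guardedUpdate(g2o::OptimizableGraph::Vertex* v)
{
  if (v->hessianIndex() == -1) { // not part of the current index mapping
    v->push();                   // save the current estimate
    touchEstimate(v);            // may overwrite it
    v->pop();                    // restore the saved estimate
  }
}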
Example 4: updateInitialization
bool SparseOptimizerIncremental::updateInitialization(HyperGraph::VertexSet& vset, HyperGraph::EdgeSet& eset)
{
  if (batchStep) {
    return SparseOptimizerOnline::updateInitialization(vset, eset);
  }

  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    v->clearQuadraticForm(); // be sure that b is zero for this vertex
  }

  // get the touched vertices
  _touchedVertices.clear();
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    OptimizableGraph::Vertex* v1 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
    OptimizableGraph::Vertex* v2 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
    if (! v1->fixed())
      _touchedVertices.insert(v1);
    if (! v2->fixed())
      _touchedVertices.insert(v2);
  }
  //cerr << PVAR(_touchedVertices.size()) << endl;

  // update the internal structures
  std::vector<HyperGraph::Vertex*> newVertices;
  newVertices.reserve(vset.size());
  _activeVertices.reserve(_activeVertices.size() + vset.size());
  _activeEdges.reserve(_activeEdges.size() + eset.size());
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it)
    _activeEdges.push_back(static_cast<OptimizableGraph::Edge*>(*it));
  //cerr << "updating internal done." << endl;

  // update the index mapping
  size_t next = _ivMap.size();
  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    if (! v->fixed()) {
      if (! v->marginalized()) {
        v->setHessianIndex(next);
        _ivMap.push_back(v);
        newVertices.push_back(v);
        _activeVertices.push_back(v);
        next++;
      }
      else // not supported right now
        abort();
    }
    else {
      v->setHessianIndex(-1);
    }
  }
  //cerr << "updating index mapping done." << endl;

  // back up the temporary index and prepare the sorting structure
  VertexBackup backupIdx[_touchedVertices.size()]; // variable-length array: relies on a compiler extension (GCC/Clang)
  memset(backupIdx, 0, sizeof(VertexBackup) * _touchedVertices.size());
  int idx = 0;
  for (HyperGraph::VertexSet::iterator it = _touchedVertices.begin(); it != _touchedVertices.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    backupIdx[idx].hessianIndex = v->hessianIndex();
    backupIdx[idx].vertex = v;
    backupIdx[idx].hessianData = v->hessianData();
    ++idx;
  }
  sort(backupIdx, backupIdx + _touchedVertices.size()); // sort by hessianIndex; this is the same order used later by the optimizer
  for (int i = 0; i < idx; ++i) {
    backupIdx[i].vertex->setHessianIndex(i);
  }
  //cerr << "backup tempindex done." << endl;

  // build the structure of the update
  _updateMat.clear(true); // get rid of the old matrix structure
  _updateMat.rowBlockIndices().clear();
  _updateMat.colBlockIndices().clear();
  _updateMat.blockCols().clear();

  // place the current vertices in _updateMat
  MatrixXd* lastBlock = 0;
  int sizePoses = 0;
  for (int i = 0; i < idx; ++i) {
    OptimizableGraph::Vertex* v = backupIdx[i].vertex;
    int dim = v->dimension();
    sizePoses += dim;
    _updateMat.rowBlockIndices().push_back(sizePoses);
    _updateMat.colBlockIndices().push_back(sizePoses);
    _updateMat.blockCols().push_back(SparseBlockMatrix<MatrixXd>::IntBlockMap());
    int ind = v->hessianIndex();
    //cerr << PVAR(ind) << endl;
    if (ind >= 0) {
      MatrixXd* m = _updateMat.block(ind, ind, true);
      v->mapHessianMemory(m->data());
      lastBlock = m;
    }
  }
  lastBlock->diagonal().array() += 1e-6; // HACK to keep the eigenvalues > 0
  for (HyperGraph::EdgeSet::const_iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    //......... the remainder of this example is omitted .........
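The call to sort over backupIdx only makes sense if VertexBackup orders by the saved Hessian index, which is also the order the optimizer uses later. A plausible sketch of such a record, inferred from the fields used above (the operator< is an assumption, not quoted from g2o):

#include <g2o/core/optimizable_graph.h>

// Sketch: per-vertex backup record, sortable by the saved Hessian index.
struct VertexBackup
{
  int hessianIndex;                      // slot before the re-indexing
  g2o::OptimizableGraph::Vertex* vertex; // the touched vertex
  double* hessianData;                   // its previously mapped block
  bool operator<(const VertexBackup& other) const
  {
    return hessianIndex < other.hessianIndex; // optimizer ordering
  }
};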
Example 5: labelStarEdges
bool Star::labelStarEdges(int iterations, EdgeLabeler* labeler){
  // mark all vertices in the lowLevelEdges as floating
  bool ok = true;
  std::set<OptimizableGraph::Vertex*> vset;
  for (HyperGraph::EdgeSet::iterator it = _lowLevelEdges.begin(); it != _lowLevelEdges.end(); it++){
    HyperGraph::Edge* e = *it;
    for (size_t i = 0; i < e->vertices().size(); i++){
      OptimizableGraph::Vertex* v = (OptimizableGraph::Vertex*)e->vertices()[i];
      v->setFixed(false);
      vset.insert(v);
    }
  }
  for (std::set<OptimizableGraph::Vertex*>::iterator it = vset.begin(); it != vset.end(); it++){
    OptimizableGraph::Vertex* v = *it;
    v->push();
  }

  // fix all vertices in the gauge
  //cerr << "fixing gauge: ";
  for (HyperGraph::VertexSet::iterator it = _gauge.begin(); it != _gauge.end(); it++){
    OptimizableGraph::Vertex* v = (OptimizableGraph::Vertex*)*it;
    //cerr << v->id() << " ";
    v->setFixed(true);
  }
  //cerr << endl;

  if (iterations > 0){
    _optimizer->initializeOptimization(_lowLevelEdges);
    _optimizer->computeInitialGuess();
    int result = _optimizer->optimize(iterations);
    if (result < 1){
      cerr << "Vertices num: " << _optimizer->activeVertices().size() << " ids: ";
      for (size_t i = 0; i < _optimizer->indexMapping().size(); i++){
        cerr << _optimizer->indexMapping()[i]->id() << " ";
      }
      cerr << endl;
      cerr << "!!! optimization failure" << endl;
      cerr << "star size=" << _lowLevelEdges.size() << endl;
      cerr << "gauge: ";
      for (HyperGraph::VertexSet::iterator it = _gauge.begin(); it != _gauge.end(); it++){
        OptimizableGraph::Vertex* v = (OptimizableGraph::Vertex*)*it;
        cerr << "[" << v->id() << " " << v->hessianIndex() << "] ";
      }
      cerr << endl;
      ok = false;
    }
  } else {
    optimizer()->initializeOptimization(_lowLevelEdges);
    // cerr << "guess" << endl;
    //optimizer()->computeInitialGuess();
    // cerr << "solver init" << endl;
    optimizer()->solver()->init();
    // cerr << "structure" << endl;
    OptimizationAlgorithmWithHessian* solverWithHessian = dynamic_cast<OptimizationAlgorithmWithHessian*>(optimizer()->solver());
    if (!solverWithHessian->buildLinearStructure())
      cerr << "FATAL: failure while building linear structure" << endl;
    // cerr << "errors" << endl;
    optimizer()->computeActiveErrors();
    // cerr << "system" << endl;
    solverWithHessian->updateLinearSystem();
  }

  std::set<OptimizableGraph::Edge*> star;
  for (HyperGraph::EdgeSet::iterator it = _starEdges.begin(); it != _starEdges.end(); it++){
    star.insert((OptimizableGraph::Edge*)*it);
  }
  if (ok) {
    int result = labeler->labelEdges(star);
    if (result < 0)
      ok = false;
  }

  // restore all vertex estimates and release the gauge
  for (std::set<OptimizableGraph::Vertex*>::iterator it = vset.begin(); it != vset.end(); it++){
    OptimizableGraph::Vertex* v = *it;
    v->pop();
  }
  for (HyperGraph::VertexSet::iterator it = _gauge.begin(); it != _gauge.end(); it++){
    OptimizableGraph::Vertex* v = (OptimizableGraph::Vertex*)*it;
    v->setFixed(false);
  }
  return ok;
}
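The failure diagnostics print hessianIndex() next to each gauge vertex id because fixing a vertex removes it from the index mapping on the next initializeOptimization(), after which its index reads -1. A minimal sketch of that relationship (assumes an already populated g2o::SparseOptimizer containing a vertex with id 0):

#include <cassert>
#include <g2o/core/sparse_optimizer.h>

// Sketch: a fixed vertex drops out of the index mapping.
void demonstrateGaugeFixing(g2o::SparseOptimizer& optimizer)
{
  g2o::OptimizableGraph::Vertex* gauge =
      static_cast<g2o::OptimizableGraph::Vertex*>(optimizer.vertex(0));
  gauge->setFixed(true);               // anchor the gauge freedom
  optimizer.initializeOptimization();  // rebuilds the index mapping
  assert(gauge->hessianIndex() == -1); // fixed => no Hessian slot
}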
Example 6: solveOrientation
bool SolverSLAM2DLinear::solveOrientation()
{
  assert(_optimizer->indexMapping().size() + 1 == _optimizer->vertices().size() && "Needs to operate on full graph");
  assert(_optimizer->vertex(0)->fixed() && "Graph is not fixed by vertex 0");
  VectorXD b, x; // will be used for theta and x/y update
  b.setZero(_optimizer->indexMapping().size());
  x.setZero(_optimizer->indexMapping().size());

  typedef Eigen::Matrix<double, 1, 1, Eigen::ColMajor> ScalarMatrix;

  ScopedArray<int> blockIndices(new int[_optimizer->indexMapping().size()]);
  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i)
    blockIndices[i] = i + 1;
  SparseBlockMatrix<ScalarMatrix> H(blockIndices.get(), blockIndices.get(), _optimizer->indexMapping().size(), _optimizer->indexMapping().size());

  // build the structure: a diagonal block for each active vertex
  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    int poseIdx = v->hessianIndex();
    ScalarMatrix* m = H.block(poseIdx, poseIdx, true);
    m->setZero();
  }

  HyperGraph::VertexSet fixedSet;
  // an off-diagonal block for each edge
  for (SparseOptimizer::EdgeContainer::const_iterator it = _optimizer->activeEdges().begin(); it != _optimizer->activeEdges().end(); ++it) {
#ifndef NDEBUG
    EdgeSE2* e = dynamic_cast<EdgeSE2*>(*it);
    assert(e && "Active edges contain non-odometry edge");
#else
    EdgeSE2* e = static_cast<EdgeSE2*>(*it);
#endif
    OptimizableGraph::Vertex* from = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
    OptimizableGraph::Vertex* to = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
    int ind1 = from->hessianIndex();
    int ind2 = to->hessianIndex();
    if (ind1 == -1 || ind2 == -1) {
      if (ind1 == -1) fixedSet.insert(from); // collect the fixed vertices
      if (ind2 == -1) fixedSet.insert(to);
      continue;
    }
    bool transposedBlock = ind1 > ind2;
    if (transposedBlock) { // make sure we allocate the upper triangular block
      std::swap(ind1, ind2);
    }
    ScalarMatrix* m = H.block(ind1, ind2, true);
    m->setZero();
  }

  // walk along the spanning tree to compute the initial guess for the robot orientation
  assert(fixedSet.size() == 1);
  VertexSE2* root = static_cast<VertexSE2*>(*fixedSet.begin());
  VectorXD thetaGuess;
  thetaGuess.setZero(_optimizer->indexMapping().size());
  UniformCostFunction uniformCost;
  HyperDijkstra hyperDijkstra(_optimizer);
  hyperDijkstra.shortestPaths(root, &uniformCost);
  HyperDijkstra::computeTree(hyperDijkstra.adjacencyMap());
  ThetaTreeAction thetaTreeAction(thetaGuess.data());
  HyperDijkstra::visitAdjacencyMap(hyperDijkstra.adjacencyMap(), &thetaTreeAction);

  // construct the linear system for the orientation
  for (SparseOptimizer::EdgeContainer::const_iterator it = _optimizer->activeEdges().begin(); it != _optimizer->activeEdges().end(); ++it) {
    EdgeSE2* e = static_cast<EdgeSE2*>(*it);
    VertexSE2* from = static_cast<VertexSE2*>(e->vertices()[0]);
    VertexSE2* to = static_cast<VertexSE2*>(e->vertices()[1]);
    double omega = e->information()(2, 2);
    double fromThetaGuess = from->hessianIndex() < 0 ? 0. : thetaGuess[from->hessianIndex()];
    double toThetaGuess = to->hessianIndex() < 0 ? 0. : thetaGuess[to->hessianIndex()];
    double error = normalize_theta(-e->measurement().rotation().angle() + toThetaGuess - fromThetaGuess);
    bool fromNotFixed = !(from->fixed());
    bool toNotFixed = !(to->fixed());
    if (fromNotFixed || toNotFixed) {
      double omega_r = -omega * error;
      if (fromNotFixed) {
        b(from->hessianIndex()) -= omega_r;
        (*H.block(from->hessianIndex(), from->hessianIndex()))(0, 0) += omega;
        if (toNotFixed) {
          if (from->hessianIndex() > to->hessianIndex())
            (*H.block(to->hessianIndex(), from->hessianIndex()))(0, 0) -= omega;
          else
            (*H.block(from->hessianIndex(), to->hessianIndex()))(0, 0) -= omega;
        }
      }
      if (toNotFixed) {
        b(to->hessianIndex()) += omega_r;
        (*H.block(to->hessianIndex(), to->hessianIndex()))(0, 0) += omega;
      }
    }
  }
  //......... the remainder of this example is omitted .........
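Throughout this last loop, hessianIndex() doubles as the address of a vertex in both b and H. A condensed sketch of that scatter-add for one scalar constraint between two non-fixed vertices (templated on the block-matrix type; block(i, j) is assumed to return a pointer to an already allocated 1x1 block, mirroring the SparseBlockMatrix used above):

#include <algorithm>
#include <Eigen/Core>

// Sketch: scatter one scalar constraint into b and H, addressing both
// by hessianIndex(). i and j are Hessian indices, assumed non-negative.
template <class BlockMatrix>
void addScalarConstraint(BlockMatrix& H, Eigen::VectorXd& b,
                         int i, int j, double omega, double error)
{
  const double omega_r = -omega * error;  // weighted residual
  b(i) -= omega_r;                        // right-hand side entries
  b(j) += omega_r;
  (*H.block(i, i))(0, 0) += omega;        // diagonal information
  (*H.block(j, j))(0, 0) += omega;
  (*H.block(std::min(i, j), std::max(i, j)))(0, 0) -= omega; // upper-triangular off-diagonal
}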