This article collects typical usage examples of the C++ method OptimizableGraph::Edge::mapHessianMemory. If you are unsure how to use Edge::mapHessianMemory in C++, or are looking for concrete examples of it in real code, the selected snippets below may help. You can also look further into usage examples of the enclosing class OptimizableGraph::Edge.
The following shows 3 code examples of the Edge::mapHessianMemory method, sorted by popularity by default.
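Before the examples, it may help to see the pattern they all share in isolation: allocate a block inside a g2o SparseBlockMatrix and hand its raw storage to a vertex or edge via mapHessianMemory, so that subsequent linearization writes directly into the system matrix instead of into a private copy. The helper below is only a minimal sketch distilled from the examples; mapBlocks is a hypothetical name (not part of g2o), and it assumes the g2o core headers and an already instantiated sparse block matrix are available.

#include <utility>                        // std::swap
#include <g2o/core/optimizable_graph.h>
#include <g2o/core/sparse_block_matrix.h>

// Map the diagonal Hessian block of vertex v and the off-diagonal block of
// edge e (coupling its vertices i and j) into the sparse block matrix H.
template <typename MatrixType>
void mapBlocks(g2o::SparseBlockMatrix<MatrixType>& H,
               g2o::OptimizableGraph::Vertex* v,
               g2o::OptimizableGraph::Edge* e, int i, int j)
{
  // Diagonal block of the vertex: block(r, c, true) allocates on demand.
  int ind = v->hessianIndex();
  if (ind >= 0) {
    MatrixType* m = H.block(ind, ind, true);
    v->mapHessianMemory(m->data());   // the vertex now accumulates into H directly
  }

  // Off-diagonal block of the edge: only the upper triangle is stored, so swap
  // the indices if needed and tell the edge that its block is transposed.
  g2o::OptimizableGraph::Vertex* vi = static_cast<g2o::OptimizableGraph::Vertex*>(e->vertex(i));
  g2o::OptimizableGraph::Vertex* vj = static_cast<g2o::OptimizableGraph::Vertex*>(e->vertex(j));
  int r = vi->hessianIndex();
  int c = vj->hessianIndex();
  if (r < 0 || c < 0)                 // -1 means the vertex is fixed / not in the system
    return;
  bool transposedBlock = r > c;
  if (transposedBlock)
    std::swap(r, c);
  MatrixType* m = H.block(r, c, true);
  e->mapHessianMemory(m->data(), i, j, transposedBlock);
}

Because the vertices and edges keep these raw pointers, the mappings have to be rebuilt whenever the matrix structure changes; that is exactly what the updateStructure, updateInitialization and buildStructure examples below do.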
Example 1: abort
bool BlockSolver<Traits>::updateStructure(const std::vector<HyperGraph::Vertex*>& vset, const HyperGraph::EdgeSet& edges)
{
  for (std::vector<HyperGraph::Vertex*>::const_iterator vit = vset.begin(); vit != vset.end(); ++vit) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*vit);
    int dim = v->dimension();
    if (! v->marginalized()){
      v->setColInHessian(_sizePoses);
      _sizePoses+=dim;
      _Hpp->rowBlockIndices().push_back(_sizePoses);
      _Hpp->colBlockIndices().push_back(_sizePoses);
      _Hpp->blockCols().push_back(typename SparseBlockMatrix<PoseMatrixType>::IntBlockMap());
      ++_numPoses;
      int ind = v->hessianIndex();
      PoseMatrixType* m = _Hpp->block(ind, ind, true);
      v->mapHessianMemory(m->data());
    } else {
      std::cerr << "updateStructure(): Schur not supported" << std::endl;
      abort();
    }
  }
  resizeVector(_sizePoses + _sizeLandmarks);

  for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);

    for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
      OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertex(viIdx);
      int ind1 = v1->hessianIndex();
      int indexV1Bak = ind1;
      if (ind1 == -1)
        continue;
      for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
        OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertex(vjIdx);
        int ind2 = v2->hessianIndex();
        if (ind2 == -1)
          continue;
        ind1 = indexV1Bak;
        bool transposedBlock = ind1 > ind2;
        if (transposedBlock) // make sure, we allocate the upper triangular block
          std::swap(ind1, ind2);

        if (! v1->marginalized() && !v2->marginalized()) {
          PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
          e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
        } else {
          std::cerr << __PRETTY_FUNCTION__ << ": not supported" << std::endl;
        }
      }
    }
  }
  return true;
}
Example 2: updateInitialization
bool SparseOptimizerIncremental::updateInitialization(HyperGraph::VertexSet& vset, HyperGraph::EdgeSet& eset)
{
  if (batchStep) {
    return SparseOptimizerOnline::updateInitialization(vset, eset);
  }

  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    v->clearQuadraticForm(); // be sure that b is zero for this vertex
  }

  // get the touched vertices
  _touchedVertices.clear();
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    OptimizableGraph::Vertex* v1 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
    OptimizableGraph::Vertex* v2 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
    if (! v1->fixed())
      _touchedVertices.insert(v1);
    if (! v2->fixed())
      _touchedVertices.insert(v2);
  }
  //cerr << PVAR(_touchedVertices.size()) << endl;

  // updating the internal structures
  std::vector<HyperGraph::Vertex*> newVertices;
  newVertices.reserve(vset.size());
  _activeVertices.reserve(_activeVertices.size() + vset.size());
  _activeEdges.reserve(_activeEdges.size() + eset.size());
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it)
    _activeEdges.push_back(static_cast<OptimizableGraph::Edge*>(*it));
  //cerr << "updating internal done." << endl;

  // update the index mapping
  size_t next = _ivMap.size();
  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    if (! v->fixed()){
      if (! v->marginalized()){
        v->setHessianIndex(next);
        _ivMap.push_back(v);
        newVertices.push_back(v);
        _activeVertices.push_back(v);
        next++;
      }
      else // not supported right now
        abort();
    }
    else {
      v->setHessianIndex(-1);
    }
  }
  //cerr << "updating index mapping done." << endl;

  // backup the tempindex and prepare sorting structure
  VertexBackup backupIdx[_touchedVertices.size()];
  memset(backupIdx, 0, sizeof(VertexBackup) * _touchedVertices.size());
  int idx = 0;
  for (HyperGraph::VertexSet::iterator it = _touchedVertices.begin(); it != _touchedVertices.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    backupIdx[idx].hessianIndex = v->hessianIndex();
    backupIdx[idx].vertex = v;
    backupIdx[idx].hessianData = v->hessianData();
    ++idx;
  }
  sort(backupIdx, backupIdx + _touchedVertices.size()); // sort according to the hessianIndex which is the same order as used later by the optimizer
  for (int i = 0; i < idx; ++i) {
    backupIdx[i].vertex->setHessianIndex(i);
  }
  //cerr << "backup tempindex done." << endl;

  // building the structure of the update
  _updateMat.clear(true); // get rid of the old matrix structure
  _updateMat.rowBlockIndices().clear();
  _updateMat.colBlockIndices().clear();
  _updateMat.blockCols().clear();
  // placing the current stuff in _updateMat
  MatrixXd* lastBlock = 0;
  int sizePoses = 0;
  for (int i = 0; i < idx; ++i) {
    OptimizableGraph::Vertex* v = backupIdx[i].vertex;
    int dim = v->dimension();
    sizePoses+=dim;
    _updateMat.rowBlockIndices().push_back(sizePoses);
    _updateMat.colBlockIndices().push_back(sizePoses);
    _updateMat.blockCols().push_back(SparseBlockMatrix<MatrixXd>::IntBlockMap());
    int ind = v->hessianIndex();
    //cerr << PVAR(ind) << endl;
    if (ind >= 0) {
      MatrixXd* m = _updateMat.block(ind, ind, true);
      v->mapHessianMemory(m->data());
      lastBlock = m;
    }
  }
  lastBlock->diagonal().array() += 1e-6; // HACK to get Eigen value > 0

  for (HyperGraph::EdgeSet::const_iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    //......... the rest of the code is omitted here .........
Example 3: assert
bool BlockSolver<Traits>::buildStructure(bool zeroBlocks)
{
  assert(_optimizer);

  size_t sparseDim = 0;
  _numPoses=0;
  _numLandmarks=0;
  _sizePoses=0;
  _sizeLandmarks=0;

  int* blockPoseIndices = new int[_optimizer->indexMapping().size()];
  int* blockLandmarkIndices = new int[_optimizer->indexMapping().size()];

  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    int dim = v->dimension();
    if (! v->marginalized()){
      v->setColInHessian(_sizePoses);
      _sizePoses+=dim;
      blockPoseIndices[_numPoses]=_sizePoses;
      ++_numPoses;
    } else {
      v->setColInHessian(_sizeLandmarks);
      _sizeLandmarks+=dim;
      blockLandmarkIndices[_numLandmarks]=_sizeLandmarks;
      ++_numLandmarks;
    }
    sparseDim += dim;
  }
  resize(blockPoseIndices, _numPoses, blockLandmarkIndices, _numLandmarks, sparseDim);
  delete[] blockLandmarkIndices;
  delete[] blockPoseIndices;

  // allocate the diagonal on Hpp and Hll
  int poseIdx = 0;
  int landmarkIdx = 0;
  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    if (! v->marginalized()){
      //assert(poseIdx == v->hessianIndex());
      PoseMatrixType* m = _Hpp->block(poseIdx, poseIdx, true);
      if (zeroBlocks)
        m->setZero();
      v->mapHessianMemory(m->data());
      ++poseIdx;
    } else {
      LandmarkMatrixType* m = _Hll->block(landmarkIdx, landmarkIdx, true);
      if (zeroBlocks)
        m->setZero();
      v->mapHessianMemory(m->data());
      ++landmarkIdx;
    }
  }
  assert(poseIdx == _numPoses && landmarkIdx == _numLandmarks);

  // temporary structures for building the pattern of the Schur complement
  SparseBlockMatrixHashMap<PoseMatrixType>* schurMatrixLookup = 0;
  if (_doSchur) {
    schurMatrixLookup = new SparseBlockMatrixHashMap<PoseMatrixType>(_Hschur->rowBlockIndices(), _Hschur->colBlockIndices());
    schurMatrixLookup->blockCols().resize(_Hschur->blockCols().size());
  }

  // here we assume that the landmark indices start after the pose ones
  // create the structure in Hpp, Hll and in Hpl
  for (SparseOptimizer::EdgeContainer::const_iterator it=_optimizer->activeEdges().begin(); it!=_optimizer->activeEdges().end(); ++it){
    OptimizableGraph::Edge* e = *it;

    for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
      OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertex(viIdx);
      int ind1 = v1->hessianIndex();
      if (ind1 == -1)
        continue;
      int indexV1Bak = ind1;
      for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
        OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertex(vjIdx);
        int ind2 = v2->hessianIndex();
        if (ind2 == -1)
          continue;
        ind1 = indexV1Bak;
        bool transposedBlock = ind1 > ind2;
        if (transposedBlock){ // make sure, we allocate the upper triangle block
          std::swap(ind1, ind2);
        }

        if (! v1->marginalized() && !v2->marginalized()){
          PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
          if (zeroBlocks)
            m->setZero();
          e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
          if (_Hschur) {// assume this is only needed in case we solve with the schur complement
            schurMatrixLookup->addBlock(ind1, ind2);
          }
        } else if (v1->marginalized() && v2->marginalized()){
          // RAINER hmm.... should we ever reach this here????
          LandmarkMatrixType* m = _Hll->block(ind1-_numPoses, ind2-_numPoses, true);
          if (zeroBlocks)
            m->setZero();
          e->mapHessianMemory(m->data(), viIdx, vjIdx, false);
        } else {
          if (v1->marginalized()){
            PoseLandmarkMatrixType* m = _Hpl->block(v2->hessianIndex(),v1->hessianIndex()-_numPoses, true);
            if (zeroBlocks)
              //......... the rest of the code is omitted here .........