This article collects typical usage examples of the C++ method OptimizableGraph::Vertex::dimension. If you are wondering how Vertex::dimension is used in practice, or what calling code for it looks like, the curated examples below may help. You can also explore further usage examples of the enclosing class OptimizableGraph::Vertex.
The following presents 14 code examples of the Vertex::dimension method, ordered by popularity.
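Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: dimension() returns the number of free parameters (degrees of freedom) of a vertex, and callers use it to slice per-vertex portions out of a stacked update or Hessian buffer. Note that SimpleVertex and the driver loop below are hypothetical stand-ins for illustration only, not part of the g2o API; the real interface appears in the examples that follow.

#include <iostream>
#include <vector>

// Hypothetical stand-in for OptimizableGraph::Vertex: dimension() reports the
// local degrees of freedom, so callers can advance through a stacked buffer.
struct SimpleVertex {
  int dim;
  int dimension() const { return dim; }
  void oplus(const double* update) {
    std::cout << "applying a " << dim << "-dimensional increment, first entry "
              << update[0] << "\n";
  }
};

int main() {
  std::vector<SimpleVertex> vertices = {{3}, {3}, {2}};  // e.g. two poses and one landmark
  std::vector<double> increment(3 + 3 + 2, 0.1);         // stacked update vector
  double* update = increment.data();
  for (SimpleVertex& v : vertices) {
    v.oplus(update);            // each vertex consumes its own slice
    update += v.dimension();    // advance by this vertex's dimension
  }
  return 0;
}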
Example 1: findGauge
OptimizableGraph::Vertex* SparseOptimizer::findGauge(){
  if (vertices().empty())
    return 0;

  int maxDim=0;
  for (HyperGraph::VertexIDMap::iterator it = vertices().begin(); it != vertices().end(); it++){
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
    maxDim = std::max(maxDim,v->dimension());
  }

  OptimizableGraph::Vertex* rut=0;
  for (HyperGraph::VertexIDMap::iterator it = vertices().begin(); it != vertices().end(); it++){
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
    if (v->dimension()==maxDim){
      rut=v;
      break;
    }
  }
  return rut;
}
Example 2: computeUpperTriangleIndex
void BaseMultiEdge<D, E>::mapHessianMemory(double* d, int i, int j, bool rowMajor)
{
  int idx = computeUpperTriangleIndex(i, j);
  assert(idx < (int)_hessian.size());
  OptimizableGraph::Vertex* vi = static_cast<OptimizableGraph::Vertex*>(_vertices[i]);
  OptimizableGraph::Vertex* vj = static_cast<OptimizableGraph::Vertex*>(_vertices[j]);
  HessianHelper& h = _hessian[idx];
  if (rowMajor) {
    if (h.matrix.data() != d || h.transposed != rowMajor)
      new (&h.matrix) HessianBlockType(d, vj->dimension(), vi->dimension());
  } else {
    if (h.matrix.data() != d || h.transposed != rowMajor)
      new (&h.matrix) HessianBlockType(d, vi->dimension(), vj->dimension());
  }
  h.transposed = rowMajor;
}
Example 3: assert
void BaseMultiEdge<D, E>::linearizeOplus(JacobianWorkspace& jacobianWorkspace)
{
  for (size_t i = 0; i < _vertices.size(); ++i) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(_vertices[i]);
    assert(v->dimension() >= 0);
    new (&_jacobianOplus[i]) JacobianType(jacobianWorkspace.workspaceForVertex(i), D, v->dimension());
  }
  linearizeOplus();
}
Example 4: edgeAllVertsSameDim
bool edgeAllVertsSameDim(OptimizableGraph::Edge* e, int dim)
{
  for (size_t i = 0; i < e->vertices().size(); ++i) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(e->vertices()[i]);
    if (v->dimension() != dim)
      return false;
  }
  return true;
}
Example 5: update
void SparseOptimizer::update(double* update)
{
  // update the graph by calling oplus on the vertices
  for (size_t i=0; i < _ivMap.size(); ++i) {
    OptimizableGraph::Vertex* v = _ivMap[i];
    v->oplus(update);
    update += v->dimension();
  }
}
Example 6: fromMap
void BaseMultiEdge<D, E>::constructQuadraticForm()
{
  const InformationType& omega = _information;
  Matrix<double, D, 1> omega_r = - omega * _error;
  for (size_t i = 0; i < _vertices.size(); ++i) {
    OptimizableGraph::Vertex* from = static_cast<OptimizableGraph::Vertex*>(_vertices[i]);
    bool istatus = !(from->fixed());

    if (istatus) {
      const MatrixXd& A = _jacobianOplus[i];
      MatrixXd AtO = A.transpose() * omega;
      int fromDim = from->dimension();
      Map<MatrixXd> fromMap(from->hessianData(), fromDim, fromDim);
      Map<VectorXd> fromB(from->bData(), fromDim);

      // ii block in the hessian
#ifdef G2O_OPENMP
      from->lockQuadraticForm();
#endif
      fromMap.noalias() += AtO * A;
      fromB.noalias() += A.transpose() * omega_r;

      // compute the off-diagonal blocks ij for all j
      for (size_t j = i+1; j < _vertices.size(); ++j) {
        OptimizableGraph::Vertex* to = static_cast<OptimizableGraph::Vertex*>(_vertices[j]);
#ifdef G2O_OPENMP
        to->lockQuadraticForm();
#endif
        bool jstatus = !(to->fixed());
        if (jstatus) {
          const MatrixXd& B = _jacobianOplus[j];
          int idx = computeUpperTriangleIndex(i, j);
          assert(idx < (int)_hessian.size());
          HessianHelper& hhelper = _hessian[idx];
          if (hhelper.transposed) { // we have to write to the block as transposed
            hhelper.matrix.noalias() += B.transpose() * AtO.transpose();
          } else {
            hhelper.matrix.noalias() += AtO * B;
          }
        }
#ifdef G2O_OPENMP
        to->unlockQuadraticForm();
#endif
      }
#ifdef G2O_OPENMP
      from->unlockQuadraticForm();
#endif
    }
  }
}
Example 7: abort
bool BlockSolver<Traits>::updateStructure(const std::vector<HyperGraph::Vertex*>& vset, const HyperGraph::EdgeSet& edges)
{
  for (std::vector<HyperGraph::Vertex*>::const_iterator vit = vset.begin(); vit != vset.end(); ++vit) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*vit);
    int dim = v->dimension();
    if (! v->marginalized()){
      v->setColInHessian(_sizePoses);
      _sizePoses+=dim;
      _Hpp->rowBlockIndices().push_back(_sizePoses);
      _Hpp->colBlockIndices().push_back(_sizePoses);
      _Hpp->blockCols().push_back(typename SparseBlockMatrix<PoseMatrixType>::IntBlockMap());
      ++_numPoses;
      int ind = v->hessianIndex();
      PoseMatrixType* m = _Hpp->block(ind, ind, true);
      v->mapHessianMemory(m->data());
    } else {
      std::cerr << "updateStructure(): Schur not supported" << std::endl;
      abort();
    }
  }
  resizeVector(_sizePoses + _sizeLandmarks);

  for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);

    for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
      OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertex(viIdx);
      int ind1 = v1->hessianIndex();
      int indexV1Bak = ind1;
      if (ind1 == -1)
        continue;
      for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
        OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertex(vjIdx);
        int ind2 = v2->hessianIndex();
        if (ind2 == -1)
          continue;
        ind1 = indexV1Bak;
        bool transposedBlock = ind1 > ind2;
        if (transposedBlock) // make sure, we allocate the upper triangular block
          std::swap(ind1, ind2);

        if (! v1->marginalized() && !v2->marginalized()) {
          PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
          e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
        } else {
          std::cerr << __PRETTY_FUNCTION__ << ": not supported" << std::endl;
        }
      }
    }
  }
  return true;
}
Example 8: gaugeFreedom
bool SparseOptimizer::gaugeFreedom()
{
  if (vertices().empty())
    return false;

  int maxDim=0;
  for (HyperGraph::VertexIDMap::iterator it = vertices().begin(); it != vertices().end(); it++){
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
    maxDim = std::max(maxDim,v->dimension());
  }

  for (HyperGraph::VertexIDMap::iterator it = vertices().begin(); it != vertices().end(); it++){
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
    if (v->dimension() == maxDim) {
      // test for full dimension prior
      for (HyperGraph::EdgeSet::const_iterator eit = v->edges().begin(); eit != v->edges().end(); ++eit){
        OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*eit);
        if (e->vertices().size() == 1 && e->dimension() == maxDim)
          return false;
      }
    }
  }
  return true;
}
Example 9: computeLambdaInit
double OptimizationAlgorithmLevenberg::computeLambdaInit() const
{
  if (_userLambdaInit->value() > 0)
    return _userLambdaInit->value();
  double maxDiagonal=0.;
  for (size_t k = 0; k < _optimizer->indexMapping().size(); k++) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[k];
    assert(v);
    int dim = v->dimension();
    for (int j = 0; j < dim; ++j){
      maxDiagonal = std::max(fabs(v->hessian(j,j)),maxDiagonal);
    }
  }
  return _tau*maxDiagonal;
}
Example 10: prepare
bool MainWindow::prepare()
{
  SparseOptimizer* optimizer = viewer->graph;
  if (_currentOptimizationAlgorithmProperty.requiresMarginalize) {
    cerr << "Marginalizing Landmarks" << endl;
    for (SparseOptimizer::VertexIDMap::const_iterator it = optimizer->vertices().begin(); it != optimizer->vertices().end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
      int vdim = v->dimension();
      v->setMarginalized((vdim == _currentOptimizationAlgorithmProperty.landmarkDim));
    }
  }
  else {
    cerr << "Preparing (no marginalization of Landmarks)" << endl;
    for (SparseOptimizer::VertexIDMap::const_iterator it = optimizer->vertices().begin(); it != optimizer->vertices().end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
      v->setMarginalized(false);
    }
  }
  viewer->graph->initializeOptimization();
  return true;
}
Example 11: updateInitialization
bool SparseOptimizerIncremental::updateInitialization(HyperGraph::VertexSet& vset, HyperGraph::EdgeSet& eset)
{
  if (batchStep) {
    return SparseOptimizerOnline::updateInitialization(vset, eset);
  }

  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    v->clearQuadraticForm(); // be sure that b is zero for this vertex
  }

  // get the touched vertices
  _touchedVertices.clear();
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    OptimizableGraph::Vertex* v1 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
    OptimizableGraph::Vertex* v2 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
    if (! v1->fixed())
      _touchedVertices.insert(v1);
    if (! v2->fixed())
      _touchedVertices.insert(v2);
  }
  //cerr << PVAR(_touchedVertices.size()) << endl;

  // updating the internal structures
  std::vector<HyperGraph::Vertex*> newVertices;
  newVertices.reserve(vset.size());
  _activeVertices.reserve(_activeVertices.size() + vset.size());
  _activeEdges.reserve(_activeEdges.size() + eset.size());
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it)
    _activeEdges.push_back(static_cast<OptimizableGraph::Edge*>(*it));
  //cerr << "updating internal done." << endl;

  // update the index mapping
  size_t next = _ivMap.size();
  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
    OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(*it);
    if (! v->fixed()){
      if (! v->marginalized()){
        v->setHessianIndex(next);
        _ivMap.push_back(v);
        newVertices.push_back(v);
        _activeVertices.push_back(v);
        next++;
      }
      else // not supported right now
        abort();
    }
    else {
      v->setHessianIndex(-1);
    }
  }
  //cerr << "updating index mapping done." << endl;

  // backup the tempindex and prepare sorting structure
  VertexBackup backupIdx[_touchedVertices.size()];
  memset(backupIdx, 0, sizeof(VertexBackup) * _touchedVertices.size());
  int idx = 0;
  for (HyperGraph::VertexSet::iterator it = _touchedVertices.begin(); it != _touchedVertices.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    backupIdx[idx].hessianIndex = v->hessianIndex();
    backupIdx[idx].vertex = v;
    backupIdx[idx].hessianData = v->hessianData();
    ++idx;
  }
  sort(backupIdx, backupIdx + _touchedVertices.size()); // sort according to the hessianIndex which is the same order as used later by the optimizer
  for (int i = 0; i < idx; ++i) {
    backupIdx[i].vertex->setHessianIndex(i);
  }
  //cerr << "backup tempindex done." << endl;

  // building the structure of the update
  _updateMat.clear(true); // get rid of the old matrix structure
  _updateMat.rowBlockIndices().clear();
  _updateMat.colBlockIndices().clear();
  _updateMat.blockCols().clear();

  // placing the current stuff in _updateMat
  MatrixXd* lastBlock = 0;
  int sizePoses = 0;
  for (int i = 0; i < idx; ++i) {
    OptimizableGraph::Vertex* v = backupIdx[i].vertex;
    int dim = v->dimension();
    sizePoses+=dim;
    _updateMat.rowBlockIndices().push_back(sizePoses);
    _updateMat.colBlockIndices().push_back(sizePoses);
    _updateMat.blockCols().push_back(SparseBlockMatrix<MatrixXd>::IntBlockMap());
    int ind = v->hessianIndex();
    //cerr << PVAR(ind) << endl;
    if (ind >= 0) {
      MatrixXd* m = _updateMat.block(ind, ind, true);
      v->mapHessianMemory(m->data());
      lastBlock = m;
    }
  }
  lastBlock->diagonal().array() += 1e-6; // HACK to get Eigen value > 0

  for (HyperGraph::EdgeSet::const_iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
//......... part of the code is omitted here .........
Example 12: computeError
void BaseMultiEdge<D, E>::linearizeOplus()
{
#ifdef G2O_OPENMP
  for (size_t i = 0; i < _vertices.size(); ++i) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(_vertices[i]);
    v->lockQuadraticForm();
  }
#endif

  const double delta = 1e-9;
  const double scalar = 1.0 / (2*delta);
  ErrorVector errorBak;
  ErrorVector errorBeforeNumeric = _error;

  for (size_t i = 0; i < _vertices.size(); ++i) {
    //Xi - estimate the jacobian numerically
    OptimizableGraph::Vertex* vi = static_cast<OptimizableGraph::Vertex*>(_vertices[i]);

    if (vi->fixed())
      continue;

    const int vi_dim = vi->dimension();
#ifdef _MSC_VER
    double* add_vi = new double[vi_dim];
#else
    double add_vi[vi_dim];
#endif
    std::fill(add_vi, add_vi + vi_dim, 0.0);
    if (_jacobianOplus[i].rows() != _dimension || _jacobianOplus[i].cols() != vi_dim)
      _jacobianOplus[i].resize(_dimension, vi_dim);
    // add small step along the unit vector in each dimension
    for (int d = 0; d < vi_dim; ++d) {
      vi->push();
      add_vi[d] = delta;
      vi->oplus(add_vi);
      computeError();
      errorBak = _error;
      vi->pop();
      vi->push();
      add_vi[d] = -delta;
      vi->oplus(add_vi);
      computeError();
      errorBak -= _error;
      vi->pop();
      add_vi[d] = 0.0;

      _jacobianOplus[i].col(d) = scalar * errorBak;
    } // end dimension
#ifdef _MSC_VER
    delete[] add_vi;
#endif
  }
  _error = errorBeforeNumeric;
#ifdef G2O_OPENMP
  for (int i = (int)(_vertices.size()) - 1; i >= 0; --i) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(_vertices[i]);
    v->unlockQuadraticForm();
  }
#endif
}
Example 13: assert
bool BlockSolver<Traits>::buildStructure(bool zeroBlocks)
{
  assert(_optimizer);

  size_t sparseDim = 0;
  _numPoses=0;
  _numLandmarks=0;
  _sizePoses=0;
  _sizeLandmarks=0;
  int* blockPoseIndices = new int[_optimizer->indexMapping().size()];
  int* blockLandmarkIndices = new int[_optimizer->indexMapping().size()];

  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    int dim = v->dimension();
    if (! v->marginalized()){
      v->setColInHessian(_sizePoses);
      _sizePoses+=dim;
      blockPoseIndices[_numPoses]=_sizePoses;
      ++_numPoses;
    } else {
      v->setColInHessian(_sizeLandmarks);
      _sizeLandmarks+=dim;
      blockLandmarkIndices[_numLandmarks]=_sizeLandmarks;
      ++_numLandmarks;
    }
    sparseDim += dim;
  }
  resize(blockPoseIndices, _numPoses, blockLandmarkIndices, _numLandmarks, sparseDim);
  delete[] blockLandmarkIndices;
  delete[] blockPoseIndices;

  // allocate the diagonal on Hpp and Hll
  int poseIdx = 0;
  int landmarkIdx = 0;
  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    if (! v->marginalized()){
      //assert(poseIdx == v->hessianIndex());
      PoseMatrixType* m = _Hpp->block(poseIdx, poseIdx, true);
      if (zeroBlocks)
        m->setZero();
      v->mapHessianMemory(m->data());
      ++poseIdx;
    } else {
      LandmarkMatrixType* m = _Hll->block(landmarkIdx, landmarkIdx, true);
      if (zeroBlocks)
        m->setZero();
      v->mapHessianMemory(m->data());
      ++landmarkIdx;
    }
  }
  assert(poseIdx == _numPoses && landmarkIdx == _numLandmarks);

  // temporary structures for building the pattern of the Schur complement
  SparseBlockMatrixHashMap<PoseMatrixType>* schurMatrixLookup = 0;
  if (_doSchur) {
    schurMatrixLookup = new SparseBlockMatrixHashMap<PoseMatrixType>(_Hschur->rowBlockIndices(), _Hschur->colBlockIndices());
    schurMatrixLookup->blockCols().resize(_Hschur->blockCols().size());
  }

  // here we assume that the landmark indices start after the pose ones
  // create the structure in Hpp, Hll and in Hpl
  for (SparseOptimizer::EdgeContainer::const_iterator it=_optimizer->activeEdges().begin(); it!=_optimizer->activeEdges().end(); ++it){
    OptimizableGraph::Edge* e = *it;

    for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
      OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertex(viIdx);
      int ind1 = v1->hessianIndex();
      if (ind1 == -1)
        continue;
      int indexV1Bak = ind1;
      for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
        OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertex(vjIdx);
        int ind2 = v2->hessianIndex();
        if (ind2 == -1)
          continue;
        ind1 = indexV1Bak;
        bool transposedBlock = ind1 > ind2;
        if (transposedBlock){ // make sure, we allocate the upper triangle block
          std::swap(ind1, ind2);
        }

        if (! v1->marginalized() && !v2->marginalized()){
          PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
          if (zeroBlocks)
            m->setZero();
          e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
          if (_Hschur) {// assume this is only needed in case we solve with the schur complement
            schurMatrixLookup->addBlock(ind1, ind2);
          }
        } else if (v1->marginalized() && v2->marginalized()){
          // RAINER hmm.... should we ever reach this here????
          LandmarkMatrixType* m = _Hll->block(ind1-_numPoses, ind2-_numPoses, true);
          if (zeroBlocks)
            m->setZero();
          e->mapHessianMemory(m->data(), viIdx, vjIdx, false);
        } else {
          if (v1->marginalized()){
            PoseLandmarkMatrixType* m = _Hpl->block(v2->hessianIndex(),v1->hessianIndex()-_numPoses, true);
            if (zeroBlocks)
//......... part of the code is omitted here .........
Example 14: saveGnuplot
bool saveGnuplot(const std::string& gnudump, const HyperGraph::VertexSet& vertices, const HyperGraph::EdgeSet& edges)
{
  // seek for an action whose name is writeGnuplot in the library
  HyperGraphElementAction* saveGnuplot = HyperGraphActionLibrary::instance()->actionByName("writeGnuplot");
  if (! saveGnuplot ){
    cerr << __PRETTY_FUNCTION__ << ": no action \"writeGnuplot\" registered" << endl;
    return false;
  }

  WriteGnuplotAction::Parameters params;

  int maxDim = -1;
  int minDim = numeric_limits<int>::max();
  for (HyperGraph::VertexSet::const_iterator it = vertices.begin(); it != vertices.end(); ++it){
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    int vdim = v->dimension();
    maxDim = (std::max)(vdim, maxDim);
    minDim = (std::min)(vdim, minDim);
  }

  string extension = getFileExtension(gnudump);
  if (extension.size() == 0)
    extension = "dat";
  string baseFilename = getPureFilename(gnudump);

  // check for odometry edges
  bool hasOdomEdge = false;
  bool hasLandmarkEdge = false;
  for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    if (e->vertices().size() == 2) {
      if (edgeAllVertsSameDim(e, maxDim))
        hasOdomEdge = true;
      else
        hasLandmarkEdge = true;
    }
    if (hasOdomEdge && hasLandmarkEdge)
      break;
  }

  bool fileStatus = true;
  if (hasOdomEdge) {
    string odomFilename = baseFilename + "_odom_edges." + extension;
    cerr << "# saving " << odomFilename << " ... ";
    ofstream fout(odomFilename.c_str());
    if (! fout) {
      cerr << "Unable to open file" << endl;
      return false;
    }
    params.os = &fout;

    // writing odometry edges
    for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      if (e->vertices().size() != 2 || ! edgeAllVertsSameDim(e, maxDim))
        continue;
      (*saveGnuplot)(e, &params);
    }
    cerr << "done." << endl;
  }

  if (hasLandmarkEdge) {
    string filename = baseFilename + "_landmarks_edges." + extension;
    cerr << "# saving " << filename << " ... ";
    ofstream fout(filename.c_str());
    if (! fout) {
      cerr << "Unable to open file" << endl;
      return false;
    }
    params.os = &fout;

    // writing landmark edges
    for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      if (e->vertices().size() != 2 || edgeAllVertsSameDim(e, maxDim))
        continue;
      (*saveGnuplot)(e, &params);
    }
    cerr << "done." << endl;
  }

  if (1) {
    string filename = baseFilename + "_edges." + extension;
    cerr << "# saving " << filename << " ... ";
    ofstream fout(filename.c_str());
    if (! fout) {
      cerr << "Unable to open file" << endl;
      return false;
    }
    params.os = &fout;

    // writing all edges
    for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      (*saveGnuplot)(e, &params);
    }
    cerr << "done." << endl;
  }

  if (1) {
    string filename = baseFilename + "_vertices." + extension;
//......... part of the code is omitted here .........