This article collects typical usage examples of the C++ method MapPoint::ComputeDistinctiveDescriptors. If you have been wondering what MapPoint::ComputeDistinctiveDescriptors does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further examples of the containing class, MapPoint.
The following presents 3 code examples of MapPoint::ComputeDistinctiveDescriptors, sorted by popularity by default.
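For context: these examples come from an ORB-SLAM-style SLAM pipeline. There, ComputeDistinctiveDescriptors refreshes the representative ORB descriptor of a map point by choosing, among all descriptors observed for that point, the one whose median Hamming distance to the others is smallest. The code below is a minimal, self-contained sketch of that idea only; it is not the library's implementation, and the free function SelectDistinctiveDescriptor is a made-up name used for illustration.

#include <algorithm>
#include <limits>
#include <vector>
#include <opencv2/core.hpp>

// Illustrative sketch: pick the descriptor with the smallest median
// Hamming distance to all descriptors observed for the same map point.
cv::Mat SelectDistinctiveDescriptor(const std::vector<cv::Mat> &vDescriptors)
{
    if(vDescriptors.empty())
        return cv::Mat();

    const size_t N = vDescriptors.size();
    size_t bestIdx = 0;
    double bestMedian = std::numeric_limits<double>::max();

    for(size_t i=0; i<N; i++)
    {
        // Hamming distances from descriptor i to every descriptor (including itself)
        std::vector<double> vDists;
        vDists.reserve(N);
        for(size_t j=0; j<N; j++)
            vDists.push_back(cv::norm(vDescriptors[i], vDescriptors[j], cv::NORM_HAMMING));

        // Median of the distances; the vector is small, so sorting is fine
        std::sort(vDists.begin(), vDists.end());
        const double median = vDists[N/2];

        if(median < bestMedian)
        {
            bestMedian = median;
            bestIdx = i;
        }
    }
    return vDescriptors[bestIdx].clone();
}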
Example 1: ProcessNewKeyFrame
void LocalMapping::ProcessNewKeyFrame()
{
    {
        // Pop the oldest pending keyframe from the queue (guarded by the queue mutex)
        boost::mutex::scoped_lock lock(mMutexNewKFs);
        mpCurrentKeyFrame = mlNewKeyFrames.front();
        mlNewKeyFrames.pop_front();
    }

    // Compute Bags of Words structures
    mpCurrentKeyFrame->ComputeBoW();

    if(mpCurrentKeyFrame->mnId==0)
        return;

    // Associate MapPoints to the new keyframe and update normal and descriptor
    vector<MapPoint*> vpMapPointMatches = mpCurrentKeyFrame->GetMapPointMatches();
    if(mpCurrentKeyFrame->mnId>1) // These operations are already done in the tracking for the first two keyframes
    {
        for(size_t i=0; i<vpMapPointMatches.size(); i++)
        {
            MapPoint* pMP = vpMapPointMatches[i];
            if(pMP)
            {
                if(!pMP->isBad())
                {
                    pMP->AddObservation(mpCurrentKeyFrame, i);
                    pMP->UpdateNormalAndDepth();
                    pMP->ComputeDistinctiveDescriptors();
                }
            }
        }
    }

    if(mpCurrentKeyFrame->mnId==1)
    {
        for(size_t i=0; i<vpMapPointMatches.size(); i++)
        {
            MapPoint* pMP = vpMapPointMatches[i];
            if(pMP)
            {
                mlpRecentAddedMapPoints.push_back(pMP);
            }
        }
    }

    // Update links in the Covisibility Graph
    mpCurrentKeyFrame->UpdateConnections();

    // Insert Keyframe in Map
    mpMap->AddKeyFrame(mpCurrentKeyFrame);
}
Example 2: CorrectLoop
//......... part of the code omitted here .........
        for(size_t iMP=0, endMPi = vpMPsi.size(); iMP<endMPi; iMP++)
        {
            MapPoint* pMPi = vpMPsi[iMP];
            if(!pMPi)
                continue;
            if(pMPi->isBad())
                continue;
            if(pMPi->mnCorrectedByKF==mpCurrentKF->mnId)
                continue;

            // Project with non-corrected pose and project back with corrected pose
            cv::Mat P3Dw = pMPi->GetWorldPos();
            Eigen::Matrix<double,3,1> eigP3Dw = Converter::toVector3d(P3Dw);
            Eigen::Matrix<double,3,1> eigCorrectedP3Dw = g2oCorrectedSwi.map(g2oSiw.map(eigP3Dw));

            cv::Mat cvCorrectedP3Dw = Converter::toCvMat(eigCorrectedP3Dw);
            pMPi->SetWorldPos(cvCorrectedP3Dw);
            pMPi->mnCorrectedByKF = mpCurrentKF->mnId;
            pMPi->mnCorrectedReference = pKFi->mnId;
            pMPi->UpdateNormalAndDepth();
        }

        // Update keyframe pose with corrected Sim3. First transform Sim3 to SE3 (scale translation)
        Eigen::Matrix3d eigR = g2oCorrectedSiw.rotation().toRotationMatrix();
        Eigen::Vector3d eigt = g2oCorrectedSiw.translation();
        double s = g2oCorrectedSiw.scale();

        eigt *= (1./s); // [R t/s; 0 1]

        cv::Mat correctedTiw = Converter::toCvSE3(eigR,eigt);

        pKFi->SetPose(correctedTiw);

        // Make sure connections are updated
        pKFi->UpdateConnections();
    }

    // Start Loop Fusion
    // Update matched map points and replace if duplicated
    for(size_t i=0; i<mvpCurrentMatchedPoints.size(); i++)
    {
        if(mvpCurrentMatchedPoints[i])
        {
            MapPoint* pLoopMP = mvpCurrentMatchedPoints[i];
            MapPoint* pCurMP = mpCurrentKF->GetMapPoint(i);
            if(pCurMP)
                pCurMP->Replace(pLoopMP);
            else
            {
                mpCurrentKF->AddMapPoint(pLoopMP,i);
                pLoopMP->AddObservation(mpCurrentKF,i);
                pLoopMP->ComputeDistinctiveDescriptors();
            }
        }
    }

    // Project MapPoints observed in the neighborhood of the loop keyframe
    // into the current keyframe and neighbors using corrected poses.
    // Fuse duplications.
    SearchAndFuse(CorrectedSim3);

    // After the MapPoint fusion, new links in the covisibility graph will appear attaching both sides of the loop
    map<KeyFrame*, set<KeyFrame*> > LoopConnections;

    for(vector<KeyFrame*>::iterator vit=mvpCurrentConnectedKFs.begin(), vend=mvpCurrentConnectedKFs.end(); vit!=vend; vit++)
    {
        KeyFrame* pKFi = *vit;
        vector<KeyFrame*> vpPreviousNeighbors = pKFi->GetVectorCovisibleKeyFrames();

        // Update connections. Detect new links.
        pKFi->UpdateConnections();
        LoopConnections[pKFi]=pKFi->GetConnectedKeyFrames();
        for(vector<KeyFrame*>::iterator vit_prev=vpPreviousNeighbors.begin(), vend_prev=vpPreviousNeighbors.end(); vit_prev!=vend_prev; vit_prev++)
        {
            LoopConnections[pKFi].erase(*vit_prev);
        }
        for(vector<KeyFrame*>::iterator vit2=mvpCurrentConnectedKFs.begin(), vend2=mvpCurrentConnectedKFs.end(); vit2!=vend2; vit2++)
        {
            LoopConnections[pKFi].erase(*vit2);
        }
    }

    mpTracker->ForceRelocalisation();

    Optimizer::OptimizeEssentialGraph(mpMap, mpMatchedKF, mpCurrentKF, mg2oScw, NonCorrectedSim3, CorrectedSim3, LoopConnections);

    // Add edge
    mpMatchedKF->AddLoopEdge(mpCurrentKF);
    mpCurrentKF->AddLoopEdge(mpMatchedKF);

    std::cout << "Loop Closed!" << std::endl;

    // Loop closed. Release Local Mapping.
    mpLocalMapper->Release();

    mpMap->SetFlagAfterBA();

    mLastLoopKFid = mpCurrentKF->mnId;
}
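The pose-correction step above converts a corrected similarity transform Siw = [sR t; 0 1] into the SE3 pose that the keyframe stores, by keeping the rotation and dividing the translation by the scale, giving [R t/s; 0 1]. Below is a minimal Eigen-only sketch of just that conversion; it is illustrative, not the library code (the example routes it through g2o::Sim3 and Converter::toCvSE3), and the helper name Sim3ToSE3 is made up here.

#include <Eigen/Core>

// Build the SE3 matrix [R t/s; 0 1] from Sim3 components (R, t, s),
// mirroring the "scale translation" step in CorrectLoop above.
Eigen::Matrix4d Sim3ToSE3(const Eigen::Matrix3d &R,
                          const Eigen::Vector3d &t,
                          double s)
{
    Eigen::Matrix4d T = Eigen::Matrix4d::Identity();
    T.topLeftCorner<3,3>()  = R;
    T.topRightCorner<3,1>() = t / s;
    return T;
}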
Example 3: CreateNewMapPoints
//......... part of the code omitted here .........
            const int idx2 = vMatchedIndices[ikp].second;

            const cv::KeyPoint &kp1 = vMatchedKeysUn1[ikp];
            const cv::KeyPoint &kp2 = vMatchedKeysUn2[ikp];

            // Check parallax between rays
            cv::Mat xn1 = (cv::Mat_<float>(3,1) << (kp1.pt.x-cx1)*invfx1, (kp1.pt.y-cy1)*invfy1, 1.0 );
            cv::Mat ray1 = Rwc1*xn1;
            cv::Mat xn2 = (cv::Mat_<float>(3,1) << (kp2.pt.x-cx2)*invfx2, (kp2.pt.y-cy2)*invfy2, 1.0 );
            cv::Mat ray2 = Rwc2*xn2;
            const float cosParallaxRays = ray1.dot(ray2)/(cv::norm(ray1)*cv::norm(ray2));

            if(cosParallaxRays<0 || cosParallaxRays>0.9998)
                continue;

            // Linear Triangulation Method
            cv::Mat A(4,4,CV_32F);
            A.row(0) = xn1.at<float>(0)*Tcw1.row(2)-Tcw1.row(0);
            A.row(1) = xn1.at<float>(1)*Tcw1.row(2)-Tcw1.row(1);
            A.row(2) = xn2.at<float>(0)*Tcw2.row(2)-Tcw2.row(0);
            A.row(3) = xn2.at<float>(1)*Tcw2.row(2)-Tcw2.row(1);

            cv::Mat w,u,vt;
            cv::SVD::compute(A,w,u,vt,cv::SVD::MODIFY_A | cv::SVD::FULL_UV);

            cv::Mat x3D = vt.row(3).t();

            if(x3D.at<float>(3)==0)
                continue;

            // Euclidean coordinates
            x3D = x3D.rowRange(0,3)/x3D.at<float>(3);
            cv::Mat x3Dt = x3D.t();

            // Check triangulation in front of cameras
            float z1 = Rcw1.row(2).dot(x3Dt)+tcw1.at<float>(2);
            if(z1<=0)
                continue;

            float z2 = Rcw2.row(2).dot(x3Dt)+tcw2.at<float>(2);
            if(z2<=0)
                continue;

            // Check reprojection error in first keyframe
            float sigmaSquare1 = mpCurrentKeyFrame->GetSigma2(kp1.octave);
            float x1 = Rcw1.row(0).dot(x3Dt)+tcw1.at<float>(0);
            float y1 = Rcw1.row(1).dot(x3Dt)+tcw1.at<float>(1);
            float invz1 = 1.0/z1;
            float u1 = fx1*x1*invz1+cx1;
            float v1 = fy1*y1*invz1+cy1;
            float errX1 = u1 - kp1.pt.x;
            float errY1 = v1 - kp1.pt.y;
            if((errX1*errX1+errY1*errY1)>5.991*sigmaSquare1)
                continue;

            // Check reprojection error in second keyframe
            float sigmaSquare2 = pKF2->GetSigma2(kp2.octave);
            float x2 = Rcw2.row(0).dot(x3Dt)+tcw2.at<float>(0);
            float y2 = Rcw2.row(1).dot(x3Dt)+tcw2.at<float>(1);
            float invz2 = 1.0/z2;
            float u2 = fx2*x2*invz2+cx2;
            float v2 = fy2*y2*invz2+cy2;
            float errX2 = u2 - kp2.pt.x;
            float errY2 = v2 - kp2.pt.y;
            if((errX2*errX2+errY2*errY2)>5.991*sigmaSquare2)
                continue;

            // Check scale consistency
            cv::Mat normal1 = x3D-Ow1;
            float dist1 = cv::norm(normal1);

            cv::Mat normal2 = x3D-Ow2;
            float dist2 = cv::norm(normal2);

            if(dist1==0 || dist2==0)
                continue;

            float ratioDist = dist1/dist2;
            float ratioOctave = mpCurrentKeyFrame->GetScaleFactor(kp1.octave)/pKF2->GetScaleFactor(kp2.octave);
            if(ratioDist*ratioFactor<ratioOctave || ratioDist>ratioOctave*ratioFactor)
                continue;
            // Triangulation is successful
            MapPoint* pMP = new MapPoint(x3D,mpCurrentKeyFrame,mpMap);

            pMP->AddObservation(pKF2,idx2);
            pMP->AddObservation(mpCurrentKeyFrame,idx1);

            mpCurrentKeyFrame->AddMapPoint(pMP,idx1);
            pKF2->AddMapPoint(pMP,idx2);

            pMP->ComputeDistinctiveDescriptors();
            pMP->UpdateNormalAndDepth();

            mpMap->AddMapPoint(pMP);
            mlpRecentAddedMapPoints.push_back(pMP);
        }
    }
}
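The heart of the example above is the linear (DLT) triangulation step: each view contributes two rows of the form x*P.row(2) - P.row(0) and y*P.row(2) - P.row(1), the four rows are stacked into A, and the 3D point is the right singular vector of A associated with the smallest singular value. The following is a self-contained sketch of just that step; it is illustrative only, assumes 3x4 float pose matrices Tcw1/Tcw2 and normalized camera coordinates xn1/xn2 as in the example, and the helper name TriangulateDLT is made up.

#include <cmath>
#include <opencv2/core.hpp>

// Illustrative DLT triangulation of one point from two views.
// xn1, xn2: 3x1 normalized coordinates; Tcw1, Tcw2: 3x4 world-to-camera poses (CV_32F).
cv::Mat TriangulateDLT(const cv::Mat &xn1, const cv::Mat &xn2,
                       const cv::Mat &Tcw1, const cv::Mat &Tcw2)
{
    cv::Mat A(4,4,CV_32F);
    A.row(0) = xn1.at<float>(0)*Tcw1.row(2)-Tcw1.row(0);
    A.row(1) = xn1.at<float>(1)*Tcw1.row(2)-Tcw1.row(1);
    A.row(2) = xn2.at<float>(0)*Tcw2.row(2)-Tcw2.row(0);
    A.row(3) = xn2.at<float>(1)*Tcw2.row(2)-Tcw2.row(1);

    cv::Mat w,u,vt;
    cv::SVD::compute(A,w,u,vt,cv::SVD::MODIFY_A | cv::SVD::FULL_UV);

    cv::Mat x3D = vt.row(3).t();                 // homogeneous solution
    if(std::fabs(x3D.at<float>(3)) < 1e-7f)      // point at (or near) infinity: no finite solution
        return cv::Mat();

    return x3D.rowRange(0,3)/x3D.at<float>(3);   // Euclidean coordinates
}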