This article collects typical usage examples of the C++ method MapPoint::addFeature. If you are unsure how MapPoint::addFeature is used in C++, or are looking for concrete examples of calling it, the curated code samples below may help. You can also read further usage examples of the enclosing class MapPoint.
Two code examples of MapPoint::addFeature are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
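Both examples call MapPoint::addFeature(camId, featurePoint) to attach a 2D feature observation to a newly triangulated 3D map point. The class itself is not shown on this page; the following is a minimal sketch of the interface as it can be inferred from the call sites in the two examples. Everything here is an assumption, not the project's actual header, and only members that the examples touch are listed.

// Hypothetical interface sketch, inferred from the call sites below.
class FeaturePoint;  // a 2D observation (image position, camera, frame, ...)

class MapPoint {
public:
    MapPoint(double x, double y, double z, int firstFrame);

    // Register the 2D observation `fp` made by camera `camId` for this
    // 3D point (how the association is stored is not visible in the examples).
    void addFeature(int camId, FeaturePoint* fp);

    void setLocalDynamic();   // Example 1: dynamic features seen in >1 camera
    void setUncertain();      // Example 1: otherwise, mark as not yet confirmed
    void setLocalStatic();    // Example 2: point reconstructed from a static track

    double cov[9];            // 3x3 triangulation covariance (row-major)
    int lastFrame;            // last frame in which the point was observed
};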
Example 1: reconstructTracks
int NewMapPtsNCC::reconstructTracks(Track2D tracks[], int numTracks,
        int curFrame, std::vector<MapPoint*>& mapPts, int minLen,
        double maxRpErr) {
    int num = 0;
    for (int k = 0; k < numTracks; k++) {
        if (tracks[k].length() >= minLen) {
            Mat_d ms(numCams, 2);
            Mat_d nms(numCams, 2);
            Mat_d Ks(numCams, 9);
            Mat_d Rs(numCams, 9);
            Mat_d Ts(numCams, 3);

            //collect the measurements and camera parameters along the track
            int npts = 0;
            for (Track2DNode* pTkNode = tracks[k].head.next; pTkNode;
                    pTkNode = pTkNode->next) {
                int camId = pTkNode->f;
                ms.data[2 * npts] = pTkNode->x;
                ms.data[2 * npts + 1] = pTkNode->y;
                //normalize the image coordinates of the feature points
                normPoint(m_invK[camId], ms.data + 2 * npts,
                        nms.data + 2 * npts);
                memcpy(Ks.data + 9 * npts, m_K[camId], sizeof(double) * 9);
                memcpy(Rs.data + 9 * npts, m_R[camId], sizeof(double) * 9);
                memcpy(Ts.data + 3 * npts, m_t[camId], sizeof(double) * 3);
                npts++;
            }

            double M[4];
            triangulateMultiView(npts, Rs.data, Ts.data, nms.data, M);

            //check the re-projection error in every view
            bool outlier = false;
            for (int i = 0; i < npts; i++) {
                double rm[2];
                project(Ks.data + 9 * i, Rs.data + 9 * i, Ts.data + 3 * i, M,
                        rm);
                double err = dist2(ms.data + 2 * i, rm);
                if (err > maxRpErr
                        || isAtCameraBack(Rs.data + 9 * i, Ts.data + 3 * i, M)) {
                    outlier = true;
                    break;
                }
            }

            //if it is an inlier, a new map point is generated
            if (!outlier) {
                MapPoint* pM = new MapPoint(M[0], M[1], M[2], curFrame);
                mapPts.push_back(pM);
                //get the triangulation covariance
                getTriangulateCovMat(npts, Ks.data, Rs.data, Ts.data, M,
                        pM->cov, Const::PIXEL_ERR_VAR);

                //register the 2D feature observed in each camera
                int nDyn = 0;
                for (Track2DNode* pTkNode = tracks[k].head.next; pTkNode;
                        pTkNode = pTkNode->next) {
                    int iCam = m_camGroup.camIds[pTkNode->f];
                    FeaturePoint* fp = pTkNode->pt;
                    fp->reprojErr = reprojErrorSingle(fp->K, fp->cam->R,
                            fp->cam->t, M, fp->m);
                    pM->addFeature(iCam, fp);
                    if (fp->type == TYPE_FEATPOINT_DYNAMIC)
                        nDyn++;
                }
                if (nDyn > 1) {
                    pM->setLocalDynamic();
                }
//              else if (nDyn == 1)
//                  pM->type = TYPE_MAPPOINT_UNCERTAIN;
//              else{
//                  pM->type = TYPE_MAPPOINT_UNCERTAIN;
//                  pM->newPt = true;
//              }
                else
                    pM->setUncertain();
                num++;
            }
        }
    }
    return num;
}
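Example 1 accepts a track only if every observation reprojects to within maxRpErr pixels of its measurement and the triangulated point lies in front of every camera. The project's project/reprojErrorSingle helpers are not shown on this page; the self-contained sketch below illustrates what such a pinhole reprojection-error check typically computes (the function name and the row-major K, R, t layout are assumptions, not the project's actual code).

#include <cmath>

// Reprojection-error sketch: project the 3D point M with intrinsics K,
// rotation R and translation t (all row-major), then return the pixel
// distance to the measured point m.
static double reprojErrorSketch(const double K[9], const double R[9],
                                const double t[3], const double M[3],
                                const double m[2]) {
    // Camera-frame coordinates: Xc = R * M + t
    double Xc[3];
    for (int i = 0; i < 3; ++i)
        Xc[i] = R[3 * i] * M[0] + R[3 * i + 1] * M[1] + R[3 * i + 2] * M[2]
                + t[i];

    // Pixel coordinates: (u, v, w) = K * Xc, then divide by the depth term w
    double u = K[0] * Xc[0] + K[1] * Xc[1] + K[2] * Xc[2];
    double v = K[3] * Xc[0] + K[4] * Xc[1] + K[5] * Xc[2];
    double w = K[6] * Xc[0] + K[7] * Xc[1] + K[8] * Xc[2];
    u /= w;
    v /= w;

    double dx = u - m[0], dy = v - m[1];
    return std::sqrt(dx * dx + dy * dy);
}

The companion isAtCameraBack test in the example presumably rejects points whose camera-frame depth Xc[2] is non-positive, since the reprojection error alone cannot distinguish a point in front of the camera from its mirror image behind it.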
Example 2: newMapPoints
int SingleSLAM::newMapPoints(std::vector<MapPoint*>& mapPts, double maxEpiErr,
        double maxNcc) {
    std::vector<FeaturePoint*> vecFeatPts;
    getUnMappedAndTrackedFeatPts(vecFeatPts, 0, Param::nMinFeatTrkLen);

    mapPts.clear();
    mapPts.reserve(4096);

    double M[3], m1[2], m2[2];

    //reconstruct 3D map points
    int numRecons = 0;
    for (size_t k = 0; k < vecFeatPts.size(); k++) {
        FeaturePoint* cur_fp = vecFeatPts[k];

        //walk back along the track to the earliest static observation
        //that still has a camera pose attached
        FeaturePoint* pre_fp = cur_fp;
        while (pre_fp->preFrame && pre_fp->preFrame->cam) {
            if (pre_fp->type == TYPE_FEATPOINT_DYNAMIC) {
                break;
            }
            pre_fp = pre_fp->preFrame;
        }
        if (pre_fp->type == TYPE_FEATPOINT_DYNAMIC || !pre_fp->cam)
            continue;

        normPoint(iK.data, pre_fp->m, m1);
        normPoint(iK.data, cur_fp->m, m2);

        //triangulate the two feature points to get the 3D point
        binTriangulate(pre_fp->cam->R, pre_fp->cam->t, cur_fp->cam->R,
                cur_fp->cam->t, m1, m2, M);

        if (isAtCameraBack(cur_fp->cam->R, cur_fp->cam->t, M))
            continue;

        double cov[9], org[3];
        getBinTriangulateCovMat(K.data, pre_fp->cam->R, pre_fp->cam->t, K.data,
                cur_fp->cam->R, cur_fp->cam->t, M, cov, Const::PIXEL_ERR_VAR);
        getCameraCenter(cur_fp->cam->R, cur_fp->cam->t, org);

        //skip points whose triangulation uncertainty is comparable to
        //their distance from the camera center
        double s = fabs(cov[0] + cov[4] + cov[8]);
        if (dist3(org, M) < sqrt(s))
            continue;

        //check the reprojection error
        double err1 = reprojErrorSingle(K.data, pre_fp->cam->R, pre_fp->cam->t,
                M, pre_fp->m);
        double err2 = reprojErrorSingle(K.data, cur_fp->cam->R, cur_fp->cam->t,
                M, cur_fp->m);

        if (err1 < maxEpiErr && err2 < maxEpiErr) {
            //a new map point is generated
            refineTriangulation(cur_fp, M, cov);
            err1 = reprojErrorSingle(K.data, pre_fp->cam->R, pre_fp->cam->t, M,
                    pre_fp->m);
            err2 = reprojErrorSingle(K.data, cur_fp->cam->R, cur_fp->cam->t, M,
                    cur_fp->m);
            if (isAtCameraBack(cur_fp->cam->R, cur_fp->cam->t, M)
                    || isAtCameraBack(pre_fp->cam->R, pre_fp->cam->t, M))
                continue;
            if (err1 < maxEpiErr && err2 < maxEpiErr) {
                MapPoint* pM = new MapPoint(M[0], M[1], M[2], pre_fp->f);
                doubleArrCopy(pM->cov, 0, cov, 9);
                mapPts.push_back(pM);
                pM->lastFrame = cur_fp->f;

                //link every feature point on the track to the new map point
                for (FeaturePoint* p = cur_fp; p; p = p->preFrame)
                    p->mpt = pM;

                //add the feature point
                pM->addFeature(camId, cur_fp);
                pM->setLocalStatic();

                //compute the NCC block used for later matching
                pM->nccBlks[camId].computeScaled(m_lastKeyPos->imgSmall,
                        m_lastKeyPos->imgScale, cur_fp->x, cur_fp->y);

                //sample the map point color from the current RGB frame
                int x = max(0, min((int) cur_fp->x, m_rgb.w - 1));
                int y = max(0, min((int) cur_fp->y, m_rgb.h - 1));
                pM->setColor(m_rgb(x, y));
                numRecons++;
            }
        }
    }
    return numRecons;
}
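Example 2 relies on binTriangulate to recover a 3D point from two normalized observations of the same feature in different frames. The project's implementation is not shown on this page; the following is a minimal DLT-style sketch of such a two-view triangulation, using Eigen for the linear algebra (an assumption purely for the sketch, the original code does not necessarily depend on Eigen).

#include <Eigen/Dense>

// Linear (DLT) two-view triangulation sketch on normalized image
// coordinates, analogous in spirit to the binTriangulate call above.
// R1,t1 and R2,t2 are the two camera poses, m1/m2 are normalized
// coordinates (already multiplied by K^-1), M receives the 3D point
// in world coordinates. Degenerate points at infinity are ignored here.
static void binTriangulateSketch(const double R1[9], const double t1[3],
                                 const double R2[9], const double t2[3],
                                 const double m1[2], const double m2[2],
                                 double M[3]) {
    // Build the 3x4 projection matrices P = [R | t] of the normalized cameras.
    Eigen::Matrix<double, 3, 4> P1, P2;
    for (int r = 0; r < 3; ++r) {
        for (int c = 0; c < 3; ++c) {
            P1(r, c) = R1[3 * r + c];
            P2(r, c) = R2[3 * r + c];
        }
        P1(r, 3) = t1[r];
        P2(r, 3) = t2[r];
    }

    // Each view contributes two linear constraints on the homogeneous point:
    // x * P.row(2) - P.row(0) = 0 and y * P.row(2) - P.row(1) = 0.
    Eigen::Matrix4d A;
    A.row(0) = m1[0] * P1.row(2) - P1.row(0);
    A.row(1) = m1[1] * P1.row(2) - P1.row(1);
    A.row(2) = m2[0] * P2.row(2) - P2.row(0);
    A.row(3) = m2[1] * P2.row(2) - P2.row(1);

    // The solution is the right singular vector of the smallest singular value.
    Eigen::JacobiSVD<Eigen::Matrix4d> svd(A, Eigen::ComputeFullV);
    Eigen::Vector4d X = svd.matrixV().col(3);
    M[0] = X(0) / X(3);
    M[1] = X(1) / X(3);
    M[2] = X(2) / X(3);
}

After this linear estimate, Example 2 additionally calls refineTriangulation and re-checks the reprojection errors and the cheirality constraint before committing the point, which is why the error test appears twice in the function.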