This article collects typical usage examples of the FramePtr::get method in C++. If you are wondering what exactly FramePtr::get does, how to call it, and what it looks like in real code, the curated examples below may help. You can also explore further how the FramePtr class, to which this method belongs, is used.
The following shows 15 code examples of the FramePtr::get method, sorted by popularity by default.
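Before diving into the examples: in the projects sampled here, FramePtr is generally a reference-counted smart-pointer alias for a Frame class, and get() hands back the raw Frame* without transferring or releasing ownership. The minimal sketch below assumes FramePtr is simply std::shared_ptr<Frame> with hypothetical Frame/DepthFrame types; the real libraries may define their own pointer wrapper, but the get() semantics the examples rely on are the same.
#include <iostream>
#include <memory>

// Hypothetical frame hierarchy, for illustration only.
struct Frame { virtual ~Frame() = default; };
struct DepthFrame : Frame { int id = 42; };

using FramePtr = std::shared_ptr<Frame>;   // assumption: FramePtr is a shared_ptr alias

void inspect(const FramePtr &in)
{
    // get() returns the raw pointer without touching the reference count,
    // so it can be used for borrowing downcasts exactly as in the examples below.
    if (DepthFrame *depth = dynamic_cast<DepthFrame *>(in.get()))
        std::cout << "depth frame, id = " << depth->id << std::endl;
}

int main()
{
    FramePtr frame = std::make_shared<DepthFrame>();
    inspect(frame);   // 'frame' keeps ownership; inspect() only borrows the pointer
}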
Example 1: _filter
bool SmoothFilter::_filter(const FramePtr &in, FramePtr &out)
{
ToFRawFrame *tofFrame = dynamic_cast<ToFRawFrame *>(in.get());
DepthFrame *depthFrame = dynamic_cast<DepthFrame *>(in.get());
if((!tofFrame && !depthFrame) || !_prepareOutput(in, out))
{
logger(LOG_ERROR) << "IIRFilter: Input frame type is not ToFRawFrame or DepthFrame or failed get the output ready" << std::endl;
return false;
}
if(tofFrame)
{
_size = tofFrame->size;
ToFRawFrame *o = dynamic_cast<ToFRawFrame *>(out.get());
if(!o)
{
logger(LOG_ERROR) << "IIRFilter: Invalid frame type. Expecting ToFRawFrame." << std::endl;
return false;
}
//logger(LOG_INFO) << "IIRFilter: Applying filter with gain = " << _gain << " to ToFRawFrame id = " << tofFrame->id << std::endl;
uint s = _size.width*_size.height;
memcpy(o->ambient(), tofFrame->ambient(), s*tofFrame->ambientWordWidth());
memcpy(o->amplitude(), tofFrame->amplitude(), s*tofFrame->amplitudeWordWidth());
memcpy(o->flags(), tofFrame->flags(), s*tofFrame->flagsWordWidth());
if(tofFrame->phaseWordWidth() == 2)
return _filter<uint16_t>((uint16_t *)tofFrame->phase(), (uint16_t *)o->phase());
else if(tofFrame->phaseWordWidth() == 1)
return _filter<uint8_t>((uint8_t *)tofFrame->phase(), (uint8_t *)o->phase());
else if(tofFrame->phaseWordWidth() == 4)
return _filter<uint32_t>((uint32_t *)tofFrame->phase(), (uint32_t *)o->phase());
else
return false;
}
else if(depthFrame)
{
_size = depthFrame->size;
DepthFrame *o = dynamic_cast<DepthFrame *>(out.get());
if(!o)
{
logger(LOG_ERROR) << "IIRFilter: Invalid frame type. Expecting DepthFrame." << std::endl;
return false;
}
o->amplitude = depthFrame->amplitude;
return _filter<float>(depthFrame->depth.data(), o->depth.data());
}
else
return false;
}
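Example 1 uses dynamic_cast on in.get() and out.get() to discover the concrete frame type while the FramePtr keeps ownership. As a side note (assuming FramePtr is a std::shared_ptr alias, which the library above does not guarantee), the same test can also be written with std::dynamic_pointer_cast when the downcast result should share ownership instead of being a borrowed raw pointer:
#include <memory>

struct Frame { virtual ~Frame() = default; };   // hypothetical types, for illustration
struct ToFRawFrame : Frame {};
struct DepthFrame  : Frame {};
using FramePtr = std::shared_ptr<Frame>;        // assumption: shared_ptr alias

bool classify(const FramePtr &in)
{
    // Borrowing downcast, as in Example 1: the pointer is valid only while 'in'
    // (or another owner) keeps the frame alive.
    ToFRawFrame *tof = dynamic_cast<ToFRawFrame *>(in.get());

    // Owning downcast: 'depth' keeps the frame alive even if 'in' is reset later.
    std::shared_ptr<DepthFrame> depth = std::dynamic_pointer_cast<DepthFrame>(in);

    return tof != nullptr || depth != nullptr;
}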
Example 2: apply
void EffectMosaic::apply(FramePtr &src)
{
for(int i=0; i < src->getHeight(); i+=mMosaicSize) {
for(int j=0; j < src->getWidth(); j+=mMosaicSize) {
auto pixel = AverageColor(src.get(), i, j);
SetColor(src.get(), j, i, pixel);
}
}
}
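Example 2 hands src.get() to AverageColor and SetColor for every block. The usual reason for this pattern is that helpers which merely read or write pixels do not need to participate in ownership, so their parameters are plain Frame* rather than FramePtr. The sketch below illustrates that convention with a hypothetical pixel buffer and stand-in helpers; the real signatures in the project may differ:
#include <cstdint>
#include <memory>
#include <vector>

// Hypothetical minimal frame with a flat pixel buffer, for illustration only.
struct Frame {
    int width = 8, height = 8;
    std::vector<uint32_t> pixels = std::vector<uint32_t>(64, 0);
    int getWidth()  const { return width; }
    int getHeight() const { return height; }
};
using FramePtr = std::shared_ptr<Frame>;            // assumption: shared_ptr alias

// Non-owning helpers: they only borrow the frame for the duration of the call.
static uint32_t AverageColor(const Frame *f, int row, int col)
{
    return f->pixels[row * f->getWidth() + col];    // stand-in for a real block average
}
static void SetColor(Frame *f, int col, int row, uint32_t color)
{
    f->pixels[row * f->getWidth() + col] = color;
}

void applyMosaic(FramePtr &src, int mosaicSize)
{
    for (int i = 0; i < src->getHeight(); i += mosaicSize)
        for (int j = 0; j < src->getWidth(); j += mosaicSize)
            SetColor(src.get(), j, i, AverageColor(src.get(), i, j));
}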
Example 3: initializeSeeds
void DepthFilter::initializeSeeds(FramePtr frame)
{
list<PointFeat*> new_pt_features;
list<LineFeat*> new_seg_features;
if(Config::hasPoints())
{
/* detect new point features in point-unpopulated cells of the grid */
// populate the occupancy grid of the detector with current features
pt_feature_detector_->setExistingFeatures(frame->pt_fts_);
// detect features to fill the free cells in the image
pt_feature_detector_->detect(frame.get(), frame->img_pyr_,
Config::triangMinCornerScore(), new_pt_features);
}
if(Config::hasLines())
{
/* detect new segment features in line-unpopulated cells of the grid */
// populate the occupancy grid of the detector with current features
seg_feature_detector_->setExistingFeatures(frame->seg_fts_);
// detect features
seg_feature_detector_->detect(frame.get(), frame->img_pyr_,
Config::lsdMinLength(), new_seg_features);
}
// lock the parallel updating thread?
seeds_updating_halt_ = true;
lock_t lock(seeds_mut_); // by locking the updateSeeds function stops
// increase by one the keyframe counter (to account for this new one)
++PointSeed::batch_counter;
// initialize a point seed for every new point feature
std::for_each(new_pt_features.begin(), new_pt_features.end(), [&](PointFeat* ftr){
pt_seeds_.push_back(PointSeed(ftr, new_keyframe_mean_depth_, new_keyframe_min_depth_));
});
// initialize a segment seed for every new segment feature
std::for_each(new_seg_features.begin(), new_seg_features.end(), [&](LineFeat* ftr){
seg_seeds_.push_back(LineSeed(ftr, new_keyframe_mean_depth_, new_keyframe_min_depth_));
});
if(options_.verbose)
SVO_INFO_STREAM("DepthFilter: Initialized "<<new_pt_features.size()<<" new point seeds and "
<<new_seg_features.size()<<" line segment seeds.");
seeds_updating_halt_ = false;
}
Example 4: addSecondFrame
InitResult KltHomographyInit::addSecondFrame(FramePtr frame_cur)
{
trackKlt(frame_ref_, frame_cur, px_ref_, px_cur_, f_ref_, f_cur_, disparities_);
SVO_INFO_STREAM("Init: KLT tracked "<< disparities_.size() <<" features");
if(disparities_.size() < Config::initMinTracked())
return FAILURE;
double disparity = vk::getMedian(disparities_);
SVO_INFO_STREAM("Init: KLT "<<disparity<<"px average disparity.");
if(disparity < Config::initMinDisparity())
return NO_KEYFRAME;
computeHomography(
f_ref_, f_cur_,
frame_ref_->cam_->errorMultiplier2(), Config::poseOptimThresh(),
inliers_, xyz_in_cur_, T_cur_from_ref_);
SVO_INFO_STREAM("Init: Homography RANSAC "<<inliers_.size()<<" inliers.");
if(inliers_.size() < Config::initMinInliers())
{
SVO_WARN_STREAM("Init WARNING: "<<Config::initMinInliers()<<" inliers minimum required.");
return FAILURE;
}
// Rescale the map such that the mean scene depth is equal to the specified scale
vector<double> depth_vec;
for(size_t i=0; i<xyz_in_cur_.size(); ++i)
depth_vec.push_back((xyz_in_cur_[i]).z());
double scene_depth_median = vk::getMedian(depth_vec);
double scale = Config::mapScale()/scene_depth_median;
frame_cur->T_f_w_ = T_cur_from_ref_ * frame_ref_->T_f_w_;
frame_cur->T_f_w_.translation() =
-frame_cur->T_f_w_.rotation_matrix()*(frame_ref_->pos() + scale*(frame_cur->pos() - frame_ref_->pos()));
// For each inlier create 3D point and add feature in both frames
SE3 T_world_cur = frame_cur->T_f_w_.inverse();
for(vector<int>::iterator it=inliers_.begin(); it!=inliers_.end(); ++it)
{
Vector2d px_cur(px_cur_[*it].x, px_cur_[*it].y);
Vector2d px_ref(px_ref_[*it].x, px_ref_[*it].y);
if(frame_ref_->cam_->isInFrame(px_cur.cast<int>(), 10) && frame_ref_->cam_->isInFrame(px_ref.cast<int>(), 10) && xyz_in_cur_[*it].z() > 0)
{
Vector3d pos = T_world_cur * (xyz_in_cur_[*it]*scale);
Point* new_point = new Point(pos);
Feature* ftr_cur(new Feature(frame_cur.get(), new_point, px_cur, f_cur_[*it], 0));
frame_cur->addFeature(ftr_cur);
new_point->addFrameRef(ftr_cur);
Feature* ftr_ref(new Feature(frame_ref_.get(), new_point, px_ref, f_ref_[*it], 0));
frame_ref_->addFeature(ftr_ref);
new_point->addFrameRef(ftr_ref);
}
}
return SUCCESS;
}
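In Example 4 each new Feature receives frame_cur.get() and keeps it as a plain back-pointer. That is safe only because a FramePtr to the same frame is also stored elsewhere (the frames become keyframes of the map), so the raw pointer never outlives its owner. A hedged sketch of this back-pointer convention, using hypothetical Frame/Feature types and std::shared_ptr as the assumed FramePtr:
#include <memory>
#include <vector>

struct Frame;                                   // hypothetical types, for illustration
using FramePtr = std::shared_ptr<Frame>;        // assumption: shared_ptr alias

struct Feature {
    Frame *frame;                               // non-owning back-pointer, as in Example 4
    explicit Feature(Frame *f) : frame(f) {}
};

struct Frame {
    std::vector<std::unique_ptr<Feature>> features;
    void addFeature(Feature *ftr) { features.emplace_back(ftr); }
};

int main()
{
    std::vector<FramePtr> keyframes;            // owners that keep the frames alive
    FramePtr frame = std::make_shared<Frame>();
    keyframes.push_back(frame);

    // The feature only borrows the pointer; lifetime is guaranteed by 'keyframes'.
    frame->addFeature(new Feature(frame.get()));
}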
Example 5: getSeedsCopy
void DepthFilter::getSeedsCopy(const FramePtr& frame, std::list<Seed>& seeds)
{
lock_t lock(seeds_mut_);
for(std::list<Seed>::iterator it=seeds_.begin(); it!=seeds_.end(); ++it)
{
if (it->ftr->frame == frame.get())
seeds.push_back(*it);
}
}
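Example 5 shows another common use of get(): comparing a stored raw pointer against the managed pointer to test whether both refer to the same frame object. Under the assumption that FramePtr is a std::shared_ptr alias, the address comparison below is equivalent to what getSeedsCopy does, and it neither copies the frame nor touches the reference count:
#include <memory>

struct Frame {};                              // hypothetical type, for illustration
using FramePtr = std::shared_ptr<Frame>;      // assumption: shared_ptr alias

struct Seed {
    Frame *frame;                             // raw pointer recorded when the seed was created
};

bool belongsTo(const Seed &seed, const FramePtr &frame)
{
    // Pointer identity: true only if both handles refer to the very same Frame object.
    return seed.frame == frame.get();
}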
Example 6: generate
bool PointCloudFrameGenerator::generate(const FramePtr &in, FramePtr &out)
{
const DepthFrame *depthFrame = dynamic_cast<const DepthFrame *>(in.get());
if(!depthFrame)
{
logger(LOG_ERROR) << "PointCloudFrameGenerator: Only DepthFrame type is supported as input to generate() function." << std::endl;
return false;
}
XYZIPointCloudFrame *f = dynamic_cast<XYZIPointCloudFrame *>(out.get());
if(!f)
{
f = new XYZIPointCloudFrame();
out = FramePtr(f);
}
f->id = depthFrame->id;
f->timestamp = depthFrame->timestamp;
f->points.resize(depthFrame->size.width*depthFrame->size.height);
if(!_pointCloudTransform->depthToPointCloud(depthFrame->depth, *f))
{
logger(LOG_ERROR) << "DepthCamera: Could not convert depth frame to point cloud frame" << std::endl;
return false;
}
// Setting amplitude as intensity
auto index = 0;
auto w = depthFrame->size.width;
auto h = depthFrame->size.height;
for(auto y = 0; y < h; y++)
for(auto x = 0; x < w; x++, index++)
{
IntensityPoint &p = f->points[index];
p.i = depthFrame->amplitude[index];
}
return true;
}
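Example 6 illustrates the reuse-or-allocate pattern: dynamic_cast on out.get() checks whether the caller already supplied a frame of the right concrete type; if not, a fresh object is allocated and handed to out via FramePtr(f), which adopts ownership of the new pointer. A hedged sketch of the pattern with hypothetical types, FramePtr again assumed to be a std::shared_ptr alias. Note that only a pointer you just allocated may be wrapped this way; wrapping a pointer obtained from another FramePtr's get() would lead to a double delete:
#include <memory>

struct Frame { virtual ~Frame() = default; };        // hypothetical types, for illustration
struct PointCloudFrame : Frame { int id = 0; };
using FramePtr = std::shared_ptr<Frame>;             // assumption: shared_ptr alias

bool prepareOutput(FramePtr &out)
{
    PointCloudFrame *f = dynamic_cast<PointCloudFrame *>(out.get());
    if (!f)
    {
        f = new PointCloudFrame();   // freshly allocated, so FramePtr may adopt it
        out = FramePtr(f);           // 'out' now owns (and will eventually delete) f
    }
    f->id = 1;                       // from here on, work through the raw pointer
    return true;
}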
Example 7: reprojectCell
bool Reprojector::reprojectCell(Cell& cell, FramePtr frame)
{
cell.sort(boost::bind(&Reprojector::pointQualityComparator, _1, _2));
Cell::iterator it=cell.begin();
while(it!=cell.end())
{
++n_trials_;
if(it->pt->type_ == Point::TYPE_DELETED)
{
it = cell.erase(it);
continue;
}
bool found_match = true;
if(options_.find_match_direct)
found_match = matcher_.findMatchDirect(*it->pt, *frame, it->px);
if(!found_match)
{
it->pt->n_failed_reproj_++;
if(it->pt->type_ == Point::TYPE_UNKNOWN && it->pt->n_failed_reproj_ > 15)
map_.safeDeletePoint(it->pt);
if(it->pt->type_ == Point::TYPE_CANDIDATE && it->pt->n_failed_reproj_ > 30)
map_.point_candidates_.deleteCandidatePoint(it->pt);
it = cell.erase(it);
continue;
}
it->pt->n_succeeded_reproj_++;
if(it->pt->type_ == Point::TYPE_UNKNOWN && it->pt->n_succeeded_reproj_ > 10)
it->pt->type_ = Point::TYPE_GOOD;
Feature* new_feature = new Feature(frame.get(), it->px, matcher_.search_level_);
frame->addFeature(new_feature);
// Here we add a reference in the feature to the 3D point, the other way
// round is only done if this frame is selected as keyframe.
new_feature->point = it->pt;
if(matcher_.ref_ftr_->type == Feature::EDGELET)
{
new_feature->type = Feature::EDGELET;
new_feature->grad = matcher_.A_cur_ref_*matcher_.ref_ftr_->grad;
new_feature->grad.normalize();
}
// If the keyframe is selected and we reproject the rest, we don't have to
// check this point anymore.
it = cell.erase(it);
// Maximum one point per cell.
return true;
}
return false;
}
Example 8: removeKeyframe
void DepthFilter::removeKeyframe(FramePtr frame)
{
seeds_updating_halt_ = true;
lock_t lock(seeds_mut_);
list<Seed>::iterator it=seeds_.begin();
size_t n_removed = 0;
while(it!=seeds_.end())
{
if(it->ftr->frame == frame.get())
{
it = seeds_.erase(it);
++n_removed;
}
else
++it;
}
seeds_updating_halt_ = false;
}
Example 9: initializeSeeds
void DepthFilter::initializeSeeds(FramePtr frame)
{
Features new_features;
feature_detector_->setExistingFeatures(frame->fts_);
feature_detector_->detect(frame.get(), frame->img_pyr_,
Config::triangMinCornerScore(), new_features);
// initialize a seed for every new feature
seeds_updating_halt_ = true;
lock_t lock(seeds_mut_); // by locking the updateSeeds function stops
++Seed::batch_counter;
std::for_each(new_features.begin(), new_features.end(), [&](Feature* ftr){
seeds_.push_back(Seed(ftr, new_keyframe_mean_depth_, new_keyframe_min_depth_));
});
if(options_.verbose)
SVO_INFO_STREAM("DepthFilter: Initialized "<<new_features.size()<<" new seeds");
seeds_updating_halt_ = false;
}
Example 10: detectFeatures
void detectFeatures(
FramePtr frame,
vector<cv::Point2f>& px_vec,
vector<Vector3d>& f_vec)
{
Features new_features;
feature_detection::FastDetector detector(
frame->img().cols, frame->img().rows, Config::gridSize(), Config::nPyrLevels());
detector.detect(frame.get(), frame->img_pyr_, Config::triangMinCornerScore(), new_features);
// now for all maximum corners, initialize a new seed
px_vec.clear(); px_vec.reserve(new_features.size());
f_vec.clear(); f_vec.reserve(new_features.size());
std::for_each(new_features.begin(), new_features.end(), [&](Feature* ftr){
px_vec.push_back(cv::Point2f(ftr->px[0], ftr->px[1]));
f_vec.push_back(ftr->f);
delete ftr;
});
}
Example 11: detectFeatures
/// Detect FAST corners; outputs the corresponding points and their direction (bearing) vectors (which can be regarded as the back-projected coordinates of the points)
void Initialization::detectFeatures(
FramePtr frame,
std::vector<cv::Point2f>& px_vec,
std::vector<Vector3d>& f_vec)
{
Features new_features;
FastDetector detector(
frame->img().cols, frame->img().rows, 25, 3);
detector.detect(frame.get(), frame->img_pyr_, 20.0, new_features);
// return the feature positions and the features' unit (bearing) vectors
px_vec.clear(); px_vec.reserve(new_features.size());
f_vec.clear(); f_vec.reserve(new_features.size());
std::for_each(new_features.begin(), new_features.end(), [&](Feature* ftr){
px_vec.push_back(cv::Point2f(ftr->px[0], ftr->px[1]));
f_vec.push_back(ftr->f);
delete ftr;
});
}
示例12: DisplayFrame
void DisplayFrame(Frame* pFrame)
{
if(pFrame != NULL && pFrame->HasValidDataPtr()) {
if(GetFrameManager()->Owns(*pFrame)) {
DoRender(pFrame);
}
else {
FramePtr pTempFrame = GetReservedFrame();
if(pTempFrame && pFrame->GetDataSize() == pTempFrame->GetDataSize()) {
utils::image::Copy(pTempFrame->GetDataPtr(), pFrame->GetDataPtr(), pTempFrame->GetDataSize());
DoRender(pTempFrame.get());
}
else
LOG << TEXT("DECKLINK: Failed to get reserved frame");
}
}
else {
LOG << TEXT("DECKLINK: Tried to render frame with no data");
}
}
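Example 12 passes pTempFrame.get() into DoRender(). The raw pointer is only guaranteed to be valid while pTempFrame (and whatever GetReservedFrame() returned) is still alive, so the call must complete before the FramePtr goes out of scope, and storing that raw pointer beyond the call would be unsafe. A minimal sketch of this rule, with a hypothetical render function and FramePtr assumed to be std::shared_ptr:
#include <memory>

struct Frame { int data = 0; };                 // hypothetical type, for illustration
using FramePtr = std::shared_ptr<Frame>;        // assumption: shared_ptr alias

void DoRender(Frame *frame) { (void)frame; }    // hypothetical non-owning renderer

void display()
{
    FramePtr pTempFrame = std::make_shared<Frame>();
    DoRender(pTempFrame.get());                 // safe: pTempFrame outlives the call

    // Unsafe by contrast (do not do this): the temporary owner dies at the end of
    // the statement, leaving a dangling raw pointer behind.
    // Frame *dangling = std::make_shared<Frame>().get();
}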
Example 13: addSecondFrame
InitResult Initialization::addSecondFrame(FramePtr frame_cur)
{
trackKlt(frame_ref_, frame_cur, px_ref_, px_cur_, f_ref_, f_cur_, disparities_);
std::cout << "Init: KLT tracked " << disparities_.size() << " features" << std::endl;
// number of features successfully tracked by optical flow
if (disparities_.size() < 50)
return FAILURE;
// median of the per-feature pixel disparities between the two frames after KLT tracking
double disparity = getMedian(disparities_);
std::cout << "Init: KLT " << disparity << "px average disparity." << std::endl;
// if the median disparity is below the configured threshold, this frame is not a keyframe, i.e. at the start the two frames must not be too close together
if (disparity < 50.0)
return NO_KEYFRAME;
// compute the homography
computeHomography(
f_ref_, f_cur_,
frame_ref_->cam_->getFocalLength(), 2.0,
inliers_, xyz_in_cur_, T_cur_from_ref_);
std::cout << "Init: Homography RANSAC " << inliers_.size() << " inliers." << std::endl;
// after computing the homography, use the number of inliers to decide whether tracking succeeded
if (inliers_.size() < 40)
{
std::cerr << "Init WARNING: 40 inliers minimum required." << std::endl;
return FAILURE;
}
// using the homography, triangulate 3D points from the feature matches between the two frames, take the median of their depths, and convert to the specified scale
std::vector<double> depth_vec;
for (size_t i = 0; i < xyz_in_cur_.size(); ++i)
depth_vec.push_back((xyz_in_cur_[i]).z());
double scene_depth_median = getMedian(depth_vec);
double scale = 1.0 / scene_depth_median;
// compute the relative SE3 transform
frame_cur->T_f_w_ = T_cur_from_ref_ * frame_ref_->T_f_w_;
// apply the scale to the translation
frame_cur->T_f_w_.translation() =
-frame_cur->T_f_w_.rotation_matrix()*(frame_ref_->pos() + scale*(frame_cur->pos() - frame_ref_->pos()));
// for each inlier, create a 3D point, set up its features, and add them to both frames
SE3 T_world_cur = frame_cur->T_f_w_.inverse();
for (std::vector<int>::iterator it = inliers_.begin(); it != inliers_.end(); ++it)
{
Vector2d px_cur(px_cur_[*it].x, px_cur_[*it].y);
Vector2d px_ref(px_ref_[*it].x, px_ref_[*it].y);
if (frame_ref_->cam_->isInFrame(px_cur.cast<int>(), 10) && frame_ref_->cam_->isInFrame(px_ref.cast<int>(), 10) && xyz_in_cur_[*it].z() > 0)
{
Vector3d pos = T_world_cur * (xyz_in_cur_[*it] * scale); // transform the point from camera coordinates to world coordinates
Point3D *new_point = new Point3D(pos);
Feature* ftr_cur = new Feature(frame_cur.get(), new_point, px_cur, f_cur_[*it], 0);
frame_cur->addFeature(ftr_cur);
// keep the features that observe the same point together, so that when the point is deleted its features can be deleted as well
new_point->addFrameRef(ftr_cur);
Feature* ftr_ref = new Feature(frame_ref_.get(), new_point, px_ref, f_ref_[*it], 0);
frame_ref_->addFeature(ftr_ref);
new_point->addFrameRef(ftr_ref);
}
}
return SUCCESS;
}
Example 14: run
/**
* @brief
*
* @return
*/
int H264Encoder::run()
{
// TODO - This section needs to be rewritten to read the configuration from the values saved
// for the streams via the web gui
AVDictionary *opts = NULL;
//avSetH264Preset( &opts, "default" );
//avSetH264Profile( &opts, "main" );
//avDictSet( &opts, "level", "4.1" );
avSetH264Preset( &opts, "ultrafast" );
//avSetH264Profile( &opts, "baseline" );
avDictSet( &opts, "level", "31" );
avDictSet( &opts, "g", "24" );
//avDictSet( &opts, "b", (int)mBitRate );
//avDictSet( &opts, "bitrate", (int)mBitRate );
//avDictSet( &opts, "crf", "24" );
//avDictSet( &opts, "framerate", (double)mFrameRate );
//avDictSet( &opts, "fps", (double)mFrameRate );
//avDictSet( &opts, "r", (double)mFrameRate );
//avDictSet( &opts, "timebase", "1/90000" );
avDumpDict( opts );
// Make sure ffmpeg is compiled with libx264 support
AVCodec *codec = avcodec_find_encoder( CODEC_ID_H264 );
if ( !codec )
Fatal( "Can't find encoder codec" );
mCodecContext = avcodec_alloc_context3( codec );
mCodecContext->width = mWidth;
mCodecContext->height = mHeight;
//mCodecContext->time_base = TimeBase( 1, 90000 );
mCodecContext->time_base = mFrameRate.timeBase();
mCodecContext->bit_rate = mBitRate;
mCodecContext->pix_fmt = mPixelFormat;
mCodecContext->gop_size = 24;
//mCodecContext->max_b_frames = 1;
Debug( 2, "Time base = %d/%d", mCodecContext->time_base.num, mCodecContext->time_base.den );
Debug( 2, "Pix fmt = %d", mCodecContext->pix_fmt );
/* open it */
if ( avcodec_open2( mCodecContext, codec, &opts ) < 0 )
Fatal( "Unable to open encoder codec" );
avDumpDict( opts );
AVFrame *inputFrame = avcodec_alloc_frame();
Info( "%s:Waiting", cidentity() );
if ( waitForProviders() )
{
Info( "%s:Waited", cidentity() );
// Find the source codec context
uint16_t inputWidth = videoProvider()->width();
uint16_t inputHeight = videoProvider()->height();
PixelFormat inputPixelFormat = videoProvider()->pixelFormat();
//FrameRate inputFrameRate = videoProvider()->frameRate();
//Info( "CONVERT: %d-%dx%d -> %d-%dx%d",
//inputPixelFormat, inputWidth, inputHeight,
//mPixelFormat, mWidth, mHeight
//);
// Make space for anything that is going to be output
AVFrame *outputFrame = avcodec_alloc_frame();
ByteBuffer outputBuffer;
outputBuffer.size( avpicture_get_size( mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height ) );
avpicture_fill( (AVPicture *)outputFrame, outputBuffer.data(), mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height );
// Prepare for image format and size conversions
struct SwsContext *convertContext = sws_getContext( inputWidth, inputHeight, inputPixelFormat, mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL );
if ( !convertContext )
Fatal( "Unable to create conversion context for encoder" );
int outSize = 0;
uint64_t timeInterval = mFrameRate.intervalUsec();
uint64_t currTime = time64();
uint64_t nextTime = currTime;
//outputFrame->pts = currTime;
outputFrame->pts = 0;
uint32_t ptsInterval = 90000/mFrameRate.toInt();
//uint32_t ptsInterval = mFrameRate.intervalPTS( mCodecContext->time_base );
while ( !mStop )
{
// Synchronise the output with the desired output frame rate
while ( currTime < nextTime )
{
currTime = time64();
usleep( 1000 );
}
nextTime += timeInterval;
FramePtr framePtr;
mQueueMutex.lock();
if ( !mFrameQueue.empty() )
//......... part of the code omitted here .........
Example 15: addSecondFrame
InitResult KltHomographyInit::addSecondFrame(FramePtr frame_cur, Matrix3d orient, Vector3d pos)
{
trackKlt(frame_ref_, frame_cur, px_ref_, px_cur_, f_ref_, f_cur_, disparities_);
SVO_INFO_STREAM("Init: KLT tracked "<< disparities_.size() <<" features");
if(disparities_.size() < Config::initMinTracked())
return FAILURE;
double disparity = vk::getMedian(disparities_);
SVO_INFO_STREAM("Init: KLT "<<disparity<<"px average disparity.");
if(disparity < Config::initMinDisparity())
return NO_KEYFRAME;
computeHomography(
f_ref_, f_cur_,
frame_ref_->cam_->errorMultiplier2(), Config::poseOptimThresh(),
inliers_, xyz_in_cur_, T_cur_from_ref_);
SVO_INFO_STREAM("Init: Homography RANSAC "<<inliers_.size()<<" inliers.");
if(inliers_.size() < Config::initMinInliers())
{
SVO_WARN_STREAM("Init WARNING: "<<Config::initMinInliers()<<" inliers minimum required.");
return FAILURE;
}
// Transformation in real world
T_cur_frame_real_scale = SE3(orient, pos);
Vector3d trans = T_cur_frame_real_scale.translation() - T_first_frame_real_scale.translation();
double length_real = sqrt(pow(trans[0],2) + pow(trans[1],2) + pow(trans[2],2));
SVO_INFO_STREAM("Real world transform x: " << trans[0] << " y:" << trans[1] << " z:" << trans[2] << " length:" << length_real);
double x = T_cur_from_ref_.translation()[0];
double y = T_cur_from_ref_.translation()[1];
double z = T_cur_from_ref_.translation()[2];
double length_svo = sqrt(pow(x,2) + pow(y,2) + pow(z,2));
SVO_INFO_STREAM("SVO transform x: " << x << " y:" << y << " z:" << z << " length:" << length_svo);
#ifdef USE_ASE_IMU
// Rescale the map such that the real length of the movement matches with the svo movement length
double scale = length_real / length_svo;
#else
// Rescale the map such that the mean scene depth is equal to the specified scale
vector<double> depth_vec;
for(size_t i=0; i<xyz_in_cur_.size(); ++i)
depth_vec.push_back((xyz_in_cur_[i]).z());
double scene_depth_median = vk::getMedian(depth_vec);
double scale = Config::mapScale()/scene_depth_median;
#endif
frame_cur->T_f_w_ = T_cur_from_ref_ * frame_ref_->T_f_w_;
frame_cur->T_f_w_.translation() =
-frame_cur->T_f_w_.rotation_matrix()*(frame_ref_->pos() + scale*(frame_cur->pos() - frame_ref_->pos()));
//frame_cur->T_f_w_ = T_cur_frame_real_scale;
//frame_ref_->T_f_w_ = T_first_frame_real_scale;
// // Rescale the map such that the mean scene depth is equal to the specified scale
// vector<double> depth_vec;
// for(size_t i=0; i<xyz_in_cur_.size(); ++i)
// depth_vec.push_back((xyz_in_cur_[i]).z());
// double scene_depth_median = vk::getMedian(depth_vec);
// double scale = Config::mapScale()/scene_depth_median;
// frame_cur->T_f_w_ = T_cur_from_ref_ * frame_ref_->T_f_w_;
// frame_cur->T_f_w_.translation() =
// -frame_cur->T_f_w_.rotation_matrix()*(frame_ref_->pos() + scale*(frame_cur->pos() - frame_ref_->pos()));
// For each inlier create 3D point and add feature in both frames
SE3 T_world_cur = frame_cur->T_f_w_.inverse();
for(vector<int>::iterator it=inliers_.begin(); it!=inliers_.end(); ++it)
{
Vector2d px_cur(px_cur_[*it].x, px_cur_[*it].y);
Vector2d px_ref(px_ref_[*it].x, px_ref_[*it].y);
if(frame_ref_->cam_->isInFrame(px_cur.cast<int>(), 10) && frame_ref_->cam_->isInFrame(px_ref.cast<int>(), 10) && xyz_in_cur_[*it].z() > 0)
{
Vector3d pos = T_world_cur * (xyz_in_cur_[*it]*scale);
Point* new_point = new Point(pos);
Feature* ftr_cur(new Feature(frame_cur.get(), new_point, px_cur, f_cur_[*it], 0));
frame_cur->addFeature(ftr_cur);
new_point->addFrameRef(ftr_cur);
Feature* ftr_ref(new Feature(frame_ref_.get(), new_point, px_ref, f_ref_[*it], 0));
frame_ref_->addFeature(ftr_ref);
new_point->addFrameRef(ftr_ref);
}
}
return SUCCESS;
}