This article collects typical usage examples of the C++ FramePtr class. If you are struggling with questions such as what exactly FramePtr is used for, how to use it, or what FramePtr usage looks like in practice, the curated class code examples below may help.
A total of 15 FramePtr code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
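Note that FramePtr is not a single library class: in the projects sampled below it is usually a reference-counted smart-pointer alias for a project-specific Frame type. A minimal sketch of such a declaration (an assumption for illustration; each project defines its own alias, and SVO, for example, uses Boost):

#include <boost/shared_ptr.hpp>

class Frame;                                // project-specific frame type
typedef boost::shared_ptr<Frame> FramePtr;  // shared, reference-counted handle passed around by value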
Example 1: visualizeMarkers
void Visualizer::visualizeMarkers(
    const FramePtr& frame,
    const set<FramePtr>& core_kfs,
    const Map& map)
{
  if(frame == NULL)
    return;
  vk::output_helper::publishTfTransform(
      frame->T_f_w_*T_world_from_vision_.inverse(),
      ros::Time(frame->timestamp_), "cam_pos", "worldNED", br_);
  if(pub_frames_.getNumSubscribers() > 0 || pub_points_.getNumSubscribers() > 0)
  {
    vk::output_helper::publishHexacopterMarker(
        pub_frames_, "cam_pos", "cams", ros::Time(frame->timestamp_),
        1, 0, 0.3*VIS_SCALE, Vector3d(0.,0.,1.));
    vk::output_helper::publishPointMarker(
        pub_points_, T_world_from_vision_*frame->pos(), "trajectory",
        ros::Time::now(), trace_id_, 0, 0.006*VIS_SCALE, Vector3d(0.,0.,0.5));
    if(frame->isKeyframe() || publish_map_every_frame_)
      publishMapRegion(core_kfs);
    removeDeletedPts(map);
  }
}
Example 2: sendFrame
/**
 * @brief Send one JPEG frame to this stream's connection as a multipart part.
 *
 * @param writeable List of sockets currently ready for writing
 * @param frame     Frame whose encoded JPEG buffer is transmitted
 *
 * @return false if the write was incomplete and the stream should stop, true otherwise
 */
bool HttpImageStream::sendFrame( Select::CommsList &writeable, FramePtr frame )
{
  const ByteBuffer &packet = frame->buffer();
  std::string txHeaders = "--imgboundary\r\n";
  txHeaders += "Content-Type: image/jpeg\r\n";
  txHeaders += stringtf( "Content-Length: %zd\r\n\r\n", packet.size() );
  ByteBuffer txBuffer;
  txBuffer.reserve( packet.size()+128 );
  txBuffer.append( txHeaders.c_str(), txHeaders.size() );
  txBuffer.append( packet.data(), packet.size() );
  txBuffer.append( "\r\n", 2 );
  for ( Select::CommsList::iterator iter = writeable.begin(); iter != writeable.end(); iter++ )
  {
    if ( TcpInetSocket *socket = dynamic_cast<TcpInetSocket *>(*iter) )
    {
      if ( socket == mConnection->socket() )
      {
        int nBytes = socket->write( txBuffer.data(), txBuffer.size() );
        const FeedFrame *sourceFrame = frame->sourceFrame();
        Debug( 4, "Wrote %d bytes on sd %d, frame %ju<-%ju", nBytes, socket->getWriteDesc(), frame->id(), sourceFrame->id() );
        if ( nBytes != txBuffer.size() )
        {
          Error( "Incomplete write, %d bytes instead of %zd", nBytes, txBuffer.size() );
          mStop = true;
          return( false );
        }
      }
    }
  }
  return( true );
}
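Each part written by sendFrame() starts with the "--imgboundary" delimiter, which presumes the HTTP response was opened earlier as a multipart MJPEG stream. A hedged sketch of those one-time response headers (the real header code lives elsewhere in the project; the exact status line and fields here are assumptions):

// Hypothetical one-time headers sent before the per-frame parts above:
std::string responseHeaders =
    "HTTP/1.0 200 OK\r\n"
    "Connection: close\r\n"
    "Content-Type: multipart/x-mixed-replace; boundary=imgboundary\r\n\r\n";
socket->write( responseHeaders.data(), responseHeaders.size() );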
Example 3: updateSeedsLoop
void DepthFilter::updateSeedsLoop()
{
  while(!boost::this_thread::interruption_requested())
  {
    FramePtr frame;
    {
      lock_t lock(frame_queue_mut_);
      while(frame_queue_.empty() && new_keyframe_set_ == false)
        frame_queue_cond_.wait(lock);
      if(new_keyframe_set_)
      {
        new_keyframe_set_ = false;
        seeds_updating_halt_ = false;
        clearFrameQueue();
        frame = new_keyframe_;
      }
      else
      {
        frame = frame_queue_.front();
        frame_queue_.pop();
      }
    }
    updateSeeds(frame);
    if(frame->isKeyframe())
      initializeSeeds(frame);
  }
}
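updateSeedsLoop() is the consumer half of a producer/consumer queue: it blocks on frame_queue_cond_ until either a regular frame is queued or a new keyframe preempts the queue. A hedged sketch of the matching producer, reusing the member names visible above (the queue-length policy is an assumption):

void DepthFilter::addFrame(FramePtr frame)
{
  lock_t lock(frame_queue_mut_);
  if(frame_queue_.size() > 2)      // keep the queue short: drop the oldest frame (assumed policy)
    frame_queue_.pop();
  frame_queue_.push(frame);
  seeds_updating_halt_ = false;
  frame_queue_cond_.notify_one();  // wakes updateSeedsLoop()
}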
Example 4: UpdateLayout
void RenderThread::UpdateLayout()
{
  QMutexLocker locker(&m_MutexLayout);
  // Find frames without layout info and delete them
  QMutableListIterator<FramePtr> i(m_RenderFrames);
  while (i.hasNext())
  {
    FramePtr renderFrame = i.next();
    if (!renderFrame)
    {
      i.remove();
      continue;
    }
    int j = m_ViewIDs.indexOf(renderFrame->Info(VIEW_ID).toUInt());
    if (j == -1)
    {
      m_Renderer->Deallocate(renderFrame);
      i.remove();
    }
    else
    {
      QRect srcRect = m_SrcRects.at(j);
      float scaleX = renderFrame->Info(RENDER_SRC_SCALE_X).toFloat();
      float scaleY = renderFrame->Info(RENDER_SRC_SCALE_Y).toFloat();
      srcRect.setLeft((int)(srcRect.left()*scaleX));
      srcRect.setRight((int)(srcRect.right()*scaleX));
      srcRect.setTop((int)(srcRect.top()*scaleY));
      srcRect.setBottom((int)(srcRect.bottom()*scaleY));
      renderFrame->SetInfo(SRC_RECT, srcRect);
      renderFrame->SetInfo(DST_RECT, m_DstRects.at(j));
    }
  }
}
Example 5: addSecondFrame
InitResult KltHomographyInit::addSecondFrame(FramePtr frame_cur)
{
  trackKlt(frame_ref_, frame_cur, px_ref_, px_cur_, f_ref_, f_cur_, disparities_);
  SVO_INFO_STREAM("Init: KLT tracked "<< disparities_.size() <<" features");
  if(disparities_.size() < Config::initMinTracked())
    return FAILURE;
  double disparity = vk::getMedian(disparities_);
  SVO_INFO_STREAM("Init: KLT "<<disparity<<"px average disparity.");
  if(disparity < Config::initMinDisparity())
    return NO_KEYFRAME;
  computeHomography(
      f_ref_, f_cur_,
      frame_ref_->cam_->errorMultiplier2(), Config::poseOptimThresh(),
      inliers_, xyz_in_cur_, T_cur_from_ref_);
  SVO_INFO_STREAM("Init: Homography RANSAC "<<inliers_.size()<<" inliers.");
  if(inliers_.size() < Config::initMinInliers())
  {
    SVO_WARN_STREAM("Init WARNING: "<<Config::initMinInliers()<<" inliers minimum required.");
    return FAILURE;
  }
  // Rescale the map such that the mean scene depth is equal to the specified scale
  vector<double> depth_vec;
  for(size_t i=0; i<xyz_in_cur_.size(); ++i)
    depth_vec.push_back((xyz_in_cur_[i]).z());
  double scene_depth_median = vk::getMedian(depth_vec);
  double scale = Config::mapScale()/scene_depth_median;
  frame_cur->T_f_w_ = T_cur_from_ref_ * frame_ref_->T_f_w_;
  frame_cur->T_f_w_.translation() =
      -frame_cur->T_f_w_.rotation_matrix()*(frame_ref_->pos() + scale*(frame_cur->pos() - frame_ref_->pos()));
  // For each inlier create 3D point and add feature in both frames
  SE3 T_world_cur = frame_cur->T_f_w_.inverse();
  for(vector<int>::iterator it=inliers_.begin(); it!=inliers_.end(); ++it)
  {
    Vector2d px_cur(px_cur_[*it].x, px_cur_[*it].y);
    Vector2d px_ref(px_ref_[*it].x, px_ref_[*it].y);
    if(frame_ref_->cam_->isInFrame(px_cur.cast<int>(), 10) && frame_ref_->cam_->isInFrame(px_ref.cast<int>(), 10) && xyz_in_cur_[*it].z() > 0)
    {
      Vector3d pos = T_world_cur * (xyz_in_cur_[*it]*scale);
      Point* new_point = new Point(pos);
      Feature* ftr_cur(new Feature(frame_cur.get(), new_point, px_cur, f_cur_[*it], 0));
      frame_cur->addFeature(ftr_cur);
      new_point->addFrameRef(ftr_cur);
      Feature* ftr_ref(new Feature(frame_ref_.get(), new_point, px_ref, f_ref_[*it], 0));
      frame_ref_->addFeature(ftr_ref);
      new_point->addFrameRef(ftr_ref);
    }
  }
  return SUCCESS;
}
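addSecondFrame() is the second half of a two-frame monocular initialization. A hedged sketch of how it is typically driven (SVO's frame handler provides an addFirstFrame() counterpart; the driver code below is illustrative only):

// Illustrative driver, not the project's exact code:
KltHomographyInit initializer;
if(initializer.addFirstFrame(first_frame) == SUCCESS)
{
  // ...later, once the camera has translated enough...
  if(initializer.addSecondFrame(second_frame) == SUCCESS)
  {
    // both frames now carry poses and share triangulated map points
  }
}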
Example 6: _filter
bool SmoothFilter::_filter(const FramePtr &in, FramePtr &out)
{
  ToFRawFrame *tofFrame = dynamic_cast<ToFRawFrame *>(in.get());
  DepthFrame *depthFrame = dynamic_cast<DepthFrame *>(in.get());
  if((!tofFrame && !depthFrame) || !_prepareOutput(in, out))
  {
    logger(LOG_ERROR) << "SmoothFilter: Input frame type is neither ToFRawFrame nor DepthFrame, or failed to get the output ready" << std::endl;
    return false;
  }
  if(tofFrame)
  {
    _size = tofFrame->size;
    ToFRawFrame *o = dynamic_cast<ToFRawFrame *>(out.get());
    if(!o)
    {
      logger(LOG_ERROR) << "SmoothFilter: Invalid frame type. Expecting ToFRawFrame." << std::endl;
      return false;
    }
    //logger(LOG_INFO) << "SmoothFilter: Applying filter with gain = " << _gain << " to ToFRawFrame id = " << tofFrame->id << std::endl;
    uint s = _size.width*_size.height;
    memcpy(o->ambient(), tofFrame->ambient(), s*tofFrame->ambientWordWidth());
    memcpy(o->amplitude(), tofFrame->amplitude(), s*tofFrame->amplitudeWordWidth());
    memcpy(o->flags(), tofFrame->flags(), s*tofFrame->flagsWordWidth());
    if(tofFrame->phaseWordWidth() == 2)
      return _filter<uint16_t>((uint16_t *)tofFrame->phase(), (uint16_t *)o->phase());
    else if(tofFrame->phaseWordWidth() == 1)
      return _filter<uint8_t>((uint8_t *)tofFrame->phase(), (uint8_t *)o->phase());
    else if(tofFrame->phaseWordWidth() == 4)
      return _filter<uint32_t>((uint32_t *)tofFrame->phase(), (uint32_t *)o->phase());
    else
      return false;
  }
  else if(depthFrame)
  {
    _size = depthFrame->size;
    DepthFrame *o = dynamic_cast<DepthFrame *>(out.get());
    if(!o)
    {
      logger(LOG_ERROR) << "SmoothFilter: Invalid frame type. Expecting DepthFrame." << std::endl;
      return false;
    }
    o->amplitude = depthFrame->amplitude;
    return _filter<float>(depthFrame->depth.data(), o->depth.data());
  }
  else
    return false;
}
Example 7: apply
void EffectMosaic::apply(FramePtr &src)
{
  for(int i=0; i < src->getHeight(); i+=mMosaicSize) {
    for(int j=0; j < src->getWidth(); j+=mMosaicSize) {
      auto pixel = AverageColor(src.get(), i, j);
      SetColor(src.get(), j, i, pixel);
    }
  }
}
Example 8: frame
FramePtr StaticFrame::frame() const
{
  FramePtr p = boost::make_shared<Frame>();
  p->geometry = geometry;
  p->geometry.scale(scale);
  for (unsigned int i=0; i < data.size(); ++i) {
    p->addPoint(data[i].point());
  }
  return p;
}
Example 9: reprojectCell
bool Reprojector::reprojectCell(Cell& cell, FramePtr frame)
{
  cell.sort(boost::bind(&Reprojector::pointQualityComparator, _1, _2));
  Cell::iterator it=cell.begin();
  while(it!=cell.end())
  {
    ++n_trials_;
    if(it->pt->type_ == Point::TYPE_DELETED)
    {
      it = cell.erase(it);
      continue;
    }
    bool found_match = true;
    if(options_.find_match_direct)
      found_match = matcher_.findMatchDirect(*it->pt, *frame, it->px);
    if(!found_match)
    {
      it->pt->n_failed_reproj_++;
      if(it->pt->type_ == Point::TYPE_UNKNOWN && it->pt->n_failed_reproj_ > 15)
        map_.safeDeletePoint(it->pt);
      if(it->pt->type_ == Point::TYPE_CANDIDATE && it->pt->n_failed_reproj_ > 30)
        map_.point_candidates_.deleteCandidatePoint(it->pt);
      it = cell.erase(it);
      continue;
    }
    it->pt->n_succeeded_reproj_++;
    if(it->pt->type_ == Point::TYPE_UNKNOWN && it->pt->n_succeeded_reproj_ > 10)
      it->pt->type_ = Point::TYPE_GOOD;
    Feature* new_feature = new Feature(frame.get(), it->px, matcher_.search_level_);
    frame->addFeature(new_feature);
    // Here we add a reference in the feature to the 3D point, the other way
    // round is only done if this frame is selected as keyframe.
    new_feature->point = it->pt;
    if(matcher_.ref_ftr_->type == Feature::EDGELET)
    {
      new_feature->type = Feature::EDGELET;
      new_feature->grad = matcher_.A_cur_ref_*matcher_.ref_ftr_->grad;
      new_feature->grad.normalize();
    }
    // If the keyframe is selected and we reproject the rest, we don't have to
    // check this point anymore.
    it = cell.erase(it);
    // Maximum one point per cell.
    return true;
  }
  return false;
}
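The sort at the top of reprojectCell() puts the highest-quality candidates first so they are matched before the cell is declared done. A hedged sketch of the comparator being bound, assuming quality is encoded in the point type enum as in SVO (TYPE_GOOD > TYPE_UNKNOWN > TYPE_CANDIDATE):

bool Reprojector::pointQualityComparator(Candidate& lhs, Candidate& rhs)
{
  // better (higher) point type sorts first; based on SVO's implementation, treat as an assumption
  return lhs.pt->type_ > rhs.pt->type_;
}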
Example 10: Initialize
bool ColorProducer::Initialize(FrameManagerPtr pFrameManager) {
  if(pFrameManager != 0) {
    FramePtr pFrame = pFrameManager->CreateFrame();
    if(pFrame != 0) {
      memset_d(reinterpret_cast<unsigned long*>(pFrame->GetDataPtr()), colorValue_, pFrame->GetDataSize() / sizeof(unsigned long));
      frameBuffer_.push_back(pFrame);
      return true;
    }
  }
  return false;
}
Example 11: dataLoop
void AbstractStream::dataLoop(AbstractStream *stream)
{
  switch (stream->mediaType()) {
  case AVMEDIA_TYPE_VIDEO:
  case AVMEDIA_TYPE_AUDIO:
    while (stream->m_runDataLoop) {
      stream->m_dataMutex.lock();
      if (stream->m_frames.isEmpty())
        stream->m_dataQueueNotEmpty.wait(&stream->m_dataMutex,
                                         THREAD_WAIT_LIMIT);
      if (!stream->m_frames.isEmpty()) {
        FramePtr frame = stream->m_frames.dequeue();
        stream->processData(frame.data());
        if (stream->m_frames.size() < stream->m_maxData)
          stream->m_dataQueueNotFull.wakeAll();
      }
      stream->m_dataMutex.unlock();
    }
    break;
  case AVMEDIA_TYPE_SUBTITLE:
    while (stream->m_runDataLoop) {
      stream->m_dataMutex.lock();
      if (stream->m_subtitles.isEmpty())
        stream->m_dataQueueNotEmpty.wait(&stream->m_dataMutex,
                                         THREAD_WAIT_LIMIT);
      if (!stream->m_subtitles.isEmpty()) {
        SubtitlePtr subtitle = stream->m_subtitles.dequeue();
        stream->processData(subtitle.data());
        if (stream->m_subtitles.size() < stream->m_maxData)
          stream->m_dataQueueNotFull.wakeAll();
      }
      stream->m_dataMutex.unlock();
    }
    break;
  default:
    break;
  }
}
Example 12: addFrame
int BufferSrcFilterContext::addFrame(const FramePtr &frame)
{
  assert(isValid());
  assert(frame);
  return av_buffersrc_add_frame(getAVFilterContext(), frame->getAVFrame());
}
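The wrapper above forwards to FFmpeg's av_buffersrc_add_frame(), which hands a decoded frame to the source of a libavfilter graph; on success the filter takes over the frame's data references. A plain-libavfilter usage sketch without the project's wrapper types (the pushFrame helper is illustrative):

extern "C" {
#include <libavfilter/buffersrc.h>
}

// Push one decoded frame into a configured filter graph's buffer source.
int pushFrame(AVFilterContext *buffersrc_ctx, AVFrame *frame)
{
  return av_buffersrc_add_frame(buffersrc_ctx, frame);
}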
Example 13: initializeSeeds
void DepthFilter::initializeSeeds(FramePtr frame)
{
  list<PointFeat*> new_pt_features;
  list<LineFeat*> new_seg_features;
  if(Config::hasPoints())
  {
    /* detect new point features in point-unpopulated cells of the grid */
    // populate the occupancy grid of the detector with current features
    pt_feature_detector_->setExistingFeatures(frame->pt_fts_);
    // detect features to fill the free cells in the image
    pt_feature_detector_->detect(frame.get(), frame->img_pyr_,
                                 Config::triangMinCornerScore(), new_pt_features);
  }
  if(Config::hasLines())
  {
    /* detect new segment features in line-unpopulated cells of the grid */
    // populate the occupancy grid of the detector with current features
    seg_feature_detector_->setExistingFeatures(frame->seg_fts_);
    // detect features
    seg_feature_detector_->detect(frame.get(), frame->img_pyr_,
                                  Config::lsdMinLength(), new_seg_features);
  }
  // halt the parallel updating thread while the new seeds are inserted
  seeds_updating_halt_ = true;
  lock_t lock(seeds_mut_); // by locking, the updateSeeds function stops
  // increase the keyframe counter by one (to account for this new keyframe)
  ++PointSeed::batch_counter;
  // initialize a point seed for every new point feature
  std::for_each(new_pt_features.begin(), new_pt_features.end(), [&](PointFeat* ftr){
    pt_seeds_.push_back(PointSeed(ftr, new_keyframe_mean_depth_, new_keyframe_min_depth_));
  });
  // initialize a segment seed for every new segment feature
  std::for_each(new_seg_features.begin(), new_seg_features.end(), [&](LineFeat* ftr){
    seg_seeds_.push_back(LineSeed(ftr, new_keyframe_mean_depth_, new_keyframe_min_depth_));
  });
  if(options_.verbose)
    SVO_INFO_STREAM("DepthFilter: Initialized "<<new_pt_features.size()<<" new point seeds and "
                    <<new_seg_features.size()<<" line segment seeds.");
  seeds_updating_halt_ = false;
}
Example 14: decodeFrame
bool decodeFrame(StreamFrameMap& streamFrames)
{
  bool done = false;
  while(!done)
  {
    // demux
    PacketPtr packet = demuxPacket();
    if(packet == 0){
      FlogE("demuxing failed");
      return false;
    }
    // decode
    decodePacket(packet, streamFrames);
    // check if any frames finished
    for(auto pair : streamFrames){
      FramePtr frame = pair.second;
      if(frame->finished != 0){
        // set timestamp and break out of loop
        int64_t pts = av_frame_get_best_effort_timestamp(frame->GetAvFrame());
        if(pair.first == videoStream){
          if(firstDts == AV_NOPTS_VALUE){
            firstDts = frame->GetAvFrame()->pkt_dts;
            FlogD("setting firstDts to: " << firstDts);
          }
          if(firstPts == AV_NOPTS_VALUE){
            firstPts = pts;
            FlogD("setting firstPts to: " << firstPts);
          }
        }
        frame->SetPts(pts);
        done = true;
      }
    }
  }
  // successfully decoded frame, reset retry counter
  ResetRetries();
  return true;
}
Example 15: icon
const QIcon * StaticFrameGui::icon()
{
  QPixmap p(48,48);
  if (fp) {
    FramePtr f = fp->frame();
    if (f) {
      QPainter q(&p);
      q.setBackground (Qt::black);
      q.setRenderHint(QPainter::HighQualityAntialiasing);
      q.setBrush(QBrush(Qt::black));
      q.drawRect(0,0,48,48);
      f->render (q,0,0,48,48);
    }
  }
  QIcon * i = new QIcon(p);
  return i;
}
}