This article collects typical usage examples of the C++ FramePtr::img method: what FramePtr::img does, how to call it, and where it is used. The curated examples below may help, and they also illustrate the FramePtr class the method belongs to.
Three code examples of FramePtr::img are shown below, ordered by popularity by default.
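In SVO-style code like the examples below, frame->img() is typically a thin accessor that returns the full-resolution base level of the frame's image pyramid. A minimal sketch of such an accessor, assuming the rpg_svo Frame layout (img_pyr_ is a std::vector&lt;cv::Mat&gt; with level 0 at full resolution):

// sketch only: assumes img_pyr_[0] holds the full-resolution grayscale image
inline const cv::Mat& Frame::img() const { return img_pyr_[0]; }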
Example 1: detectFeatures
void detectFeatures(
FramePtr frame,
vector<cv::Point2f>& px_vec,
vector<Vector3d>& f_vec)
{
Features new_features;
feature_detection::FastDetector detector(
frame->img().cols, frame->img().rows, Config::gridSize(), Config::nPyrLevels());
detector.detect(frame.get(), frame->img_pyr_, Config::triangMinCornerScore(), new_features);
// for each detected corner, record its pixel location and bearing vector
px_vec.clear(); px_vec.reserve(new_features.size());
f_vec.clear(); f_vec.reserve(new_features.size());
std::for_each(new_features.begin(), new_features.end(), [&](Feature* ftr){
px_vec.push_back(cv::Point2f(ftr->px[0], ftr->px[1]));
f_vec.push_back(ftr->f);
delete ftr;
});
}
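A minimal usage sketch for this helper (hypothetical setup: cam and img are assumed to be an initialized vk::AbstractCamera* and a loaded grayscale cv::Mat; the Frame constructor signature follows rpg_svo):

void runDetection(vk::AbstractCamera* cam, const cv::Mat& img, double timestamp)
{
  FramePtr frame(new Frame(cam, img, timestamp));
  std::vector<cv::Point2f> px_vec;
  std::vector<Vector3d> f_vec;
  detectFeatures(frame, px_vec, f_vec);
  // px_vec[i] is the pixel location of corner i, f_vec[i] its unit bearing vector
}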
Example 2: detectFeatures
/// Detect FAST corners; outputs the corresponding pixel locations and bearing vectors (which can be thought of as the back-projected coordinates of the points)
void Initialization::detectFeatures(
FramePtr frame,
std::vector<cv::Point2f>& px_vec,
std::vector<Vector3d>& f_vec)
{
Features new_features;
FastDetector detector(
frame->img().cols, frame->img().rows, 25, 3);
detector.detect(frame.get(), frame->img_pyr_, 20.0, new_features);
// return the feature pixel locations and the features' unit bearing vectors
px_vec.clear(); px_vec.reserve(new_features.size());
f_vec.clear(); f_vec.reserve(new_features.size());
std::for_each(new_features.begin(), new_features.end(), [&](Feature* ftr){
px_vec.push_back(cv::Point2f(ftr->px[0], ftr->px[1]));
f_vec.push_back(ftr->f);
delete ftr;
});
}
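The comment above describes f_vec as back-projected coordinates: each entry is the unit-length ray through the camera center and the pixel. For an ideal pinhole camera this is straightforward to compute; a hypothetical standalone helper (ignoring lens distortion; fx, fy, cx, cy are the camera intrinsics):

#include <Eigen/Core>

Eigen::Vector3d bearingFromPixel(double u, double v,
                                 double fx, double fy, double cx, double cy)
{
  // lift the pixel onto the normalized image plane (z = 1), then scale to unit norm
  Eigen::Vector3d f((u - cx) / fx, (v - cy) / fy, 1.0);
  return f.normalized();
}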
Example 3: exportToDense
void Visualizer::exportToDense(const FramePtr& frame)
{
// publish air_ground_msgs
if(frame != nullptr && dense_pub_nth_ > 0
&& trace_id_ % dense_pub_nth_ == 0 && pub_dense_.getNumSubscribers() > 0)
{
svo_msgs::DenseInput msg;
msg.header.stamp = ros::Time(frame->timestamp_);
msg.header.frame_id = "/worldNED";
msg.frame_id = frame->id_;
cv_bridge::CvImage img_msg;
img_msg.header.stamp = msg.header.stamp;
img_msg.header.frame_id = "camera";
img_msg.image = frame->img();
img_msg.encoding = sensor_msgs::image_encodings::MONO8;
msg.image = *img_msg.toImageMsg();
double min_z = std::numeric_limits<double>::max();
double max_z = std::numeric_limits<double>::lowest(); // not min(): min() is the smallest positive double
for(Features::iterator it=frame->fts_.begin(); it!=frame->fts_.end(); ++it)
{
if((*it)->point == nullptr)
continue;
Vector3d pos = frame->T_f_w_*(*it)->point->pos_;
min_z = fmin(pos[2], min_z);
max_z = fmax(pos[2], max_z);
}
msg.min_depth = (float) min_z;
msg.max_depth = (float) max_z;
// publish cam in world frame
SE3 T_world_from_cam(T_world_from_vision_*frame->T_f_w_.inverse());
Quaterniond q(T_world_from_cam.rotation_matrix());
Vector3d p(T_world_from_cam.translation());
msg.pose.position.x = p[0];
msg.pose.position.y = p[1];
msg.pose.position.z = p[2];
msg.pose.orientation.w = q.w();
msg.pose.orientation.x = q.x();
msg.pose.orientation.y = q.y();
msg.pose.orientation.z = q.z();
pub_dense_.publish(msg);
}
}
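On the subscriber side, the fields populated above can be unpacked with cv_bridge; a hypothetical receiving callback (topic wiring omitted, field types assumed from the publisher code):

void denseInputCallback(const svo_msgs::DenseInput::ConstPtr& msg)
{
  // recover the mono8 image packed by exportToDense
  cv_bridge::CvImageConstPtr cv_ptr =
      cv_bridge::toCvShare(msg->image, msg, sensor_msgs::image_encodings::MONO8);
  ROS_INFO("frame %d: %dx%d image, depth range [%.2f, %.2f]",
           (int)msg->frame_id, cv_ptr->image.cols, cv_ptr->image.rows,
           msg->min_depth, msg->max_depth);
}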