This page collects typical C++ code examples of the SensorData::cameraModels method. If you are unsure what SensorData::cameraModels does, how exactly to call it, or want to see it used in context, the curated examples below may help. You can also explore further usage examples of its enclosing class, SensorData.
The following shows 6 code examples of the SensorData::cameraModels method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
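As a primer before the examples: the recurring pattern below is to treat a frame as usable only when it carries a calibration valid for projection, either per-camera via cameraModels() or stereo via stereoCameraModel(). Here is a minimal sketch of that check; the helper name and header path are assumptions for illustration, while the accessors themselves all appear in the examples below.

#include <rtabmap/core/SensorData.h> // assumed header path

using namespace rtabmap;

// Hypothetical helper: returns true if the frame carries a calibration
// usable for projection, either mono/RGB-D (cameraModels()) or stereo.
bool hasUsableCalibration(const SensorData & data)
{
    if(!data.cameraModels().empty() &&
       data.cameraModels()[0].isValidForProjection())
    {
        return true; // at least one calibrated mono/RGB-D camera
    }
    return data.stereoCameraModel().isValidForProjection();
}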
Example 1: addData
void OdometryThread::addData(const SensorData & data)
{
    if(dynamic_cast<OdometryMono*>(_odometry) == 0 && dynamic_cast<OdometryF2M*>(_odometry) == 0)
    {
        if(data.imageRaw().empty() || data.depthOrRightRaw().empty() || (data.cameraModels().size()==0 && !data.stereoCameraModel().isValidForProjection()))
        {
            ULOGGER_ERROR("Missing some information (images empty or missing calibration)!?");
            return;
        }
    }
    else
    {
        // Mono and BOW can accept RGB only
        if(data.imageRaw().empty() || (data.cameraModels().size()==0 && !data.stereoCameraModel().isValidForProjection()))
        {
            ULOGGER_ERROR("Missing some information (image empty or missing calibration)!?");
            return;
        }
    }

    bool notify = true;
    _dataMutex.lock();
    {
        _dataBuffer.push_back(data);
        while(_dataBufferMaxSize > 0 && _dataBuffer.size() > _dataBufferMaxSize)
        {
            UDEBUG("Data buffer is full, the oldest data is removed to add the new one.");
            _dataBuffer.pop_front();
            notify = false;
        }
    }
    _dataMutex.unlock();

    if(notify)
    {
        _dataAdded.release();
    }
}
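Note the design choice here: when the buffer is full and the oldest frame is dropped, the semaphore is not released, so the number of successful acquires on the consumer side stays in step with the number of buffered items. A self-contained sketch of the same bounded-buffer idea using only the standard library (the class name and use of std::condition_variable are illustrative; RTAB-Map itself uses UMutex/USemaphore from UtiLite):

#include <condition_variable>
#include <deque>
#include <mutex>

template<typename T>
class BoundedBuffer
{
public:
    explicit BoundedBuffer(size_t maxSize) : maxSize_(maxSize) {}

    void push(const T & item)
    {
        std::lock_guard<std::mutex> lock(mutex_);
        buffer_.push_back(item);
        // Drop the oldest item when full, mirroring addData(): in that
        // case the consumer is not notified, since no net item was added.
        bool notify = true;
        while(maxSize_ > 0 && buffer_.size() > maxSize_)
        {
            buffer_.pop_front();
            notify = false;
        }
        if(notify)
        {
            cond_.notify_one();
        }
    }

    T pop() // blocks until an item is available
    {
        std::unique_lock<std::mutex> lock(mutex_);
        cond_.wait(lock, [this]{ return !buffer_.empty(); });
        T item = buffer_.front();
        buffer_.pop_front();
        return item;
    }

private:
    std::deque<T> buffer_;
    std::mutex mutex_;
    std::condition_variable cond_;
    size_t maxSize_;
};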
Example 2: mainLoop
void CameraThread::mainLoop()
{
    UTimer timer;
    UDEBUG("");
    SensorData data = _camera->takeImage();

    if(!data.imageRaw().empty())
    {
        if(_colorOnly && !data.depthRaw().empty())
        {
            data.setDepthOrRightRaw(cv::Mat());
        }
        if(_mirroring && data.cameraModels().size() == 1)
        {
            cv::Mat tmpRgb;
            cv::flip(data.imageRaw(), tmpRgb, 1);
            data.setImageRaw(tmpRgb);
            if(data.cameraModels()[0].cx())
            {
                CameraModel tmpModel(
                        data.cameraModels()[0].fx(),
                        data.cameraModels()[0].fy(),
                        float(data.imageRaw().cols) - data.cameraModels()[0].cx(),
                        data.cameraModels()[0].cy(),
                        data.cameraModels()[0].localTransform());
                data.setCameraModel(tmpModel);
            }
            if(!data.depthRaw().empty())
            {
                cv::Mat tmpDepth;
                cv::flip(data.depthRaw(), tmpDepth, 1);
                data.setDepthOrRightRaw(tmpDepth);
            }
        }
        if(_stereoToDepth && data.stereoCameraModel().isValid() && !data.rightRaw().empty())
        {
            cv::Mat depth = util2d::depthFromDisparity(
                    util2d::disparityFromStereoImages(data.imageRaw(), data.rightRaw()),
                    data.stereoCameraModel().left().fx(),
                    data.stereoCameraModel().baseline());
            data.setCameraModel(data.stereoCameraModel().left());
            data.setDepthOrRightRaw(depth);
            data.setStereoCameraModel(StereoCameraModel());
        }

        this->post(new CameraEvent(data, _camera->getSerial()));
    }
    else if(!this->isKilled())
    {
        UWARN("no more images...");
        this->kill();
        this->post(new CameraEvent());
    }
}
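The mirroring branch above rebuilds the camera model because flipping the image horizontally also reflects the optical center: a pixel at column x moves to column width - 1 - x, so cx must be updated accordingly (the example uses width - cx, ignoring the half-pixel offset of the pixel grid). A standalone sketch of that update, with a hypothetical helper name:

#include <opencv2/core.hpp>

// Hypothetical helper: mirror an image about its vertical axis and
// reflect the principal point's x-coordinate to match.
cv::Mat mirrorWithPrincipalPoint(const cv::Mat & image, float & cx)
{
    cv::Mat flipped;
    cv::flip(image, flipped, 1);  // flipCode=1: flip around the y-axis
    cx = float(image.cols) - cx;  // same update as the CameraModel above
    return flipped;
}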
Example 3: computeTransform
// Returns a non-null transform if odometry is correctly computed
Transform OdometryDVO::computeTransform(
        SensorData & data,
        const Transform & guess,
        OdometryInfo * info)
{
    Transform t;
#ifdef RTABMAP_DVO
    UTimer timer;

    if(data.imageRaw().empty() ||
        data.imageRaw().rows != data.depthOrRightRaw().rows ||
        data.imageRaw().cols != data.depthOrRightRaw().cols)
    {
        UERROR("Not supported input!");
        return t;
    }

    if(!(data.cameraModels().size() == 1 && data.cameraModels()[0].isValidForReprojection()))
    {
        UERROR("Invalid camera model! Only single RGB-D camera supported by DVO. Try another odometry approach.");
        return t;
    }

    if(dvo_ == 0)
    {
        dvo::DenseTracker::Config cfg = dvo::DenseTracker::getDefaultConfig();
        dvo_ = new dvo::DenseTracker(cfg);
    }

    cv::Mat grey, grey_s16, depth_inpainted, depth_mask, depth_mono, depth_float;
    if(data.imageRaw().type() != CV_32FC1)
    {
        if(data.imageRaw().type() == CV_8UC3)
        {
            cv::cvtColor(data.imageRaw(), grey, CV_BGR2GRAY);
        }
        else
        {
            grey = data.imageRaw();
        }
        grey.convertTo(grey_s16, CV_32F);
    }
    else
    {
        grey_s16 = data.imageRaw();
    }

    // make sure all zeros are NaN
    if(data.depthRaw().type() == CV_32FC1)
    {
        depth_float = data.depthRaw();
        for(int i=0; i<depth_float.rows; ++i)
        {
            for(int j=0; j<depth_float.cols; ++j)
            {
                float & d = depth_float.at<float>(i,j);
                if(d == 0.0f)
                {
                    d = NAN;
                }
            }
        }
    }
    else if(data.depthRaw().type() == CV_16UC1)
    {
        depth_float = cv::Mat(data.depthRaw().size(), CV_32FC1);
        for(int i=0; i<data.depthRaw().rows; ++i)
        {
            for(int j=0; j<data.depthRaw().cols; ++j)
            {
                float d = float(data.depthRaw().at<unsigned short>(i,j))/1000.0f;
                depth_float.at<float>(i, j) = d==0.0f?NAN:d;
            }
        }
    }
    else
    {
        UFATAL("Unknown depth format!");
    }

    if(camera_ == 0)
    {
        dvo::core::IntrinsicMatrix intrinsics = dvo::core::IntrinsicMatrix::create(
                data.cameraModels()[0].fx(),
                data.cameraModels()[0].fy(),
                data.cameraModels()[0].cx(),
                data.cameraModels()[0].cy());
        camera_ = new dvo::core::RgbdCameraPyramid(
                data.cameraModels()[0].imageWidth(),
                data.cameraModels()[0].imageHeight(),
                intrinsics);
    }

    dvo::core::RgbdImagePyramid * current = new dvo::core::RgbdImagePyramid(*camera_, grey_s16, depth_float);

    cv::Mat covariance;
//......... the rest of this code is omitted here .........
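DVO expects float depth in meters with invalid pixels marked as NaN, which is what the per-pixel loops above produce. The same conversion can be written more compactly with OpenCV primitives; a sketch (the function name is illustrative):

#include <limits>
#include <opencv2/core.hpp>

// Convert a CV_16UC1 depth image in millimeters to CV_32FC1 meters,
// replacing zero (invalid) measurements with NaN.
cv::Mat depthMmToMetersNaN(const cv::Mat & depthMm)
{
    cv::Mat depthM;
    depthMm.convertTo(depthM, CV_32FC1, 1.0/1000.0); // mm -> m
    depthM.setTo(std::numeric_limits<float>::quiet_NaN(), depthMm == 0);
    return depthM;
}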
Example 4: process
Transform Odometry::process(SensorData & data, OdometryInfo * info)
{
    if(_pose.isNull())
    {
        _pose.setIdentity(); // initialized
    }

    UASSERT(!data.imageRaw().empty());

    if(!data.stereoCameraModel().isValidForProjection() &&
       (data.cameraModels().size() == 0 || !data.cameraModels()[0].isValidForProjection()))
    {
        UERROR("Rectified images required! Calibrate your camera.");
        return Transform();
    }

    double dt = previousStamp_>0.0f?data.stamp() - previousStamp_:0.0;
    Transform guess = dt && guessFromMotion_?Transform::getIdentity():Transform();
    UASSERT_MSG(dt>0.0 || (dt == 0.0 && previousVelocityTransform_.isNull()), uFormat("dt=%f previous transform=%s", dt, previousVelocityTransform_.prettyPrint().c_str()).c_str());
    if(!previousVelocityTransform_.isNull())
    {
        if(guessFromMotion_)
        {
            if(_filteringStrategy == 1)
            {
                // use the Kalman-predicted transform
                float vx,vy,vz, vroll,vpitch,vyaw;
                predictKalmanFilter(dt, &vx,&vy,&vz,&vroll,&vpitch,&vyaw);
                guess = Transform(vx*dt, vy*dt, vz*dt, vroll*dt, vpitch*dt, vyaw*dt);
            }
            else
            {
                float vx,vy,vz, vroll,vpitch,vyaw;
                previousVelocityTransform_.getTranslationAndEulerAngles(vx,vy,vz, vroll,vpitch,vyaw);
                guess = Transform(vx*dt, vy*dt, vz*dt, vroll*dt, vpitch*dt, vyaw*dt);
            }
        }
        else if(_filteringStrategy == 1)
        {
            predictKalmanFilter(dt);
        }
    }

    UTimer time;
    Transform t;
    if(_imageDecimation > 1)
    {
        // decimation of images, with calibrations scaled to match
        SensorData decimatedData = data;
        decimatedData.setImageRaw(util2d::decimate(decimatedData.imageRaw(), _imageDecimation));
        decimatedData.setDepthOrRightRaw(util2d::decimate(decimatedData.depthOrRightRaw(), _imageDecimation));
        std::vector<CameraModel> cameraModels = decimatedData.cameraModels();
        for(unsigned int i=0; i<cameraModels.size(); ++i)
        {
            cameraModels[i] = cameraModels[i].scaled(1.0/double(_imageDecimation));
        }
        decimatedData.setCameraModels(cameraModels);
        StereoCameraModel stereoModel = decimatedData.stereoCameraModel();
        if(stereoModel.isValidForProjection())
        {
            stereoModel.scale(1.0/double(_imageDecimation));
        }
        decimatedData.setStereoCameraModel(stereoModel);

        // compute transform
        t = this->computeTransform(decimatedData, guess, info);

        // transform the keypoints back into the original image
        std::vector<cv::KeyPoint> kpts = decimatedData.keypoints();
        double log2value = log(double(_imageDecimation))/log(2.0);
        for(unsigned int i=0; i<kpts.size(); ++i)
        {
            kpts[i].pt.x *= _imageDecimation;
            kpts[i].pt.y *= _imageDecimation;
            kpts[i].size *= _imageDecimation;
            kpts[i].octave += log2value;
        }
        data.setFeatures(kpts, decimatedData.descriptors());

        if(info)
        {
            UASSERT(info->newCorners.size() == info->refCorners.size());
            for(unsigned int i=0; i<info->newCorners.size(); ++i)
            {
                info->refCorners[i].x *= _imageDecimation;
                info->refCorners[i].y *= _imageDecimation;
                info->newCorners[i].x *= _imageDecimation;
                info->newCorners[i].y *= _imageDecimation;
            }
            for(std::multimap<int, cv::KeyPoint>::iterator iter=info->words.begin(); iter!=info->words.end(); ++iter)
            {
                iter->second.pt.x *= _imageDecimation;
                iter->second.pt.y *= _imageDecimation;
                iter->second.size *= _imageDecimation;
                iter->second.octave += log2value;
            }
        }
    }
    else
    {
//......... the rest of this code is omitted here .........
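When an image is decimated by an integer factor d, the intrinsics must be scaled by 1/d, which is what CameraModel::scaled() does above; keypoints computed on the decimated image are then mapped back by multiplying coordinates and sizes by d and shifting the octave by log2(d). A sketch of the intrinsic part, using a hypothetical plain struct in place of CameraModel:

// Hypothetical struct standing in for the intrinsic part of CameraModel.
struct Intrinsics
{
    double fx, fy, cx, cy;
};

// Scaling for an image decimated by an integer factor: focal lengths and
// principal point all shrink by the same factor; the extrinsic (local
// transform) is unchanged.
Intrinsics scaledForDecimation(const Intrinsics & in, int decimation)
{
    const double s = 1.0 / double(decimation);
    return { in.fx * s, in.fy * s, in.cx * s, in.cy * s };
}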
Example 5: newFrame
// Returns a non-null transform if odometry is correctly computed
Transform OdometryF2F::computeTransform(
        SensorData & data,
        const Transform & guess,
        OdometryInfo * info)
{
    UTimer timer;
    Transform output;
    if(!data.rightRaw().empty() && !data.stereoCameraModel().isValidForProjection())
    {
        UERROR("Calibrated stereo camera required");
        return output;
    }
    if(!data.depthRaw().empty() &&
        (data.cameraModels().size() != 1 || !data.cameraModels()[0].isValidForProjection()))
    {
        UERROR("Calibrated camera required (multi-cameras not supported).");
        return output;
    }

    RegistrationInfo regInfo;

    UASSERT(!this->getPose().isNull());
    if(lastKeyFramePose_.isNull())
    {
        lastKeyFramePose_ = this->getPose(); // reset to current pose
    }
    Transform motionSinceLastKeyFrame = lastKeyFramePose_.inverse()*this->getPose();

    Signature newFrame(data);
    if(refFrame_.sensorData().isValid())
    {
        Signature tmpRefFrame = refFrame_;
        output = registrationPipeline_->computeTransformationMod(
                tmpRefFrame,
                newFrame,
                !guess.isNull()?motionSinceLastKeyFrame*guess:Transform(),
                &regInfo);

        if(info && this->isInfoDataFilled())
        {
            std::list<std::pair<int, std::pair<cv::KeyPoint, cv::KeyPoint> > > pairs;
            EpipolarGeometry::findPairsUnique(tmpRefFrame.getWords(), newFrame.getWords(), pairs);
            info->refCorners.resize(pairs.size());
            info->newCorners.resize(pairs.size());
            std::map<int, int> idToIndex;
            int i=0;
            for(std::list<std::pair<int, std::pair<cv::KeyPoint, cv::KeyPoint> > >::iterator iter=pairs.begin();
                iter!=pairs.end();
                ++iter)
            {
                info->refCorners[i] = iter->second.first.pt;
                info->newCorners[i] = iter->second.second.pt;
                idToIndex.insert(std::make_pair(iter->first, i));
                ++i;
            }
            info->cornerInliers.resize(regInfo.inliersIDs.size(), 1);
            i=0;
            for(; i<(int)regInfo.inliersIDs.size(); ++i)
            {
                info->cornerInliers[i] = idToIndex.at(regInfo.inliersIDs[i]);
            }

            Transform t = this->getPose()*motionSinceLastKeyFrame.inverse();
            for(std::multimap<int, cv::Point3f>::const_iterator iter=tmpRefFrame.getWords3().begin(); iter!=tmpRefFrame.getWords3().end(); ++iter)
            {
                info->localMap.insert(std::make_pair(iter->first, util3d::transformPoint(iter->second, t)));
            }
            info->words = newFrame.getWords();
        }
    }
    else
    {
        // return identity
        output = Transform::getIdentity();
        // a very high variance tells that the new pose is not linked with the previous one
        regInfo.variance = 9999;
    }

    if(!output.isNull())
    {
        output = motionSinceLastKeyFrame.inverse() * output;

        // new key frame?
        if( (registrationPipeline_->isImageRequired() && (keyFrameThr_ == 0 || float(regInfo.inliers) <= keyFrameThr_*float(refFrame_.sensorData().keypoints().size()))) ||
            (registrationPipeline_->isScanRequired() && (scanKeyFrameThr_ == 0 || regInfo.icpInliersRatio <= scanKeyFrameThr_)))
        {
            UDEBUG("Update key frame");
            int features = newFrame.getWordsDescriptors().size();
            if(features == 0)
            {
                newFrame = Signature(data);
                // this will generate features only for the first frame or if optical flow was used (no 3D words)
                Signature dummy;
                registrationPipeline_->computeTransformationMod(
                        newFrame,
                        dummy);
                features = (int)newFrame.sensorData().keypoints().size();
            }
//......... the rest of this code is omitted here .........
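The key-frame test near the end of the example fires when the visual inlier count drops to or below a fraction of the reference frame's keypoints, or when the ICP inlier ratio drops to or below its own threshold (a threshold of 0 means "always update"). Restated as a standalone predicate, with hypothetical parameter names:

// Returns true if a new key frame should be taken (sketch of the condition
// in OdometryF2F::computeTransform above; names are illustrative).
bool shouldUpdateKeyFrame(
        bool imageRequired, int inliers, size_t refKeypoints, float keyFrameThr,
        bool scanRequired, float icpInliersRatio, float scanKeyFrameThr)
{
    bool imageTrigger = imageRequired &&
        (keyFrameThr == 0.0f || float(inliers) <= keyFrameThr * float(refKeypoints));
    bool scanTrigger = scanRequired &&
        (scanKeyFrameThr == 0.0f || icpInliersRatio <= scanKeyFrameThr);
    return imageTrigger || scanTrigger;
}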
Example 6: mainLoop
void CameraThread::mainLoop()
{
    UDEBUG("");
    CameraInfo info;
    SensorData data = _camera->takeImage(&info);

    if(!data.imageRaw().empty())
    {
        if(_colorOnly && !data.depthRaw().empty())
        {
            data.setDepthOrRightRaw(cv::Mat());
        }
        if(_imageDecimation>1 && !data.imageRaw().empty())
        {
            UDEBUG("");
            UTimer timer;
            if(!data.depthRaw().empty() &&
                !(data.depthRaw().rows % _imageDecimation == 0 && data.depthRaw().cols % _imageDecimation == 0))
            {
                UERROR("Decimation of depth images should be exact (decimation=%d, size=(%d,%d))! "
                        "Images won't be resized.", _imageDecimation, data.depthRaw().cols, data.depthRaw().rows);
            }
            else
            {
                data.setImageRaw(util2d::decimate(data.imageRaw(), _imageDecimation));
                data.setDepthOrRightRaw(util2d::decimate(data.depthOrRightRaw(), _imageDecimation));
                std::vector<CameraModel> models = data.cameraModels();
                for(unsigned int i=0; i<models.size(); ++i)
                {
                    if(models[i].isValidForProjection())
                    {
                        models[i] = models[i].scaled(1.0/double(_imageDecimation));
                    }
                }
                data.setCameraModels(models);
                StereoCameraModel stereoModel = data.stereoCameraModel();
                if(stereoModel.isValidForProjection())
                {
                    stereoModel.scale(1.0/double(_imageDecimation));
                    data.setStereoCameraModel(stereoModel);
                }
            }
            info.timeImageDecimation = timer.ticks();
        }
        if(_mirroring && data.cameraModels().size() == 1)
        {
            UDEBUG("");
            UTimer timer;
            cv::Mat tmpRgb;
            cv::flip(data.imageRaw(), tmpRgb, 1);
            data.setImageRaw(tmpRgb);
            UASSERT_MSG(data.cameraModels().size() <= 1 && !data.stereoCameraModel().isValidForProjection(), "Only single RGB-D cameras are supported for mirroring.");
            if(data.cameraModels().size() && data.cameraModels()[0].cx())
            {
                CameraModel tmpModel(
                        data.cameraModels()[0].fx(),
                        data.cameraModels()[0].fy(),
                        float(data.imageRaw().cols) - data.cameraModels()[0].cx(),
                        data.cameraModels()[0].cy(),
                        data.cameraModels()[0].localTransform());
                data.setCameraModel(tmpModel);
            }
            if(!data.depthRaw().empty())
            {
                cv::Mat tmpDepth;
                cv::flip(data.depthRaw(), tmpDepth, 1);
                data.setDepthOrRightRaw(tmpDepth);
            }
            info.timeMirroring = timer.ticks();
        }
        if(_stereoToDepth && data.stereoCameraModel().isValidForProjection() && !data.rightRaw().empty())
        {
            UDEBUG("");
            UTimer timer;
            cv::Mat depth = util2d::depthFromDisparity(
                    _stereoDense->computeDisparity(data.imageRaw(), data.rightRaw()),
                    data.stereoCameraModel().left().fx(),
                    data.stereoCameraModel().baseline());
            data.setCameraModel(data.stereoCameraModel().left());
            data.setDepthOrRightRaw(depth);
            data.setStereoCameraModel(StereoCameraModel());
            info.timeDisparity = timer.ticks();
            UDEBUG("Computing disparity = %f s", info.timeDisparity);
        }
        if(_scanFromDepth &&
            data.cameraModels().size() &&
            data.cameraModels().at(0).isValidForProjection() &&
            !data.depthRaw().empty())
        {
            UDEBUG("");
            if(data.laserScanRaw().empty())
            {
                UASSERT(_scanDecimation >= 1);
                UTimer timer;
                pcl::IndicesPtr validIndices(new std::vector<int>);
                pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = util3d::cloudFromSensorData(data, _scanDecimation, _scanMaxDepth, _scanMinDepth, validIndices.get());
                float maxPoints = (data.depthRaw().rows/_scanDecimation)*(data.depthRaw().cols/_scanDecimation);
                if(_scanVoxelSize>0.0f)
                {
                    cloud = util3d::voxelize(cloud, validIndices, _scanVoxelSize);
//......... the rest of this code is omitted here .........
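Both mainLoop() examples convert a rectified stereo pair to a depth image via util2d::depthFromDisparity(). The underlying relation is standard pinhole stereo geometry: depth = fx * baseline / disparity. A per-pixel sketch of that relation (an illustration of the math, not RTAB-Map's exact implementation):

// Depth from disparity for a rectified stereo pair: Z = fx * B / d.
// Returns 0 for non-positive disparities (no match / invalid pixel).
float depthFromDisparityPx(float disparityPx, float fxPx, float baselineM)
{
    return disparityPx > 0.0f ? fxPx * baselineM / disparityPx : 0.0f;
}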