This article collects typical usage examples of the C++ method tf::StampedTransform::setIdentity. If you are unsure what StampedTransform::setIdentity does, how to call it, or what it looks like in real code, the curated examples below may help. You can also read further about the enclosing class tf::StampedTransform.
The following shows 3 code examples of StampedTransform::setIdentity, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
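Before the examples, here is a minimal standalone sketch of what a call to setIdentity on a tf::StampedTransform looks like. The frame names and the stamp are placeholders chosen for illustration; they are not taken from the examples below.

#include <ros/ros.h>
#include <tf/transform_datatypes.h>

// StampedTransform derives from tf::Transform, so setIdentity() resets the
// rotation to the identity quaternion and the origin to (0, 0, 0); the stamp
// and the frame ids keep whatever values were assigned to them.
tf::StampedTransform makeIdentityTransform()
{
    tf::StampedTransform t;
    t.frame_id_       = "parent_frame";   // hypothetical parent frame
    t.child_frame_id_ = "child_frame";    // hypothetical child frame
    t.stamp_          = ros::Time::now();
    t.setIdentity();
    return t;
}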
Example 1: Localization
inline Localization(vector<ObstacleC> *obstacles) :
    location(-1.42, 0), locationKalman(location), kalmanI(locationKalman),
    _cameraProjections(NULL), lastOdomX(0), lastOdomY(0),
    globalPos(0, 0, 0), lastOdom(0, 0, 0), lastOdomID(0), ballPos(0, 0),
    m_field(field_model::FieldModel::getInstance()), obstacles(obstacles),
    CurrentVertexId(0), PreviousVertexId(0), LandmarkCount(0),
    odomLastGet(0, 0), atLeastOneObservation(false), nodeCounter(0)
{
// Subscribe to gait odometry (dead reckoning) and to manual relocalization requests
odom_sub = nodeHandle.subscribe("/gait/odometry", 10, &Localization::dead_reckoning_callback, this);
setLoc_sub = nodeHandle.subscribe<geometry_msgs::Point>("/vision/setLocation", 1, &Localization::setloc_callback, this);
// Field geometry shorthands taken from the field model (length, width, goal area, etc.)
A = m_field->length();
B = m_field->width();
E = m_field->goalAreaDepth();
F = m_field->goalAreaWidth();
G = m_field->penaltyMarkerDist();
H = m_field->centerCircleDiameter();
D = m_field->goalWidth();
I = m_field->boundary();
params.ball->radius.set(m_field->ballDiameter() / 2.);
// Pre-computed half-dimensions used throughout the localization code
A2 = A / 2.;
B2 = B / 2.;
E2 = E / 2.;
F2 = F / 2.;
G2 = G / 2.;
H2 = H / 2.;
D2 = D / 2.;
I2 = I / 2.;
// TF transform from the ego floor frame to the localization field frame, initialized to identity
tfLocField.frame_id_ = "/ego_floor";
tfLocField.child_frame_id_ = "/loc_field";
tfLocField.setIdentity();
lastSavedNodeTime = ros::Time::now();
// create the linear solver
linearSolver = new LinearSolverCSparse<BlockSolverX::PoseMatrixType>();
// create the block solver on top of the linear solver
blockSolver = new BlockSolverX(linearSolver);
// create the algorithm to carry out the optimization
optimizationAlgorithm = new OptimizationAlgorithmLevenberg(blockSolver);
}
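The constructor above only initializes tfLocField; publishing it would normally be done elsewhere with a tf::TransformBroadcaster. As a rough sketch of how that identity transform could be broadcast (the broadcaster and the helper function are assumptions for illustration, not part of the original class):

#include <ros/ros.h>
#include <tf/transform_broadcaster.h>
#include <tf/transform_datatypes.h>

// Hypothetical helper: refresh the stamp and publish the stamped transform.
// frame_id_ and child_frame_id_ were already set in the constructor above.
void publishLocFieldTransform(tf::TransformBroadcaster &broadcaster,
                              tf::StampedTransform &tfLocField)
{
    tfLocField.stamp_ = ros::Time::now();
    broadcaster.sendTransform(tfLocField);
}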
Example 2: cam_info_callback
// wait for one CameraInfo message, then shut down the subscriber
void cam_info_callback(const sensor_msgs::CameraInfo &msg)
{
camParam = aruco_ros::rosCameraInfo2ArucoCamParams(msg, useRectifiedImages);
// handle the Cartesian offset between the stereo pair
// see the sensor_msgs/CameraInfo documentation for details
rightToLeft.setIdentity();
rightToLeft.setOrigin(
    tf::Vector3(-msg.P[3] / msg.P[0],
                -msg.P[7] / msg.P[5],
                0.0));
cam_info_received = true;
cam_info_sub.shutdown();
}
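The translation above follows from the layout of the 3x4 projection matrix P in sensor_msgs/CameraInfo: for the second camera of a rectified stereo pair, P[3] = -fx * baseline, so -P[3] / P[0] recovers the baseline in metres (and is simply zero for a monocular camera). A small standalone sketch of that arithmetic, not part of the original node:

#include <sensor_msgs/CameraInfo.h>

// Sketch: recover the stereo baseline from a CameraInfo projection matrix.
// P is row-major 3x4: [fx 0 cx Tx; 0 fy cy Ty; 0 0 1 0], with Tx = -fx * baseline.
double stereoBaseline(const sensor_msgs::CameraInfo &msg)
{
    const double fx = msg.P[0];
    const double Tx = msg.P[3];
    return fx != 0.0 ? -Tx / fx : 0.0;  // metres; 0.0 when no offset is encoded
}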
Example 3: processWithLock
TyErrorId processWithLock(CAS &tcas, ResultSpecification const &res_spec)
{
MEASURE_TIME;
outInfo("process begins");
rs::SceneCas cas(tcas);
rs::Scene scene = cas.getScene();
cas.get(VIEW_CLOUD, *cloud);
cas.get(VIEW_COLOR_IMAGE, color);
cas.get(VIEW_DEPTH_IMAGE, depth);
cas.get(VIEW_CAMERA_INFO, cameraInfo);
indices->clear();
indices->reserve(cloud->points.size());
// Start from the identity transform; it is overwritten below if the scene provides a view point
camToWorld.setIdentity();
if(scene.viewPoint.has())
{
rs::conversion::from(scene.viewPoint.get(), camToWorld);
}
else
{
outWarn("No camera to world transformation, no further processing!");
throw rs::FrameFilterException();
}
worldToCam = tf::StampedTransform(camToWorld.inverse(), camToWorld.stamp_, camToWorld.child_frame_id_, camToWorld.frame_id_);
computeFrustum();
// By default we look for objects on the counter tops, unless the query asks for a different place.
// The location in a query comes from the designator and does not match the semantic map entries
// directly, so it needs to be transformed first.
rs::Query qs = rs::create<rs::Query>(tcas);
std::vector<std::string> regionsToLookAt;
regionsToLookAt.assign(defaultRegions.begin(),defaultRegions.end());
regions.clear();
if(cas.getFS("QUERY", qs))
{
outWarn("loaction set in query: " << qs.location());
if(std::find(defaultRegions.begin(), defaultRegions.end(), qs.location()) == std::end(defaultRegions) && qs.location()!="")
{
regionsToLookAt.clear();
regionsToLookAt.push_back(qs.location());
}
}
if(regions.empty())
{
std::vector<rs::SemanticMapObject> semanticRegions;
getSemanticMapEntries(cas, regionsToLookAt, semanticRegions);
regions.resize(semanticRegions.size());
for(size_t i = 0; i < semanticRegions.size(); ++i)
{
Region &region = regions[i];
region.width = semanticRegions[i].width();
region.depth = semanticRegions[i].depth();
region.height = semanticRegions[i].height();
region.name = semanticRegions[i].name();
rs::conversion::from(semanticRegions[i].transform(), region.transform);
}
}
for(size_t i = 0; i < regions.size(); ++i)
{
if(frustumCulling(regions[i]) || !frustumCulling_)
{
outInfo("region inside frustum: " << regions[i].name);
filterRegion(regions[i]);
}
else
{
outInfo("region outside frustum: " << regions[i].name);
}
}
pcl::ExtractIndices<PointT> ei;
ei.setKeepOrganized(true);
ei.setIndices(indices);
ei.filterDirectly(cloud);
cas.set(VIEW_CLOUD, *cloud);
if(changeDetection && !indices->empty())
{
++frames;
if(lastImg.empty())
{
lastMask = cv::Mat::ones(color.rows, color.cols, CV_8U);
lastImg = cv::Mat::zeros(color.rows, color.cols, CV_32FC4);
}
uint32_t secondsPassed = camToWorld.stamp_.sec - lastTime.sec;
bool change = checkChange() || cas.has("QUERY") || secondsPassed > timeout;
if(!change)
{
++filtered;
}
else
//......... part of the code is omitted here .........
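One detail worth noting in this example is the worldToCam line: the inverse stamped transform is built by inverting the underlying tf::Transform, keeping the stamp, and swapping the parent and child frame ids. A minimal standalone sketch of that pattern (the helper name is illustrative only):

#include <tf/transform_datatypes.h>

// Sketch: invert a stamped transform. The raw tf::Transform part is inverted,
// the time stamp is preserved, and the frame ids are swapped so that the old
// child becomes the new parent and vice versa.
tf::StampedTransform invertStamped(const tf::StampedTransform &in)
{
    return tf::StampedTransform(in.inverse(), in.stamp_,
                                in.child_frame_id_, in.frame_id_);
}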