This article collects typical usage examples of the C++ method Eigen::Transform::setIdentity. If you have been wondering how to use Transform::setIdentity, what it does, or what calling it looks like in real code, the curated examples here may help. You can also explore further usage examples of the enclosing class Eigen::Transform.
A total of 3 code examples of Transform::setIdentity are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
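Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the examples below) of what setIdentity is for: a default-constructed Eigen::Transform is left uninitialized, so setIdentity() resets it to the identity transform before rotations, translations or scalings are composed onto it.

#include <Eigen/Geometry>

int main() {
    // A default-constructed Transform holds uninitialized data;
    // setIdentity() resets it before any operations are composed.
    Eigen::Transform<float, 3, Eigen::Affine> t;
    t.setIdentity();
    t.translate(Eigen::Vector3f(1.0f, 0.0f, 0.0f));
    t.rotate(Eigen::AngleAxisf(0.5f, Eigen::Vector3f::UnitZ()));
    return 0;
}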
Example 1: t
void X3DConverter::startShape(const std::array<float, 12>& matrix) {
    // Finding axis/angle from matrix using Eigen for its bulletproof implementation.
    Eigen::Transform<float, 3, Eigen::Affine> t;
    t.setIdentity();
    // Copy the column-major 3x4 matrix into the affine transform.
    for (unsigned int i = 0; i < 3; i++) {
        for (unsigned int j = 0; j < 4; j++) {
            t(i, j) = matrix[i + j * 3];
        }
    }
    // Decompose into rotation (as axis/angle), scale and translation.
    Eigen::Matrix3f rotationMatrix;
    Eigen::Matrix3f scaleMatrix;
    t.computeRotationScaling(&rotationMatrix, &scaleMatrix);
    Eigen::Quaternionf q;
    Eigen::AngleAxisf aa;
    q = rotationMatrix;
    aa = q;
    Eigen::Vector3f scale = scaleMatrix.diagonal();
    Eigen::Vector3f translation = t.translation();
    // Emit the X3D Transform/Shape/Appearance/Material nodes.
    startNode(ID::Transform);
    m_writers.back()->setSFVec3f(ID::translation, translation.x(), translation.y(), translation.z());
    m_writers.back()->setSFRotation(ID::rotation, aa.axis().x(), aa.axis().y(), aa.axis().z(), aa.angle());
    m_writers.back()->setSFVec3f(ID::scale, scale.x(), scale.y(), scale.z());
    startNode(ID::Shape);
    startNode(ID::Appearance);
    startNode(ID::Material);
    m_writers.back()->setSFColor<vector<float> >(ID::diffuseColor, RVMColorHelper::color(m_materials.back()));
    endNode(ID::Material);   // Material
    endNode(ID::Appearance); // Appearance
}
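Example 1 uses setIdentity() to initialize the transform it fills from the raw 3x4 matrix, then relies on computeRotationScaling to split the linear part into a rotation and a scaling before converting the rotation to axis/angle for X3D. The standalone sketch below (not from the example's code base; the input values are made up) round-trips a known transform through the same decomposition:

#include <Eigen/Geometry>
#include <iostream>

int main() {
    // Build a transform with a known rotation, uniform scale and translation.
    Eigen::Transform<float, 3, Eigen::Affine> t;
    t.setIdentity();
    t.translate(Eigen::Vector3f(1.0f, 2.0f, 3.0f));
    t.rotate(Eigen::AngleAxisf(0.5f, Eigen::Vector3f::UnitZ()));
    t.scale(2.0f);

    // Split the linear part back into rotation and scaling, as Example 1 does.
    Eigen::Matrix3f rotation, scaling;
    t.computeRotationScaling(&rotation, &scaling);

    // Convert the recovered rotation to axis/angle via a quaternion.
    Eigen::Quaternionf q;
    q = rotation;
    Eigen::AngleAxisf aa;
    aa = q;

    std::cout << "angle: " << aa.angle()                        // ~0.5
              << "\naxis: " << aa.axis().transpose()            // ~(0, 0, 1)
              << "\nscale: " << scaling.diagonal().transpose()  // ~(2, 2, 2)
              << "\ntranslation: " << t.translation().transpose() << std::endl;
    return 0;
}

Note that computeRotationScaling returns the scaling as a full 3x3 matrix; Example 1 keeps only its diagonal, which is exact for axis-aligned scaling.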
Example 2:
operator Eigen::Transform<double, 3, Eigen::Affine, _Options>() const
{
    Eigen::Transform<double, 3, Eigen::Affine, _Options> ret;
    ret.setIdentity();
    ret.rotate(this->orientation);
    ret.translation() = this->position;
    return ret;
}
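Example 2 is a conversion operator defined on some pose-like class whose orientation and position members are not shown. The sketch below wraps the same pattern in a hypothetical Pose struct (the struct name and default values are assumptions) so it compiles on its own:

#include <Eigen/Geometry>

// Hypothetical pose type with the members the conversion operator in
// Example 2 relies on (the real enclosing class is not shown there).
struct Pose {
    Eigen::Quaterniond orientation = Eigen::Quaterniond::Identity();
    Eigen::Vector3d position = Eigen::Vector3d::Zero();

    operator Eigen::Transform<double, 3, Eigen::Affine>() const {
        Eigen::Transform<double, 3, Eigen::Affine> ret;
        ret.setIdentity();              // clear the uninitialized matrix
        ret.rotate(orientation);        // apply the stored orientation
        ret.translation() = position;   // overwrite the translation block
        return ret;
    }
};

int main() {
    Pose p;
    p.position = Eigen::Vector3d(1.0, 2.0, 3.0);
    Eigen::Affine3d t = p;  // implicit conversion through the operator above
    return 0;
}

Assigning translation() directly instead of calling translate() keeps the stored position independent of the rotation that was just applied; translate() would post-multiply and rotate the offset.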
Example 3: positioned_patch
void
PointCloudProjector::synchronized_callback(const sensor_msgs::PointCloud2ConstPtr& points_msg,
                                           const samplereturn_msgs::PatchArrayConstPtr& patches_msg)
{
    // create patch output message
    samplereturn_msgs::PatchArray positioned_patches;
    positioned_patches.header = patches_msg->header;
    positioned_patches.cam_info = patches_msg->cam_info;
    // create marker array debug message
    visualization_msgs::MarkerArray vis_markers;
    // create camera model object
    image_geometry::PinholeCameraModel model;
    model.fromCameraInfo(patches_msg->cam_info);
    // ensure tf is ready
    if(!listener_.canTransform(clipping_frame_id_, patches_msg->header.frame_id,
                               patches_msg->header.stamp))
    {
        patches_out.publish(positioned_patches);
        return;
    }
    // get camera origin in clipping frame
    tf::StampedTransform camera;
    listener_.lookupTransform(clipping_frame_id_, patches_msg->header.frame_id,
                              patches_msg->header.stamp, camera);
    Eigen::Vector3d camera_origin;
    tf::vectorTFToEigen(camera.getOrigin(), camera_origin);
    // scale and transform pointcloud into clipping frame
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr colorpoints(new pcl::PointCloud<pcl::PointXYZRGB>),
        points_native(new pcl::PointCloud<pcl::PointXYZRGB>),
        points_scaled(new pcl::PointCloud<pcl::PointXYZRGB>);
    pcl::fromROSMsg(*points_msg, *points_native);
    // this scale is a horrible hack to fix the manipulator point clouds
    Eigen::Transform<float, 3, Eigen::Affine> trans;
    trans.setIdentity();
    trans.scale(config_.pointcloud_scale);
    pcl::transformPointCloud(*points_native, *points_scaled, trans);
    pcl_ros::transformPointCloud(clipping_frame_id_, *points_scaled, *colorpoints, listener_);
    // id counter for debug markers
    int mid = 0;
    for(const auto& patch : patches_msg->patch_array)
    {
        samplereturn_msgs::Patch positioned_patch(patch);
        cv_bridge::CvImagePtr cv_ptr_mask;
        try {
            cv_ptr_mask = cv_bridge::toCvCopy(patch.mask, "mono8");
        }
        catch (cv_bridge::Exception& e) {
            ROS_ERROR("cv_bridge mask exception: %s", e.what());
            continue;
        }
        cv::Point2f roi_offset(patch.image_roi.x_offset, patch.image_roi.y_offset);
        Eigen::Vector4d ground_plane;
        // assume ground plane at z=0, in base_link xy plane for manipulators
        ground_plane << 0, 0, 1, 0;
        float dimension, angle;
        tf::Stamped<tf::Point> world_point;
        if(samplereturn::computeMaskPositionAndSize(listener_,
                    cv_ptr_mask->image, roi_offset,
                    model, patches_msg->header.stamp, patches_msg->header.frame_id,
                    ground_plane, "base_link",
                    &dimension, &angle, &world_point,
                    NULL))
        {
            // if sample size is outside bounds, skip this patch
            if ((dimension > config_.max_major_axis) ||
                (dimension < config_.min_major_axis)) {
                continue;
            }
        }
        // find bounding box of mask
        cv::Rect rect;
        samplereturn::computeBoundingBox(cv_ptr_mask->image, &rect);
        // turn image space bounding box into 4 3d rays
        cv::Point2d patch_origin(patch.image_roi.x_offset,
                                 patch.image_roi.y_offset);
        std::vector<cv::Point2d> rpoints;
        rpoints.push_back(cv::Point2d(rect.x, rect.y) + patch_origin);
        rpoints.push_back(cv::Point2d(rect.x, rect.y + rect.height) + patch_origin);
        rpoints.push_back(cv::Point2d(rect.x + rect.width, rect.y + rect.height) + patch_origin);
        rpoints.push_back(cv::Point2d(rect.x + rect.width, rect.y) + patch_origin);
        std::vector<Eigen::Vector3d> rays;
        rays.resize(rpoints.size());
        std::transform(rpoints.begin(), rpoints.end(), rays.begin(),
                [model, patches_msg, this](cv::Point2d uv) -> Eigen::Vector3d
                {
                    cv::Point3d xyz = model.projectPixelTo3dRay(uv);
//......... part of the code omitted here .........
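Example 3 is truncated above; the setIdentity call appears in the uniform-scale workaround applied to the cloud before it is transformed into the clipping frame. A minimal sketch of just that step, with a placeholder helper name and no ROS plumbing (assumptions, not code from the original node):

#include <Eigen/Geometry>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/common/transforms.h>

// Placeholder helper (not part of the original node) mirroring the scale
// step: reset to identity, apply a uniform scale, transform the cloud.
void scaleCloud(const pcl::PointCloud<pcl::PointXYZRGB>& in,
                pcl::PointCloud<pcl::PointXYZRGB>& out,
                float scale)
{
    Eigen::Transform<float, 3, Eigen::Affine> trans;
    trans.setIdentity();
    trans.scale(scale);
    pcl::transformPointCloud(in, out, trans);
}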