This article collects typical usage examples of the C++ method Eigen::Affine3f::linear. If you are unsure what Affine3f::linear does, how to call it, or what it looks like in real code, the curated examples below should help. You may also want to read up on the enclosing class, Eigen::Affine3f.
15 code examples of Affine3f::linear are shown below, sorted by popularity by default.
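As a quick orientation before the examples: for an Eigen::Affine3f, linear() exposes the top-left 3x3 block of the underlying 4x4 matrix (rotation, scale, shear), both readable and writable, while translation() exposes the last column. A minimal standalone sketch, not taken from any of the projects below:

#include <cmath>
#include <iostream>
#include <Eigen/Geometry>

int main()
{
  Eigen::Affine3f pose = Eigen::Affine3f::Identity();
  // write access: linear() returns a mutable view of the top-left 3x3 block
  pose.linear() = Eigen::AngleAxisf(float(M_PI) / 2.f, Eigen::Vector3f::UnitZ()).toRotationMatrix();
  pose.translation() = Eigen::Vector3f(1.f, 2.f, 3.f);
  // read access: extract the rotation part and the translation again
  Eigen::Matrix3f R = pose.linear();
  Eigen::Vector3f t = pose.translation();
  std::cout << R << "\n" << t.transpose() << std::endl;
  return 0;
}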
Example 1: svd
void CoSegmentation::point2point(Matrix3X &srCloud, Matrix3X &tgCloud,
                                 Matrix33 &rotMat, MatrixXX &transVec)
{
  Vector3Type X_mean, Y_mean;
  for (int i = 0; i < 3; ++i) // compute the centroids of the two clouds
  {
    X_mean(i) = (ScalarType)srCloud.row(i).sum() / srCloud.cols();
    Y_mean(i) = (ScalarType)tgCloud.row(i).sum() / tgCloud.cols();
  }
  srCloud.colwise() -= X_mean;
  tgCloud.colwise() -= Y_mean;

  /// Compute transformation
  Eigen::Affine3f transformation;
  Eigen::Matrix3f sigma = (srCloud * tgCloud.transpose()).cast<float>();
  Eigen::JacobiSVD<Eigen::Matrix3f> svd(sigma, Eigen::ComputeFullU | Eigen::ComputeFullV);
  if (svd.matrixU().determinant() * svd.matrixV().determinant() < 0.0) // contains a reflection
  {
    Vector3Type S = Vector3Type::Ones(); S(2) = -1.0;
    transformation.linear().noalias() = svd.matrixV() * S.asDiagonal() * svd.matrixU().transpose();
  }
  else
  {
    transformation.linear().noalias() = svd.matrixV() * svd.matrixU().transpose(); // compute the rotation matrix
  }
  transVec = Y_mean - (transformation.linear().cast<ScalarType>() * X_mean);
  rotMat = transformation.linear().cast<ScalarType>();
  srCloud.colwise() += X_mean;
  tgCloud.colwise() += Y_mean;
}
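The function above is the classic SVD-based (Kabsch/Umeyama) rigid alignment, including the determinant check that repairs a reflection. For comparison, Eigen itself ships a closed-form Umeyama solver; a minimal sketch of using it for the same job, assuming 3xN float point matrices (the final false disables scale estimation so only rotation and translation are fitted):

#include <Eigen/Geometry> // Eigen::umeyama is part of the Geometry module

// src and dst hold corresponding points column-wise (3xN each).
Eigen::Matrix4f alignRigid(const Eigen::Matrix3Xf &src, const Eigen::Matrix3Xf &dst)
{
  Eigen::Matrix4f T = Eigen::umeyama(src, dst, false); // rotation + translation only
  // The parts can be read back through Affine3f::linear()/translation():
  Eigen::Affine3f aff(T);
  Eigen::Matrix3f rotMat   = aff.linear();
  Eigen::Vector3f transVec = aff.translation();
  (void)rotMat; (void)transVec; // shown for illustration
  return T;
}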
Example 2: getViewerPose
Eigen::Affine3f getViewerPose (visualization::PCLVisualizer& viewer)
{
  Eigen::Affine3f pose = viewer.getViewerPose ();
  Eigen::Matrix3f rotation = pose.linear ();
  Eigen::Matrix3f axis_reorder;
  axis_reorder <<  0,  0,  1,
                  -1,  0,  0,
                   0, -1,  0;
  rotation = rotation * axis_reorder;
  pose.linear () = rotation;
  return pose;
}
Example 3: writePose
void
ScreenshotManager::saveImage (const Eigen::Affine3f &camPose, PtrStepSz<const PixelRGB> rgb24)
{
  PCL_INFO ("[o] [o] [o] [o] Saving screenshot [o] [o] [o] [o]\n");

  std::string file_extension_image = ".png";
  std::string file_extension_pose = ".txt";
  std::string filename_image = "KinFuSnapshots/";
  std::string filename_pose = "KinFuSnapshots/";

  // Get Pose
  Eigen::Matrix<float, 3, 3, Eigen::RowMajor> erreMats = camPose.linear ();
  Eigen::Vector3f teVecs = camPose.translation ();

  // Create filenames
  filename_pose = filename_pose + boost::lexical_cast<std::string> (screenshot_counter) + file_extension_pose;
  filename_image = filename_image + boost::lexical_cast<std::string> (screenshot_counter) + file_extension_image;

  // Write files
  writePose (filename_pose, teVecs, erreMats);

  // Save Image
  pcl::io::saveRgbPNGFile (filename_image, (unsigned char*)rgb24.data, 640, 480);

  screenshot_counter++;
}
Example 4: getForcedTfFrame
bool CommandSubscriber::getForcedTfFrame(Eigen::Affine3f & result) const
{
  tf::StampedTransform transform;
  try
  {
    m_tf_listener.lookupTransform(m_forced_tf_frame_reference, m_forced_tf_frame_name, ros::Time(0), transform);
  }
  catch (const tf::TransformException & ex) // catch by const reference, not by value
  {
    ROS_ERROR("kinfu: hint was forced by TF, but retrieval failed because: %s", ex.what());
    return false;
  }

  Eigen::Vector3f vec;
  vec.x() = transform.getOrigin().x();
  vec.y() = transform.getOrigin().y();
  vec.z() = transform.getOrigin().z();

  Eigen::Quaternionf quat;
  quat.x() = transform.getRotation().x();
  quat.y() = transform.getRotation().y();
  quat.z() = transform.getRotation().z();
  quat.w() = transform.getRotation().w();

  result.linear() = Eigen::AngleAxisf(quat).matrix();
  result.translation() = vec;
  return true;
}
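As an aside, ROS's tf_conversions package can replace the field-by-field copy above; it works in double precision, so a cast back to float is needed for Affine3f. A hedged sketch of the equivalent body:

#include <tf_conversions/tf_eigen.h>

// Equivalent conversion inside the try/catch above (sketch):
Eigen::Affine3d result_d;
tf::transformTFToEigen(transform, result_d); // tf::Transform -> Eigen in one call
result = result_d.cast<float>();             // back to the Affine3f out-parameter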
Example 5: return
Eigen::Affine3f
pcl::gpu::kinfuLS::KinfuTracker::getLastEstimatedPose () const
{
  Eigen::Affine3f aff;
  aff.linear () = last_estimated_rotation_;
  aff.translation () = last_estimated_translation_;
  return (aff);
}
Example 6: setWorldFromCam
void FakeKinect::setWorldFromCam(const Eigen::Affine3f &worldFromCam) {
  Eigen::Vector3f eye, center, up;
  Eigen::Matrix3f rot;
  rot = worldFromCam.linear();
  eye = worldFromCam.translation();
  center = rot.col(2); // viewing direction: third column of the rotation
  up = -rot.col(1);    // camera "up": negated second column
  m_cam->setViewMatrixAsLookAt(toOSGVector(eye), toOSGVector(center + eye), toOSGVector(up));
}
Example 7: applyTransform
bool applyTransform(std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > &points,
                    std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > &normals,
                    const Eigen::Affine3f &t)
{
  // Normals must be transformed by the inverse transpose of the linear part.
  const Eigen::Matrix3f normalMatrix = t.linear().inverse().transpose();
  for (size_t i = 0; i < points.size(); ++i) {
    points[i] = t * points[i];
    normals[i] = (normalMatrix * normals[i]).normalized();
  }
  return true;
}
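Why the inverse transpose: applying t.linear() directly would skew normals whenever the transform contains non-uniform scale or shear (for a pure rotation R the inverse transpose equals R again, so nothing changes). A small self-contained check:

#include <iostream>
#include <Eigen/Geometry>

int main()
{
  // Transform with non-uniform scale: x is stretched by 2.
  Eigen::Affine3f t = Eigen::Affine3f::Identity();
  t.scale(Eigen::Vector3f(2.f, 1.f, 1.f));

  // The plane x + y = 0 has normal (1,1,0); after stretching x the plane
  // becomes x/2 + y = 0, so the correct normal direction is (0.5,1,0).
  Eigen::Vector3f n(1.f, 1.f, 0.f);
  Eigen::Matrix3f normalMatrix = t.linear().inverse().transpose();
  std::cout << (normalMatrix * n).normalized().transpose() << std::endl; // ~(0.447, 0.894, 0) -- correct
  std::cout << (t.linear() * n).normalized().transpose()   << std::endl; // ~(0.894, 0.447, 0) -- skewed
  return 0;
}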
Example 8: return
Eigen::Affine3f
pcl::gpu::KinfuTracker::getCameraPose (int time) const
{
  if (time >= (int)rmats_.size () || time < 0) // >=, since time == size() would index out of bounds
    time = rmats_.size () - 1;

  Eigen::Affine3f aff;
  aff.linear () = rmats_[time];
  aff.translation () = tvecs_[time];
  return (aff);
}
Example 9: svd
inline void
RGBID_SLAM::CameraView::drawMotionUncertainty(const Eigen::Matrix<float,6,6> cov_mat, const Eigen::Affine3f& pose, double r, double g, double b)
{
  //boost::mutex::scoped_lock lock(display_mutex_);
  Eigen::Matrix3f A = cov_mat.block<3,3>(0,0);
  // Full U: thin unitaries are not available for fixed-size matrices
  // (for a square 3x3 matrix the result is the same anyway).
  Eigen::JacobiSVD<Eigen::Matrix3f> svd(A, Eigen::ComputeFullU);
  Eigen::Vector3f singval_A = svd.singularValues();
  Eigen::Matrix3f singvec_A = svd.matrixU();

  Eigen::Matrix3f singval_A_mat = Eigen::Matrix3f::Identity();
  singval_A_mat(0,0) = singval_A(0);
  singval_A_mat(1,1) = singval_A(1);
  singval_A_mat(2,2) = singval_A(2);

  // Scale the unit axes by the square-rooted covariance to get an uncertainty ellipsoid.
  Eigen::Affine3f trafo = Eigen::Affine3f::Identity(); // identity, so the translation part is not left uninitialized
  Eigen::Matrix3f trafomat = 10000.f * singvec_A * singval_A_mat.cwiseSqrt();
  trafo.linear() = trafomat;
  //std::cout << trafo.linear() << std::endl;
  //std::cout << trafo.translation() << std::endl;

  pcl::PointXYZ p0x, p1x, p0y, p1y, p0z, p1z;
  p0x.x = -1.f; p0x.y =  0.f; p0x.z =  0.f;
  p1x.x =  1.f; p1x.y =  0.f; p1x.z =  0.f;
  p0y.x =  0.f; p0y.y = -1.f; p0y.z =  0.f;
  p1y.x =  0.f; p1y.y =  1.f; p1y.z =  0.f;
  p0z.x =  0.f; p0z.y =  0.f; p0z.z = -1.f;
  p1z.x =  0.f; p1z.y =  0.f; p1z.z =  1.f;

  p0x = pcl::transformPoint (p0x, trafo);
  p1x = pcl::transformPoint (p1x, trafo);
  p0y = pcl::transformPoint (p0y, trafo);
  p1y = pcl::transformPoint (p1y, trafo);
  p0z = pcl::transformPoint (p0z, trafo);
  p1z = pcl::transformPoint (p1z, trafo);

  p0x = pcl::transformPoint (p0x, pose);
  p1x = pcl::transformPoint (p1x, pose);
  p0y = pcl::transformPoint (p0y, pose);
  p1y = pcl::transformPoint (p1y, pose);
  p0z = pcl::transformPoint (p0z, pose);
  p1z = pcl::transformPoint (p1z, pose);

  std::stringstream ss;
  ss.str ("");
  ss << name_ << "_uncline1";
  cloud_viewer_.addLine (p0x, p1x, r, g, b, ss.str ());
  ss.str ("");
  ss << name_ << "_uncline2";
  cloud_viewer_.addLine (p0y, p1y, r, g, b, ss.str ());
  ss.str ("");
  ss << name_ << "_uncline3";
  cloud_viewer_.addLine (p0z, p1z, r, g, b, ss.str ());
}
Example 10: viewerGL
void CGLUtil::viewerGL()
{
  glMatrixMode(GL_MODELVIEW);
  // load the matrix to set camera pose
  glLoadMatrixf(_eimModelViewGL.data());

  // rotation
  Eigen::Matrix3f eimRotation;
  if( btl::utility::BTL_GL == _eConvention ){
    eimRotation = Eigen::AngleAxisf(float(_dXAngle*M_PI/180.f), Eigen::Vector3f::UnitY())
                * Eigen::AngleAxisf(float(_dYAngle*M_PI/180.f), Eigen::Vector3f::UnitX()); // 3. rotate horizontally
  } // mouse x-movement is the rotation around the y-axis
  else if( btl::utility::BTL_CV == _eConvention ) {
    eimRotation = Eigen::AngleAxisf(float(_dXAngle*M_PI/180.f), -Eigen::Vector3f::UnitY())
                * Eigen::AngleAxisf(float(_dYAngle*M_PI/180.f), Eigen::Vector3f::UnitX()); // 3. rotate horizontally
  }

  // translation
  /*_dZoom = _dZoom < 0.1? 0.1: _dZoom;
  _dZoom = _dZoom > 10? 10: _dZoom;*/

  // get the direction N pointing from the camera centre to the object centroid
  Eigen::Affine3f M; M = _eimModelViewGL;
  Eigen::Vector3f T = M.translation();
  Eigen::Matrix3f R = M.linear();
  Eigen::Vector3f C = -R.transpose()*T; // camera centre
  Eigen::Vector3f N = _eivCentroid - C; // from camera centre to object centroid
  N = N/N.norm();                       // normalisation

  Eigen::Affine3f _eimManipulate; _eimManipulate.setIdentity();
  _eimManipulate.translate( N*float(_dZoom) ); // use camera movement toward the object for zoom in/out effects
  _eimManipulate.translate(_eivCentroid);      // 5. translate back to the original camera pose
  //_eimManipulate.scale(s);                   // 4. zoom in/out; never use scale to simulate camera movement, it affects z-buffer capturing; use translation instead
  _eimManipulate.rotate(eimRotation);          // 2. rotate vertically / 3. rotate horizontally
  _eimManipulate.translate(-_eivCentroid);     // 1. translate the camera centre to align with the object centroid
  glMultMatrixf(_eimManipulate.data());
  /*
  glTranslated( _aCentroid[0], _aCentroid[1], _aCentroid[2] ); // 5. translate back to the original camera pose
  _dZoom = _dZoom < 0.1? 0.1: _dZoom;
  _dZoom = _dZoom > 10? 10: _dZoom;
  glScaled( _dZoom, _dZoom, _dZoom );  // 4. zoom in/out
  if( btl::utility::BTL_GL == _eConvention )
    glRotated ( _dXAngle, 0, 1, 0 );   // 3. rotate horizontally
  else if( btl::utility::BTL_CV == _eConvention ) // mouse x-movement is the rotation around the y-axis
    glRotated ( _dXAngle, 0, -1, 0 );
  glRotated ( _dYAngle, 1, 0, 0 );     // 2. rotate vertically
  glTranslated(-_aCentroid[0], -_aCentroid[1], -_aCentroid[2] ); // 1. translate the camera centre to align with the object centroid
  */

  // light position in 3d
  glLightfv(GL_LIGHT0, GL_POSITION, _aLight);
}
Example 11: l
template <typename T1, typename T2> // template header restored; T1/T2 are array-like row-major buffers
Eigen::Affine3f WorldDownloadManager::toEigenAffine(T1 linear, T2 translation)
{
  Eigen::Matrix3f l;
  Eigen::Vector3f t;
  // copy the row-major 3x3 linear part
  for (uint r = 0; r < 3; r++)
    for (uint c = 0; c < 3; c++)
      l(r,c) = linear[r * 3 + c];
  for (uint r = 0; r < 3; r++)
    t[r] = translation[r];
  Eigen::Affine3f result;
  result.linear() = l;
  result.translation() = t;
  return result;
}
Example 12: q
geometry_msgs::Pose transformEigenAffine3fToPose(Eigen::Affine3f e) {
  Eigen::Vector3f Oe;
  Eigen::Matrix3f Re;
  geometry_msgs::Pose pose;
  Oe = e.translation();
  Re = e.linear();

  Eigen::Quaternionf q(Re); // convert rotation matrix Re to a quaternion, q
  pose.position.x = Oe(0);
  pose.position.y = Oe(1);
  pose.position.z = Oe(2);

  pose.orientation.x = q.x();
  pose.orientation.y = q.y();
  pose.orientation.z = q.z();
  pose.orientation.w = q.w();
  return pose;
}
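The reverse direction is symmetric; a minimal sketch of a hypothetical companion helper (not part of the original snippet) that rebuilds an Eigen::Affine3f from a geometry_msgs::Pose:

// Hypothetical inverse helper: geometry_msgs::Pose -> Eigen::Affine3f.
Eigen::Affine3f transformPoseToEigenAffine3f(const geometry_msgs::Pose &pose) {
  Eigen::Affine3f e = Eigen::Affine3f::Identity();
  e.translation() << pose.position.x, pose.position.y, pose.position.z;
  Eigen::Quaternionf q(pose.orientation.w, pose.orientation.x,
                       pose.orientation.y, pose.orientation.z); // Eigen takes w first
  e.linear() = q.normalized().toRotationMatrix();
  return e;
}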
Example 13: printPose
void
printPose( Eigen::Affine3f const& pose )
{
  // debug
  std::cout << pose.linear() << std::endl
            << pose.translation().transpose() << std::endl;

  // Z-Y-X (yaw-pitch-roll) Euler angles extracted from the rotation matrix
  float alpha = atan2( pose.linear()(1,0), pose.linear()(0,0) );
  float beta  = atan2( -pose.linear()(2,0),
                       sqrt( pose.linear()(2,1) * pose.linear()(2,1) +
                             pose.linear()(2,2) * pose.linear()(2,2) ) );
  float gamma = atan2( pose.linear()(2,1), pose.linear()(2,2) );

  std::cout << "alpha: " << alpha << " " << alpha * 180.f / M_PI << std::endl;
  std::cout << "beta: "  << beta  << " " << beta  * 180.f / M_PI << std::endl;
  std::cout << "gamma: " << gamma << " " << gamma * 180.f / M_PI << std::endl;
}
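Eigen can produce the same Z-Y-X decomposition with a one-liner, though its convention constrains the first angle to [0, pi], so the triple may come out as a different but equivalent representation of the same rotation:

// R = AngleAxis(zyx(0), UnitZ) * AngleAxis(zyx(1), UnitY) * AngleAxis(zyx(2), UnitX)
Eigen::Vector3f zyx = pose.linear().eulerAngles(2, 1, 0);
std::cout << "alpha: " << zyx(0) << " beta: " << zyx(1) << " gamma: " << zyx(2) << std::endl;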
Example 14: main
int main(int argc, char** argv) {
  ros::init(argc, argv, "object_finder_node"); // name this node
  ROS_INFO("instantiating the object finder action server: ");
  ObjectFinder object_finder_as; // create an instance of the class "ObjectFinder"
  tf::TransformListener tfListener;

  ROS_INFO("listening for kinect-to-base transform:");
  tf::StampedTransform stf_kinect_wrt_base;
  bool tferr = true;
  ROS_INFO("waiting for tf between kinect_pc_frame and base_link...");
  while (tferr) {
    tferr = false;
    try {
      // The direction of the transform returned will be from the target_frame to the source_frame,
      // which, if applied to data, will transform data in the source_frame into the target_frame.
      // See tf/CoordinateFrameConventions#Transform_Direction
      tfListener.lookupTransform("base_link", "kinect_pc_frame", ros::Time(0), stf_kinect_wrt_base);
    } catch (tf::TransformException &exception) {
      ROS_WARN("%s; retrying...", exception.what());
      tferr = true;
      ros::Duration(0.5).sleep(); // sleep for half a second
      ros::spinOnce();
    }
  }
  ROS_INFO("kinect to base_link tf is good");
  object_finder_as.xformUtils_.printStampedTf(stf_kinect_wrt_base);

  tf::Transform tf_kinect_wrt_base = object_finder_as.xformUtils_.get_tf_from_stamped_tf(stf_kinect_wrt_base);
  g_affine_kinect_wrt_base = object_finder_as.xformUtils_.transformTFToAffine3f(tf_kinect_wrt_base);
  cout << "affine rotation: " << endl;
  cout << g_affine_kinect_wrt_base.linear() << endl;
  cout << "affine offset: " << g_affine_kinect_wrt_base.translation().transpose() << endl;

  ROS_INFO("going into spin");
  // from here, all the work is done in the action server, with the interesting stuff done within "executeCB()"
  while (ros::ok()) {
    ros::spinOnce(); // normally, can simply do: ros::spin();
    ros::Duration(0.1).sleep();
  }
  return 0;
}
Example 15:
void CGLUtil::getRTFromWorld2CamCV(Eigen::Matrix3f* pRw_, Eigen::Vector3f* pTw_) {
  // only the matrix on top of the modelview matrix stack works
  Eigen::Affine3f M;
  glGetFloatv(GL_MODELVIEW_MATRIX, M.matrix().data());
  Eigen::Matrix3f S;
  *pTw_ = M.translation();
  *pRw_ = M.linear();
  //M.computeRotationScaling(pRw_, &S);
  //*pTw_ = (*pTw_)/S(0,0);

  // negate rows 1 and 2 to switch from the GL to the CV convention
  for (int r = 1; r < 3; r++){
    for (int c = 0; c < 3; c++){
      (*pRw_)(r,c) = -(*pRw_)(r,c);
    }
    (*pTw_)(r) = -(*pTw_)(r);
  }
  //PRINT(S);
  //PRINT(*pRw_);
  //PRINT(*pTw_);
  return;
}