本文整理汇总了C++中vpHomogeneousMatrix类的典型用法代码示例。如果您正苦于以下问题:C++ vpHomogeneousMatrix类的具体用法?C++ vpHomogeneousMatrix怎么用?C++ vpHomogeneousMatrix使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了vpHomogeneousMatrix类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: computePose
/*!
  Estimate the pose \e cMo from 2D/3D point correspondences, then print the
  result (translation in meter, theta-u rotation in degree) on stdout.

  \param point : 3D model points; their normalized image coordinates are
  updated in place.
  \param corners : Matching 2D points expressed in pixels.
  \param cam : Intrinsic camera parameters used for the pixel-to-meter
  conversion.
  \param init : When true, bootstrap the pose with Dementhon before the
  virtual visual-servoing refinement; otherwise refine from the current cMo.
  \param cMo : In/out estimated pose of the object in the camera frame.
*/
void computePose(std::vector<vpPoint> &point, const std::vector<vpImagePoint> &corners,
const vpCameraParameters &cam, bool init, vpHomogeneousMatrix &cMo)
{
  vpPose pose;
  double x = 0, y = 0;
  for (unsigned int i = 0; i < point.size(); i++) {
    // Convert each corner from pixel coordinates to normalized meters.
    vpPixelMeterConversion::convertPoint(cam, corners[i], x, y);
    point[i].set_x(x);
    point[i].set_y(y);
    pose.addPoint(point[i]);
  }

  // First call: Dementhon-based initialization; later calls: refine the
  // previous estimate with virtual visual servoing.
  if (init)
    pose.computePose(vpPose::DEMENTHON_VIRTUAL_VS, cMo);
  else
    pose.computePose(vpPose::VIRTUAL_VS, cMo);

  // Display the pose as [t (m), theta-u (deg)].
  vpTranslationVector t;
  vpRotationMatrix R;
  cMo.extract(t);
  cMo.extract(R);
  vpThetaUVector tu(R);
  std::cout << "cMo: ";
  for (unsigned int i = 0; i < 3; i++)
    std::cout << t[i] << " ";
  for (unsigned int i = 0; i < 3; i++)
    std::cout << vpMath::deg(tu[i]) << " ";
  std::cout << std::endl;
}
示例2: buildFrom
/*!
  Initialize this 6-dimension pose vector \f$ [\bf t, \Theta \bf u]^\top\f$
  from an homogeneous matrix \f$ \bf M \f$.

  \param M : Homogeneous matrix \f$ \bf M \f$ whose translation \f$ \bf t \f$
  and \f$\Theta \bf u \f$ rotation parts are extracted to initialize the pose
  vector.

  \return The built pose vector.
*/
vpPoseVector
vpPoseVector::buildFrom(const vpHomogeneousMatrix& M)
{
  vpTranslationVector tv;
  vpRotationMatrix R;
  M.extract(tv);
  M.extract(R);
  buildFrom(tv, R);
  return *this;
}
示例3: buildFrom
/*!
  Initialize a velocity twist transformation matrix from an homogeneous matrix
  \f$M\f$ with \f[ {\bf M} = \left[\begin{array}{cc} {\bf R} & {\bf t}
  \\ {\bf 0}_{1\times 3} & 1 \end{array} \right] \f]

  \param M : Homogeneous matrix \f$M\f$ providing the rotation \f${\bf R}\f$
  and translation \f${\bf t}\f$ used to initialize the twist transformation
  matrix.
*/
vpVelocityTwistMatrix
vpVelocityTwistMatrix::buildFrom(const vpHomogeneousMatrix &M)
{
  vpRotationMatrix R;
  vpTranslationVector tv;
  M.extract(tv);
  M.extract(R);
  buildFrom(tv, R);
  return *this;
}
示例4: setPosition
/*!
  Set the position and the orientation of a SceneNode.

  \param name : Name of the SceneNode to move.
  \param wMo : The homogeneous matrix holding the rotation and translation to
  apply to the node.
*/
void vpAROgre::setPosition(const std::string &name,
                           const vpHomogeneousMatrix &wMo)
{
  // Split the homogeneous transformation into its two components.
  vpRotationMatrix rotations;
  vpTranslationVector translation;
  wMo.extract(translation);
  wMo.extract(rotations);

  // Delegate to the translation- and rotation-specific overloads.
  setPosition(name, translation);
  setRotation(name, rotations);
}
示例5: printPose
/*!
  Print a pose on stdout, prefixed by \e text: first the translation
  components in meter, then the theta-u rotation components in degree.
*/
void printPose(const std::string &text, const vpHomogeneousMatrix &cMo)
{
  vpTranslationVector t;
  vpRotationMatrix R;
  cMo.extract(t);
  cMo.extract(R);
  vpThetaUVector tu(R);

  std::cout << text;
  for (unsigned int i = 0; i < 3; i++)
    std::cout << t[i] << " ";
  for (unsigned int i = 0; i < 3; i++)
    std::cout << vpMath::deg(tu[i]) << " ";
  std::cout << std::endl;
}
示例6:
/*!
  Draw the tracked camera frame (origin plus x, y, z axes) in image \e I as
  seen from an external camera located at \e cextMo.

  \param I : Image in which the frame is drawn.
  \param cextMo : Pose of the object in the external camera frame.
  \param cMo : Pose of the object in the displayed camera frame.
  \param cam : Intrinsics of the external camera, used to project the axes.
*/
void
vpProjectionDisplay::displayCamera(vpImage<unsigned char> &I,
                                   const vpHomogeneousMatrix &cextMo,
                                   const vpHomogeneousMatrix &cMo,
                                   const vpCameraParameters &cam)
{
  // Displayed-camera frame expressed in the external camera frame.
  vpHomogeneousMatrix c1Mc = cextMo * cMo.inverse();

  // Project the frame origin and the three axis end points.
  o.track(c1Mc);
  x.track(c1Mc);
  y.track(c1Mc);
  z.track(c1Mc);

  vpImagePoint origin, tip;
  vpMeterPixelConversion::convertPoint(cam, o.p[0], o.p[1], origin);

  // One colored arrow per axis: x in green, y in blue, z in red.
  vpMeterPixelConversion::convertPoint(cam, x.p[0], x.p[1], tip);
  vpDisplay::displayArrow(I, origin, tip, vpColor::green);
  vpMeterPixelConversion::convertPoint(cam, y.p[0], y.p[1], tip);
  vpDisplay::displayArrow(I, origin, tip, vpColor::blue);
  vpMeterPixelConversion::convertPoint(cam, z.p[0], z.p[1], tip);
  vpDisplay::displayArrow(I, origin, tip, vpColor::red);
}
示例7: catch
/*!
  Load the wide-camera extrinsic transformation \e cMh from the calibration
  file on disk.

  \param cMh : Output homogeneous matrix. On failure it is filled with a
  hard-coded default calibration.

  \return 0 on success, -1 if the file could not be read (in that case the
  ifstream destructor closes the stream and \e cMh holds the default values).
*/
int HRP2ComputeControlLawProcess::loadcMh(vpHomogeneousMatrix& cMh)
{
  //TODO : this file name shouldn't be written here
  ifstream file;
  string name="./data/ViSP/hrp2CamParam/cMh.3";
  file.open (name.c_str());
  try
    {
      cMh.load(file);
    }
  // Catch by const reference: catching by value copies the exception and
  // would slice any vpException subclass.
  catch(const vpException &a) // file doesn't exist
    {
      cout << "---- Open Exception Default---------------"<<endl;
      cout<<"----Wide camera extrinsic parameters ---- " <<endl;
      vpCTRACE << endl <<a;
      cout << "------------------ " <<endl;
      // fill a defaut matrix
      cMh[0][0]=0.016937505; cMh[0][1]=-0.9997821632; cMh[0][2]= -0.01217327283; cMh[0][3]=-0.05195444004;
      cMh[1][0]=-0.1698427574; cMh[1][1]=0.009121740254; cMh[1][2]= -0.9854286431; cMh[1][3]= 0.1036017168 ;
      cMh[2][0]=0.9853256992 ; cMh[2][1]=0.0187590039; cMh[2][2]= -0.1696511946; cMh[2][3]=-0.04729590458 ;
      cMh[3][0]=0; cMh[3][1]=0;cMh[3][2]= 0; cMh[3][3]=1 ;
      return -1;
    }
  file.close();
  return 0;
}
示例8:
/*!
  Draw the tracked camera frame (origin plus x, y, z axes) in image \e I as
  seen from an external camera located at \e cextMo, with a configurable
  line thickness. Nothing is drawn when the frame origin lies behind the
  external camera.

  \param I : Image in which the frame is drawn.
  \param cextMo : Pose of the object in the external camera frame.
  \param cMo : Pose of the object in the displayed camera frame.
  \param cam : Intrinsics of the external camera, used to project the axes.
  \param thickness : Arrow line thickness in pixels.
*/
void
vpProjectionDisplay::displayCamera(vpImage<unsigned char> &I,
                                   const vpHomogeneousMatrix &cextMo,
                                   const vpHomogeneousMatrix &cMo,
                                   const vpCameraParameters &cam,
                                   const unsigned int thickness)
{
  // Displayed-camera frame expressed in the external camera frame.
  vpHomogeneousMatrix c1Mc = cextMo * cMo.inverse();

  o.track(c1Mc);
  if(o.get_Z() < 0) // do not print camera if behind the external camera
    return;

  x.track(c1Mc);
  y.track(c1Mc);
  z.track(c1Mc);

  vpImagePoint origin, tip;
  vpMeterPixelConversion::convertPoint(cam, o.p[0], o.p[1], origin);

  // One colored arrow per axis: x in green, y in blue, z in red.
  vpMeterPixelConversion::convertPoint(cam, x.p[0], x.p[1], tip);
  vpDisplay::displayArrow(I, origin, tip, vpColor::green, 4+thickness, 2+thickness, thickness);
  vpMeterPixelConversion::convertPoint(cam, y.p[0], y.p[1], tip);
  vpDisplay::displayArrow(I, origin, tip, vpColor::blue, 4+thickness, 2+thickness, thickness);
  vpMeterPixelConversion::convertPoint(cam, z.p[0], z.p[1], tip);
  vpDisplay::displayArrow(I, origin, tip, vpColor::red, 4+thickness, 2+thickness, thickness);
}
示例9: compute_pose
/*!
  Compute the pose \e cMo from the 3D coordinates of the points \e point and
  their corresponding 2D coordinates \e dot. The pose is computed using a Lowe
  non linear method.

  \param point : 3D coordinates of the points.
  \param dot : 2D coordinates of the points.
  \param ndot : Number of points or dots used for the pose estimation.
  \param cam : Intrinsic camera parameters.
  \param cMo : Homogeneous matrix in output describing the transformation
  between the camera and object frame.
  \param cto : Translation in output extracted from \e cMo.
  \param cro : Rotation in output extracted from \e cMo.
  \param init : Indicates if we have to estimate an initial pose with
  Lagrange or Dementhon methods.
*/
void compute_pose(vpPoint point[], vpDot2 dot[], int ndot,
                  vpCameraParameters cam,
                  vpHomogeneousMatrix &cMo,
                  vpTranslationVector &cto,
                  vpRxyzVector &cro, bool init)
{
  vpHomogeneousMatrix cMo_dementhon; // computed pose with Dementhon
  vpHomogeneousMatrix cMo_lagrange; // computed pose with Lagrange
  vpRotationMatrix cRo;
  vpPose pose;
  vpImagePoint cog;
  // Feed the pose estimator with the 2D/3D correspondences.
  for (int i=0; i < ndot; i ++) {
    double x=0, y=0;
    cog = dot[i].getCog();
    vpPixelMeterConversion::convertPoint(cam,
                                         cog,
                                         x, y) ; //pixel to meter conversion
    point[i].set_x(x) ;//projection perspective p
    point[i].set_y(y) ;
    pose.addPoint(point[i]) ;
  }

  if (init == true) {
    // No previous pose available: try both linear initialization methods
    // and keep the one with the smallest residual.
    pose.computePose(vpPose::DEMENTHON, cMo_dementhon) ;
    // Compute and return the residual expressed in meter for the pose matrix
    // 'cMo'
    double residual_dementhon = pose.computeResidual(cMo_dementhon);
    pose.computePose(vpPose::LAGRANGE, cMo_lagrange) ;
    double residual_lagrange = pose.computeResidual(cMo_lagrange);

    // Select the best pose to initialize the lowe pose computation
    if (residual_lagrange < residual_dementhon)
      cMo = cMo_lagrange;
    else
      cMo = cMo_dementhon;
  }
  else { // init = false; use of the previous pose to initialise LOWE
    cRo.buildFrom(cro);
    cMo.buildFrom(cto, cRo);
  }
  // Non-linear refinement starting from the selected initial pose.
  pose.computePose(vpPose::LOWE, cMo) ;
  // Report the refined translation and rotation back to the caller.
  cMo.extract(cto);
  cMo.extract(cRo);
  cro.buildFrom(cRo);
}
示例10: one
/*!
Build a 3D \f$ \theta u \f$ visual feature from an
homogeneous matrix \f$ M \f$ that represent the displacement that
the camera has to achieve.
\param M [in] : Homogeneous transformation that describe the
movement that the camera has to achieve. Only the rotational part of
this homogeneous transformation is taken into consideration.
Depending on the rotation representation type
(vpFeatureThetaU::vpFeatureThetaURotationRepresentationType) used to
construct this object, the parameter \e M represents either the
rotation that the camera has to achieve to move from the desired
camera frame to the current one (\f$ ^{c^*}R_c\f$), or the rotation
that the camera has to achieve to move from the current camera frame
to the desired one (\f$ ^{c}R_{c^*}\f$).
*/
void
vpFeatureThetaU::buildFrom(const vpHomogeneousMatrix &M)
{
vpRotationMatrix R ;
M.extract(R) ;
vpThetaUVector tu(R) ;
buildFrom(tu) ;
}
示例11: readConfigVar_PoseRxyz
void readConfigVar_PoseRxyz(const string &s, vpHomogeneousMatrix &M)
{
vpColVector v(6);
vpIoTools::readConfigVar(s, v);
vpTranslationVector t(v[0], v[1], v[2]);
vpRxyzVector r(v[3], v[4], v[5]);
vpRotationMatrix R(r);
M.buildFrom(t, R);
}
示例12: execute
/*!
  Build a moment-based eye-in-hand visual servoing task between a source and
  a destination moment object, compute the control law once and return the
  resulting interaction matrix.

  \param cMo : Pose of the object in the current camera frame.
  \param cdMo : Pose of the object in the desired camera frame.
  \param src : Moment object observed at the current position.
  \param dst : Moment object observed at the desired position.

  \return The interaction matrix of the task.
*/
vpMatrix execute(const vpHomogeneousMatrix& cMo, const vpHomogeneousMatrix& cdMo,
                 vpMomentObject &src, vpMomentObject &dst)
{
  // Removed a stray empty statement that followed this initializer.
  vpServo::vpServoIteractionMatrixType interaction_type = vpServo::CURRENT; //current or desired

  vpServo task;
  task.setServo(vpServo::EYEINHAND_CAMERA);

  //A,B,C parameters of source and destination plane
  double A; double B; double C;
  double Ad; double Bd; double Cd;

  //init main object: using moments up to order 6
  //Initializing values from regular plane (with ax+by+cz=d convention)
  vpPlane pl;
  pl.setABCD(0,0,1.0,0);
  pl.changeFrame(cMo);
  planeToABC(pl,A,B,C);

  pl.setABCD(0,0,1.0,0);
  pl.changeFrame(cdMo);
  planeToABC(pl,Ad,Bd,Cd);

  //extracting initial position (actually we only care about Zdst)
  vpTranslationVector vec;
  cdMo.extract(vec);

  ///////////////////////////// initializing moments and features /////////////////////////////////
  //don't need to be specific, vpMomentCommon automatically loads Xg,Yg,An,Ci,Cj,Alpha moments
  vpMomentCommon moments (vpMomentCommon ::getSurface(dst),vpMomentCommon::getMu3(dst),vpMomentCommon::getAlpha(dst), vec[2]);
  vpMomentCommon momentsDes(vpMomentCommon::getSurface(dst),vpMomentCommon::getMu3(dst),vpMomentCommon::getAlpha(dst),vec[2]);
  //same thing with common features
  vpFeatureMomentCommon featureMoments(moments);
  vpFeatureMomentCommon featureMomentsDes(momentsDes);

  moments.updateAll(src);
  momentsDes.updateAll(dst);

  featureMoments.updateAll(A,B,C);
  featureMomentsDes.updateAll(Ad,Bd,Cd);

  //setup the interaction type
  task.setInteractionMatrixType(interaction_type) ;
  //////////////////////////////////add useful features to task//////////////////////////////
  task.addFeature(featureMoments.getFeatureGravityNormalized(),featureMomentsDes.getFeatureGravityNormalized());
  task.addFeature(featureMoments.getFeatureAn(),featureMomentsDes.getFeatureAn());
  //the moments are different in case of a symmetric object
  task.addFeature(featureMoments.getFeatureCInvariant(),featureMomentsDes.getFeatureCInvariant(),(1 << 10) | (1 << 11));
  task.addFeature(featureMoments.getFeatureAlpha(),featureMomentsDes.getFeatureAlpha());

  task.setLambda(0.4) ;
  task.computeControlLaw();
  vpMatrix mat = task.computeInteractionMatrix();

  // Release the task's internal resources before returning.
  task.kill();
  return mat;
}
示例13: buildFrom
/*!
  Converts an homogeneous matrix into a \f$\theta {\bf u}\f$ vector.

  \return A reference to the resulting \f$\theta {\bf u}\f$ vector.
*/
vpThetaUVector
vpThetaUVector::buildFrom(const vpHomogeneousMatrix& M)
{
  // Only the rotation part of M contributes to theta-u.
  vpRotationMatrix rot;
  M.extract(rot);
  buildFrom(rot);
  return *this;
}
示例14: buildFrom
/*!
  Initialize a force/torque twist transformation matrix from an homogeneous
  matrix \f$M\f$ with \f[ {\bf M} = \left[\begin{array}{cc} {\bf R} & {\bf t}
  \\ {\bf 0}_{1\times 3} & 1 \end{array} \right] \f]

  \param M : Homogeneous matrix \f$M\f$ providing the rotation \f${\bf R}\f$
  and translation \f${\bf t}\f$ used to initialize the force/torque twist
  transformation matrix.
*/
vpForceTwistMatrix
vpForceTwistMatrix::buildFrom(const vpHomogeneousMatrix &M)
{
  vpRotationMatrix R;
  vpTranslationVector t;
  M.extract(t);
  M.extract(R);
  buildFrom(t, R);
  return *this;
}
示例15: computed
/*!
  Compute the homography using a displacement matrix.

  The homography is given by:

  \f$ {}^cH_{c_0} = {}^cR_{c_0} + \frac{{}^cT_{c_0} . {}^tN}{d_0} \f$

  Several internal variables are computed (dt, cRc0_0n)

  \param _cTc0 : the displacement matrix of the camera between the initial position of the camera and the current camera position
  \param _cHc0 : the homography of the plane
*/
void
vpMbtDistanceKltPoints::computeHomography(const vpHomogeneousMatrix& _cTc0, vpHomography& _cHc0)
{
  // Split the displacement into its rotation and translation parts.
  vpRotationMatrix cRc0;
  vpTranslationVector ctransc0;

  _cTc0.extract(cRc0);
  _cTc0.extract(ctransc0);
  vpMatrix cHc0 = _cHc0.convert();

//   vpGEMM(cRc0, 1.0, invd0, cRc0, -1.0, _cHc0, VP_GEMM_A_T);
  // cHc0 = cRc0 - (ctransc0 * N^T) * invd0, i.e. the homography formula
  // above with -invd0 playing the role of 1/d_0 (sign per ViSP convention).
  vpGEMM(ctransc0, N, -invd0, cRc0, 1.0, cHc0, VP_GEMM_B_T);
  // Normalize so the bottom-right entry is 1.
  cHc0 /= cHc0[2][2];

  H = cHc0;

//   vpQuaternionVector NQuat(N[0], N[1], N[2], 0.0);
//   vpQuaternionVector RotQuat(cRc0);
//   vpQuaternionVector RotQuatConj(-RotQuat.x(), -RotQuat.y(), -RotQuat.z(), RotQuat.w());
//   vpQuaternionVector partial = RotQuat * NQuat;
//   vpQuaternionVector resQuat = (partial * RotQuatConj);
//
//   cRc0_0n = vpColVector(3);
//   cRc0_0n[0] = resQuat.x();
//   cRc0_0n[1] = resQuat.y();
//   cRc0_0n[2] = resQuat.z();

  // Rotate the initial plane normal N into the current camera frame
  // (matrix form of the quaternion computation commented out above).
  cRc0_0n = cRc0*N;

//   vpPlane p(corners[0], corners[1], corners[2]);
//   vpColVector Ncur = p.getNormal();
//   Ncur.normalize();
  N_cur = cRc0_0n;
  // dt = ctransc0 . N_cur : signed distance change of the plane along its
  // current normal.
  dt = 0.0;
  for (unsigned int i = 0; i < 3; i += 1){
    dt += ctransc0[i] * (N_cur[i]);
  }
}