This article compiles typical usage examples of the C++ Pose3 class. If you are wondering what Pose3 is for, how to use it, or are looking for concrete Pose3 usage examples, the curated class examples below may help.
The following presents 15 code examples of the Pose3 class, sorted by popularity by default.
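Before the numbered examples, here is a minimal sketch of constructing and composing Pose3 objects, assuming the GTSAM headers and the same Rot3::ypr / transform_from conventions used in the examples below (all names are illustrative):
#include <gtsam/geometry/Pose3.h>
using namespace gtsam;

void pose3Basics() {
  // Build poses from a yaw-pitch-roll rotation and a translation
  Pose3 wTa(Rot3::ypr(0.1, 0.0, -0.2), Point3(1.0, 2.0, 0.5));
  Pose3 wTb(Rot3::ypr(-0.3, 0.05, 0.0), Point3(0.0, 1.0, 0.0));
  // Compose, invert, and express one pose relative to the other
  Pose3 wTc = wTa.compose(wTb);   // wTa * wTb
  Pose3 aTw = wTa.inverse();
  Pose3 aTb = wTa.between(wTb);   // relative pose
  // Map a point from frame a into the world frame
  Point3 pWorld = wTa.transform_from(Point3(0.1, 0.0, 0.0));
}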
Example 1: range
/* ************************************************************************* */
double Pose3::range(const Pose3& pose, OptionalJacobian<1, 6> H1,
OptionalJacobian<1, 6> H2) const {
Matrix13 D_local_point;
double r = range(pose.translation(), H1, H2 ? &D_local_point : 0);
if (H2) *H2 << Matrix13::Zero(), D_local_point * pose.rotation().matrix();
return r;
}
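A hedged usage sketch for the range overload above; T1 and T2 are illustrative poses, and Matrix16 is GTSAM's fixed-size 1x6 matrix typedef matching the OptionalJacobian<1, 6> arguments:
// Distance between two poses, with optional 1x6 Jacobians
Pose3 T1(Rot3::ypr(0.2, 0.0, 0.0), Point3(0.0, 0.0, 0.0));
Pose3 T2(Rot3::ypr(0.0, 0.1, 0.0), Point3(3.0, 4.0, 0.0));
Matrix16 H1, H2;                  // filled because they are passed in
double d = T1.range(T2, H1, H2);  // Euclidean distance between the translations (5 here)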
Example 2: TEST
//***************************************************************************
TEST(GPSData, init) {
// GPS Reading 1 will be ENU origin
double t1 = 84831;
Point3 NED1(0, 0, 0);
LocalCartesian enu(35.4393283333333, -119.062986666667, 275.54,
Geocentric::WGS84);
// GPS Reading 2
double t2 = 84831.5;
double E, N, U;
enu.Forward(35.4394633333333, -119.063146666667, 276.52, E, N, U);
Point3 NED2(N, E, -U);
// Estimate initial state
Pose3 T;
Vector3 nV;
boost::tie(T, nV) = GPSFactor::EstimateState(t1, NED1, t2, NED2, 84831.0796);
// Check values
EXPECT(assert_equal((Vector )Vector3(29.9575, -29.0564, -1.95993), nV, 1e-4));
EXPECT( assert_equal(Rot3::ypr(-0.770131, 0.046928, 0), T.rotation(), 1e-5));
Point3 expectedT(2.38461, -2.31289, -0.156011);
EXPECT(assert_equal(expectedT, T.translation(), 1e-5));
}
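The estimated pose and velocity from GPSFactor::EstimateState are typically used to initialize a navigation factor graph. A hedged sketch of that follow-up step (the key, noise sigma, and graph variables below are illustrative, not part of the test):
// Use the estimated state as the initial value and constrain it with the GPS reading
Values initial;
initial.insert(symbol('x', 0), T);
NonlinearFactorGraph graph;
auto gpsNoise = noiseModel::Isotropic::Sigma(3, 0.5);  // illustrative sigma in meters
graph.add(GPSFactor(symbol('x', 0), NED1, gpsNoise));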
Example 3: addGeometry
void Compound::addGeometry(const Pose3<>& parentPose, Geometry& geometry, SimRobotCore2::CollisionCallback* callback)
{
// compute pose
Pose3<> geomPose = parentPose;
if(geometry.translation)
geomPose.translate(*geometry.translation);
if(geometry.rotation)
geomPose.rotate(*geometry.rotation);
// create geometry
dGeomID geom = geometry.createGeometry(Simulation::simulation->staticSpace);
if(geom)
{
dGeomSetData(geom, &geometry);
// set pose
dGeomSetPosition(geom, geomPose.translation.x, geomPose.translation.y, geomPose.translation.z);
dMatrix3 matrix3;
ODETools::convertMatrix(geomPose.rotation, matrix3);
dGeomSetRotation(geom, matrix3);
}
// handle nested geometries
for(std::list< ::PhysicalObject*>::const_iterator iter = geometry.physicalDrawings.begin(), end = geometry.physicalDrawings.end(); iter != end; ++iter)
{
Geometry* geometry = dynamic_cast<Geometry*>(*iter);
if(geometry)
addGeometry(geomPose, *geometry, callback);
}
}
Example 4: Depth
void Frustum_Filter::init_z_near_z_far_depth
(
const SfM_Data & sfm_data,
const double zNear,
const double zFar
)
{
// If z_near & z_far are -1 and the structure is not empty,
// compute the values for each camera and the structure
const bool bComputed_Z = (zNear == -1. && zFar == -1.) && !sfm_data.structure.empty();
if (bComputed_Z) // Compute the near & far planes from the structure and view observations
{
for (Landmarks::const_iterator itL = sfm_data.GetLandmarks().begin();
itL != sfm_data.GetLandmarks().end(); ++itL)
{
const Landmark & landmark = itL->second;
const Vec3 & X = landmark.X;
for (Observations::const_iterator iterO = landmark.obs.begin();
iterO != landmark.obs.end(); ++iterO)
{
const IndexT id_view = iterO->first;
const View * view = sfm_data.GetViews().at(id_view).get();
if (!sfm_data.IsPoseAndIntrinsicDefined(view))
continue;
const Pose3 pose = sfm_data.GetPoseOrDie(view);
const double z = Depth(pose.rotation(), pose.translation(), X);
NearFarPlanesT::iterator itZ = z_near_z_far_perView.find(id_view);
if (itZ != z_near_z_far_perView.end())
{
if ( z < itZ->second.first)
itZ->second.first = z;
else
if ( z > itZ->second.second)
itZ->second.second = z;
}
else
z_near_z_far_perView[id_view] = {z,z};
}
}
}
else
{
// Init the same near & far limit for all the valid views
for (Views::const_iterator it = sfm_data.GetViews().begin();
it != sfm_data.GetViews().end(); ++it)
{
const View * view = it->second.get();
if (!sfm_data.IsPoseAndIntrinsicDefined(view))
continue;
if (z_near_z_far_perView.find(view->id_view) == z_near_z_far_perView.end())
z_near_z_far_perView[view->id_view] = {zNear, zFar};
}
}
}
Example 5: TEST
/* ************************************************************************* */
TEST( dataSet, writeBALfromValues_Dubrovnik){
///< Read a file using the unit-tested readBAL
const string filenameToRead = findExampleDataFile("dubrovnik-3-7-pre");
SfM_data readData;
readBAL(filenameToRead, readData);
Pose3 poseChange = Pose3(Rot3::ypr(-M_PI/10, 0., -M_PI/10), gtsam::Point3(0.3,0.1,0.3));
Values value;
for(size_t i=0; i < readData.number_cameras(); i++){ // for each camera
Key poseKey = symbol('x',i);
Pose3 pose = poseChange.compose(readData.cameras[i].pose());
value.insert(poseKey, pose);
}
for(size_t j=0; j < readData.number_tracks(); j++){ // for each point
Key pointKey = P(j);
Point3 point = poseChange.transform_from( readData.tracks[j].p );
value.insert(pointKey, point);
}
// Write values and readData to a file
const string filenameToWrite = createRewrittenFileName(filenameToRead);
writeBALfromValues(filenameToWrite, readData, value);
// Read the file we wrote
SfM_data writtenData;
readBAL(filenameToWrite, writtenData);
// Check that the reprojection errors are the same and the poses are correct
// Check number of things
EXPECT_LONGS_EQUAL(3,writtenData.number_cameras());
EXPECT_LONGS_EQUAL(7,writtenData.number_tracks());
const SfM_Track& track0 = writtenData.tracks[0];
EXPECT_LONGS_EQUAL(3,track0.number_measurements());
// Check projection of a given point
EXPECT_LONGS_EQUAL(0,track0.measurements[0].first);
const SfM_Camera& camera0 = writtenData.cameras[0];
Point2 expected = camera0.project(track0.p), actual = track0.measurements[0].second;
EXPECT(assert_equal(expected,actual,12));
Pose3 expectedPose = camera0.pose();
Key poseKey = symbol('x',0);
Pose3 actualPose = value.at<Pose3>(poseKey);
EXPECT(assert_equal(expectedPose,actualPose, 1e-7));
Point3 expectedPoint = track0.p;
Key pointKey = P(0);
Point3 actualPoint = value.at<Point3>(pointKey);
EXPECT(assert_equal(expectedPoint,actualPoint, 1e-6));
}
Example 6: perturbCameraPoseAndCalibration
template<class CALIBRATION>
PinholeCamera<CALIBRATION> perturbCameraPoseAndCalibration(
const PinholeCamera<CALIBRATION>& camera) {
GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION)
Pose3 noise_pose = Pose3(Rot3::ypr(-M_PI / 10, 0., -M_PI / 10),
Point3(0.5, 0.1, 0.3));
Pose3 cameraPose = camera.pose();
Pose3 perturbedCameraPose = cameraPose.compose(noise_pose);
typename gtsam::traits<CALIBRATION>::TangentVector d;
d.setRandom();
d *= 0.1;
CALIBRATION perturbedCalibration = camera.calibration().retract(d);
return PinholeCamera<CALIBRATION>(perturbedCameraPose, perturbedCalibration);
}
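A possible call site for the helper above, assuming the Cal3_S2 calibration model (the intrinsics are illustrative):
// Perturb a synthetic camera, e.g. to generate noisy initial estimates in a test
PinholeCamera<Cal3_S2> camera(Pose3(), Cal3_S2(500.0, 500.0, 0.0, 320.0, 240.0));
PinholeCamera<Cal3_S2> noisy = perturbCameraPoseAndCalibration(camera);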
Example 7: glViewport
void Camera::CameraSensor::updateValue()
{
// allocate buffer
const unsigned int imageWidth = camera->imageWidth;
const unsigned int imageHeight = camera->imageHeight;
const unsigned int imageSize = imageWidth * imageHeight * 3;
if(imageBufferSize < imageSize)
{
if(imageBuffer)
delete[] imageBuffer;
imageBuffer = new unsigned char[imageSize];
imageBufferSize = imageSize;
}
// make sure the poses of all movable objects are up to date
Simulation::simulation->scene->updateTransformations();
// prepare offscreen renderer
OffscreenRenderer& renderer = Simulation::simulation->renderer;
renderer.makeCurrent(imageWidth, imageHeight);
// setup image size and angle of view
glViewport(0, 0, imageWidth, imageHeight);
glMatrixMode(GL_PROJECTION);
glLoadMatrixf(projection);
glMatrixMode(GL_MODELVIEW);
// enable lighting, textures, and smooth shading
glEnable(GL_LIGHTING);
glEnable(GL_TEXTURE_2D);
glPolygonMode(GL_FRONT, GL_FILL);
glShadeModel(GL_SMOOTH);
// setup camera position
Pose3<> pose = physicalObject->pose;
pose.conc(offset);
static const Matrix3x3<> cameraRotation(Vector3<>(0.f, -1.f, 0.f), Vector3<>(0.f, 0.f, 1.f), Vector3<>(-1.f, 0.f, 0.f));
pose.rotate(cameraRotation);
float transformation[16];
OpenGLTools::convertTransformation(pose.invert(), transformation);
glLoadMatrixf(transformation);
// draw all objects
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
Simulation::simulation->scene->drawAppearances();
// read frame buffer
renderer.finishImageRendering(imageBuffer, imageWidth, imageHeight);
data.byteArray = imageBuffer;
}
Example 8:
/* ************************************************************************* */
BearingS2 BearingS2::fromForwardObservation(const Pose3& A, const Point3& B) {
// Cnb = DCMnb(Att);
Matrix Cnb = A.rotation().matrix().transpose();
Vector p_rel_c = Cnb*(B - A.translation());
// FIXME: the matlab code checks for p_rel_c(0) greater than
// azi = atan2(p_rel_c(2),p_rel_c(1));
double azimuth = atan2(p_rel_c(1),p_rel_c(0));
// elev = atan2(p_rel_c(3),sqrt(p_rel_c(1)^2 + p_rel_c(2)^2));
double elevation = atan2(p_rel_c(2),sqrt(p_rel_c(0) * p_rel_c(0) + p_rel_c(1) * p_rel_c(1)));
return BearingS2(azimuth, elevation);
}
Example 9: Logmap
/* ************************************************************************* */
Vector6 Pose3::ChartAtOrigin::Local(const Pose3& T, ChartJacobian H) {
#ifdef GTSAM_POSE3_EXPMAP
return Logmap(T, H);
#else
Matrix3 DR;
Vector3 omega = Rot3::LocalCoordinates(T.rotation(), H ? &DR : 0);
if (H) {
*H = I_6x6;
H->topLeftCorner<3,3>() = DR;
}
Vector6 xi;
xi << omega, T.translation().vector();
return xi;
#endif
}
Example 10: epipole_from_P
/// Camera pair epipole (projection of camera center 2 into image plane 1)
static Vec3 epipole_from_P(const Mat34& P1, const Pose3& P2)
{
const Vec3 c = P2.center();
Vec4 center;
center << c(0), c(1), c(2), 1.0;
return P1*center;
}
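A hedged sketch of calling the helper above; K1, pose1, and pose2 are assumed to be the intrinsics and openMVG poses of the two cameras, and P1 is assembled directly as K1 * [R1 | t1]:
// Project camera 2's center into image 1 to obtain the (homogeneous) epipole
Mat34 P1;
P1.block<3, 3>(0, 0) = K1 * pose1.rotation();
P1.col(3) = K1 * pose1.translation();
const Vec3 e1 = epipole_from_P(P1, pose2);
const Vec2 e1_pix = e1.hnormalized();  // pixel coordinates, valid if e1(2) != 0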
Example 11: skewSymmetric
/* ************************************************************************* */
Vector6 Pose3::Logmap(const Pose3& p, OptionalJacobian<6, 6> H) {
if (H) *H = LogmapDerivative(p);
Vector3 w = Rot3::Logmap(p.rotation()), T = p.translation().vector();
double t = w.norm();
if (t < 1e-10) {
Vector6 log;
log << w, T;
return log;
} else {
Matrix3 W = skewSymmetric(w / t);
// Formula from Agrawal06iros, equation (14)
// simplified with Mathematica, and multiplying in T to avoid matrix math
double Tan = tan(0.5 * t);
Vector3 WT = W * T;
Vector3 u = T - (0.5 * t) * WT + (1 - t / (2. * Tan)) * (W * WT);
Vector6 log;
log << w, u;
return log;
}
}
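A minimal round-trip check for Logmap against Expmap, assuming GTSAM's convention that the first three tangent components are the rotation part and the last three the translation part:
// Expmap followed by Logmap should recover the original tangent vector
Vector6 xi;
xi << 0.1, -0.2, 0.3, 1.0, 2.0, 3.0;  // (omega, v)
Pose3 T = Pose3::Expmap(xi);
Vector6 xiBack = Pose3::Logmap(T);
assert(xi.isApprox(xiBack, 1e-9));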
Example 12: transformed_from
/* ************************************************************************* */
PoseRTV PoseRTV::transformed_from(const Pose3& trans, ChartJacobian Dglobal,
OptionalJacobian<9, 6> Dtrans) const {
// Pose3 transform is just compose
Matrix6 D_newpose_trans, D_newpose_pose;
Pose3 newpose = trans.compose(pose(), D_newpose_trans, D_newpose_pose);
// Note that we rotate the velocity
Matrix3 D_newvel_R, D_newvel_v;
Velocity3 newvel = trans.rotation().rotate(Point3(velocity()), D_newvel_R, D_newvel_v);
if (Dglobal) {
Dglobal->setZero();
Dglobal->topLeftCorner<6,6>() = D_newpose_pose;
Dglobal->bottomRightCorner<3,3>() = D_newvel_v;
}
if (Dtrans) {
Dtrans->setZero();
Dtrans->topLeftCorner<6,6>() = D_newpose_trans;
Dtrans->bottomLeftCorner<3,3>() = D_newvel_R;
}
return PoseRTV(newpose, newvel);
}
Example 13: f
// Init a frustum for each valid view of the SfM scene
void Frustum_Filter::initFrustum
(
const SfM_Data & sfm_data
)
{
for (NearFarPlanesT::const_iterator it = z_near_z_far_perView.begin();
it != z_near_z_far_perView.end(); ++it)
{
const View * view = sfm_data.GetViews().at(it->first).get();
if (!sfm_data.IsPoseAndIntrinsicDefined(view))
continue;
Intrinsics::const_iterator iterIntrinsic = sfm_data.GetIntrinsics().find(view->id_intrinsic);
if (!isPinhole(iterIntrinsic->second->getType()))
continue;
const Pose3 pose = sfm_data.GetPoseOrDie(view);
const Pinhole_Intrinsic * cam = dynamic_cast<const Pinhole_Intrinsic*>(iterIntrinsic->second.get());
if (!cam)
continue;
if (!_bTruncated) // use infinite frustum
{
const Frustum f(
cam->w(), cam->h(), cam->K(),
pose.rotation(), pose.center());
frustum_perView[view->id_view] = f;
}
else // use truncated frustum with defined Near and Far planes
{
const Frustum f(cam->w(), cam->h(), cam->K(),
pose.rotation(), pose.center(), it->second.first, it->second.second);
frustum_perView[view->id_view] = f;
}
}
}
Example 14: main
int main()
{
int n = 5000000;
cout << "NOTE: Times are reported for " << n << " calls" << endl;
double norm=sqrt(1.0+16.0+4.0);
double x=1.0/norm, y=4.0/norm, z=2.0/norm;
Vector v = (Vector(6) << x, y, z, 0.1, 0.2, -0.1).finished();
Pose3 T = Pose3::Expmap((Vector(6) << 0.1, 0.1, 0.2, 0.1, 0.4, 0.2).finished()), T2 = T.retract(v);
Matrix H1,H2;
TEST(retract, T.retract(v))
TEST(Expmap, T*Pose3::Expmap(v))
TEST(localCoordinates, T.localCoordinates(T2))
TEST(between, T.between(T2))
TEST(between_derivatives, T.between(T2,H1,H2))
TEST(Logmap, Pose3::Logmap(T.between(T2)))
// Print timings
tictoc_print_();
return 0;
}
Example 15: glMatrixMode
bool Camera::CameraSensor::renderCameraImages(SimRobotCore2::Sensor** cameras, unsigned int count)
{
if(lastSimulationStep == Simulation::simulation->simulationStep)
return true;
// allocate buffer
const unsigned int imageWidth = camera->imageWidth;
const unsigned int imageHeight = camera->imageHeight;
const unsigned int imageSize = imageWidth * imageHeight * 3;
const unsigned int multiImageBufferSize = imageSize * count;
if(imageBufferSize < multiImageBufferSize)
{
if(imageBuffer)
delete[] imageBuffer;
imageBuffer = new unsigned char[multiImageBufferSize];
imageBufferSize = multiImageBufferSize;
}
// make sure the poses of all movable objects are up to date
Simulation::simulation->scene->updateTransformations();
// prepare offscreen renderer
OffscreenRenderer& renderer = Simulation::simulation->renderer;
renderer.makeCurrent(imageWidth, imageHeight * count);
// setup angle of view
glMatrixMode(GL_PROJECTION);
glLoadMatrixf(projection);
glMatrixMode(GL_MODELVIEW);
// enable lighting, textures, and smooth shading
glEnable(GL_LIGHTING);
glEnable(GL_TEXTURE_2D);
glPolygonMode(GL_FRONT, GL_FILL);
glShadeModel(GL_SMOOTH);
// clear buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// render images
int currentHorizontalPos = 0;
unsigned char* currentBufferPos = imageBuffer;
for(unsigned int i = 0; i < count; ++i)
{
CameraSensor* sensor = (CameraSensor*)cameras[i];
ASSERT(sensor->lastSimulationStep != Simulation::simulation->simulationStep);
glViewport(0, currentHorizontalPos, imageWidth, imageHeight);
// setup camera position
Pose3<> pose = sensor->physicalObject->pose;
pose.conc(sensor->offset);
static const Matrix3x3<> cameraRotation(Vector3<>(0.f, -1.f, 0.f), Vector3<>(0.f, 0.f, 1.f), Vector3<>(-1.f, 0.f, 0.f));
pose.rotate(cameraRotation);
float transformation[16];
OpenGLTools::convertTransformation(pose.invert(), transformation);
glLoadMatrixf(transformation);
// draw all objects
Simulation::simulation->scene->drawAppearances();
sensor->data.byteArray = currentBufferPos;
sensor->lastSimulationStep = Simulation::simulation->simulationStep;
currentHorizontalPos += imageHeight;
currentBufferPos += imageSize;
}
// read frame buffer
renderer.finishImageRendering(imageBuffer, imageWidth, imageHeight * count);
return true;
}