This page collects typical usage examples of the C++ method Eigen::Matrix3d::setIdentity. If you are wondering what Matrix3d::setIdentity does, how to call it, or what real-world uses look like, the curated examples below should help. You can also read more about the enclosing class Eigen::Matrix3d.
Four code examples of Matrix3d::setIdentity are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ samples.
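Before looking at the repository examples, here is a minimal, self-contained sketch of the method itself: setIdentity overwrites every entry of the matrix so that it becomes the identity.

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::Matrix3d m;
    m.setConstant(7.0);   // fill with an arbitrary value first
    m.setIdentity();      // now m is the 3x3 identity matrix
    std::cout << m << std::endl; // prints: 1 0 0 / 0 1 0 / 0 0 1
    return 0;
}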
Example 1: computeDistance
void PartFilter::computeDistance(int partition)
{
    for (size_t i = 0; i < mModels.size(); i++)
    {
        double distance = 0;
        // Iterate over all joints mapped to this partition (compute the range once,
        // instead of calling equal_range twice per loop iteration).
        std::pair<std::multimap<int, std::string>::iterator,
                  std::multimap<int, std::string>::iterator> range = mOffsetPartToName[i].equal_range(partition);
        for (std::multimap<int, std::string>::iterator it = range.first; it != range.second; ++it)
        {
            if (mJointNameToPos[it->second] != -1)
            {
                int pos = mJointNameToPos[it->second];
                // Squared Mahalanobis distance between the model joint and the observation.
                // With an identity covariance this reduces to the squared Euclidean distance.
                //cout << it->second << "=>" << mPosNames[pos] << endl;
                Eigen::Vector3d jtPos = mModels[i]->getJoint(it->second)->getXYZVect();
                Eigen::Vector3d jtObs(mCurrentFrame[pos][1], mCurrentFrame[pos][2], mCurrentFrame[pos][3]);
                Eigen::Vector3d diff = jtPos - jtObs;
                Eigen::Matrix3d cov;
                cov.setIdentity();
                double distTemp = diff.transpose() * (cov * diff);
                distance += distTemp;
            }
        }
        mCurrentDistances[i] = distance;
    }
}
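A note on Example 1: because cov is the identity, the quantity computed is just the squared Euclidean distance. A true Mahalanobis distance weights the residual by the inverse covariance. The sketch below shows that variant; the function name and the use of a Cholesky solve are my own choices, not part of the PartFilter code.

#include <Eigen/Dense>

// Hypothetical helper, not part of PartFilter: squared Mahalanobis distance
// d^2 = diff^T * cov^{-1} * diff, for a symmetric positive-definite covariance.
double mahalanobisSquared(const Eigen::Vector3d & diff, const Eigen::Matrix3d & cov)
{
    // Solving cov * w = diff via an LLT (Cholesky) factorization avoids
    // forming the explicit inverse.
    Eigen::Vector3d w = cov.llt().solve(diff);
    return diff.dot(w);
}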
Example 2: sdh_moveto_cb
void sdh_moveto_cb(boost::shared_ptr<std::string> data){
    Eigen::Vector3d p, o;
    p.setZero();
    o.setZero();
    p(0) = right_newP_x;
    p(1) = right_newP_y;
    p(2) = right_newP_z;
    // Desired orientation: roll = 0, pitch = -pi, yaw = pi/2, converted to axis-angle.
    Eigen::Vector3d desired_euler;
    Eigen::Matrix3d Ident;
    desired_euler.setZero();
    Ident.setIdentity();
    desired_euler(0) = 0;
    desired_euler(1) = -M_PI;
    desired_euler(2) = 0.5 * M_PI;
    o = euler2axisangle(desired_euler, Ident);
    mutex_act.lock();
    right_ac_vec.clear();
    right_task_vec.clear();
    // (These raw pointers are presumably owned and released elsewhere.)
    right_ac_vec.push_back(new ProActController(*right_pm));
    right_task_vec.push_back(new KukaSelfCtrlTask(RP_NOCONTROL));
    right_task_vec.back()->mt = JOINTS;
    right_task_vec.back()->mft = GLOBAL;
    right_task_vec.back()->set_desired_p_eigen(p);
    right_task_vec.back()->set_desired_o_ax(o);
    mutex_act.unlock();
    std::cout << "kuka sdh self movement and move to new pose" << std::endl;
}
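The helper euler2axisangle is not included in this snippet. Below is one plausible sketch using Eigen's AngleAxisd, assuming a Z-Y-X (yaw-pitch-roll) convention and treating the matrix argument as a reference frame applied before conversion; the real helper's conventions may well differ.

#include <Eigen/Dense>
#include <Eigen/Geometry>

// Hypothetical reimplementation: convert (roll, pitch, yaw) Euler angles to an
// axis-angle vector (unit axis scaled by angle), pre-rotated by a frame matrix.
// The convention used by the original helper is an assumption here.
Eigen::Vector3d euler2axisangle(const Eigen::Vector3d & euler, const Eigen::Matrix3d & frame)
{
    Eigen::Matrix3d R = frame
        * (Eigen::AngleAxisd(euler(2), Eigen::Vector3d::UnitZ())
         * Eigen::AngleAxisd(euler(1), Eigen::Vector3d::UnitY())
         * Eigen::AngleAxisd(euler(0), Eigen::Vector3d::UnitX())).toRotationMatrix();
    Eigen::AngleAxisd aa(R);
    return aa.angle() * aa.axis();
}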
Example 3: process
void EKFOA::process(const double delta_t, cv::Mat & frame, Eigen::Vector3d & rW, Eigen::Vector4d & qWR, Eigen::Matrix3d & axes_orientation_and_confidence, std::vector<Point3d> (& XYZs)[3], Delaunay & triangulation, Point3d & closest_point){
    double time_total;
    std::vector<cv::Point2f> features_to_add;
    std::vector<Features_extra> features_extra;

    /*
     * EKF prediction (state and measurement prediction)
     */
    time_total = (double)cv::getTickCount();
    double time_prediction = (double)cv::getTickCount();
    filter.predict_state_and_covariance(delta_t);
    filter.compute_features_h(cam, features_extra);
    time_prediction = (double)cv::getTickCount() - time_prediction;
    // std::cout << "predict = " << time_prediction/(cv::getTickFrequency()*1000.) << "ms" << std::endl;

    /*
     * Sense and map management (delete features from EKF)
     */
    double time_tracker = (double)cv::getTickCount();
    motion_tracker.process(frame, features_extra, features_to_add);
    //TODO: Why is optical flow returning points outside the image?
    time_tracker = (double)cv::getTickCount() - time_tracker;
    time_total = time_total + time_tracker; //do not count the time spent by the tracker

    //Delete no-longer-seen features from the state, the covariance matrix and features_extra:
    double time_del = (double)cv::getTickCount();
    filter.delete_features(features_extra);
    time_del = (double)cv::getTickCount() - time_del;
    // std::cout << "delete = " << time_del/(cv::getTickFrequency()*1000.) << "ms" << std::endl;

    /*
     * EKF update step and map management (add new features to EKF)
     */
    double time_update = (double)cv::getTickCount();
    filter.update(cam, features_extra);
    time_update = (double)cv::getTickCount() - time_update;
    // std::cout << "update = " << time_update/(cv::getTickFrequency()*1000.) << "ms" << std::endl;

    //Add new features:
    double time_add = (double)cv::getTickCount();
    filter.add_features_inverse_depth(cam, features_to_add);
    time_add = (double)cv::getTickCount() - time_add;
    // std::cout << "add_fea = " << time_add/(cv::getTickFrequency()*1000.) << "ms" << std::endl;

    /*
     * Triangulation, surface and GUI data setting:
     */
    double time_triangulation = (double)cv::getTickCount();
    std::vector< std::pair<Point2d, size_t> > triangle_list;
    std::list<Triangle> triangles_list_3d;
    const Eigen::VectorXd & x_k_k = filter.x_k_k();
    const Eigen::MatrixXd & p_k_k = filter.p_k_k();

    //Set the position, so the GUI can draw it:
    rW = x_k_k.segment<3>(0); //current position

    //Set the axes orientation and confidence (each column stores one axis: X, Y, Z):
    axes_orientation_and_confidence.setIdentity();
    axes_orientation_and_confidence *= 5; //make the lines longer, so they are actually informative

    //Apply the rotation matrix of the current orientation quaternion:
    Eigen::Matrix3d qWR_R;
    qWR = x_k_k.segment<4>(3);
    MotionModel::quaternion_matrix(qWR, qWR_R);
    axes_orientation_and_confidence.applyOnTheLeft(qWR_R); // == qWR_R * axes_orientation_and_confidence

    for (int axis = 0; axis < axes_orientation_and_confidence.cols(); axis++){
        //Scale each axis to 3*sigma (the first 3 diagonal entries of the covariance matrix give the position confidence):
        axes_orientation_and_confidence.col(axis) *= 3 * std::sqrt(p_k_k(axis, axis));
        //Translate to the current position:
        axes_orientation_and_confidence.col(axis) += rW;
    }

    //The state holds 13 camera entries followed by 6 entries per feature:
    int num_features = (x_k_k.rows() - 13) / 6;
    XYZs[0].resize(num_features);
    XYZs[1].resize(num_features);
    XYZs[2].resize(num_features);

    //Compute the 3d positions and inverse-depth variances of all the points in the state:
    int i = 0; //feature counter
    for (int start_feature = 13; start_feature < x_k_k.rows(); start_feature += 6){
        const int feature_inv_depth_index = start_feature + 5;
        //As with any normal distribution, nearly all (99.73%) of the probability mass lies within three standard deviations of the mean:
        const double sigma_3 = std::sqrt(p_k_k(feature_inv_depth_index, feature_inv_depth_index)); //sqrt(depth variance)
        const Eigen::VectorXd & yi = x_k_k.segment(start_feature, 6);
        Eigen::VectorXd point_close(x_k_k.segment(start_feature, 6));
        Eigen::VectorXd point_far(x_k_k.segment(start_feature, 6));
        //Shift the inverse depth of the feature copies to cover the range between -3*sigma and +3*sigma:
        point_close(5) += sigma_3;
        point_far(5) -= sigma_3;
        Eigen::Vector3d XYZ_mu = Feature::compute_cartesian(yi); //mu (mean)
        Eigen::Vector3d XYZ_close = Feature::compute_cartesian(point_close); //mean + 3*sigma in inverse depth, i.e. the closest plausible point
//......... some code omitted here .........
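MotionModel::quaternion_matrix is also not part of this excerpt. Since qWR is read from four consecutive state entries, a plausible stand-in using Eigen::Quaterniond looks like the sketch below, assuming a (w, x, y, z) storage order; the actual component order of the EKF state is not shown in the snippet.

#include <Eigen/Dense>
#include <Eigen/Geometry>

// Hypothetical stand-in for MotionModel::quaternion_matrix: convert a
// quaternion stored as (w, x, y, z) into a rotation matrix. The storage
// order is an assumption here.
void quaternion_matrix(const Eigen::Vector4d & q, Eigen::Matrix3d & R)
{
    Eigen::Quaterniond quat(q(0), q(1), q(2), q(3)); // Eigen's (w, x, y, z) constructor order
    R = quat.normalized().toRotationMatrix();
}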
Example 4: loadData
//......... some code omitted here .........
                    {
                        redCoefficients.calcMipMapping(level, offset + k, index1 + k, index2 + k);
                        greenCoefficients.calcMipMapping(level, offset + k, index1 + k, index2 + k);
                        blueCoefficients.calcMipMapping(level, offset + k, index1 + k, index2 + k);
                    }
                }
            }
        }
        // Handle the last row separately when the height is odd.
        if (height % 2 != 0)
        {
            for (int i = 0; i < width - 1; i += 2)
            {
                int index1 = ((height - 1) * width + i) * ordlen;
                int index2 = ((height - 1) * width + i + 1) * ordlen;
                int offset = ((height2 - 1) * width2 + i / 2) * ordlen;
                for (int k = 0; k < basisTerm; k++)
                {
                    redCoefficients.calcMipMapping(level, offset + k, index1 + k, index2 + k);
                    greenCoefficients.calcMipMapping(level, offset + k, index1 + k, index2 + k);
                    blueCoefficients.calcMipMapping(level, offset + k, index1 + k, index2 + k);
                }
            }
        }
        // Handle the bottom-right corner pixel when both dimensions are odd.
        if (height % 2 != 0 && width % 2 != 0)
        {
            int index1 = (height * width - 1) * ordlen;
            int offset = (height2 * width2 - 1) * ordlen;
            for (int k = 0; k < basisTerm; k++)
            {
                redCoefficients.calcMipMapping(level, offset + k, index1 + k);
                greenCoefficients.calcMipMapping(level, offset + k, index1 + k);
                blueCoefficients.calcMipMapping(level, offset + k, index1 + k);
            }
        }
        mipMapSize[level] = QSize(width2, height2);
    }
    // Compute normals via photometric stereo from three virtual lights.
    if (cb != NULL) (*cb)(75, "Normals generation...");
    Eigen::Vector3d l0(sin(M_PI/4)*cos(M_PI/6), sin(M_PI/4)*sin(M_PI/6), cos(M_PI/4));
    Eigen::Vector3d l1(sin(M_PI/4)*cos(5*M_PI/6), sin(M_PI/4)*sin(5*M_PI/6), cos(M_PI/4));
    Eigen::Vector3d l2(sin(M_PI/4)*cos(3*M_PI/2), sin(M_PI/4)*sin(3*M_PI/2), cos(M_PI/4));
    float hweights0[16], hweights1[16], hweights2[16];
    getHSH(M_PI/4, M_PI/6, hweights0, ordlen);
    getHSH(M_PI/4, 5*M_PI/6, hweights1, ordlen);
    getHSH(M_PI/4, 3*M_PI/2, hweights2, ordlen);
    // Light-direction matrix; setIdentity gives a valid value before the rows are filled in.
    Eigen::Matrix3d L;
    L.setIdentity();
    L.row(0) = l0;
    L.row(1) = l1;
    L.row(2) = l2;
    Eigen::Matrix3d LInverse = L.inverse();
    for (int level = 0; level < MIP_MAPPING_LEVELS; level++)
    {
        const float* rPtr = redCoefficients.getLevel(level);
        const float* gPtr = greenCoefficients.getLevel(level);
        const float* bPtr = blueCoefficients.getLevel(level);
        // (normals.setLevel presumably takes ownership of this buffer.)
        vcg::Point3f* temp = new vcg::Point3f[mipMapSize[level].width() * mipMapSize[level].height()];
        if (cb != NULL) (*cb)(75 + level * 6, "Normals generation...");
        #pragma omp parallel for
        for (int y = 0; y < mipMapSize[level].height(); y++)
        {
            for (int x = 0; x < mipMapSize[level].width(); x++)
            {
                int offset = y * mipMapSize[level].width() + x;
                // Luminance of this pixel under each of the three lights, per channel.
                Eigen::Vector3d f(0, 0, 0);
                for (int k = 0; k < ordlen; k++)
                {
                    f(0) += rPtr[offset*ordlen + k] * hweights0[k];
                    f(1) += rPtr[offset*ordlen + k] * hweights1[k];
                    f(2) += rPtr[offset*ordlen + k] * hweights2[k];
                }
                for (int k = 0; k < ordlen; k++)
                {
                    f(0) += gPtr[offset*ordlen + k] * hweights0[k];
                    f(1) += gPtr[offset*ordlen + k] * hweights1[k];
                    f(2) += gPtr[offset*ordlen + k] * hweights2[k];
                }
                for (int k = 0; k < ordlen; k++)
                {
                    f(0) += bPtr[offset*ordlen + k] * hweights0[k];
                    f(1) += bPtr[offset*ordlen + k] * hweights1[k];
                    f(2) += bPtr[offset*ordlen + k] * hweights2[k];
                }
                f /= 3.0; // average the three colour channels
                Eigen::Vector3d normal = LInverse * f;
                temp[offset] = vcg::Point3f(normal(0), normal(1), normal(2));
                temp[offset].Normalize();
            }
        }
        normals.setLevel(temp, mipMapSize[level].width() * mipMapSize[level].height(), level);
    }
    return 0;
}
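The normal generation in Example 4 is the classic three-light photometric stereo solve: each luminance is approximately the dot product of a light direction with the surface normal, so stacking the light directions as rows of L gives f = L * n, and n is proportional to L^-1 * f. A minimal standalone sketch of that core step follows; preferring a direct solve over the explicit inverse is my choice, not the original code's.

#include <Eigen/Dense>

// Minimal sketch of the three-light photometric stereo step used above:
// given per-light luminances f and a row-stacked light matrix L, the surface
// normal is the normalized solution of L * n = f.
Eigen::Vector3d solveNormal(const Eigen::Matrix3d & L, const Eigen::Vector3d & f)
{
    // For a well-conditioned 3x3 system, a PLU solve avoids forming
    // L.inverse() explicitly, though both give the same result here.
    Eigen::Vector3d n = L.partialPivLu().solve(f);
    return n.normalized();
}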