

C++ Matrix4f::inverse Method Code Examples

This article collects typical usage examples of the C++ `Eigen::Matrix4f::inverse` method from open-source code. If you are wondering what `Matrix4f::inverse` does exactly, how to call it, or simply want to see it used in real projects, the curated snippets below may help. You can also explore further usage examples of the enclosing `Eigen::Matrix4f` class.


The sections below show 15 code examples of the `Matrix4f::inverse` method, drawn from open-source projects and sorted by popularity by default.
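
Before diving into the project code, here is a minimal, self-contained sketch of the basic call (illustrative only, not taken from any of the projects below). Note that `inverse()` returns a new matrix and leaves the original untouched.

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::Matrix4f T = Eigen::Matrix4f::Identity();
    T.block<3,1>(0,3) << 1.0f, 2.0f, 3.0f;   // add a translation component

    Eigen::Matrix4f T_inv = T.inverse();     // returns a new matrix; T is unchanged

    // T * T_inv should be (numerically close to) the identity
    std::cout << (T * T_inv).isApprox(Eigen::Matrix4f::Identity()) << std::endl;
    return 0;
}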

Example 1: iss

Eigen::Matrix4f
parse_registration(turtle_input& input, const std::string& guid_0,
                     const std::string& guid_1) {
    std::pair<std::string, bool> t = parse_transformation_string(input, guid_0, guid_1);
    std::istringstream iss(t.first);
    std::vector<std::string> tokens{std::istream_iterator<std::string>{iss}, std::istream_iterator<std::string>{}};
    assert(tokens.size() == 16);

    Eigen::Matrix4f m;
    for (uint32_t i = 0; i < 4; ++i) {
        for (uint32_t j = 0; j < 4; ++j) {
            double value;
            std::stringstream convert_str(tokens[i*4+j]);
            convert_str >> value;
            convert_str.str("");
            m(i, j) = value;
        }
    }

    if (t.second) {
        Eigen::Matrix4f inv = m.inverse();
        m = inv;
    }

    return m;
}
Developer ID: paulhilbert, Project: duraark_rdf, Lines: 26, Source file: turtle_input_helper.cpp

Example 2:

Eigen::Matrix4f GroundTruthOdometry::getTransformation(uint64_t timestamp)
{
    Eigen::Matrix4f pose = Eigen::Matrix4f::Identity();

    if(last_utime != 0)
    {
        std::map<uint64_t, Eigen::Isometry3f>::const_iterator it = camera_trajectory.find(last_utime);
        if (it == camera_trajectory.end())
        {
            last_utime = timestamp;
            return pose;
        }

        //Poses are stored in the file in iSAM basis, undo it
        Eigen::Matrix4f M;
        M <<  0,  0, 1, 0,
             -1,  0, 0, 0,
              0, -1, 0, 0,
              0,  0, 0, 1;

        pose = M.inverse() * camera_trajectory[timestamp] * M;
    }
    else
    {
        std::map<uint64_t, Eigen::Isometry3f>::const_iterator it = camera_trajectory.find(timestamp);
        Eigen::Isometry3f ident = it->second;
        pose = Eigen::Matrix4f::Identity();
        camera_trajectory[last_utime] = ident;
    }

    last_utime = timestamp;

    return pose;
}
Developer ID: Hielamon, Project: ElasticFusion, Lines: 34, Source file: GroundTruthOdometry.cpp
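
The `M.inverse() * camera_trajectory[timestamp] * M` line above is a change of basis: the stored pose is conjugated by the basis matrix `M` to undo the iSAM axis convention. A hedged sketch of that general pattern follows; the helper name `changeBasis` is made up for illustration.

#include <Eigen/Dense>

// Re-express a transform T_a, given in basis A, in basis B, where M maps
// B-coordinates into A-coordinates: T_b = M^-1 * T_a * M.
Eigen::Matrix4f changeBasis(const Eigen::Matrix4f& M, const Eigen::Matrix4f& T_a)
{
    return M.inverse() * T_a * M;
}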

Example 3: initScene

void Scene::initScene(CloudPtr &pc_in, CloudPtr &pc_transformed, MatrixXf boundBox, Eigen::Matrix4f eigen_virtualFromFocus_old)
{
    pc_scene.reset (new Cloud);
    pc_scene_focus.reset (new Cloud);
    pc_scene_focus_back.reset(new Cloud);
    for(int i=0;i<pc_in->points.size();i++){
        PointT point_in = pc_in->points[i];
        PointT point_transformed = pc_transformed->points[i];
        // cut off
        if(point_transformed.x<boundBox(0,0) && point_transformed.x>boundBox(0,1) &&
           point_transformed.y<boundBox(1,0) && point_transformed.y>boundBox(1,1) &&
           point_transformed.z<boundBox(2,0) && point_transformed.z>boundBox(2,1)){
            pc_scene->points.push_back(point_in);
            pc_scene_focus->points.push_back(point_transformed);
        }
    }

    // return back pc_scene to the original cam pos
    Eigen::Matrix4f inv = eigen_virtualFromFocus_old.inverse();
    for(int i=0;i<pc_scene_focus->points.size();i++){
        PointT point = pc_scene_focus->points[i];
        PointT point_back;
        point_back.x = inv(0,0)*point.x + inv(0,1)*point.y + inv(0,2)*point.z + inv(0,3);
        point_back.y = inv(1,0)*point.x + inv(1,1)*point.y + inv(1,2)*point.z + inv(1,3);
        point_back.z = inv(2,0)*point.x + inv(2,1)*point.y + inv(2,2)*point.z + inv(2,3);
        pc_scene_focus_back->points.push_back(point_back);
    }


//    if(USENORM) calcNormal();
}
Developer ID: AIS-Bonn, Project: vismotcoord, Lines: 29, Source file: scene.cpp

Example 4: publish_objectToGrasp_moveitFormat

void Communication::publish_objectToGrasp_moveitFormat(){
    ros::Rate r(1);
    ros::NodeHandle node;
    static tf::TransformBroadcaster br;
    Eigen::Matrix4f camToWorldMatrix;
    camToWorldMatrix.setZero();
    camToWorldMatrix(0,2) = 1;
    camToWorldMatrix(1,0) = -1;
    camToWorldMatrix(2,1) = -1;
    camToWorldMatrix(3,3) = 1;

    while(node.ok()){
        pcl::PointCloud<PointT>::Ptr object_pc = m_object_ex_ptr->getObjectToGrasp();
        pcl::PointCloud<PointT>::Ptr object_pc_transformed(new pcl::PointCloud<PointT>);
//        Eigen::Vector4f c = m_object_ex_ptr->getGraspCentroid(); c(3)=1;
//        tf::Transform simpleTF;
        tf::StampedTransform camToJacoTf;
        tf::TransformListener listener;
        bool tf_ready = listener.waitForTransform("camera_rgb_frame","root",ros::Time(0),ros::Duration(5.0));
        if(object_pc->size() > 0 && tf_ready){
            listener.lookupTransform("camera_rgb_frame","root",ros::Time(0),camToJacoTf);
            Eigen::Matrix4f camToJacoMatrix;
            pcl_ros::transformAsMatrix(camToJacoTf,camToJacoMatrix);

            Eigen::Matrix4f combinedMatrix = camToJacoMatrix.inverse() * camToWorldMatrix;
//            Eigen::Vector4f result = combinedMatrix * c;
//            Eigen::Matrix4f res;  res(0,3) = result(0); res(1,3) = result(1); res(2,3) = result(2); res(3,3) = 1;
//            simpleTF = tfFromEigen(res);
//            br.sendTransform(tf::StampedTransform(simpleTF, ros::Time::now(), "root", "object_centroid_test"));

            pcl::transformPointCloud(*object_pc, *object_pc_transformed, combinedMatrix);
//            std::cout << "old : " << object_pc->at(100) << std::endl;
//            std::cout << "new : " << object_pc_transformed->at(100) << std::endl;
            ObjectToGrasp_publisher_.publish(object_pc_transformed);
        }
        r.sleep();
    }
}
Developer ID: jpmerc, Project: perception3d, Lines: 33, Source file: communication.cpp

Example 5: predictIndices

void IndexMap::predictIndices(const Eigen::Matrix4f & pose,
                              const int & time,
                              const std::pair<GLuint, GLuint> & model,
                              const float depthCutoff,
                              const int timeDelta)
{
    indexFrameBuffer.Bind();

    glPushAttrib(GL_VIEWPORT_BIT);

    glViewport(0, 0, indexRenderBuffer.width, indexRenderBuffer.height);

    glClearColor(0, 0, 0, 0);

    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    indexProgram->Bind();

    Eigen::Matrix4f t_inv = pose.inverse();

    Eigen::Vector4f cam(Intrinsics::getInstance().cx() * IndexMap::FACTOR,
                  Intrinsics::getInstance().cy() * IndexMap::FACTOR,
                  Intrinsics::getInstance().fx() * IndexMap::FACTOR,
                  Intrinsics::getInstance().fy() * IndexMap::FACTOR);

    indexProgram->setUniform(Uniform("t_inv", t_inv));
    indexProgram->setUniform(Uniform("cam", cam));
    indexProgram->setUniform(Uniform("maxDepth", depthCutoff));
    indexProgram->setUniform(Uniform("cols", (float)Resolution::getInstance().cols() * IndexMap::FACTOR));
    indexProgram->setUniform(Uniform("rows", (float)Resolution::getInstance().rows() * IndexMap::FACTOR));
    indexProgram->setUniform(Uniform("time", time));
    indexProgram->setUniform(Uniform("timeDelta", timeDelta));

    glBindBuffer(GL_ARRAY_BUFFER, model.first);

    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, 0);

    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, reinterpret_cast<GLvoid*>(sizeof(Eigen::Vector4f)));

    glEnableVertexAttribArray(2);
    glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, reinterpret_cast<GLvoid*>(sizeof(Eigen::Vector4f) * 2));

    glDrawTransformFeedback(GL_POINTS, model.second);

    glDisableVertexAttribArray(0);
    glDisableVertexAttribArray(1);
    glDisableVertexAttribArray(2);
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    indexFrameBuffer.Unbind();

    indexProgram->Unbind();

    glPopAttrib();

    glFinish();
}
Developer ID: HarveyLiuFly, Project: ElasticFusion, Lines: 59, Source file: IndexMap.cpp

Example 6: isOdometryContinuousMotion

    bool isOdometryContinuousMotion(Eigen::Matrix4f &prevPose, Eigen::Matrix4f &currPose, float thresDist = 0.1)
    {
        Eigen::Matrix4f relativePose = prevPose.inverse() * currPose;
        if( relativePose.block(0,3,3,1).norm() > thresDist )
            return false;

        return true;
    }
Developer ID: EduFdez, Project: rgbd360, Lines: 8, Source file: KFsphere_SLAM.cpp
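
A hypothetical call site for the helper above, assuming two consecutive camera poses are available; a 25 cm jump exceeds the default 0.1 m threshold, so the check reports a discontinuity.

Eigen::Matrix4f prev = Eigen::Matrix4f::Identity();
Eigen::Matrix4f curr = Eigen::Matrix4f::Identity();
curr(0,3) = 0.25f;                                        // 25 cm translation along x

bool continuous = isOdometryContinuousMotion(prev, curr); // false with the default 0.1 m threshold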

Example 7: GetCameraRay

void Scene::GetCameraRay(double x, double y, Eigen::Vector3d* origin, Eigen::Vector3d* ray) {
  Eigen::Matrix4f inverse;
  inverse = g_viewMatrix.inverse();
  Eigen::Vector4f preVec;
  preVec << (2 * x / DDWIDTH) - 1, 1 - (2 * y / DDHEIGHT), 2 * .5 - 1, 1;
  (*origin)[0] = xpos;
  (*origin)[1] = ypos;
  (*origin)[2] = zpos;
  Eigen::Vector4f ori = inverse * preVec;
  (*ray)[0] = ori[0] - (*origin)[0];
  (*ray)[1] = ori[1] - (*origin)[1];
  (*ray)[2] = ori[2] - (*origin)[2];
}
Developer ID: patrickriordan, Project: mass-spring, Lines: 13, Source file: scene.cpp

Example 8: setGlobalPose

void DynamicsRobot::setGlobalPose(Eigen::Matrix4f &gp)
{
    Eigen::Matrix4f currentPose = robot->getGlobalPose();
    Eigen::Matrix4f delta = gp * currentPose.inverse();

    robot->setGlobalPose(gp);
    std::map<VirtualRobot::RobotNodePtr, DynamicsObjectPtr>::iterator it = dynamicRobotNodes.begin();
    while (it != dynamicRobotNodes.end())
    {
        Eigen::Matrix4f newPose = it->second->getSceneObject()->getGlobalPose() * delta;
        it->second->setPose(newPose);
        it++;
    }
}
Developer ID: TheMarex, Project: simox, Lines: 14, Source file: DynamicsRobot.cpp
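
Because `currentPose` here is a rigid robot pose (orthonormal rotation plus translation), the general `inverse()` call could in principle be replaced by the closed-form inverse of a rigid transform. The following is only a sketch of that alternative, not what the Simox code actually does:

#include <Eigen/Dense>

// The inverse of a rigid transform [R t; 0 1] is [R^T  -R^T t; 0 1].
Eigen::Matrix4f invertRigid(const Eigen::Matrix4f& T)
{
    Eigen::Matrix3f R = T.block<3,3>(0,0);
    Eigen::Vector3f t = T.block<3,1>(0,3);

    Eigen::Matrix4f inv = Eigen::Matrix4f::Identity();
    inv.block<3,3>(0,0) = R.transpose();
    inv.block<3,1>(0,3) = -R.transpose() * t;
    return inv;
}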

Example 9: getRegisteredAndReferencedPointCloud

void DepthFrame::getRegisteredAndReferencedPointCloud(pcl::PointCloud<pcl::PointXYZ>& cloud)
{
    pcl::PointXYZ regReferencePoint = getRegisteredReferencePoint();
    
    // Build a translation matrix to the registered reference the cloud after its own registration
    Eigen::Matrix4f E = Eigen::Matrix4f::Identity();
    E.col(3) = regReferencePoint.getVector4fMap(); // set translation column
    
    // Apply registration first and then referenciation (right to left order in matrix product)
    pcl::PointCloud<pcl::PointXYZ>::Ptr pCloudTmp (new pcl::PointCloud<pcl::PointXYZ>);
    getPointCloud(*pCloudTmp);
    
    pcl::transformPointCloud(*pCloudTmp, cloud, E.inverse() * m_T);
}
Developer ID: aclapes, Project: ReMedi2, Lines: 14, Source file: DepthFrame.cpp
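
Since `E` above is a pure translation (identity rotation with the reference point in the last column), its inverse is simply the same matrix with the translation negated. A small sketch of that equivalent, assuming `E` is built exactly as in the method above:

Eigen::Matrix4f E_inv = Eigen::Matrix4f::Identity();
E_inv.block<3,1>(0,3) = -E.block<3,1>(0,3);
// pcl::transformPointCloud(*pCloudTmp, cloud, E_inv * m_T);   // equivalent to E.inverse() * m_T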

Example 10: cloud_temp

void SQ_fitter<PointT>::getBoundingBox(const PointCloudPtr &_cloud,
				       double _dim[3],
				       double _trans[3],
				       double _rot[3] ) {

  // 1. Compute the bounding box center
  Eigen::Vector4d centroid;
  pcl::compute3DCentroid( *_cloud, centroid );
  _trans[0] = centroid(0);
  _trans[1] = centroid(1); 
  _trans[2] = centroid(2);

  // 2. Compute main axis orientations
  pcl::PCA<PointT> pca;
  pca.setInputCloud( _cloud );
  Eigen::Vector3f eigVal = pca.getEigenValues();
  Eigen::Matrix3f eigVec = pca.getEigenVectors();
  // Make sure the three axes are mutually orthogonal and form a right-handed frame
  eigVec.col(2) = eigVec.col(0); // use the dominant eigenvector as Z
  Eigen::Vector3f v3 = (eigVec.col(1)).cross( eigVec.col(2) );
  eigVec.col(0) = v3;
  Eigen::Vector3f rpy = eigVec.eulerAngles(2,1,0);
 
  _rot[0] = (double)rpy(2);
  _rot[1] = (double)rpy(1);
  _rot[2] = (double)rpy(0);

  // Transform _cloud
  Eigen::Matrix4f transf = Eigen::Matrix4f::Identity();
  transf.block(0,3,3,1) << (float)centroid(0), (float)centroid(1), (float)centroid(2);
  transf.block(0,0,3,3) = eigVec;

  Eigen::Matrix4f tinv; tinv = transf.inverse();
  PointCloudPtr cloud_temp( new pcl::PointCloud<PointT>() );
  pcl::transformPointCloud( *_cloud, *cloud_temp, tinv );

  // Get maximum and minimum
  PointT minPt; PointT maxPt;
  pcl::getMinMax3D( *cloud_temp, minPt, maxPt );
  
  _dim[0] = ( maxPt.x - minPt.x ) / 2.0;
  _dim[1] = ( maxPt.y - minPt.y ) / 2.0;
  _dim[2] = ( maxPt.z - minPt.z ) / 2.0;

}
Developer ID: LongfeiProjects, Project: GSoC_PCL, Lines: 46, Source file: SQ_fitter.hpp

Example 11: applyViewMatrix

void Camera::applyViewMatrix(MatrixStack *MV) const
{
	// Create the translation and rotation matrices
	Eigen::Matrix4f T = Eigen::Matrix4f::Identity();
	T(0,3) = position(0);
	T(1,3) = position(1);
	T(2,3) = position(2);
	Eigen::Matrix4f YR = Eigen::Matrix4f::Identity();
	YR.block<3,3>(0,0) = Eigen::AngleAxisf(yawRotation, Eigen::Vector3f(0.0f, 1.0f, 0.0f)).toRotationMatrix();
	Eigen::Matrix4f PR = Eigen::Matrix4f::Identity();
	PR.block<3,3>(0,0) = Eigen::AngleAxisf(pitchRotation, Eigen::Vector3f(1.0f, 0.0f, 0.0f)).toRotationMatrix();

	// The matrix C is the product of these matrices
	Eigen::Matrix4f C = T * YR * PR; // Also apply rotations here
	// The view matrix is the inverse
	Eigen::Matrix4f V = C.inverse();
	// Add to the matrix stack
	MV->multMatrix(V);
}
Developer ID: elliotfiske, Project: slumber, Lines: 19, Source file: Camera.cpp
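
Since the camera matrix `C` is rigid (rotations and a translation), an alternative — again just a sketch, not what this project does — is to build it as an `Eigen::Affine3f` and request Eigen's isometry-aware inverse, which avoids a general 4x4 inversion:

#include <Eigen/Geometry>

// Same composition as C = T * YR * PR above, using the camera's position and angles.
Eigen::Affine3f C_affine = Eigen::Translation3f(position(0), position(1), position(2))
                         * Eigen::AngleAxisf(yawRotation,   Eigen::Vector3f::UnitY())
                         * Eigen::AngleAxisf(pitchRotation, Eigen::Vector3f::UnitX());
Eigen::Matrix4f V = C_affine.inverse(Eigen::Isometry).matrix();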

Example 12: focusSceneUpdate

void Scene::focusSceneUpdate(CloudPtr &pc_in, tf::Transform &transform_camFromBase, tf::Transform &transform_baseFromfocus, MatrixXf boundBox, Eigen::Matrix4f eigen_virtualFromFocus_old)
{
    pc_scene.reset (new Cloud);
    pc_scene_focus.reset (new Cloud);
    pc_scene_base.reset(new Cloud);
    pc_scene_focus_back.reset(new Cloud);

    CloudPtr pc_scene_focus_temp;
    pc_scene_focus_temp.reset (new Cloud);

    Eigen::Matrix4f eigen_camFromBase;
    transformAsMatrix (transform_camFromBase, eigen_camFromBase);
    if(DEBUG_ALGORITHM) cout<<"-------------camupdated"<<endl;
    if(DEBUG_ALGORITHM) cout<<eigen_camFromBase<<endl;

    pcl_ros::transformPointCloud(*pc_in, *pc_scene_base, transform_camFromBase);
    pcl_ros::transformPointCloud(*pc_scene_base, *pc_scene_focus_temp, transform_baseFromfocus);

    for(int i=0;i<pc_in->points.size();i++){
        PointT point_in = pc_in->points[i];
        PointT point_transformed = pc_scene_focus_temp->points[i];
        // cut off
        if(point_transformed.x<boundBox(0,0) && point_transformed.x>boundBox(0,1) &&
           point_transformed.y<boundBox(1,0) && point_transformed.y>boundBox(1,1) &&
           point_transformed.z<boundBox(2,0) && point_transformed.z>boundBox(2,1)){
            pc_scene->points.push_back(point_in);
            pc_scene_focus->points.push_back(point_transformed);
        }
    }

    // return back pc_scene to the original cam pos
    Eigen::Matrix4f inv = eigen_virtualFromFocus_old.inverse();
    for(int i=0;i<pc_scene_focus->points.size();i++){
        PointT point = pc_scene_focus->points[i];
        PointT point_back;
        point_back.x = inv(0,0)*point.x + inv(0,1)*point.y + inv(0,2)*point.z + inv(0,3);
        point_back.y = inv(1,0)*point.x + inv(1,1)*point.y + inv(1,2)*point.z + inv(1,3);
        point_back.z = inv(2,0)*point.x + inv(2,1)*point.y + inv(2,2)*point.z + inv(2,3);
        pc_scene_focus_back->points.push_back(point_back);
    }

//    if(USENORM) calcNormal();
}
Developer ID: AIS-Bonn, Project: vismotcoord, Lines: 41, Source file: scene.cpp

Example 13: file

Eigen::Matrix4f computeGroundTruth(const std::string path_groundtruth, const int id)
{
	std::string line;
	std::ifstream file(path_groundtruth);
	int offset = 3;
	std::vector<std::string> splitVec;
	int index = id * 5 + offset;
	Eigen::Matrix4f transformationMatrix;

	if (file.is_open())
	{
		int count = 1;
		bool done = false;

		while (count<index)
		{
			std::getline(file, line);
			count++;
		}

		//start reading matrix from file
		for (int i = 0; i<4; i++)
		{
			std::getline(file, line);
			boost::split(splitVec, line, boost::is_any_of("\t"), boost::token_compress_on);
			transformationMatrix.row(i) << boost::lexical_cast<float>(splitVec.at(0)),
				boost::lexical_cast<float>(splitVec.at(1)),
				boost::lexical_cast<float>(splitVec.at(2)),
				boost::lexical_cast<float>(splitVec.at(3));
		}
	}
	else
		std::cout << "GroundTruth file not found" << std::endl;

	// inverse() returns the inverted matrix without modifying it in place; assign the result back
	transformationMatrix = transformationMatrix.inverse().eval();

	return transformationMatrix;
}
Developer ID: CVLAB-Unibo, Project: Keypoint-Learning, Lines: 38, Source file: view_manager.cpp

Example 14: cloud_model

void
pcl::ihs::OfflineIntegration::computationThread ()
{
  std::vector <std::string> filenames;

  if (!this->getFilesFromDirectory (path_dir_, ".pcd", filenames))
  {
    std::cerr << "ERROR in offline_integration.cpp: Could not get the files from the directory\n";
    return;
  }

  // First cloud is reference model
  std::cerr << "Processing file " << std::setw (5) << 1 << " / " << filenames.size () << std::endl;
  CloudXYZRGBNormalPtr cloud_model (new CloudXYZRGBNormal ());
  Eigen::Matrix4f T = Eigen::Matrix4f::Identity ();
  if (!this->load (filenames [0], cloud_model, T))
  {
    std::cerr << "ERROR in offline_integration.cpp: Could not load the model cloud.\n";
    return;
  }

  pcl::transformPointCloudWithNormals (*cloud_model, *cloud_model, T);

  if (!integration_->reconstructMesh (cloud_model, mesh_model_))
  {
    std::cerr << "ERROR in offline_integration.cpp: Could not reconstruct the model mesh.\n";
    return;
  }

  Base::setPivot ("model");
  Base::addMesh (mesh_model_, "model");

  if (filenames.size () < 1)
  {
    return;
  }

  for (unsigned int i=1; i<filenames.size (); ++i)
  {
    std::cerr << "Processing file " << std::setw (5) << i+1 << " / " << filenames.size () << std::endl;

    {
      boost::mutex::scoped_lock lock (mutex_);
      if (destructor_called_) return;
    }
    boost::mutex::scoped_lock lock_quit (mutex_quit_);

    CloudXYZRGBNormalPtr cloud_data (new CloudXYZRGBNormal ());
    if (!this->load (filenames [i], cloud_data, T))
    {
      std::cerr << "ERROR in offline_integration.cpp: Could not load the data cloud.\n";
      return;
    }

    if (!integration_->merge (cloud_data, mesh_model_, T))
    {
      std::cerr << "ERROR in offline_integration.cpp: merge failed.\n";
      return;
    }

    integration_->age (mesh_model_);

    {
      lock_quit.unlock ();
      boost::mutex::scoped_lock lock (mutex_);
      if (destructor_called_) return;

      Base::addMesh (mesh_model_, "model", Eigen::Isometry3d (T.inverse ().cast <double> ()));
      Base::calcFPS (computation_fps_);
    }
  }
  Base::setPivot ("model");
}
Developer ID: FBIKKIBF, Project: pcl, Lines: 73, Source file: offline_integration.cpp

Example 15: pose_fn

void
RegisteredViewsSource<Full3DPointT, PointInT, OutModelPointT>::loadModel (ModelT & model)
{
    const std::string training_view_path = path_ + model.class_ + "/" + model.id_ + "/views/";
    const std::string view_pattern = ".*" + view_prefix_ + ".*.pcd";
    model.view_filenames_ = io::getFilesInDirectory(training_view_path, view_pattern, false);
    std::cout << "Object class: " << model.class_ << ", id: " << model.id_ << ", views: " << model.view_filenames_.size() << std::endl;

    typename pcl::PointCloud<Full3DPointT>::Ptr modell (new pcl::PointCloud<Full3DPointT>);
    typename pcl::PointCloud<Full3DPointT>::Ptr modell_voxelized (new pcl::PointCloud<Full3DPointT>);
    pcl::io::loadPCDFile(path_ + model.class_ + "/" + model.id_ + "/3D_model.pcd", *modell);

    float voxel_grid_size = 0.003f;
    typename pcl::VoxelGrid<Full3DPointT> grid_;
    grid_.setInputCloud (modell);
    grid_.setLeafSize (voxel_grid_size, voxel_grid_size, voxel_grid_size);
    grid_.setDownsampleAllData (true);
    grid_.filter (*modell_voxelized);

    model.normals_assembled_.reset(new pcl::PointCloud<pcl::Normal>);
    model.assembled_.reset (new pcl::PointCloud<PointInT>);

    pcl::copyPointCloud(*modell_voxelized, *model.assembled_);
    pcl::copyPointCloud(*modell_voxelized, *model.normals_assembled_);

    if(load_into_memory_)
    {
        model.views_.resize( model.view_filenames_.size() );
        model.indices_.resize( model.view_filenames_.size() );
        model.poses_.resize( model.view_filenames_.size() );
        model.self_occlusions_.resize( model.view_filenames_.size() );

        for (size_t i=0; i<model.view_filenames_.size(); i++)
        {
            // load training view
            const std::string view_file = training_view_path + "/" + model.view_filenames_[i];
            model.views_[i].reset( new pcl::PointCloud<PointInT> () );
            pcl::io::loadPCDFile (view_file, *model.views_[i]);

            // read pose
            std::string pose_fn (view_file);
            boost::replace_last (pose_fn, view_prefix_, pose_prefix_);
            boost::replace_last (pose_fn, ".pcd", ".txt");
            Eigen::Matrix4f pose = io::readMatrixFromFile( pose_fn );
            model.poses_[i] = pose.inverse(); //the recognizer assumes transformation from M to CC - i think!

            // read object mask
            model.indices_[i].indices.clear();
            std::string obj_indices_fn (view_file);
            boost::replace_last (obj_indices_fn, view_prefix_, indices_prefix_);
            boost::replace_last (obj_indices_fn, ".pcd", ".txt");
            std::ifstream f ( obj_indices_fn.c_str() );
            int idx;
            while (f >> idx)
                model.indices_[i].indices.push_back(idx);
            f.close();

            model.self_occlusions_[i] = -1.f;
        }
    }
    else
    {
Developer ID: severin-lemaignan, Project: v4r, Lines: 62, Source file: registered_views_source.hpp


Note: The `Eigen::Matrix4f::inverse` examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not republish without permission.