

C++ Matrix4f::topLeftCorner Method Code Examples

This article collects typical usage examples of the eigen::Matrix4f::topLeftCorner method in C++. If you are wondering what Matrix4f::topLeftCorner does, when to use it, or how to call it, the curated examples below should help. You can also browse further usage examples of the enclosing class, eigen::Matrix4f.


The following presents 12 code examples of the Matrix4f::topLeftCorner method, sorted by popularity.
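Before the project examples, here is a minimal standalone sketch (written for this article, not drawn from any of the projects below) showing the typical pattern: writing a rotation/translation pair into the top-left 3x3 and top-right 3x1 blocks of a 4x4 homogeneous transform, then reading them back with both the run-time sized and the fixed-size (templated) forms of topLeftCorner.

#include <iostream>
#include <Eigen/Dense>

int main()
{
    // Build a 4x4 homogeneous transform from a rotation R and a translation t.
    Eigen::Matrix4f pose = Eigen::Matrix4f::Identity();
    Eigen::Matrix3f R = Eigen::AngleAxisf(0.5f, Eigen::Vector3f::UnitZ()).toRotationMatrix();
    Eigen::Vector3f t(1.0f, 2.0f, 3.0f);
    pose.topLeftCorner(3, 3) = R;      // run-time sized block
    pose.topRightCorner(3, 1) = t;

    // Read the blocks back; topLeftCorner<3,3>() is the fixed-size variant.
    Eigen::Matrix3f R_out = pose.topLeftCorner<3, 3>();
    Eigen::Vector3f t_out = pose.topRightCorner<3, 1>();

    std::cout << "R =\n" << R_out << "\nt = " << t_out.transpose() << std::endl;
    return 0;
}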

Example 1: initICPModel

void ICPOdometry::initICPModel(const DeviceArray2D<unsigned short>& depth,
                               const float depthCutoff,
                               const Eigen::Matrix4f & modelPose)
{
    depth_tmp[0] = depth;

    for(int i = 1; i < NUM_PYRS; ++i)
    {
        pyrDown(depth_tmp[i - 1], depth_tmp[i]);
    }

    for(int i = 0; i < NUM_PYRS; ++i)
    {
        createVMap(intr(i), depth_tmp[i], vmaps_g_prev_[i], depthCutoff);
        createNMap(vmaps_g_prev_[i], nmaps_g_prev_[i]);
    }

    cudaDeviceSynchronize();

    Eigen::Matrix<float, 3, 3, Eigen::RowMajor> Rcam = modelPose.topLeftCorner(3, 3);
    Eigen::Vector3f tcam = modelPose.topRightCorner(3, 1);

    Mat33 &  device_Rcam = device_cast<Mat33>(Rcam);
    float3& device_tcam = device_cast<float3>(tcam);

    if (modelPose != Eigen::Matrix4f::Identity())
      for(int i = 0; i < NUM_PYRS; ++i)
        tranformMaps(vmaps_g_prev_[i], nmaps_g_prev_[i], device_Rcam, device_tcam, vmaps_g_prev_[i], nmaps_g_prev_[i]);

    cudaDeviceSynchronize();
}
Developer: ahundt, Project: ICPCUDA, Lines of code: 31, Source file: ICPOdometry.cpp

Example 2: initICPModel

void ICPSlowdometry::initICPModel(unsigned short * depth,
                                  const float depthCutoff,
                                  const Eigen::Matrix4f & modelPose)
{
    depth_tmp[0].upload(depth, sizeof(unsigned short) * width, height, width);

    for(int i = 1; i < NUM_PYRS; ++i)
    {
        pyrDown(depth_tmp[i - 1], depth_tmp[i]);
    }

    for(int i = 0; i < NUM_PYRS; ++i)
    {
        createVMap(intr(i), depth_tmp[i], vmaps_g_prev_[i], depthCutoff);
        createNMap(vmaps_g_prev_[i], nmaps_g_prev_[i]);
    }

    cudaDeviceSynchronize();

    Eigen::Matrix<float, 3, 3, Eigen::RowMajor> Rcam = modelPose.topLeftCorner(3, 3);
    Eigen::Vector3f tcam = modelPose.topRightCorner(3, 1);

    Mat33 &  device_Rcam = device_cast<Mat33>(Rcam);
    float3& device_tcam = device_cast<float3>(tcam);

    for(int i = 0; i < NUM_PYRS; ++i)
    {
        tranformMaps(vmaps_g_prev_[i], nmaps_g_prev_[i], device_Rcam, device_tcam, vmaps_g_prev_[i], nmaps_g_prev_[i]);
    }

    cudaDeviceSynchronize();
}
Developer: Acidburn0zzz, Project: ICPCUDA, Lines of code: 32, Source file: ICPSlowdometry.cpp

Example 3: addCloud

	void DiriniVisualizer::addCloud(pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_, Eigen::Matrix3f R, Eigen::Vector3f t)
	{
		Eigen::Matrix4f H = Eigen::Matrix4f::Identity();

		H.topLeftCorner(3,3) = R;
		H.topRightCorner(3,1) = t;

		addCloud(cloud_, H);
	}
Developer: mwuethri, Project: dirini, Lines of code: 9, Source file: dirini_visualizer.cpp

Example 4: main

int main()
{
  Eigen::Matrix4f m;
  m << 1, 2, 3, 4,
       5, 6, 7, 8,
       9, 10,11,12,
       13,14,15,16;
  cout << "m.leftCols(2) =" << endl << m.leftCols(2) << endl << endl;
  cout << "m.bottomRows<2>() =" << endl << m.bottomRows<2>() << endl << endl;
  m.topLeftCorner(1,3) = m.bottomRightCorner(3,1).transpose();
  cout << "After assignment, m = " << endl << m << endl;
}
Developer: rajeev2010, Project: cpp_tutorial, Lines of code: 12, Source file: gk_eigen_corner_row_col.cpp

Example 5: correspondences

void OBJCTXT<_DOF6>::findCorrespondences3(const OBJCTXT &ctxt, std::vector<SCOR> &cors,
                                          const DOF6 &tf) {
  map_cors_.clear();

  Eigen::Matrix4f M = tf.getTF4().inverse();
  Eigen::Vector3f t=M.col(3).head<3>();
  Eigen::Matrix3f R=M.topLeftCorner(3,3);

  const float thr = tf.getRotationVariance()+0.05f;

  for(size_t j=0; j<ctxt.objs_.size(); j++)
  {
    OBJECT obj = *ctxt.objs_[j];
    obj.transform(R,t,0,0);

    for(size_t i=0; i<objs_.size(); i++)
      if( (obj.getData().getBB().preassumption(objs_[i]->getData().getBB())>=std::cos(thr+objs_[i]->getData().getBB().ratio())) &&
          obj.intersectsBB(*objs_[i], tf.getRotationVariance(),tf.getTranslationVariance())
          && obj.getData().extensionMatch(objs_[i]->getData(),0.7f,0)
      )
      {
        if(obj.intersectsPts(*objs_[i], tf.getRotationVariance(),tf.getTranslationVariance()) ||
            objs_[i]->intersectsPts(obj, tf.getRotationVariance(),tf.getTranslationVariance()))
        {
          // second check
          map_cors_[ctxt.objs_[j]].push_back(cors.size());
          SCOR c;
          c.a = objs_[i];
          c.b = ctxt.objs_[j];
          cors.push_back(c);
        }
      }

  }

#ifdef DEBUG_
  ROS_INFO("found %d correspondences (%d %d)", (int)cors.size(), (int)objs_.size(), (int)ctxt.objs_.size());

  for(typename std::vector<SCOR>::iterator it = cors.begin(); it!=cors.end(); it++)
  {
    Debug::Interface::get().addArrow(it->a->getNearestPoint(),it->b->getNearestPoint(), 0,255,0);
  }
#endif

}
Developer: Etimr, Project: cob_environment_perception, Lines of code: 45, Source file: registration.hpp

Example 6: initICPModel

void RGBDOdometry::initICPModel(GPUTexture * predictedVertices,
                                GPUTexture * predictedNormals,
                                const float depthCutoff,
                                const Eigen::Matrix4f & modelPose)
{
    cudaArray * textPtr;

    cudaGraphicsMapResources(1, &predictedVertices->cudaRes);
    cudaGraphicsSubResourceGetMappedArray(&textPtr, predictedVertices->cudaRes, 0, 0);
    cudaMemcpyFromArray(vmaps_tmp.ptr(), textPtr, 0, 0, vmaps_tmp.sizeBytes(), cudaMemcpyDeviceToDevice);
    cudaGraphicsUnmapResources(1, &predictedVertices->cudaRes);

    cudaGraphicsMapResources(1, &predictedNormals->cudaRes);
    cudaGraphicsSubResourceGetMappedArray(&textPtr, predictedNormals->cudaRes, 0, 0);
    cudaMemcpyFromArray(nmaps_tmp.ptr(), textPtr, 0, 0, nmaps_tmp.sizeBytes(), cudaMemcpyDeviceToDevice);
    cudaGraphicsUnmapResources(1, &predictedNormals->cudaRes);

    copyMaps(vmaps_tmp, nmaps_tmp, vmaps_g_prev_[0], nmaps_g_prev_[0]);

    for(int i = 1; i < NUM_PYRS; ++i)
    {
        resizeVMap(vmaps_g_prev_[i - 1], vmaps_g_prev_[i]);
        resizeNMap(nmaps_g_prev_[i - 1], nmaps_g_prev_[i]);
    }

    Eigen::Matrix<float, 3, 3, Eigen::RowMajor> Rcam = modelPose.topLeftCorner(3, 3);
    Eigen::Vector3f tcam = modelPose.topRightCorner(3, 1);

    mat33 device_Rcam = Rcam;
    float3 device_tcam = *reinterpret_cast<float3*>(tcam.data());

    for(int i = 0; i < NUM_PYRS; ++i)
    {
        tranformMaps(vmaps_g_prev_[i], nmaps_g_prev_[i], device_Rcam, device_tcam, vmaps_g_prev_[i], nmaps_g_prev_[i]);
    }

    cudaDeviceSynchronize();
}
Developer: Hielamon, Project: ElasticFusion, Lines of code: 38, Source file: RGBDOdometry.cpp

Example 7: while

template <typename PointSource, typename PointTarget> inline void
pcl::GeneralizedIterativeClosestPoint<PointSource, PointTarget>::computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess)
{
  pcl::IterativeClosestPoint<PointSource, PointTarget>::initComputeReciprocal ();
  using namespace std;
  // Difference between consecutive transforms
  double delta = 0;
  // Get the size of the target
  const size_t N = indices_->size ();
  // Set the mahalanobis matrices to identity
  mahalanobis_.resize (N, Eigen::Matrix3d::Identity ());
  // Compute target cloud covariance matrices
  if (target_covariances_.empty ())
    computeCovariances<PointTarget> (target_, tree_, target_covariances_);
  // Compute input cloud covariance matrices
  if (input_covariances_.empty ())
    computeCovariances<PointSource> (input_, tree_reciprocal_, input_covariances_);

  base_transformation_ = guess;
  nr_iterations_ = 0;
  converged_ = false;
  double dist_threshold = corr_dist_threshold_ * corr_dist_threshold_;
  std::vector<int> nn_indices (1);
  std::vector<float> nn_dists (1);

  while(!converged_)
  {
    size_t cnt = 0;
    std::vector<int> source_indices (indices_->size ());
    std::vector<int> target_indices (indices_->size ());

    // guess corresponds to base_t and transformation_ to t
    Eigen::Matrix4d transform_R = Eigen::Matrix4d::Zero ();
    for(size_t i = 0; i < 4; i++)
      for(size_t j = 0; j < 4; j++)
        for(size_t k = 0; k < 4; k++)
          transform_R(i,j)+= double(transformation_(i,k)) * double(guess(k,j));

    Eigen::Matrix3d R = transform_R.topLeftCorner<3,3> ();

    for (size_t i = 0; i < N; i++)
    {
      PointSource query = output[i];
      query.getVector4fMap () = guess * query.getVector4fMap ();
      query.getVector4fMap () = transformation_ * query.getVector4fMap ();

      if (!searchForNeighbors (query, nn_indices, nn_dists))
      {
        PCL_ERROR ("[pcl::%s::computeTransformation] Unable to find a nearest neighbor in the target dataset for point %d in the source!\n", getClassName ().c_str (), (*indices_)[i]);
        return;
      }
      
      // Check if the distance to the nearest neighbor is smaller than the user imposed threshold
      if (nn_dists[0] < dist_threshold)
      {
        Eigen::Matrix3d &C1 = input_covariances_[i];
        Eigen::Matrix3d &C2 = target_covariances_[nn_indices[0]];
        Eigen::Matrix3d &M = mahalanobis_[i];
        // M = R*C1
        M = R * C1;
        // temp = M*R' + C2 = R*C1*R' + C2
        Eigen::Matrix3d temp = M * R.transpose();        
        temp+= C2;
        // M = temp^-1
        M = temp.inverse ();
        source_indices[cnt] = static_cast<int> (i);
        target_indices[cnt] = nn_indices[0];
        cnt++;
      }
    }
    // Resize to the actual number of valid correspondences
    source_indices.resize(cnt); target_indices.resize(cnt);
    /* optimize transformation using the current assignment and Mahalanobis metrics*/
    previous_transformation_ = transformation_;
    //optimization right here
    try
    {
      rigid_transformation_estimation_(output, source_indices, *target_, target_indices, transformation_);
      /* compute the delta from this iteration */
      delta = 0.;
      for(int k = 0; k < 4; k++) {
        for(int l = 0; l < 4; l++) {
          double ratio = 1;
          if(k < 3 && l < 3) // rotation part of the transform
            ratio = 1./rotation_epsilon_;
          else
            ratio = 1./transformation_epsilon_;
          double c_delta = ratio*fabs(previous_transformation_(k,l) - transformation_(k,l));
          if(c_delta > delta)
            delta = c_delta;
        }
      }
    } 
    catch (PCLException &e)
    {
      PCL_DEBUG ("[pcl::%s::computeTransformation] Optimization issue %s\n", getClassName ().c_str (), e.what ());
      break;
    }
    nr_iterations_++;
    // Check for convergence
//......... (remainder of the code omitted) .........
Developer: 87west, Project: pcl, Lines of code: 101, Source file: gicp.hpp

Example 8: main


//......... (preceding part of the code omitted) .........
                    loader.load(camfile, ColorHelper::READ_CONFIDENCE);
                }
                cout << "Done reading " << loader.size() << " color images" << endl;
                cout << "Reprojecting..." << endl;
                reproject(loader, m, hdr_threshold);
            } else {
                loader.load(camfile, 0);
                loader.load(project, ColorHelper::READ_COLOR | ColorHelper::READ_DEPTH);
                if (use_confidence_files) {
                    loader.load(project, ColorHelper::READ_CONFIDENCE);
                }
                cout << "Reprojecting..." << endl;
                reproject((const float*) loader.getImage(project),
                          loader.getConfidenceMap(project),
                          loader.getDepthMap(project),
                          loader.getCamera(project),
                          m, hdr_threshold);
            }
            cout << "Done reprojecting; clustering lights..." << endl;
            numlights = clusterLights(m, hdr_threshold, minlightsize);
            cout << "Done clustering " << numlights << " lights" << endl;
        }
        if (output_reprojection) m.writeSamples(outfile);
        if (coloredfile.length()) m.writeColoredMesh(coloredfile, displayscale);
        m.computeColorsOGL();
        hemicuberesolution = max(hemicuberesolution, loader.getCamera(0)->width);
        hemicuberesolution = max(hemicuberesolution, loader.getCamera(0)->height);
        cout << "========================" << endl;
    }
    InverseRender ir(&m, numlights, hemicuberesolution);
    vector<SampleData> walldata, floordata;
    // Only do inverse rendering with full reprojection and wall labels
    if (do_sampling && (input || all_project) && (wallinput || do_wallfinding)) {
        cout << "==== SAMPLING SCENE ====" << endl;
        if (read_eq) {
            ir.loadVariablesBinary(walldata, samplefile + ".walls");
            if (do_texture) {
                ir.loadVariablesBinary(floordata, samplefile + ".floors");
            }
        } else {
            ir.computeSamples(walldata, wallindices, numsamples, discardthreshold);
            if (do_texture) {
                ir.computeSamples(floordata, floorindices, numsamples, discardthreshold);
            }
            if (write_eq) {
                ir.writeVariablesBinary(walldata, sampleoutfile + ".walls");
                if (do_texture) {
                    ir.writeVariablesBinary(floordata, sampleoutfile + ".floors");
                }
            }
            if (write_matlab) {
                ir.writeVariablesMatlab(walldata, matlabsamplefile);
            }
        }
        cout << "========================" << endl;
        ir.setNumRansacIters(numRansacIters);
        ir.setMaxPercentErr(maxPercentErr);
        ir.solve(walldata);

        Texture tex;
        if (do_texture) {
            // Prepare floor plane
            Eigen::Matrix4f t = of.getNormalizationTransform().inverse();
            Eigen::Vector3f floornormal(0,flipfloorceiling?-1:1,0);
            floornormal = t.topLeftCorner(3,3)*floornormal;
            Eigen::Vector4f floorpoint(0, wf.floorplane, 0, 1);
            floorpoint = t*floorpoint;
            R3Plane floorplane(eigen2gaps(floorpoint.head(3)).Point(), eigen2gaps(floornormal));

            loader.load(camfile, use_confidence_files);

            cout << "Solving texture..." << endl;
            ir.solveTexture(floordata, &loader, floorplane, tex);
            cout << "Done solving texture..." << endl;
            if (tex.size > 0) {
                ColorHelper::writeExrImage(texfile, tex.texture, tex.size, tex.size);
            }
        }
        if (radfile != "") {
            outputRadianceFile(radfile, wf, m, ir);
        }
        if (plyfile != "") {
            outputPlyFile(plyfile, wf, m, ir);
        }
        if (pbrtfile != "") {
            outputPbrtFile(pbrtfile, wf, m, ir, tex, loader.getCamera(0), do_texture?texfile:"");
        }
    }
    cout << "DONE PROCESSING" << endl;

    if (display) {
        InvRenderVisualizer irv(cloud, loader, wf, ir);
        if (project_debug) irv.recalculateColors(InvRenderVisualizer::LABEL_REPROJECT_DEBUG);
        irv.visualizeCameras(camera);
        irv.visualizeWalls();
        irv.addSamples(walldata);
        irv.loop();
    }
    return 0;
}
Developer: kyzyx, Project: empty-room, Lines of code: 101, Source file: invrender.cpp

Example 9: printHelp

/* ---[ */
int
main (int argc, char** argv)
{
  print_info ("Transform a cloud. For more information, use: %s -h\n", argv[0]);

  bool help = false;
  parse_argument (argc, argv, "-h", help);
  if (argc < 3 || help)
  {
    printHelp (argc, argv);
    return (-1);
  }

  // Parse the command line arguments for .pcd files
  std::vector<int> p_file_indices;
  p_file_indices = parse_file_extension_argument (argc, argv, ".pcd");
  if (p_file_indices.size () != 2)
  {
    print_error ("Need one input PCD file and one output PCD file to continue.\n");
    return (-1);
  }

  // Initialize the transformation matrix
  Eigen::Matrix4f tform; 
  tform.setIdentity ();

  // Command line parsing
  float dx, dy, dz;
  std::vector<float> values;

  if (parse_3x_arguments (argc, argv, "-trans", dx, dy, dz) > -1)
  {
    tform (0, 3) = dx;
    tform (1, 3) = dy;
    tform (2, 3) = dz;
  }

  if (parse_x_arguments (argc, argv, "-quat", values) > -1)
  {
    if (values.size () == 4)
    {
      const float& x = values[0];
      const float& y = values[1];
      const float& z = values[2];
      const float& w = values[3];
      tform.topLeftCorner (3, 3) = Eigen::Matrix3f (Eigen::Quaternionf (w, x, y, z));
    }
    else
    {
      print_error ("Wrong number of values given (%zu): ", values.size ());
      print_error ("The quaternion specified with -quat must contain 4 elements (w,x,y,z).\n");
    }
  }

  if (parse_x_arguments (argc, argv, "-axisangle", values) > -1)
  {
    if (values.size () == 4)
    {
      const float& ax = values[0];
      const float& ay = values[1];
      const float& az = values[2];
      const float& theta = values[3];
      tform.topLeftCorner (3, 3) = Eigen::Matrix3f (Eigen::AngleAxisf (theta, Eigen::Vector3f (ax, ay, az)));
    }
    else
    {
      print_error ("Wrong number of values given (%zu): ", values.size ());
      print_error ("The rotation specified with -axisangle must contain 4 elements (ax,ay,az,theta).\n");
    }
  }

  if (parse_x_arguments (argc, argv, "-matrix", values) > -1)
  {
    if (values.size () == 9 || values.size () == 16)
    {
      int n = values.size () == 9 ? 3 : 4;
      for (int r = 0; r < n; ++r)
        for (int c = 0; c < n; ++c)
          tform (r, c) = values[n*r+c];
    }
    else
    {
      print_error ("Wrong number of values given (%zu): ", values.size ());
      print_error ("The transformation specified with -matrix must be 3x3 (9) or 4x4 (16).\n");
    }
  }

  // Load the first file
  pcl::PCLPointCloud2::Ptr cloud (new pcl::PCLPointCloud2);
  if (!loadCloud (argv[p_file_indices[0]], *cloud)) 
    return (-1);

  // Apply the transform
  pcl::PCLPointCloud2 output;
  compute (cloud, output, tform);

  // Check if a scaling parameter has been given
  double divider[3];
  if (parse_3x_arguments (argc, argv, "-scale", divider[0], divider[1], divider[2]) > -1)
//......... (remainder of the code omitted) .........
Developer: brodyh, Project: sail-car-log, Lines of code: 101, Source file: transform_point_cloud.cpp

Example 10: getTransformationByRANSAC


//......... (preceding part of the code omitted) .........
			{
				used[id] = true;
				sample_matches_indices.push_back(id);
				sample_matches_vector.push_back(initial_matches.at(id));
			}
		}

		bool valid; // valid is false iff the sampled points clearly aren't inliers themself 
		transformation = Feature::getTransformFromMatches(valid,
			earlier->feature_pts_3d,
			now->feature_pts_3d,
			sample_matches_vector, max_dist_m);

		if (!valid) continue; // valid is false iff the sampled points aren't inliers themself 
		if (transformation != transformation) continue; //Contains NaN

		//test whether samples are inliers (more strict than before)
		Feature::computeInliersAndError(inlier, inlier_error, nullptr,
			sample_matches_vector, transformation,
			earlier->feature_pts_3d, now->feature_pts_3d,
			squared_max_dist_m);
		if (inlier_error > 1000) continue; //most possibly a false match in the samples

		Feature::computeInliersAndError(inlier, inlier_error, nullptr,
			initial_matches, transformation,
			earlier->feature_pts_3d, now->feature_pts_3d,
			squared_max_dist_m);

		if (inlier.size() < min_inlier_threshold || inlier_error > max_dist_m)
		{
			continue;
		}

		valid_iterations++;

		//Performance hacks:
		///Iterations with more than half of the initial_matches inlying, count twice
		if (inlier.size() > initial_matches.size() * 0.5) n_iter++;
		///Iterations with more than 80% of the initial_matches inlying, count threefold
		if (inlier.size() > initial_matches.size() * 0.8) n_iter++;

		if (inlier_error < best_error)
		{ //copy this to the result
			if (pcc != nullptr)
			{
				Eigen::Vector3f t = transformation.topRightCorner(3, 1);
				Eigen::Matrix<float, 3, 3, Eigen::RowMajor> rot = transformation.topLeftCorner(3, 3);
				pcc->getCoresp(t, rot, information, pc, pcorrc, threads, blocks);
			}

			result_transform = transformation;
			result_information = information;
			point_count = pc;
			point_corr_count = pcorrc;
			if (matches != nullptr) *matches = inlier;
			best_inlier_cnt = inlier.size();
			rmse = inlier_error;
			best_error = inlier_error;
		}

		float new_inlier_error;

		transformation = Feature::getTransformFromMatches(valid,
			earlier->feature_pts_3d, now->feature_pts_3d, inlier); // compute new trafo from all inliers:
		if (transformation != transformation) continue; //Contains NaN
		Feature::computeInliersAndError(inlier, new_inlier_error, nullptr,
			initial_matches, transformation,
			earlier->feature_pts_3d, now->feature_pts_3d,
			squared_max_dist_m);

		if(inlier.size() < min_inlier_threshold || new_inlier_error > max_dist_m)
		{
			continue;
		}

		if (new_inlier_error < best_error) 
		{
			if (pcc != nullptr)
			{
				Eigen::Vector3f t = transformation.topRightCorner(3, 1);
				Eigen::Matrix<float, 3, 3, Eigen::RowMajor> rot = transformation.topLeftCorner(3, 3);
				pcc->getCoresp(t, rot, information, pc, pcorrc, threads, blocks);
			}

			result_transform = transformation;
			if (pcc != nullptr)
				result_information = information;
			point_count = pc;
			point_corr_count = pcorrc;
			if (matches != nullptr) *matches = inlier;
			best_inlier_cnt = inlier.size();
			rmse = new_inlier_error;
			best_error = new_inlier_error;
		}
	} //iterations
	
	if (pcc == nullptr)
		result_information = Eigen::Matrix<double, 6, 6>::Identity();
	return best_inlier_cnt >= min_inlier_threshold;
}
Developer: weeks-yu, Project: WReg, Lines of code: 101, Source file: Feature.cpp

Example 11: run

void MainController::run()
{
    while(!pangolin::ShouldQuit() && !((!logReader->hasMore()) && quiet) && !(eFusion->getTick() == end && quiet))
    {
        if(!gui->pause->Get() || pangolin::Pushed(*gui->step))
        {
            if((logReader->hasMore() || rewind) && eFusion->getTick() < end)
            {
                TICK("LogRead");
                if(rewind)
                {
                    if(!logReader->hasMore())
                    {
                        logReader->getBack();
                    }
                    else
                    {
                        logReader->getNext();
                    }

                    if(logReader->rewound())
                    {
                        logReader->currentFrame = 0;
                    }
                }
                else
                {
                    logReader->getNext();
                }
                TOCK("LogRead");

                if(eFusion->getTick() < start)
                {
                    eFusion->setTick(start);
                    logReader->fastForward(start);
                }

                float weightMultiplier = framesToSkip + 1;

                if(framesToSkip > 0)
                {
                    eFusion->setTick(eFusion->getTick() + framesToSkip);
                    logReader->fastForward(logReader->currentFrame + framesToSkip);
                    framesToSkip = 0;
                }

                Eigen::Matrix4f * currentPose = 0;

                if(groundTruthOdometry)
                {
                    currentPose = new Eigen::Matrix4f;
                    currentPose->setIdentity();
                    *currentPose = groundTruthOdometry->getIncrementalTransformation(logReader->timestamp);
                }

                eFusion->processFrame(logReader->rgb, logReader->depth, logReader->timestamp, currentPose, weightMultiplier);

                if(currentPose)
                {
                    delete currentPose;
                }

                if(frameskip && Stopwatch::getInstance().getTimings().at("Run") > 1000.f / 30.f)
                {
                    framesToSkip = int(Stopwatch::getInstance().getTimings().at("Run") / (1000.f / 30.f));
                }
            }
        }
        else
        {
            eFusion->predict();
        }

        TICK("GUI");

        if(gui->followPose->Get())
        {
            pangolin::OpenGlMatrix mv;

            Eigen::Matrix4f currPose = eFusion->getCurrPose();
            Eigen::Matrix3f currRot = currPose.topLeftCorner(3, 3);

            Eigen::Quaternionf currQuat(currRot);
            Eigen::Vector3f forwardVector(0, 0, 1);
            Eigen::Vector3f upVector(0, iclnuim ? 1 : -1, 0);

            Eigen::Vector3f forward = (currQuat * forwardVector).normalized();
            Eigen::Vector3f up = (currQuat * upVector).normalized();

            Eigen::Vector3f eye(currPose(0, 3), currPose(1, 3), currPose(2, 3));

            eye -= forward;

            Eigen::Vector3f at = eye + forward;

            Eigen::Vector3f z = (eye - at).normalized();  // Forward
            Eigen::Vector3f x = up.cross(z).normalized(); // Right
            Eigen::Vector3f y = z.cross(x);

            Eigen::Matrix4d m;
//......... (remainder of the code omitted) .........
Developer: HarveyLiuFly, Project: ElasticFusion, Lines of code: 101, Source file: MainController.cpp

Example 12: addPoint

	void DiriniVisualizer::addPoint(Eigen::Vector3f point, Eigen::Matrix4f transform_)
	{
		points.push_back(transform_.topLeftCorner(3,3)*point+transform_.topRightCorner(3,1));
	}
Developer: mwuethri, Project: dirini, Lines of code: 4, Source file: dirini_visualizer.cpp


Note: The eigen::Matrix4f::topLeftCorner method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or using the code; do not repost without permission.