

C++ Mat::inv Method Code Examples

This page collects typical usage examples of the C++ method cv::Mat::inv. If you are wondering what Mat::inv does exactly, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples of the cv::Mat class.


The following presents 15 code examples of the Mat::inv method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.

Example 1: compNewPos

cv::Mat ConstraintsHelpers::compNewPos(cv::Mat lprevImu, cv::Mat lcurImu,
									cv::Mat lprevEnc, cv::Mat lcurEnc,
									cv::Mat lposOrigMapCenter,
									cv::Mat lmapCenterOrigGlobal,
									ros::NodeHandle &nh)
{
	Mat ret = Mat::eye(4, 4, CV_32FC1);
	if(!lposOrigMapCenter.empty() && !lmapCenterOrigGlobal.empty()){
		//cout << "compOrient(lcurImu) = " << compOrient(lcurImu) << endl;
		//cout << "lmapCenterGlobal = " << lmapCenterGlobal << endl;
		//cout << "Computing curPos" << endl;
		//cout << "encodersCur - encodersPrev = " << encodersCur - encodersPrev << endl;
		// encoder displacement rotated by the previous IMU orientation, then
		// re-expressed in the map-centre frame
		Mat trans = lmapCenterOrigGlobal.inv()*compTrans(compOrient(lprevImu), lcurEnc - lprevEnc, nh);
		//cout << "trans = " << trans << endl;

		// current translation: previous position (4th column) plus the increment
		Mat curTrans = Mat(lposOrigMapCenter, Rect(3, 0, 1, 4)) + trans;

		// current orientation from the IMU, expressed in the map-centre frame
		Mat curRot = lmapCenterOrigGlobal.inv()*compOrient(lcurImu);
		//cout << "curRot = " << curRot << endl;

		curRot.copyTo(ret);                           // rotation part (4x4)
		curTrans.copyTo(Mat(ret, Rect(3, 0, 1, 4)));  // overwrite translation column
	}
	return ret;
}
Developer: mzdunek93 | Project: TAPAS-ros | Lines: 26 | Source: ConstraintsHelpers.cpp
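
The Rect(3, 0, 1, 4) used above selects the fourth column of the 4x4 pose matrix (x = 3, y = 0, width = 1, height = 4), i.e. its translation part. A minimal sketch of this column-view idiom, independent of the surrounding helper functions:

cv::Mat T = cv::Mat::eye(4, 4, CV_32FC1);          // homogeneous transform
cv::Mat t = (cv::Mat_<float>(4, 1) << 1, 2, 3, 1); // translation column
t.copyTo(cv::Mat(T, cv::Rect(3, 0, 1, 4)));        // write column 3 in place
cv::Mat col = T(cv::Rect(3, 0, 1, 4));             // view onto the same column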

Example 2: setIntrinsic

void CameraHandler::setIntrinsic(const cv::Mat &K)
{
  if (K.rows != 3 ||
      K.cols != 3)
    return; // require a 3x3 intrinsic matrix
  _cam_param.K = K;
  _cam_param.K_inv = K.inv();
}
Developer: artivis | Project: ros_camera_rig | Lines: 8 | Source: camera_handler.cpp
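
A brief usage sketch, assuming a default-constructible CameraHandler; the intrinsic values are illustrative:

cv::Mat K = (cv::Mat_<double>(3, 3) << 525.0,   0.0, 319.5,
                                         0.0, 525.0, 239.5,
                                         0.0,   0.0,   1.0);
CameraHandler handler;
handler.setIntrinsic(K); // stores K and caches K.inv() for later back-projection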

Example 3: get_rotation_error

// Computes the norm of the rotation error
double get_rotation_error(const cv::Mat &R_true, const cv::Mat &R)
{
  cv::Mat error_vec, error_mat;
  error_mat = R_true * R.inv();        // relative rotation between ground truth and estimate
  cv::Rodrigues(error_mat, error_vec); // axis-angle vector; its norm is the angle

  return cv::norm(error_vec);
}
Developer: 13983441921 | Project: opencv | Lines: 9 | Source: Utils.cpp
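
The value computed is the geodesic angle between the two rotations. Since a rotation matrix is orthonormal (R^{-1} = R^T), the error is

\theta = \lVert \mathrm{rodrigues}(R_{true} \, R^{-1}) \rVert ,

where rodrigues(.) denotes the axis-angle vector of a rotation matrix.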

Example 4: pose_cam

PoseRT PoseRT::obj2cam(const cv::Mat &Rt_obj2cam)
{
  Mat projectiveMatrix_obj = getProjectiveMatrix();
  Mat projectiveMatrix_cam = Rt_obj2cam * projectiveMatrix_obj * Rt_obj2cam.inv(DECOMP_SVD);

  PoseRT pose_cam(projectiveMatrix_cam);
  return pose_cam;
}
Developer: WalkingMachine | Project: sara_commun | Lines: 8 | Source: poseRT.cpp
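
In matrix terms this is a change of coordinate frame by conjugation: writing T = Rt_obj2cam,

P_{cam} = T \, P_{obj} \, T^{-1} ,

so a projective matrix expressed in object coordinates is re-expressed in camera coordinates; inv(DECOMP_SVD) is the pseudo-inverse variant, which is more robust for near-singular matrices.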

Example 5: calc_homography

void calc_homography(Mat& image) {
    vector<Point2f> real_corners;
    calcChessboardCorners2D(BOARD_SIZE, SQUARE_SIZE, real_corners);

    vector<Point2f> pointBuf;
    bool found = findChessboardCorners(image, BOARD_SIZE, pointBuf,
                                       CALIB_CB_ADAPTIVE_THRESH |
                                       CALIB_CB_FAST_CHECK  |
                                       CV_CALIB_CB_FILTER_QUADS);
    // CALIB_CB_NORMALIZE_IMAGE

    // if (found)
    // {
    //     // May lead to drifting corners
    //     Mat viewGray;
    //     cvtColor(image, viewGray, CV_BGR2GRAY);
    //     cornerSubPix(viewGray, pointBuf, Size(11,11),
    //           Size(-1,-1), TermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1));
    // }
    drawChessboardCorners(image, BOARD_SIZE, Mat(pointBuf), found);
    ROS_INFO("[CALC_HOMOGRAPHY] Found board: %d", found);

    if (found) {
        ROS_DEBUG_STREAM("Number of calc. points: " << pointBuf.size() << std::endl <<
                         pointBuf << std::endl);
        ROS_DEBUG_STREAM("Number of found points: " << real_corners.size() << std::endl
                         << real_corners << std::endl);

        H = cv::findHomography(pointBuf, real_corners);

        ROS_INFO_STREAM("[CALC_HOMOGRAPHY] H:\n" << H);

        // Save to file
        std::string path = ros::package::getPath("netcam_stream");
        FileStorage fs(path + "/calibration/homography" + to_str(CAMERA_ID) + ".yaml",
                       FileStorage::WRITE);
        fs << "H_" + to_str(CAMERA_ID) << H;
        fs.release();

        // Output to terminal in a formatted way:
        for (int i = 0; i < 3; ++i)
            for (int j = 0; j < 3; ++j)
                std::cout << "H" << i << j << " " << H.at<double>(i, j) << std::endl;

        ROS_INFO("Matrix Inverse:");
        H_inv = H.inv();
        // Output to terminal in a formatted way:
        for (int i = 0; i < 3; ++i)
            for (int j = 0; j < 3; ++j)
                std::cout << "H" << i << j << " " << H_inv.at<double>(i, j) << std::endl;
    }

    imshow("Calc Board", image);
    waitKey(10);

}
Developer: ipab-rad | Project: netcam_stream | Lines: 56 | Source: homography.cpp
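
A sketch of how the stored homographies might be used afterwards, assuming H maps image pixels to board-plane coordinates as fitted above:

std::vector<cv::Point2f> img_pts = { cv::Point2f(320.f, 240.f) };
std::vector<cv::Point2f> board_pts, back_pts;
cv::perspectiveTransform(img_pts, board_pts, H);      // image pixels -> board plane
cv::perspectiveTransform(board_pts, back_pts, H_inv); // board plane -> image pixels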

Example 6: compareCov

////////////////////////////////////////////////////////////////////////////////////////////////////////
//compare covariance matrices
float compareCov(cv::Mat a, cv::Mat b)
{
	cv::Mat eigen_values;
	// note: cv::eigen expects a symmetric input, while a.inv() * b is in
	// general not symmetric, so the result is only approximate in that case
	cv::eigen(a.inv() * b, eigen_values);
	cv::Mat ln;
	cv::Mat ln2;
	cv::log(eigen_values, ln);
	cv::multiply(ln,ln,ln2); // element-wise square of the logs
	cv::Scalar temp = cv::sum(ln2);
	return temp[0];
}
Developer: lbl1985 | Project: toolbox_c | Lines: 13 | Source: in_out_exist_utility.cpp
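
The returned value is the squared affine-invariant distance between symmetric positive-definite matrices,

d^2(A, B) = \sum_i \ln^2 \lambda_i(A^{-1} B) ,

where the \lambda_i are the generalized eigenvalues of the pair (A, B).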

Example 7: probability

double Gauss::probability(const Vec3f &mean, const cv::Mat &covmat, Vec3f color){
    double mul = 0;
    Mat miu(1,3,CV_64FC1); // row vector (color - mean)
    Mat ans(1,1,CV_64FC1);
    miu.at<double>(0,0) = color[0] - mean[0];
    miu.at<double>(0,1) = color[1] - mean[1];
    miu.at<double>(0,2) = color[2] - mean[2];
    ans = miu * covmat.inv() * miu.t(); // squared Mahalanobis distance
    mul = (-0.5)*ans.at<double>(0,0);
    return 1.0f/sqrt(determinant(covmat))*exp(mul); // density up to a constant factor
}
Developer: a554b554 | Project: myGrabcut | Lines: 11 | Source: BorderMattingHandler.cpp
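
For reference, the full trivariate normal density is

p(x) = (2\pi)^{-3/2} \, |\Sigma|^{-1/2} \exp\left(-\tfrac{1}{2}(x-\mu)^T \Sigma^{-1} (x-\mu)\right) ;

the code evaluates it up to the constant factor (2\pi)^{-3/2}, which is harmless when the values are only compared against each other or renormalized later.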

Example 8: gaussian

 // Bivariate normal density N(mu, cov) evaluated at x
 double DARP::gaussian(const cv::Mat & x,
         const cv::Mat & mu,
         const cv::Mat & cov)
 {
     cv::Mat diff=x-mu;
     double det=determinant(cov);
     double aux=(0.5/M_PI)*(1.0/sqrt(det));
     
     cv::Mat exponent=-0.5*diff.t()*cov.inv()*diff;
     
     return aux*exp(exponent.at<double>(0));
 }
Developer: vislab-tecnico-lisboa | Project: HumanAwareness | Lines: 12 | Source: resource_allocation.cpp
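
A quick usage sketch (the DARP instance and the values are illustrative; construction details are omitted):

DARP darp; // hypothetical instance
cv::Mat x   = (cv::Mat_<double>(2, 1) << 1.0, 2.0);
cv::Mat mu  = (cv::Mat_<double>(2, 1) << 1.0, 2.0);
cv::Mat cov = (cv::Mat_<double>(2, 2) << 4.0, 0.0,
                                         0.0, 1.0);
// at x == mu the exponent is 0, so p = (0.5/M_PI)/sqrt(4.0) = 1/(4*pi) ~ 0.0796
double p = darp.gaussian(x, mu, cov);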

Example 9: RenderPose

    unsigned long RenderPose(cv::Mat& image, cv::Mat& rot_3x3_CfromO, cv::Mat& trans_3x1_CfromO)
    {
        cv::Mat object_center(3, 1, CV_64FC1);
        double* p_object_center = object_center.ptr<double>(0);
        p_object_center[0] = 0;
        p_object_center[1] = 0;
        p_object_center[2] = 0;

        cv::Mat rot_inv = rot_3x3_CfromO.inv(); // note: not used below

        // Compute coordinate axes for visualization
        cv::Mat pt_axis(4, 3, CV_64FC1);
        double* p_pt_axis = pt_axis.ptr<double>(0);
        p_pt_axis[0] = 0 + p_object_center[0];
        p_pt_axis[1] = 0 + p_object_center[1];
        p_pt_axis[2] = 0 + p_object_center[2];
        p_pt_axis = pt_axis.ptr<double>(1);
        p_pt_axis[0] = 0.1 + p_object_center[0];
        p_pt_axis[1] = 0 + p_object_center[1];
        p_pt_axis[2] = 0 + p_object_center[2];
        p_pt_axis = pt_axis.ptr<double>(2);
        p_pt_axis[0] = 0 + p_object_center[0];
        p_pt_axis[1] = 0.1 + p_object_center[1];
        p_pt_axis[2] = 0 + p_object_center[2];
        p_pt_axis = pt_axis.ptr<double>(3);
        p_pt_axis[0] = 0 + p_object_center[0];
        p_pt_axis[1] = 0 + p_object_center[1];
        p_pt_axis[2] = 0.1 + p_object_center[2];

        // Transform data points
        std::vector<cv::Point> vec_2d(4, cv::Point());
        for (int i=0; i<4; i++)
        {
            cv::Mat vec_3d = pt_axis.row(i).clone();
            vec_3d = vec_3d.t();
            vec_3d = rot_3x3_CfromO*vec_3d;
            vec_3d += trans_3x1_CfromO;
            double* p_vec_3d = vec_3d.ptr<double>(0);

            ReprojectXYZ(p_vec_3d[0], p_vec_3d[1], p_vec_3d[2],
                         vec_2d[i].x , vec_2d[i].y);
        }

        // Render results
        int line_width = 1;
        cv::line(image, vec_2d[0], vec_2d[1], cv::Scalar(0, 0, 255), line_width);
        cv::line(image, vec_2d[0], vec_2d[2], cv::Scalar(0, 255, 0), line_width);
        cv::line(image, vec_2d[0], vec_2d[3], cv::Scalar(255, 0, 0), line_width);

        return ipa_Utils::RET_OK;
    }
Developer: renxi-cu | Project: cob_object_perception | Lines: 51 | Source: fiducials.cpp

Example 10: computeDistance

void PoseRT::computeDistance(const PoseRT &pose1, const PoseRT &pose2, double &rotationDistance, double &translationDistance, const cv::Mat &Rt_obj2cam)
{
  Mat Rt_diff_cam = pose1.getProjectiveMatrix() * pose2.getProjectiveMatrix().inv(DECOMP_SVD);
  Mat Rt_diff_obj = Rt_diff_cam;
  if (!Rt_obj2cam.empty())
  {
    Rt_diff_obj = Rt_obj2cam.inv(DECOMP_SVD) * Rt_diff_cam * Rt_obj2cam;
  }

  Mat rvec_diff_obj, tvec_diff_obj;
  getRvecTvec(Rt_diff_obj, rvec_diff_obj, tvec_diff_obj);
  rotationDistance = norm(rvec_diff_obj);
  translationDistance = norm(tvec_diff_obj);
}
Developer: WalkingMachine | Project: sara_commun | Lines: 14 | Source: poseRT.cpp
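
The relative pose is formed in the camera frame and, when Rt_obj2cam is supplied, conjugated into the object frame: writing T = Rt_obj2cam,

\Delta_{cam} = P_1 \, P_2^{-1} , \qquad \Delta_{obj} = T^{-1} \, \Delta_{cam} \, T ;

the rvec and tvec parts of \Delta_{obj} then give the rotation and translation distances.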

Example 11: mMask

ApplicationContext::ApplicationContext(const cv::Mat& mask, const cv::Mat& homography, float fps, float pixelToMeters, DrawingFlags drawingFlags, BlobTrackerAlgorithmParams algoParams, bool recordBGS, InputFrameProviderIface* frameProvider)
: mMask(mask)
, mHomography(homography)
, mInverseHomography(homography.inv())
, mFPS(fps)
, mSecondByFrame(1/fps)
, mMetersByPixel(1/pixelToMeters)
, mPixelToMeters(pixelToMeters)
, mTracker(nullptr)
, mObjectDatabaseManager(nullptr)
, mDrawingFlags(drawingFlags)
, mCurrentId(0)
, mTrackerParams(algoParams)
, mRecordingBGS(recordBGS)
, mFrameProvider(frameProvider)
, mBGSPath("bgs/")
{
	
}
Developer: yzbx | Project: opencv-qt | Lines: 19 | Source: ApplicationContext.cpp
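
One caveat with storing cv::Mat members by value like mMask(mask): the cv::Mat copy constructor is shallow, so the member shares its pixel buffer with the caller's matrix. If the object must own an independent copy:

cv::Mat shared = mask;          // shallow: both headers reference the same pixel buffer
cv::Mat owned  = mask.clone();  // deep copy: independent buffer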

Example 12: applyS

cv::Mat applyS(cv::Mat H, double u1, cv::Mat thetaMatrix, cv::Mat p, bool inverse = false) {
//	std::cout << "\n" << "[applyS] Start" << "\n";
	p = p / p.at<double>(2, 0);

	cv::Mat transformedPoint = cv::Mat::zeros(2, 1, CV_64F);
	transformedPoint.at<double>(0, 0) = p.at<double>(0, 0);
	transformedPoint.at<double>(1, 0) = p.at<double>(1, 0);
	
	transformedPoint = thetaMatrix.inv() * transformedPoint;
//	std::cout << "\n" << "[applyS] Changing point coords" << "\n";
	
	cv::Mat leftyMatrix;
	double leftyMatrixTmp[2][2] = {
		{H.at<double>(1, 1), H.at<double>(0, 1)},
		{-H.at<double>(0, 1), H.at<double>(1, 1)}
	};
	
	cv::Mat(2, 2, CV_64F, &leftyMatrixTmp).copyTo(leftyMatrix);
	
	cv::Mat rightMatrix;
	double rightMatrixTmp[2][1] = {
		{ (H.at<double>(0, 0) - H.at<double>(1, 1)) * u1 + H.at<double>(0, 2) },
		{ (H.at<double>(1, 0) + H.at<double>(0, 1)) * u1 + H.at<double>(1, 2) }
	};
	cv::Mat(2, 1, CV_64F, &rightMatrixTmp).copyTo(rightMatrix);
	
	double multiplier = 1 / (1 + H.at<double>(2, 0) * u1);
	
	if(inverse) {
		leftyMatrix.at<double>(1,1) *= 0.7673;
		leftyMatrix = leftyMatrix.inv();
		rightMatrix = rightMatrix * -1;
		
		multiplier = (1 / multiplier);
	}
	
	
	cv::Mat output = multiplier * ((leftyMatrix * transformedPoint) + rightMatrix);
	output = thetaMatrix * output;
	
	return output;
}
Developer: grillorafael | Project: images-reconstruction-class | Lines: 42 | Source: main.cpp

Example 13: main

int main( int argc, char **argv )
{
	if(argc<4) {
		usage(argc,argv);
		return 1;
	}
	is = helper::createImageSource(argv[1]);
	if(is.empty() || is->done()) {
		loglne("[main] createImageSource failed or no valid imagesource!");
		return -1;
	}
	is->pause(false);
	is->reportInfo();
	is->get(frame);
	imgW = frame.cols; imgH = frame.rows;
	videoFromWebcam = false;
	if( is->classname() == "ImageSource_Camera" ) {
		videoFromWebcam = true;
	}

	loglni("[main] loading K matrix from: "<<argv[2]);
	double K[9];
	std::ifstream kfile(argv[2]);
	for(int i=0; i<9; ++i) kfile >> K[i];
	tracker.loadK(K);
	loglni("[main] K matrix loaded:");
	loglni(helper::PrintMat<>(3,3,K));

	loglni("[main] load template image from: "<<argv[3]);
	tracker.loadTemplate(argv[3]);

	//////////////// TagDetector /////////////////////////////////////////
	int tagid = 0; //default tag16h5
	if(argc>5) tagid = atoi(argv[5]);
	tagFamily = TagFamilyFactory::create(tagid);
	if(tagFamily.empty()) {
		loglne("[main] create TagFamily fail!");
		return -1;
	}
	detector = new TagDetector(tagFamily);
	if(detector.empty()) {
		loglne("[main] create TagDetector fail!");
		return -1;
	}
	Mat temp = imread(argv[3]);
	if( findAprilTag(temp, 0, HI, true) ) {
		namedWindow("template");
		imshow("template", temp);
		iHI = HI.inv();
	} else {
		loglne("[main error] detector did not find any apriltag on template image!");
		return -1;
	}

	//////////////// OSG ////////////////////////////////////////////////
	osg::ref_ptr<osg::Group> root = new osg::Group;

	string scenefilename = (argc>4?argv[4]:("cow.osg"));
	osg::ref_ptr<osg::Node> cow = osgDB::readNodeFile(scenefilename);
	arscene = new helper::ARSceneRoot;
	helper::FixMat<3,double>::Type matK = helper::FixMat<3,double>::ConvertType(K);
	CV2CG::cv2cg(matK,0.01,500,imgW,imgH,*arscene);
	manipMat = new osg::MatrixTransform(osg::Matrix::identity());
	manipMat->addChild(cow);
	manipMat->getOrCreateStateSet()->setMode(GL_NORMALIZE, osg::StateAttribute::ON);
	arscene->addChild(manipMat);

	osg::ref_ptr<osg::Image> backgroundImage = new osg::Image;
	helper::cvmat2osgimage(frame,backgroundImage);
	arvideo = new helper::ARVideoBackground(backgroundImage);
	root->setUpdateCallback(new ARUpdateCallback);

	root->addChild(arvideo);
	root->addChild(arscene);

	viewer.setSceneData(root);
	viewer.addEventHandler(new osgViewer::StatsHandler);
	viewer.addEventHandler(new osgViewer::WindowSizeHandler);
	viewer.addEventHandler(new QuitHandler);

	//start tracking thread
	OpenThreads::Thread::Init();
	TrackThread* thr = new TrackThread;
	thr->start();

	viewer.run();

	delete thr;
	loglni("[main] DONE...exit!");
	return 0;
}
Developer: andrewjchen | Project: ethzasl_apriltag | Lines: 91 | Source: main.cpp

Example 14: halfProjectiveWarp

cv::Mat halfProjectiveWarp(cv::Mat H, cv::Mat image0, int u1, cv::Mat thetaMatrixTmp, bool left) {
	std::cout << "\n" << "[halfProjectiveWarp] Start" << "\n";
	cv::Mat thetaMatrix = cv::Mat::zeros(3, 3, CV_64F);
	thetaMatrix.at<double>(0, 0) = thetaMatrixTmp.at<double>(0, 0);
	thetaMatrix.at<double>(1, 0) = thetaMatrixTmp.at<double>(1, 0);
	thetaMatrix.at<double>(0, 1) = thetaMatrixTmp.at<double>(0, 1);
	thetaMatrix.at<double>(1, 1) = thetaMatrixTmp.at<double>(1, 1);
	
	thetaMatrix.at<double>(2, 0) = 0;
	thetaMatrix.at<double>(2, 1) = 0;
	thetaMatrix.at<double>(2, 2) = 1;
	thetaMatrix.at<double>(0, 2) = 0;
	thetaMatrix.at<double>(1, 2) = 0;
	
	cv::Size imageSize = image0.size();
	cv::Mat output;
	
	std::cout << "\n" << "[halfProjectiveWarp] Calculating bounds" << "\n";
	cv::Mat p00 = cv::Mat::zeros(3, 1, CV_64F);
	p00.at<double>(2, 0) = 1;
	p00 = H * p00;
	p00 /= p00.at<double>(2, 0); // normalize homogeneous coordinates
	
	cv::Mat p01 = cv::Mat::zeros(3, 1, CV_64F);
	p01.at<double>(1, 0) = imageSize.height - 1;
	p01.at<double>(2, 0) = 1;
	p01 = H * p01;
	p01 /= p01.at<double>(2, 0); // normalize homogeneous coordinates
	
	cv::Mat p10 = cv::Mat::zeros(3, 1, CV_64F);
	p10.at<double>(0, 0) = imageSize.width - 1;
	p10.at<double>(2, 0) = 1;
	p10 = applyS(H, u1, thetaMatrixTmp, p10);

	cv::Mat p11 = cv::Mat::zeros(3, 1, CV_64F);
	p11.at<double>(0, 0) = imageSize.width - 1;
	p11.at<double>(1, 0) = imageSize.height - 1;
	p11.at<double>(2, 0) = 1;
	p11 = applyS(H, u1, thetaMatrixTmp, p11);
	
	std::cout << "\n" <<  "[halfProjectiveWarp] Calculating new image size" << "\n";
	int height = std::max(p01.at<double>(1, 0), p11.at<double>(1, 0)) - std::min(p00.at<double>(1, 0), p10.at<double>(1, 0));
	int width = std::max(p10.at<double>(0, 0), p11.at<double>(0, 0)) - std::min(p00.at<double>(0, 0), p10.at<double>(0, 0));

	int minX = std::min(p00.at<double>(0, 0), p10.at<double>(0, 0));
	int maxX = std::max(p10.at<double>(0, 0), p11.at<double>(0, 0));
	
	int minY = std::min(p00.at<double>(1, 0), p10.at<double>(1, 0));
	int maxY = std::max(p01.at<double>(1, 0), p11.at<double>(1, 0));
	
	cv::Size newSize = cv::Size(width, height);
	std::cout << "\n" <<  newSize << "\n";
	output = cv::Mat::zeros(newSize, CV_8UC3);
	
	cv::Mat pointTmp = cv::Mat::zeros(2, 1, CV_64F);
	cv::Mat pointTmp3d = cv::Mat::zeros(3, 1, CV_64F);
	pointTmp3d.at<double>(2, 0) = 1;

	for (int x = 0; x < width; x++) {
		for (int y = 0; y < height; y++) {
			pointTmp.at<double>(0, 0) = x + minX;
			pointTmp.at<double>(1, 0) = y + minY;
			
			pointTmp = thetaMatrixTmp.inv() * pointTmp; // 2x2 inverse; the 3x3 thetaMatrix cannot multiply a 2x1 point
			
			pointTmp3d.at<double>(0, 0) = pointTmp.at<double>(0, 0);
			pointTmp3d.at<double>(1, 0) = pointTmp.at<double>(1, 0);

			cv::Mat correspondingPoint;
			if(pointTmp.at<double>(0, 0) < u1) {
				// Apply H
				correspondingPoint = H.inv() * pointTmp3d;
				correspondingPoint = correspondingPoint / correspondingPoint.at<double>(2, 0);
				correspondingPoint = thetaMatrix * correspondingPoint;
			}
			else {
				// Apply S inverse
				correspondingPoint = applyS(H, u1, thetaMatrixTmp, thetaMatrix * pointTmp3d, true);
			}
			
			if(correspondingPoint.at<double>(0, 0) > 0 && correspondingPoint.at<double>(0, 0) < image0.size().width &&
			   correspondingPoint.at<double>(1, 0) > 0 && correspondingPoint.at<double>(1, 0) < image0.size().height) {
				output.at<cv::Vec3b>(y, x) = image0.at<cv::Vec3b>(correspondingPoint.at<double>(1, 0), correspondingPoint.at<double>(0, 0));
			}
		}
	}
	
//	for (int x = 0; x < image0.size().width; x++) {
//		for (int y = 0; y < image0.size().height; y++) {
//			pointTmp.at<double>(0, 0) = x;
//			pointTmp.at<double>(1, 0) = y;
//			
//			pointTmp = thetaMatrix.inv() * pointTmp;
//			
//			pointTmp3d.at<double>(0, 0) = pointTmp.at<double>(0, 0);
//			pointTmp3d.at<double>(1, 0) = pointTmp.at<double>(1, 0);
//			
//			cv::Mat correspondingPoint = applyS(H, u1, thetaMatrixTmp, pointTmp3d);
//			correspondingPoint.at<double>(0, 0) += minX;
//			correspondingPoint.at<double>(1, 0) += minY;
//
//			cv::Mat correspondingPointUV = thetaMatrix.inv() * correspondingPoint;
//......... part of the code is omitted here .........
Developer: grillorafael | Project: images-reconstruction-class | Lines: 101 | Source: main.cpp
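
The pixel loop above is an inverse warp: each destination pixel q is mapped back into the source image and sampled there, which avoids holes in the output,

I_{out}(q) = I_0\left(W^{-1}(q)\right) ,

where W is the projective map H for u < u1 and the similarity map S beyond it.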

Example 15: extractLineDepth

void Node::extractLineDepth(const cv::Mat& depth_float, const cv::Mat& K, 
	double ratio_of_collinear_pts, double line_3d_len_thres_m, double depth_scaling)
	// extract the 3d info of a frame line, if available, from the depth image
	// input: depth, lines
	// output: lines with 3d info
{		
	int depth_CVMatDepth = depth_float.depth();
	for(int i=0; i<lines.size(); ++i)	{ // each line
		double len = cv::norm(lines[i].p - lines[i].q);		
		vector<cv::Point3d> pts3d;
		// iterate through a line
   		double numSmp = min(max(len/sysPara.line_sample_interval, (double)sysPara.line_sample_min_num), (double)sysPara.line_sample_max_num);  // smaller numSmp for fast speed, larger numSmp should be more accurate
		pts3d.reserve(numSmp);
		for(int j=0; j<=numSmp; ++j) {
			// use nearest neighbor to querry depth value
			// assuming position (0,0) is the top-left corner of image, then the
			// top-left pixel's center would be (0.5,0.5)
			cv::Point2d pt = lines[i].p * (1-j/numSmp) + lines[i].q * (j/numSmp);
			if(pt.x<0 || pt.y<0 || pt.x >= depth_float.cols || pt.y >= depth_float.rows ) continue;
			int row, col; // nearest pixel for pt
			if((floor(pt.x) == pt.x) && (floor(pt.y) == pt.y)) {// boundary issue
				col = max(int(pt.x-1),0);
				row = max(int(pt.y-1),0);
			} else {
				col = int(pt.x);
				row = int(pt.y);
			}
			double zval = -1;
			double depval = 0; // avoid reading with the wrong element type below
			if(depth_CVMatDepth == CV_32F)
				depval = depth_float.at<float>(row,col);
			else if (depth_CVMatDepth == CV_64F)
				depval = depth_float.at<double>(row,col);
			else {
				cerr<<"Node::extractLineDepth: depth image matrix type is not float/double\n";
				exit(0);
			}
			if(depval >= EPS) { // otherwise no depth info; zval stays -1
				zval = depval/depth_scaling; // in meters, z-value
			}

			// back-project the pixel onto the camera ray at depth zval
			if (zval > 0 ) {
				cv::Point2d xy3d = mat2cvpt(K.inv()*cvpt2mat(pt))*zval;	
				pts3d.push_back(cv::Point3d(xy3d.x, xy3d.y, zval));
			}
		}
		//if (pts3d.size() < max(10.0,len*ratio_of_collinear_pts))
		if (pts3d.size() < max(5.0, numSmp *ratio_of_collinear_pts))
			continue;

		RandomLine3d tmpLine;		

#ifdef EXTRACTLINE_USE_MAHDIST
		vector<RandomPoint3d> rndpts3d;
		rndpts3d.reserve(pts3d.size());
		// compute uncertainty of 3d points
		for(int j=0; j<pts3d.size();++j) {
			rndpts3d.push_back(compPt3dCov(pts3d[j], K, asynch_time_diff_sec_));
		}
		// using ransac to extract a 3d line from 3d pts
		tmpLine = extract3dline_mahdist(rndpts3d);
		
#else
		tmpLine = extract3dline(pts3d);
#endif
		if(tmpLine.pts.size()/numSmp > ratio_of_collinear_pts	&&
			cv::norm(tmpLine.A - tmpLine.B) > line_3d_len_thres_m) {
				MLEstimateLine3d(tmpLine, sysPara.line3d_mle_iter_num);//expensive, postpone to after line matching
				lines[i].haveDepth = true;
				lines[i].line3d = tmpLine;
				
		}		
		lines[i].line3d.pts.clear();
	}	

}
Developer: caomw | Project: LineSLAM | Lines: 79 | Source: lineslam.cpp
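
The 3d recovery is the standard pinhole back-projection: a pixel p = (u, v) with depth z maps to

X = z \cdot K^{-1} (u, v, 1)^T ,

which is what mat2cvpt(K.inv()*cvpt2mat(pt))*zval computes for the x and y components, with zval supplying z.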


Note: The cv::Mat::inv method examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; the copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not repost without permission.