

C++ Mat_::t Method Code Examples

This article collects typical usage examples of the C++ Mat_::t method. If you have been wondering how Mat_::t is used in C++, what it does, or want to see it in real code, the curated method examples below should help. You can also explore further usage examples of the Mat_ class it belongs to.


The following presents 12 code examples of the Mat_::t method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
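Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what Mat_::t does: it returns a transpose expression that is evaluated when assigned or used in matrix arithmetic.

#include <opencv2/core/core.hpp>
#include <iostream>

using namespace cv;

int main()
{
    // A 2x3 matrix; t() yields its 3x2 transpose.
    Mat_<float> A = (Mat_<float>(2, 3) << 1, 2, 3,
                                          4, 5, 6);
    Mat_<float> At = A.t();       // the expression is evaluated on assignment
    std::cout << At << std::endl;

    // t() is most often combined with multiplication, e.g. the Gram matrix A^T * A:
    Mat_<float> G = A.t() * A;    // 3x3
    std::cout << G << std::endl;
    return 0;
}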

Example 1:

/**
 * @author      JIA Pei
 * @version     2010-02-05
 * @brief       Calculate statistics for all profiles; compute every landmark's mean profile and covariance matrix
 * @return      void
*/
void VO_ASMNDProfiles::VO_CalcStatistics4AllProfiles()
{
    // Calculate the inverse of Sg
    for(unsigned int i = 0; i < this->m_iNbOfPyramidLevels; i++)
    {
        Mat_<float> allProfiles = Mat_<float>::zeros( this->m_iNbOfSamples, this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength() );
        Mat_<float> Covar = Mat_<float>::zeros(this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength(), 
                                                this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength() );
        Mat_<float> Mean = Mat_<float>::zeros(1, this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength() );
        for(unsigned int j = 0; j < this->m_iNbOfPoints; j++)
        {
            for(unsigned int k = 0; k < this->m_iNbOfProfileDim; k++)
            {
                for(unsigned int l = 0; l < this->m_iNbOfSamples; l++)
                {
                    Mat_<float> tmpRow = allProfiles.row(l);
                    Mat_<float> tmp = this->m_vvvNormalizedProfiles[l][i][j].GetTheProfile().col(k).t();
                    tmp.copyTo(tmpRow);
                }

                // Now calculate the covariance matrix of the profiles for landmark j
                cv::calcCovarMatrix( allProfiles, Covar, Mean, CV_COVAR_NORMAL+CV_COVAR_ROWS+CV_COVAR_SCALE, CV_32F);
//                cv::calcCovarMatrix( allProfiles, Covar, Mean, CV_COVAR_SCRAMBLED+CV_COVAR_ROWS, CV_32F);
                this->m_vvMeanNormalizedProfile[i][j].Set1DimProfile(Mean.t(), k);

                // Explained by YAO Wei, 2008-1-29.
                // The covariance matrix is positive semi-definite, but it is not
                // guaranteed to be invertible! Since C = X.t() * X, it may well be
                // singular, which is why DECOMP_SVD (pseudo-inverse) is used below.
                cv::invert(Covar, this->m_vvvCVMInverseOfSg[i][j][k], DECOMP_SVD);
            }
        }
    }
}
Author: HVisionSensing, Project: mc-vosm, Lines of code: 40, Source file: VO_ASMNDProfiles.cpp
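The DECOMP_SVD flag above matters precisely because of the concern in the comment: cv::invert with DECOMP_SVD computes the Moore-Penrose pseudo-inverse when the input is singular, and its return value (the ratio of the smallest to the largest singular value) is 0 in that case. A minimal sketch, independent of the project above:

#include <opencv2/core/core.hpp>
#include <iostream>

using namespace cv;

int main()
{
    // A rank-1, hence singular, covariance-like matrix C = x * x^T.
    Mat_<float> x = (Mat_<float>(3, 1) << 1, 2, 3);
    Mat_<float> C = x * x.t();

    Mat_<float> Cinv;
    double rc = cv::invert(C, Cinv, DECOMP_SVD); // rc == 0 signals a singular input
    std::cout << "condition ratio: " << rc << "\npseudo-inverse:\n" << Cinv << std::endl;
    return 0;
}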

Example 2: calcKeyPointProjections

static void calcKeyPointProjections( const vector<KeyPoint>& src, const Mat_<double>& H, vector<KeyPoint>& dst )
{
    if(  !src.empty() )
    {
        CV_Assert( !H.empty() && H.cols == 3 && H.rows == 3);
        dst.resize(src.size());
        vector<KeyPoint>::const_iterator srcIt = src.begin();
        vector<KeyPoint>::iterator       dstIt = dst.begin();
        for( ; srcIt != src.end(); ++srcIt, ++dstIt )
        {
            Point2f dstPt = applyHomography(H, srcIt->pt);

            float srcSize2 = srcIt->size * srcIt->size;
            Mat_<double> M(2, 2);
            M(0,0) = M(1,1) = 1./srcSize2;
            M(1,0) = M(0,1) = 0;
            Mat_<double> invM; invert(M, invM);
            Mat_<double> Aff; linearizeHomographyAt(H, srcIt->pt, Aff);
            Mat_<double> dstM; invert(Aff*invM*Aff.t(), dstM);
            Mat_<double> eval; eigen( dstM, eval );
            CV_Assert( eval(0,0) && eval(1,0) );
            float dstSize = (float)pow(1./(eval(0,0)*eval(1,0)), 0.25);

            // TODO: check angle projection
            float srcAngleRad = (float)(srcIt->angle*CV_PI/180);
            Point2f vec1(cos(srcAngleRad), sin(srcAngleRad)), vec2;
            vec2.x = (float)(Aff(0,0)*vec1.x + Aff(0,1)*vec1.y);
            vec2.y = (float)(Aff(1,0)*vec1.x + Aff(1,1)*vec1.y); // second row of Aff: vec2 = Aff * vec1
            float dstAngleGrad = fastAtan2(vec2.y, vec2.x);

            *dstIt = KeyPoint( dstPt, dstSize, dstAngleGrad, srcIt->response, srcIt->octave, srcIt->class_id );
        }
    }
}
Author: 93sam, Project: opencv, Lines of code: 34, Source file: detector_descriptor_evaluation.cpp

Example 3: getBestCandidate

Matx33d FivePoints::getBestCandidate(std::vector< Matx33d > candidates, Mat_<double> HQ1, Mat_<double> HQ2)
{
    Matx33d best_candidate;
    Mat_<double> candidates_error_matrix(HQ1.cols, HQ1.cols);
    double min_candidates_error = 1000;
    double candidate_error;

    // choose best E based on reprojection error
    // e = Q1'*E*Q2
    for(int k=0; k<candidates.size(); ++k)
    {
        candidates_error_matrix = HQ1.t()*Mat_<double>(candidates[k])*HQ2;

        candidate_error = 0;
        for(int n=0; n<HQ1.cols; ++n)
            candidate_error += candidates_error_matrix(n,n);

        if( fabs(candidate_error) < min_candidates_error )
        {
            min_candidates_error = fabs(candidate_error);
            best_candidate = candidates[k];
        }
    }

    return best_candidate;
}
Author: ktiwari9, Project: projects, Lines of code: 26, Source file: FivePointsWrapper.cpp
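The inner loop above accumulates the diagonal of the error matrix, i.e. its trace. A sketch of an equivalent formulation using cv::trace (illustrative only; the parameter names mirror the function above):

#include <opencv2/core/core.hpp>

// Equivalent residual: sum of the diagonal entries of Q1^T * E * Q2.
double candidateError(const cv::Mat_<double>& HQ1, const cv::Matx33d& E,
                      const cv::Mat_<double>& HQ2)
{
    cv::Mat_<double> M = HQ1.t() * cv::Mat_<double>(E) * HQ2; // N x N
    return cv::trace(M)[0];
}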

Example 4: worldHomToCameraHom

Mat_<double> worldHomToCameraHom(
    Mat_<double> const worldPtsHom,
    Mat_<double> const rotMatrix,
    Mat_<double> const translation)
{
    assert(worldPtsHom.cols == 4);
    assert(rotMatrix.rows == 3);
    assert(rotMatrix.cols == 3);
    assert(translation.rows == 3);
    assert(translation.cols == 1);

    // Convert rotMatrix + translation into a linear transformation in
    // homogeneous coordinates.
    Mat_<double> rigidMotion = Mat_<double>::zeros(4, 4);
    rotMatrix.copyTo(rigidMotion(Range(0, 3), Range(0, 3)));
    translation.copyTo(rigidMotion(Range(0, 3), Range(3, 4)));
    rigidMotion(3, 3) = 1;
    // cout << "rigidMotion: " << rigidMotion << endl;

    // Assuming camera calibration matrix is identity.
    // Note that OpenCV treats size as "[cols rows]", so matrix multiplication
    // has to be done backwards (transpose everything).
    Mat_<double> projection = Mat_<double>::eye(3, 4);
    Mat result = projection * rigidMotion * worldPtsHom.t();
    return result.t();
}
Author: EliteUAV, Project: drones-287, Lines of code: 26, Source file: Geometry.cpp
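A usage sketch under the function's stated assumptions (identity calibration matrix; the values are illustrative): each input row is a homogeneous world point [X Y Z 1], and each output row a homogeneous image point [x y w].

// Two world points 5 units in front of the camera.
Mat_<double> worldPtsHom = (Mat_<double>(2, 4) << 0, 0, 5, 1,
                                                  1, 0, 5, 1);
Mat_<double> R = Mat_<double>::eye(3, 3);            // no rotation
Mat_<double> tvec = (Mat_<double>(3, 1) << 0, 0, 0); // no translation
Mat_<double> camPtsHom = worldHomToCameraHom(worldPtsHom, R, tvec); // 2x3
// Dividing each row by its last entry w gives image coordinates (0,0) and (0.2,0).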

Example 5: GetPupilPosition

Point3f GetPupilPosition(Mat_<double> eyeLdmks3d){
	
	eyeLdmks3d = eyeLdmks3d.t();

	Mat_<double> irisLdmks3d = eyeLdmks3d.rowRange(0,8);

	Point3f p (mean(irisLdmks3d.col(0))[0], mean(irisLdmks3d.col(1))[0], mean(irisLdmks3d.col(2))[0]);
	return p;
}
Author: AndreySheka, Project: CLM-framework, Lines of code: 9, Source file: GazeEstimation.cpp

Example 6: calcProjection

void EllipticKeyPoint::calcProjection( const Mat_<double>& H, EllipticKeyPoint& projection ) const
{
    Point2f dstCenter = applyHomography(H, center);

    Mat_<double> invM; invert(getSecondMomentsMatrix(), invM);
    Mat_<double> Aff; linearizeHomographyAt(H, center, Aff);
    Mat_<double> dstM; invert(Aff*invM*Aff.t(), dstM);

    projection = EllipticKeyPoint( dstCenter, Scalar(dstM(0,0), dstM(0,1), dstM(1,1)) );
}
Author: RebUT, Project: REBUT, Lines of code: 10, Source file: evaluation.cpp

示例7: while

void *processFrame (void *arg) {

    int thread_id = *(int *)arg;
    while(do_work()) {

        // The associated mutex must be held by the caller around pthread_cond_wait.
        pthread_mutex_lock(&thread_cond_mutexes[thread_id]);
        pthread_cond_wait(&thread_cond[thread_id], &thread_cond_mutexes[thread_id]);
        pthread_mutex_unlock(&thread_cond_mutexes[thread_id]);
        setThreadBusy (thread_id);

        Mat frame = thread_frames[thread_id];
        Mat_<double> imagePts;
        Mat_<double> simplePose;

        imagePts = detectCorners(frame);
        bool success = (imagePts.rows > 0);
        if(success) {
            imagePts = calibrateImagePoints(imagePts);
            std_msgs::Float64MultiArray cornersMsg = makeCornersMsg(imagePts);
            cornersPub.publish(cornersMsg);
            simplePose = estimatePose(imagePts);

            std::cout << simplePose.t() << std::endl;

            std_msgs::Float64MultiArray simplePoseMsg = \
                makeSimplePoseMsg(simplePose);
            simplePosePub.publish(simplePoseMsg);
//            ROS_INFO("Tick.");
        } else {
            // Don't publish anything this tick.
            ROS_INFO("Could not detect all corners!");
        }

        frame.release();
        setThreadIdle (thread_id);
    }
    ROS_INFO("Thread %d exiting.", thread_id);    pthread_exit(NULL);
    
}
Author: lzl200102109, Project: pose_estimator, Lines of code: 37, Source file: PoseEstimator.cpp

Example 8: findEssentialMatrix

void findEssentialMatrix(MFramePair& pair, Mat_<double> K) {
    vector<Point2f> k1, k2,n1,n2;
    vector<int> usedIndex1,usedIndex2;
    vector<uchar> status(pair.imgpts1.size());
    Mat F = findFundamentalMat(pair.matchPts1, pair.matchPts2, CV_FM_RANSAC, 0.2, 0.9,
                               status);
    Mat_<double> E;
    E = K.t() * F * K; //according to HZ (9.12)
    for (unsigned int i = 0; i < status.size(); i++) { // queryIdx is the "left" image
        if (status[i]) {
            usedIndex1.push_back(pair.matchedIndex1[i]);
            k1.push_back(pair.matchPts1[i]);
            usedIndex2.push_back(pair.matchedIndex2[i]);
            k2.push_back(pair.matchPts2[i]);
        }
    }
    correctMatches(F,k1,k2,n1,n2);
    pair.matchPts1 = n1;
    pair.matchPts2 = n2;
    pair.matchedIndex1=usedIndex1;
    pair.matchedIndex2=usedIndex2;
    pair.F = F;
    pair.E = E;
}
Author: caomw, Project: AndroidSFMDemo, Lines of code: 24, Source file: Utils.cpp
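E = K.t() * F * K follows Hartley & Zisserman eq. (9.12). A small sanity-check sketch (an assumed helper, not part of the project above): for a true pixel correspondence, the epipolar constraint x2^T * F * x1 should be close to zero.

#include <opencv2/core/core.hpp>

// Residual of the epipolar constraint for one pixel correspondence (p1, p2).
double epipolarResidual(const cv::Mat& F, cv::Point2f p1, cv::Point2f p2)
{
    cv::Mat_<double> x1 = (cv::Mat_<double>(3, 1) << p1.x, p1.y, 1.0);
    cv::Mat_<double> x2 = (cv::Mat_<double>(3, 1) << p2.x, p2.y, 1.0);
    return cv::Mat_<double>(x2.t() * F * x1)(0, 0); // ~0 for inlier matches
}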

Example 9: AddNextFrame

void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const CLMTracker::CLM& clm_model, double timestamp_seconds, bool online, bool visualise)
{
	// Check if a reset is needed first (TODO same person no reset)
	//if(face_bounding_box.area() > 0)
	//{
	//	Rect_<double> new_bounding_box = clm.GetBoundingBox();

	//	// If the box overlaps do not need a reset
	//	double intersection_area = (face_bounding_box & new_bounding_box).area();
	//	double union_area = face_bounding_box.area() + new_bounding_box.area() - 2 * intersection_area;

	//	// If the model is already tracking what we're detecting ignore the detection, this is determined by amount of overlap
	//	if( intersection_area/union_area < 0.5)
	//	{
	//		this->Reset();
	//	}

	//	face_bounding_box = new_bounding_box;
	//}
	//if(!clm.detection_success)
	//{
	//	this->Reset();
	//}

	frames_tracking++;

	// First align the face if tracking was successful
	if(clm_model.detection_success)
	{
		AlignFaceMask(aligned_face, frame, clm_model, triangulation, true, align_scale, align_width, align_height);
	}
	else
	{
		aligned_face = Mat(align_height, align_width, CV_8UC3);
		aligned_face.setTo(0);
	}

	if(aligned_face.channels() == 3)
	{
		cvtColor(aligned_face, aligned_face_grayscale, CV_BGR2GRAY);
	}
	else
	{
		aligned_face_grayscale = aligned_face.clone();
	}

	// Extract HOG descriptor from the frame and convert it to a useable format
	Mat_<double> hog_descriptor;
	Extract_FHOG_descriptor(hog_descriptor, aligned_face, this->num_hog_rows, this->num_hog_cols);

	// Store the descriptor
	hog_desc_frame = hog_descriptor;

	Vec3d curr_orient(clm_model.params_global[1], clm_model.params_global[2], clm_model.params_global[3]);
	int orientation_to_use = GetViewId(this->head_orientations, curr_orient);

	// Only update the running median if predictions are not high
	// That is don't update it when the face is expressive (just retrieve it)
	bool update_median = true;

	// TODO test if this would be useful or not
	//if(!this->AU_predictions.empty())
	//{
	//	for(size_t i = 0; i < this->AU_predictions.size(); ++i)
	//	{
	//		if(this->AU_predictions[i].second > 1)
	//		{
	//			update_median = false;				
	//			break;
	//		}
	//	}
	//}

	update_median = update_median & clm_model.detection_success;

	// A small speedup
	if(frames_tracking % 2 == 1)
	{
		UpdateRunningMedian(this->hog_desc_hist[orientation_to_use], this->hog_hist_sum[orientation_to_use], this->hog_desc_median, hog_descriptor, update_median, this->num_bins_hog, this->min_val_hog, this->max_val_hog);
	}	
	// Geom descriptor and its median
	geom_descriptor_frame = clm_model.params_local.t();
	
	if(!clm_model.detection_success)
	{
		geom_descriptor_frame.setTo(0);
	}

	// Stack with the actual feature point locations (without mean)
	Mat_<double> locs = clm_model.pdm.princ_comp * geom_descriptor_frame.t();
	
	cv::hconcat(locs.t(), geom_descriptor_frame.clone(), geom_descriptor_frame);
	
	// A small speedup
	if(frames_tracking % 2 == 1)
	{
		UpdateRunningMedian(this->geom_desc_hist, this->geom_hist_sum, this->geom_descriptor_median, geom_descriptor_frame, update_median, this->num_bins_geom, this->min_val_geom, this->max_val_geom);
	}

	// First convert the face image to double representation as a row vector
//......... some code omitted here .........
Author: wearebase, Project: gaze-detection-android-app, Lines of code: 101, Source file: FaceAnalyser.cpp
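The pattern around geom_descriptor_frame is worth isolating: a column vector of local shape parameters is projected through the PCA basis, then both pieces are flattened with t() into a single feature row. A sketch with illustrative names (princ_comp and p stand in for clm_model.pdm.princ_comp and clm_model.params_local):

#include <opencv2/core/core.hpp>

// Stack landmark offsets and raw parameters into one 1 x (n + m) feature row.
cv::Mat_<double> stackGeomDescriptor(const cv::Mat_<double>& princ_comp, // n x m PCA basis
                                     const cv::Mat_<double>& p)          // m x 1 parameters
{
    cv::Mat_<double> locs = princ_comp * p;   // n x 1 landmark offsets (mean removed)
    cv::Mat_<double> locsRow = locs.t();      // 1 x n
    cv::Mat_<double> pRow = p.t();            // 1 x m
    cv::Mat_<double> desc;
    cv::hconcat(locsRow, pRow, desc);         // 1 x (n + m)
    return desc;
}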

Example 10: computePoseDifference

void computePoseDifference(Mat img1, Mat img2, CommandArgs args, Mat k, Mat& dist_coefficients, double& worldScale, Mat& R, Mat& t, Mat& img_matches)
{
   cout << "%===============================================%" << endl;

   Mat camera_matrix = k.clone();
   if (args.resize_factor > 1) 
   {
      resize(img1, img1, Size(img1.cols / args.resize_factor, 
               img1.rows / args.resize_factor)); // make smaller for performance and displayablity
      resize(img2, img2, Size(img2.cols / args.resize_factor,
               img2.rows / args.resize_factor));
      // scale matrix down according to changed resolution
      camera_matrix = camera_matrix / args.resize_factor;
      camera_matrix.at<double>(2,2) = 1;
   }

   Mat K1, K2;
   K1 = K2 = camera_matrix;
   // It is assumed the camera was calibrated in landscape mode, so undistortion must
   // also be performed in landscape orientation, or the camera matrix must be
   // modified (fx,fy and cx,cy need to be exchanged).
   if (img1.rows > img1.cols)
   {
      swap(K1.at<double>(0,0), K1.at<double>(1,1));
      swap(K1.at<double>(0,2), K1.at<double>(1,2));
   }
   if (img2.rows > img2.cols)
   {
      swap(K2.at<double>(0,0), K2.at<double>(1,1));
      swap(K2.at<double>(0,2), K2.at<double>(1,2));
   }

   // Feature detection + extraction
   vector<KeyPoint> KeyPoints_1, KeyPoints_2;
   Mat descriptors_1, descriptors_2;

   Ptr<Feature2D> feat_detector;
   if (args.detector == DETECTOR_KAZE) 
   {
      feat_detector = AKAZE::create(args.detector_data.upright ? AKAZE::DESCRIPTOR_MLDB_UPRIGHT : AKAZE::DESCRIPTOR_MLDB, 
            args.detector_data.descriptor_size,
            args.detector_data.descriptor_channels,
            args.detector_data.threshold,
            args.detector_data.nOctaves,
            args.detector_data.nOctaveLayersAkaze);

   } else if (args.detector == DETECTOR_SURF)
   {
      feat_detector = xfeatures2d::SURF::create(args.detector_data.minHessian, 
            args.detector_data.nOctaves, args.detector_data.nOctaveLayersAkaze, args.detector_data.extended, args.detector_data.upright);
   } else if (args.detector == DETECTOR_SIFT)
   {
      feat_detector = xfeatures2d::SIFT::create(args.detector_data.nFeatures, 
            args.detector_data.nOctaveLayersSift, args.detector_data.contrastThreshold, args.detector_data.sigma);
   }

   feat_detector->detectAndCompute(img1, noArray(), KeyPoints_1, descriptors_1);
   feat_detector->detectAndCompute(img2, noArray(), KeyPoints_2, descriptors_2);

   cout << "Number of feature points (img1, img2): " << "(" << KeyPoints_1.size() << ", " << KeyPoints_2.size() << ")" << endl;

   // Find correspondences
   BFMatcher matcher;
   vector<DMatch> matches;
   if (args.use_ratio_test) 
   {
      if (args.detector == DETECTOR_KAZE) 
         matcher = BFMatcher(NORM_HAMMING, false);
      else matcher = BFMatcher(NORM_L2, false);

      vector<vector<DMatch>> match_candidates;
      const float ratio = args.ratio;
      matcher.knnMatch(descriptors_1, descriptors_2, match_candidates, 2);
      for (int i = 0; i < match_candidates.size(); i++)
         if (match_candidates[i][0].distance < ratio * match_candidates[i][1].distance)
            matches.push_back(match_candidates[i][0]);

      cout << "Number of matches passing ratio test: " << matches.size() << endl;

   } else
   {
      if (args.detector == DETECTOR_KAZE) 
         matcher = BFMatcher(NORM_HAMMING, true);
      else matcher = BFMatcher(NORM_L2, true);
      matcher.match(descriptors_1, descriptors_2, matches);
      cout << "Number of matching feature points: " << matches.size() << endl;
   }


   // Convert correspondences to vectors
   vector<Point2f>imgpts1,imgpts2;

   for(unsigned int i = 0; i < matches.size(); i++) 
   {
      imgpts1.push_back(KeyPoints_1[matches[i].queryIdx].pt); 
      imgpts2.push_back(KeyPoints_2[matches[i].trainIdx].pt); 
   }

   Mat mask; // inlier mask
   if (args.undistort) 
   {
      undistortPoints(imgpts1, imgpts1, K1, dist_coefficients, noArray(), K1);
      undistortPoints(imgpts2, imgpts2, K2, dist_coefficients, noArray(), K2);
//......... some code omitted here .........
Author: AnnKatrinBecker, Project: OpenCV-test-crap, Lines of code: 101, Source file: stereo_v3.cpp

Example 11: DrawBox

// Need to move this all to opengl
void DrawBox(Mat image, Vec6d pose, Scalar color, int thickness, float fx, float fy, float cx, float cy)
{
	float boxVerts[] = {-1, 1, -1,
						1, 1, -1,
						1, 1, 1,
						-1, 1, 1,
						1, -1, 1,
						1, -1, -1,
						-1, -1, -1,
						-1, -1, 1};
	Mat_<float> box = Mat(8, 3, CV_32F, boxVerts).clone() * 100;


	Matx33f rot = Euler2RotMat(Vec3d(pose[3], pose[4], pose[5]));
	Mat_<float> rotBox;
	
	Mat((Mat(rot) * box.t())).copyTo(rotBox);
	rotBox = rotBox.t();

	rotBox.col(0) = rotBox.col(0) + pose[0];
	rotBox.col(1) = rotBox.col(1) + pose[1];
	rotBox.col(2) = rotBox.col(2) + pose[2];

	// draw the lines
	Mat_<float> rotBoxProj;
	Project(rotBoxProj, rotBox, image.size(), fx, fy, cx, cy);
	
	// Draw the 12 box edges; each pair indexes two rows of rotBoxProj.
	const int edges[12][2] = { {0,1}, {1,2}, {2,3}, {0,3}, {2,4}, {1,5},
	                           {0,6}, {3,7}, {6,5}, {5,4}, {4,7}, {7,6} };

	Mat begin;
	Mat end;
	for (int i = 0; i < 12; ++i)
	{
		rotBoxProj.row(edges[i][0]).copyTo(begin);
		rotBoxProj.row(edges[i][1]).copyTo(end);
		cv::line(image, Point((int)begin.at<float>(0), (int)begin.at<float>(1)),
			Point((int)end.at<float>(0), (int)end.at<float>(1)), color, thickness);
	}


}
Author: AshwinRajendraprasad, Project: FacialAgeSynthesis, Lines of code: 83, Source file: SimpleCLM.cpp
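The double transpose in DrawBox (Mat(rot) * box.t(), then .t() again) is the standard trick for applying a 3x3 rotation to a point list stored one point per row. A standalone sketch of that pattern:

#include <opencv2/core/core.hpp>

// Rotate an N x 3 row-per-point matrix: the rotation expects column vectors,
// so transpose in, multiply, and transpose back out.
cv::Mat_<float> rotatePoints(const cv::Matx33f& R, const cv::Mat_<float>& pts)
{
    cv::Mat_<float> rotated = (cv::Mat(R) * pts.t()).t(); // still N x 3
    return rotated;
}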

Example 12: main

// main function.
int main(int argc, char** argv) {

	// image processing
	Mat imgOriginal;
	Mat imgGrayscale;
	Mat imgThresholded;
	Mat imgFeature;
	Mat imgTracked;

	// image processing
	while (true) {

		// load the image
		imgOriginal = imread(argv[1], CV_LOAD_IMAGE_COLOR);
		imgFeature = imgOriginal;
		if(! imgOriginal.data )
		{
			cout << "Could not open or find the image" << std::endl;
			return -1;
		}

		// threshold original image.
		ThresholdImage(imgOriginal, imgGrayscale, imgThresholded);

		bool method = false;	// false: SVD (method 2, runs by default); true: simple geometry (method 1)
		if (method) {
			/* METHOD 1: SIMPLE GEOMETRY */

			// get image points
			Mat_<double> imagePts = getFeatureVector(imgThresholded, imgFeature);
			imagePts = calibrateImagePoints(imagePts); // reuse instead of recomputing the feature vector
//			cout << "imgPts = " << endl << imagePts << endl << endl;

			// get world points
			Mat_<double> worldPts = getWorldPts();
//			cout << "worldPts = " << endl << worldPts << endl << endl;

			// estimate pose
			Mat_<double> simplePose = estimatePose_GEO(imagePts, worldPts);
			cout << "simplePose = " << endl << simplePose.t() << endl;

		} else {
			/* METHOD 2: SINGULAR VALUE DECOMPOSITION */

			// get image points
			Mat_<double> imagePts = calibrateImagePoints(getFeatureVector(imgThresholded, imgFeature));
//			cout << "imgPts = " << endl << imagePts << endl << endl;

			// get world points
			Mat_<double> worldPts = getWorldPts();

//			cout << "imgPts = " << endl << imagePts*f << endl << endl;
//			cout << "worldPts = " << endl << worldPts << endl << endl;


			// estimate pose
			Mat_<double> simplePose = estimatePose_SVD(imagePts, worldPts);
			cout << "simplePose = " << endl << simplePose.t() << endl;

		}


		// show images.
//		imshow("Original Image", imgOriginal); //show the original image
//		imshow("Grayscale Image", imgGrayscale); //show the original image
//		imshow("Thresholded Image", imgThresholded); //show the thresholded image
		imshow("Featured Image", imgFeature); //show the thresholded image

		// Press ESC to exit.
		if (waitKey(30) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
				{
			cout << "esc key is pressed by user" << endl;
			break;
		}
	}
	return 0;
}
Author: lzl200102109, Project: shape_detector, Lines of code: 78, Source file: shape_detector_image.cpp


Note: The Mat_::t method examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For redistribution and use, please refer to the corresponding project's license; do not reproduce without permission.