C++ Mat_::empty Method Code Examples

This article collects typical usage examples of the C++ Mat_::empty method from open-source projects. If you are wondering what Mat_::empty does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the Mat_ class that this method belongs to.


Seven code examples of the Mat_::empty method are shown below, sorted by popularity by default.
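As a quick orientation before the examples, here is a minimal standalone sketch, not taken from any of the projects below and assuming OpenCV's cv::Mat_ (Example 4 instead uses a separate Mat_<_Tp, chs> template from the fbc library): empty() returns true while the matrix has no allocated data, which is why the examples use it to validate inputs such as homographies or histograms before touching them.

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat_<double> H;                                     // default-constructed: no data allocated yet
    std::cout << std::boolalpha << H.empty() << std::endl;  // prints "true"

    H = cv::Mat_<double>::eye(3, 3);                        // allocate a 3x3 identity matrix
    std::cout << H.empty() << std::endl;                    // prints "false"
    return 0;
}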

Example 1: calcKeyPointProjections

static void calcKeyPointProjections( const vector<KeyPoint>& src, const Mat_<double>& H, vector<KeyPoint>& dst )
{
    if(  !src.empty() )
    {
        CV_Assert( !H.empty() && H.cols == 3 && H.rows == 3);
        dst.resize(src.size());
        vector<KeyPoint>::const_iterator srcIt = src.begin();
        vector<KeyPoint>::iterator       dstIt = dst.begin();
        for( ; srcIt != src.end(); ++srcIt, ++dstIt )
        {
            Point2f dstPt = applyHomography(H, srcIt->pt);

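            // Represent the keypoint's support region as a circle of radius srcIt->size, map it through
            // the local affine approximation of H, and use the geometric mean of the resulting ellipse's
            // semi-axes as the projected size.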
            float srcSize2 = srcIt->size * srcIt->size;
            Mat_<double> M(2, 2);
            M(0,0) = M(1,1) = 1./srcSize2;
            M(1,0) = M(0,1) = 0;
            Mat_<double> invM; invert(M, invM);
            Mat_<double> Aff; linearizeHomographyAt(H, srcIt->pt, Aff);
            Mat_<double> dstM; invert(Aff*invM*Aff.t(), dstM);
            Mat_<double> eval; eigen( dstM, eval );
            CV_Assert( eval(0,0) && eval(1,0) );
            float dstSize = (float)pow(1./(eval(0,0)*eval(1,0)), 0.25);

            // TODO: check angle projection
            float srcAngleRad = (float)(srcIt->angle*CV_PI/180);
            Point2f vec1(cos(srcAngleRad), sin(srcAngleRad)), vec2;
            vec2.x = (float)(Aff(0,0)*vec1.x + Aff(0,1)*vec1.y);
            vec2.y = (float)(Aff(1,0)*vec1.x + Aff(1,1)*vec1.y); // second row of the matrix-vector product: Aff(1,1), not Aff(0,1)
            float dstAngleGrad = fastAtan2(vec2.y, vec2.x);

            *dstIt = KeyPoint( dstPt, dstSize, dstAngleGrad, srcIt->response, srcIt->octave, srcIt->class_id );
        }
    }
}
Developer: 93sam, Project: opencv, Lines: 34, Source: detector_descriptor_evaluation.cpp

Example 2: UpdatePredictionTrack

void FaceAnalyser::UpdatePredictionTrack(Mat_<unsigned int>& prediction_corr_histogram, int& prediction_correction_count, vector<double>& correction, const vector<pair<string, double>>& predictions, double ratio, int num_bins, double min_val, double max_val, int min_frames)
{
	double length = max_val - min_val;
	if(length < 0)
		length = -length;

	correction.resize(predictions.size(), 0);

	// Running-quantile update (the median when ratio == 0.5), estimated from per-dimension histograms
	if(prediction_corr_histogram.empty())
	{
		prediction_corr_histogram = Mat_<unsigned int>(predictions.size(), num_bins, (unsigned int)0);
	}
	
	for(int i = 0; i < prediction_corr_histogram.rows; ++i)
	{
		// Find the bins corresponding to the current descriptor
		int index = (predictions[i].second - min_val)*((double)num_bins)/(length);
		if(index < 0)
		{
			index = 0;
		}
		else if(index > num_bins - 1)
		{
			index = num_bins - 1;
		}
		prediction_corr_histogram.at<unsigned int>(i, index)++;
	}

	// Update the histogram count
	prediction_correction_count++;

	if(prediction_correction_count >= min_frames)
	{
		// Recompute the correction
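		// cutoff_point is the rank of the requested quantile; scanning each histogram until the cumulative
		// count exceeds it gives that quantile's bin value, which becomes the correction.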
		int cutoff_point = ratio * prediction_correction_count;

		// For each dimension
		for(int i = 0; i < prediction_corr_histogram.rows; ++i)
		{
			int cumulative_sum = 0;
			for(int j = 0; j < prediction_corr_histogram.cols; ++j)
			{
				cumulative_sum += prediction_corr_histogram.at<unsigned int>(i, j);
				if(cumulative_sum > cutoff_point)
				{
					double corr = min_val + j * (length/num_bins);
					correction[i] = corr;
					break;
				}
			}
		}
	}
}
Developer: wearebase, Project: gaze-detection-android-app, Lines: 54, Source: FaceAnalyser.cpp

Example 3: calcProjection

void EllipticKeyPoint::calcProjection( const vector<EllipticKeyPoint>& src, const Mat_<double>& H, vector<EllipticKeyPoint>& dst )
{
    if( !src.empty() )
    {
        assert( !H.empty() && H.cols == 3 && H.rows == 3);
        dst.resize(src.size());
        vector<EllipticKeyPoint>::const_iterator srcIt = src.begin();
        vector<EllipticKeyPoint>::iterator       dstIt = dst.begin();
        for( ; srcIt != src.end(); ++srcIt, ++dstIt )
            srcIt->calcProjection(H, *dstIt);
    }
}
Developer: RebUT, Project: REBUT, Lines: 12, Source: evaluation.cpp

Example 4: copyMakeBorder

int copyMakeBorder(/*const*/ Mat_<_Tp, chs>& src, Mat_<_Tp, chs>& dst, int top, int bottom, int left, int right, int borderType, const Scalar& value = Scalar())
{
	FBC_Assert(top >= 0 && bottom >= 0 && left >= 0 && right >= 0);

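	// If src is an ROI inside a larger matrix and BORDER_ISOLATED is not set, grow the ROI so that
	// real pixels from the parent matrix are reused instead of synthesizing a border there.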
	if (src.isSubmatrix() && (borderType & BORDER_ISOLATED) == 0) {
		Size wholeSize;
		Point ofs;
		src.locateROI(wholeSize, ofs);
		int dtop = std::min(ofs.y, top);
		int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
		int dleft = std::min(ofs.x, left);
		int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
		src.adjustROI(dtop, dbottom, dleft, dright);
		top -= dtop;
		left -= dleft;
		bottom -= dbottom;
		right -= dright;
	}

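	// (Re)allocate dst only if it is empty or its size does not match the padded source.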
	if (dst.empty() || dst.rows != (src.rows + top + bottom) || dst.cols != (src.cols + left + right)) {
		dst.release();
		dst = Mat_<_Tp, chs>(src.rows + top + bottom, src.cols + left + right);
	}

	if (top == 0 && left == 0 && bottom == 0 && right == 0) {
		if (src.data != dst.data || src.step != dst.step)
			src.copyTo(dst);
		return 0;
	}

	borderType &= ~BORDER_ISOLATED;

	if (borderType != BORDER_CONSTANT) {
		copyMakeBorder_8u(src.ptr(), src.step, src.size(), dst.ptr(), dst.step, dst.size(), top, left, src.elemSize(), borderType);
	} else {
		int cn = src.channels, cn1 = cn;
		AutoBuffer<double> buf(cn);

		scalarToRawData<_Tp, chs>(value, buf, cn);
		copyMakeConstBorder_8u(src.ptr(), src.step, src.size(), dst.ptr(), dst.step, dst.size(), top, left, (int)src.elemSize(), (uchar*)(double*)buf);
	}

	return 0;
}
Developer: fengbingchun, Project: OpenCV_Test, Lines: 44, Source: imgproc.hpp

Example 5: main

int main (int argc, char **argv)
{

	vector<string> arguments = get_arguments(argc, argv);

	// Some initial parameters that can be overridden from the command line
	vector<string> files, dDirs, outposes, outvideos, outfeatures;
	
	// By default try webcam
	int device = 0;

	// cx and cy are not always half of dimx/dimy, so they can be overridden (they start at 0 and are set to the image centre later if not specified)
    float fx = 500, fy = 500, cx = 0, cy = 0;
	int dimx = 0, dimy = 0;

	bool useCLMTracker = true;
	
	CLMWrapper::CLMParameters clmParams(arguments);
	
	clmParams.wSizeCurrent = clmParams.wSizeInit;

    PoseDetectorHaar::PoseDetectorHaarParameters haarParams;

	#if OS_UNIX
    haarParams.ClassifierLocation = "classifiers/haarcascade_frontalface_alt.xml";
	#else
		haarParams.ClassifierLocation = "classifiers/haarcascade_frontalface_alt.xml";
	#endif
		
	// Get the input output file parameters
	CLMWrapper::get_video_input_output_params(files, dDirs, outposes, outvideos, outfeatures, arguments);
	// Get camera parameters
	CLMWrapper::get_camera_params(fx, fy, cx, cy, dimx, dimy, arguments);    
	
	// The modules that are being used for tracking
	CLMTracker::TrackerCLM clmModel;	
	
	// Face detector initialisation
	CascadeClassifier classifier(haarParams.ClassifierLocation);
	if(classifier.empty())
	{
		string err = "Could not open a face detector at: " + haarParams.ClassifierLocation;
		FATAL_STREAM( err );
	}

	bool done = false;
	
	int f_n = -1;

	while(!done)
	{
		string file;

		// We might specify multiple video files as arguments
		if(files.size() > 0)
		{
			f_n++;			
		    file = files[f_n];
		}

		bool readDepth = !dDirs.empty();	

		// Do some grabbing
		VideoCapture vCap;
		if( file.size() > 0 )
		{
			INFO_STREAM( "Attempting to read from file: " << file );
			vCap = VideoCapture( file );
		}
		else
		{
			INFO_STREAM( "Attempting to capture from device: " << device );
			vCap = VideoCapture( device );

			// Read a first frame often empty in camera
			Mat img;
			vCap >> img;
		}

		if( !vCap.isOpened() ) FATAL_STREAM( "Failed to open video source" );
		else INFO_STREAM( "Device or file opened");

		Mat img;
		vCap >> img;

		// If no dimensions defined, do not do any resizing
		if(dimx == 0 || dimy == 0)
		{
			dimx = img.cols;
			dimy = img.rows;
		}
	
		// If optical centers are not defined just use center of image
		if(cx == 0 || cy == 0)
		{
			cx = dimx / 2.0f;
			cy = dimy / 2.0f;
		}
	
		// Creating output files
//......... part of the code omitted here .........
Developer: AshwinRajendraprasad, Project: FacialAgeSynthesis, Lines: 101, Source: SimpleCLM.cpp

Example 6: main

int main (int argc, char **argv)
{

	//Convert arguments to more convenient vector form
	vector<string> arguments = get_arguments(argc, argv);

	// Some initial parameters that can be overridden from the command line
	vector<string> files, dFiles, outimages, outfeaturess;

	// these can be overridden using the -cx, -cy, -fx, -fy, -dimx, -dimy flags
	float fx = 500, fy = 500, cx = 0, cy = 0;
	int dimx = 0, dimy = 0;
	
	// initial translation, rotation and scale (can be specified by the user)
	vector<Vec6d> initial_poses;

	bool useCLMTracker = true;

	// Get camera parameters
	CLMWrapper::get_camera_params(fx, fy, cx, cy, dimx, dimy, arguments);
	CLMWrapper::get_image_input_output_params(files, dFiles, outfeaturess, outimages, initial_poses, arguments);	
	CLMWrapper::CLMParameters clmParams(arguments);	

	// The modules that are being used for tracking
	CLMTracker::TrackerCLM clmModel;

	cout << "Loading the model" << endl;
	clmModel.Read(clmParams.defaultModelLoc, clmParams.override_pdm_loc);
	cout << "Model loaded" << endl;

	PoseDetectorHaar::PoseDetectorHaarParameters haarParams;

	haarParams.ClassifierLocation = "classifiers/haarcascade_frontalface_alt2.xml";

	CascadeClassifier classifier(haarParams.ClassifierLocation);

	bool visualise = true;

	//clmParams.multi_view = true;

	// Do some image loading
	for(size_t i = 0; i < files.size(); i++)
	{
		string file = files.at(i);

		// Loading image
		Mat img = imread(file, -1);
		
		// Loading depth file if exists
		Mat dTemp;		
		Mat_<float> dImg;

		if(dFiles.size() > 0)
		{
			string dFile = dFiles.at(i);
			dTemp = imread(dFile, -1);
			dTemp.convertTo(dImg, CV_32F);
		}

		if(dimx != 0)
		{
			cv::resize(img.clone(), img, cv::Size(dimx, dimy));
			if(!dImg.empty())
			{
				cv::resize(dImg.clone(), dImg, cv::Size(dimx, dimy));
			}
		}

		bool trackingInitialised = false;
	
		// Making sure the image is in uchar grayscale
		Mat_<uchar> gray;		
		convert_to_grayscale(img, gray);
		
			
		// Face detection initialisation
		Vec6d poseEstimateHaar;
		Matx66d poseEstimateHaarUncertainty;

		vector<Vec6d> poseEstimatesInitialiser;
		vector<Matx66d> covariancesInitialiser;	
		vector<Rect> faceRegions;
	
		bool initSuccess = false;

		// if no pose defined we just use OpenCV
		if(initial_poses.empty())
		{
			initSuccess = PoseDetectorHaar::InitialisePosesHaar(gray, dImg, poseEstimatesInitialiser, covariancesInitialiser, faceRegions, classifier, fx, fy, cx, cy, haarParams);

			if(initSuccess)
			{
				if(poseEstimatesInitialiser.size() > 1)
				{
					cout << "Multiple faces detected" << endl;
				}
			}
		
			if(initSuccess)
			{
//......... part of the code omitted here .........
Developer: AshwinRajendraprasad, Project: FacialAgeSynthesis, Lines: 101, Source: SimpleCLMImg.cpp

Example 7: main

int main(int argc, char **argv) {
    try {
        ParseArgs(argc, argv);

        int num_cameras = static_cast<int>(imgs.size());
        if (num_cameras < 1)
            throw runtime_error("Need at least one camera");

        // Find features

        cout << "Finding features...\n";
        Ptr<detail::FeaturesFinder> features_finder = features_finder_creator->Create();

        for (int i = 0; i < num_cameras; ++i) {
            int64 t = getTickCount();
            cout << "Finding features in '" << img_names[i] << "'... ";

            Ptr<detail::ImageFeatures> features = new detail::ImageFeatures();
            (*features_finder)(imgs[i], *features);
            features_collection[i] = features;

            cout << "#features = " << features_collection.find(i)->second->keypoints.size()
                 << ", time = " << (getTickCount() - t) / getTickFrequency() << " sec\n";
        }

        // Match all pairs

        cout << "Matching pairs... ";
        MatchesCollection matches_collection;
        Ptr<detail::FeaturesMatcher> matcher = features_matcher_creator.Create();

        FeaturesCollection::iterator from_iter = features_collection.begin();
        FeaturesCollection::iterator from_next_iter = from_iter; ++from_next_iter;
        FeaturesCollection::iterator to_iter;

        for (; from_next_iter != features_collection.end(); from_iter = from_next_iter++) {
            for (to_iter = from_next_iter; to_iter != features_collection.end(); ++to_iter) {
                cout << "(" << from_iter->first << "->" << to_iter->first << ") ";
                detail::MatchesInfo mi;
                (*matcher)(*(from_iter->second), *(to_iter->second), mi);
                matches_collection[make_pair(from_iter->first, to_iter->first)]
                        = new vector<DMatch>(mi.matches);
            }
        }
        cout << endl;

        // Estimate homographies

        HomographiesP2 Hs;
        HomographiesP2 good_Hs;
        vector<Mat> Hs_from_0;
        RelativeConfidences rel_confs;
        Mat keypoints1, keypoints2;

        cout << "Estimating Hs...\n";
        for (int from = 0; from < num_cameras - 1; ++from) {
            for (int to = from + 1; to < num_cameras; ++to) {
                const vector<DMatch> &matches = *(matches_collection.find(make_pair(from, to))->second);

                cout << "Estimating H between '" << img_names[from] << "' and '" << img_names[to]
                     << "'... #matches = " << matches.size();

                if (static_cast<int>(matches.size()) < min_num_matches) {
                    cout << ", not enough matches\n";
                    continue;
                }

                ExtractMatchedKeypoints(*(features_collection.find(from)->second),
                                        *(features_collection.find(to)->second),
                                        matches, keypoints1, keypoints2);
                vector<uchar> inliers_mask;
                Mat_<double> H = findHomography(keypoints1.reshape(2), keypoints2.reshape(2),
                                                inliers_mask, RANSAC, H_est_thresh);

                if (H.empty()) {
                    cout << ", can't estimate H\n";
                    continue;
                }

                Ptr<vector<DMatch> > inliers = new vector<DMatch>();
                for (size_t i = 0; i < matches.size(); ++i)
                    if (inliers_mask[i])
                        inliers->push_back(matches[i]);
                cout << ", #inliers = " << inliers->size();

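                // Reprojection RMS error computed over all tentative matches (inliers and outliers alike).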
                double rms_err = 0;
                for (size_t i = 0; i < matches.size(); ++i) {
                    const Point2d &kp1 = keypoints1.at<Point2d>(0, i);
                    const Point2d &kp2 = keypoints2.at<Point2d>(0, i);
                    double x = H(0, 0) * kp1.x + H(0, 1) * kp1.y + H(0, 2);
                    double y = H(1, 0) * kp1.x + H(1, 1) * kp1.y + H(1, 2);
                    double z = H(2, 0) * kp1.x + H(2, 1) * kp1.y + H(2, 2);
                    x /= z; y /= z;
                    rms_err += (kp2.x - x) * (kp2.x - x) + (kp2.y - y) * (kp2.y - y);
                }
                rms_err = sqrt(rms_err / matches.size());
                cout << ", RMS err = " << rms_err;

                // See "Automatic Panoramic Image Stitching using Invariant Features"
                // by Matthew Brown and David G. Lowe, IJCV 2007 for the explanation
//......... part of the code omitted here .........
Developer: AlexeySpizhevoy, Project: autocalib, Lines: 101, Source: _deprecated_autocalib_R_cam_real_ltd.cpp


Note: The Mat_::empty examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not republish without permission.