当前位置: 首页>>代码示例>>C++>>正文


C++ Mat::empty方法代码示例

本文整理汇总了C++中Mat::empty方法的典型用法代码示例。如果您正苦于以下问题:C++ Mat::empty方法的具体用法?C++ Mat::empty怎么用?C++ Mat::empty使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在Mat的用法示例。


在下文中一共展示了Mat::empty方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。

示例1: main

// Curve-matching demo: extract a curve from a fixed test image, build a
// synthetic rotated/scaled/noised/occluded copy of it, match the two
// curves, and visualize the matched subsets.
// NOTE: this listing is truncated — the tail of main() is omitted below.
int main(int argc, char** argv) {
	vector<Point> a,b;
	
	// Load the fixed test image; bail out if it cannot be read.
	Mat src = imread("deer-18.png");
	if (src.empty()) {
		cerr << "can't read image" << endl; exit(0);
	}
	// Extract the shape's contour and normalize it to 200 samples.
	GetCurveForImage(src, a, false);
	ResampleCurve(a, a, 200, false);
	
	vector<Point2d> a_p2d;
	ConvertCurve(a, a_p2d);
	
	//create the target curve
	{
		//rotate (5 deg) and scale (0.65) around the curve's centroid
		Scalar meanpt = mean(a);
		Mat_<double> trans_to = getRotationMatrix2D(Point2f(meanpt[0],meanpt[1]), 5, 0.65);
		
		//translate by (40,40)
		trans_to(0,2) += 40;
		trans_to(1,2) += 40;
	
		vector<Point2d> b_p2d;
		cv::transform(a_p2d,b_p2d,trans_to);
		
		// everybody in the house - make some noise!
		// (uniform jitter in [-10,10] per coordinate, fixed RNG seed for
		// reproducibility)
		cv::RNG rng(27628);
		for (int i=0; i<b_p2d.size(); i++) {
			b_p2d[i].x += (rng.uniform(0.0,1.0) - 0.5) * 20;
			b_p2d[i].y += (rng.uniform(0.0,1.0) - 0.5) * 20;
		}
		
		ConvertCurve(b_p2d, b);
		
		// occlude: keep only samples [50,130) and resample back to 200
		vector<Point> b_occ;
		for (int i=50; i<130; i++) {
			b_occ.push_back(b[i]);
		}
		ResampleCurve(b_occ, b, 200, true);
	}
	
	//Compare curves: find the best matching subsequence in each curve
	//(offset + length) and an overall match score.
	int a_len,a_off,b_len,b_off;
	double db_compare_score;
	CompareCurvesUsingSignatureDB(a, 
								  b,
								  a_len,
								  a_off,
								  b_len,
								  b_off,
								  db_compare_score
								  );

	//Get matched subsets of curves
	vector<Point> a_subset(a.begin() + a_off, a.begin() + a_off + a_len);
	vector<Point> b_subset(b.begin() + b_off, b.begin() + b_off + b_len);
	
	//Normalize to equal length
	ResampleCurve(a_subset, a_subset, 200, true);
	ResampleCurve(b_subset, b_subset, 200, true);
		
	//Visualize the original and target
	Mat outout(src.size(),CV_8UC3,Scalar::all(0));
	{
		//draw small original (scaled to 20%, shifted right by 25 px)
		vector<Point2d> tmp_curve;
		cv::transform(a_p2d,tmp_curve,getRotationMatrix2D(Point2f(0,0),0,0.2));
		Mat tmp_curve_m(tmp_curve); tmp_curve_m += Scalar(25,0);
		drawOpenCurve(outout, tmp_curve, Scalar(255), 1);
		
		//draw small matched subset of original
		ConvertCurve(a_subset, tmp_curve);
		cv::transform(tmp_curve,tmp_curve,getRotationMatrix2D(Point2f(0,0),0,0.2));
		Mat tmp_curve_m1(tmp_curve); tmp_curve_m1 += Scalar(25,0);
		drawOpenCurve(outout, tmp_curve, Scalar(255,255), 2);

		//draw small target (shifted near the right edge)
		ConvertCurve(b, tmp_curve);
		cv::transform(tmp_curve,tmp_curve,getRotationMatrix2D(Point2f(0,0),0,0.2));
		Mat tmp_curve_m2(tmp_curve); tmp_curve_m2 += Scalar(outout.cols - 150,0);
		drawOpenCurve(outout, tmp_curve, Scalar(255,0,255), 1);

		//draw big target
		drawOpenCurve(outout, b, Scalar(0,0,255), 1);
		//draw big matched subset of target
		drawOpenCurve(outout, b_subset, Scalar(0,255,255), 1);
	}
	
	
	//Prepare the curves for finding the transformation
	vector<Point2f> seq_a_32f,seq_b_32f,seq_a_32f_,seq_b_32f_;

	ConvertCurve(a_subset, seq_a_32f_);
	ConvertCurve(b_subset, seq_b_32f_);
	
	assert(seq_a_32f_.size() == seq_b_32f_.size());
	
	seq_a_32f.clear(); seq_b_32f.clear();
//......... part of the code omitted here .........
开发者ID:DavidNgv,项目名称:CurveMatching,代码行数:101,代码来源:match_two_curves.cpp

示例2: main


//......... part of the code omitted here (head of an OpenNI capture main() is missing) .........
        // Report the image generator's output mode (resolution + FPS).
        cout <<
            "\nImage generator output mode:" << endl <<
            "FRAME_WIDTH   " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_WIDTH ) << endl <<
            "FRAME_HEIGHT  " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_HEIGHT ) << endl <<
            "FPS           " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FPS ) << endl;
    }
    else
    {
        cout << "\nDevice doesn't contain image generator." << endl;
        if (!retrievedImageFlags[0] && !retrievedImageFlags[1] && !retrievedImageFlags[2])
            return 0;
    }
    // Load the face and eye Haar cascades; abort if either fails to load.
    if( !face_cascade.load( cascade_name[0] ) )
    { 
	printf("--(!)Error loading\n"); return -1; 
    };
    if( !eyes_cascade.load( cascade_name[1] ) )
    { 
	printf("--(!)Error loading\n"); return -1; 
    };
    //printf("Entering for\n");

    // WAIT_SEC is the warm-up period (seconds) before the golden face
    // template is captured; last_printed throttles the countdown output.
    int last_printed = 0;
    int WAIT_SEC = 10;

    // Main capture loop: grab depth + BGR frames each iteration, show an
    // alignment circle during warm-up, then run detection on every frame.
    for(;;)
    {
        Mat depthMap;
        Point image_center;
        Mat Display_image;
        Mat validDepthMap;
        Mat disparityMap;
        Mat bgrImage;
        Mat grayImage;
        Mat show;
        double seconds_since_start = difftime( time(0), start);

        if( !capture.grab() )
        {
            cout << "Can not grab images." << endl;
            return -1;
        }
        else
        {
            if( capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP ) )
            {
                // Scale the raw depth values down into 8 bits for display.
                const float scaleFactor = 0.05f;
		depthMap.convertTo( show, CV_8UC1, scaleFactor );
                //imshow( "depth map", show );
            }

            if( capture.retrieve( bgrImage, CV_CAP_OPENNI_BGR_IMAGE ) ) {
                
            // Align nose with the circle


                // Circle of radius 40, centered 100 px above image center.
                int rad = 40;
               	int row_rgb = bgrImage.rows;
            	int col_rgb = bgrImage.cols;
                image_center.y = row_rgb/2 - 100;
                image_center.x = col_rgb/2;
                Display_image = bgrImage.clone();
                // Copying bgrImage so that circle is shown temporarily only
                if( seconds_since_start < WAIT_SEC ) {
                circle( Display_image, image_center, rad, Scalar( 255, 0, 0 ), 3, 8, 0 );
                imshow( "rgb image", Display_image );
                }

                // Wait for a key Press
                //std::cin.ignore();
                // Now it will capture Golden data 
            }

        /*    if( retrievedImageFlags[4] && capture.retrieve( grayImage, CV_CAP_OPENNI_GRAY_IMAGE ) )
                imshow( "gray image", grayImage );*/

        // Print a once-per-second countdown during the warm-up period.
        int seconds = int(seconds_since_start);
        if(last_printed<seconds && seconds<=WAIT_SEC){
            printf(" Capturing Golden Face template after %d Seconds ...\n\n", WAIT_SEC - seconds);
                last_printed=seconds;
    
        }
            
        // After warm-up, run detection on each frame that has both maps.
	    if(!depthMap.empty() && !bgrImage.empty() && (seconds_since_start > WAIT_SEC))
		    detectAndDisplay(bgrImage, depthMap, argc, argv);
	    
	    //writeMatToFile("depth.txt",depthMap);
        }

        // Any key stops the loop and prints FPS / accuracy statistics.
        if( waitKey( 30 ) >= 0 )  {
            seconds_since_start = difftime( time(0), start) - WAIT_SEC;
            cout << endl << endl << " FPS is : " << ( (double)(filenumber - 1) )/seconds_since_start << endl; 
            cout << " Viola Jones Count is : " << viola_jones_count <<  " Total file count is : " << filenumber - 1 << endl; 
            cout << " Predictor Accuracy is : " << ( (double)(filenumber - viola_jones_count - 1 ) ) * 100 / (double) (filenumber - 1) << endl; 
            break;
        }
    }
    Trans_dump.close();
    return 0;
}
开发者ID:Urmish,项目名称:HeadPoseTracking,代码行数:101,代码来源:openni_capture.cpp

示例3: if

// Stereo calibration driver (truncated below): detects chessboard corners
// in interleaved left/right image pairs from `imagelist`, refines them to
// sub-pixel accuracy, and — in the omitted tail — runs stereoCalibrate and
// optional rectification.
static void
StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=true, bool showRectified=true)
{
    // The list must hold left/right pairs, so an even element count.
    if( imagelist.size() % 2 != 0 )
    {
        cout << "Error: the image list contains odd (non-even) number of elements\n";
        return;
    }

    bool displayCorners = false;//true;
    const int maxScale = 2;
    const float squareSize = 1.f;  // Set this to your actual square size
    // ARRAY AND VECTOR STORAGE:

    // imagePoints[0] = left camera corners, imagePoints[1] = right camera.
    vector<vector<Point2f> > imagePoints[2];
    vector<vector<Point3f> > objectPoints;
    Size imageSize;

    int i, j, k, nimages = (int)imagelist.size()/2;

    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    vector<string> goodImageList;

    // i walks over candidate pairs, j counts pairs where corners were
    // found in BOTH images.
    for( i = j = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            const string& filename = imagelist[i*2+k];
            Mat img = imread(filename, 0);
            if(img.empty())
                break;
            // All images must share the size of the first one.
            if( imageSize == Size() )
                imageSize = img.size();
            else if( img.size() != imageSize )
            {
                cout << "The image " << filename << " has the size different from the first image size. Skipping the pair\n";
                break;
            }
            bool found = false;
            vector<Point2f>& corners = imagePoints[k][j];
            // Retry detection at increasing upscale factors; corners found
            // on an upscaled image are mapped back to original coordinates.
            for( int scale = 1; scale <= maxScale; scale++ )
            {
                Mat timg;
                if( scale == 1 )
                    timg = img;
                else
                    resize(img, timg, Size(), scale, scale);
                found = findChessboardCorners(timg, boardSize, corners,
                    CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
                if( found )
                {
                    if( scale > 1 )
                    {
                        Mat cornersMat(corners);
                        cornersMat *= 1./scale;
                    }
                    break;
                }
            }
            if( displayCorners )
            {
                cout << filename << endl;
                Mat cimg, cimg1;
                cvtColor(img, cimg, COLOR_GRAY2BGR);
                drawChessboardCorners(cimg, boardSize, corners, found);
                // Shrink for display so the longer side is at most 640 px.
                double sf = 640./MAX(img.rows, img.cols);
                resize(cimg, cimg1, Size(), sf, sf);
                imshow("corners", cimg1);
                char c = (char)waitKey(500);
                if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
                    exit(-1);
            }
            else
                putchar('.');
            if( !found )
                break;
            // Refine the detected corners to sub-pixel accuracy.
            cornerSubPix(img, corners, Size(11,11), Size(-1,-1),
                         TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
                                      30, 0.01));
        }
        // k == 2 means both images of the pair were processed successfully.
        if( k == 2 )
        {
            goodImageList.push_back(imagelist[i*2]);
            goodImageList.push_back(imagelist[i*2+1]);
            j++;
        }
    }
    cout << j << " pairs have been successfully detected.\n";
    nimages = j;
    if( nimages < 2 )
    {
        cout << "Error: too little pairs to run the calibration\n";
        return;
    }

    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    objectPoints.resize(nimages);

//......... part of the code omitted here .........
开发者ID:165-goethals,项目名称:opencv,代码行数:101,代码来源:stereo_calib.cpp

示例4: main

// Lucas-Kanade optical-flow point tracker (truncated below): grabs webcam
// frames, tracks a set of points frame-to-frame with calcOpticalFlowPyrLK,
// and lets the user add/remove points with the mouse and keyboard.
int main( int argc, char** argv )
{
	VideoCapture cap;
	TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03);
	Size subPixWinSize(10,10), winSize(31,31);

	const int MAX_COUNT = 500;
	bool needToInit = false;
	bool nightMode = false;

	// Open the default camera (device 0).
	cap.open(0);

	if( !cap.isOpened() )
	{
		cout << "Could not initialize capturing...\n";
		return 0;
	}

	help();

	namedWindow( "PlaneTracking", 1 );
	setMouseCallback( "PlaneTracking", onMouse, 0 );

	Mat gray, prevGray, image;

	for(;;)
	{
		Mat frame;
		cap >> frame;
		if( frame.empty() )
			break;

		frame.copyTo(image);
		cvtColor(image, gray, CV_BGR2GRAY); 

		// Night mode blanks the display but tracking still runs.
		if( nightMode )
			image = Scalar::all(0);

		if( needToInit )
		{
			// automatic initialization
			/*goodFeaturesToTrack(gray, points[1], MAX_COUNT, 0.01, 10, Mat(), 3, 0, 0.04);
			cornerSubPix(gray, points[1], subPixWinSize, Size(-1,-1), termcrit);*/

			initTrackingPoints(frame);
			addRemovePt = false;
		}
		else if( !trackPtsPre.empty() )
		{
			// Track previous points into the current frame.
			vector<uchar> status;
			vector<float> err;
			if(prevGray.empty())
				gray.copyTo(prevGray);
			calcOpticalFlowPyrLK(prevGray, gray, trackPtsPre, trackPtsCur, status, err, winSize,
				3, termcrit, 0, 0, 0.001);

			// Compact the point list in place: drop lost points and the
			// point nearest a pending mouse click (within 5 px).
			size_t i, k;
			for( i = k = 0; i < trackPtsCur.size(); i++ )
			{
				if( addRemovePt )
				{
					if( norm(pt - trackPtsCur[i]) <= 5 )
					{
						addRemovePt = false;
						continue;
					}
				}

				if( !status[i] )
					continue;

				trackPtsCur[k++] = trackPtsCur[i];
				circle( image, trackPtsCur[i], 3, Scalar(0,255,0), -1, 8);
			}
			trackPtsCur.resize(k);
		}

		// A pending click that removed nothing adds a new refined point.
		if( addRemovePt && trackPtsCur.size() < (size_t)MAX_COUNT )
		{
			vector<Point2f> tmp;
			tmp.push_back(pt);
			cornerSubPix( gray, tmp, winSize, cvSize(-1,-1), termcrit);
			trackPtsCur.push_back(tmp[0]);
			addRemovePt = false;
		}

		needToInit = false;
		// NOTE(review): this draws into a "LK Demo" window while the mouse
		// callback was registered on "PlaneTracking" above — looks like a
		// leftover from the OpenCV sample this was adapted from; confirm
		// which window name is intended.
		imshow("LK Demo", image);

		char c = (char)waitKey(10);
		if( c == 27 )
			break;
		switch( c )
		{
		case 'r':
			needToInit = true;
			break;
		case 'c':
			trackPtsCur.clear();
			break;
//......... part of the code omitted here .........
开发者ID:burningneutron,项目名称:FixAR,代码行数:101,代码来源:main_PlaneTracking.cpp

示例5: main

/**
 * ROS webcam publisher node.
 *
 * Usage: cam_publisher <webcam-device-number> <frame-rate>
 *
 * Opens the given webcam device with OpenCV, grabs frames and publishes
 * them as sensor_msgs/Image on the "camera/image" topic at the requested
 * rate until the node is shut down.
 *
 * @return 0 on normal shutdown, 1 on invalid arguments or device.
 */
int main(int argc, char** argv)
{
  // check number of arguments: both the device number AND the frame rate
  // are required. (Fix: the original only checked argv[1] and then read
  // argv[2] unconditionally, which is undefined behaviour — constructing
  // an istringstream from a null pointer — when only one argument is
  // supplied on the command line.)
  if(argv[1] == NULL) 
	{
		ROS_ERROR("Webcam device number missing");
		return 1;
	}
  if(argv[2] == NULL)
	{
		ROS_ERROR("Frame rate missing");
		return 1;
	}

  ros::init(argc, argv, "image_publisher");
  ros::NodeHandle nh;
  image_transport::ImageTransport it(nh);
  image_transport::Publisher pub = it.advertise("camera/image", 1);

	// parse webcam device number to integer
  std::istringstream video_sourceCmd(argv[1]);
  int video_source;
  // check that the webcam device number is actually numeric
  if(!(video_sourceCmd >> video_source)) 
	{
		ROS_ERROR("Webcam device number is not a number");
		return 1;
	}

  VideoCapture cap(video_source);
	
  // check that the webcam device could be opened
  if(!cap.isOpened())
	{
		ROS_ERROR("Webcam device nummer invalid");
		return 1;
	}
  Mat frame;
  sensor_msgs::ImagePtr msg;

	// parse the requested frame rate
	std::istringstream fpsCmd(argv[2]);
	double fps;
	// check that the frame rate is actually numeric
	if(!(fpsCmd >> fps)) 
	{
		ROS_ERROR("Frame rate is not a number");
		return 1;
	}

  ros::Rate loop_rate(fps);
  while (nh.ok()) 
	{
    cap >> frame;
    // skip empty grabs; only publish frames with image content
    if(!frame.empty()) 
		{
			//convert OpenCV image to ROS image
      msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", frame).toImageMsg();
			//publish image
      pub.publish(msg);
			ROS_INFO("Publish frame");
      waitKey(1);
    }

    ros::spinOnce();
    loop_rate.sleep();
  }
  return 0;
}
开发者ID:callemein,项目名称:ros_robot,代码行数:63,代码来源:cam_publisher.cpp

示例6: main

// Image-processing pipeline driver (truncated below): loads a test image,
// equalizes it, and — in the omitted tail — applies Gaussian blur, Canny
// edge detection and the Hough transform, logging results to data.txt.
int main(int argc, char ** argv)
{

	string gauss = "Gaussino";
	string canny = "Canny";
	string hough = "Hough";
	string binarizar = "Binarizar";
	string Otsu = "Otsu";
	string image_name = "";
	int number;
	Point min, max, start;

	ofstream myfile;

	myfile.open("data.txt");

	myfile << "ESCREVE QUALQUER COISA\n";
	

	// t1..t4 time the pipeline stages; f1..f9 toggle individual stages;
	// threshold/length parameters configure Canny and HoughLinesP.
	clock_t t1, t2, t3, t4;
	double threshold1, threshold2, thres, minLength, maxGap;
	bool f1, f2, f3, f4, f5, f6, f7, f8, f9;
	string Result;
	ostringstream convert;
	//int i;
	float temp;

	// Batch loop over images a1.JPG..a6.JPG, currently disabled in favor
	// of a single hard-coded image.
	//for (i = 1;  i <= 6; i++){

		//number = i;
		//convert << number;
		//Result = convert.str();
		//image_name = "a" + Result + ".JPG";
		image_name = "a2.JPG";
		//number++;
		//cout << number << endl;
		cout << image_name;


		myfile << image_name;
		myfile << "\n";

		t1 = clock();
		f1 = false;
		f2 = true;
		f3 = false;
		f4 = false;
		f5 = false;
		f6 = true;
		f7 = true;
		if (f7 == true){
			// Canny thresholds
			threshold1 = 10;
			threshold2 = 19;
		}
		f8 = false;
		f9 = true;
		if (f9 == true){
			// HoughLinesP parameters: accumulator threshold, minimum line
			// length and maximum gap between collinear segments.
			thres = 10;// 40
			minLength = 20; //50
			maxGap = 30; //80

			/*
			CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );

			if ( !capture ) {
			fprintf( stderr, "ERROR: capture is NULL \n" );
			getchar();
			return -1;
			}
			string original = "original.jpg";
			string foto ="img";

			IplImage* frame = cvQueryFrame( capture );
			Mat img(frame);
			Mat I, I1, imge;
			cvtColor(img,imge,CV_RGB2GRAY);
			imge.convertTo(I, CV_8U);
			equalizeHist(I,I1);
			Mat aux = I1;
			savePictures(I1, original, foto);

			*/

			// read the image and load it into matrix I;
			// the image has a single color channel, hence CV_LOAD_IMAGE_GRAYSCALE
			Mat lara = imread("lara.JPG", CV_LOAD_IMAGE_GRAYSCALE);
			Mat I = imread(image_name, CV_LOAD_IMAGE_GRAYSCALE);
			if (I.empty())
				return -1;
			// Normalize the input to the reference image's size, then
			// equalize the histogram to boost contrast.
			resize(I, I, lara.size(), 1.0, 1.0, INTER_LINEAR);
			Mat I1;
			//Mat aux = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); 
			equalizeHist(I, I1);


			Mat aux, original;

			aux = I1;

			//ShowImage(I, I1);
//......... part of the code omitted here .........
开发者ID:pawpepe,项目名称:ResearchProjectImageProcessing,代码行数:101,代码来源:raspberryPi.cpp

示例7: forward

// Permute layer: reorders the (w, h, c) axes of bottom_blob into top_blob
// according to order_type. Returns 0 on success, -100 when the output
// blob cannot be allocated.
// NOTE: this listing is truncated — the order_type == 4 branch is cut off
// below and the order_type == 5 branch is omitted entirely.
int Permute::forward(const Mat& bottom_blob, Mat& top_blob) const
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    // order_type
    // 0 = w h c
    // 1 = h w c
    // 2 = w c h
    // 3 = c w h
    // 4 = h c w
    // 5 = c h w

    if (order_type == 0)
    {
        // Identity permutation: share the input blob directly.
        top_blob = bottom_blob;
    }
    else if (order_type == 1)
    {
        // Transpose w and h within each channel.
        top_blob.create(h, w, channels);
        if (top_blob.empty())
            return -100;

        #pragma omp parallel for
        for (int q=0; q<channels; q++)
        {
            const float* ptr = bottom_blob.channel(q);
            float* outptr = top_blob.channel(q);

            for (int i = 0; i < w; i++)
            {
                for (int j = 0; j < h; j++)
                {
                    outptr[i*h + j] = ptr[j*w + i];
                }
            }
        }
    }
    else if (order_type == 2)
    {
        // Output channel q gathers row q from every input channel.
        top_blob.create(w, channels, h);
        if (top_blob.empty())
            return -100;

        #pragma omp parallel for
        for (int q=0; q<h; q++)
        {
            float* outptr = top_blob.channel(q);

            for (int i = 0; i < channels; i++)
            {
                const float* ptr = bottom_blob.channel(i).row(q);

                for (int j = 0; j < w; j++)
                {
                    outptr[i*w + j] = ptr[j];
                }
            }
        }
    }
    else if (order_type == 3)
    {
        // Like order_type 2, but with the c axis varying fastest.
        top_blob.create(channels, w, h);
        if (top_blob.empty())
            return -100;

        #pragma omp parallel for
        for (int q=0; q<h; q++)
        {
            float* outptr = top_blob.channel(q);

            for (int i = 0; i < w; i++)
            {
                for (int j = 0; j < channels; j++)
                {
                    const float* ptr = bottom_blob.channel(j).row(q);

                    outptr[i*channels + j] = ptr[i];
                }
            }
        }
    }
    else if (order_type == 4)
    {
        // Output channel q gathers column q from every input channel.
        top_blob.create(h, channels, w);
        if (top_blob.empty())
            return -100;

        #pragma omp parallel for
        for (int q=0; q<w; q++)
        {
            float* outptr = top_blob.channel(q);

            for (int i = 0; i < channels; i++)
            {
                const float* ptr = bottom_blob.channel(i);

                for (int j = 0; j < h; j++)
                {
//......... part of the code omitted here .........
开发者ID:RichieMay,项目名称:ncnn,代码行数:101,代码来源:permute.cpp

示例8: main

// Line-following vision loop (truncated below): thresholds video frames in
// HSV, detects lane lines with Canny + probabilistic Hough, averages a
// pair of parallel lines into one, derives a steering angle from its slope
// and (in the omitted tail) writes it to a serial port and log file.
int main(int argc, char* argv[])
{
	/* Tserial *com;
   com = new Tserial();
   com->connect("COM11", 9600, spNONE);                            //check com port		B
   cvWaitKey(5000);
	
	*/
	    
	
	ofstream outfile;
outfile.open("currentangle.txt");
	// Open the serial link to the controller board.
	port_initialize("/dev/ttyACM1","9600");						//D
	cout<<"connected"<<endl;
	//Matrix to store each frame of the webcam feed
	Mat cameraFeed;
	//matrix storage for HSV image
	Mat HSV;
	//matrix storage for binary threshold image
	Mat threshold;
	//x and y values for the location of the object
	int x=0, y=0;
	// Endpoints of the two detected lines (averaged when parallel) and
	// the resulting slope/steering angle.
	int gradient1,gradient2,mac,sac,pac,zac;
	float slope,angle;
	
	//create slider bars for HSV filtering	//createTrackbars();
	//video capture object to acquire webcam feed
	// NOTE(review): argv[1] is used as the video source without checking
	// argc — confirm the caller always supplies it.
	VideoCapture capture(argv[1]);
	//open capture object at location zero (default location for webcam)
	//capture.open(0);
	//set height and width of capture frame
	capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
	//start an infinite loop where webcam feed is copied to cameraFeed matrix
	//all of our operations will be performed within this loop
	while(1){
		//store image to matrix
		capture.read(cameraFeed);
		//convert frame from BGR to HSV colorspace
		cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
		//filter HSV image between values and store filtered image to
		//threshold matrix
		inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
		//show frames 
		
		    Mat src = threshold;
 if(src.empty())
 {
    
     return -1;
 }

 Mat dst, cdst;
 Canny(src, dst, 100, 300, 3);    			//PARAMETER A  : HIGHER MUST BE 3 TIMES THE LOWER, SO LOWER ONLY NEED TO BE SET
 cvtColor(dst, cdst, CV_GRAY2BGR);

 // Compile-time switch: standard Hough transform (disabled) vs the
 // probabilistic variant used below.
 #if 0
  vector<Vec2f> lines;
  HoughLines(dst, lines, 1, CV_PI/180, 100, 0, 0 );

  for( size_t i = 0; i < lines.size(); i++ )
  {
     float rho = lines[i][0], theta = lines[i][1];
     Point pt1, pt2;
     double a = cos(theta), b = sin(theta);
     double x0 = a*rho, y0 = b*rho;
     pt1.x = cvRound(x0 + 1000*(-b));
     pt1.y = cvRound(y0 + 1000*(a));
     pt2.x = cvRound(x0 - 1000*(-b));
     pt2.y = cvRound(y0 - 1000*(a));
     line( cdst, pt1, pt2, Scalar(0,0,255), 3, CV_AA);
  }
 #else
  vector<Vec4i> lines;
  HoughLinesP(dst, lines, 1, CV_PI/180, 85, 100, 100 );		//PARAMETER B:NO. OF INTERSECTIONS-85BESTFORNOW
  for( size_t i = 0; i < lines.size(); i++ )
  {
    
    /*if(i==2)
    {
    	system("java -jar livegraph.jar");
    }*/
    // Once two segments exist, compare their gradients; if parallel,
    // average their endpoints into a single center line and convert its
    // slope into a steering angle in degrees.
    if(i==1)
    {
    	gradient1 =(lines[0][3]-lines[0][1])/(lines[0][2]-lines[0][0]) ;
 	Vec4i l = lines[i];
 	gradient2 = (lines[1][3]-lines[1][1])/(lines[1][2]-lines[1][0]) ;
 	if (gradient1==gradient2)
  	{
   		mac = (lines[0][0]+lines[1][0])/2;
   		sac = (lines[0][2]+lines[1][2])/2;
		pac = (lines[0][1]+lines[1][1])/2;
    		zac = (lines[0][3]+lines[1][3])/2;
		line(cdst, Point(mac, pac), Point(sac, zac), Scalar(0,0,255), 3, CV_AA);
		slope=(zac-pac)/(float)(sac-mac);
		if((57.2957795*tanh(slope))>0){
		angle=(90-(57.2957795*tanh(slope)));}
		else
		{
			angle=(57.2957795*tanh(slope));
//......... part of the code omitted here .........
开发者ID:harkiratbehl,项目名称:IP,代码行数:101,代码来源:finalwrite.cpp

示例9: computeOcclusionBasedMasks

/*
  Calculate occluded regions of reference image (left image) (regions that are occluded in the matching image (right image),
  i.e., where the forward-mapped disparity lands at a location with a larger (nearer) disparity) and non occluded regions.

  Inputs are expected to be CV_32FC1 disparity maps (validated by
  checkDispMapsAndUnknDispMasks). When _rightDisp is empty it is
  synthesized by forward-mapping leftDisp. Either output mask pointer may
  be NULL; requested masks are (re)allocated as CV_8UC1 and filled with
  255 at occluded / non-occluded pixels respectively.
*/
void computeOcclusionBasedMasks( const Mat& leftDisp, const Mat& _rightDisp,
                             Mat* occludedMask, Mat* nonOccludedMask,
                             const Mat& leftUnknDispMask = Mat(), const Mat& rightUnknDispMask = Mat(),
                             float dispThresh = EVAL_DISP_THRESH )
{
    // Nothing to compute if the caller requested neither mask.
    if( !occludedMask && !nonOccludedMask )
        return;
    checkDispMapsAndUnknDispMasks( leftDisp, _rightDisp, leftUnknDispMask, rightUnknDispMask );

    // Build the right disparity map: forward-map the left disparities when
    // none was supplied, keeping the nearest (largest) disparity wherever
    // several left pixels land on the same right pixel.
    Mat rightDisp;
    if( _rightDisp.empty() )
    {
        if( !rightUnknDispMask.empty() )
           CV_Error( CV_StsBadArg, "rightUnknDispMask must be empty if _rightDisp is empty" );
        rightDisp.create(leftDisp.size(), CV_32FC1);
        rightDisp.setTo(Scalar::all(0) );
        for( int leftY = 0; leftY < leftDisp.rows; leftY++ )
        {
            for( int leftX = 0; leftX < leftDisp.cols; leftX++ )
            {
                if( !leftUnknDispMask.empty() && leftUnknDispMask.at<uchar>(leftY,leftX) )
                    continue;
                float leftDispVal = leftDisp.at<float>(leftY, leftX);
                int rightX = leftX - cvRound(leftDispVal), rightY = leftY;
                if( rightX >= 0)
                    rightDisp.at<float>(rightY,rightX) = max(rightDisp.at<float>(rightY,rightX), leftDispVal);
            }
        }
    }
    else
        _rightDisp.copyTo(rightDisp);

    // Allocate and zero only the masks the caller asked for.
    if( occludedMask )
    {
        occludedMask->create(leftDisp.size(), CV_8UC1);
        occludedMask->setTo(Scalar::all(0) );
    }
    if( nonOccludedMask )
    {
        nonOccludedMask->create(leftDisp.size(), CV_8UC1);
        nonOccludedMask->setTo(Scalar::all(0) );
    }
    for( int leftY = 0; leftY < leftDisp.rows; leftY++ )
    {
        for( int leftX = 0; leftX < leftDisp.cols; leftX++ )
        {
            if( !leftUnknDispMask.empty() && leftUnknDispMask.at<uchar>(leftY,leftX) )
                continue;
            float leftDispVal = leftDisp.at<float>(leftY, leftX);
            int rightX = leftX - cvRound(leftDispVal), rightY = leftY;
            if( rightX < 0 )
            {
                // Mapped outside the right image: occluded by the border.
                // BUGFIX: the original guarded this case with
                // `rightX < 0 && occludedMask`, so with occludedMask == NULL
                // it fell through to the else branch and read rightDisp at a
                // negative column — an out-of-bounds access. Behavior for
                // non-null occludedMask is unchanged.
                if( occludedMask )
                    occludedMask->at<uchar>(leftY, leftX) = 255;
            }
            else
            {
                if( !rightUnknDispMask.empty() && rightUnknDispMask.at<uchar>(rightY,rightX) )
                    continue;
                float rightDispVal = rightDisp.at<float>(rightY, rightX);
                // A nearer surface (larger disparity, beyond the threshold)
                // at the mapped position means this left pixel is occluded.
                if( rightDispVal > leftDispVal + dispThresh )
                {
                    if( occludedMask )
                        occludedMask->at<uchar>(leftY, leftX) = 255;
                }
                else
                {
                    if( nonOccludedMask )
                        nonOccludedMask->at<uchar>(leftY, leftX) = 255;
                }
            }
        }
    }
}
开发者ID:JaehyunAhn,项目名称:Basic_OpenCV_utilization,代码行数:75,代码来源:test_stereomatching.cpp

示例10: run_batch

// Regression-test batch runner (truncated below): reads a YAML list of
// chessboard images and expected corner files, runs findChessboardCorners
// plus sub-pixel refinement on each, and fails the test when corners are
// missing or their error exceeds the accuracy thresholds.
void CV_ChessboardDetectorTest::run_batch( const string& filename )
{
    CvTS& ts = *this->ts;
    ts.set_failed_test_info( CvTS::OK );

    ts.printf(CvTS::LOG, "\nRunning batch %s\n", filename.c_str());
//#define WRITE_POINTS 1
#ifndef WRITE_POINTS    
    double max_rough_error = 0, max_precise_error = 0;
#endif
    string folder = string(ts.get_data_path()) + "cameracalibration/";

    FileStorage fs( folder + filename, FileStorage::READ );
    FileNode board_list = fs["boards"];
        
    // The "boards" node must be a sequence of (image, corner-file) pairs,
    // hence the even-size requirement.
    if( !fs.isOpened() || board_list.empty() || !board_list.isSeq() || board_list.size() % 2 != 0 )
    {
        ts.printf( CvTS::LOG, "%s can not be readed or is not valid\n", (folder + filename).c_str() );
        ts.printf( CvTS::LOG, "fs.isOpened=%d, board_list.empty=%d, board_list.isSeq=%d,board_list.size()%2=%d\n", 
            fs.isOpened(), (int)board_list.empty(), board_list.isSeq(), board_list.size()%2);
        ts.set_failed_test_info( CvTS::FAIL_MISSING_TEST_DATA );        
        return;
    }

    int progress = 0;
    int max_idx = board_list.node->data.seq->total/2;
    double sum_error = 0.0;
    int count = 0;

    for(int idx = 0; idx < max_idx; ++idx )
    {
        ts.update_context( this, idx, true );
        
        /* read the image */
        string img_file = board_list[idx * 2];                    
        Mat gray = imread( folder + img_file, 0);
                
        if( gray.empty() )
        {
            ts.printf( CvTS::LOG, "one of chessboard images can't be read: %s\n", img_file.c_str() );
            ts.set_failed_test_info( CvTS::FAIL_MISSING_TEST_DATA );
            continue;
        }

        // Load the expected corner coordinates stored alongside the image.
        string filename = folder + (string)board_list[idx * 2 + 1];
        Mat expected;
        {
            CvMat *u = (CvMat*)cvLoad( filename.c_str() );
            if(!u )
            {                
                ts.printf( CvTS::LOG, "one of chessboard corner files can't be read: %s\n", filename.c_str() ); 
                ts.set_failed_test_info( CvTS::FAIL_MISSING_TEST_DATA );
                continue;                
            }
            expected = Mat(u, true);
            cvReleaseMat( &u );
        }                
        size_t count_exp = static_cast<size_t>(expected.cols * expected.rows);                
        Size pattern_size = expected.size();

        vector<Point2f> v;        
        bool result = findChessboardCorners(gray, pattern_size, v, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);        
        show_points( gray, Mat(), v, pattern_size, result );
        if( !result || v.size() != count_exp )
        {
            ts.printf( CvTS::LOG, "chessboard is not found in %s\n", img_file.c_str() );
            ts.set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
            continue;
        }

#ifndef WRITE_POINTS
        // Error of the rough (pre-refinement) corner guesses.
        double err = calcError(v, expected);
#if 0
        if( err > rough_success_error_level )
        {
            ts.printf( CvTS::LOG, "bad accuracy of corner guesses\n" );
            ts.set_failed_test_info( CvTS::FAIL_BAD_ACCURACY );
            continue;
        }
#endif
        max_rough_error = MAX( max_rough_error, err );
#endif
        // Refine corners to sub-pixel accuracy before the strict check.
        cornerSubPix( gray, v, Size(5, 5), Size(-1,-1), TermCriteria(TermCriteria::EPS|TermCriteria::MAX_ITER, 30, 0.1));        
        //find4QuadCornerSubpix(gray, v, Size(5, 5));
        show_points( gray, expected, v, pattern_size, result  );

#ifndef WRITE_POINTS
//        printf("called find4QuadCornerSubpix\n");
        err = calcError(v, expected);
        sum_error += err;
        count++;
#if 1
        if( err > precise_success_error_level )
        {
            ts.printf( CvTS::LOG, "Image %s: bad accuracy of adjusted corners %f\n", img_file.c_str(), err ); 
            ts.set_failed_test_info( CvTS::FAIL_BAD_ACCURACY );
            continue;
        }
#endif
        ts.printf(CvTS::LOG, "Error on %s is %f\n", img_file.c_str(), err);
//......... part of the code omitted here .........
开发者ID:SCS-B3C,项目名称:OpenCV2-2,代码行数:101,代码来源:achesscorners.cpp

示例11: RGBDOdometry

// Dense RGB-D visual odometry (truncated below): estimates the rigid
// transform Rt between two RGB-D frames by coarse-to-fine direct image
// alignment over an image/depth pyramid. Returns via Rt; the omitted tail
// accumulates the per-level incremental transforms.
bool cv::RGBDOdometry( cv::Mat& Rt, const Mat& initRt,
                       const cv::Mat& image0, const cv::Mat& _depth0, const cv::Mat& validMask0,
                       const cv::Mat& image1, const cv::Mat& _depth1, const cv::Mat& validMask1,
                       const cv::Mat& cameraMatrix, float minDepth, float maxDepth, float maxDepthDiff,
                       const std::vector<int>& iterCounts, const std::vector<float>& minGradientMagnitudes,
                       int transformType )
{
    const int sobelSize = 3;
    const double sobelScale = 1./8;

    // Clone so preprocessing (depth clipping/masking) can't touch the
    // caller's buffers.
    Mat depth0 = _depth0.clone(),
        depth1 = _depth1.clone();

    // check RGB-D input data: 8-bit grayscale images with matching
    // single-precision depth maps of the same size.
    CV_Assert( !image0.empty() );
    CV_Assert( image0.type() == CV_8UC1 );
    CV_Assert( depth0.type() == CV_32FC1 && depth0.size() == image0.size() );

    CV_Assert( image1.size() == image0.size() );
    CV_Assert( image1.type() == CV_8UC1 );
    CV_Assert( depth1.type() == CV_32FC1 && depth1.size() == image0.size() );

    // check masks
    CV_Assert( validMask0.empty() || (validMask0.type() == CV_8UC1 && validMask0.size() == image0.size()) );
    CV_Assert( validMask1.empty() || (validMask1.type() == CV_8UC1 && validMask1.size() == image0.size()) );

    // check camera params
    CV_Assert( cameraMatrix.type() == CV_32FC1 && cameraMatrix.size() == Size(3,3) );

    // other checks
    CV_Assert( iterCounts.empty() || minGradientMagnitudes.empty() ||
               minGradientMagnitudes.size() == iterCounts.size() );
    CV_Assert( initRt.empty() || (initRt.type()==CV_64FC1 && initRt.size()==Size(4,4) ) );

    // Fall back to a built-in 4-level schedule when the caller did not
    // supply iteration counts / gradient thresholds.
    vector<int> defaultIterCounts;
    vector<float> defaultMinGradMagnitudes;
    vector<int> const* iterCountsPtr = &iterCounts;
    vector<float> const* minGradientMagnitudesPtr = &minGradientMagnitudes;

    if( iterCounts.empty() || minGradientMagnitudes.empty() )
    {
        defaultIterCounts.resize(4);
        defaultIterCounts[0] = 7;
        defaultIterCounts[1] = 7;
        defaultIterCounts[2] = 7;
        defaultIterCounts[3] = 10;

        defaultMinGradMagnitudes.resize(4);
        defaultMinGradMagnitudes[0] = 12;
        defaultMinGradMagnitudes[1] = 5;
        defaultMinGradMagnitudes[2] = 3;
        defaultMinGradMagnitudes[3] = 1;

        iterCountsPtr = &defaultIterCounts;
        minGradientMagnitudesPtr = &defaultMinGradMagnitudes;
    }

    preprocessDepth( depth0, depth1, validMask0, validMask1, minDepth, maxDepth );

    // Build the multi-scale pyramids used for coarse-to-fine alignment.
    vector<Mat> pyramidImage0, pyramidDepth0,
                pyramidImage1, pyramidDepth1, pyramid_dI_dx1, pyramid_dI_dy1, pyramidTexturedMask1,
                pyramidCameraMatrix;
    buildPyramids( image0, image1, depth0, depth1, cameraMatrix, sobelSize, sobelScale, *minGradientMagnitudesPtr,
                   pyramidImage0, pyramidDepth0, pyramidImage1, pyramidDepth1,
                   pyramid_dI_dx1, pyramid_dI_dy1, pyramidTexturedMask1, pyramidCameraMatrix );

    // Start from the caller-provided initial transform, or identity.
    Mat resultRt = initRt.empty() ? Mat::eye(4,4,CV_64FC1) : initRt.clone();
    Mat currRt, ksi;
    // Iterate from the coarsest pyramid level down to the finest.
    for( int level = (int)iterCountsPtr->size() - 1; level >= 0; level-- )
    {
        const Mat& levelCameraMatrix = pyramidCameraMatrix[level];

        const Mat& levelImage0 = pyramidImage0[level];
        const Mat& levelDepth0 = pyramidDepth0[level];
        Mat levelCloud0;
        cvtDepth2Cloud( pyramidDepth0[level], levelCloud0, levelCameraMatrix );

        const Mat& levelImage1 = pyramidImage1[level];
        const Mat& levelDepth1 = pyramidDepth1[level];
        const Mat& level_dI_dx1 = pyramid_dI_dx1[level];
        const Mat& level_dI_dy1 = pyramid_dI_dy1[level];

        CV_Assert( level_dI_dx1.type() == CV_16S );
        CV_Assert( level_dI_dy1.type() == CV_16S );

        const double fx = levelCameraMatrix.at<double>(0,0);
        const double fy = levelCameraMatrix.at<double>(1,1);
        const double determinantThreshold = 1e-6;

        Mat corresps( levelImage0.size(), levelImage0.type() );

        // Run transformation search on current level iteratively.
        for( int iter = 0; iter < (*iterCountsPtr)[level]; iter ++ )
        {
            // Find pixel correspondences under the current transform; stop
            // iterating at this level when none remain.
            int correspsCount = computeCorresp( levelCameraMatrix, levelCameraMatrix.inv(), resultRt.inv(DECOMP_SVD),
                                                levelDepth0, levelDepth1, pyramidTexturedMask1[level], maxDepthDiff,
                                                corresps );

            if( correspsCount == 0 )
                break;
//......... part of the code omitted here .........
开发者ID:Daniil-Osokin,项目名称:opencv,代码行数:101,代码来源:rgbdodometry.cpp

示例12: main

/** @function main */
/** @function main
 *  Loads the face/eye cascade classifiers, opens the video file
 *  "Mission.avi", and runs detectAndDisplay() on every frame until the
 *  stream ends, the user presses 'c', or NUM_FRAME frames were processed.
 *  @return 0 on success, -1 if a cascade or the video cannot be loaded.
 */
int main(int argc, const char** argv) {
    CvCapture* capture;
    Mat frame;
    char filename[13] = "Mission.avi";

    //-- 1. Load the cascades
    if (!face_cascade.load(face_cascade_name)) {
        printf("--(!)Error loading\n");
        return -1;
    };
    if (!eyes_cascade.load(eyes_cascade_name)) {
        printf("--(!)Error loading\n");
        return -1;
    };

    //++++++++++++++++++++++
    printf("------------- video to image ... ----------------\n");
    // Open the video file for capture.
    capture = cvCaptureFromAVI(filename);
    // Guard against a missing/unreadable file: the original code would
    // have crashed inside cvQueryFrame(NULL) below.
    if (!capture) {
        printf("--(!)Error opening video file\n");
        return -1;
    }
    // Grab one frame so the capture properties below are populated.
    cvQueryFrame(capture);
    int frameH = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
    int frameW = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
    int fps = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
    int numFrames =
        (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT);
    printf(
        "\tvideo height : %d\n\tvideo width : %d\n\tfps : %d\n\tframe numbers : %d\n",
        frameH, frameW, fps, numFrames);

    // Frame counter; the loop stops after NUM_FRAME frames.
    int i = 0;

    // Read and process frames until one of the stop conditions hits.
    while (1) {
        frame = cvQueryFrame(capture);  // implicit IplImage* -> Mat (no copy)

        //-- 3. Apply the classifier to the frame
        if (!frame.empty()) {
            detectAndDisplay(frame);
        } else {
            printf(" --(!) No captured frame -- Break!");
            break;
        }

        int c = waitKey(10);
        if ((char) c == 'c') {
            break;
        }

        // BUGFIX: the original never incremented i, so this frame limit
        // was dead code.
        ++i;
        if (i == NUM_FRAME)
            break;
    }
    cvReleaseCapture(&capture);
    return 0;
}
开发者ID:shaofenchen,项目名称:hadoop_video_processing,代码行数:88,代码来源:test.cpp

示例13: main

int main(int argc, char** argv)
{
    if(argc != 2) {
        help();
        return 1;
    }

    FILE* f = 0;
    VideoCapture cap;
    char test_file[20] = "";

    if (strcmp(argv[1], "live") != 0)
    {
        sprintf(test_file, "%s", argv[1]);
        f = fopen(test_file, "r");
        char vid[20];
        int values_read = fscanf(f, "%s\n", vid);
        CV_Assert(values_read == 1);
        cout << "Benchmarking against " << vid << endl;
        live = 0;
    }
    else
    {
        cap.open(0);
        if (!cap.isOpened())
        {
            cout << "Failed to open camera" << endl;
            return 0;
        }
        cout << "Opened camera" << endl;
        cap.set(CAP_PROP_FRAME_WIDTH, 640);
        cap.set(CAP_PROP_FRAME_HEIGHT, 480);
        cap >> frame;
    }

    HybridTrackerParams params;
    // motion model params
    params.motion_model = CvMotionModel::LOW_PASS_FILTER;
    params.low_pass_gain = 0.1f;
    // mean shift params
    params.ms_tracker_weight = 0.8f;
    params.ms_params.tracking_type = CvMeanShiftTrackerParams::HS;
    // feature tracking params
    params.ft_tracker_weight = 0.2f;
    params.ft_params.feature_type = CvFeatureTrackerParams::OPTICAL_FLOW;
    params.ft_params.window_size = 0;

    HybridTracker tracker(params);
    char img_file[20] = "seqG/0001.png";
    char img_file_num[10];
    namedWindow("Win", 1);

    setMouseCallback("Win", onMouse, 0);

    int i = 0;
    float w[4];
    for(;;)
    {
        i++;
        if (live)
        {
            cap >> frame;
            if( frame.empty() )
                break;
            frame.copyTo(image);
        }
        else
        {
            int values_read = fscanf(f, "%d %f %f %f %f\n", &i, &w[0], &w[1], &w[2], &w[3]);
            CV_Assert(values_read == 5);
            sprintf(img_file, "seqG/%04d.png", i);
            image = imread(img_file, IMREAD_COLOR);
            if (image.empty())
                break;
            selection = Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
                             cvRound(w[2]*image.cols), cvRound(w[3]*image.rows));
        }

        sprintf(img_file_num, "Frame: %d", i);
        putText(image, img_file_num, Point(10, image.rows-20), FONT_HERSHEY_PLAIN, 0.75, Scalar(255, 255, 255));
        if (!image.empty())
        {

            if (trackObject < 0)
            {
                tracker.newTracker(image, selection);
                trackObject = 1;
            }

            if (trackObject)
            {
                tracker.updateTracker(image);
                drawRectangle(&image, tracker.getTrackingWindow());
            }

            if (selectObject && selection.width > 0 && selection.height > 0)
            {
                Mat roi(image, selection);
                bitwise_not(roi, roi);
            }
//.........这里部分代码省略.........
开发者ID:406089450,项目名称:opencv,代码行数:101,代码来源:hybridtrackingsample.cpp

示例14: findTransformECC

double cv::findTransformECC(InputArray templateImage,
                            InputArray inputImage,
                            InputOutputArray warpMatrix,
                            int motionType,
                            TermCriteria criteria)
{


    Mat src = templateImage.getMat();//template iamge
    Mat dst = inputImage.getMat(); //input image (to be warped)
    Mat map = warpMatrix.getMat(); //warp (transformation)

    CV_Assert(!src.empty());
    CV_Assert(!dst.empty());


    if( ! (src.type()==dst.type()))
        CV_Error( CV_StsUnmatchedFormats, "Both input images must have the same data type" );

    //accept only 1-channel images
    if( src.type() != CV_8UC1 && src.type()!= CV_32FC1)
        CV_Error( CV_StsUnsupportedFormat, "Images must have 8uC1 or 32fC1 type");

    if( map.type() != CV_32FC1)
        CV_Error( CV_StsUnsupportedFormat, "warpMatrix must be single-channel floating-point matrix");

    CV_Assert (map.cols == 3);
    CV_Assert (map.rows == 2 || map.rows ==3);

    CV_Assert (motionType == MOTION_AFFINE || motionType == MOTION_HOMOGRAPHY ||
        motionType == MOTION_EUCLIDEAN || motionType == MOTION_TRANSLATION);

    if (motionType == MOTION_HOMOGRAPHY){
        CV_Assert (map.rows ==3);
    }

    CV_Assert (criteria.type & TermCriteria::COUNT || criteria.type & TermCriteria::EPS);
    const int    numberOfIterations = (criteria.type & TermCriteria::COUNT) ? criteria.maxCount : 200;
    const double termination_eps    = (criteria.type & TermCriteria::EPS)   ? criteria.epsilon  :  -1;

    int paramTemp = 6;//default: affine
    switch (motionType){
      case MOTION_TRANSLATION:
          paramTemp = 2;
          break;
      case MOTION_EUCLIDEAN:
          paramTemp = 3;
          break;
      case MOTION_HOMOGRAPHY:
          paramTemp = 8;
          break;
    }


    const int numberOfParameters = paramTemp;

    const int ws = src.cols;
    const int hs = src.rows;
    const int wd = dst.cols;
    const int hd = dst.rows;

    Mat Xcoord = Mat(1, ws, CV_32F);
    Mat Ycoord = Mat(hs, 1, CV_32F);
    Mat Xgrid = Mat(hs, ws, CV_32F);
    Mat Ygrid = Mat(hs, ws, CV_32F);

    float* XcoPtr = Xcoord.ptr<float>(0);
    float* YcoPtr = Ycoord.ptr<float>(0);
    int j;
    for (j=0; j<ws; j++)
        XcoPtr[j] = (float) j;
    for (j=0; j<hs; j++)
        YcoPtr[j] = (float) j;

    repeat(Xcoord, hs, 1, Xgrid);
    repeat(Ycoord, 1, ws, Ygrid);

    Xcoord.release();
    Ycoord.release();

    Mat templateZM    = Mat(hs, ws, CV_32F);// to store the (smoothed)zero-mean version of template
    Mat templateFloat = Mat(hs, ws, CV_32F);// to store the (smoothed) template
    Mat imageFloat    = Mat(hd, wd, CV_32F);// to store the (smoothed) input image
    Mat imageWarped   = Mat(hs, ws, CV_32F);// to store the warped zero-mean input image
    Mat allOnes		= Mat::ones(hd, wd, CV_8U); //to use it for mask warping
    Mat imageMask		= Mat(hs, ws, CV_8U); //to store the final mask

    //gaussian filtering is optional
    src.convertTo(templateFloat, templateFloat.type());
    GaussianBlur(templateFloat, templateFloat, Size(5, 5), 0, 0);//is in-place filtering slower?

    dst.convertTo(imageFloat, imageFloat.type());
    GaussianBlur(imageFloat, imageFloat, Size(5, 5), 0, 0);

    // needed matrices for gradients and warped gradients
    Mat gradientX = Mat::zeros(hd, wd, CV_32FC1);
    Mat gradientY = Mat::zeros(hd, wd, CV_32FC1);
    Mat gradientXWarped = Mat(hs, ws, CV_32FC1);
    Mat gradientYWarped = Mat(hs, ws, CV_32FC1);

//.........这里部分代码省略.........
开发者ID:4auka,项目名称:opencv,代码行数:101,代码来源:ecc.cpp

示例15: main

int main(int argc, char** argv)
{
	VideoCapture cap;
	Mat frame;

	frame.create(Size(FRAME_WIDTH, FRAME_HEIGHT), CV_8UC1);

	//if ( frame.isContinuous() ) cout << "yes" << endl;
	//Open RGB Camera
	cap.open(0);
	cap.set(cv::CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);
	cap.set(cv::CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);

	if( !cap.isOpened() )
	{
		cout << "Can not open camera !!" << endl;
		return -1;
	}

	//read frame
	cap >> frame;
	if( frame.empty() )
	{
		cout << "Can not read data from the Camera !!" << endl;
		return -1;
	}

	gpu_initialize_gmm(frame.ptr(0));

	cout << "frame.cols: " << frame.cols << endl;
	cout << "frame.rows: " << frame.rows << endl;

	for(;;)
	{
		//Get RGB Image
		cap >> frame;

		if( frame.empty() )
		{
			cout << "Can not read data from the Camera !!" << endl;
			return -1;
		}
		
		//GMM output
		Mat gmm_frame;
		gmm_frame.create(frame.size(), frame.type());
		gmm_frame = Mat::zeros(frame.size(), CV_8UC1);
		
		gpu_perform_gmm(frame.ptr(0), gmm_frame.ptr(0));
		//Show the GMM result image
		imshow("GMM", gmm_frame);

		//User Key Input
		char c = waitKey(10);
		if (c == 27) break; // got ESC
	}
	
	gpu_free_gmm();

	return 0;
}
开发者ID:CT-LU,项目名称:CV-Test1,代码行数:61,代码来源:main.cpp


注:本文中的Mat::empty方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。