

C++ cvCaptureFromCAM Function Code Examples

This article collects typical usage examples of the C++ cvCaptureFromCAM function. If you are wondering how exactly cvCaptureFromCAM is used in C++, what it does, or where to find working examples, the curated code samples below should help.


The following 15 code examples of the cvCaptureFromCAM function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
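Before diving into the examples, here is a minimal sketch of the typical call pattern around cvCaptureFromCAM, written against the legacy OpenCV 1.x C API that the samples below use. The camera index 0, the window name "preview", the 10 ms key-polling interval, and the header paths are illustrative assumptions and may need adjusting for your OpenCV installation.

#include <stdio.h>
#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(void)
{
    /* Open the default camera; cvCaptureFromCAM returns NULL if no device can be opened. */
    CvCapture* capture = cvCaptureFromCAM(0);
    if (!capture) {
        printf("Could not initialize capturing...\n");
        return -1;
    }

    cvNamedWindow("preview", CV_WINDOW_AUTOSIZE);

    while (1) {
        /* The returned frame is owned by the capture structure; do not release it yourself. */
        IplImage* frame = cvQueryFrame(capture);
        if (!frame)
            break;

        cvShowImage("preview", frame);

        /* Poll the keyboard every 10 ms; quit on ESC. */
        if ((cvWaitKey(10) & 255) == 27)
            break;
    }

    cvDestroyWindow("preview");
    cvReleaseCapture(&capture);
    return 0;
}

The typical lifecycle is always the same: open the capture, query frames in a loop (without freeing them), then release the capture once when done.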

Example 1: main

int main(int argc, char *argv[]) 
{   
    DSP_cvStartDSP();
    CvCapture * capture;
    IplImage *videoFrame, *convFrame, *convOpencvFrame, *unsignedFrame; 
    IplImage *dataImage, *integralImage;
    int key = 0;   /* initialize so the while (key != 'q') test below is well-defined */
    
    int *ptr;
    float *flPtr;

    int i,j;
    
    float tempFloat=0.0;
    float *floatDataPtr;
    float *floatOutPtr;

    /* Data to test cvIntegral() */
    unsigned char intdata[] = { 151,  57, 116, 170,   9, 247, 208, 140, 150,  60,  88,  77,   4,   6, 162,   6, 
			    	31, 143, 178,   3, 135,  91,  54, 154, 193, 161,  20, 162, 137, 150, 128, 224, 
			    	214, 113,   9,  28,  53, 211,  98, 217, 149, 233, 231, 127, 115, 203, 177,  42, 
			    	62, 155,   3, 103, 127,  16, 135, 131, 211, 158,   9,   2, 106, 227, 249, 255 }; //16 x 4
        
    if ( argc < 2 ) {
       printf( "Usage: ./remote_ti_platforms_evm3530_opencv.xv5T [option] \n");
       printf("option:\ni. integral\ns. sobel\nd. dft\n");
       printf("Following are the all usage:\n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T i dsp(To test integral-image algorithm using DSP. Input is from webcam). Note: You need to install VLIB to test this.\n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T i arm(To test integral-image algorithm using ARM. Input is from webcam). \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T i test(To test integral-image algorithm using test data given in APP. Input is from webcam). \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T d dsp (To test DFT algorithm using DSP) \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T d arm (To test DFT algorithm using ARM) \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi dsp (To test sobel algorithm for movie clip tree.avi using DSP) \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi arm (To test sobel algorithm for movie clip tree.avi using ARM) \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T s webcam dsp (To test sobel algorithm for image from webcam using DSP) \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T s webcam arm (To test sobel algorithm for image from webcam using ARM) \n");
       printf("./remote_ti_platforms_evm3530_opencv.xv5T rgb2gray (To test RGB to Gray for image from webcam.) \n");
       return (-1);
    }

    if (*argv[1] == 's' && argc < 3) {
       printf( "Usage: ./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi \n");
       printf( "Usage: ./remote_ti_platforms_evm3530_opencv.xv5T s webcam \n");

       return (-1);
    }
	
    


    switch (*argv[1]) {
 
       	case 'i': 
	    switch (*argv[2]) {
                case 'd': { // 'd' for DSP-accelerated


    			cvNamedWindow( "video", CV_WINDOW_AUTOSIZE );
    
    
    			capture = cvCaptureFromCAM(-1);

    			if ( !capture) {
      	   	   	   printf("Error: Video capture initialization failed.\n");
      	   	   	   break;
    			}
        		videoFrame = cvQueryFrame ( capture );
     			if ( !videoFrame) {
      	   	   	   printf("**Error reading from webcam\n");
      		   	   break;
    			}
    			
    			/* create new image for the grayscale version */
    			convFrame = cvCreateImage( cvSize( videoFrame->width, videoFrame->height ), IPL_DEPTH_8U, 1 );

    			/* create output buffer for the integral image (allocated one pixel wider/taller, per the cvIntegral sum-image convention) */
    			convOpencvFrame = cvCreateImage( cvSize( convFrame->width+1, convFrame->height+1 ), IPL_DEPTH_32S, 1 );
			
			/* Process the first frame outside the loop*/
			DSP_cvCvtColor(videoFrame,convFrame,CV_RGB2GRAY);
			DSP_cvSyncDSP();			    
    			while ( key != 'q') {

    			      /* Time and benchmark the DSP-based integral image */
    			      Time_reset(&sTime);
    			      Time_delta(&sTime,&time);
    			      /* Find integral image */
    			      DSP_cvIntegral(convFrame,convOpencvFrame,NULL,NULL);

    			      /* get next frame */
    			      videoFrame = cvQueryFrame( capture );
    			      if ( !videoFrame) {
    			         printf("***The End***\n");
    			         break;
    			      }
    			      DSP_cvSyncDSP();
    			      /* Do color conversion */
    			      DSP_cvCvtColor(videoFrame,convFrame,CV_RGB2GRAY);
    			      /* show image */ // Since VLIB is used for the integral image, its output width and height are the same as the source image's.
    			      convOpencvFrame->width -= 1; convOpencvFrame->height -= 1;
//......... part of the code omitted here .........
Developer: andrecurvello, Project: opencv-dsp-acceleration, Lines: 101, Source file: app.c

Example 2: main

int main(int argc, char** argv)
{

	profile_name = (argc > 1 ? argv[1] : (char*)"blue_goal.yml");

	int cam = (argc > 2 ? atoi(argv[2]) : 0);

	// value loading
	fs = cvOpenFileStorage(profile_name, 0, CV_STORAGE_READ, NULL);
	Hmax = cvReadIntByName(fs, NULL, "Hmax", Hmax);
	Smax = cvReadIntByName(fs, NULL, "Smax", Smax);
	Vmax = cvReadIntByName(fs, NULL, "Vmax", Vmax);
	Hmin = cvReadIntByName(fs, NULL, "Hmin", Hmin);
	Smin = cvReadIntByName(fs, NULL, "Smin", Smin);
	Vmin = cvReadIntByName(fs, NULL, "Vmin", Vmin);
	minH = cvReadIntByName(fs, NULL, "minH", minH);


	cvNamedWindow("img", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("treshed", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("graph", CV_WINDOW_AUTOSIZE);


	cvCreateTrackbar("Hmin", "treshed", &Hmin, 360, onTrack);
	cvCreateTrackbar("Smin", "treshed", &Smin, 255, onTrack);
	cvCreateTrackbar("Vmin", "treshed", &Vmin, 255, onTrack);

	cvCreateTrackbar("Hmax", "treshed", &Hmax, 360, onTrack);
	cvCreateTrackbar("Smax", "treshed", &Smax, 255, onTrack);
	cvCreateTrackbar("Vmax", "treshed", &Vmax, 255, onTrack);
	cvCreateTrackbar("minH", "treshed", &minH, 255, onTrack);


	onTrack(0);

	CvCapture* camera = cvCaptureFromCAM(cam);


	while(1){
		img = cvQueryFrame(camera);

		allocateCvImage(&imgHSV, cvGetSize(img), 8, 3);
		cvCvtColor(img, imgHSV, CV_BGR2HSV);

		allocateCvImage(&imgThreshed, cvGetSize(img), 8, 1);
		cvInRangeS(imgHSV, cvScalar(Hmin, Smin, Vmin, 0), cvScalar(Hmax,
			Smax, Vmax, 0), imgThreshed);

		cvErode(imgThreshed, imgThreshed, 0, 2);


		int width = imgThreshed->width;
		int height = imgThreshed->height;
		int nchannels = imgThreshed->nChannels;
		int step = imgThreshed->widthStep;

		uchar* data = (uchar *)imgThreshed->imageData;
		unsigned int graph[width];

		int x,y;
		for(x = 0; x < width ; x++)
			graph[x] = 0;

		int sum = 0, notnull = 0;
		for(x = 0; x < width; x++){
			for( y = 0 ; y < height ; y++ ) {
				if(data[y*step + x*nchannels] == 255){
					graph[x]++;
				}
			}
			sum += graph[x];
			if(graph[x] != 0)
				notnull += 1;

	//		printf("%d\t%d\n", x, graph[x]);
		}
		if(notnull == 0)
			notnull = 1;
		int average = sum/notnull;
		if(average == 0)
			average = 1;
		float pix = 12.0/average;

		printf("\n sum: %d average: %d\n",sum,average);


		int first = 0, last = 0;
		// looking for goal
		for(x = 0;x < width; x++){
			/* guard the x-1/x+1 neighbour reads so the first and last columns do not index out of bounds */
			if(graph[x] >= average && (x == 0 || graph[x-1] < average)){
				cvLine(img, cvPoint(x, 0), cvPoint(x, height),
					cvScalar(255, 255, 0, 0), 1, 0, 0);
				if(first == 0)
					first = x;
			}
			if(graph[x] >= average && (x == width-1 || graph[x+1] < average)){
				cvLine(img, cvPoint(x, 0), cvPoint(x, height),
					cvScalar(255, 255, 0, 0), 1, 0, 0);
				last = x;
			}
//......... part of the code omitted here .........
Developer: xlcteam, Project: visy, Lines: 101, Source file: ViSyLite.c

Example 3: main

int main()
{
	// Initialize capturing live feed from the camera
	CvCapture* capture = 0;
	capture = cvCaptureFromCAM(0);	

	// Couldn't get a device? Throw an error and quit
	if(!capture)
    {
        printf("Could not initialize capturing...\n");
        return -1;
    }

	// The two windows we'll be using
    cvNamedWindow("video");
	cvNamedWindow("thresh");

	// This image holds the "scribble" data...
	// the tracked positions of the ball
	IplImage* imgScribble = NULL;

	// An infinite loop
	while(true)
    {
		// Will hold a frame captured from the camera
		IplImage* frame = 0;
		frame = cvQueryFrame(capture);

		// If we couldn't grab a frame... quit
        if(!frame)
            break;
		
		// If this is the first frame, we need to initialize it
		if(imgScribble == NULL)
		{
			imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
		}

		// Holds the yellow thresholded image (yellow = white, rest = black)
		IplImage* imgYellowThresh = GetThresholdedImage(frame);

		// Calculate the moments to estimate the position of the ball
		CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
		cvMoments(imgYellowThresh, moments, 1);

		// The actual moment values
		double moment10 = cvGetSpatialMoment(moments, 1, 0);
		double moment01 = cvGetSpatialMoment(moments, 0, 1);
		double area = cvGetCentralMoment(moments, 0, 0);

		// Holding the last and current ball positions
		static int posX = 0;
		static int posY = 0;

		int lastX = posX;
		int lastY = posY;

		posX = moment10/area;
		posY = moment01/area;

		// Print it out for debugging purposes
		printf("position (%d,%d)\n", posX, posY);

		// We want to draw a line only if its a valid position
		if(lastX>0 && lastY>0 && posX>0 && posY>0)
		{
			// Draw a yellow line from the previous point to the current point
			cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 5);
		}

		// Add the scribbling image and the frame... and we get a combination of the two
		cvAdd(frame, imgScribble, frame);
		cvShowImage("thresh", imgYellowThresh);
		cvShowImage("video", frame);

		// Wait for a keypress
		int c = cvWaitKey(10);
		if(c!=-1)
		{
			// If pressed, break out of the loop
            break;
		}

		// Release the thresholded image... we need no memory leaks.. please
		cvReleaseImage(&imgYellowThresh);

		free(moments); // allocated with malloc above, so release with free rather than delete
    }

	// We're done using the camera. Other applications can now use it
	cvReleaseCapture(&capture);
    return 0;
}
Developer: Alicg, Project: AI-Shack--Tracking-with-OpenCV, Lines: 93, Source file: TrackColour.cpp

Example 4: main

int
main (int argc, char *argv[])
{
        GtkBuilder  *builder;
        GtkWidget   *window, *window_paint, *fullscreen;
        GtkWidget   *canvas, *canvas_screen;
        GdkGLConfig *gl_config;

	//we need to initialize all these functions so that gtk knows
	//to be thread-aware
	if (!g_thread_supported ()){ 
		g_thread_init(NULL); 
	}
	gdk_threads_init(); 
	gdk_threads_enter();


        gtk_init (&argc, &argv);

		  //opengl
//        gtk_gl_init (&argc, &argv);

//		  gl_config = gdk_gl_config_new_by_mode (GDK_GL_MODE_RGB | GDK_GL_MODE_ALPHA | GDK_GL_MODE_DEPTH | GDK_GL_MODE_DOUBLE);

        builder = gtk_builder_new ();
        gtk_builder_add_from_file (builder, "gui/settings.builder", NULL);

        window = GTK_WIDGET (gtk_builder_get_object (builder, "window"));
        canvas = GTK_WIDGET (gtk_builder_get_object (builder, "canvas"));
        window_paint = GTK_WIDGET (gtk_builder_get_object (builder, "windowscreen"));
        canvas_screen = GTK_WIDGET (gtk_builder_get_object (builder, "canvasscreen"));

        fullscreen = GTK_WIDGET (gtk_builder_get_object (builder, "fullscreen"));
        gtk_builder_connect_signals (builder, NULL);          
        g_object_unref (G_OBJECT (builder));

	//signals for event
	g_signal_connect (G_OBJECT (canvas), "expose-event",
                G_CALLBACK (paints_video), NULL
                  );


	g_signal_connect (G_OBJECT (canvas_screen), "expose-event",
                G_CALLBACK (paints_anime), NULL
                  );

	g_signal_connect(G_OBJECT(window), "destroy", 
		G_CALLBACK(on_window_destroy), NULL
		);

   //g_signal_connect(G_OBJECT(window_paint), "expose_event", G_CALLBACK(on_window_expose_event), NULL);
   // g_signal_connect(G_OBJECT(window_paint), "configure_event", G_CALLBACK(on_window_configure_event), NULL);

	g_signal_connect(G_OBJECT(fullscreen), "clicked", 
		G_CALLBACK(click_fullcreen ), (gpointer) window_paint
		);

   gtk_widget_set_size_request(canvas_screen,500,500);

// opencv 
	CvCapture * capture = cvCaptureFromCAM(-1);      
   detec = new Detection(capture);

	//we can turn off gtk's automatic painting and double buffering routines.
	//gtk_widget_set_app_paintable(window_paint, TRUE);
	//gtk_widget_set_double_buffered(window_paint, FALSE);

	g_timeout_add(D_TIMER_ANIME, (GSourceFunc) timer_anime2, (gpointer) canvas_screen);
	g_timeout_add(D_TIMER_VIDEO, (GSourceFunc) timer_frame, (gpointer) canvas);

   gtk_widget_show (window_paint);   
   gtk_widget_show(window);
//	time_handler(window);    

	//pixmap = gdk_pixmap_new(window_paint->window,500,500,-1);

        gtk_main ();
	gdk_threads_leave();
        
        return 0;
}
Developer: Varhoo, Project: Fingerpaint, Lines: 81, Source file: gui.cpp

Example 5: main2

/**
 * @function main2
 */
int main2() {
  CvCapture* capture;
  cv::Mat frame;

  // Load the cascades
  if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n"); return -1; };

  cv::namedWindow(main_window_name,CV_WINDOW_NORMAL);
  cv::moveWindow(main_window_name, 400, 100);
  cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
  cv::moveWindow(face_window_name, 10, 100);
  cv::namedWindow("Right Eye",CV_WINDOW_NORMAL);
  cv::moveWindow("Right Eye", 10, 600);
  cv::namedWindow("Left Eye",CV_WINDOW_NORMAL);
  cv::moveWindow("Left Eye", 10, 800);

  createCornerKernels();
  ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2),
          43.0, 0.0, 360.0, cv::Scalar(255, 255, 255), -1);

   // Read the video stream
  capture = cvCaptureFromCAM( -1 );
  if( capture ) {
    while( true ) {
      frame = cvQueryFrame( capture );
      // mirror it
      cv::flip(frame, frame, 1);
        
      frame.copyTo(debugImage);
        
        int index = -1;
      // Apply the classifier to the frame
      if( !frame.empty() ) {
        
         index =detectAndDisplay( frame );
      }
        

      else {
        printf(" --(!) No captured frame -- Break!");
        break;
      }
        if (index >0) {
            char temp[5];
            sprintf(temp, "%d", index);
            string s(temp);
            Text(debugImage, "Index: "+s+" OK", 100, 100);

        }
        
      imshow(main_window_name,debugImage);

      int c = cv::waitKey(10);
      if( (char)c == 'c' ) { break; }
      if( (char)c == 'f' ) {
        imwrite("frame.png",frame);
      }

    }
  }

  releaseCornerKernels();

  return 0;
}
Developer: Victor0118, Project: Computer-Vision-PG, Lines: 68, Source file: main2.cpp

Example 6: cvReleaseCapture

void MainWindow::OpticalFlowDetect()
{
    cvReleaseCapture(&pCapture);
    pCapture=cvCaptureFromCAM(0);

    int corner_count = 1000;

     CvTermCriteria criteria;
     criteria = cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 64, 0.01);

     IplImage *src_img1;
     IplImage *src_img2;

     IplImage *dst_img;
     IplImage *pre_img;
     IplImage *result;


     IplImage *eig_img;
     IplImage *temp_img;

     IplImage *prev_pyramid;
     IplImage *curr_pyramid;

     CvPoint2D32f *corners1;
     CvPoint2D32f *corners2;
     corners1 = (CvPoint2D32f *) cvAlloc (corner_count * sizeof (CvPoint2D32f));
     corners2 = (CvPoint2D32f *) cvAlloc (corner_count * sizeof (CvPoint2D32f));

     char *status;
     status = (char *) cvAlloc (corner_count);

     while (1)
     {

      pre_img = cvQueryFrame(pCapture);

      CvSize img_sz = cvGetSize(pre_img);
      src_img1 = cvCreateImage(img_sz, IPL_DEPTH_8U, 1);
      cvCvtColor(pre_img, src_img1, CV_RGB2GRAY);

      dst_img = cvQueryFrame(pCapture);
      src_img2 = cvCreateImage(img_sz, IPL_DEPTH_8U, 1);
      cvCvtColor(dst_img, src_img2, CV_RGB2GRAY);

      result=cvCreateImage(img_sz,IPL_DEPTH_8U,1);
      cvZero(result);

      eig_img = cvCreateImage (img_sz, IPL_DEPTH_32F, 1);
      temp_img = cvCreateImage (img_sz, IPL_DEPTH_32F, 1);


      prev_pyramid = cvCreateImage (cvSize (src_img1->width + 8, src_img1->height / 3), IPL_DEPTH_8U, 1);
      curr_pyramid = cvCreateImage (cvSize (src_img1->width + 8, src_img1->height / 3), IPL_DEPTH_8U, 1);



      cvGoodFeaturesToTrack (src_img1, eig_img, temp_img, corners1, &corner_count, 0.001, 5, NULL);

      cvCalcOpticalFlowPyrLK (src_img1, src_img2, prev_pyramid, curr_pyramid,
       corners1, corners2, corner_count, cvSize (10, 10), 4, status, NULL, criteria, 0);

      for (int i = 0; i < corner_count; i++)
      {

          if (status[i])
              cvLine (dst_img, cvPointFrom32f (corners1[i]), cvPointFrom32f (corners2[i]), CV_RGB (255, 0, 0), 1, CV_AA, 0);
      }

      if(27==cvWaitKey(33))
          break;

    //  cvCvtScale(dst_img,result,1.0/255,0);
      MainWindow::Display(pre_img,src_img2,dst_img);

     }
}
Developer: lkpjj, Project: qt_demo, Lines: 77, Source file: mainwindow.cpp

Example 7: _tmain

int _tmain(int argc, char* argv[])
{
	CvCapture* pCapture = NULL;
	if(argc == 2)
	{
		char* _tempname = "e:\\201505280048_22.mp4";
		if( !(pCapture = cvCaptureFromFile(_tempname)))     
		{   
			fprintf(stderr, "Can not open video file %s\n", argv[1]);   
			return -2;     
		}
	}
	if (argc == 1)
	{
		if( !(pCapture = cvCaptureFromCAM(1)))     
		{   
			fprintf(stderr, "Can not open camera.\n");   
			return -2;     
		} 
	}

	IplImage* pFrame = NULL;
	int countx=0;
	while (pFrame =cvQueryFrame(pCapture))  
	{ 
		countx++;
		IplImage* img1 = cvCreateImage(cvGetSize(pFrame), IPL_DEPTH_8U, 1); // create the destination image
		cvCvtColor(pFrame,img1,CV_BGR2GRAY); // cvCvtColor(src,dst,CV_BGR2GRAY)
		// edge detection
		cv::Mat result(img1);
		cv::Mat contours;
		cv::Canny (result,contours,50,150);
		IplImage contoursHeader = IplImage(contours); // wrap the cv::Mat in a named header rather than taking the address of a temporary
		img1 = &contoursHeader;


		int nVer = 1;
		int nHor = 2;
		IplConvKernel* VerKer;
		IplConvKernel* HorKer;
		VerKer = cvCreateStructuringElementEx(1,nVer,0,nVer/2,CV_SHAPE_RECT);
		HorKer = cvCreateStructuringElementEx(nHor,1,nHor/2,0,CV_SHAPE_RECT);
		cvDilate(img1,img1,VerKer);
		cvDilate(img1,img1,HorKer);

		cvMorphologyEx(img1, img1, NULL, NULL, CV_MOP_CLOSE);

		cvSaveImage("a.jpg",img1);

		cv::Mat image(pFrame);
		LineFinder finder;  
		finder.setMinVote (600);  
		finder.setLineLengthAndGap (680,500);
		std::vector<cv::Vec4i> li;
		li = finder.findLines (contours); 
		finder.drawDetectedLines (image); 
		imwrite("123.jpg",image);
		// select the first straight line
		// black image

// 		for(int i = 0; i < li.size();i++)
// 		{
// 			int n= i;
// 			cv::Mat oneLine(image.size(),CV_8U,cv::Scalar(0));  
// 			cv::Mat oneLineInv;  
// 			// white line
// 			line(oneLine,cv::Point(li[n][0],li[n][1]),cv::Point(li[n][2],li[n][3]),cv::Scalar(255),5);  
// 			// bitwise-AND the contours with the white line
// 			bitwise_and(contours,oneLine,oneLine); 		
// 			threshold(oneLine,oneLineInv,128,255,cv::THRESH_BINARY_INV);
// 			// push the points of the point set into a vector
// 			std::vector<cv::Point> points;  
// 			// iterate over every pixel
// 			for(int y = 0; y < oneLine.rows;y++)  
// 			{  
// 				uchar* rowPtr = oneLine.ptr<uchar>(y);  
// 				for(int x = 0;x < oneLine.cols;x++)  
// 				{  
// 					if(rowPtr[x])  
// 					{  
// 						points.push_back(cv::Point(x,y));  
// 					}  
// 				}  
// 			}  
// 			// container storing the fitted line
// 			cv::Vec4f line;  
// 			// line-fitting function
// 			fitLine(cv::Mat(points),line,CV_DIST_L12,0,0.01,0.01);  
// 			// draw a line segment
// 			int x0= line[2];  
// 			int y0= line[3];  
// 			int x1= x0-200*line[0];  
// 			int y1= y0-200*line[1]; 
// 			if(y0 == y1 /*|| x0 == x1*/)
// 			{
// 				cv::line(image,cv::Point(x0,y0),cv::Point(x1,y1),cv::Scalar(0,255,0),1); 	
// 				imwrite("123.jpg",image);
// 			}
// 		}
// 		
	}
//......... part of the code omitted here .........
Developer: Strongc, Project: ImageProcessing-1, Lines: 101, Source file: Proimage_VideoDectLine_Project.cpp

Example 8: main

int main(int argc, char *argv[])
{
	int c;
	IplImage* color_img;
	IplImage* hsv_img;
	IplImage* h_img;
	IplImage* gray;
	int flags = CV_WINDOW_AUTOSIZE;
	CvCapture* cv_cap = cvCaptureFromCAM(CAMERA_0);         // Capture from CAMERA 0
	int h = 180;
	int t1 = 3, t2 = 5;
	CvScalar min = CV_RGB(h-15,100,0);
	CvScalar max = CV_RGB(h+15,256,256);

	/* Create ellipse to despeckle hsv.  */
	IplConvKernel* ellipse = cvCreateStructuringElementEx(10, 10, 1, 1,
			CV_SHAPE_ELLIPSE, NULL);
	/* For X, Y, And Area */
	CvMoments moments;

	/* For contours  */
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* contours = 0;

	double area, m01, m10;
	if (!cv_cap)
		goto fail;

	cvNamedWindow("Webcam Video", flags);                        // create window
	cvNamedWindow("hsv Video", flags);                        // create window
	cvNamedWindow("Contour Video", flags);                        // create window
	cvCreateTrackbar("Hue", "hsv Video", &h, 256, set_h);
	cvCreateTrackbar("countour1", "Contour Video", &t1, 256, set_h);
	cvCreateTrackbar("countour2", "Contour Video", &t2, 256, set_h);
	for(;;) {
		color_img = cvQueryFrame(cv_cap);               // get frame
		if(color_img != 0) {
			hsv_img = cvCreateImage(cvGetSize(color_img), IPL_DEPTH_8U, 3);
			gray = cvCreateImage(cvGetSize(hsv_img), IPL_DEPTH_8U, 1);
		}
		cvCvtColor(color_img, gray, CV_RGB2GRAY);
		cvCvtColor(color_img, hsv_img, CV_BGR2HSV);
		h_img = cvCreateImage(cvGetSize(hsv_img), IPL_DEPTH_8U, 1);


		/* HSV */
		min = CV_RGB(h-20,10,10);
		max = CV_RGB(h+20,256,256);

		/* Remove anything not in the hue range. */
		cvInRangeS(hsv_img, min, max, h_img);
		/* Remove noise, or at least make the blotches bigger? */
		cvErode(h_img, h_img, ellipse,1);
		cvDilate(h_img, h_img, ellipse,1);

		/* Calculate moments to figure out if the object is present */
		cvMoments(h_img, &moments, 1);

		area = cvGetSpatialMoment(&moments, 0,0);
		m01 = cvGetSpatialMoment(&moments, 0,1);
		m10 = cvGetSpatialMoment(&moments, 1,0);

		if (area > 17000) {
			int x = m10/area;
			int y = m01/area;
			printf("x = %d, y = %d (area = %f)\n", x, y, area);
		}

		/* Draw contours */
		cvCanny(gray, gray, (double)t1, (double)t2, 3);
		//cvDilate(gray, gray, 0, 1);
		cvDilate(gray, gray, ellipse, 1);
		cvFindContours(gray, storage, &contours, sizeof(CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_NONE, cvPoint(0,0));
		cvDrawContours(color_img, contours, CV_RGB(254,0,0), CV_RGB(0,255,0), 10, 1, CV_AA, cvPoint(0,0));
		/* Display images */
		cvShowImage("hsv Video", h_img); // show frame
		cvShowImage("Contour Video", gray); // show frame
		cvShowImage("Webcam Video", color_img); // show frame

		c = cvWaitKey(KS_WAIT);                         // wait KS_WAIT ms or for key stroke
		if(c == 27)
			break;                                  // if ESC, break and quit
	}
	/* clean up */
	cvReleaseCapture( &cv_cap );
	cvDestroyWindow("Webcam Video");
	return 0;
fail:
	printf("capture from cam failed\n");
	return -1;
}
Developer: howlett, Project: mvision, Lines: 91, Source file: hsv.c

Example 9: camera_control_new_with_settings

CameraControl *
camera_control_new_with_settings(int cameraID, int width, int height, int framerate, int cam_type)
{
	CameraControl* cc = (CameraControl*) calloc(1, sizeof(CameraControl));
	cc->cameraID = cameraID;

    if (framerate <= 0) {
        framerate = PSMOVE_TRACKER_DEFAULT_FPS;
    }
    
    if (cam_type == PSMove_Camera_PS3EYE_BLUEDOT)
    {
        cc->focl_x = (float)PS3EYE_FOCAL_LENGTH_BLUE;
        cc->focl_y = (float)PS3EYE_FOCAL_LENGTH_BLUE;
    }
    else if (cam_type == PSMove_Camera_PS3EYE_REDDOT)
    {
        cc->focl_x = (float)PS3EYE_FOCAL_LENGTH_RED;
        cc->focl_y = (float)PS3EYE_FOCAL_LENGTH_RED;
        
    }
    else if (cam_type == PSMove_Camera_Unknown)
    {
        cc->focl_x = (float)PS3EYE_FOCAL_LENGTH_BLUE;
        cc->focl_y = (float)PS3EYE_FOCAL_LENGTH_BLUE;
    }

    // Needed for cbb tracker. Will be overwritten by camera calibration files if they exist.
    

#if defined(CAMERA_CONTROL_USE_CL_DRIVER)
    // Windows 32-bit. Either CL_SDK or Registry_requiring
	int cams = CLEyeGetCameraCount();

	if (cams <= cameraID) {
            free(cc);
            return NULL;
	}

	GUID cguid = CLEyeGetCameraUUID(cameraID);
	cc->camera = CLEyeCreateCamera(cguid,
        CLEYE_COLOR_PROCESSED, CLEYE_VGA, framerate);

    CLEyeCameraGetFrameDimensions(cc->camera, &width, &height);

	// Depending on color mode chosen, create the appropriate OpenCV image
    cc->frame4ch = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 4);
    cc->frame3ch = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);

	CLEyeCameraStart(cc->camera);

#elif defined(CAMERA_CONTROL_USE_PS3EYE_DRIVER)
    // Mac or Windows
    // Initialize PS3EYEDriver
    ps3eye_init();
    int cams = ps3eye_count_connected();
    psmove_DEBUG("Found %i ps3eye(s) with CAMERA_CONTROL_USE_PS3EYE_DRIVER.\n", cams);
    if (cams <= cameraID) {
        free(cc);
        return NULL;
    }

    if (width <= 0 || height <= 0) {
        get_metrics(&width, &height);
    }

    psmove_DEBUG("Attempting to open ps3eye with cameraId, width, height, framerate: %d, %d, %d, %d.\n", cameraID, width, height, framerate);
    cc->eye = ps3eye_open(cameraID, width, height, framerate);

    if (cc->eye == NULL) {
        psmove_WARNING("Failed to open camera ID %d", cameraID);
        free(cc);
        return NULL;
    }

    cc->framebgr = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);

#else
    // Assume webcam accessible from OpenCV.
    char *video = psmove_util_get_env_string(PSMOVE_TRACKER_FILENAME_ENV);
    if (video) {
        psmove_DEBUG("Using '%s' as video input.\n", video);
        cc->capture = cvCaptureFromFile(video);
        free(video);
    } else {
        cc->capture = cvCaptureFromCAM(cc->cameraID);
        if (width <= 0 || height <= 0) {
            get_metrics(&width, &height);
        }
        cvSetCaptureProperty(cc->capture, CV_CAP_PROP_FRAME_WIDTH, width);
        cvSetCaptureProperty(cc->capture, CV_CAP_PROP_FRAME_HEIGHT, height);
    }

#endif
    cc->width = width;
    cc->height = height;
    cc->deinterlace = PSMove_False;

	return cc;
}
Developer: bagobor, Project: psmoveapi, Lines: 100, Source file: camera_control.c

Example 10: demo_classifier

void demo_classifier(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename)
{
#ifdef OPENCV
    printf("Classifier Demo\n");
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    list *options = read_data_cfg(datacfg);

    srand(2222222);
    CvCapture * cap;

    if(filename){
        cap = cvCaptureFromFile(filename);
    }else{
        cap = cvCaptureFromCAM(cam_index);
    }

    int top = option_find_int(options, "top", 1);

    char *name_list = option_find_str(options, "names", 0);
    char **names = get_labels(name_list);

    int *indexes = calloc(top, sizeof(int));

    if(!cap) error("Couldn't connect to webcam.\n");
    cvNamedWindow("Classifier", CV_WINDOW_NORMAL); 
    cvResizeWindow("Classifier", 512, 512);
    float fps = 0;
    int i;

    while(1){
        struct timeval tval_before, tval_after, tval_result;
        gettimeofday(&tval_before, NULL);

        image in = get_image_from_stream(cap);
        image in_s = resize_image(in, net->w, net->h);
        show_image(in, "Classifier");

        float *predictions = network_predict(net, in_s.data);
        if(net->hierarchy) hierarchy_predictions(predictions, net->outputs, net->hierarchy, 1, 1);
        top_predictions(net, top, indexes);

        printf("\033[2J");
        printf("\033[1;1H");
        printf("\nFPS:%.0f\n",fps);

        for(i = 0; i < top; ++i){
            int index = indexes[i];
            printf("%.1f%%: %s\n", predictions[index]*100, names[index]);
        }

        free_image(in_s);
        free_image(in);

        cvWaitKey(10);

        gettimeofday(&tval_after, NULL);
        timersub(&tval_after, &tval_before, &tval_result);
        float curr = 1000000.f/((long int)tval_result.tv_usec);
        fps = .9*fps + .1*curr;
    }
#endif
}
Developer: ShahImranShovon, Project: darknet, Lines: 63, Source file: classifier.c

Example 11: main

int main()
{
	int c = 0, i = 0;
    CvCapture* capture = cvCaptureFromCAM(0);

	if(!cvQueryFrame(capture))
	{
		printf("Video capture failed, please check the camera.");
	}
	else
	{
		printf("Video camera capture status: OK");
	}

    CvSize sz = cvGetSize(cvQueryFrame( capture));
	
	height = sz.height;
    width = sz.width;
    step = sz.width;
    
    IplImage* src = cvCreateImage( sz, 8, 3 );
	IplImage* hsv_image = cvCreateImage( sz, 8, 3);
	IplImage* hsv_mask = cvCreateImage( sz, 8, 1);
	IplImage* handview = cvCreateImage(sz, 8, 1);
	CvScalar  hsv_min = cvScalar(5, 70, 0, 0);
	CvScalar  hsv_max = cvScalar(20, 150, 255, 0); //H-> 0-20

	while( c != 27)
	{
		//printf("%d\t\t",framecount);
		src = cvQueryFrame( capture);
		cvCvtColor(src, hsv_image, CV_BGR2HSV);
		cvInRangeS (hsv_image, hsv_min, hsv_max, hsv_mask);
		cvSmooth(hsv_mask, handview, CV_MEDIAN, 5, 0, 0, 0);
		cvDilate(handview, handview, NULL, 3);
		//cvDilate(hsv_mask, handview, NULL, 1);
		//cvErode(handview, handview, NULL, 1);
		//cvDilate(handview, handview, NULL, 1);
		
		CvMemStorage* storage = cvCreateMemStorage(0);
		CvSeq* contour = 0;
		cvFindContours(handview, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
		cvZero(handview);
		
		for( ; contour != 0; contour = contour->h_next )
		{
			// replace CV_FILLED with 1 to see the outlines         
			double area = cvContourArea( contour, CV_WHOLE_SEQ, 0);
			if(area > 500)
			{
				cvDrawContours( handview, contour, cvScalar( 255 ), cvScalar( 255 ), 0, 0, 8 );
				//max = area;
			}
		}
		//cvShowImage("handview", handview);
		
		tips(handview);

		//cvNamedWindow( "hsv-msk",1);
		//cvShowImage( "hsv-msk", hsv_mask); //hsv_mask->origin = 1; 
		
		for(i=0; i<tipcount; i++)
		{
			if(posmax == i)
				cvCircle(src, cvPoint(tips_position[posmax][1], tips_position[posmax][0]), 3, cvScalar(0,255,0), 2, 8, 0);	
			else
				cvCircle(src, cvPoint(tips_position[i][1], tips_position[i][0]), 3, cvScalar(255,0,0), 2, 8, 0);
			

			if(speed[i][1] > 1 && speed[i][2] > 1 && (int)speed[i][5] == 1 && oldtips[(oldtipflag+1)%FRAMERUN][i][2] == 1)
			{	
				cvCircle(src, cvPoint(speed[i][4], speed[i][3]), 5, cvScalar(0,0,255), 3, 8, 0);
				speed[i][1] = 0;
				speed[i][2] = 0;
				speed[i][5] = 0;
				//printf("check\t");
			}
			else if(speed[i][1] > 1 && speed[i][2] > 1  && oldtips[(oldtipflag+1)%FRAMERUN][i][2] == -1)
			{	
				//cvCircle(src, cvPoint(speed[posmax][4], speed[posmax][3]), 9, cvScalar(0,0,255), 3, 8, 0);
				speed[i][1] = speed[i][2];
				speed[i][2] = 0;
				speed[i][5] = 0;
				//printf("check\t");
			}
		}
					
		//printf("%d\t%d\t%d", (int)speed[3][1], (int)speed[3][2], (int)speed[3][5]);
			
		//printf("\n");
		/*if(speed[posmax][1] > 1 && speed[posmax][2] > 1 && oldtips[(oldtipflag+1)%FRAMERUN][posmax][2] == 1)
		{	
			cvCircle(src, cvPoint(speed[posmax][4], speed[posmax][3]), 5, cvScalar(0,0,255), 3, 8, 0);
			speed[posmax][1]=0;
			speed[posmax][2]=0;
			//printf("check\t");
		}
		else if(speed[posmax][1] > 1 && speed[posmax][2] > 1 && oldtips[(oldtipflag+1)%FRAMERUN][posmax][2] == -1)
		{	
			//cvCircle(src, cvPoint(speed[posmax][4], speed[posmax][3]), 5, cvScalar(0,0,255), 3, 8, 0);
//......... part of the code omitted here .........
Developer: sumitsrv, Project: vk, Lines: 101, Source file: hand_final.c

Example 12: gun_classifier

void gun_classifier(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename)
{
#ifdef OPENCV
    int bad_cats[] = {218, 539, 540, 1213, 1501, 1742, 1911, 2415, 4348, 19223, 368, 369, 370, 1133, 1200, 1306, 2122, 2301, 2537, 2823, 3179, 3596, 3639, 4489, 5107, 5140, 5289, 6240, 6631, 6762, 7048, 7171, 7969, 7984, 7989, 8824, 8927, 9915, 10270, 10448, 13401, 15205, 18358, 18894, 18895, 19249, 19697};

    printf("Classifier Demo\n");
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    list *options = read_data_cfg(datacfg);

    srand(2222222);
    CvCapture * cap;

    if(filename){
        cap = cvCaptureFromFile(filename);
    }else{
        cap = cvCaptureFromCAM(cam_index);
    }

    int top = option_find_int(options, "top", 1);

    char *name_list = option_find_str(options, "names", 0);
    char **names = get_labels(name_list);

    int *indexes = calloc(top, sizeof(int));

    if(!cap) error("Couldn't connect to webcam.\n");
    cvNamedWindow("Threat Detection", CV_WINDOW_NORMAL); 
    cvResizeWindow("Threat Detection", 512, 512);
    float fps = 0;
    int i;

    while(1){
        struct timeval tval_before, tval_after, tval_result;
        gettimeofday(&tval_before, NULL);

        image in = get_image_from_stream(cap);
        image in_s = resize_image(in, net->w, net->h);
        show_image(in, "Threat Detection");

        float *predictions = network_predict(net, in_s.data);
        top_predictions(net, top, indexes);

        printf("\033[2J");
        printf("\033[1;1H");

        int threat = 0;
        for(i = 0; i < sizeof(bad_cats)/sizeof(bad_cats[0]); ++i){
            int index = bad_cats[i];
            if(predictions[index] > .01){
                printf("Threat Detected!\n");
                threat = 1;
                break;
            }
        }
        if(!threat) printf("Scanning...\n");
        for(i = 0; i < sizeof(bad_cats)/sizeof(bad_cats[0]); ++i){
            int index = bad_cats[i];
            if(predictions[index] > .01){
                printf("%s\n", names[index]);
            }
        }

        free_image(in_s);
        free_image(in);

        cvWaitKey(10);

        gettimeofday(&tval_after, NULL);
        timersub(&tval_after, &tval_before, &tval_result);
        float curr = 1000000.f/((long int)tval_result.tv_usec);
        fps = .9*fps + .1*curr;
    }
#endif
}
Developer: ShahImranShovon, Project: darknet, Lines: 75, Source file: classifier.c

Example 13: threat_classifier

void threat_classifier(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename)
{
#ifdef OPENCV
    float threat = 0;
    float roll = .2;

    printf("Classifier Demo\n");
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    list *options = read_data_cfg(datacfg);

    srand(2222222);
    CvCapture * cap;

    if(filename){
        cap = cvCaptureFromFile(filename);
    }else{
        cap = cvCaptureFromCAM(cam_index);
    }

    int top = option_find_int(options, "top", 1);

    char *name_list = option_find_str(options, "names", 0);
    char **names = get_labels(name_list);

    int *indexes = calloc(top, sizeof(int));

    if(!cap) error("Couldn't connect to webcam.\n");
    //cvNamedWindow("Threat", CV_WINDOW_NORMAL); 
    //cvResizeWindow("Threat", 512, 512);
    float fps = 0;
    int i;

    int count = 0;

    while(1){
        ++count;
        struct timeval tval_before, tval_after, tval_result;
        gettimeofday(&tval_before, NULL);

        image in = get_image_from_stream(cap);
        if(!in.data) break;
        image in_s = resize_image(in, net->w, net->h);

        image out = in;
        int x1 = out.w / 20;
        int y1 = out.h / 20;
        int x2 = 2*x1;
        int y2 = out.h - out.h/20;

        int border = .01*out.h;
        int h = y2 - y1 - 2*border;
        int w = x2 - x1 - 2*border;

        float *predictions = network_predict(net, in_s.data);
        float curr_threat = 0;
        if(1){
            curr_threat = predictions[0] * 0 + 
                predictions[1] * .6 + 
                predictions[2];
        } else {
            curr_threat = predictions[218] +
                predictions[539] + 
                predictions[540] + 
                predictions[368] + 
                predictions[369] + 
                predictions[370];
        }
        threat = roll * curr_threat + (1-roll) * threat;

        draw_box_width(out, x2 + border, y1 + .02*h, x2 + .5 * w, y1 + .02*h + border, border, 0,0,0);
        if(threat > .97) {
            draw_box_width(out,  x2 + .5 * w + border,
                    y1 + .02*h - 2*border, 
                    x2 + .5 * w + 6*border, 
                    y1 + .02*h + 3*border, 3*border, 1,0,0);
        }
        draw_box_width(out,  x2 + .5 * w + border,
                y1 + .02*h - 2*border, 
                x2 + .5 * w + 6*border, 
                y1 + .02*h + 3*border, .5*border, 0,0,0);
        draw_box_width(out, x2 + border, y1 + .42*h, x2 + .5 * w, y1 + .42*h + border, border, 0,0,0);
        if(threat > .57) {
            draw_box_width(out,  x2 + .5 * w + border,
                    y1 + .42*h - 2*border, 
                    x2 + .5 * w + 6*border, 
                    y1 + .42*h + 3*border, 3*border, 1,1,0);
        }
        draw_box_width(out,  x2 + .5 * w + border,
                y1 + .42*h - 2*border, 
                x2 + .5 * w + 6*border, 
                y1 + .42*h + 3*border, .5*border, 0,0,0);

        draw_box_width(out, x1, y1, x2, y2, border, 0,0,0);
        for(i = 0; i < threat * h ; ++i){
            float ratio = (float) i / h;
            float r = (ratio < .5) ? (2*(ratio)) : 1;
            float g = (ratio < .5) ? 1 : 1 - 2*(ratio - .5);
            draw_box_width(out, x1 + border, y2 - border - i, x2 - border, y2 - border - i, 1, r, g, 0);
        }
//......... part of the code omitted here .........
Developer: ShahImranShovon, Project: darknet, Lines: 101, Source file: classifier.c

Example 14: main

int main(){
  
      CvCapture* capture =0;       
      capture = cvCaptureFromCAM(0);
      if(!capture){
            printf("Capture failure\n");
            return -1;
      }
      
      IplImage* frame=0;
      frame = cvQueryFrame(capture);           
      if(!frame) return -1;
  
     // create a blank image, assigned to 'imgTracking', with the same size as the original video
     imgTracking=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U, 3);

     cvZero(imgTracking); // convert the image 'imgTracking' to black

     cvNamedWindow("Video");     
     cvNamedWindow("Ball");

      //iterate through each frames of the video     
      while(true){

            frame = cvQueryFrame(capture);           
            if(!frame) break;
            frame=cvCloneImage(frame); 
            
            cvSmooth(frame, frame, CV_GAUSSIAN,3,3); //smooth the original image using Gaussian kernel

            IplImage* imgHSV = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);

            cvCvtColor(frame, imgHSV, CV_BGR2HSV); //Change the color format from BGR to HSV

            IplImage* imgThresh = GetThresholdedImage(imgHSV, 150, 127, 134, 193, 177, 170);

            cvSmooth(imgThresh, imgThresh, CV_GAUSSIAN,3,3); //smooth the binary image using Gaussian kernel

            trackObject(imgThresh, 255, 0, 0, 1);

            IplImage* imgHSV2 = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);

            cvCvtColor(frame, imgHSV2, CV_BGR2HSV);

            IplImage* imgThresh2 = GetThresholdedImage(imgHSV2, 37, 67, 135, 113, 115, 202);

            cvSmooth(imgThresh2, imgThresh2, CV_GAUSSIAN,3,3); //smooth the binary image using Gaussian kernel

            trackObject(imgThresh2, 0, 255, 0, 2);

            // Add the tracking image and the frame

          cvAdd(frame, imgTracking, frame);

          cvShowImage("Ball", imgThresh); 
	  cvShowImage("Ball2", imgThresh2);            
          cvShowImage("Video", frame);
           
           //Clean up used images
           cvReleaseImage(&imgHSV);
	   cvReleaseImage(&imgHSV2);
           cvReleaseImage(&imgThresh);
	   cvReleaseImage(&imgThresh2);             
           cvReleaseImage(&frame);

            //Wait 10mS
            int c = cvWaitKey(10);
            //If 'ESC' is pressed, break the loop
            if((char)c==27 ) break;      
      }

      cvDestroyAllWindows() ;
      cvReleaseImage(&imgTracking);
      cvReleaseCapture(&capture);     

      return 0;
}
Developer: josepabloapu, Project: renekton, Lines: 79, Source file: Tracker2playersV4.cpp

Example 15: main

int main()
{
	CvCapture* capture = 0;
	printf("Initializing Capture...\n");
//  capture = cvCreateFileCapture("http://192.168.1.100/videostream.cgi?user=admin&pwd=123456");
 capture = cvCaptureFromCAM( CV_CAP_ANY );
  printf("Camera found\n");
	if(!capture){printf("Could not initialize camera...\n");}
	printf("Capture Success!!!\n");
	cvNamedWindow("Video", CV_WINDOW_AUTOSIZE);
	cvResizeWindow("Video", 640,480);
	cvMoveWindow("Video", 0,100);
	cvNamedWindow("HSV",CV_WINDOW_AUTOSIZE);
	cvNamedWindow("ROI");
	cvMoveWindow("ROI", 700,100);

  int state = 0;
  int track = 1;
  int change= 2;
  int reset = 3;
  int clear = 4;

	while(1)
	{
	  // Hold frame captured from camera
		IplImage* img = cvQueryFrame(capture);
		if(!img){printf("Image Query Failed");break;}
		
	  key = cvWaitKey(10);
	  if(key=='t'){state = track;}
	  if(key=='c'){state = change;}
	  if(key=='r'){state = reset;}
	  if(key=='q'){state = clear;}
	  key = cvWaitKey(10);
	
		
		
		if(state == track)
		{   
		  printf("Tracking\n");
      imgRed = getThresholdImage(img);
      imgPos = getMoment(img);
    }   
	  key = cvWaitKey(10); 
       	
		if(state == change)
		{
      cvRectangle(img,cvPoint(250,300),cvPoint(400,200),CV_RGB(255,0,0),5,8);
      cvWaitKey(30);
		  cvShowImage("Video",img);
			//printf("Getting Color\n");
			IplImage* imgColor = getColor(img);
  	  state = 0;
		}
		key = cvWaitKey(10);
		
		if(state == reset)
		{
			 printf("Resetting\n");
			 minC = hsv_min;
			 maxC = hsv_max;
			 state = 0;
		}
		
		if(state == clear)
		{
		  printf("Paused\n");
		  state = 0;
		  
		}
    else 
    {
      key = cvWaitKey(10);
    }

		cvShowImage("HSV", imgRed);
		cvShowImage("Video", img);
		cvReleaseImage(&imgPos);
		cvReleaseImage(&imgRed);

    if ( (cvWaitKey(10) & 255) == 27 ) break;
		//key = cvWaitKey(10);
	}
	cvDestroyWindow("Video");
	cvDestroyWindow("HSV");
	cvDestroyWindow("ROI");
	cvReleaseCapture(&capture);
}
Developer: RENX, Project: OpenCV, Lines: 88, Source file: colortrack.cpp


Note: The cvCaptureFromCAM function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For redistribution and use, please refer to the license of the corresponding project; do not reproduce without permission.