

C++ VideoCapture Class Code Examples

This article collects typical usage examples of the C++ VideoCapture class. If you have been wondering what the VideoCapture class does, how to use it, or what working code looks like, the curated class examples below should help.


The following presents 15 code examples of the VideoCapture class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
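Before diving into the examples, here is a minimal sketch of the canonical VideoCapture pattern — open a device, check that it opened, read frames in a loop. It assumes an OpenCV 3.x/4.x build; the window name and camera index are illustrative.

#include <opencv2/opencv.hpp>
using namespace cv;

int main() {
    VideoCapture cap(0);              // open the default camera
    if (!cap.isOpened()) return -1;   // bail out if the device is unavailable
    Mat frame;
    while (cap.read(frame)) {         // grab and decode the next frame
        imshow("preview", frame);
        if (waitKey(30) == 27) break; // quit on ESC
    }
    return 0;                         // the destructor releases the capture
}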

Example 1: trainData

int trainData() {

    std::string videoName="";

    int n_frames[1000];
    //create dictionary
    int dict_size=100;//***

    Mat features;
    for(int i=1; i<no_videos; i++) {


        stringstream temp;
        temp<<i;
        std::string no=temp.str();
        videoName="C:/Rasika/trainvideos/video_"+no+".avi"; //*** path can be changed

        //initialize capture
        VideoCapture cap;
        cap.open(videoName);
        if(!cap.isOpened())  // check if we succeeded
            return -1;

        double count = cap.get(CV_CAP_PROP_FRAME_COUNT); //get the frame count

        //create window to show image
        //namedWindow("Video",1);
        //cout<<count<<endl;
        int jump=count/N;
        int j=1;

        int u=0;
        if(count<10) {
            jump=1;
        }
        int cnt=jump;
        while(u<10) {

            //Create matrix to store video frame
            Mat image;
            cap.set(CV_CAP_PROP_POS_FRAMES,cnt); //Set index to jump for particular count
            bool success = cap.read(image);
            if (!success) {
                cout << "Cannot read  frame " << endl;
                break;
            }

            ///////////Convert to gray scale/////////////
            Mat gray_image;
            cvtColor( image, gray_image, CV_BGR2GRAY );

            ////////EXTRACT INTEREST POINTS USING SIFT////
            // vector of keypoints
            std::vector<cv::KeyPoint> keypoints;
            // Construct the SIFT feature detector object
            SiftFeatureDetector sif(0.03,10.); // threshold  //***
            //Detect interest points
            sif.detect(gray_image,keypoints);

            ////////IMSHOW THE FRAMES EXTRACTED///////////

            //copy video stream to image
            //cap>>image;
            //print image to screen
            //imshow("Video",image);


            ///////////Save the frames//////////////

            stringstream temp2;
            temp2<<j;
            std::string no2=temp2.str();
            std::string frame_name="frame"+no2+".jpg";
            imwrite(frame_name,image);


            //////////////Draw the keypoints////////////

            /*
            Mat featureImage;
            // Draw the keypoints with scale and orientation information
            drawKeypoints(image, // original image
            keypoints, // vector of keypoints
            featureImage, // the resulting image
            Scalar(255,0,255), // color of the points
            DrawMatchesFlags::DRAW_RICH_KEYPOINTS); //flag
            //std::string name="image"+i;
            imshow(frame_name, featureImage );
            */

            ////////////////////compute descriptors//////////////////

            SiftDescriptorExtractor siftExtractor;
            Mat siftDesc;
            siftExtractor.compute(gray_image,keypoints,siftDesc);
            features.push_back(siftDesc); //add the descriptors from each frame to build one matrix per video

            ////////////////
            //delay 33ms //***
            //waitKey(33);
//......... part of the code omitted here .........
Developer: RasikaWarade, Project: Computer-Vision, Lines: 101, Source: main.cpp
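Note that Example 1 is written against the OpenCV 2.x API (CV_CAP_PROP_* constants, SiftFeatureDetector). A hedged sketch of the same per-frame sampling plus SIFT step against OpenCV 4.4+, where SIFT moved back into the main module, might look like this; sampleFrameDescriptors is a hypothetical helper name:

#include <opencv2/opencv.hpp>
using namespace cv;

// Sample the frame at frameIndex and append its SIFT descriptors to features.
void sampleFrameDescriptors(VideoCapture& cap, int frameIndex, Mat& features) {
    Mat image, gray, desc;
    cap.set(CAP_PROP_POS_FRAMES, frameIndex); // was CV_CAP_PROP_POS_FRAMES in 2.x
    if (!cap.read(image)) return;             // frame could not be decoded
    cvtColor(image, gray, COLOR_BGR2GRAY);    // was CV_BGR2GRAY in 2.x
    std::vector<KeyPoint> keypoints;
    Ptr<SIFT> sift = SIFT::create();
    sift->detectAndCompute(gray, noArray(), keypoints, desc);
    features.push_back(desc); // one descriptor matrix per sampled frame
}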

Example 2: doDetect

void doDetect (void)
{
  Mat src, dst;
  VideoCapture cap;
  /// Load image
  //src = imread( "pic.jpg", 1 );

  //delay(WAITPHOTO);

  if(!cap.open(0)) {
    printf("kein Foto moeglich");
  }

  cap >> src;

  if( !src.data )
    { return; } // nothing was captured

  /// Separate the image into its 3 planes ( B, G and R )
  vector<Mat> bgr_planes;
  split( src, bgr_planes );

  /// Establish the number of bins
  int histSize = 256;

  /// Set the ranges ( for B,G,R )
  float range[] = { 0, 256 } ;
  const float* histRange = { range };

  bool uniform = true; bool accumulate = false;

  Mat b_hist, g_hist, r_hist;

  /// Compute the histograms:
  calcHist( &bgr_planes[0], 1, 0, Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate );
  calcHist( &bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate );
  calcHist( &bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate );

  // Draw the histograms for B, G and R
  int hist_w = 512; int hist_h = 400;
  int bin_w = cvRound( (double) hist_w/histSize );

  Mat histImageR( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );
  Mat histImageB( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );
  Mat histImageG( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );


  /// Normalize the result to [ 0, histImage.rows ]
  normalize(b_hist, b_hist, 0, histImageB.rows, NORM_MINMAX, -1, Mat() );
  normalize(g_hist, g_hist, 0, histImageG.rows, NORM_MINMAX, -1, Mat() );
  normalize(r_hist, r_hist, 0, histImageR.rows, NORM_MINMAX, -1, Mat() );

  /// Draw for each channel
  for( int i = 1; i < histSize; i++ )
  {
      line( histImageB, Point( bin_w*(i-1), hist_h - cvRound(b_hist.at<float>(i-1)) ) ,
                       Point( bin_w*(i), hist_h - cvRound(b_hist.at<float>(i)) ),
                       Scalar( 255, 0, 0), 2, 8, 0  );
      line( histImageG, Point( bin_w*(i-1), hist_h - cvRound(g_hist.at<float>(i-1)) ) ,
                       Point( bin_w*(i), hist_h - cvRound(g_hist.at<float>(i)) ),
                       Scalar( 0, 255, 0), 2, 8, 0  );
      line( histImageR, Point( bin_w*(i-1), hist_h - cvRound(r_hist.at<float>(i-1)) ) ,
                       Point( bin_w*(i), hist_h - cvRound(r_hist.at<float>(i)) ),
                       Scalar( 0, 0, 255), 2, 8, 0  );
  }

  imwrite("orginal.jpg",src);
  imwrite( "PicR.jpg", histImageR );
  imwrite("PicG.jpg", histImageG);
  imwrite("PicB.jpg", histImageB);

  float peakRed=0.0, peakGreen=0.0, peakBlue=0.0;
  int positionRed=0, positionGreen=0, positionBlue=0;
  bool blueXred = false;
  bool fallBlue = false;

  for (int j=0;j<255;j++) {
    if(r_hist.at<float>(j) > peakRed) {
       positionRed=j;
       peakRed=r_hist.at<float>(j);
    }
    if(g_hist.at<float>(j) > peakGreen) {
      positionGreen=j;
      peakGreen=g_hist.at<float>(j);
    }
    if(b_hist.at<float>(j) > peakBlue) {
      positionBlue=j;
      peakBlue=b_hist.at<float>(j);
    }
    if(b_hist.at<float>(j)+5 < peakBlue) {
      fallBlue = true;
    }
    if(b_hist.at<float>(j)-5 > peakBlue) {
      fallBlue = false;
    }

    if(b_hist.at<float>(j) - r_hist.at<float>(j) <5 && b_hist.at<float>(j) - r_hist.at<float>(j) >-5) {
      if (fallBlue) {
        blueXred = true;
      }
//......... part of the code omitted here .........
Developer: aJunk, Project: makeathon_team_7-8, Lines: 101, Source: colorDetection.cpp
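A side note on the peak search in the loop above: OpenCV's minMaxLoc can locate a histogram's peak bin directly, avoiding the manual scan. A minimal sketch, assuming the same calcHist output layout (a 256x1 CV_32F column); histogramPeak is a hypothetical helper name:

#include <opencv2/opencv.hpp>
using namespace cv;

// Find the peak value and its bin index in a 1-D histogram (256x1, CV_32F).
void histogramPeak(const Mat& hist, float& peak, int& position) {
    double minVal, maxVal;
    Point minLoc, maxLoc;
    minMaxLoc(hist, &minVal, &maxVal, &minLoc, &maxLoc);
    peak = (float)maxVal;
    position = maxLoc.y; // the histogram is a single column, so the bin is the row
}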

Example 3: main

int main(int argc, char** argv){
  VideoCapture video;
  float media[] = {1,1,1,
				   1,1,1,
				   1,1,1};
  float gauss[] = {1,2,1,
				   2,4,2,
				   1,2,1};
  float horizontal[]={-1,0,1,
					  -2,0,2,
					  -1,0,1};
  float vertical[]={-1,-2,-1,
					0,0,0,
					1,2,1};
  float laplacian[]={0,-1,0,
					 -1,4,-1,
					 0,-1,0};

  Mat cap, frame, frame32f, frameFiltered;
  Mat mask(3,3,CV_32F), mask1;
  Mat result, result1;
  double width, height, min, max;
  int absolut;
  char key;

  video.open(0);
  if(!video.isOpened())
    return -1;
  width=video.get(CV_CAP_PROP_FRAME_WIDTH);
  height=video.get(CV_CAP_PROP_FRAME_HEIGHT);
  std::cout << "largura=" << width << "\n";;
  std::cout << "altura =" << height<< "\n";;

  namedWindow("filtroespacial",1);

  mask = Mat(3, 3, CV_32F, media);
  scaleAdd(mask, 1/9.0, Mat::zeros(3,3,CV_32F), mask1);
  swap(mask, mask1);
  absolut=1; // calcs abs of the image

  menu();
  for(;;){
    video >> cap;
    cvtColor(cap, frame, CV_BGR2GRAY);
    flip(frame, frame, 1);
    imshow("original", frame);
    frame.convertTo(frame32f, CV_32F);
    filter2D(frame32f, frameFiltered, frame32f.depth(), mask, Point(1,1), 0);
    if(absolut){
      frameFiltered=abs(frameFiltered);
    }
    frameFiltered.convertTo(result, CV_8U);
    imshow("filtroespacial", result);
    key = (char) waitKey(10);
    if( key == 27 ) break; // esc pressed!
    switch(key){
    case 'a':
	  menu();
      absolut=!absolut;
      break;
    case 'm':
	  menu();
      mask = Mat(3, 3, CV_32F, media);
      scaleAdd(mask, 1/9.0, Mat::zeros(3,3,CV_32F), mask1);
      mask = mask1;
      printmask(mask);
      break;
    case 'g':
	  menu();
      mask = Mat(3, 3, CV_32F, gauss);
      scaleAdd(mask, 1/16.0, Mat::zeros(3,3,CV_32F), mask1);
      mask = mask1;
      printmask(mask);
      break;
    case 'h':
	  menu();
      mask = Mat(3, 3, CV_32F, horizontal);
      printmask(mask);
      break;
    case 'v':
	  menu();
      mask = Mat(3, 3, CV_32F, vertical);
      printmask(mask);
      break;
    case 'l':
	  menu();
      mask = Mat(3, 3, CV_32F, laplacian);
      printmask(mask);
      break;
    case 'd':
      menu();
      // NOTE: as written, the normalized Gaussian mask below is immediately
      // overwritten by the Laplacian, so this option applies only the
      // Laplacian (see the Laplacian-of-Gaussian sketch after this example).
      mask = Mat(3, 3, CV_32F, gauss);
      scaleAdd(mask, 1/16.0, Mat::zeros(3,3,CV_32F), mask1);
      mask = mask1;
      mask = Mat(3, 3, CV_32F, laplacian);
      printmask(mask);
      break;
    default:
      break;
    }
  }
//......... part of the code omitted here .........
Developer: herbeton, Project: herbeton.github.io, Lines: 101, Source: laplgauss.cpp
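As noted in the 'd' case above, the code assigns the Gaussian mask and then immediately overwrites it with the Laplacian, so only the Laplacian is applied. If the intent was a Laplacian of Gaussian (the source file is named laplgauss.cpp), one way to get it is to smooth first and differentiate second with two filter2D passes; a hedged sketch under the same mask definitions, with logFilter as a hypothetical helper:

#include <opencv2/opencv.hpp>
using namespace cv;

// Smooth first, then differentiate: two filter2D passes approximate a
// Laplacian of Gaussian. gaussMask is assumed pre-scaled by 1/16.0;
// lapMask is the 3x3 Laplacian kernel defined in the example.
Mat logFilter(const Mat& frame32f, const Mat& gaussMask, const Mat& lapMask) {
    Mat smoothed, result;
    filter2D(frame32f, smoothed, frame32f.depth(), gaussMask, Point(1,1), 0);
    filter2D(smoothed, result, smoothed.depth(), lapMask, Point(1,1), 0);
    return abs(result); // match the example's absolute-value display path
}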

Example 4: main

int main(int argc, char *argv[]) {
	if (argc != 5) {
		cout << "Not enough parameters" << endl;
		return -1;
	}

	stringstream conv;


	VideoCapture capture;
	capture.open(atoi(argv[1]));

	const string compareImage1 = argv[2];
	const string compareImage2 = argv[3];
	const string compareImage3 = argv[4];

	Mat image1 = imread(compareImage1, -1);
	Mat image2 = imread(compareImage2, -1);
	Mat image3 = imread(compareImage3, -1);

	downsample(&image1);
	downsample(&image2);
	downsample(&image3);

	displayImage("Image1", image1, 0);
	displayImage("Image2", image2, 1);
	displayImage("Image3", image3, 2);


	//cv::cvtColor(image1, image1, CV_BGR2GRAY);
	//   cv::threshold(image1, image1, 128, 255, CV_THRESH_BINARY);
	//vector<std::vector<cv::Point> > storage;
	//Mat contoursImg1 = image1.clone();
	//findContours(contoursImg1, storage, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

	Mat frame;
	Mat grayFrame;
	capture >> frame;
	int frameCounter = 0;

	//KalmanFilter kalman = KalmanFilter(2, 2, 0);

	
    ///kalman.transitionMatrix 
	//	=(Mat_<int>(2,2) << 1, 0, 1, 0);

    //setIdentity(kalman.measurementMatrix);
    //setIdentity(kalman.measurementNoiseCov, Scalar::all(1e-5));
	//setIdentity(kalman.errorCovPost, Scalar::all(1));


	KalmanFilter KF(4, 2, 0);
	KF.transitionMatrix = *(Mat_<float>(4, 4) << 1,0,1,0,   0,1,0,1,  0,0,1,0,  0,0,0,1);

	// init...
	setIdentity(KF.measurementMatrix);
	setIdentity(KF.processNoiseCov, Scalar::all(1e-4));
	setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1));
	setIdentity(KF.errorCovPost, Scalar::all(.1));

	while (!frame.empty()) {

		Mat prediction = KF.predict();

			//prediction.at<int>(0,0);

            cout << "Prediction: " << prediction << "   ";

		//process only grey frames:
		cvtColor(frame, grayFrame, CV_RGB2GRAY);

		//downsample(&grayFrame);
		//nearest(image1, grayFrame);

		vector<Point2f> image1Corners = nearest(image1, frame.clone());
		vector<Point2f> image2Corners = nearest(image2, frame.clone());
		vector<Point2f> image3Corners = nearest(image3, frame.clone());


		Mat measurement = 
			(Mat_<float>(2,1) << (image1Corners[0].x + image1Corners[2].x)/2, (image1Corners[0].y + image1Corners[2].y)/2); 
		

		cout << measurement << endl;

		if(measurement.at<float>(0,0) != 0){
			KF.correct(measurement);
			Point predictCentre(prediction.at<float>(0,0), prediction.at<float>(1,0));
			cout << predictCentre;
			cv::circle(frame,predictCentre,5,Scalar(255,255,255, 0),3,8,0);
		}


		if(shouldDraw((image1Corners[0].x + image1Corners[2].x)/2, (image1Corners[0].y + image1Corners[2].y)/2, prediction.at<float>(0,0),prediction.at<float>(1,0))){
			drawCorners(&frame, image1Corners, 0);
		}
		drawCorners(&frame, image2Corners, 1);
		drawCorners(&frame, image3Corners, 2);
//......... part of the code omitted here .........
Developer: AnupamaKumar, Project: Orb-obeject-detection, Lines: 101, Source: projectPt1Cam.cpp
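The Kalman filter above implements a standard constant-velocity model: the state is (x, y, vx, vy) and the measurement is (x, y). For reference, the same setup reduced to a self-contained predict/correct loop; the drifting measurement is a stand-in for a real detection:

#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;

int main() {
    KalmanFilter KF(4, 2, 0); // state: x, y, vx, vy; measurement: x, y
    KF.transitionMatrix = (Mat_<float>(4, 4) <<
        1, 0, 1, 0,   // x  += vx
        0, 1, 0, 1,   // y  += vy
        0, 0, 1, 0,   // vx unchanged
        0, 0, 0, 1);  // vy unchanged
    setIdentity(KF.measurementMatrix);
    setIdentity(KF.processNoiseCov, Scalar::all(1e-4));
    setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1));
    setIdentity(KF.errorCovPost, Scalar::all(0.1));

    for (int t = 0; t < 10; t++) {
        Mat prediction = KF.predict();
        // A fake measurement drifting to the right; replace with a real detection.
        Mat measurement = (Mat_<float>(2, 1) << 10.0f + t, 20.0f);
        KF.correct(measurement);
        std::cout << "predicted state: " << Mat(prediction.t()) << std::endl;
    }
    return 0;
}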

Example 5: main

int main(int argc,char **argv)
{
    try
    {
        if (readArguments (argc,argv)==false) {
            return 0;
        }
        //parse arguments
        //read from camera or from  file
        if (TheInputVideo=="live") {
            TheVideoCapturer.open(0);
            waitTime=10;
        }
        else  TheVideoCapturer.open(TheInputVideo);
        //check video is open
        if (!TheVideoCapturer.isOpened()) {
            cerr<<"Could not open video"<<endl;
            return -1;

        }

        //read first image to get the dimensions
        TheVideoCapturer>>TheInputImage;

        //read camera parameters if passed
        if (TheIntrinsicFile!="") {
            TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
            TheCameraParameters.resize(TheInputImage.size());
        }
        //Configure other parameters
        if (ThePyrDownLevel>0)
            MDetector.pyrDown(ThePyrDownLevel);


        //Create gui

	MDetector.getThresholdParams( ThresParam1,ThresParam2);
        MDetector.setCornerRefinementMethod(MarkerDetector::LINES);

	/*
        cv::namedWindow("thres",1);
        cv::namedWindow("in",1);
        iThresParam1=ThresParam1;
        iThresParam2=ThresParam2;
        cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTackBarEvents);
	*/
	
        char key=0;
        int index=0;
        //capture until press ESC or until the end of the video
        while ( key!=27 && TheVideoCapturer.grab() ) // && index <= 50)
        {
            TheVideoCapturer.retrieve( TheInputImage);
            //copy image

            index++; //number of images captured

            double tick = (double)getTickCount();//for checking the speed
            //Detection of markers in the image passed
            MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters,TheMarkerSize);
            //check the speed by calculating the mean speed of all iterations
            AvrgTime.first+=((double)getTickCount()-tick)/getTickFrequency();
            AvrgTime.second++;
            //cout<<"Time detection="<<1000*AvrgTime.first/AvrgTime.second<<" milliseconds"<<endl;
	    
            //print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);
            for (unsigned int i=0;i<TheMarkers.size();i++) {
	      if (AllMarkers.count( TheMarkers[i].id ) == 0)
		AllMarkers[TheMarkers[i].id] = map<int,Marker>();
	      AllMarkers[TheMarkers[i].id][index] = TheMarkers[i];
	      
	      cout<<index<<endl;
                cout<<TheMarkers[i]<<endl;
                TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
            }
            //print other rectangles that contains no valid markers
       /**     for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
                aruco::Marker m( MDetector.getCandidates()[i],999);
                m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
            }*/



            //draw a 3d cube in each marker if there is 3d info
            if (  TheCameraParameters.isValid())
                for (unsigned int i=0;i<TheMarkers.size();i++) {
                    CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    CvDrawingUtils::draw3dAxis(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                }
            //DONE! Easy, right?
            cout<<endl<<endl<<endl;
            //show input with augmented information and  the thresholded image
            //cv::imshow("in",TheInputImageCopy);
            //cv::imshow("thres",MDetector.getThresholdedImage());

            //key=cv::waitKey(waitTime);//wait for key to be pressed
        }
//......... part of the code omitted here .........
Developer: ASCTech, Project: mooculus, Lines: 101, Source: find-markers.cpp

Example 6: main

int main(int argc, char *argv[]) {
    CommandLineParser parser(argc, argv, keys);
    parser.about(about);

    if(argc < 2) {
        parser.printMessage();
        return 0;
    }

    int dictionaryId = parser.get<int>("d");
    bool showRejected = parser.has("r");
    bool estimatePose = parser.has("c");
    float markerLength = parser.get<float>("l");

    Ptr<aruco::DetectorParameters> detectorParams = aruco::DetectorParameters::create();
    if(parser.has("dp")) {
        bool readOk = readDetectorParameters(parser.get<string>("dp"), detectorParams);
        if(!readOk) {
            cerr << "Invalid detector parameters file" << endl;
            return 0;
        }
    }
    detectorParams->doCornerRefinement = true; // do corner refinement in markers

    int camId = parser.get<int>("ci");

    String video;
    if(parser.has("v")) {
        video = parser.get<String>("v");
    }

    if(!parser.check()) {
        parser.printErrors();
        return 0;
    }

    Ptr<aruco::Dictionary> dictionary =
        aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME(dictionaryId));

    Mat camMatrix, distCoeffs;
    if(estimatePose) {
        bool readOk = readCameraParameters(parser.get<string>("c"), camMatrix, distCoeffs);
        if(!readOk) {
            cerr << "Invalid camera file" << endl;
            return 0;
        }
    }

    VideoCapture inputVideo;
    int waitTime;
    if(!video.empty()) {
        inputVideo.open(video);
        waitTime = 0;
    } else {
        inputVideo.open(camId);
        waitTime = 10;
    }

    double totalTime = 0;
    int totalIterations = 0;

    while(inputVideo.grab()) {
        Mat image, imageCopy;
        inputVideo.retrieve(image);

        double tick = (double)getTickCount();

        vector< int > ids;
        vector< vector< Point2f > > corners, rejected;
        vector< Vec3d > rvecs, tvecs;

        // detect markers and estimate pose
        aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected);
        if(estimatePose && ids.size() > 0)
            aruco::estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs,
                                             tvecs);

        double currentTime = ((double)getTickCount() - tick) / getTickFrequency();
        totalTime += currentTime;
        totalIterations++;
        if(totalIterations % 30 == 0) {
            cout << "Detection Time = " << currentTime * 1000 << " ms "
                 << "(Mean = " << 1000 * totalTime / double(totalIterations) << " ms)" << endl;
        }

        // draw results
        image.copyTo(imageCopy);
        if(ids.size() > 0) {
            aruco::drawDetectedMarkers(imageCopy, corners, ids);

            if(estimatePose) {
                for(unsigned int i = 0; i < ids.size(); i++)
                {
                    aruco::drawAxis(imageCopy, camMatrix, distCoeffs, rvecs[i], tvecs[i],
                                    markerLength * 0.5f);
                    cout << tvecs[i] << endl;
                }
            }
        }

//......... part of the code omitted here .........
Developer: azhao12345, Project: eyeslorger, Lines: 101, Source: detect_markers.cpp
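Stripped of the command-line scaffolding, the sample's core reduces to a few calls. A minimal sketch against the pre-4.7 cv::aruco contrib API this sample uses (newer OpenCV releases wrap the same steps in cv::aruco::ArucoDetector); the DICT_6X6_250 dictionary and camera index are assumptions:

#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
using namespace cv;

int main() {
    VideoCapture cap(0);
    if (!cap.isOpened()) return -1;
    Ptr<aruco::Dictionary> dictionary =
        aruco::getPredefinedDictionary(aruco::DICT_6X6_250);
    Mat image;
    while (cap.read(image)) {
        std::vector<int> ids;
        std::vector<std::vector<Point2f> > corners;
        aruco::detectMarkers(image, dictionary, corners, ids); // default parameters
        if (!ids.empty())
            aruco::drawDetectedMarkers(image, corners, ids);
        imshow("markers", image);
        if (waitKey(10) == 27) break; // ESC quits
    }
    return 0;
}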

Example 7: main


//......... part of the code omitted here .........


        while(1)
        {
            imshow(winName, mat_canvas );

            waitKey(30);
        }

    }
    //-- use dataset
    else if(flag_use_image == 2)
    {



        useDataset();





        while(1)
        {

            imshow(winName, mat_canvas );

            waitKey(30);
        }

    }
    else // video input: tracking features
    {
        VideoCapture cap;

        cap.open(1);
        if(!cap.isOpened())  // check if we succeeded
            return -1;
        cap.set(CV_CAP_PROP_FRAME_WIDTH, 800);
        cap.set(CV_CAP_PROP_FRAME_HEIGHT, 600);


        namedWindow("Keypoints", WINDOW_NORMAL);
        Mat mat_image;
        int num_vecKeypoints;
        int num_trackingPoints = 50;
        Mat mat_descriptors;

        char keyInput;

        //-- Step 1: Detect the keypoints using Detector
        // int minHessian = 400;





        OrbFeatureDetector detector;
        FREAK extractor;

        while(1)
        {
            cap >> mat_image;

            std::vector<KeyPoint> vec_keypoints, vec_goodKeypoints;
Developer: jiayil, Project: jiayi-ros-pkg, Lines: 66, Source: init.cpp
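The truncated loop above pairs an OrbFeatureDetector with a FREAK extractor, both OpenCV 2.x-era classes. A hedged sketch of the equivalent per-frame step with the factory-based 3.x/4.x API, where FREAK lives in the opencv_contrib xfeatures2d module; detectAndDescribe is a hypothetical helper name:

#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
using namespace cv;

// Detect ORB keypoints, then describe them with FREAK descriptors.
void detectAndDescribe(const Mat& gray, std::vector<KeyPoint>& keypoints, Mat& descriptors) {
    Ptr<ORB> detector = ORB::create();
    Ptr<xfeatures2d::FREAK> extractor = xfeatures2d::FREAK::create();
    detector->detect(gray, keypoints);
    extractor->compute(gray, keypoints, descriptors);
}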

Example 8: main

int main(int argc, const char* argv[])
{

	Mat frame, uiframe, grayFrame, result, skin, frame1, frame2, frame3,frame4, frame5;
	VideoCapture  capture;
	int lowerBound = 200;
	int upperBound = 255;
	
	//Start the camera
	
	capture.open(0);
	if( !capture.isOpened() )
	{
		std::cout << "no se encontro la camara" << std::endl;
		return -1;
	}

	//Create the window
	cvNamedWindow("Result", CV_WINDOW_AUTOSIZE);
	cvCreateTrackbar("Lower bound", "Result", &lowerBound, 255 );
	cvCreateTrackbar("Upper bound", "Result", &upperBound, 255 );

	//Capture from the camera
	while(1)
	{
		for(int i = 0; i < 5; i++ ) capture >> frame;
		capture >> frame;
		uiframe = frame.clone();

		// Draw the lower and upper bars
		dibujarInterface(uiframe);
		imshow("Result", uiframe );
		cvWaitKey();

		//Get the skin mask
		skin = getSkin(frame);

		//Convert the frame to grayscale
		cvtColor(frame, grayFrame, CV_BGR2GRAY);
		//Histogram Equalization
		equalizeHist(grayFrame, frame1);

		blur(frame1,frame2, Size(5,5));

		//erosion and dilation
		int size = 6;
		Mat element = getStructuringElement(MORPH_CROSS, Size(2*size+1,2*size+1), Point(size,size) );
		erode(frame2, frame3, element );
		dilate(frame3, frame4, element );

		inRange(frame4, Scalar(lowerBound), Scalar(upperBound), frame5);

		applyMask(frame5, frame, result);
		applyMask(skin, result, result, 255);
		cvtColor( result , result, CV_BGR2GRAY);
		//Draw the bounding rectangle
		Rect rect = EncontrarSonrisa( result );
		rectangle( frame, rect , cvScalar(0,0,255));

		// Draw the lower and upper bars
		dibujarInterface(frame);

		//Show the frame
		imshow("Result", frame);
		cvWaitKey();
	}
	return 0;
}
Developer: lcjury, Project: SmileDetectOpenCV, Lines: 68, Source: main.cpp

Example 9: main

int main(int argc, char* argv[])
{
	//if we would like to calibrate our filter values, set to true.
	bool calibrationMode = true;
	
	//Matrix to store each frame of the webcam feed
	Mat cameraFeed;
	Mat threshold;
	Mat filteredImage;

	if(calibrationMode){
		//create slider bars for HSV filtering
		createTrackbars();
	}
	//video capture object to acquire webcam feed
	VideoCapture capture;
	//open capture object at location zero (default location for webcam)
	capture.open(1);
	//set height and width of capture frame
	capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
	//start an infinite loop where webcam feed is copied to cameraFeed matrix
	//all of our operations will be performed within this loop
	while(1){
		//store image to matrix
		capture.read(cameraFeed);
		// flip(cameraFeed,cameraFeed,1); //flip camera
		filteredImage = cameraFeed.clone();
		filteredImage = filterRed(filteredImage);
		
		//convert frame from BGR to HSV colorspace
		// cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);

		if(calibrationMode==true){
		//if in calibration mode, we track objects based on the HSV slider values.
		// cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
		inRange(filteredImage,Scalar(254,254,254),Scalar(255,255,255),threshold);
		morphOps(threshold);
		imshow(windowName2,threshold);
		trackFilteredObject(threshold,filteredImage,cameraFeed);
		}

		//show frames 
		imshow(windowName2,threshold);

		imshow(windowName,cameraFeed);
		imshow(windowName1,filteredImage);


		//delay 30ms so that screen can refresh.
		//image will not appear without this waitKey() command
		waitKey(30);
	}






	return 0;
}
Developer: jimenezl, Project: MASLAB-Team-UP, Lines: 61, Source: rgbObjectTracking2.cpp

Example 10: video_homography

int video_homography(int ac, char ** av)
{

    if (ac != 2)
    {
        help(av);
        return 1;
    }

    BriefDescriptorExtractor brief(32);

    VideoCapture capture;
    capture.open(atoi(av[1]));
    if (!capture.isOpened())
    {
        help(av);
        cout << "capture device " << atoi(av[1]) << " failed to open!" << endl;
        return 1;
    }

    cout << "following keys do stuff:" << endl;
    cout << "t : grabs a reference frame to match against" << endl;
    cout << "l : makes the reference frame new every frame" << endl;
    cout << "q or escape: quit" << endl;

    Mat frame;

    vector<DMatch> matches;

    BFMatcher desc_matcher(NORM_HAMMING);

    vector<Point2f> train_pts, query_pts;
    vector<KeyPoint> train_kpts, query_kpts;
    vector<unsigned char> match_mask;

    Mat gray;

    bool ref_live = true;

    Mat train_desc, query_desc;
    const int DESIRED_FTRS = 500;
    GridAdaptedFeatureDetector detector(new FastFeatureDetector(10, true), DESIRED_FTRS, 4, 4);

    Mat H_prev = Mat::eye(3, 3, CV_32FC1);
    for (;;)
    {
        capture >> frame;
        if (frame.empty())
            break;

        cvtColor(frame, gray, COLOR_RGB2GRAY);

        detector.detect(gray, query_kpts); //Find interest points

        brief.compute(gray, query_kpts, query_desc); //Compute brief descriptors at each keypoint location

        if (!train_kpts.empty())
        {

            vector<KeyPoint> test_kpts;
            warpKeypoints(H_prev.inv(), query_kpts, test_kpts);

            Mat mask = windowedMatchingMask(test_kpts, train_kpts, 25, 25);
            desc_matcher.match(query_desc, train_desc, matches, mask);
            drawKeypoints(frame, test_kpts, frame, Scalar(255, 0, 0), DrawMatchesFlags::DRAW_OVER_OUTIMG);

            matches2points(train_kpts, query_kpts, matches, train_pts, query_pts);

            if (matches.size() > 5)
            {
                Mat H = findHomography(train_pts, query_pts, RANSAC, 4, match_mask);
                if (countNonZero(Mat(match_mask)) > 15)
                {
                    H_prev = H;
                }
                else
                    resetH(H_prev);
                drawMatchesRelative(train_kpts, query_kpts, matches, frame, match_mask);
            }
            else
                resetH(H_prev);

        }
        else
        {
            H_prev = Mat::eye(3, 3, CV_32FC1);
            Mat out;
            drawKeypoints(gray, query_kpts, out);
            frame = out;
        }

        imshow("frame", frame);

        if (ref_live)
        {
            train_kpts = query_kpts;
            query_desc.copyTo(train_desc);
        }
        char key = (char)waitKey(2);
        switch (key)
//......... part of the code omitted here .........
Developer: mickyman550, Project: develop, Lines: 101, Source: video_homography.cpp
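The heart of this sample is the homography step: match keypoints, then let RANSAC separate inliers from outliers, and only trust the model when enough inliers survive. That logic in isolation, with the sample's own thresholds (reprojection error 4, at least 15 inliers); estimateH is a hypothetical helper name:

#include <opencv2/opencv.hpp>
using namespace cv;

// Estimate a homography from matched point pairs; match_mask flags RANSAC inliers.
Mat estimateH(const std::vector<Point2f>& train_pts,
              const std::vector<Point2f>& query_pts,
              std::vector<unsigned char>& match_mask) {
    if (train_pts.size() < 4) return Mat::eye(3, 3, CV_32FC1); // need >= 4 pairs
    Mat H = findHomography(train_pts, query_pts, RANSAC, 4, match_mask);
    // Too few inliers means a weak model: fall back to the identity.
    if (countNonZero(Mat(match_mask)) < 15) return Mat::eye(3, 3, CV_32FC1);
    return H;
}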

Example 11: main

int main(int argc, char** argv)
{
    CommandLineParser parser(argc, argv, keys);
    parser.about("Use this script to run object detection deep learning networks using OpenCV.");
    if (argc == 1 || parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }

    confThreshold = parser.get<float>("thr");
    float scale = parser.get<float>("scale");
    Scalar mean = parser.get<Scalar>("mean");
    bool swapRB = parser.get<bool>("rgb");
    int inpWidth = parser.get<int>("width");
    int inpHeight = parser.get<int>("height");

    // Open file with classes names.
    if (parser.has("classes"))
    {
        std::string file = parser.get<String>("classes");
        std::ifstream ifs(file.c_str());
        if (!ifs.is_open())
            CV_Error(Error::StsError, "File " + file + " not found");
        std::string line;
        while (std::getline(ifs, line))
        {
            classes.push_back(line);
        }
    }

    // Load a model.
    CV_Assert(parser.has("model"));
    Net net = readNet(parser.get<String>("model"), parser.get<String>("config"), parser.get<String>("framework"));
    net.setPreferableBackend(parser.get<int>("backend"));
    net.setPreferableTarget(parser.get<int>("target"));

    // Create a window
    static const std::string kWinName = "Deep learning object detection in OpenCV";
    namedWindow(kWinName, WINDOW_NORMAL);
    int initialConf = (int)(confThreshold * 100);
    createTrackbar("Confidence threshold, %", kWinName, &initialConf, 99, callback);

    // Open a video file or an image file or a camera stream.
    VideoCapture cap;
    if (parser.has("input"))
        cap.open(parser.get<String>("input"));
    else
        cap.open(0);

    // Process frames.
    Mat frame, blob;
    while (waitKey(1) < 0)
    {
        cap >> frame;
        if (frame.empty())
        {
            waitKey();
            break;
        }

        // Create a 4D blob from a frame.
        Size inpSize(inpWidth > 0 ? inpWidth : frame.cols,
                     inpHeight > 0 ? inpHeight : frame.rows);
        blobFromImage(frame, blob, scale, inpSize, mean, swapRB, false);

        // Run a model.
        net.setInput(blob);
        if (net.getLayer(0)->outputNameToIndex("im_info") != -1)  // Faster-RCNN or R-FCN
        {
            resize(frame, frame, inpSize);
            Mat imInfo = (Mat_<float>(1, 3) << inpSize.height, inpSize.width, 1.6f);
            net.setInput(imInfo, "im_info");
        }
        Mat out = net.forward();

        postprocess(frame, out, net);

        // Put efficiency information.
        std::vector<double> layersTimes;
        double freq = getTickFrequency() / 1000;
        double t = net.getPerfProfile(layersTimes) / freq;
        std::string label = format("Inference time: %.2f ms", t);
        putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));

        imshow(kWinName, frame);
    }
    return 0;
}
Developer: Aspie96, Project: opencv, Lines: 89, Source: object_detection.cpp

Example 12: main

int main(int argc, char ** argv){

    VideoCapture cap;


    if(argc > 1){
        cout << argv[1] << endl;
        cap.open(argv[1]);

    }else{
        cap.open(0);
        cap.set(CV_CAP_PROP_FRAME_WIDTH, 640.);
        cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480.);
    }

    if(!cap.isOpened())
    {
        cerr << "Cant open video" << endl;
        return EXIT_FAILURE;
    }
    Mat frame, gray, frame_prev, gray_prev;

    cap >> frame;

//    myloader loader("input_data.yaml",frame);

//    loader.parseConfig();

    w.fps = 0;
    w.new_frame = true;
Mat h;
//    bool enough = false;

    int maxCorners = 300;
    int minCorners = 45;
    vector<Point2f> corners, corners_prev, corners_init;

    vector<vector<Point2f>> correspondences(2, vector<Point2f>()); // init two vector of vectors :D


    TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03);
    Size subPixWinSize(10,10), winSize(31,31);

//    thread t1(Thread);

    namedWindow(WIN1, WINDOW_NORMAL);
    setMouseCallback(WIN1, CallBackFunc, NULL);


    Mat out;
    bool init = true, drawPoints = true;

    Size boxSize = {200,200};
    Rect box;

    box += boxSize;
    box += Point(1920/2, 1080/2);

    while(!DispThreadDone)
    {
        cap >> frame;

        if(frame.empty())
            break;

        cvtColor(frame, gray, CV_BGR2GRAY);

        Mat gray_blend;
        if(gray_prev.empty()){
            frame.copyTo(gray_blend);
        }
        else{
            frame.copyTo(gray_blend);
//            addWeighted(gray, 0.5, gray_prev, 0.5,0.0, gray_blend);
        }

        if (gray_blend.type()==CV_8UC1) {
                      //input image is grayscale
            cvtColor(gray_blend, out, CV_GRAY2RGB);

        } else {
            gray_blend.copyTo(out);
        }

        if(tracked_points.size() < 4){
            while(tracked_points.size() != 4){
                waitKey(10);

                for(Point p : tracked_points){
                    circle(out, p, 2,Scalar(255,0,0));
                }
                imshow(WIN1, out);
            }

//            for(int i = 0; i < tracked_points.size(); i ++){
//                tracked_points[i].y -= 300;
//            }
        }

        if(init){
//......... part of the code omitted here .........
Developer: straiki, Project: tracker-localizator-dp, Lines: 101, Source: main.cpp
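The sample prepares termcrit and winSize for pyramidal Lucas-Kanade tracking, but the tracking call itself falls in the omitted part. A hedged sketch of how those parameters typically feed calcOpticalFlowPyrLK between consecutive grayscale frames; trackPoints is a hypothetical helper name:

#include <opencv2/opencv.hpp>
using namespace cv;

// Track corners from the previous gray frame into the current one.
void trackPoints(const Mat& gray_prev, const Mat& gray,
                 std::vector<Point2f>& corners_prev, std::vector<Point2f>& corners) {
    TermCriteria termcrit(TermCriteria::COUNT | TermCriteria::EPS, 20, 0.03);
    Size winSize(31, 31);
    std::vector<uchar> status;
    std::vector<float> err;
    calcOpticalFlowPyrLK(gray_prev, gray, corners_prev, corners,
                         status, err, winSize, 3, termcrit);
    size_t k = 0; // compact the vector, keeping only successfully tracked points
    for (size_t i = 0; i < corners.size(); i++)
        if (status[i]) corners[k++] = corners[i];
    corners.resize(k);
}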

Example 13: main

int main(int argc, char** argv) {

    //Check arguments
    //***

    int set=trainData();

    //If set=0, proceed
    if(set==0) {


        //Take the two video inputs and measure the similarity
        float firstTF[1000];//***
        float secondTF[1000];
        int n_frames[1000];

        //////////////////////////////////////////////////////////////////////////////////////////////////
        Mat dicty;
        FileStorage fs("full_dictionary.yml", FileStorage::READ);
        fs["vocabulary"] >> dicty;
        fs.release();

        //set dictionary
        int dict_size=100;//***
        //create a nearest neighbor matcher
        Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher);
        //create Sift feature point extracter
        Ptr<FeatureDetector> detector(new SiftFeatureDetector());
        //create Sift descriptor extractor
        Ptr<DescriptorExtractor> extractor(new SiftDescriptorExtractor);
        //create BoF (or BoW) descriptor extractor
        BOWImgDescriptorExtractor bowDE(extractor,matcher);
        //Set the dictionary with the vocabulary we created in the first step
        bowDE.setVocabulary(dicty);

//////////////////////////////First Video//////////////////////////////////////////////////////////

        ofstream myfile;
        myfile.open ("first_video.txt");
        myfile << "Calculating TF_VECTORS.\n";

        //initialize capture
        VideoCapture cap;
        cap.open(argv[1]); //***

        double count = cap.get(CV_CAP_PROP_FRAME_COUNT); //get the frame count

        int jump=count/N; //extract 10 frames from the video ***
        int j=0;
        if(count<10) {
            jump=1;
        }
        int cnt=jump;
        myfile<<"Reading Video";
        Mat features;
        Mat desc;

        int u=0;
        while(u<10) {

            //Create matrix to store video frame
            Mat image;
            cap.set(CV_CAP_PROP_POS_FRAMES,cnt); //Set index to jump for particular count
            bool success = cap.read(image);
            if (!success) {
                cout << "Cannot read  frame " << endl;
                break;
            }

            ///////////Convert to gray scale/////////////
            Mat gray_image;
            cvtColor( image, gray_image, CV_BGR2GRAY );

            //To store the keypoints that will be extracted by SIFT
            vector<KeyPoint> keypoints;
            //Detect SIFT keypoints (or feature points)
            detector->detect(gray_image,keypoints);
            //To store the BoW (or BoF) representation of the image
            Mat bowDescriptor;
            //extract BoW (or BoF) descriptor from given image
            bowDE.compute(gray_image,keypoints,bowDescriptor);

            desc.push_back(bowDescriptor);

            cnt+=jump;
            j++;
            u++;
            ///next frame for the same video
        }
        //FileStorage fs("descriptor.yml", FileStorage::WRITE);
        //fs << "descriptor" << desc;
        //fs.release();



        for(int k=0; k<desc.cols; k++) {
            int tf=0;
            for(int l=0; l<desc.rows; l++) {
                if(desc.at<float>(l,k)>0) {

//......... part of the code omitted here .........
Developer: RasikaWarade, Project: Computer-Vision, Lines: 101, Source: main.cpp
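Example 13 builds a term-frequency vector per video and then, in the omitted part, "measures the similarity" of the two videos. The exact metric is not shown; one common choice for comparing TF vectors is cosine similarity, sketched here under that assumption:

#include <cmath>

// Cosine similarity of two term-frequency vectors of length n (e.g. dict_size).
float cosineSimilarity(const float* a, const float* b, int n) {
    float dot = 0.f, na = 0.f, nb = 0.f;
    for (int i = 0; i < n; i++) {
        dot += a[i] * b[i];
        na  += a[i] * a[i];
        nb  += b[i] * b[i];
    }
    if (na == 0.f || nb == 0.f) return 0.f; // an all-zero vector matches nothing
    return dot / (std::sqrt(na) * std::sqrt(nb));
}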

Example 14: idf_vector

void idf_vector(Mat full_dictionary) {
    ofstream myfile;
    myfile.open ("example.txt");
    myfile << "Calculating IDF_VECTORS.\n";

    std::string videoName="";

    int n_frames[100];
    //create dictionary
    int dict_size=100;//***


    //create a nearest neighbor matcher
    Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher);
    //create Sift feature point extracter
    Ptr<FeatureDetector> detector(new SiftFeatureDetector());
    //create Sift descriptor extractor
    Ptr<DescriptorExtractor> extractor(new SiftDescriptorExtractor);
    //create BoF (or BoW) descriptor extractor
    BOWImgDescriptorExtractor bowDE(extractor,matcher);
    //Set the dictionary with the vocabulary we created in the first step
    bowDE.setVocabulary(full_dictionary);

    for(int i=1; i<no_videos; i++) {

        stringstream temp;
        temp<<i;
        std::string no=temp.str();
        videoName="C:/Rasika/video_"+no+".avi"; //*** path can be changed

        //initialize capture
        VideoCapture cap;
        cap.open(videoName);

        double count = cap.get(CV_CAP_PROP_FRAME_COUNT); //get the frame count

        int jump=count/N; //extract 10 frames from the video ***
        int j=0;
        int cnt=0;
        myfile<<"Reading Video";
        Mat features;
        Mat desc;
        while(cnt<count) {

            //Create matrix to store video frame
            Mat image;
            cap.set(CV_CAP_PROP_POS_FRAMES,cnt); //Set index to jump for particular count
            bool success = cap.read(image);
            if (!success) {
                cout << "Cannot read  frame " << endl;
                break;
            }

            ///////////Convert to gray scale/////////////
            Mat gray_image;
            cvtColor( image, gray_image, CV_BGR2GRAY );
            imagesData++;//Number of images in the database

            //To store the keypoints that will be extracted by SIFT
            vector<KeyPoint> keypoints;
            //Detect SIFT keypoints (or feature points)
            detector->detect(gray_image,keypoints);
            //To store the BoW (or BoF) representation of the image
            Mat bowDescriptor;
            //extract BoW (or BoF) descriptor from given image
            bowDE.compute(gray_image,keypoints,bowDescriptor);

            desc.push_back(bowDescriptor);

            ////////////////
            //delay 33ms //***
            //waitKey(33);

            cnt+=jump;
            j++;

            ///next frame for the same video
        }



        /*myfile<<desc.rows<<endl;
        myfile<<desc.cols<<endl;

        int tf=0;
        for(int i=0;i<desc.rows;i++){
        	for(int j=0;j<desc.cols;j++){
        		if(desc.at<float>(i,j)>0){

        			//cout<<bowDescriptor.at<float>(i,j)<<endl;
        			tf++;
        		}
        	}
        }

        myfile<<"Term Frequency:"<<tf<<"\n";
        float idf=0;
        float logcal=count/tf;
        idf=log(logcal);
        myfile<<"IDF:"<<idf<<"\n";
//......... part of the code omitted here .........
Developer: RasikaWarade, Project: Computer-Vision, Lines: 101, Source: main.cpp

Example 15: main

int main( int argc, char** argv )
{
    Size boardSize, imageSize;
    float squareSize = 1.f, aspectRatio = 1.f;
    Mat cameraMatrix, distCoeffs;
    const char* outputFilename = "out_camera_data.yml";
    const char* inputFilename = 0;

    int i, nframes = 10;
    bool writeExtrinsics = false, writePoints = false;
    bool undistortImage = false;
    int flags = 0;
    VideoCapture capture;
    bool flipVertical = false;
    bool showUndistorted = false;
    bool videofile = false;
    int delay = 1000;
    clock_t prevTimestamp = 0;
    int mode = DETECTION;
    int cameraId = 0;
    vector<vector<Point2f> > imagePoints;
    vector<string> imageList;
    Pattern pattern = CHESSBOARD;

    if( argc < 2 )
    {
        help();
        return 0;
    }

    for( i = 1; i < argc; i++ )
    {
        const char* s = argv[i];
        if( strcmp( s, "-w" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", (unsigned int*)&boardSize.width ) != 1 || boardSize.width <= 0 )
                return fprintf( stderr, "Invalid board width\n" ), -1;
        }
        else if( strcmp( s, "-h" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", (unsigned int*)&boardSize.height ) != 1 || boardSize.height <= 0 )
                return fprintf( stderr, "Invalid board height\n" ), -1;
        }
        else if( strcmp( s, "-pt" ) == 0 )
        {
            i++;
            if( !strcmp( argv[i], "circles" ) )
                pattern = CIRCLES_GRID;
            else if( !strcmp( argv[i], "acircles" ) )
                pattern = ASYMMETRIC_CIRCLES_GRID;
            else if( !strcmp( argv[i], "chessboard" ) )
                pattern = CHESSBOARD;
            else
                return fprintf( stderr, "Invalid pattern type: must be chessboard or circles\n" ), -1;
        }
        else if( strcmp( s, "-s" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &squareSize ) != 1 || squareSize <= 0 )
                return fprintf( stderr, "Invalid board square width\n" ), -1;
        }
        else if( strcmp( s, "-n" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", (unsigned int*)&nframes ) != 1 || nframes <= 3 )
                return printf("Invalid number of images\n" ), -1;
        }
        else if( strcmp( s, "-a" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &aspectRatio ) != 1 || aspectRatio <= 0 )
                return printf("Invalid aspect ratio\n" ), -1;
            flags |= CV_CALIB_FIX_ASPECT_RATIO;
        }
        else if( strcmp( s, "-d" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", (unsigned int*)&delay ) != 1 || delay <= 0 )
                return printf("Invalid delay\n" ), -1;
        }
        else if( strcmp( s, "-op" ) == 0 )
        {
            writePoints = true;
        }
        else if( strcmp( s, "-oe" ) == 0 )
        {
            writeExtrinsics = true;
        }
        else if( strcmp( s, "-zt" ) == 0 )
        {
            flags |= CV_CALIB_ZERO_TANGENT_DIST;
        }
        else if( strcmp( s, "-p" ) == 0 )
        {
            flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
        }
        else if( strcmp( s, "-v" ) == 0 )
        {
            flipVertical = true;
        }
        else if( strcmp( s, "-V" ) == 0 )
        {
            videofile = true;
        }
//......... part of the code omitted here .........
Developer: parched, Project: enmt401, Lines: 101, Source: calibration.cpp
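The argument parsing above feeds a standard calibration pipeline whose core call falls in the omitted part. A hedged sketch of that core — laying out the chessboard's 3-D corner grid and handing the collected image points to calibrateCamera; runCalibration is a hypothetical helper name:

#include <opencv2/opencv.hpp>
using namespace cv;

// Calibrate once enough views of the board have been collected.
// Returns the RMS reprojection error; lower is better.
double runCalibration(const std::vector<std::vector<Point2f> >& imagePoints,
                      Size boardSize, float squareSize, Size imageSize, int flags,
                      Mat& cameraMatrix, Mat& distCoeffs) {
    std::vector<Point3f> grid; // the board's corner positions in its own plane
    for (int y = 0; y < boardSize.height; y++)
        for (int x = 0; x < boardSize.width; x++)
            grid.push_back(Point3f(x * squareSize, y * squareSize, 0));
    std::vector<std::vector<Point3f> > objectPoints(imagePoints.size(), grid);

    std::vector<Mat> rvecs, tvecs;
    return calibrateCamera(objectPoints, imagePoints, imageSize,
                           cameraMatrix, distCoeffs, rvecs, tvecs, flags);
}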


Note: The VideoCapture class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright belongs to the original authors, and distribution or use must follow the corresponding project's license. Do not reproduce without permission.