

C++ VideoCapture::isOpened Method Code Examples

This article collects typical usage examples of the C++ VideoCapture::isOpened method. If you are wondering how to call VideoCapture::isOpened in C++, what it is for, or what it looks like in real code, the selected examples below should help. You can also explore further usage examples of the VideoCapture class that this method belongs to.


The following presents 15 code examples of the VideoCapture::isOpened method, sorted by popularity by default.
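Before the full examples, here is a minimal, self-contained sketch of the pattern they all share: open a capture source, check isOpened, and bail out if the open failed. The camera index 0 and the error message are illustrative assumptions, not taken from any of the examples below.

#include <iostream>
#include <opencv2/opencv.hpp>

int main()
{
    // Open the default camera; a video file path could be passed instead.
    cv::VideoCapture cap(0);

    // isOpened() reports whether the device or file was opened successfully.
    if (!cap.isOpened())
    {
        std::cerr << "Could not open the capture source" << std::endl;
        return -1;
    }

    cv::Mat frame;
    cap >> frame;  // grab a single frame to confirm the stream delivers data
    std::cout << "Frame size: " << frame.cols << "x" << frame.rows << std::endl;
    return 0;
}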

Example 1: run

void App::run()
{
    running = true;
    cv::VideoWriter video_writer;

    Size win_stride(args.win_stride_width, args.win_stride_height);
    Size win_size(args.win_width, args.win_width * 2);
    Size block_size(args.block_width, args.block_width);
    Size block_stride(args.block_stride_width, args.block_stride_height);
    Size cell_size(args.cell_width, args.cell_width);

    cv::Ptr<cv::cuda::HOG> gpu_hog = cv::cuda::HOG::create(win_size, block_size, block_stride, cell_size, args.nbins);
    cv::HOGDescriptor cpu_hog(win_size, block_size, block_stride, cell_size, args.nbins);

    if(args.svm_load) {
//        std::vector<float> svm_model;
//        const std::string model_file_name = args.svm;
//        FileStorage ifs(model_file_name, FileStorage::READ);
//        if (ifs.isOpened()) {
//            ifs["svm_detector"] >> svm_model;
//        } else {
//            const std::string what =
//                    "could not load model for hog classifier from file: "
//                    + model_file_name;
//            throw std::runtime_error(what);
//        }

//        // check if the variables are initialized
//        if (svm_model.empty()) {
//            const std::string what =
//                    "HoG classifier: svm model could not be loaded from file"
//                    + model_file_name;
//            throw std::runtime_error(what);
//        }

        Ptr<SVM> svm;
        // Load the trained SVM.
        svm = StatModel::load<SVM>( args.svm);
        // Convert the trained SVM into a detector vector for the HOG descriptors
        vector< float > hog_detector;
        get_svm_detector( svm, hog_detector );

        gpu_hog->setSVMDetector(hog_detector);
        cpu_hog.setSVMDetector(hog_detector);
    } else {
        // Create HOG descriptors and detectors here
        Mat detector = gpu_hog->getDefaultPeopleDetector();

        gpu_hog->setSVMDetector(detector);
        cpu_hog.setSVMDetector(detector);
    }

    cout << "gpusvmDescriptorSize : " << gpu_hog->getDescriptorSize()
         << endl;
    cout << "cpusvmDescriptorSize : " << cpu_hog.getDescriptorSize()
         << endl;

    while (running)
    {
        VideoCapture vc;
        Mat frame;
        vector<String> filenames;

        unsigned int count = 1;

        if (args.src_is_video)
        {
            vc.open(args.src.c_str());
            if (!vc.isOpened())
                throw runtime_error(string("can't open video file: " + args.src));
            vc >> frame;
        }
        else if (args.src_is_folder) {
Author: brightming, Project: ImageTest, Lines: 73, Source: test_opencv3_hog.cpp

Example 2: main

int main()
{
    SurfFeatureDetector detector(1000);
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
    Ptr<DescriptorExtractor> extractor = new SurfDescriptorExtractor();
    BOWImgDescriptorExtractor bowide( extractor, matcher );
    
    // load vocabulary data
    Mat vocabulary;
    FileStorage fs( "vocabulary.xml", FileStorage::READ);
    fs["vocabulary"] >> vocabulary;
    fs.release();
    if( vocabulary.empty()  ) return 1;
    
    bowide.setVocabulary( vocabulary );
    
    CvSVM svm_left; svm_left.load("learned_lib_left.xml");
    CvSVM svm_left_mid; svm_left_mid.load("learned_lib_left_mid.xml");
    CvSVM svm_mid; svm_mid.load("learned_lib_mid.xml");
    CvSVM svm_right_mid; svm_right_mid.load("learned_lib_right_mid.xml");
    CvSVM svm_right; svm_right.load("learned_lib_right.xml");
    
    chdir("/home/netbook/Desktop/Capture_Mid");
    // Initialize capturing live feed from the camera device 1
    VideoCapture capture = VideoCapture(1);
    // Couldn't get a device? Throw an error and quit
    if(!capture.isOpened())
    {
        printf("Could not initialize capturing...");
        return -1;
    }
    int n = 0;
    // An infinite loop
    while(true)
    {
        // Will hold a frame captured from the camera
        Mat frame, response_hist;
        
        // If we couldn't grab a frame... quit
        if(!capture.read(frame))
            break;
        imshow("video", frame);
        vector<KeyPoint> keypoints;
        detector.detect(frame,keypoints);
        bowide.compute(frame, keypoints, response_hist);
        
        vector<double> region_list;
        region_list.push_back(svm_left.predict(response_hist,true));
        region_list.push_back(svm_left_mid.predict(response_hist,true));
        region_list.push_back(svm_mid.predict(response_hist,true));
        region_list.push_back(svm_right_mid.predict(response_hist,true));
        region_list.push_back(svm_right.predict(response_hist,true));
        
        long int min_index = std::min_element(region_list.begin(), region_list.end()) - region_list.begin();
        
        if(min_index != 2){
            const string file = to_string(n) + ".jpg";
            imwrite( file, frame );
            n++;
        }
        
        // Wait for a keypress
        int c = cvWaitKey(10);
        if(c!=-1)
        {
            // If pressed, break out of the loop
            break;
        }
    }
    
    return 0;
}
Author: TAMU-CS635, Project: Robot, Lines: 72, Source: capture_images_mid.cpp

Example 3: main

int main(int argc, char *argv[])
{
      if(argc <2)
      {
         return -1;
      }
	cap.open(argv[1]);
	/* Get the total number of frames in the video */
	frameNumber = cap.get(CV_CAP_PROP_FRAME_COUNT);    
	if(!cap.isOpened())
	{
	   cout<<"can't open the video!"<<endl;
	   return 0;
	}

	int flag = 0;
	/* Line buffer */
	char  buf[100];										
	ifstream in("test.txt");
	int totalnum = 0;

	while (in.getline(buf,100))
	{
		string str = buf;
		vector<int>   myint;
		/* Extract the numbers from this line */
		str2num(str,myint);	
		/* First line: get the width and height */
		if(0==totalnum)
		{
			fishCorrect.dstWidth = myint[0];
			fishCorrect.dstHeight = myint[1];
			fishCorrect.wRatio = myint[2];
			fishCorrect.hRatio = myint[3];
		}
		/* Second line: parameters of the first unwrapped view: azimuth, elevation, field of view */
		else if(1==totalnum)
		{
			fishCorrect.phi0 = myint[0];
			fishCorrect.sita0 = myint[1];
			fishCorrect.gama0 = myint[2];
		}
		/* Third line: parameters of the second unwrapped view: azimuth, elevation, field of view */
		else if(2==totalnum)
		{
			fishCorrect.phi1 = myint[0];
			fishCorrect.sita1 = myint[1];
			fishCorrect.gama1 = myint[2];
		}
		/* Fourth line: parameters of the third unwrapped view: azimuth, elevation, field of view */
		else if(3==totalnum)
		{
			fishCorrect.phi2 = myint[0];
			fishCorrect.sita2 = myint[1];
			fishCorrect.gama2 = myint[2];
		}
		/* Fifth line: parameters of the fourth unwrapped view: azimuth, elevation, field of view */
		else if(4==totalnum)
		{
			fishCorrect.phi3 = myint[0];
			fishCorrect.sita3 = myint[1];
			fishCorrect.gama3 = myint[2];
		}	
		totalnum++;
	}
	cout<<"playing...."<<endl;
	/* Process the video in a loop */
        int wflag = 0;
	int num =0;
	
	fishCorrect.dstImg = Mat::zeros(Size(fishCorrect.dstWidth*2,fishCorrect.dstHeight*2),CV_8UC3);
	fishCorrect.position =  (int*)malloc(fishCorrect.dstWidth*fishCorrect.dstHeight*8*sizeof(int));

	for(int i = 0;i < frameNumber;++i)               
	{
	   		
		pthread_t tid1,tid2;
         	void *tret;
		if(i <= 21)
		{
		    cap >> frame;			
		}
		if(i >20)
		{
		    if (flag == 0)
		    {
		    	/* Get the fisheye image region */
				fishCorrect.GetArea(frame);
				flag =1;
		    }
		   if (i>21)
		    {
			
			/* Create thread 2 */
		      if (pthread_create(&tid2,NULL,thrd_func2,NULL)!=0)
			 {
         		    printf("Create thread 2 error!\n");
         		    exit(1);
     			}
	     		/* Wait for thread 1 to finish */
//......... part of the code omitted .........
Author: kongqx, Project: FishEye-Correct, Lines: 101, Source: FishVideoLinux.cpp

Example 4: Produtor1

void Produtor1 ()
{

    /* OpenCV variables */
    VideoCapture cap;
    Mat gray, frame;

    int i, j, d=D, ss, key=0;
    long k=0L, l, ns=2L*(long)(0.5*FS*T), m=ns/N,sso=HIFI?0L:128L, ssm=HIFI?32768L:128L;
    double **A, a, t, dt=1.0/FS, *w, *phi0, s, y, yp, z, tau1, tau2, x, theta,
                      scale=0.5/sqrt((double)M), q, q2, r, sl, sr, tl, tr, yl, ypl, yr, ypr,
                      zl, zr, hrtf, hrtfl, hrtfr, v=340.0,  /* v = speed of sound (m/s) */
                                                  hs=0.20;  /* hs = characteristic acoustical size of head (m) */

    w    = C_ALLOC(M, double);
    phi0 = C_ALLOC(M, double);
    A    = C_ALLOC(M, double *);

    for (i=0; i<M; i++) A[i] = C_ALLOC(N, double);  /* M x N pixel matrix */

    /* Set lin|exp (0|1) frequency distribution and random initial phase */
    if (d)
        for (i=0; i<M; i++)
            w[i] = TwoPi * FL * pow(1.0* FH/FL,1.0*i/(M-1));
    else
        for (i=0; i<M; i++)
            w[i] = TwoPi * FL + TwoPi * (FH-FL)   *i/(M-1) ;

    for (i=0; i<M; i++) phi0[i] = TwoPi * rnd();

    int cam_id = 0;  /* First available OpenCV camera */
    /* Optionally override ID from command line parameter: prog.exe cam_id */

    cap.open(cam_id);
    if (!cap.isOpened())
    {
        // printf("resenha");
        fprintf(stderr,"Could not open camera %d\n", cam_id);
        exit(1);
    }
    printf("abriu camera\n");
    /* Setting standard capture size, may fail; resize later */

    cap.read(frame);  /* Dummy read needed with some devices */
    //cap.set(CV_CAP_PROP_FRAME_WIDTH , 176);
    //cap.set(CV_CAP_PROP_FRAME_HEIGHT, 144);
    cap.set(CV_CAP_PROP_FRAME_WIDTH , 128);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 128);

    if (VIEW)    /* Screen views only for debugging */
    {
        namedWindow("Large", CV_WINDOW_AUTOSIZE);
        namedWindow("Small", CV_WINDOW_AUTOSIZE);
    }
    int count = 0;
    bool bSuccess;
    while (key != 27)    /* Escape key */
    {
        bSuccess=true;
       for (int i=0; i<5 && bSuccess; i++)
			bSuccess = cap.read(frame);

        cap.read(frame);

        if (frame.empty())
        {
            /* Sometimes initial frames fail */
            fprintf(stderr, "Capture failed\n");
            key = waitKey((int)(100));
            continue;
        }
        printf("capturou frame\n");

        Mat tmp;
        cvtColor(frame,tmp,CV_BGR2GRAY);
        if (frame.rows != M || frame.cols != N)
            resize(tmp, gray, Size(N,M));

        else gray=tmp;

        if (VIEW)    /* Screen views only for debugging */
        {
            /* imwrite("hificodeLarge.jpg", frame); */
            imshow("Large", frame);
            /* imwrite("hificodeSmall.jpg", gray); */
            imshow("Small", gray);
        }

        key = waitKey((int)(10));

        if (CAM)    /* Set live camera image */
        {
            for (i=0; i<M; i++)
            {
                for (j=0; j<N; j++)
                {
                    int mVal=gray.at<uchar>(M-1-i,j)/16;

                    if (mVal == 0)
                        A[i][j]=0;
//......... part of the code omitted .........
Author: arthurmoreno, Project: Desafio-Intel, Lines: 101, Source: main.cpp

Example 5: main

int main(int argc, char* argv[])
{

	VideoCapture capture;

	// Objects
	Mat frame;

	// keyboard pressed
	char keypressed = 0;
	bool success;

	// Load image from disk
	capture.open(0);
	// if not success, exit program
	if (!capture.isOpened()){
		cout << "error in VideoCapture: check path file" << endl;
		getchar();
		return 1;
	}

	/// Parameters for Shi-Tomasi algorithm
	vector<Point2f> cornersA, cornersB;
	double qualityLevel = 0.01;
	double minDistance = 10;
	int blockSize = 3;
	bool useHarrisDetector = false; 
	double k = 0.04;
	int maxCorners = MAX_CORNERS;

	// winsize has to be 11 or 13, otherwise nothing is found
	vector<uchar> status;
	vector<float> error;
	int winsize = 11;
	int maxlvl = 5;

	// Objects
	Mat img_prev, img_next, grayA, grayB;

	success = capture.read(frame);
	// if no success exit program
	if (success == false){
		cout << "Cannot read the frame from file" << endl;
		getchar();
		return 1;
	}

	img_prev = frame.clone();

	// Windows for all the images
	namedWindow("Corners A", CV_WINDOW_AUTOSIZE);
	namedWindow("Corners B", CV_WINDOW_AUTOSIZE);
    
    VideoWriter outputVideo;
    Size S = Size((int) capture.get(CV_CAP_PROP_FRAME_WIDTH),    // Acquire input size
                  (int) capture.get(CV_CAP_PROP_FRAME_HEIGHT));
    int ex = static_cast<int>(capture.get(CV_CAP_PROP_FOURCC));  
    outputVideo.open("video.avi", CV_FOURCC('P','I','M','1'), capture.get(CV_CAP_PROP_FPS), S, true);
    
    if (!outputVideo.isOpened())
    {
        cout  << "Could not open the output video for write: "  << endl;
        return -1;
    }
    
	while (keypressed != ESCAPE)
	{
		// read frame by frame in a loop
		success = capture.read(frame);
		// if no success exit program
		if (success == false){
			cout << "Cannot read the frame from file" << endl;
			return 1;
		}

		img_next = frame.clone();

		// convert to grayScale
		cvtColor(img_prev, grayA, CV_RGB2GRAY);
		cvtColor(img_next, grayB, CV_RGB2GRAY);

		/// Apply corner detection
		goodFeaturesToTrack(grayA,
			cornersA,
			maxCorners,
			qualityLevel,
			minDistance,
			Mat(),
			blockSize,
			useHarrisDetector,
			k);

		calcOpticalFlowPyrLK(grayA, grayB, cornersA, cornersB, status, error,
			Size(winsize, winsize), maxlvl);

		/// Draw corners detected
		//cout << "Number of cornersA detected: " << cornersA.size() << endl;
		//cout << "Optical Flow corners detected: " << cornersB.size() << endl;
		for (int i = 0; i < cornersA.size(); i++)
		{
//......... part of the code omitted .........
Author: 4m1g0, Project: openCV-tutorial, Lines: 101, Source: OpticalFlow.cpp

Example 6: main

//this is a sample for foreground detection functions
int main(int argc, const char** argv)
{
    help();

    CommandLineParser parser(argc, argv, keys);
    bool useCamera = parser.has("camera");
    bool smoothMask = parser.has("smooth");
    string file = parser.get<string>("file_name");
    string method = parser.get<string>("method");
    VideoCapture cap;
    bool update_bg_model = true;

    if( useCamera )
        cap.open(0);
    else
        cap.open(file.c_str());

    parser.printMessage();

    if( !cap.isOpened() )
    {
        printf("can not open camera or video file\n");
        return -1;
    }



     /// Set background subtractor object 
     Ptr<BackgroundSubtractor> bg_model = method == "knn" ?
            createBackgroundSubtractorKNN().dynamicCast<BackgroundSubtractor>() :
            createBackgroundSubtractorMOG2().dynamicCast<BackgroundSubtractor>();

    

    /// Set VideoWriter object
    Size frameSize = Size((int) cap.get(CAP_PROP_FRAME_WIDTH), (int) cap.get(CAP_PROP_FRAME_HEIGHT));
    VideoWriter put("./backgroundSubtraction.mpg", VideoWriter::fourcc('M','P','E','G'), 30, frameSize); // works

	if(!put.isOpened())
	{
		cout << "File could not be created for writing. Check permissions" << endl;
		return -1;
	}

 
   Mat img0, img, fgmask, fgimg, outputFrame;

    for(;;)
    {
        cap >> img0;

        if( img0.empty() )
            break;

        /// Reduce to 1/4 of the original size
        resize(img0, img, Size(frameSize.width/2, frameSize.height/2), 0, 0, INTER_LINEAR);

        if( fgimg.empty() )
          fgimg.create(img.size(), img.type());

        /// Update the background model
        bg_model->apply(img, fgmask, update_bg_model ? -1 : 0);
        if( smoothMask )
        {
            GaussianBlur(fgmask, fgmask, Size(25, 25), 3.5, 0);
            threshold(fgmask, fgmask, 60, 255, THRESH_BINARY);
        }

        /// Get foreground image
        fgimg = Scalar::all(0);
        img.copyTo(fgimg, fgmask);

        /// Get background image
        Mat bgimg;
        bg_model->getBackgroundImage(bgimg);

     
        stringstream text1, text2, text3, text4;
        text1 << "Raw Video";
        putText(img, text1.str(), cv::Point(10, img.size().height - 10), FONT_HERSHEY_PLAIN, 1.5, Scalar(0,255,0)); 
        text2 << "Mask";
        putText(fgmask, text2.str(), cv::Point(10, img.size().height - 10), FONT_HERSHEY_PLAIN, 1.5, Scalar(255,255,255));
        text3 << "Foreground";
        putText(fgimg, text3.str(), cv::Point(10, img.size().height - 10), FONT_HERSHEY_PLAIN, 1.5, Scalar(0,255,0));
        text4 << "Background";
        putText(bgimg, text4.str(), cv::Point(10, img.size().height - 10), FONT_HERSHEY_PLAIN, 1.5, Scalar(0,255,0));
           
        /// Display output camera frames        
         outputFrame = append4( img,
                               fgmask,
                               fgimg,
                               bgimg); 
        
         imshow(windowName, outputFrame);  
         put << outputFrame;   
       
       

        char k = (char)waitKey(30);
//......... part of the code omitted .........
Author: melvincabatuan, Project: HelloOpenCV, Lines: 101, Source: BackgroundSubtraction.cpp

Example 7: main

/** @function main */
int main( int argc, const char** argv )
{
    bool useCamera = true;
    bool useFiles = false;
    String big_directory;
    if(argc > 1){
        useCamera = false;
        useFiles = true;
        big_directory = argv[1];
    }
    VideoCapture cap;
    Mat frame;
    if(useCamera){
        cap.open(0);
        if(!cap.isOpened()){
            cerr<<"Failed to open camera"<<endl;
            return -1;
        }
        while(frame.empty()){
            cap>>frame;
        }
    }
    //-- 1. Load the cascades
    if( !face_cascade.load( face_cascade_name ) ){ 
        cerr<<"Error loading cascade"<<endl;
        return -1; 
    }    
    vector<string> dirs;
    if(useFiles){
        GetFilesInDirectory(dirs, big_directory);
        cout<<dirs.size()<<endl;
        for(int i = 0; i < dirs.size(); i++){
            cout<<dirs[i]<<endl;
        }
    }
    while(!dirs.empty() || useCamera) {
        vector<string> files;
        string subdir;
        if(useFiles){
            subdir = dirs.back();
            dirs.pop_back();
            GetFilesInDirectory(files, subdir);
        }
        while (true) {
            if (useCamera) {
                cap >> frame;
                if (!frame.empty()) {
                    detectAndDisplay(frame, "camera");
                }
                else {
                    cout << " --(!) No captured frame -- Break!" << endl;
                    break;
                }
            }
            if (useFiles) {
                if (files.empty()) {
                    cout << subdir<<" finished" << endl;
                    break;
                }
                string name = files.back();
                cout << "converting " << name << endl;
                frame = imread(name);
                transpose(frame, frame);
                flip(frame, frame, 1);
                files.pop_back();
                vector<string> splitName;
                splitName = split(subdir, '/');


                detectAndDisplay(frame, splitName.back().c_str());
            }

            int c = waitKey(10);
            if (c == 27) {
                return 0;
            }
        }
    }
    
    return 0;
}
Author: ItseezSummerSchool2015FaceDetection, Project: FaceDetection, Lines: 82, Source: videoCrop.cpp

Example 8: main


//......... part of the code omitted .........
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
		}
		else if(ChooseVideo == Airport)
		{
			if (videoIndex == 0)
			{
				capture.open("F:/roadDB/Airport/cam_20150806120920.mp4");
				gpsFile = fopen("F:/roadDB/Airport/gps/list_20150806120920.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
		}
		else if(ChooseVideo == Ford)
		{
			if (videoIndex == 0)
			{
				capture.open("F:/roadDB/Ford/NewcoData/MKS360_20130722_003_Uncompressed.avi");
				gpsFile = fopen("F:/roadDB/Ford/NewcoData/gps_003.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
		}
		else if(ChooseVideo == VW2)
		{
			if (videoIndex == 0)
			{
				capture.open("C:/Users/ypren/Documents/newco_demo/Demo/Ford/inVehicle/inVehicle/resource/Germany/Lehre2/reverse/cap_20150722110100_cut/cam_20150722110100.mp4");
				gpsFile = fopen("C:/Users/ypren/Documents/newco_demo/Demo/Ford/inVehicle/inVehicle/resource/Germany/Lehre2/reverse/cap_20150722110100_cut/list_20150722110100.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
		}

		int number_of_frames = capture.get(CV_CAP_PROP_POS_FRAMES);

		if ( !capture.isOpened() )  // if not success, exit program
		{
			cout<<"error" <<endl;
			return -1;
		}
		else
		{
			capture.set(CV_CAP_PROP_POS_AVI_RATIO, 0);
			double fps = capture.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video	
		}

		Size S = Size((int) capture.get(CV_CAP_PROP_FRAME_WIDTH), (int) capture.get(CV_CAP_PROP_FRAME_HEIGHT));	

		S.height *= inParam.imageScaleHeight;
		S.width *= inParam.imageScaleWidth;
        
        vector<dataEveryRow> roadPaintData;
		vector<dataEveryRow> roadPaintDataALL;
	    vector<gpsInformationAndInterval> GPSAndInterval;
        
		////////////////////////////////////////////////////////////////////////////
		Mat history = Mat::zeros(S.height *HH*SCALE,S.width, CV_8UC1);
		
		int rowIndex = 0;
		int IntervalTmp = 0;
		int Interval = 0;
		int GPStmp = 0;

		Point2d GPS_next;

        gpsInformationAndInterval gpsAndInterval;
		Mat image;
		int intrtmp = 0;
Author: miaozhendaoren, Project: ygomi_vscpp, Lines: 67, Source: roadScan_main_liang.cpp

Example 9: main

void main(int argc, char *argv[])
{
	Mat emptyFrame = Mat::zeros(Camera::reso_height, Camera::reso_width, CV_8UC3);
	Thesis::FastTracking fastTrack(20); // used to be 50; reason unknown
	Thesis::KalmanFilter kalman;
	kalman.initialise(CoordinateReal(0, 0, 0));
	kalman.openFile();
	// the two stereoscope images
	Camera one(0,-125,0,0,0,90);
	Camera two(2, 125,0,0,0,90);
	Camera three;
	// list of cameras and cameraLocs
	std::vector<Camera> cameraList;
	std::vector<CoordinateReal> locList;
	VideoWriter writeOne ;
	VideoWriter writeTwo;
	VideoWriter writeThree;
	VideoCapture capOne;
	VideoCapture capTwo;
	VideoCapture capThree;
	Thesis::Stats stat;
	cv::Point2d horizontalOne(0,Camera::reso_height/2);
	cv::Point2d horizontalTwo(Camera::reso_width, Camera::reso_height/2);
	cv::Point2d verticalOne(Camera::reso_width / 2, 0);
	cv::Point2d verticalTwo(Camera::reso_width / 2, Camera::reso_height);
	ofstream framesFile_;
	framesFile_.open("../../../../ThesisImages/fps_ABSDIFF.txt");
	double framesPerSecond = 1 / 10.0;
	//open the recorders
	FeatureExtraction surf(5000);
	Stereoscope stereo;
	Util util;
	bool once = false;
	bool foundInBoth = false;
	bool foundInMono = false;
	std::vector<cv::Point2f> leftRect(4);
	cv::Rect leftRealRect;
	cv::Rect rightRealRect;
	std::vector<cv::Point2f> rightRect(4);
	cv::Mat frameLeft;
	cv::Mat frameRight;
	cv::Mat frameThree;
	cv::Mat prevFrameLeft;
	cv::Mat prevFrameRight;
	cv::Mat prevFrameThree;

	// check if you going to run simulation or not or record
	cout << " run simulation: 's' or normal: 'n' or record 'o' or threeCameras 'c' " << endl;
	imshow("main", emptyFrame);
	char command = waitKey(0);

	string left = "../../../../ThesisImages/leftTen.avi";
	string right = "../../../../ThesisImages/rightTen.avi";
	string mid = "../../../../ThesisImages/midTen.avi";
	commands(command);
	emptyFrame = Mat::ones(10, 10, CV_64F);
	imshow("main", emptyFrame);
	command = waitKey(0);
	camCount(command);
	// checkt the cam count 
	if (multiCams){
		//load in all the cameras
		three = Camera(3, 175, -50, 585, 7.1, 97);//Camera(3, 200, -60, 480, 7,111);
	}
	//==========hsv values=======================
	cv::Mat hsvFrame;
	cv::Mat threshold;
	int iLowH = 155;
	int iHighH = 179;

	int iLowS = 75;
	int iHighS = 255;

	int iLowV = 0;
	int iHighV = 255;
	
	//=================================
	double elapsedTime = 0;
	double waitDelta = 0;	
	if (record){
		writeOne.open("../../../../ThesisImages/leftTen.avi", 0, 10, cv::Size(864, 480), true);
		writeTwo.open("../../../../ThesisImages/rightTen.avi", 0, 10, cv::Size(864, 480), true);
		writeThree.open("../../../../ThesisImages/midTen.avi", 0, 10, cv::Size(864, 480), true);
	}else if (simulation){
		capOne.open(left);
		capTwo.open(right);
		capThree.open(mid);
		assert(capOne.isOpened() && capTwo.isOpened());
	}
	 if (hsv){
		//Create trackbars in "Control" window
		cvCreateTrackbar("LowH", "main", &iLowH, 179); //Hue (0 - 179)
		cvCreateTrackbar("HighH", "main", &iHighH, 179);

		cvCreateTrackbar("LowS", "main", &iLowS, 255); //Saturation (0 - 255)
		cvCreateTrackbar("HighS", "main", &iHighS, 255);

		cvCreateTrackbar("LowV", "main", &iLowV, 255); //Value (0 - 255)
		cvCreateTrackbar("HighV", "main", &iHighV, 255);
	}
//......... part of the code omitted .........
Author: Beknight, Project: ProofOfConcept, Lines: 101, Source: main.cpp

Example 10: main

int main(int argc, char *argv[]) {
	ros::init(argc, argv, "verify_tracking_node");
	ros::NodeHandle n;
	std::string port;
	ros::param::param<std::string>("~port", port, "/dev/ttyACM0");
	int baud;
	ros::param::param<int>("~baud", baud, 57600);
	ros::Rate loop_rate(10);

	ros::Publisher servox_pub = n.advertise<std_msgs::Char>("servox_chatter", 1000);
	ros::Publisher servoy_pub = n.advertise<std_msgs::Char>("servoy_chatter", 1000);
	ros::Publisher motor_pub = n.advertise<std_msgs::Char>("motor_chatter", 1000);

	ros::Publisher verify_pub = n.advertise<std_msgs::Char>("verify_chatter", 1);

	Subscriber track_sub = n.subscribe("track_chatter", 1, trackCallback);
	Subscriber host_sub = n.subscribe("host_chatter", 1, hostCallback);
	
	cv_result_t cv_result = CV_OK;
	int main_return = -1;
	cv_handle_t handle_detect = NULL;
	cv_handle_t handle_track = NULL;
	VideoCapture capture;
	double time;
	capture.open(0);         // open the camera
	if (!capture.isOpened()) {
		fprintf(stderr, "Verify track can not open camera!\n");
		return -1;
	}
	capStatus = OPEN;
	int frame_width = capture.get(CV_CAP_PROP_FRAME_WIDTH);
	int frame_height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
	int frame_half_width = frame_width >> 1;
	int frame_half_height = frame_height >> 1;
	//printf("width %d height %d \n", frame_width, frame_height);
	Point expect(frame_half_width , frame_half_height);
	struct timeval start0, end0;
	struct timeval start1, end1;
	struct timeval start2, end2;
	struct timeval start3, end3;
	struct timeval start4, end4;
	struct timeval start5, end5;
#ifdef TIME
	gettimeofday(&start0, NULL);
#endif
	cv_handle_t handle_verify = cv_verify_create_handle("data/verify.tar");
#ifdef TIME
	gettimeofday(&end0, NULL);
	time = COST_TIME(start0, end0);
	printf("get from verify tar time cost = %.2fs \n", time / 1000000);
#endif
#if 1
	const int person_number = 3;
	Mat p_image_color_1[person_number], p_image_color_color_1[person_number], p_image_color_2, p_image_color_color_2;
	Mat tmp_frame;
	cv_face_t *p_face_1[person_number];
	cv_face_t *p_face_2;
	int face_count_1[person_number] = {0};
	int face_count_2 = 0;
	cv_feature_t *p_feature_1[person_number];
	cv_feature_t *p_feature_new_1[person_number];
	unsigned int feature_length_1[person_number];
	p_image_color_1[0] = imread("00.JPG");
	p_image_color_1[1] = imread("01.JPG");
	p_image_color_1[2] = imread("02.JPG");
	//p_image_color_1[3] = imread("04.jpg");
	char *string_feature_1[person_number];
#else
	Mat p_image_color_2, p_image_color_color_2;

	const int person_number = 4;
	cv_face_t *p_face_2 = NULL;
	vector<cv_face_t *>p_face_1(person_number,NULL);
	vector<int>face_count_1(person_number, 0);
	int face_count_2 = 0;
	vector<Mat>p_image_color_1(person_number);
	vector<Mat>p_image_color_color_1(person_number);
	vector<cv_feature_t *>p_feature_1(person_number, NULL);
	vector<cv_feature_t *>p_feature_new_1(person_number, NULL);
	vector<unsigned int>feature_length_1(person_number, 0);
	// load image
	p_image_color_1.push_back(imread("01.JPG"));
	p_image_color_1.push_back(imread("02.JPG"));
	p_image_color_1.push_back(imread("03.JPG"));
	p_image_color_1.push_back(imread("04.JPG"));
	char *string_feature_1[person_number];
#endif

	for(int i = 0; i < person_number; i++)
	{
		if (!p_image_color_1[i].data ) {
			fprintf(stderr, "fail to read %d image \n", i);
			//return -1;
			goto RETURN;
		}
	}
	for(int i = 0; i < person_number; i++)
		cvtColor(p_image_color_1[i], p_image_color_color_1[i], CV_BGR2BGRA);
	// init detect handle
	handle_detect = cv_face_create_detector(NULL, CV_FACE_SKIP_BELOW_THRESHOLD | CV_DETECT_ENABLE_ALIGN);
//......... part of the code omitted .........
Author: Yvaine, Project: speech-robot, Lines: 101, Source: verify_tracking_node1.5.cpp

Example 11: main

/**
 * Canny Edge Detector.
 *
 * argv[1] = source file or will default to "../../resources/traffic.mp4" if no
 * args passed.
 *
 * @author sgoldsmith
 * @version 1.0.0
 * @since 1.0.0
 */
int main(int argc, char *argv[]) {
	int return_val = 0;
	string url = "../../resources/traffic.mp4";
	string output_file = "../../output/canny-cpp.avi";
	cout << CV_VERSION << endl;
	cout << "Press [Esc] to exit" << endl;
	VideoCapture capture;
	Mat image;
	// See if URL arg passed
	if (argc == 2) {
		url = argv[1];
	}
	cout << "Input file:" << url << endl;
	cout << "Output file:" << output_file << endl;
	capture.open(url);
	// See if video capture opened
	if (capture.isOpened()) {
		cout << "Resolution: " << capture.get(CV_CAP_PROP_FRAME_WIDTH) << "x"
				<< capture.get(CV_CAP_PROP_FRAME_HEIGHT) << endl;
		bool exit_loop = false;
		// Video writer
		VideoWriter writer(output_file, (int) capture.get(CAP_PROP_FOURCC),
				(int) capture.get(CAP_PROP_FPS),
				Size((int) capture.get(CAP_PROP_FRAME_WIDTH),
						(int) capture.get(CAP_PROP_FRAME_HEIGHT)));
		Mat gray_img;
		Mat blur_img;
		Mat edges_img;
		Mat dst_img;
		Size kSize = Size(3, 3);
		int frames = 0;
		timeval start_time;
		gettimeofday(&start_time, 0);
		// Process all frames
		while (capture.read(image) && !exit_loop) {
			if (!image.empty()) {
				// Convert the image to grayscale
				cvtColor(image, gray_img, COLOR_BGR2GRAY);
				// Reduce noise with a kernel 3x3
				GaussianBlur(gray_img, blur_img, kSize, 0);
				// Canny detector
				Canny(blur_img, edges_img, 100, 200, 3, false);
				// Add some colors to edges from original image
				bitwise_and(image, image, dst_img, edges_img);
				// Write frame with motion rectangles
				writer.write(dst_img);
				// Make sure we get new matrix
				dst_img.release();
				frames++;
			} else {
				cout << "No frame captured" << endl;
				exit_loop = true;
			}
		}
		timeval end_time;
		gettimeofday(&end_time, 0);
		cout << frames << " frames" << endl;
		cout << "FPS " << (frames / (end_time.tv_sec - start_time.tv_sec))
				<< ", elapsed time: " << (end_time.tv_sec - start_time.tv_sec)
				<< " seconds" << endl;
		// Release VideoWriter
		writer.release();
		// Release VideoCapture
		capture.release();
	} else {
		cout << "Unable to open device" << endl;
		return_val = -1;
	}
	return return_val;
}
Author: StephenSwanepoel, Project: install-opencv, Lines: 80, Source: Canny.cpp

Example 12: main

int main(int argc, const char *argv[])
{
    if (argc == 1)
    {
        help();
        return -1;
    }

    if (getCudaEnabledDeviceCount() == 0)
    {
        return cerr << "No GPU found or the library is compiled without CUDA support" << endl, -1;
    }

    cv::cuda::printShortCudaDeviceInfo(cv::cuda::getDevice());

    string cascadeName;
    string inputName;
    bool isInputImage = false;
    bool isInputVideo = false;
    bool isInputCamera = false;

    for (int i = 1; i < argc; ++i)
    {
        if (string(argv[i]) == "--cascade")
            cascadeName = argv[++i];
        else if (string(argv[i]) == "--video")
        {
            inputName = argv[++i];
            isInputVideo = true;
        }
        else if (string(argv[i]) == "--camera")
        {
            inputName = argv[++i];
            isInputCamera = true;
        }
        else if (string(argv[i]) == "--help")
        {
            help();
            return -1;
        }
        else if (!isInputImage)
        {
            inputName = argv[i];
            isInputImage = true;
        }
        else
        {
            cout << "Unknown key: " << argv[i] << endl;
            return -1;
        }
    }

    Ptr<cuda::CascadeClassifier> cascade_gpu = cuda::CascadeClassifier::create(cascadeName);

    cv::CascadeClassifier cascade_cpu;
    if (!cascade_cpu.load(cascadeName))
    {
        return cerr << "ERROR: Could not load cascade classifier \"" << cascadeName << "\"" << endl, help(), -1;
    }

    VideoCapture capture;
    Mat image;

    if (isInputImage)
    {
        image = imread(inputName);
        CV_Assert(!image.empty());
    }
    else if (isInputVideo)
    {
        capture.open(inputName);
        CV_Assert(capture.isOpened());
    }
    else
    {
        capture.open(atoi(inputName.c_str()));
        CV_Assert(capture.isOpened());
    }

    namedWindow("result", 1);

    Mat frame, frame_cpu, gray_cpu, resized_cpu, frameDisp;
    vector<Rect> faces;

    GpuMat frame_gpu, gray_gpu, resized_gpu, facesBuf_gpu;

    /* parameters */
    bool useGPU = true;
    double scaleFactor = 1.0;
    bool findLargestObject = false;
    bool filterRects = true;
    bool helpScreen = false;

    for (;;)
    {
        if (isInputCamera || isInputVideo)
        {
            capture >> frame;
            if (frame.empty())
            {
//......... part of the code omitted .........
Author: ArkaJU, Project: opencv, Lines: 101, Source: cascadeclassifier.cpp

Example 13: main

int main() 
{
	string fileName = "traffic.avi";
	capture.open(fileName);		//Video capture from harddisk(.avi) or from camera
	if( !capture.isOpened() )
	{	
		cerr<<"video opening error\n"; waitKey(0); system("pause");  
	}

	Mat frameImg_origSize;							//image taken from camera feed in original size
	namedWindow( "out"	  , CV_WINDOW_AUTOSIZE);	//window to show output
	namedWindow( "trackbar", CV_WINDOW_AUTOSIZE);	//Trackbars to change value of parameters
	resizeWindow( "trackbar", 300, 600);			//Resizing trackbar window for proper view of all the parameters
	
	
	capture>>frameImg_origSize; // Just to know original size of video
	if( frameImg_origSize.empty() ) { cout<<"something wrong"; }
	

	resize(frameImg_origSize, frameImg, Size(WIDTH_SMALL, HEIGHT_SMALL), 0, 0, CV_INTER_AREA);	//Resize original frame into smaller frame for faster calculations

	Size origSize = frameImg_origSize.size();	//original size
	cout<<"ORIG: size = "<<frameImg_origSize.cols
		<<" X "<<frameImg_origSize.rows
		<<" step "<<frameImg_origSize.step
		<<" nchannels "<<frameImg_origSize.channels()<<endl;	//print original size: width, height, widthStep, no of channels.

	g_image = Mat(Size(WIDTH_SMALL, HEIGHT_SMALL), CV_8UC1);	g_image.setTo(0);	//Gray image of frameImg
	//frameData  = (char*)frameImg ->imageData;	//Data of frameImg
	//calibIntensity();	//Average Intensity of all pixels in the image

	//cout<<"calibintensity\n";
	Mat roadImage = Mat(Size(WIDTH_SMALL,HEIGHT_SMALL), CV_8UC3);	//Image of the road (without vehicles)
	roadImage = findRoadImage();	//Image of the road
	
	cout<<"roadimage\n";
	//char* roadImageData = (char*)roadImage->imageData;	//Data of roadImage
	calibPolygon();	//Polygon calibration: select the four polygon points clockwise and press Enter

	cout<<"polyArea = "<<polyArea;	//Area of selected polygon
	Mat binImage = Mat(Size(WIDTH_SMALL, HEIGHT_SMALL),CV_8UC1);	//white pixel = cars, black pixel = other than cars
	//char* binImageData = (char*)binImage->imageData;	//data of binImage
	Mat finalImage = Mat(Size(WIDTH_SMALL,HEIGHT_SMALL), CV_8UC3);	//final image to show output

	double T = time(0);	//Current time
	float fps = 0, lastCount = 0;	//frames per second

	int thresh_r = 43, thresh_g = 43, thresh_b = 49;						//Threshold parameters for Red, Green, Blue colors
	createTrackbar( "Red Threshold", "trackbar", &thresh_r, 255, 0 );		//Threshold for Red color
	createTrackbar( "Green Threshold", "trackbar", &thresh_g, 255, 0 );		//Threshold for Green color
	createTrackbar( "Blue Threshold", "trackbar", &thresh_b, 255, 0 );		//Threshold for Blue color
	int dilate1=1, erode1=2, dilate2=5;	//Dilate and Erode parameters
	Mat imgA = Mat(Size(WIDTH_SMALL,HEIGHT_SMALL),CV_8SC3);//Used for opticalFlow
	//CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];	//Input points for opticalFlow
	//CvPoint2D32f* cornersB = new CvPoint2D32f[ MAX_CORNERS ];	//Output points from opticalFlow
	vector<Point2f> cornersA, cornersB;

	frameImg.copyTo(imgA);//cvCopyImage(frameImg,imgA);	//copy from frameImg to imgA
	
	int win_size = 20;	//parameter for opticalFlow
	int corner_count = MAX_CORNERS;	//no of points tracked in opticalFlow
	//Mat pyrA;// = cvCreateImage( size(WIDTH_SMALL,HEIGHT_SMALL), IPL_DEPTH_32F, 1 );	//Temp image (opticalFlow)
	//Mat pyrB;// = cvCreateImage( size(WIDTH_SMALL,HEIGHT_SMALL), IPL_DEPTH_32F, 1 );	//Temp image (opticalFlow)
	double distance;	//Length of lines tracked by opticalFlow
	int maxArrowLength = 100, minArrowLength = 0;	//div by 10 //Max and Min length of the tracked lines
	int arrowGap = 5;	//distance between consecutive tracking points (opticalFlow)
	createTrackbar("max arrow length", "trackbar", &maxArrowLength, 100, 0);	//Manually change max length of tracked lines
	createTrackbar("min arrow length", "trackbar", &minArrowLength, 100, 0);	//Manually change min length of tracked lines
	createTrackbar("dilate 1","trackbar", &dilate1, 15, 0);	//first dilate
	createTrackbar("erode 1","trackbar", &erode1, 15, 0);		//first erode
	createTrackbar("dilate 2","trackbar", &dilate2, 15, 0);	//second dilate
	char features_found[ MAX_CORNERS ];	//temp data (opticalFlow)
	float feature_errors[ MAX_CORNERS ];//temp data (opticalFlow)
	Mat dilate1_element = getStructuringElement(MORPH_ELLIPSE , Size(2 * dilate1 + 1, 2 * dilate1 + 1), Point(-1,-1) );
	Mat erode1_element = getStructuringElement(MORPH_ELLIPSE , Size(2 * erode1 + 1, 2 * erode1 + 1), Point(-1,-1) );
	Mat dilate2_element = getStructuringElement(MORPH_ELLIPSE , Size(2 * dilate2 + 1, 2 * dilate2 + 1), Point(-1,-1) );
	
	vector< Vec4i > hierarchy;
	vector< vector<Point> > contours;
	vector< uchar > vstatus; 
	vector< float >verror;

	//////////////////////////////////////////////////////////////////////////
	while(true) //Loops till video buffers
	{
		++fps;	//calculation of Frames Per Second
		capture>>frameImg_origSize; //Store image in original size
		if( frameImg_origSize.empty() ) break; //if there is no frame available (end of buffer); stop.
		resize(frameImg_origSize, frameImg, frameImg.size()); //resize original image into smaller image for fast calculation
		imshow("video", frameImg);
		
		register int X; //temp variable
		for( int i=0; i<HEIGHT_SMALL; ++i) //iter through whole frame and compare it with image of road; if greater than threshold, it must be a vehicle
		{
			for(int j=0; j<WIDTH_SMALL; ++j)
			{
				//X = i*WIDTH_STEP_SMALL+j*NCHANNELS;
				if(	abs(roadImage.at<Vec3b>(i,j)[0]-frameImg.at<Vec3b>(i,j)[0])<thresh_r &&
					abs(roadImage.at<Vec3b>(i,j)[1]-frameImg.at<Vec3b>(i,j)[1])<thresh_g &&
					abs(roadImage.at<Vec3b>(i,j)[2]-frameImg.at<Vec3b>(i,j)[2])<thresh_b ) //comparing frame image against road image using threshold of Red, Green and Blue
//......... part of the code omitted .........
Author: sivapvarma, Project: smart_traffic_control, Lines: 101, Source: main.cpp

Example 14: main

int main( int argc, char** argv )
{
    VideoCapture cap;
    Rect trackWindow;
    RotatedRect trackBox;
    int hsize = 16;
    float hranges[] = {0,180};
    const float* phranges = hranges;

    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        cap.open(argc == 2 ? argv[1][0] - '0' : 0);
    else if( argc == 2 )
        cap.open(argv[1]);

    if( !cap.isOpened() )
    {
    	help();
        cout << "***Could not initialize capturing...***\n";
        return 0;
    }

    help();

    namedWindow( "Histogram", 1 );
    namedWindow( "CamShift Demo", 1 );
    setMouseCallback( "CamShift Demo", onMouse, 0 );
    createTrackbar( "Vmin", "CamShift Demo", &vmin, 256, 0 );
    createTrackbar( "Vmax", "CamShift Demo", &vmax, 256, 0 );
    createTrackbar( "Smin", "CamShift Demo", &smin, 256, 0 );

    Mat hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
    
    for(;;)
    {
        Mat frame;
        cap >> frame;
        if( frame.empty() )
            break;

        frame.copyTo(image);
        cvtColor(image, hsv, CV_BGR2HSV);

        if( trackObject )
        {
            int _vmin = vmin, _vmax = vmax;

            inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)),
                    Scalar(180, 256, MAX(_vmin, _vmax)), mask);
            int ch[] = {0, 0};
            hue.create(hsv.size(), hsv.depth());
            mixChannels(&hsv, 1, &hue, 1, ch, 1);

            if( trackObject < 0 )
            {
                Mat roi(hue, selection), maskroi(mask, selection);
                calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
                normalize(hist, hist, 0, 255, CV_MINMAX);
                
                trackWindow = selection;
                trackObject = 1;

                histimg = Scalar::all(0);
                int binW = histimg.cols / hsize;
                Mat buf(1, hsize, CV_8UC3);
                for( int i = 0; i < hsize; i++ )
                    buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180./hsize), 255, 255);
                cvtColor(buf, buf, CV_HSV2BGR);
                    
                for( int i = 0; i < hsize; i++ )
                {
                    int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows/255);
                    rectangle( histimg, Point(i*binW,histimg.rows),
                               Point((i+1)*binW,histimg.rows - val),
                               Scalar(buf.at<Vec3b>(i)), -1, 8 );
                }
            }

            calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
            backproj &= mask;
            RotatedRect trackBox = CamShift(backproj, trackWindow,
                                TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ));

            if( backprojMode )
                cvtColor( backproj, image, CV_GRAY2BGR );
            ellipse( image, trackBox, Scalar(0,0,255), 3, CV_AA );
        }

        if( selectObject && selection.width > 0 && selection.height > 0 )
        {
            Mat roi(image, selection);
            bitwise_not(roi, roi);
        }

        imshow( "CamShift Demo", image );
        imshow( "Histogram", histimg );

        char c = (char)waitKey(10);
        if( c == 27 )
            break;
        switch(c)
//......... part of the code omitted .........
Author: SCS-B3C, Project: OpenCV2-2, Lines: 101, Source: camshiftdemo.cpp

Example 15: main

void main()
{
	int num1 = 0;
	int num2 = 0;
	int result;
	char key;
	char command = '@';
	bool SecondNumPressed = false;


	VideoCapture cap;
	cap.open(0);
	if (!cap.isOpened())
	{
		system("CLS");
		printf("\n\n\t\t\tcamera disconnected");
		system("PAUSE");
		return;
	}
	cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);


	system("CLS");
	printf("\n\n\t\t\t0");
	while (true)
	{
		key = press(cap);
		if ((SecondNumPressed == true) && ((key != '=') && (((key - '0')<0) || ((key - '0')>9))))
		{
			//cout << SecondNumPressed << " " << key << " " << key - '0' << "\n";
			continue;
		}
		if (key == '=')
		{
			system("CLS");

			if ((num2 == 0) && (command == '/'))
				printf("\n\n\t\t\tcan not devide with zero");
			else if (SecondNumPressed == true)
				printf("\n\n\t\t\t%d", result);
			else
				printf("\n\n\t\t\t%d", num1);
			num1 = 0;
			num2 = 0;
			command = '@';
			SecondNumPressed = false;
		}
		else if (key == 'c')
		{
			system("CLS");
			num1 = 0;
			num2 = 0;
			command = '@';
			printf("\n\n\t\t\t0");
			SecondNumPressed = false;
		}
		else if ((key == '+') || (key == '*') || (key == '-') || (key == '/'))
		{
			if (command != '@')
			{
				system("CLS");
				printf("\n\n\t\t\t%d", num1);
			}
			printf(" %c ", key);
			command = key;
		}
		else if (command == '@')
		{
			system("CLS");
			if (key == '<')
				num1 = num1 - num1 % 10;
			else
				num1 = num1 * 10 + (key - '0');
			printf("\n\n\t\t\t%d", num1);
		}
		else
		{
			system("CLS");
			if (key == '<')
				num2 = num2 - num2 % 10;
			else
			{
				num2 = num2 * 10 + (key - '0');
				SecondNumPressed = true;
			}
			printf("\n\n\t\t\t%d %c %d", num1, command, num2);
			if (command == '+')
				result = num1 + num2;
			else if (command == '-')
				result = num1 - num2;
			else if (command == '*')
				result = num1 * num2;
			else if (command == '/')
			if (num2 != 0)
				result = num1 / num2;
		}
	}
}
开发者ID:harsh-pd,项目名称:virtual-calculator,代码行数:99,代码来源:calculator.cpp


Note: The VideoCapture::isOpened method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and the source code copyright belongs to the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.