

C++ VideoCapture::read Method Code Examples

This article collects typical usage examples of the C++ method cv::VideoCapture::read. If you have been wondering what VideoCapture::read does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, cv::VideoCapture.


The following shows 6 code examples of the VideoCapture::read method, sorted by popularity by default.
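
Before the examples, here is a minimal sketch of the canonical read() loop (assuming the default camera at index 0): read() grabs and decodes the next frame into the supplied cv::Mat, returning false when the stream ends or the device fails.

#include <opencv2/opencv.hpp>

int main() {
    cv::VideoCapture cap(0);            // open the default camera
    if (!cap.isOpened()) return 1;      // device could not be opened

    cv::Mat frame;
    while (cap.read(frame)) {           // false at end of stream / on failure
        cv::imshow("preview", frame);
        if (cv::waitKey(1) == 27) break;   // stop on Esc
    }
    return 0;
}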

Example 1: streaming_process

void streaming_process(cv::VideoCapture vcap, ros::Publisher* _blob_publisher)
{
  Camera Cam;

  int counting = 0;                       // Number of ellipsoids detected
  int falsepositive = 0;                  // Number of false-positive ellipsoids detected
  int zerovalue = 0;                      // Number of degenerate (0,0) detections
  int eps = 5;                            // Pixel threshold used to flag false positives
  bool notvalidvalue = false;

  // Install the SIGINT handler (Ctrl+C interrupt)
  struct sigaction sigIntHandler;
  sigIntHandler.sa_handler = my_handler;
  sigemptyset(&sigIntHandler.sa_mask);
  sigIntHandler.sa_flags = 0;
  sigaction(SIGINT, &sigIntHandler, NULL);

  Mat src;                              // Frame
  vector<Vec3f> circles;                // Vector of coordinates indicating x,y,r parameters of circles detected
  vector<RotatedRect> minEllipse;       // Vector of data indicating x,y coord of ellipses detected
  vector<RotatedRect> dataEllipses;

  std_msgs::Float64MultiArray cog_blobs;
  Eigen::MatrixXd blobs_matrix_data = Eigen::MatrixXd::Zero(2,3);


  /// Ellipses are detected and returned as rotated rectangles (cv::RotatedRect)

  while(counting<2){

    // read() both grabs and decodes the next frame from the camera
    if (!vcap.read(src)) {
        std::cout << "No frame" << std::endl;
        waitKey();
        continue;   // don't process a stale/empty frame
    }

    /// Ellipses detection
    Cam.ellipsedetection(src, minEllipse, &counting);

    std::cout << "features detected: " << minEllipse.size() << std::endl;

    notvalidvalue = false;
    falsepositive = 0;
    zerovalue = 0;
    for (int n = 0; n < counting; n++) {


        for(unsigned int n2=0; n2<dataEllipses.size(); n2++){
            //std::cout << "compare (" << minEllipse[n].center.x << "," << minEllipse[n].center.y << ") con (" << dataEllipses[n2].center.x << "," << dataEllipses[n2].center.y << ")" << std::endl;
            if(fabs(minEllipse[n].center.x - dataEllipses[n2].center.x) < eps && fabs(minEllipse[n].center.y - dataEllipses[n2].center.y) < eps)
              {
              std::cout << "already detected" << std::endl;
              notvalidvalue = true;
              falsepositive++;
              break;
            }
          }
        if (!notvalidvalue) {
            if (minEllipse[n].center.x != 0 && minEllipse[n].center.y != 0)
                dataEllipses.push_back(minEllipse[n]);
            else
                zerovalue++;
        }
      }

    std::cout << "false positive  " << falsepositive << std::endl;
    counting = counting - falsepositive - zerovalue;


    // Only reset the list while still searching; the tracking stage below
    // needs the ellipses found on the final iteration.
    if (counting < 2)
      dataEllipses.clear();

    std::cout << "Real features detected: " << counting << std::endl;
    std::cout << " ------------ " << std::endl;



    }


  /// VISP PART: Tracking circles
  /// Once found out the circles, tell visp where they are

  Mat srcHSV, srcWB;
  vpImage<unsigned char> I; // for gray images
  vpImagePoint vImp;
  vpImagePoint vImp2;

  vpImageConvert::convert(src, I);  // convert the image from OpenCV to ViSP

  vpDisplayOpenCV d(I);
  vpDisplay::display(I);
  vpDisplay::flush(I);
  std::list<vpDot2> blob_list;
  vpDot2 blob;
  vpDot2 blob2;

  // assign blobs position
  vImp.set_u(dataEllipses[0].center.x);
//......... the rest of the code is omitted here .........
Author: Christian-Vassallo | Project: robot_follow_vision | Lines: 101 | Source: features-detection.cpp
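
Example 1 lets read() do both halves of frame acquisition at once. When grabbing and decoding need to be separated (for instance, to timestamp the capture as tightly as possible, or to grab from several synchronized cameras before decoding any frame), VideoCapture splits read() into grab() and retrieve(). A minimal sketch; the timing use is illustrative:

#include <opencv2/opencv.hpp>
#include <iostream>

int main() {
    cv::VideoCapture cam(0);                    // default camera
    if (!cam.isOpened()) return 1;

    cv::Mat frame;
    if (cam.grab()) {                           // acquire the frame (fast)
        double t = (double)cv::getTickCount();  // timestamp close to acquisition
        cam.retrieve(frame);                    // decode it (potentially slower)
        std::cout << "frame grabbed at tick " << t << std::endl;
    }
    return 0;
}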

Example 2: makeGUI

void OpenCVTemplateApp::makeGUI() {
    interface->clear();
    interface->addButton("load image", [this] {
        auto path = ci::app::getOpenFilePath();
        image = cv::imread(path.string());
        std::cout <<"cols "<<image.cols << std::endl;
        std::cout <<"rows "<<image.rows << std::endl;
        std::cout <<"channels "<<image.channels() << std::endl;
        imageTexture = gl::Texture::create(fromOcv(image));
    });
    interface->addButton("load video", [this] {
        auto path = ci::app::getOpenFilePath();
        video.open(path.string());
        frameWidth = video.get(cv::CAP_PROP_FRAME_WIDTH);
        frameHeight = video.get(cv::CAP_PROP_FRAME_HEIGHT);
        totalFrames = video.get(cv::CAP_PROP_FRAME_COUNT);
        video.read(frame);
        if(isGrayScale) {
            cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
        }
        frameTexture = gl::Texture::create(fromOcv(frame));
        makeGUI();
    });
    interface->addSeparator();
    if(frameTexture) {
        interface->addParam("gray scale", &isGrayScale).updateFn([this] {
            video.retrieve(frame);
            if(isGrayScale) {
                cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            }
            frameTexture = gl::Texture::create(fromOcv(frame));
            makeGUI();
        });
        interface->addParam("nb of feature",&nbOfFeaturePoints).min(1).max(1000);
        if(isGrayScale) {
            interface->addButton("get feature points", [this] {
                cv::goodFeaturesToTrack(frame, featurePoints, nbOfFeaturePoints, 0.01, 10, cv::Mat(), 3, 0, 0.04);
            });
        }
        interface->addSeparator();
        interface->addParam("frame",&frameIndex).min(0).max(totalFrames-1).step(1).updateFn([this] {
            video.set(cv::CAP_PROP_POS_FRAMES,frameIndex);
            video.read(frame);
            if(isGrayScale) {
                cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            }
            frameTexture = gl::Texture::create(fromOcv(frame));
        });
        interface->addSeparator();
        interface->addParam("speed", &frameSpeed).min(1).max(1000).step(1);
        interface->addButton("play",[this] {
            currentState = PLAY;
            makeGUI();
        });
        if(currentState == PLAY) {
            interface->addButton("pause",[this] {
                currentState = PAUSE;
                makeGUI();
            });
        }
    }
}
Author: cmorace | Project: nckuGraphicsLib | Lines: 62 | Source: OpenCVTemplateApp.cpp
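
Example 2 seeks by setting CAP_PROP_POS_FRAMES before calling read(). One caveat: with compressed video, some backends can only land on the nearest keyframe, so the decoded frame may not be exactly the one requested. A hedged sketch of a defensive seek (the file name is illustrative):

#include <opencv2/opencv.hpp>
#include <iostream>

int main() {
    cv::VideoCapture video("clip.mp4");         // hypothetical input file
    if (!video.isOpened()) return 1;

    cv::Mat frame;
    video.set(cv::CAP_PROP_POS_FRAMES, 120);    // request frame 120
    if (video.read(frame)) {
        // Position after the read; compare against 121 if exactness matters.
        double pos = video.get(cv::CAP_PROP_POS_FRAMES);
        std::cout << "now positioned at frame " << pos << std::endl;
    }
    return 0;
}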

Example 3: sparse_optical_flow

///////////////////////////////////////////////////////////////////////////////
// Generate polar coordinates from a processed video
///////////////////////////////////////////////////////////////////////////////
void sparse_optical_flow(){
	int count = 0;

	// loop through video
	while( vidCap.read( currentImage ) && count < end_frame ){
		count++;
		if( count < start_frame ) continue;
		if( count % every_n_frames != 0 ) continue;
		cout << "frame: " << count << "/" << end_frame << endl;

		cv::cvtColor( currentImage, currGray, CV_BGR2GRAY );
		currentImage.copyTo( drawTo );
		if( needToInit ) {
			goodFeaturesToTrack( currGray, points[1], 10000, 0.01, 3, cv::Mat(), 3, 0, 0.04 );
			cornerSubPix( currGray, points[1], cv::Size(10,10), cv::Size(-1, -1), termcrit );
			needToInit = false;
		}else if( !points[0].empty() ){
			vector<uchar> status;
			vector<float> err;
			if(prevGray.empty()){
					currGray.copyTo(prevGray);
			}
			calcOpticalFlowPyrLK( prevGray, currGray, points[0], points[1], status, err, cv::Size(5,5), 3, termcrit, 0, 0.001);
			size_t i, k;
			for( i = k = 0; i < points[1].size(); i++){
				if(!status[i]){
					continue;
				}

				points[1][k++] = points[1][i];

				float dist = distanceBetweenPoints( points[0][i], points[1][i] );

				if( dist > min_vector_length && dist < 10.00f ){

					// remove negative values
					if(points[0][i].x < 0.0f){
						points[0][i].x = 0.0f;
					}
					if(points[0][i].y < 0.0f){
						points[0][i].y = 0.0f;
					}
					if(points[1][i].x < 0.0f){
						points[1][i].x = 0.0f;
					}
					if(points[1][i].y < 0.0f){
						points[1][i].y = 0.0f;
					}

					// remove high values
					if(points[0][i].x >= width){
						points[0][i].x = width - 0.0001f;
					}
					if(points[0][i].y >= height){
						points[0][i].y = height - 0.0001f;
					}
					if(points[1][i].x >= width){
						points[1][i].x = width - 0.0001f;
					}
					if(points[1][i].y >= height){
						points[1][i].y = height - 0.0001f;
					}

					// don't use vectors that start and end at the same position
					if( (points[0][i].x == points[1][i].x) && (points[0][i].y == points[1][i].y) ){
						continue;
					}

					// draw vector
					cv::line(drawTo, points[0][i], points[1][i], cv::Scalar(0,0,255),1,8,0);
					// draw the head
					cv::circle(drawTo, points[1][i], 1, cv::Scalar(0,255,0), -1,8,0);
				}
			}
			points[1].resize(k);
		}
		cv::imshow("vectors", drawTo);
		cv::waitKey(1);

		swap(points[1], points[0]);
		swap(prevGray, currGray);
		swap(prevImage, currentImage);
	}
}
Author: jstadler | Project: tools-opencv | Lines: 87 | Source: sparse_opflow_motion.cpp
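
The banner comment on Example 3 promises polar coordinates, yet the excerpt only draws the flow vectors. A sketch of that missing step, converting each (previous, current) point pair into magnitude and angle with cv::cartToPolar; the function and container names here are illustrative, not part of the original project:

#include <opencv2/opencv.hpp>
#include <vector>

// Convert matched point pairs (prev -> curr) into polar flow vectors.
void flowToPolar(const std::vector<cv::Point2f>& prev,
                 const std::vector<cv::Point2f>& curr,
                 std::vector<float>& magnitude,
                 std::vector<float>& angleDeg) {
    std::vector<float> dx(prev.size()), dy(prev.size());
    for (size_t i = 0; i < prev.size(); ++i) {
        dx[i] = curr[i].x - prev[i].x;
        dy[i] = curr[i].y - prev[i].y;
    }
    // Element-wise magnitude and angle; the final 'true' requests degrees.
    cv::cartToPolar(dx, dy, magnitude, angleDeg, true);
}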

Example 4: main

int main(int argc, char* argv[])
{
    signal(SIGINT, quitFunction);

    // Simple parsing of the parameters related to the image acquisition
    int xRes = 640;
    int yRes = 480;
    int cameraIndex = 0;
    if (argc > 2) {
        xRes = std::atoi(argv[1]);
        yRes = std::atoi(argv[2]);
    }
    if (argc > 3) {
        cameraIndex = std::atoi(argv[3]);
    }

    // The source of input images
    capture.open(cameraIndex);
    if (!capture.isOpened())
    {
        std::cerr << "Unable to initialise video capture." << std::endl;
        return 1;
    }
#ifdef OPENCV3
    capture.set(cv::CAP_PROP_FRAME_WIDTH, xRes);
    capture.set(cv::CAP_PROP_FRAME_HEIGHT, yRes);
#else
    capture.set(CV_CAP_PROP_FRAME_WIDTH, xRes);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, yRes);
#endif
    cv::Mat inputImage;

    // The tag detection happens in the Chilitags class.
    chilitags::Chilitags chilitags;

    // The detection is not perfect, so if a tag is missed during one frame,
    // it briefly disappears, which results in flickering.
    // To address this, Chilitags "cheats" by keeping tags at the same
    // position for n frames. Only when tags have disappeared for more than
    // 5 frames does Chilitags actually remove them.
    // Here, we cancel this to show the raw detection results.
    chilitags.setFilter(0, 0.0f);

    cv::namedWindow("DisplayChilitags");
    // Main loop, exiting when 'q' is pressed
    for (; 'q' != (char) cv::waitKey(1) && sRunning; ) {

        // Capture a new image; stop if the camera fails or the stream ends.
        if (!capture.read(inputImage)) break;

        // Start measuring the time needed for the detection
        int64 startTime = cv::getTickCount();

        // Detect tags on the current image (and time the detection);
        // The resulting map associates tag ids (between 0 and 1023)
        // to four 2D points corresponding to the corners positions
        // in the picture.
        chilitags::TagCornerMap tags = chilitags.find(inputImage);

        // Measure the processing time needed for the detection
        int64 endTime = cv::getTickCount();
        float processingTime = 1000.0f*((float) endTime - startTime)/cv::getTickFrequency();


        // Now we start using the result of the detection.

        // First, we set up some constants related to the information overlaid
        // on the captured image
        const static cv::Scalar COLOR(255, 0, 255);
        // OpenCv can draw with sub-pixel precision with fixed point coordinates
        static const int SHIFT = 16;
        static const float PRECISION = 1<<SHIFT;

        // We don't want to draw directly on the input image, so we clone it
        cv::Mat outputImage = inputImage.clone();

        for (const std::pair<int, chilitags::Quad> & tag : tags) {

            int id = tag.first;
            // We wrap the corner matrix into a datastructure that allows an
            // easy access to the coordinates
            const cv::Mat_<cv::Point2f> corners(tag.second);

            // We start by drawing the borders of the tag
            for (size_t i = 0; i < 4; ++i) {
                cv::line(
                    outputImage,
                    PRECISION*corners(i),
                    PRECISION*corners((i+1)%4),
#ifdef OPENCV3
                    COLOR, 1, cv::LINE_AA, SHIFT);
#else
                    COLOR, 1, CV_AA, SHIFT);
#endif
            }

            // Other points can be computed from the four corners of the Quad.
            // Chilitags are oriented. It means that the points 0,1,2,3 of
            // the Quad coordinates are consistently the top-left, top-right,
            // bottom-right and bottom-left corners.
//......... the rest of the code is omitted here .........
Author: chili-epfl | Project: chilitags | Lines: 101 | Source: detect-live.cpp

Example 5: opencvLoop

void opencvLoop(){
    //read first frame
    stream1.read(frame1);
    if(!frame1.empty()){
        //convert frame1 to gray scale for frame differencing
        cvtColor(frame1, grayImage1, COLOR_BGR2GRAY);
    }
    //copy second frame
    stream1.read(frame2);
    //convert frame2 to gray scale for frame differencing
    cvtColor(frame2, grayImage2, COLOR_BGR2GRAY);
    //perform frame differencing with the sequential images. This will output an "intensity image"
    //do not confuse this with a threshold image, we will need to perform thresholding afterwards.
    absdiff(grayImage1, grayImage2, differenceImage);
    //threshold intensity image at a given sensitivity value
    threshold(differenceImage, thresholdImage, SENSITIVITY_VALUE, 255, THRESH_BINARY);
    if (debugMode == true) {
        //show the difference image and threshold image
        video[1] = differenceImage;            // imshow("Difference Image", differenceImage);
//        glutSetWindow(debugWindow1);
//        glutShowWindow();
        video[2] = thresholdImage;             //imshow("Threshold Image", thresholdImage);
    }
    else {
        //if not in debug mode, destroy the windows
        //glutHideWindow();
       // glutHideWindow(debugWindow2);
    }
    //blur() to smooth the image, remove noise
    blur(thresholdImage, thresholdImage, cv::Size(BLUR_SIZE, BLUR_SIZE));
    //threshold again to obtain binary image from blur output
    threshold(thresholdImage, thresholdImage, SENSITIVITY_VALUE, 255, THRESH_BINARY);
    //verifies that image is 8 bit for findcontours()
    thresholdImage.convertTo(thresholdImage, CV_8U);
    if (debugMode == true) {
        //show the threshold image after it's been "blurred"
        video[3] = thresholdImage;             //imshow("Final Threshold Image", thresholdImage);
    }
    else {
       // glutHideWindow(debugWindow3);
    }
    
    //if tracking enabled, search for contours in our thresholded image
    if (trackingEnabled) {
        //Collects a number of sample averages specified by SMOOTHING_SAMPLE and sticks them in the samplePoints global vector
        collectSamples(thresholdImage, frame1);
    }
    //when samplePoints vector reaches the size specified by SMOOTHING_SAMPLE, updates the current point and clears samplePoints
    if (samplePoints.size() == SMOOTHING_SAMPLE) {
        destination = pathSmoothing(frame1);
        samplePoints.clear();
    }
    
    if(trackingEnabled){
        targetPoints.push_back(current);
        vector<Point> temp;
        int counter = 0;
        
        //Draw Past motion of target//
        //if vector target only has one point, skip over it
        if(targetPoints.size() > 1){
            if(TRAILS >= targetPoints.size()){
                counter = targetPoints.size();
                //target size hasn't gotten that many trails yet
                //Loop through past points and draw last 3 lines of motion
                for(int i = 0; i+1<counter-1; i++){
                    line(frame1, targetPoints[i], targetPoints[i+1], Scalar(0, 0, 255), 2);
                    cout << "building up" << endl;
                }
            }
            else{
                counter = TRAILS;
                
                //Loop through past points and draw last lines of motion
                for(int i = 0; i+1<counter; i++){
                    line(frame1, targetPoints[targetPoints.size()-1-i], targetPoints[targetPoints.size()-2-i], Scalar(0, 0, 255), 2);
                    targetPoints[i] = targetPoints[i+1];
                }
            }
        }
    }
    
    
    //limits the speed of movement of the target crosshair. We can tweak SPEED_OF_MOVEMENT to accurately reflect the actual position of the laser in project 2
    //so that we can draw an obscuring circle over the laser
    current = speedGovernor(current, destination, SPEED_OF_MOVEMENT);
    Point predictLine = speedGovernor(current, destination, 100);
    
    //draw the target
    drawTarget(current, frame1);
    line(frame1, current, predictLine, Scalar(0, 255, 0), 2);
    
    video[0] = frame1;         //imshow("Frame1", frame1);
    glutPostRedisplay();
    
}
Author: lcheek3 | Project: CS480-Project-2 | Lines: 96 | Source: main.cpp
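
Stripped of the GLUT plumbing and globals, the frame-differencing core of Example 5 is only four OpenCV calls per frame. A minimal self-contained sketch; the sensitivity and blur constants are illustrative stand-ins for the example's SENSITIVITY_VALUE and BLUR_SIZE:

#include <opencv2/opencv.hpp>

int main() {
    cv::VideoCapture cam(0);
    if (!cam.isOpened()) return 1;

    const int SENSITIVITY = 20;   // stand-in for SENSITIVITY_VALUE
    const int BLUR = 10;          // stand-in for BLUR_SIZE

    cv::Mat prev, curr, gray1, gray2, diff, mask;
    if (!cam.read(prev)) return 1;
    cv::cvtColor(prev, gray1, cv::COLOR_BGR2GRAY);

    while (cam.read(curr)) {
        cv::cvtColor(curr, gray2, cv::COLOR_BGR2GRAY);
        cv::absdiff(gray1, gray2, diff);                        // intensity image
        cv::threshold(diff, mask, SENSITIVITY, 255, cv::THRESH_BINARY);
        cv::blur(mask, mask, cv::Size(BLUR, BLUR));             // smooth out noise
        cv::threshold(mask, mask, SENSITIVITY, 255, cv::THRESH_BINARY);
        cv::imshow("motion mask", mask);
        if (cv::waitKey(1) == 27) break;                        // stop on Esc
        gray2.copyTo(gray1);                                    // current becomes previous
    }
    return 0;
}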

Example 6: run

int SubsExtractor::run()
{

	namedWindow("Control", CV_WINDOW_AUTOSIZE);
    //createTrackbar("SF", "Control", &StartFrame, cap->get(CV_CAP_PROP_FRAME_COUNT), (void (*)(int,void *))&SubsExtractor::onSFtb, 0);
	createTrackbar("SF", "Control", &StartFrame, cap->get(CV_CAP_PROP_FRAME_COUNT), onSFtb, this);
	createTrackbar("EF", "Control", &EndFrame, 
                   cap->get(CV_CAP_PROP_FRAME_COUNT), 0, 0);
    createTrackbar("T1", "Control", &th1,255,NULL,0);
    createTrackbar("T2", "Control", &th2,255,NULL,0);

	int xmax = cap->get(CV_CAP_PROP_FRAME_WIDTH); 
    int ymax = cap->get(CV_CAP_PROP_FRAME_HEIGHT); 
	int x = xmax/2 - 50; int y = ymax - 110; 
	int xw = 100; int yh = 100;
	// 800x90+240+590 convert
	// (240,590) -> (240+800,590+90) = (1040,680)
	fprintf(stderr,"FRAME (%d %d) -> (%d %d)\n", x, y, x+xw, y+yh);
	fprintf(stderr,"STARTFRAME %d ENDFRAME %d\n", StartFrame, EndFrame);

	int subs;
	int frame = 0;
	char subtext[1024] = "";
	char same[] = " .   ";
	string f;
	char chronline[500];
	while(true) {
		if(!cap->read(img)) { 
    		cout << "Cannot read a frame from video stream" << endl;
			break;
		}
		if((frame = cap->get(CV_CAP_PROP_POS_FRAMES)) >= EndFrame) {
			cout << "Beyond EndFrame" << endl;
            break;
		}
        ////fprintf(stderr,"%ld\r",frame);
		subs = haysubs(x, x + xw, y, y + yh);
		fprintf(stderr,"subs %d\n", subs);
		switch(subs) {
			case SAME:
				//fprintf(stderr,"%s           \r", same + frame % 4);
				break;
			case START:
				if(ocr(subtext))
					setchron(cap->get(CV_CAP_PROP_POS_MSEC));
				//fprintf(stderr, "STR frame %ld\n",frame);
				break;
			case END:
				//fprintf(stderr,"END\n");
				getchron(cap->get(CV_CAP_PROP_POS_MSEC), chronline);
                                printf("%s\n%s\n\n",chronline,subtext);
				//fprintf(stderr, "END frame %d %s\n",frame,subtext);
				break;
			case CHANGE:
				//fprintf(stderr,"CHANGE\n");
                                //string s = getchron();
                                //setchron(cap->get(CV_CAP_PROP_POS_MSEC);
                                //imwrite(f, img);
                                //chron = cap->get(CV_CAP_PROP_POS_MSEC);
                                //intchron(CHRON_START,chron); 
				//fprintf(stderr, "CHG frame %d\n",frame);
				break;
			default:
				fprintf(stderr,"ERROR SUBS\n");
		}
		if (waitKey(30) == 27) {
			cout << "esc key pressed by user" << endl;
			break; 
		}
	}
	return 0;
}
Author: egrosclaude | Project: subs | Lines: 72 | Source: subsextractor.cpp
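
Example 6 timestamps subtitle events with CV_CAP_PROP_POS_MSEC. If the output is meant to become an .srt file, each millisecond value must be rendered as HH:MM:SS,mmm; a small helper along these lines could feed the chronline. The function is hypothetical, not part of the original project:

#include <cstdio>
#include <cstddef>

// Format a CV_CAP_PROP_POS_MSEC value as an SRT timestamp (HH:MM:SS,mmm).
void msecToSrt(double msec, char* out, std::size_t n) {
    long total = (long)msec;
    long ms = total % 1000;
    long s  = (total / 1000) % 60;
    long m  = (total / 60000) % 60;
    long h  =  total / 3600000;
    std::snprintf(out, n, "%02ld:%02ld:%02ld,%03ld", h, m, s, ms);
}

An SRT cue line then pairs two such timestamps as "start --> end".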


Note: The cv::VideoCapture::read examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors. For distribution and use, refer to each project's license. Do not republish without permission.