

Java VideoCapture.read Method Code Examples

This article collects typical usage examples of the Java method org.opencv.videoio.VideoCapture.read. If you are wondering what VideoCapture.read does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the class it belongs to, org.opencv.videoio.VideoCapture.


Eight code examples of the VideoCapture.read method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Java code examples.
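Before the examples, here is a minimal sketch of the VideoCapture.read call pattern shared by the snippets below. It assumes the OpenCV Java bindings and the matching native library are installed and that camera index 0 exists; the class name ReadFrameSketch and the output file name frame.jpg are illustrative placeholders, not taken from any of the listed projects.

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.videoio.VideoCapture;

public class ReadFrameSketch {
    public static void main(String[] args) {
        // Load the OpenCV native library (name and path depend on your installation).
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        VideoCapture capture = new VideoCapture(0); // camera index 0 is an assumption
        if (!capture.isOpened()) {
            System.err.println("Could not open the capture device");
            return;
        }

        Mat frame = new Mat();
        // read() grabs and decodes the next frame; it returns false when no frame is available.
        if (capture.read(frame) && !frame.empty()) {
            Imgcodecs.imwrite("frame.jpg", frame); // save the frame for inspection
        } else {
            System.err.println("No frame captured");
        }
        capture.release(); // always release the device when finished
    }
}

The recurring pattern in the examples that follow is the same: check isOpened() before reading, check the return value of read() (or Mat.empty()) before using the frame, and call release() when done.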

Example 1: Webcam

import org.opencv.videoio.VideoCapture; // import the package/class required by the method
Webcam() {
    cap = new VideoCapture(0);
    if (!cap.isOpened()) {
        System.out.println("Camera Error");
    } else {
        System.out.println("Camera OK?");
        cap.set(Videoio.CV_CAP_PROP_FRAME_WIDTH, width);
        cap.set(Videoio.CV_CAP_PROP_FRAME_HEIGHT, height);
    }
    try {
        Thread.sleep(1000); // give the camera a moment to warm up before the first read
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt(); // restore the interrupt status instead of swallowing it
    }
    cap.read(mat);
    System.out.println("width, height = "+mat.cols()+", "+mat.rows());
}
 
Developer: TheoreticallyNick, Project: Face-Detection-and-Tracking, Lines: 17, Source: Webcam.java

Example 2: patternCapture

import org.opencv.videoio.VideoCapture; // import the package/class required by the method
public void patternCapture(VideoCapture capture, EyeDetection e, int length, String cmd){
    //Scanner scanner = new Scanner(System.in);
    List<EyeStatus> eyeStatusList = new ArrayList<>();
    Calculation calculation = new Calculation(this);
    Mat frame = new Mat();
    List<Circle> circleList = new ArrayList<>();
    //System.out.println("How big will your pattern be?");
    //length = scanner.nextInt(); //exception
    for(int i = 0; i < length; i++) {
        //System.out.println("Do step " + (i + 1));
        pressAnyKeyToContinue("Do step " + (i + 1));
        if(capture.read(frame)) {
            circleList = e.eyeDetect(frame);
            e.filter(circleList);
            //Calculate
            eyeStatusList.add(calculation.calculate(frame, circleList));
        }
    }
    for(EyeStatus eye : eyeStatusList) {
        System.out.println(eye.getLeft() + " " + eye.getRight());
    }
    PatternDatabase.writeNewPattern(eyeStatusList, cmd);
    capture.release();
}
 
Developer: Merge-Conflict, Project: ICHACK16-ProcrastEnabler, Lines: 25, Source: Calibrator.java

Example 3: CamFaceDetector

import org.opencv.videoio.VideoCapture; // import the package/class required by the method
public static void CamFaceDetector() {
	try {
		System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

		final CascadeClassifier objDetector = new CascadeClassifier(CameraFacialRecognition.class
				.getResource("../../../../../opencv/sources/data/lbpcascades/lbpcascade_frontalface_improved.xml")
				.getPath().substring(1));

		final Mat capImg = new Mat();
		final VideoCapture capture = new VideoCapture(0);
		final int height = (int) capture.get(Videoio.CV_CAP_PROP_FRAME_HEIGHT);
		final int width = (int) capture.get(Videoio.CV_CAP_PROP_FRAME_WIDTH);
		if (height == 0 || width == 0) {
			throw new Exception("camera not found");
		}

		final JFrame frame = new JFrame("camera");
		frame.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE);
		final CameraFacialRecognition panel = new CameraFacialRecognition();
		frame.setContentPane(panel);
		frame.setVisible(true);
		frame.setSize(width + frame.getInsets().left + frame.getInsets().right,
				height + frame.getInsets().top + frame.getInsets().bottom);

		Mat dst = new Mat();
		while (frame.isShowing()) {
			capture.read(capImg);
			dst = dobj(objDetector, capImg);
			panel.mImg = Mat2BufferedImage.mat2BI(dst);
			panel.repaint();
		}
		capture.release();
	} catch (final Exception e) {
		System.out.println("Exception: " + e);
	} finally {
		System.out.println("--done--");
	}
}
 
Developer: zylo117, Project: SpotSpotter, Lines: 39, Source: CameraFacialRecognition.java

Example 4: grabImage

import org.opencv.videoio.VideoCapture; // import the package/class required by the method
public void grabImage(){
    Mat frame = new Mat();

    //connect
    videoCapture = new VideoCapture(0);
    isOpened = videoCapture.isOpened();
    System.out.println("connected: " + isOpened);
    //setSetting
    videoCapture.set(Videoio.CV_CAP_PROP_FRAME_WIDTH, 1280);
    videoCapture.set(Videoio.CV_CAP_PROP_FRAME_HEIGHT, 720);
    //startGrab
    isSucceed = videoCapture.grab();
    System.out.println("started: " + String.valueOf(isSucceed));
    if ((!isOpened) || (!isSucceed))
        return;
    System.out.println("------- START GRAB -------");

    // Wait for the camera to start delivering frames
    while (true){
        videoCapture.read(frame);
        if (!frame.empty())
            break;
    }

    int frameNo = 0;
    long startSysMillis = System.currentTimeMillis();
    while (frameNo < 1000){
        videoCapture.read(frame);
        frameNo++;
    }
    System.out.println(frameNo + " frames in " + (System.currentTimeMillis() - startSysMillis) + " millis");

    videoCapture.release(); // release device

    System.out.println('\n' + "Done");
}
 
Developer: Plasmoxy, Project: AquamarineLake, Lines: 37, Source: Frames.java

Example 5: main

import org.opencv.videoio.VideoCapture; // import the package/class required by the method
public static void main(String[] args)
{
	System.out.println("Hello, OpenCV");
    // Load the native library.
    System.loadLibrary("opencv_java244");

    VideoCapture camera = new VideoCapture(0);
    camera.open(0); // Redundant: the constructor above already opened device 0
    if(!camera.isOpened()){
        System.out.println("Camera Error");
    }
    else{
        System.out.println("Camera OK?");
    }

    Mat frame = new Mat();

    //camera.grab();
    //System.out.println("Frame Grabbed");
    //camera.retrieve(frame);
    //System.out.println("Frame Decoded");

    camera.read(frame);
    System.out.println("Frame Obtained");

    /* No difference
    camera.release();
    */

    System.out.println("Captured Frame Width " + frame.width());

    Imgcodecs.imwrite("camera.jpg", frame);
    System.out.println("OK");
}
 
Developer: Plasmoxy, Project: AquamarineLake, Lines: 35, Source: HelloCV.java

Example 6: main

import org.opencv.videoio.VideoCapture; // import the package/class required by the method
public static void main(String args[]) {
   System.loadLibrary(Core.NATIVE_LIBRARY_NAME); // Import openCV
   Webcam panel = new Webcam(); // Initialize itself

   // Initialize JPanel
   JFrame frame = new JFrame("Webcam");
   frame.setSize(200, 200);
   frame.setContentPane(panel);
   frame.setVisible(true);

   VideoCapture camera = new VideoCapture(0); // The camera

   // Attempt to set the frame size, but it doesn't really work. Only just makes it bigger
   camera.set(Videoio.CAP_PROP_FRAME_WIDTH, 1900);
   camera.set(Videoio.CAP_PROP_FRAME_HEIGHT, 1000);


   // Special window listener because there are threads that need to be shutdown on a close
   frame.addWindowListener(new WindowAdapter() {
      @Override
      public void windowClosing(WindowEvent e) {
         e.getWindow().dispose();
         camera.release();
         System.exit(0);
      }
   });

   if (camera.isOpened()) {

      // Create SwingWorker to encapsulate the process in a thread
      SwingWorker<Void, Mat> worker = new SwingWorker<Void, Mat>() {
         @Override
         protected Void doInBackground() throws Exception {

            // Put something into thisFrame so it doesn't glitch at the beginning
            Mat thisFrame = new Mat();
            camera.read(thisFrame);
            Imgproc.cvtColor(thisFrame, thisFrame, Imgproc.COLOR_BGR2GRAY); // Convert the frame to grayscale

            // Set up Mats for the previous frame and the diff image
            Mat pastFrame = new Mat();
            Mat diff = new Mat();


            // isCancelled is set by the SwingWorker
            while (!isCancelled()) {

               thisFrame.copyTo(pastFrame); // The previous current frame becomes pastFrame
               camera.read(thisFrame); // Read the next camera frame into thisFrame
               Imgproc.cvtColor(thisFrame, thisFrame, Imgproc.COLOR_BGR2GRAY); // Convert the frame to grayscale

               if (!thisFrame.empty()) {

                  // Set the frame size to have a nice border around the image
                  frame.setSize(thisFrame.width() + 40, thisFrame.height() + 60);


                  Core.absdiff(thisFrame, pastFrame, diff); // Diff the frames
                  Imgproc.GaussianBlur(diff, diff, new Size(7, 7), 7); // Despeckle
                  Imgproc.threshold(diff, diff, 5, 255, 1); // Threshold the grayscale diff (type 1 = THRESH_BINARY_INV)

                  image = matrixToBuffer(getFace(thisFrame, diff)); // Update the display image
                  panel.repaint(); // Refresh the panel
               } else {
                  System.err.println("Error: no frame captured");
               }
               //Thread.sleep(70); // Set refresh rate, as well as prevent the code from tripping over itself
            }
            return null;
         }
      };
      worker.execute();
   }
   return;
}
 
Developer: grantslatton, Project: GarfieldLanguage, Lines: 76, Source: Webcam.java

Example 7: calibration

import org.opencv.videoio.VideoCapture; // import the package/class required by the method
public void calibration(VideoCapture capture, EyeDetection e) {

        initializeEvetything();
        /*
        System.out.println("Calibration time!");
        System.out.println("Open both your eyes for 2 seconds");
        */
        pressAnyKeyToContinue("Calibration time!\nOpen both your eyes for 2 seconds");

        boolean first_capture;
        first_capture = true;
        //capture.open(0); // may be needed on some setups

        /*
        if(capture.isOpened()) {
            capture.set(io.CV_CAP_PROP_FRAME_WIDTH, width);
            capture.set(io.CV_CAP_PROP_FRAME_HEIGHT, height);
        }
        */

        while(openEyesList.size() < 2) {

            if(!first_capture) {
                //System.out.println("Can you please repeat?");
                pressAnyKeyToContinue("Can you please repeat?");
            } else {
                first_capture = false;
            }

            if(capture.read(openEyesMat)) {
                openEyesList = e.eyeDetect(openEyesMat);
            }
        }
        e.filter(openEyesList);


        //System.out.println();
        pressAnyKeyToContinue("Nice. Now close your eyes for 2 seconds");
        capture.release();
        capture.open(0);
        first_capture = true;

        while(closedEyesList.size() < 2) {
            if(!capture.isOpened())
                capture.open(0);
            if(first_capture) {
                first_capture = false;
            } else {
                //System.out.println("Can you please repeat?");
                pressAnyKeyToContinue("Can you please repeat?");
            }

            if(capture.read(closedEyesMat)) {
                closedEyesList = e.eyeDetect(closedEyesMat);
            }
        }
        e.filter(closedEyesList);
        System.out.println();
        capture.release();
    }
 
Developer: Merge-Conflict, Project: ICHACK16-ProcrastEnabler, Lines: 61, Source: Calibrator.java

Example 8: runMainLoop

import org.opencv.videoio.VideoCapture; // import the package/class required by the method
private void runMainLoop(String[] args) throws InterruptedException	{
	ImageViewer imageProcessor = new ImageViewer();
	FloodFill floodFill = new FloodFill();
	Mat webcamMatImage = new Mat();
    Mat mask = new Mat();
		    
	Image tempImage;
	Image tempImage2;
	Image tempImage3;
	VideoCapture capture = VideoCaptureFactory.videoCapture(args);

	//VideoCapture capture = new VideoCapture("D:\\STUDIA\\floor3.mp4");

	capture.set(Videoio.CAP_PROP_FRAME_WIDTH,500);
	capture.set(Videoio.CAP_PROP_FRAME_HEIGHT,300);
	int initFrames = (int) capture.get(Videoio.CAP_PROP_FPS);
	if (capture.isOpened()) {
		while (true) {
			capture.read(webcamMatImage);

			if (!webcamMatImage.empty()){
				
				Mat matblurredImage = imageProcessor.blur(webcamMatImage, 1);
				mask.create(new Size(webcamMatImage.cols()+2, webcamMatImage.rows()+2), CvType.CV_8UC1);
				
				mask.setTo(new Scalar(0));
				// Set the range mode for the flood fill
				floodFill.setRange(FloodFill.FIXED_RANGE);
				
				//Connectivity setting for 8 neighbour pixels
				floodFill.setConnectivity(8);
				
				// Lower and upper pixel-difference thresholds for the fill
				floodFill.setLowerDiff(sliders.getSliderValue("lower diff"));
				floodFill.setUpperDiff(sliders.getSliderValue("upper diff"));
				
				// Seed point coordinates (x, y), taken from the sliders as percentages of the frame size
				seedPointXperc = sliders.getSliderValue("seed point x");
				seedPointYperc = sliders.getSliderValue("seed point y");
				// Clamp the seed point so it stays inside the image
				int seedPointX = Math.min(webcamMatImage.width()*seedPointXperc/100,webcamMatImage.width()-1);
				int seedPointY = Math.min(webcamMatImage.height()*seedPointYperc/100,webcamMatImage.height()-1);
				floodFill.fill(matblurredImage, mask, seedPointX, seedPointY);
				
				tempImage = imageProcessor.toBufferedImage(matblurredImage);
				tempImage2 = imageProcessor.toBufferedImage(webcamMatImage);

				Mat temp = matblurredImage.clone();
				drawHoughLines(webcamMatImage, temp);

				tempImage3 = imageProcessor.toBufferedImage(temp);
				
				ImageIcon imageIcon = new ImageIcon(tempImage, "Floor Detection 0.1");
				ImageIcon imageIcon2 = new ImageIcon(tempImage2, "Floor Detection 0.1");
				ImageIcon imageIcon3 = new ImageIcon(tempImage3, "Floor Detection 0.1");
				
				imageLabel1.setIcon(imageIcon);
				imageLabel1.setText("Flood filling");
				imageLabel1.setHorizontalTextPosition(JLabel.CENTER);
				imageLabel1.setVerticalTextPosition(JLabel.BOTTOM);
				imageLabel2.setIcon(imageIcon2);
				imageLabel2.setText("Original input");
				imageLabel2.setHorizontalTextPosition(JLabel.CENTER);
				imageLabel2.setVerticalTextPosition(JLabel.BOTTOM);
				imageLabel3.setIcon(imageIcon3);
				imageLabel3.setText("Edge detection approach");
				imageLabel3.setHorizontalTextPosition(JLabel.CENTER);
				imageLabel3.setVerticalTextPosition(JLabel.BOTTOM);
				// Delay between frames (sleeps for the CAP_PROP_FPS value, in milliseconds)
				TimeUnit.MILLISECONDS.sleep(initFrames);
				
			} else {
				System.err.println(" -- Frame not captured -- Break!");
				break;
			}
		}
	} else {
		System.err.println("Couldn't open capture.");
	}
}
 
Developer: adgud, Project: MotionDepthDetection, Lines: 81, Source: MainApp.java


Note: The org.opencv.videoio.VideoCapture.read examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Please consult each project's license before distributing or using the code, and do not republish without permission.