本文整理汇总了Java中org.opencv.videoio.VideoCapture.set方法的典型用法代码示例。如果您正苦于以下问题:Java VideoCapture.set方法的具体用法?Java VideoCapture.set怎么用?Java VideoCapture.set使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.opencv.videoio.VideoCapture
的用法示例。
在下文中一共展示了VideoCapture.set方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: Webcam
import org.opencv.videoio.VideoCapture; //导入方法依赖的package包/类
/**
 * Opens the default capture device (index 0), requests the configured
 * frame size, waits for the camera to warm up, then reads one frame
 * into {@code mat} and prints its dimensions.
 */
Webcam() {
    cap = new VideoCapture(0);
    if (!cap.isOpened()) {
        System.out.println("Camera Error");
    } else {
        System.out.println("Camera OK?");
        // Request the desired frame size; the driver may ignore or clamp it.
        // width/height are fields of the enclosing class (not visible here).
        cap.set(Videoio.CV_CAP_PROP_FRAME_WIDTH, width);
        cap.set(Videoio.CV_CAP_PROP_FRAME_HEIGHT, height);
    }
    // Give the camera time to initialize before grabbing the first frame.
    try {
        Thread.sleep(1000);
    } catch (InterruptedException ex) {
        // Fix: do not swallow the interrupt — restore the thread's
        // interrupt status so callers can still observe it.
        Thread.currentThread().interrupt();
    }
    cap.read(mat);
    System.out.println("width, height = "+mat.cols()+", "+mat.rows());
}
示例2: ImageProcessor
import org.opencv.videoio.VideoCapture; //导入方法依赖的package包/类
/**
 * Creates an image processor bound to the given webcam device,
 * requesting 640x480 capture and fixing the camera's exposure and
 * brightness so downstream vision thresholds stay stable.
 *
 * @param webcam the device index, starting from 0
 * @param visionTable the network table the results are published to
 */
public ImageProcessor(int webcam, VisionNetworkTable visionTable) {
    // Open the requested device and ask for a 640x480 frame size.
    capture = new VideoCapture(webcam);
    capture.set(Videoio.CAP_PROP_FRAME_WIDTH, 640);
    capture.set(Videoio.CAP_PROP_FRAME_HEIGHT, 480);

    this.visionTable = visionTable;
    // A single-threaded scheduler drives the periodic processing loop.
    this.scheduler = Executors.newScheduledThreadPool(1);

    // Working buffers, reused across frames to avoid per-frame allocation.
    originalImage = new Mat();
    hslImage = new Mat();
    maskImage = new Mat();
    hierarchy = new Mat();

    // Lock down camera settings for consistent image processing.
    setCameraManualExposure();
    setCameraAbsoluteExposure();
    setCameraBrightness();
    System.out.println("ImageProcesser constructor done");
}
示例3: grabImage
import org.opencv.videoio.VideoCapture; //导入方法依赖的package包/类
/**
 * Opens camera 0, requests 720p frames, waits for the first valid frame,
 * then reads 1000 frames while timing them, printing the elapsed millis.
 * Releases the device in all exit paths.
 */
public void grabImage(){
    Mat frame = new Mat();
    //connect
    videoCapture = new VideoCapture(0);
    isOpened = videoCapture.isOpened();
    System.out.println("connected: " + isOpened);
    if (!isOpened) {
        // Fix: bail out before configuring/grabbing on a device that
        // never opened; there is nothing to release in this case.
        return;
    }
    //setSetting — request 1280x720; the driver may clamp these values
    videoCapture.set(Videoio.CV_CAP_PROP_FRAME_WIDTH, 1280);
    videoCapture.set(Videoio.CV_CAP_PROP_FRAME_HEIGHT, 720);
    //startGrab
    isSucceed = videoCapture.grab();
    System.out.println("started: " + String.valueOf(isSucceed));
    if (!isSucceed) {
        // Fix: release the device on the failure path — the original
        // returned here without release(), leaking the camera handle.
        videoCapture.release();
        return;
    }
    System.out.println("------- START GRAB -------");
    //Wait for camera starting — discard empty frames until the sensor delivers.
    while (true){
        videoCapture.read(frame);
        if (!frame.empty())
            break;
    }
    // Time 1000 consecutive reads to estimate the effective frame rate.
    int frameNo = 0;
    long startSysMillis = System.currentTimeMillis();
    while (frameNo < 1000){
        videoCapture.read(frame);
        frameNo++;
    }
    System.out.println(frameNo + " frames in " + (System.currentTimeMillis() - startSysMillis) + " millis");
    videoCapture.release(); // release device
    System.out.println('\n' + "Done");
}
示例4: CvMultiCamera
import org.opencv.videoio.VideoCapture; //导入方法依赖的package包/类
/**
 * Opens a new camera using openCV with a given frame width and height,
 * and a compression quality.
 * <p>
 * Checks all available devices up to index 10 and adds them using
 * {@link CameraView#add(Camera)}.
 * </p>
 *
 * @param name the name of the camera
 * @param current the device index from 0; negative values skip opening a device
 * @param width the frame width
 * @param height the frame height
 * @param quality the compression quality
 *
 * @throws RuntimeException if the camera could not be opened
 */
public CvMultiCamera(String name, int current, int width, int height, int quality) {
super(name, null);
// Probe device indices 0..9; checkCameras presumably returns a per-slot
// status array (>= 0 meaning "present") — TODO confirm against its definition.
capture = new VideoCapture();
cams = checkCameras(capture, 10);
if(current >= 0){
capture.open(current);
camIndex = current;
}
// Reusable buffers for grabbed frames and their JPEG encoding.
image = new Mat();
buffer = new MatOfByte();
compressParams = new MatOfInt(Imgcodecs.CV_IMWRITE_JPEG_QUALITY, quality);
// Request the desired frame size; the driver may adjust it.
capture.set(Videoio.CAP_PROP_FRAME_WIDTH, width);
capture.set(Videoio.CAP_PROP_FRAME_HEIGHT, height);
this.height = height;
this.width = width;
this.quality = quality;
// NOTE(review): a null Camera is added once per detected device — this
// looks like a placeholder for lazily-created cameras; verify that
// add(null) is intentional and handled by the superclass.
Camera nullcam = null;
for (int i = 0; i < cams.length; i++) {
if(cams[i] >= 0){
add(nullcam);
}
}
}
示例5: CvCamera
import org.opencv.videoio.VideoCapture; //导入方法依赖的package包/类
/**
 * Opens a camera through openCV at the given device index, configuring
 * the capture frame size and the JPEG compression quality.
 *
 * @param cam the device index from 0.
 * @param width the frame width
 * @param height the frame height
 * @param quality the compression quality
 *
 * @throws RuntimeException if the camera could not be opened
 */
public CvCamera(int cam, int width, int height, int quality){
    capture = new VideoCapture();
    capture.open(cam);
    if (!capture.isOpened()) {
        throw new RuntimeException("Unable to open camera " + cam);
    }

    // Reusable buffers for grabbed frames and their JPEG encoding.
    image = new Mat();
    buffer = new MatOfByte();
    compressParams = new MatOfInt(Imgcodecs.CV_IMWRITE_JPEG_QUALITY, quality);

    // Request the desired frame size; the driver may adjust it.
    capture.set(Videoio.CAP_PROP_FRAME_WIDTH, width);
    capture.set(Videoio.CAP_PROP_FRAME_HEIGHT, height);

    camIndex = cam;
    this.quality = quality;
}
示例6: makeCamera
import org.opencv.videoio.VideoCapture; //导入方法依赖的package包/类
/**
 * Makes a connection to a camera and configures its frame size and exposure.
 *
 * @param device Camera number (device index from 0).
 * @param width Window width in pixels.
 * @param height Window height in pixels.
 * @param exposure Relative exposure; values &lt;= -1.0 leave auto-exposure enabled.
 * @return the opened and configured {@link VideoCapture}.
 * @throws RuntimeException if the camera could not be opened.
 */
public static VideoCapture makeCamera(int device, int width, int height, double exposure) {
    // Fix: use the requested device index — the original hard-coded 0,
    // silently ignoring the `device` parameter.
    VideoCapture camera = new VideoCapture(device);
    // Fix: fail fast before configuring a device that never opened
    // (the original set properties first and checked isOpened() last).
    if (!camera.isOpened()) {
        throw new RuntimeException("Camera will not open");
    }
    camera.set(Videoio.CAP_PROP_FRAME_WIDTH, width);
    camera.set(Videoio.CAP_PROP_FRAME_HEIGHT, height);
    if (exposure > -1.0) {
        System.out.println("\t" + exposure);
        camera.set(Videoio.CAP_PROP_AUTO_EXPOSURE, 0); // disable auto-exposure
        camera.set(Videoio.CAP_PROP_EXPOSURE, exposure);
    }
    return camera;
}
示例7: init
import org.opencv.videoio.VideoCapture; //导入方法依赖的package包/类
/**
 * Opens camera 0 at 640x480 and starts a background thread that reads
 * frames, converts them to BufferedImages, and pushes them to the
 * front/bottom image listeners at roughly 16 fps.
 */
@Override
public void init() {
    imageProcessor = new ImageProcessor();
    webcamMatImage = new Mat();
    capture = new VideoCapture(0);
    capture.set(Videoio.CAP_PROP_FRAME_WIDTH, 640);
    capture.set(Videoio.CAP_PROP_FRAME_HEIGHT, 480);
    if (capture.isOpened()) {
        new Thread(new Runnable() {
            @Override
            public void run() {
                BufferedImage tempImage;
                while (true) {
                    capture.read(webcamMatImage);
                    if (!webcamMatImage.empty()) {
                        tempImage = imageProcessor.toBufferedImage(webcamMatImage);
                        // isFront is a field of the enclosing class; it selects
                        // which listener channel receives the frame.
                        if(isFront)
                            Listeners.getInstance().updateImageFront(tempImage);
                        else
                            Listeners.getInstance().updateImageBottom(tempImage);
                    } else {
                        System.out.println(" -- Frame not captured -- Break!");
                        break;
                    }
                    try {
                        Thread.sleep(60); // ~16 fps refresh cadence
                    } catch (InterruptedException e) {
                        System.out.println("not sleep");
                        // Fix: the original swallowed the interrupt and kept
                        // looping, making this thread unstoppable. Restore the
                        // interrupt flag and exit the loop instead.
                        Thread.currentThread().interrupt();
                        break;
                    }
                }
            }
        }).start();
    } else {
        System.out.println("Couldn't open capture.");
    }
}
示例8: main
import org.opencv.videoio.VideoCapture; //导入方法依赖的package包/类
// Entry point: shows a live motion-detection window. Each frame is diffed
// against the previous one; the diff drives getFace() (defined elsewhere
// in this class) and the result is painted onto the Webcam panel.
public static void main(String args[]) {
System.loadLibrary(Core.NATIVE_LIBRARY_NAME); // Import openCV
Webcam panel = new Webcam(); // Initialize itself
// Initialize JPanel
JFrame frame = new JFrame("Webcam");
frame.setSize(200, 200);
frame.setContentPane(panel);
frame.setVisible(true);
VideoCapture camera = new VideoCapture(0); // The camera
// Attempt to set the frame size, but it doesn't really work. Only just makes it bigger
camera.set(Videoio.CAP_PROP_FRAME_WIDTH, 1900);
camera.set(Videoio.CAP_PROP_FRAME_HEIGHT, 1000);
// Special window listener because there are threads that need to be shutdown on a close
frame.addWindowListener(new WindowAdapter() {
@Override
public void windowClosing(WindowEvent e) {
e.getWindow().dispose();
camera.release();
System.exit(0);
}
});
if (camera.isOpened()) {
// Create SwingWorker to encapsulate the process in a thread
SwingWorker<Void, Mat> worker = new SwingWorker<Void, Mat>() {
@Override
protected Void doInBackground() throws Exception {
// Put something into thisFrame so it doesn't glitch at the beginning
Mat thisFrame = new Mat();
camera.read(thisFrame);
Imgproc.cvtColor(thisFrame, thisFrame, Imgproc.COLOR_BGR2GRAY); // Convert the diff to gray
// Set up new Mats for diffing them later, manually set the amount of channels to avoid an openCV error
Mat pastFrame = new Mat();
Mat diff = new Mat();
// isCancelled is set by the SwingWorker
while (!isCancelled()) {
thisFrame.copyTo(pastFrame); // What was previously the frame is now the pastFrame
camera.read(thisFrame); // Get camera image, and set it to currentImage
Imgproc.cvtColor(thisFrame, thisFrame, Imgproc.COLOR_BGR2GRAY); // Convert the diff to gray
if (!thisFrame.empty()) {
// Set the frame size to have a nice border around the image
frame.setSize(thisFrame.width() + 40, thisFrame.height() + 60);
Core.absdiff(thisFrame, pastFrame, diff); // Diff the frames
Imgproc.GaussianBlur(diff, diff, new Size(7, 7), 7); // Despeckle
// NOTE(review): the magic threshold type `1` appears to be
// Imgproc.THRESH_BINARY_INV — confirm against the OpenCV constants
// and prefer the named constant.
Imgproc.threshold(diff, diff, 5, 255, 1); // Threshhold the gray
// image is a static field of the enclosing class; getFace/matrixToBuffer
// are defined elsewhere in this class.
image = matrixToBuffer(getFace(thisFrame, diff)); // Update the display image
panel.repaint(); // Refresh the panel
} else {
System.err.println("Error: no frame captured");
}
//Thread.sleep(70); // Set refresh rate, as well as prevent the code from tripping over itself
}
return null;
}
};
worker.execute();
}
return;
}
示例9: runMainLoop
import org.opencv.videoio.VideoCapture; //导入方法依赖的package包/类
/**
 * Main processing loop: reads frames from the capture source, flood-fills
 * from a slider-selected seed point, runs a Hough-line edge-detection
 * pass, and displays the three result images side by side.
 *
 * @param args forwarded to {@link VideoCaptureFactory#videoCapture} to
 *             select the capture source
 * @throws InterruptedException if the per-frame sleep is interrupted
 */
private void runMainLoop(String[] args) throws InterruptedException {
    ImageViewer imageProcessor = new ImageViewer();
    FloodFill floodFill = new FloodFill();
    Mat webcamMatImage = new Mat();
    Mat mask = new Mat();
    Image tempImage;
    Image tempImage2;
    Image tempImage3;
    VideoCapture capture = VideoCaptureFactory.videoCapture(args);
    //VideoCapture capture = new VideoCapture("D:\\STUDIA\\floor3.mp4");
    capture.set(Videoio.CAP_PROP_FRAME_WIDTH,500);
    capture.set(Videoio.CAP_PROP_FRAME_HEIGHT,300);
    // Fix: CAP_PROP_FPS reports frames *per second*; the original slept for
    // `fps` milliseconds per frame instead of 1000/fps. Fall back to ~30 fps
    // when the source does not report a frame rate.
    double fps = capture.get(Videoio.CAP_PROP_FPS);
    long frameDelayMs = fps > 0 ? (long) (1000.0 / fps) : 33;
    if (capture.isOpened()) {
        while (true) {
            capture.read(webcamMatImage);
            if (!webcamMatImage.empty()){
                Mat matblurredImage = imageProcessor.blur(webcamMatImage, 1);
                // floodFill requires a mask 2px larger than the image in each dimension.
                mask.create(new Size(webcamMatImage.cols()+2, webcamMatImage.rows()+2), CvType.CV_8UC1);
                mask.setTo(new Scalar(0));
                //Setting range method for fill flood
                floodFill.setRange(FloodFill.FIXED_RANGE);
                //Connectivity setting for 8 neighbour pixels
                floodFill.setConnectivity(8);
                //Lower and Higher difference of pixels
                floodFill.setLowerDiff(sliders.getSliderValue("lower diff"));
                floodFill.setUpperDiff(sliders.getSliderValue("upper diff"));
                //Here you point the coordinates x y of the pixel to get populated
                seedPointXperc = sliders.getSliderValue("seed point x");
                seedPointYperc = sliders.getSliderValue("seed point y");
                // limit seed point coordinates so it does not get out of the image
                int seedPointX = Math.min(webcamMatImage.width()*seedPointXperc/100,webcamMatImage.width()-1);
                int seedPointY = Math.min(webcamMatImage.height()*seedPointYperc/100,webcamMatImage.height()-1);
                floodFill.fill(matblurredImage, mask, seedPointX, seedPointY);
                tempImage = imageProcessor.toBufferedImage(matblurredImage);
                tempImage2 = imageProcessor.toBufferedImage(webcamMatImage);
                Mat temp = matblurredImage.clone();
                drawHoughLines(webcamMatImage, temp);
                tempImage3 = imageProcessor.toBufferedImage(temp);
                ImageIcon imageIcon = new ImageIcon(tempImage, "Floor Detection 0.1");
                ImageIcon imageIcon2 = new ImageIcon(tempImage2, "Floor Detection 0.1");
                ImageIcon imageIcon3 = new ImageIcon(tempImage3, "Floor Detection 0.1");
                imageLabel1.setIcon(imageIcon);
                imageLabel1.setText("Flood filling");
                imageLabel1.setHorizontalTextPosition(JLabel.CENTER);
                imageLabel1.setVerticalTextPosition(JLabel.BOTTOM);
                imageLabel2.setIcon(imageIcon2);
                imageLabel2.setText("Original input");
                imageLabel2.setHorizontalTextPosition(JLabel.CENTER);
                imageLabel2.setVerticalTextPosition(JLabel.BOTTOM);
                imageLabel3.setIcon(imageIcon3);
                imageLabel3.setText("Edge detection approach");
                imageLabel3.setHorizontalTextPosition(JLabel.CENTER);
                imageLabel3.setVerticalTextPosition(JLabel.BOTTOM);
                //delay so frames play back at roughly the source frame rate
                TimeUnit.MILLISECONDS.sleep(frameDelayMs);
            } else {
                System.err.println(" -- Frame not captured -- Break!");
                break;
            }
        }
    } else {
        System.err.println("Couldn't open capture.");
    }
}