This article collects typical usage examples of the Java method edu.wpi.cscore.CvSource.putFrame. If you are wondering how CvSource.putFrame is used in Java, or are looking for concrete examples of it, the curated code samples below may help. You can also explore the containing class, edu.wpi.cscore.CvSource, in more detail.
Four code examples of the CvSource.putFrame method are shown below, sorted by popularity by default.
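Before the examples, a minimal self-contained sketch of the typical CvSource.putFrame pattern may help. It assumes the 2017-2019 WPILib package layout (edu.wpi.first.wpilibj.CameraServer, matching the getInstance() calls used in the examples); the camera index, the stream name "Processed", and the 320x240 resolution are illustrative assumptions, not taken from the examples below.

// Minimal sketch: grab frames from a camera and republish them with CvSource.putFrame.
import edu.wpi.cscore.CvSink;
import edu.wpi.cscore.CvSource;
import edu.wpi.cscore.UsbCamera;
import edu.wpi.first.wpilibj.CameraServer;
import org.opencv.core.Mat;

public class PutFrameSketch {
    public void streamLoop() {
        UsbCamera camera = CameraServer.getInstance().startAutomaticCapture(0);
        camera.setResolution(320, 240);
        CvSink sink = CameraServer.getInstance().getVideo(camera);
        CvSource output = CameraServer.getInstance().putVideo("Processed", 320, 240);
        Mat frame = new Mat();
        while (!Thread.interrupted()) {
            // grabFrame returns 0 on timeout; skip the frame rather than pushing a stale Mat.
            if (sink.grabFrame(frame) == 0) {
                continue;
            }
            output.putFrame(frame); // push the (optionally processed) Mat to the output stream
        }
    }
}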
Example 1: concatStreamStart
import edu.wpi.cscore.CvSource; // import the package/class this method depends on
/**
 * Start both the left and right camera streams and combine them into a single stream,
 * which is then pushed to an output stream titled "Concat".
 * This method should only be used for starting the camera stream.
 */
private void concatStreamStart() {
    cameraLeft = CameraServer.getInstance().startAutomaticCapture("Left", 0);
    cameraRight = CameraServer.getInstance().startAutomaticCapture("Right", 1);
    /// Dummy sinks to keep the camera connections open.
    CvSink cvsinkLeft = new CvSink("leftSink");
    cvsinkLeft.setSource(cameraLeft);
    cvsinkLeft.setEnabled(true);
    CvSink cvsinkRight = new CvSink("rightSink");
    cvsinkRight.setSource(cameraRight);
    cvsinkRight.setEnabled(true);
    /// Matrices to store each image from the cameras.
    Mat leftSource = new Mat();
    Mat rightSource = new Mat();
    /// The ArrayList of left and right sources is needed by the hconcat method used to combine the streams.
    ArrayList<Mat> sources = new ArrayList<>();
    sources.add(leftSource);
    sources.add(rightSource);
    /// Concatenation of both matrices.
    Mat concat = new Mat();
    /// Publish the combined video through the CameraServer so a dashboard can display it.
    /// The width is doubled because the concatenated stream is twice as wide as a single webcam.
    CvSource outputStream = CameraServer.getInstance().putVideo("Concat", 2 * Constants.CAM_WIDTH, Constants.CAM_HEIGHT);
    while (!Thread.interrupted()) {
        /// Fill each mat with the current frame from its camera.
        cvsinkLeft.grabFrame(leftSource);
        cvsinkRight.grabFrame(rightSource);
        /// Combine the frames side by side into a single mat and stream the result.
        Core.hconcat(sources, concat);
        outputStream.putFrame(concat);
    }
}
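Since concatStreamStart blocks in its while loop, callers would typically run it on a dedicated thread, for example from robotInit. A minimal sketch, assuming the surrounding class exposes the method above; the thread name is illustrative:

// Launch the blocking concatStreamStart loop on its own daemon thread (hypothetical caller code).
Thread concatThread = new Thread(this::concatStreamStart, "concatStream");
concatThread.setDaemon(true);
concatThread.start();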
Example 2: VisionTest
import edu.wpi.cscore.CvSource; // import the package/class this method depends on
public VisionTest() {
    UsbCamera camera = CameraServer.getInstance().startAutomaticCapture(); // cam0 by default
    camera.setResolution(IMG_WIDTH, IMG_HEIGHT);
    camera.setBrightness(0);
    // camera.setExposureManual(100);
    camera.setExposureAuto();
    CvSource cs = CameraServer.getInstance().putVideo("name", IMG_WIDTH, IMG_HEIGHT);
    visionThread = new VisionThread(camera, new GripPipeline(), pipeline -> {
        Mat IMG_MOD = pipeline.hslThresholdOutput();
        if (!pipeline.filterContoursOutput().isEmpty()) {
            // Rect recCombine = Imgproc.boundingRect(pipeline.filterContoursOutput().get(0));
            Rect recCombine = computeBoundingRectangle(pipeline.filterContoursOutput());
            if (recCombine == null) {
                targetDetected = false;
                return;
            }
            targetDetected = true;
            computeCoords(recCombine);
            synchronized (imgLock) {
                centerX = recCombine.x + (recCombine.width / 2);
            }
            // Draw the bounding rectangle on the thresholded image before streaming it.
            Imgproc.rectangle(IMG_MOD, new Point(recCombine.x, recCombine.y),
                    new Point(recCombine.x + recCombine.width, recCombine.y + recCombine.height),
                    new Scalar(255, 20, 0), 5);
        } else {
            targetDetected = false;
        }
        // Push the (possibly annotated) threshold output to the "name" stream.
        cs.putFrame(IMG_MOD);
    });
    visionThread.start();
    Relay relay = new Relay(RobotMap.RELAY_CHANNEL, Direction.kForward);
    relay.set(Relay.Value.kOn);
    // this.processImage();
}
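The centerX value written under imgLock inside the vision callback is meant to be read under the same lock from the main robot loop. A minimal consumer sketch, assuming the same field names as the example; the turn calculation is illustrative:

// Hypothetical consumer of the centerX value published by the vision thread.
// Reading under the same imgLock keeps the read consistent with the writer above.
double centerX;
synchronized (imgLock) {
    centerX = this.centerX;
}
double turn = centerX - (IMG_WIDTH / 2.0); // signed pixel offset from the image center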
Example 3: cameraInit
import edu.wpi.cscore.CvSource; // import the package/class this method depends on
/**
 * Camera switcher initialization.
 */
private void cameraInit() {
    // camera switching code
    Thread t = new Thread(() -> {
        boolean allowCam1 = false;
        UsbCamera camera1 = CameraServer.getInstance().startAutomaticCapture(0);
        camera1.setResolution(320, 240);
        camera1.setFPS(30);
        UsbCamera camera2 = CameraServer.getInstance().startAutomaticCapture(1);
        camera2.setResolution(320, 240);
        camera2.setFPS(30);
        CvSink cvSink1 = CameraServer.getInstance().getVideo(camera1);
        CvSink cvSink2 = CameraServer.getInstance().getVideo(camera2);
        CvSource outputStream = CameraServer.getInstance().putVideo("Switcher", 320, 240);
        Mat image = new Mat();
        Mat grey = new Mat();
        while (!Thread.interrupted()) {
            // Toggle the active camera on button 4 (note: this flips every loop iteration while the button is held).
            if (Robot.oi.getPrimaryJoystick().getRawButton(4)) {
                allowCam1 = !allowCam1;
            }
            if (allowCam1) {
                cvSink2.setEnabled(false);
                cvSink1.setEnabled(true);
                cvSink1.grabFrame(image);
            } else {
                cvSink1.setEnabled(false);
                cvSink2.setEnabled(true);
                cvSink2.grabFrame(image);
            }
            // Convert to grayscale and stream the result to the "Switcher" output.
            Imgproc.cvtColor(image, grey, Imgproc.COLOR_BGR2GRAY);
            outputStream.putFrame(grey);
        }
    });
    t.start();
}
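One caveat with the loop above: CvSink.grabFrame returns 0 on timeout and leaves the Mat unchanged, so an empty Mat can reach Imgproc.cvtColor and throw. A hedged variant of the grab-and-convert step (an assumption, not part of the original code):

// Defensive variant of the grab-and-convert step; assumes the sinks are already enabled as above.
long frameTime = allowCam1 ? cvSink1.grabFrame(image) : cvSink2.grabFrame(image);
if (frameTime != 0 && !image.empty()) {
    Imgproc.cvtColor(image, grey, Imgproc.COLOR_BGR2GRAY);
    outputStream.putFrame(grey);
}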
Example 4: cameraInit
import edu.wpi.cscore.CvSource; // import the package/class this method depends on
/**
 * Camera switcher initialization.
 */
private void cameraInit() {
    // camera switching code
    Thread t = new Thread(() -> {
        boolean allowCam1 = false;
        UsbCamera camera1 = CameraServer.getInstance().startAutomaticCapture(0);
        camera1.setResolution(320, 240);
        camera1.setFPS(30);
        UsbCamera camera2 = CameraServer.getInstance().startAutomaticCapture(1);
        camera2.setResolution(320, 240);
        camera2.setFPS(30);
        CvSink cvSink1 = CameraServer.getInstance().getVideo(camera1);
        CvSink cvSink2 = CameraServer.getInstance().getVideo(camera2);
        CvSource outputStream = CameraServer.getInstance().putVideo("Switcher", 320, 240);
        Mat image = new Mat();
        Mat grey = new Mat();
        while (!Thread.interrupted()) {
            if (Robot.oi.getPrimaryJoystick().getRawButton(4)) {
                allowCam1 = !allowCam1;
            }
            if (allowCam1) {
                cvSink2.setEnabled(false);
                cvSink1.setEnabled(true);
                cvSink1.grabFrame(image);
            } else {
                cvSink1.setEnabled(false);
                cvSink2.setEnabled(true);
                cvSink2.grabFrame(image);
            }
            // Unlike Example 3, the grayscale conversion is disabled and the color frame is streamed directly.
            // Imgproc.cvtColor(image, grey, Imgproc.COLOR_BGR2GRAY);
            // outputStream.putFrame(grey);
            outputStream.putFrame(image);
        }
    });
    t.start();
}
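As a design note, the only difference from Example 3 is that the color frame is streamed without a grayscale conversion. When no OpenCV processing is needed at all, an alternative is to re-point an MJPEG server at the desired camera instead of copying frames through CvSink/CvSource; a rough sketch, where the server name and port 1181 are assumptions:

// Alternative sketch (an assumption, not from the original repo): switch cameras by
// changing the source of an MjpegServer rather than pushing Mats with putFrame.
MjpegServer server = new MjpegServer("Switcher", 1181);
server.setSource(allowCam1 ? camera1 : camera2); // call again whenever the selection changes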