This page collects typical usage examples of the Java class edu.wpi.cscore.CvSource. If you are unsure what CvSource is for or how to use it, the curated class examples below may help.
The CvSource class belongs to the edu.wpi.cscore package. Twelve code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
Example 1: initDefaultCommand

import edu.wpi.cscore.CvSource; // import the required package/class

public void initDefaultCommand() {
    visionThread = new Thread(() -> {
        // Get the UsbCamera from CameraServer
        UsbCamera camera = CameraServer.getInstance().startAutomaticCapture();
        // Set the resolution
        camera.setResolution(640, 480);
        // Get a CvSink. This will capture Mats from the camera
        CvSink cvSink = CameraServer.getInstance().getVideo();
        // Set up a CvSource. This will send images back to the Dashboard
        CvSource outputStream = CameraServer.getInstance().putVideo("Rectangle", 640, 480);
        // Mats are very memory expensive. Let's reuse this Mat.
        Mat mat = new Mat();
        // The loop condition cannot be 'true': the program would never exit.
        // Checking Thread.interrupted() instead lets the robot stop this
        // thread when restarting robot code or deploying.
        while (!Thread.interrupted()) {
            // Tell the CvSink to grab a frame from the camera and put it
            // in the source mat. If there is an error, notify the output.
            if (cvSink.grabFrame(mat) == 0) {
                // Send the error to the output.
                outputStream.notifyError(cvSink.getError());
                // Skip the rest of the current iteration
                continue;
            }
            // Draw a rectangle on the image
            Imgproc.rectangle(mat, new Point(100, 100), new Point(400, 400),
                    new Scalar(255, 255, 255), 5);
            // Give the output stream a new image to display
            outputStream.putFrame(mat);
        }
    });
    visionThread.setDaemon(true);
    visionThread.start();
}
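For reference, a snippet like this compiles only with the right imports. The list below is a minimal sketch based on the package layout of the WPILib releases that used the edu.wpi.cscore namespace; exact locations may differ in other versions:

// Assumed import set for the snippet above (WPILib-era packages)
import edu.wpi.cscore.CvSink;
import edu.wpi.cscore.CvSource;
import edu.wpi.cscore.UsbCamera;
import edu.wpi.first.wpilibj.CameraServer;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.imgproc.Imgproc;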
Example 2: Vision

import edu.wpi.cscore.CvSource; // import the required package/class

private Vision() {
    super("Vision");
    // Initialize the generated pipeline
    pipeline = new Pipeline();
    // Initialize the USB camera
    camera = new UsbCamera("UsbCamera", Mappings.CAMERA);
    camera.setVideoMode(CAMERA_PIXEL_FORMAT, CAMERA_WIDTH, CAMERA_HEIGHT, CAMERA_FPS);
    camera.setExposureManual(CAMERA_EXPOSURE);
    camera.setBrightness(CAMERA_BRIGHTNESS);
    // Initialize the sink
    sink = new CvSink("Sink");
    sink.setEnabled(true);
    sink.setSource(camera);
    // Initialize the output source and the MJPEG server that serves it
    output = new CvSource("CameraSource", CAMERA_PIXEL_FORMAT, CAMERA_WIDTH, CAMERA_HEIGHT, CAMERA_FPS);
    server = new MjpegServer("OutputServer", Mappings.MJPEG_SERVER);
    server.setSource(output);
}
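This constructor only wires the camera, sink, source, and server together; frames still have to be pumped through the pipeline somewhere. A minimal sketch of what that step might look like (the processFrame method and frame field are hypothetical; grabFrame, notifyError, and putFrame are standard cscore calls, and process(Mat) is the method GRIP generates on its pipelines):

// Hypothetical processing step: grab a frame from the sink, run the
// generated pipeline on it, and publish the result to the MJPEG server.
private final Mat frame = new Mat();

private void processFrame() {
    // grabFrame returns 0 on error (for example, a timeout); skip the frame
    if (sink.grabFrame(frame) == 0) {
        output.notifyError(sink.getError());
        return;
    }
    pipeline.process(frame);
    output.putFrame(frame);
}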
Example 3: FrcFaceDetector

import edu.wpi.cscore.CvSource; // import the required package/class

/**
 * Constructor: Create an instance of the object.
 *
 * @param instanceName specifies the instance name.
 * @param classifierPath specifies the file path for the classifier.
 * @param videoIn specifies the video input stream.
 * @param videoOut specifies the video output stream.
 */
public FrcFaceDetector(
    final String instanceName, final String classifierPath, CvSink videoIn, CvSource videoOut)
{
    super(instanceName, videoIn, videoOut, NUM_IMAGE_BUFFERS, detectedFacesBuffers);

    if (debugEnabled)
    {
        dbgTrace = new TrcDbgTrace(moduleName, tracingEnabled, traceLevel, msgLevel);
    }
    //
    // Preallocate two MatOfRects for ping-pong processing.
    //
    for (int i = 0; i < detectedFacesBuffers.length; i++)
    {
        detectedFacesBuffers[i] = new MatOfRect();
    }

    faceDetector = new CascadeClassifier(classifierPath);
    if (faceDetector.empty())
    {
        throw new RuntimeException("Failed to load Cascade Classifier <" + classifierPath + ">");
    }
}
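The constructor only loads the cascade; detection presumably happens in a hook the base class calls for each grabbed frame. A minimal sketch of what such a hook typically looks like (the detectObjects signature is hypothetical; detectMultiScale is the standard OpenCV CascadeClassifier call):

// Hypothetical detection hook: run the cascade on a frame and write the
// face bounding boxes into one of the preallocated MatOfRect buffers.
public boolean detectObjects(Mat image, MatOfRect detectedFaces)
{
    faceDetector.detectMultiScale(image, detectedFaces);
    return !detectedFaces.empty();
}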
Example 4: FrcOpenCVDetector

import edu.wpi.cscore.CvSource; // import the required package/class

/**
 * Constructor: Create an instance of the object.
 *
 * @param instanceName specifies the instance name.
 * @param videoIn specifies the video input stream.
 * @param videoOut specifies the video output stream.
 * @param numImageBuffers specifies the number of image buffers to allocate.
 * @param detectedObjectBuffers specifies the array of preallocated detected object buffers.
 */
public FrcOpenCVDetector(
    final String instanceName, CvSink videoIn, CvSource videoOut, int numImageBuffers, O[] detectedObjectBuffers)
{
    if (debugEnabled)
    {
        dbgTrace = new TrcDbgTrace(moduleName, tracingEnabled, traceLevel, msgLevel);
    }

    this.instanceName = instanceName;
    this.videoIn = videoIn;
    this.videoOut = videoOut;
    //
    // Pre-allocate the image buffers.
    //
    Mat[] imageBuffers = new Mat[numImageBuffers];
    for (int i = 0; i < imageBuffers.length; i++)
    {
        imageBuffers[i] = new Mat();
    }

    visionTask = new TrcVisionTask<>(instanceName, this, imageBuffers, detectedObjectBuffers);
}
Example 5: concatStreamStart

import edu.wpi.cscore.CvSource; // import the required package/class

/**
 * Start both the left and right camera streams and combine them into a single one, which is
 * then pushed to an output stream titled Concat.
 * This method should only be used for starting the camera stream.
 */
private void concatStreamStart() {
    cameraLeft = CameraServer.getInstance().startAutomaticCapture("Left", 0);
    cameraRight = CameraServer.getInstance().startAutomaticCapture("Right", 1);
    // Dummy sinks to keep the camera connections open.
    CvSink cvsinkLeft = new CvSink("leftSink");
    cvsinkLeft.setSource(cameraLeft);
    cvsinkLeft.setEnabled(true);
    CvSink cvsinkRight = new CvSink("rightSink");
    cvsinkRight.setSource(cameraRight);
    cvsinkRight.setEnabled(true);
    // Matrices to store each image from the cameras.
    Mat leftSource = new Mat();
    Mat rightSource = new Mat();
    // The ArrayList of left and right sources is needed for the hconcat method used to combine the streams.
    ArrayList<Mat> sources = new ArrayList<>();
    sources.add(leftSource);
    sources.add(rightSource);
    // Concatenation of both matrices
    Mat concat = new Mat();
    // Publish the combined video to the dashboard. The width is doubled
    // because the combined stream is two webcam frames side by side.
    CvSource outputStream = CameraServer.getInstance().putVideo("Concat", 2 * Constants.CAM_WIDTH, Constants.CAM_HEIGHT);
    while (!Thread.interrupted()) {
        // Provide each mat with the current frame
        cvsinkLeft.grabFrame(leftSource);
        cvsinkRight.grabFrame(rightSource);
        // Combine the frames into a single mat and stream the image.
        Core.hconcat(sources, concat);
        outputStream.putFrame(concat);
    }
}
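Note that concatStreamStart() blocks in its while loop, so it should not be called from the main robot thread. A sketch of running it on a daemon thread, following the same pattern as Example 1:

// Run the blocking concat loop on a background thread so it stops
// with the robot program instead of keeping it alive.
Thread concatThread = new Thread(this::concatStreamStart);
concatThread.setDaemon(true);
concatThread.start();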
Example 6: VisionTest

import edu.wpi.cscore.CvSource; // import the required package/class

public VisionTest() {
    UsbCamera camera = CameraServer.getInstance().startAutomaticCapture(); // cam0 by default
    camera.setResolution(IMG_WIDTH, IMG_HEIGHT);
    camera.setBrightness(0);
    // camera.setExposureManual(100);
    camera.setExposureAuto();
    CvSource cs = CameraServer.getInstance().putVideo("name", IMG_WIDTH, IMG_HEIGHT);
    visionThread = new VisionThread(camera, new GripPipeline(), pipeline -> {
        Mat IMG_MOD = pipeline.hslThresholdOutput();
        if (!pipeline.filterContoursOutput().isEmpty()) {
            // Rect recCombine = Imgproc.boundingRect(pipeline.filterContoursOutput().get(0));
            Rect recCombine = computeBoundingRectangle(pipeline.filterContoursOutput());
            if (recCombine == null) {
                targetDetected = false;
                return;
            }
            targetDetected = true;
            computeCoords(recCombine);
            synchronized (imgLock) {
                centerX = recCombine.x + (recCombine.width / 2);
            }
            Imgproc.rectangle(IMG_MOD, new Point(recCombine.x, recCombine.y),
                    new Point(recCombine.x + recCombine.width, recCombine.y + recCombine.height),
                    new Scalar(255, 20, 0), 5);
        } else {
            targetDetected = false;
        }
        cs.putFrame(IMG_MOD);
    });
    visionThread.start();

    Relay relay = new Relay(RobotMap.RELAY_CHANNEL, Direction.kForward);
    relay.set(Relay.Value.kOn);
    // this.processImage();
}
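The listener writes centerX under imgLock, so consumers must read it the same way. A minimal sketch of the reading side, in the style of the WPILib VisionThread example this code follows (the turn calculation is illustrative only):

// Read the latest target center under the same lock the listener uses,
// then convert it into a steering error relative to the image center.
double cx;
synchronized (imgLock) {
    cx = centerX;
}
double turn = cx - (IMG_WIDTH / 2.0);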
Example 7: GripVision

import edu.wpi.cscore.CvSource; // import the required package/class

public GripVision(final String instanceName, CvSink videoIn, CvSource videoOut)
{
    super(instanceName, videoIn, videoOut);

    if (debugEnabled)
    {
        dbgTrace = new TrcDbgTrace(moduleName, tracingEnabled, traceLevel, msgLevel);
    }

    pipeline = new GripPipeline();
}
Example 8: FrcVisionTarget

import edu.wpi.cscore.CvSource; // import the required package/class

/**
 * Constructor: Create an instance of the object.
 *
 * @param instanceName specifies the instance name.
 * @param videoIn specifies the video input stream.
 * @param videoOut specifies the video output stream.
 */
public FrcVisionTarget(final String instanceName, CvSink videoIn, CvSource videoOut)
{
    super(instanceName, videoIn, videoOut, NUM_IMAGE_BUFFERS, null);

    if (debugEnabled)
    {
        dbgTrace = new TrcDbgTrace(moduleName, tracingEnabled, traceLevel, msgLevel);
    }
}
Example 9: robotInit

import edu.wpi.cscore.CvSource; // import the required package/class

/**
 * This function is run when the robot is first started up and should be
 * used for any initialization code.
 */
public void robotInit() {
    RobotMap.init();
    // BEGIN AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=CONSTRUCTORS
    // driveTrain = new DriveTrain();
    // lights = new Lights();
    new Thread(() -> {
        UsbCamera camera = CameraServer.getInstance().startAutomaticCapture();
        // Note: the camera resolution differs from the 640x480 stream size below.
        camera.setResolution(720, 480);
        CvSink cvSink = CameraServer.getInstance().getVideo();
        CvSource outputStream = CameraServer.getInstance().putVideo("Blur", 640, 480);
        Mat source = new Mat();
        Mat output = new Mat();
        // Check Thread.interrupted() rather than looping forever so the
        // thread stops when robot code is restarted (see Example 1).
        while (!Thread.interrupted()) {
            // Skip the iteration if no frame was grabbed; cvtColor would
            // otherwise throw on an empty Mat.
            if (cvSink.grabFrame(source) == 0) {
                continue;
            }
            Imgproc.cvtColor(source, output, Imgproc.COLOR_BGR2GRAY);
            outputStream.putFrame(output);
        }
    }).start();
    motorRelay = new MotorRelay();
    wheels = new WheelMotors();
    // END AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=CONSTRUCTORS
    // OI must be constructed after subsystems. If the OI creates Commands
    // (which it very likely will), subsystems are not guaranteed to be
    // constructed yet. Thus, their requires() statements may grab null
    // pointers. Bad news. Don't move it.
    oi = new OI();
    // Instantiate the command used for the autonomous period
    // BEGIN AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=AUTONOMOUS
    autonomousCommand = new AutonomousCommand();
    // END AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=AUTONOMOUS
}
Example 10: cameraInit

import edu.wpi.cscore.CvSource; // import the required package/class

/**
 * Camera switcher initialization
 */
private void cameraInit() {
    // Camera switching code
    Thread t = new Thread(() -> {
        boolean allowCam1 = false;
        UsbCamera camera1 = CameraServer.getInstance().startAutomaticCapture(0);
        camera1.setResolution(320, 240);
        camera1.setFPS(30);
        UsbCamera camera2 = CameraServer.getInstance().startAutomaticCapture(1);
        camera2.setResolution(320, 240);
        camera2.setFPS(30);
        CvSink cvSink1 = CameraServer.getInstance().getVideo(camera1);
        CvSink cvSink2 = CameraServer.getInstance().getVideo(camera2);
        CvSource outputStream = CameraServer.getInstance().putVideo("Switcher", 320, 240);
        Mat image = new Mat();
        Mat grey = new Mat();
        while (!Thread.interrupted()) {
            // Note: this flips the camera selection on every loop iteration
            // while the button is held; see the debounced sketch below.
            if (Robot.oi.getPrimaryJoystick().getRawButton(4)) {
                allowCam1 = !allowCam1;
            }
            if (allowCam1) {
                cvSink2.setEnabled(false);
                cvSink1.setEnabled(true);
                cvSink1.grabFrame(image);
            } else {
                cvSink1.setEnabled(false);
                cvSink2.setEnabled(true);
                cvSink2.grabFrame(image);
            }
            Imgproc.cvtColor(image, grey, Imgproc.COLOR_BGR2GRAY);
            outputStream.putFrame(grey);
        }
    });
    t.start();
}
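As noted in the comment above, reading getRawButton directly toggles the selection on every loop pass for as long as the button is held. A debounced variant only toggles on the rising edge (a sketch; the lastButton flag is a hypothetical local declared alongside allowCam1):

// Toggle only when the button transitions from released to pressed.
boolean pressed = Robot.oi.getPrimaryJoystick().getRawButton(4);
if (pressed && !lastButton) {
    allowCam1 = !allowCam1;
}
lastButton = pressed;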
Example 11: cameraInit

import edu.wpi.cscore.CvSource; // import the required package/class

/**
 * Camera switcher initialization
 */
private void cameraInit() {
    // Camera switching code
    Thread t = new Thread(() -> {
        boolean allowCam1 = false;
        UsbCamera camera1 = CameraServer.getInstance().startAutomaticCapture(0);
        camera1.setResolution(320, 240);
        camera1.setFPS(30);
        UsbCamera camera2 = CameraServer.getInstance().startAutomaticCapture(1);
        camera2.setResolution(320, 240);
        camera2.setFPS(30);
        CvSink cvSink1 = CameraServer.getInstance().getVideo(camera1);
        CvSink cvSink2 = CameraServer.getInstance().getVideo(camera2);
        CvSource outputStream = CameraServer.getInstance().putVideo("Switcher", 320, 240);
        Mat image = new Mat();
        Mat grey = new Mat();
        while (!Thread.interrupted()) {
            if (Robot.oi.getPrimaryJoystick().getRawButton(4)) {
                allowCam1 = !allowCam1;
            }
            if (allowCam1) {
                cvSink2.setEnabled(false);
                cvSink1.setEnabled(true);
                cvSink1.grabFrame(image);
            } else {
                cvSink1.setEnabled(false);
                cvSink2.setEnabled(true);
                cvSink2.grabFrame(image);
            }
            // Unlike Example 10, the frame is streamed in color; the
            // grayscale conversion is left commented out.
            // Imgproc.cvtColor(image, grey, Imgproc.COLOR_BGR2GRAY);
            // outputStream.putFrame(grey);
            outputStream.putFrame(image);
        }
    });
    t.start();
}
Example 12: putVideo

import edu.wpi.cscore.CvSource; // import the required package/class

/**
 * Create an MJPEG stream with OpenCV input. This can be called to pass custom
 * annotated images to the dashboard.
 *
 * @param name Name to give the stream
 * @param width Width of the image being sent
 * @param height Height of the image being sent
 */
public CvSource putVideo(String name, int width, int height) {
    CvSource source = new CvSource(name, VideoMode.PixelFormat.kMJPEG, width, height, 30);
    startAutomaticCapture(source);
    return source;
}
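This is the method most of the examples above call to obtain their output stream. Typical usage from robot code (a sketch; the stream name "Annotated" and the mat are supplied by the caller):

// Create a 320x240 MJPEG stream named "Annotated" and push a frame to it.
CvSource annotated = CameraServer.getInstance().putVideo("Annotated", 320, 240);
annotated.putFrame(mat); // mat is an OpenCV image prepared elsewhere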