本文整理汇总了Java中edu.wpi.cscore.VideoSource类的典型用法代码示例。如果您正苦于以下问题:Java VideoSource类的具体用法?Java VideoSource怎么用?Java VideoSource使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
VideoSource类属于edu.wpi.cscore包,在下文中一共展示了VideoSource类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: makeSourceValue
import edu.wpi.cscore.VideoSource; //导入依赖的package包/类
@SuppressWarnings("JavadocMethod")
private static String makeSourceValue(int source) {
  // Translate a cscore source handle into the "<scheme>:<address>" string
  // published to the dashboard network table.
  switch (VideoSource.getKindFromInt(CameraServerJNI.getSourceKind(source))) {
    case kUsb:
      return "usb:" + CameraServerJNI.getUsbCameraPath(source);
    case kHttp:
      // An HTTP camera may advertise several URLs; publish the first, if any.
      String[] urls = CameraServerJNI.getHttpCameraUrls(source);
      return urls.length > 0 ? "ip:" + urls[0] : "ip:";
    case kCv:
      // FIXME: Should be "cv:", but LabVIEW dashboard requires "usb:".
      // https://github.com/wpilibsuite/allwpilib/issues/407
      return "usb:";
    default:
      return "unknown:";
  }
}
示例2: getVideo
import edu.wpi.cscore.VideoSource; //导入依赖的package包/类
/**
 * Get OpenCV access to the specified camera. This allows you to get
 * images from the camera for image processing on the roboRIO.
 *
 * @param camera Camera (e.g. as returned by startAutomaticCapture).
 */
public CvSink getVideo(VideoSource camera) {
  String sinkName = "opencv_" + camera.getName();
  synchronized (this) {
    // Reuse a previously created sink for this camera if one exists.
    VideoSink existing = m_sinks.get(sinkName);
    if (existing != null) {
      VideoSink.Kind existingKind = existing.getKind();
      if (existingKind != VideoSink.Kind.kCv) {
        // A non-OpenCV sink was registered under the same name; refuse to cast.
        throw new VideoException("expected OpenCV sink, but got " + existingKind);
      }
      return (CvSink) existing;
    }
  }
  // No cached sink: create a new CvSink, attach the camera, and register it.
  CvSink cvSink = new CvSink(sinkName);
  cvSink.setSource(camera);
  addServer(cvSink);
  return cvSink;
}
示例3: robotInit
import edu.wpi.cscore.VideoSource; //导入依赖的package包/类
/**
 * This function is run when the robot is first started up and should be
 * used for any initialization code.
 */
public void robotInit() {
// Initialize hardware mappings before constructing any subsystem.
RobotMap.init();
// BEGIN AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=CONSTRUCTORS
driveTrain = new DriveTrain();
shooter = new Shooter();
lift = new Lift();
// END AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=CONSTRUCTORS
// OI must be constructed after subsystems. If the OI creates Commands
//(which it very likely will), subsystems are not guaranteed to be
// constructed yet. Thus, their requires() statements may grab null
// pointers. Bad news. Don't move it.
oi = new OI();
// instantiate the command used for the autonomous period
// BEGIN AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=AUTONOMOUS
autonomousCommand = new AutonomousCommand();
// END AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=AUTONOMOUS
// Log every camera source cscore currently knows about (diagnostics only).
CameraServer cameraServer = CameraServer.getInstance();
System.out.println("Camera sources:" + VideoSource.enumerateSources().length);
for (VideoSource videoSource : VideoSource.enumerateSources()) {
System.out.println("Camera: " + videoSource.getName());
}
UsbCamera camera= cameraServer.startAutomaticCapture();
System.out.println("Started camera capture.");
// Hard coded camera address
// NOTE(review): hard-coded Axis camera IP — confirm it matches the robot's network config.
cameraServer.addAxisCamera("AxisCam ye", "10.26.67.42");
// visionThread = new VisionThread(camera,new GripPipeline());
// NOTE(review): raw SendableChooser — consider SendableChooser<DriveTrain.DriveMode> for type safety.
driveTrainChooser = new SendableChooser();
driveTrainChooser.addDefault("default PWM", DriveTrain.DriveMode.PWM);
// Expose every available drive mode on the dashboard chooser.
for (DriveTrain.DriveMode driveMode : DriveTrain.DriveMode.values()) {
driveTrainChooser.addObject(driveMode.name(), driveMode);
}
}
示例4: robotInit
import edu.wpi.cscore.VideoSource; //导入依赖的package包/类
/**
 * This function is run when the robot is first started up and should be
 * used for any initialization code.
 */
public void robotInit() {
// Initialize hardware mappings before constructing any subsystem.
RobotMap.init();
// BEGIN AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=CONSTRUCTORS
driveTrain = new DriveTrain();
shooter = new Shooter();
// END AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=CONSTRUCTORS
// OI must be constructed after subsystems. If the OI creates Commands
//(which it very likely will), subsystems are not guaranteed to be
// constructed yet. Thus, their requires() statements may grab null
// pointers. Bad news. Don't move it.
oi = new OI();
// instantiate the command used for the autonomous period
// BEGIN AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=AUTONOMOUS
autonomousCommand = new AutonomousCommand();
// END AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=AUTONOMOUS
// Log every camera source cscore currently knows about (diagnostics only).
CameraServer cameraServer = CameraServer.getInstance();
System.out.println("Camera sources:" + VideoSource.enumerateSources().length);
for (VideoSource videoSource : VideoSource.enumerateSources()) {
System.out.println("Camera: " + videoSource.getName());
}
UsbCamera camera= cameraServer.startAutomaticCapture();
System.out.println("Started camera capture.");
// Hard coded camera address
// NOTE(review): hard-coded Axis camera IP — confirm it matches the robot's network config.
cameraServer.addAxisCamera("AxisCam ye", "10.26.67.42");
// visionThread = new VisionThread(camera,new GripPipeline());
}
示例5: getSourceStreamValues
import edu.wpi.cscore.VideoSource; //导入依赖的package包/类
@SuppressWarnings({"JavadocMethod", "PMD.AvoidUsingHardCodedIP"})
private synchronized String[] getSourceStreamValues(int source) {
  // Only HTTP cameras publish stream URLs; every other kind yields an empty list.
  if (VideoSource.getKindFromInt(CameraServerJNI.getSourceKind(source))
      != VideoSource.Kind.kHttp) {
    return new String[0];
  }
  // Prefix each camera URL with the "mjpg:" scheme expected by dashboards.
  String[] values = CameraServerJNI.getHttpCameraUrls(source);
  for (int j = 0; j < values.length; j++) {
    values[j] = "mjpg:" + values[j];
  }
  // If a local MJPEG passthrough server re-serves this source, advertise it too.
  for (VideoSink i : m_sinks.values()) {
    int sink = i.getHandle();
    int sinkSource = CameraServerJNI.getSinkSource(sink);
    if (source == sinkSource
        && VideoSink.getKindFromInt(CameraServerJNI.getSinkKind(sink)) == VideoSink.Kind.kMjpeg) {
      // Append the USB-only passthrough address (roboRIO USB interface IP).
      int port = CameraServerJNI.getMjpegServerPort(sink);
      String[] finalValues = new String[values.length + 1];
      System.arraycopy(values, 0, finalValues, 0, values.length);
      finalValues[values.length] = makeStreamValue("172.22.11.2", port);
      return finalValues;
    }
  }
  return values;
}
示例6: addCamera
import edu.wpi.cscore.VideoSource; //导入依赖的package包/类
/**
 * Adds an already created camera.
 *
 * @param camera Camera
 */
public void addCamera(VideoSource camera) {
  String cameraName = camera.getName();
  synchronized (this) {
    // The first camera registered becomes the primary source.
    if (m_primarySourceName == null) {
      m_primarySourceName = cameraName;
    }
    m_sources.put(cameraName, camera);
  }
}
示例7: setSize
/**
 * Sets the size of the image to use. Use the public kSize constants to set the correct mode, or
 * set it directly on a camera and call the appropriate startAutomaticCapture method.
 *
 * @deprecated Use setResolution on the UsbCamera returned by startAutomaticCapture() instead.
 * @param size The size to use
 */
@Deprecated
public void setSize(int size) {
  VideoSource primary;
  synchronized (this) {
    // Nothing to do if no primary camera has been registered yet.
    if (m_primarySourceName == null) {
      return;
    }
    primary = m_sources.get(m_primarySourceName);
    if (primary == null) {
      return;
    }
  }
  // Map the legacy size constant onto an explicit resolution.
  if (size == kSize640x480) {
    primary.setResolution(640, 480);
  } else if (size == kSize320x240) {
    primary.setResolution(320, 240);
  } else if (size == kSize160x120) {
    primary.setResolution(160, 120);
  } else {
    throw new IllegalArgumentException("Unsupported size: " + size);
  }
}
示例8: BlastoiseShooterVision
import edu.wpi.cscore.VideoSource; //导入依赖的package包/类
/**
 * Instantiates BlastoiseVision with a VideoSource object.
 * This method is not recommended to be used directly, use one of the others instead.
 * @param videoSource source of camera frames to feed into the vision pipeline
 */
public BlastoiseShooterVision(VideoSource videoSource) {
  this.videoSource = videoSource;
  // Force the camera to the resolution the pipeline expects.
  videoSource.setResolution(
      Constants.ShooterVision.Camera.WIDTH_PX, Constants.ShooterVision.Camera.HEIGHT_PX);
  // Run target detection on a background thread; contour processing is
  // guarded by `lock` so readers observe a consistent result set.
  VisionThread detectionThread =
      new VisionThread(videoSource, new DetectTargetPipeline(), pipeline -> {
        synchronized (lock) {
          processContours(pipeline.filterContours0Output());
        }
      });
  detectionThread.start();
}
示例9: startAutomaticCapture
/**
 * Start automatically capturing images to send to the dashboard from
 * an existing camera.
 *
 * @param camera Camera
 */
public void startAutomaticCapture(VideoSource camera) {
  // Register the camera first so it can become the primary source.
  addCamera(camera);
  // Then create a serving sink named after the camera and point it at the source.
  String serverName = "serve_" + camera.getName();
  VideoSink server = addServer(serverName);
  server.setSource(camera);
}
示例10: VisionRunner
import edu.wpi.cscore.VideoSource; //导入依赖的package包/类
/**
 * Creates a new vision runner. It will take images from the {@code videoSource}, send them to
 * the {@code pipeline}, and call the {@code listener} when the pipeline has finished to alert
 * user code when it is safe to access the pipeline's outputs.
 *
 * @param videoSource the video source to use to supply images for the pipeline
 * @param pipeline the vision pipeline to run
 * @param listener a function to call after the pipeline has finished running
 */
public VisionRunner(VideoSource videoSource, P pipeline, Listener<? super P> listener) {
this.m_pipeline = pipeline;
this.m_listener = listener;
// Attach the source to the pre-existing CvSink field so frames can be grabbed.
m_cvSink.setSource(videoSource);
}
示例11: VisionThread
import edu.wpi.cscore.VideoSource; //导入依赖的package包/类
/**
 * Creates a new vision thread that continuously runs the given vision pipeline. This is
 * equivalent to {@code new VisionThread(new VisionRunner<>(videoSource, pipeline, listener))}.
 *
 * @param videoSource the source for images the pipeline should process
 * @param pipeline the pipeline to run
 * @param listener the listener to copy outputs from the pipeline after it runs
 * @param <P> the type of the pipeline
 */
public <P extends VisionPipeline> VisionThread(VideoSource videoSource,
P pipeline,
VisionRunner.Listener<? super P> listener) {
// Convenience overload: wrap the arguments in a VisionRunner and delegate.
this(new VisionRunner<>(videoSource, pipeline, listener));
}