当前位置: 首页>>代码示例>>Java>>正文


Java CvSink类代码示例

本文整理汇总了Java中edu.wpi.cscore.CvSink的典型用法代码示例。如果您正苦于以下问题:Java CvSink类的具体用法?Java CvSink怎么用?Java CvSink使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


CvSink类属于edu.wpi.cscore包,在下文中一共展示了CvSink类的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: initDefaultCommand

import edu.wpi.cscore.CvSink; //导入依赖的package包/类
public void initDefaultCommand() {
    visionThread = new Thread(() -> {
        // Start capturing from the default USB camera and pin its resolution.
        UsbCamera camera = CameraServer.getInstance().startAutomaticCapture();
        camera.setResolution(640, 480);

        // Sink for pulling OpenCV Mats off the camera stream.
        CvSink cvSink = CameraServer.getInstance().getVideo();
        // Source that pushes processed frames back to the Dashboard.
        CvSource outputStream = CameraServer.getInstance().putVideo("Rectangle", 640, 480);

        // Reuse a single Mat: allocating one per frame is very expensive.
        Mat frame = new Mat();

        // Loop until interrupted so the thread dies cleanly when robot code
        // is restarted or redeployed (a while(true) loop would never exit).
        while (!Thread.interrupted()) {
            // grabFrame returns 0 on error; report it and try the next frame.
            if (cvSink.grabFrame(frame) == 0) {
                outputStream.notifyError(cvSink.getError());
                continue;
            }
            // Overlay a white rectangle and publish the annotated frame.
            Imgproc.rectangle(frame, new Point(100, 100), new Point(400, 400),
                    new Scalar(255, 255, 255), 5);
            outputStream.putFrame(frame);
        }
    });
    visionThread.setDaemon(true);
    visionThread.start();
}
 
开发者ID:FRC6706,项目名称:FRC6706_JAVA2017,代码行数:38,代码来源:VisionSubsystem2.java

示例2: Vision

import edu.wpi.cscore.CvSink; //导入依赖的package包/类
private Vision() {
    super("Vision");

    // Vision processing pipeline (generated code).
    pipeline = new Pipeline();

    // USB camera configured with the fixed capture settings.
    camera = new UsbCamera("UsbCamera", Mappings.CAMERA);
    camera.setVideoMode(CAMERA_PIXEL_FORMAT, CAMERA_WIDTH, CAMERA_HEIGHT, CAMERA_FPS);
    camera.setExposureManual(CAMERA_EXPOSURE);
    camera.setBrightness(CAMERA_BRIGHTNESS);

    // Sink that pulls frames from the camera for processing.
    sink = new CvSink("Sink");
    sink.setEnabled(true);
    sink.setSource(camera);

    // Output source exposed to the dashboard through an MJPEG server.
    output = new CvSource("CameraSource", CAMERA_PIXEL_FORMAT, CAMERA_WIDTH, CAMERA_HEIGHT, CAMERA_FPS);
    server = new MjpegServer("OutputServer", Mappings.MJPEG_SERVER);
    server.setSource(output);
}
 
开发者ID:vyrotek,项目名称:frc-robot,代码行数:23,代码来源:Vision.java

示例3: getVideo

import edu.wpi.cscore.CvSink; //导入依赖的package包/类
/**
 * Get OpenCV access to the specified camera.  This allows you to get
 * images from the camera for image processing on the roboRIO.
 *
 * @param camera Camera (e.g. as returned by startAutomaticCapture).
 * @return a CvSink connected to the camera; cached per camera name so
 *         repeated calls return the same sink.
 * @throws VideoException if a non-OpenCV sink is already registered under
 *         this camera's name.
 */
public CvSink getVideo(VideoSource camera) {
  String name = "opencv_" + camera.getName();

  // The lookup and the create-and-register must be one atomic step: the
  // original released the lock between them, so two threads could both miss
  // the cache and register duplicate sinks for the same camera.
  synchronized (this) {
    VideoSink sink = m_sinks.get(name);
    if (sink != null) {
      VideoSink.Kind kind = sink.getKind();
      if (kind != VideoSink.Kind.kCv) {
        throw new VideoException("expected OpenCV sink, but got " + kind);
      }
      return (CvSink) sink;
    }

    CvSink newsink = new CvSink(name);
    newsink.setSource(camera);
    addServer(newsink);
    return newsink;
  }
}
 
开发者ID:ArcticWarriors,项目名称:snobot-2017,代码行数:26,代码来源:CameraServer.java

示例4: FrcFaceDetector

import edu.wpi.cscore.CvSink; //导入依赖的package包/类
/**
 * Constructor: Create an instance of the object.
 *
 * @param instanceName specifies the instance name.
 * @param classifierPath specifies the file path for the classifier.
 * @param videoIn specifies the video input stream.
 * @param videoOut specifies the video output stream.
 */
public FrcFaceDetector(
    final String instanceName, final String classifierPath, CvSink videoIn, CvSource videoOut)
{
    super(instanceName, videoIn, videoOut, NUM_IMAGE_BUFFERS, detectedFacesBuffers);

    if (debugEnabled)
    {
        dbgTrace = new TrcDbgTrace(moduleName, tracingEnabled, traceLevel, msgLevel);
    }

    // Preallocate the MatOfRect buffers used for ping-pong processing.
    int numBuffers = detectedFacesBuffers.length;
    for (int index = 0; index < numBuffers; index++)
    {
        detectedFacesBuffers[index] = new MatOfRect();
    }

    // Load the cascade classifier; an empty detector means the file at
    // classifierPath could not be read or parsed.
    faceDetector = new CascadeClassifier(classifierPath);
    if (faceDetector.empty())
    {
        throw new RuntimeException("Failed to load Cascade Classifier <" + classifierPath + ">");
    }
}
 
开发者ID:trc492,项目名称:Frc2017FirstSteamWorks,代码行数:32,代码来源:FrcFaceDetector.java

示例5: FrcOpenCVDetector

import edu.wpi.cscore.CvSink; //导入依赖的package包/类
/**
 * Constructor: Create an instance of the object.
 *
 * @param instanceName specifies the instance name.
 * @param videoIn specifies the video input stream.
 * @param videoOut specifies the video output stream.
 * @param numImageBuffers specifies the number of image buffers to allocate.
 * @param detectedObjectBuffers specifies the array of preallocated detected object buffers.
 */
public FrcOpenCVDetector(
    final String instanceName, CvSink videoIn, CvSource videoOut, int numImageBuffers, O[] detectedObjectBuffers)
{
    if (debugEnabled)
    {
        dbgTrace = new TrcDbgTrace(moduleName, tracingEnabled, traceLevel, msgLevel);
    }

    this.instanceName = instanceName;
    this.videoIn = videoIn;
    this.videoOut = videoOut;

    // Pre-allocate the image buffers so the vision task can reuse Mats
    // instead of allocating a new one per frame.
    Mat[] frameBuffers = new Mat[numImageBuffers];
    for (int idx = 0; idx < frameBuffers.length; idx++)
    {
        frameBuffers[idx] = new Mat();
    }

    visionTask = new TrcVisionTask<>(instanceName, this, frameBuffers, detectedObjectBuffers);
}
 
开发者ID:trc492,项目名称:Frc2017FirstSteamWorks,代码行数:32,代码来源:FrcOpenCVDetector.java

示例6: concatStreamStart

import edu.wpi.cscore.CvSink; //导入依赖的package包/类
/**
 * Start both the left and right camera streams and combine them into a single one which is then
 * pushed to an output stream titled Concat.
 * This method should only be used for starting the camera stream; it loops until the calling
 * thread is interrupted, so it is expected to run on its own thread.
 */
private void concatStreamStart() {
    cameraLeft = CameraServer.getInstance().startAutomaticCapture("Left", 0);
    cameraRight = CameraServer.getInstance().startAutomaticCapture("Right", 1);

    // Dummy sinks to keep camera connections open.
    CvSink cvsinkLeft = new CvSink("leftSink");
    cvsinkLeft.setSource(cameraLeft);
    cvsinkLeft.setEnabled(true);
    CvSink cvsinkRight = new CvSink("rightSink");
    cvsinkRight.setSource(cameraRight);
    cvsinkRight.setEnabled(true);

    // Matrices to store each image from the cameras.
    Mat leftSource = new Mat();
    Mat rightSource = new Mat();

    // hconcat reads its inputs from this list; it aliases the two Mats above,
    // so grabbing into them updates the list contents in place.
    ArrayList<Mat> sources = new ArrayList<>();
    sources.add(leftSource);
    sources.add(rightSource);

    // Concatenation of both matrices.
    Mat concat = new Mat();

    // The width is doubled because the combined stream is two webcams side by side.
    CvSource outputStream = CameraServer.getInstance().putVideo("Concat", 2*Constants.CAM_WIDTH, Constants.CAM_HEIGHT);

    while (!Thread.interrupted()) {
        // grabFrame returns 0 on error. The original ignored this, which let
        // hconcat run on empty or stale Mats; report the error and retry instead.
        if (cvsinkLeft.grabFrame(leftSource) == 0) {
            outputStream.notifyError(cvsinkLeft.getError());
            continue;
        }
        if (cvsinkRight.grabFrame(rightSource) == 0) {
            outputStream.notifyError(cvsinkRight.getError());
            continue;
        }
        // Combine the frames into a single mat and stream the image.
        Core.hconcat(sources, concat);
        outputStream.putFrame(concat);
    }
}
 
开发者ID:wh1ter0se,项目名称:PowerUp-2018,代码行数:43,代码来源:Vision.java

示例7: GripVision

import edu.wpi.cscore.CvSink; //导入依赖的package包/类
/**
 * Constructor: creates a GRIP-based vision processor on the given streams.
 *
 * @param instanceName specifies the instance name.
 * @param videoIn specifies the video input stream.
 * @param videoOut specifies the video output stream.
 */
public GripVision(final String instanceName, CvSink videoIn, CvSource videoOut)
{
    super(instanceName, videoIn, videoOut);

    // GRIP-generated pipeline that performs the actual frame processing.
    pipeline = new GripPipeline();

    if (debugEnabled)
    {
        dbgTrace = new TrcDbgTrace(moduleName, tracingEnabled, traceLevel, msgLevel);
    }
}
 
开发者ID:trc492,项目名称:Frc2017FirstSteamWorks,代码行数:12,代码来源:GripVision.java

示例8: FrcVisionTarget

import edu.wpi.cscore.CvSink; //导入依赖的package包/类
/**
 * Constructor: Create an instance of the object.
 *
 * @param instanceName specifies the instance name.
 * @param videoIn specifies the video input stream.
 * @param videoOut specifies the video output stream.
 */
public FrcVisionTarget(final String instanceName, CvSink videoIn, CvSource videoOut)
{
    // No preallocated detected-object buffers are supplied for this detector.
    super(instanceName, videoIn, videoOut, NUM_IMAGE_BUFFERS, null);

    // Optionally enable trace logging for debugging.
    if (debugEnabled)
    {
        dbgTrace = new TrcDbgTrace(moduleName, tracingEnabled, traceLevel, msgLevel);
    }
}
 
开发者ID:trc492,项目名称:Frc2017FirstSteamWorks,代码行数:17,代码来源:FrcVisionTarget.java

示例9: robotInit

import edu.wpi.cscore.CvSink; //导入依赖的package包/类
/**
 * This function is run when the robot is first started up and should be
 * used for any initialization code.
 */
public void robotInit() {
    RobotMap.init();
    // BEGIN AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=CONSTRUCTORS

//    driveTrain = new DriveTrain();
//    lights = new Lights();
    Thread visionThread = new Thread(() -> {
        UsbCamera camera = CameraServer.getInstance().startAutomaticCapture();
        camera.setResolution(720, 480);
        CvSink cvSink = CameraServer.getInstance().getVideo();
        CvSource outputStream = CameraServer.getInstance().putVideo("Blur", 640, 480);

        Mat source = new Mat();
        Mat output = new Mat();

        // Exit when interrupted so robot-code restarts and deploys can stop
        // the thread; the original while(true) loop could never terminate.
        while (!Thread.interrupted()) {
            // grabFrame returns 0 on error; the original ignored this and
            // passed a possibly-empty Mat to cvtColor, which throws.
            if (cvSink.grabFrame(source) == 0) {
                outputStream.notifyError(cvSink.getError());
                continue;
            }
            Imgproc.cvtColor(source, output, Imgproc.COLOR_BGR2GRAY);
            outputStream.putFrame(output);
        }
    });
    // Daemon so the vision thread never keeps the JVM alive on its own.
    visionThread.setDaemon(true);
    visionThread.start();

    motorRelay = new MotorRelay();
    wheels = new WheelMotors();

    // END AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=CONSTRUCTORS
    // OI must be constructed after subsystems. If the OI creates Commands
    // (which it very likely will), subsystems are not guaranteed to be
    // constructed yet. Thus, their requires() statements may grab null
    // pointers. Bad news. Don't move it.
    oi = new OI();

    // instantiate the command used for the autonomous period
    // BEGIN AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=AUTONOMOUS

    autonomousCommand = new AutonomousCommand();

    // END AUTOGENERATED CODE, SOURCE=ROBOTBUILDER ID=AUTONOMOUS
}
 
开发者ID:FRC2832,项目名称:Practice_Robot_Code,代码行数:47,代码来源:Robot.java

示例10: CameraServerSource

import edu.wpi.cscore.CvSink; //导入依赖的package包/类
// Constructor: builds a camera source backed by a cscore CvSink, keeps its
// connected/active state in sync with camera-server events, and (re)connects
// to the HTTP stream URLs published in NetworkTables.
private CameraServerSource(String name) {
  super(CameraServerDataType.Instance);
  setName(name);
  // Sink used to pull frames from whichever HttpCamera is currently set.
  videoSink = new CvSink(name + "-videosink");
  // Mirror the camera server's connect/disconnect events for this source
  // name into our "connected" property.
  eventListenerId = CameraServerJNI.addListener(e -> {
    if (e.name.equals(name)) {
      switch (e.kind) {
        case kSourceConnected:
          setConnected(true);
          break;
        case kSourceDisconnected:
          setConnected(false);
          break;
        default:
          // don't care
          break;
      }
    }
  }, 0xFF, true);

  // NetworkTables entry holding this camera's published stream URLs.
  // NOTE(review): presumably published by the robot-side camera server --
  // confirm against the publisher.
  streams = cameraPublisherTable.getSubTable(name).getEntry(STREAMS_KEY);
  String[] streamUrls = removeCameraProtocols(streams.getStringArray(emptyStringArray));
  if (streamUrls.length > 0) {
    // URLs already known: connect the sink to the HTTP stream immediately.
    camera = new HttpCamera(name, streamUrls);
    videoSink.setSource(camera);
    videoSink.setEnabled(true);
  }

  connected.addListener((__, was, is) -> {
    if (!is) {
      // Only listen to changes in the streams when there's a connection
      streams.removeListener(streamsListener);
    }
  });

  // When this source is enabled, start the frame-grabbing task and watch the
  // stream-URL entry; when disabled, tear both down.
  enabledListener = (__, was, is) -> {
    if (is) {
      data.addListener(recordingListener);
      frameFuture = frameGrabberService.submit(this::grabForever);
      streamsListener = streams.addListener(entryNotification -> {
        // Deactivate when the entry is deleted, has the wrong type, or is
        // empty; otherwise (re)point the camera at the published URLs.
        if (NetworkTableUtils.isDelete(entryNotification.flags)
            || (entryNotification.getEntry().getType() != NetworkTableType.kStringArray)
            || (entryNotification.value.getStringArray()).length == 0) {
          setActive(false);
        } else {
          String[] urls = removeCameraProtocols(entryNotification.value.getStringArray());
          if (camera == null) {
            camera = new HttpCamera(name, urls);
            videoSink.setSource(camera);
          } else if (EqualityUtils.isDifferent(camera.getUrls(), urls)) {
            camera.setUrls(urls);
          }
          setActive(true);
        }
      }, 0xFF);
    } else {
      data.removeListener(recordingListener);
      cancelFrameGrabber();
    }
  };
  enabled.addListener(enabledListener);

  // Active only if we already found a camera with at least one stream URL.
  setActive(camera != null && camera.getUrls().length > 0);
}
 
开发者ID:wpilibsuite,项目名称:shuffleboard,代码行数:65,代码来源:CameraServerSource.java

示例11: cameraInit

import edu.wpi.cscore.CvSink; //导入依赖的package包/类
/**
 * Camera switcher initialization.
 * Streams whichever of two USB cameras is selected by joystick button 4,
 * converted to greyscale, on a single "Switcher" output.
 */
private void cameraInit() {

    // camera switching code
    Thread t = new Thread(() -> {

        boolean allowCam1 = false;
        // Previous state of the toggle button, for rising-edge detection.
        boolean togglePressed = false;

        UsbCamera camera1 = CameraServer.getInstance().startAutomaticCapture(0);
        camera1.setResolution(320, 240);
        camera1.setFPS(30);
        UsbCamera camera2 = CameraServer.getInstance().startAutomaticCapture(1);
        camera2.setResolution(320, 240);
        camera2.setFPS(30);

        CvSink cvSink1 = CameraServer.getInstance().getVideo(camera1);
        CvSink cvSink2 = CameraServer.getInstance().getVideo(camera2);
        CvSource outputStream = CameraServer.getInstance().putVideo("Switcher", 320, 240);

        Mat image = new Mat();
        Mat grey = new Mat();

        while (!Thread.interrupted()) {

            // Toggle only on the button's rising edge; the original flipped
            // the flag on every loop iteration while the button was held,
            // making the selection flicker at the loop rate.
            boolean pressed = Robot.oi.getPrimaryJoystick().getRawButton(4);
            if (pressed && !togglePressed) {
                allowCam1 = !allowCam1;
            }
            togglePressed = pressed;

            CvSink active = allowCam1 ? cvSink1 : cvSink2;
            CvSink inactive = allowCam1 ? cvSink2 : cvSink1;
            inactive.setEnabled(false);
            active.setEnabled(true);

            // grabFrame returns 0 on error; skip the frame instead of feeding
            // a possibly-empty Mat into cvtColor (which would throw).
            if (active.grabFrame(image) == 0) {
                outputStream.notifyError(active.getError());
                continue;
            }

            Imgproc.cvtColor(image, grey, Imgproc.COLOR_BGR2GRAY);
            outputStream.putFrame(grey);
        }
    });
    // Daemon so this thread doesn't keep robot code alive across restarts.
    t.setDaemon(true);
    t.start();
}
 
开发者ID:Team4914,项目名称:2017-emmet,代码行数:47,代码来源:Robot.java

示例12: cameraInit

import edu.wpi.cscore.CvSink; //导入依赖的package包/类
/**
 * Camera switcher initialization.
 * Streams whichever of two USB cameras is selected by joystick button 4,
 * unprocessed (color), on a single "Switcher" output.
 */
private void cameraInit() {

    // camera switching code
    Thread t = new Thread(() -> {

        boolean allowCam1 = false;
        // Previous state of the toggle button, for rising-edge detection.
        boolean togglePressed = false;

        UsbCamera camera1 = CameraServer.getInstance().startAutomaticCapture(0);
        camera1.setResolution(320, 240);
        camera1.setFPS(30);
        UsbCamera camera2 = CameraServer.getInstance().startAutomaticCapture(1);
        camera2.setResolution(320, 240);
        camera2.setFPS(30);

        CvSink cvSink1 = CameraServer.getInstance().getVideo(camera1);
        CvSink cvSink2 = CameraServer.getInstance().getVideo(camera2);
        CvSource outputStream = CameraServer.getInstance().putVideo("Switcher", 320, 240);

        Mat image = new Mat();

        while (!Thread.interrupted()) {

            // Toggle only on the button's rising edge; the original flipped
            // the flag on every loop iteration while the button was held,
            // making the selection flicker at the loop rate.
            boolean pressed = Robot.oi.getPrimaryJoystick().getRawButton(4);
            if (pressed && !togglePressed) {
                allowCam1 = !allowCam1;
            }
            togglePressed = pressed;

            CvSink active = allowCam1 ? cvSink1 : cvSink2;
            CvSink inactive = allowCam1 ? cvSink2 : cvSink1;
            inactive.setEnabled(false);
            active.setEnabled(true);

            // grabFrame returns 0 on error; skip the frame rather than
            // republishing a stale or empty Mat.
            if (active.grabFrame(image) == 0) {
                outputStream.notifyError(active.getError());
                continue;
            }

            outputStream.putFrame(image);
        }
    });
    // Daemon so this thread doesn't keep robot code alive across restarts.
    t.setDaemon(true);
    t.start();
}
 
开发者ID:Team4914,项目名称:2017-emmet,代码行数:48,代码来源:Robot.java

示例13: VisualGearLiftFinder

import edu.wpi.cscore.CvSink; //导入依赖的package包/类
/**
 * Constructs a gear-lift finder that reads frames from the given sink.
 *
 * @param imageSource the CvSink frames will be grabbed from.
 */
public VisualGearLiftFinder(CvSink imageSource)
{
	// Build the vision pipeline that will process each grabbed frame, and
	// keep a handle on the frame source.
	pipeline = new VisionMainPipeline();
	this.imageSource = imageSource;
}
 
开发者ID:first95,项目名称:FRC2017,代码行数:6,代码来源:VisualGearLiftFinder.java

示例14: MultiCameraSource

import edu.wpi.cscore.CvSink; //导入依赖的package包/类
/**
 * Create a MultiCameraSource with no cameras by default.
 *
 * @param name The name of the MultiCameraSource.
 * @param port The port the MultiCameraSource streams to.
 */
public MultiCameraSource(String name, int port) {
    // Start with an empty camera registry; cameras are added later.
    cameras = new HashMap<>();
    // MJPEG server that publishes whatever the sink is reading.
    server = new MjpegServer("server_" + name, port);
    cvSink = new CvSink("opencv_" + name);
}
 
开发者ID:kylecorry31,项目名称:Robot-Vision-API,代码行数:12,代码来源:MultiCameraSource.java


注:本文中的edu.wpi.cscore.CvSink类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。