

Java CanvasFrame Class Code Examples

This article collects typical usage examples of the Java class org.bytedeco.javacv.CanvasFrame. If you are wondering what the CanvasFrame class does, how to use it, or what working examples look like, the curated code samples below should help.


The CanvasFrame class belongs to the org.bytedeco.javacv package. Thirteen code examples of the class are shown below, sorted by popularity by default.
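Before the collected examples, here is a minimal, self-contained sketch of the typical CanvasFrame workflow (create a window, display an image, dispose of it). It is an illustrative sketch only, not taken from any of the projects below; the window title and image size are arbitrary placeholders.

import java.awt.image.BufferedImage;
import javax.swing.WindowConstants;
import org.bytedeco.javacv.CanvasFrame;

public class CanvasFrameSketch {
    public static void main(String[] args) throws Exception {
        // Create a window; CanvasFrame extends javax.swing.JFrame.
        CanvasFrame canvas = new CanvasFrame("CanvasFrame demo");
        canvas.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE);

        // Display a blank placeholder image; in real code this would be a
        // grabbed video frame or a converted OpenCV image.
        BufferedImage image = new BufferedImage(640, 480, BufferedImage.TYPE_3BYTE_BGR);
        canvas.showImage(image); // showImage also accepts org.bytedeco.javacv.Frame

        Thread.sleep(3000); // keep the window visible briefly
        canvas.dispose();   // release the window
    }
}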

Example 1: JavavcCameraTest

import org.bytedeco.javacv.CanvasFrame; // import the required package/class
public JavavcCameraTest(int deviceNumber) throws Exception {
	super(deviceNumber);
	start(); // start grabbing camera data
	CanvasFrame canvas = new CanvasFrame("Camera");
	canvas.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
	canvas.setAlwaysOnTop(true);
	canvas.setResizable(true);
	while (true) {
		if (!canvas.isDisplayable()) { // has the window been closed?
			stop();         // stop grabbing
			System.exit(2); // exit
		}
		Frame frame = grab();    // grab one frame of video from the camera
		canvas.showImage(frame); // display it in the window
		try {
			Thread.sleep(50);    // refresh roughly every 50 ms
		} catch (InterruptedException e) {
			e.printStackTrace();
		}
	}
}
 
Developer: binjie09, Project: tttclass, Source: JavavcCameraTest.java
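Example 1 relies on start(), grab(), and stop() inherited from an unshown superclass. For readers without that base class, the following standalone sketch shows the same display loop using OpenCVFrameGrabber directly; the device index 0 and the 50 ms delay are assumptions, not part of the original project.

import javax.swing.JFrame;
import org.bytedeco.javacv.CanvasFrame;
import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.OpenCVFrameGrabber;

public class CameraPreviewSketch {
    public static void main(String[] args) throws Exception {
        OpenCVFrameGrabber grabber = new OpenCVFrameGrabber(0); // device index 0 is an assumption
        grabber.start();

        CanvasFrame canvas = new CanvasFrame("Camera");
        canvas.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);

        while (canvas.isDisplayable()) {   // loop until the window is closed
            Frame frame = grabber.grab();  // one frame of video
            if (frame != null) {
                canvas.showImage(frame);
            }
            Thread.sleep(50);              // ~20 fps refresh
        }
        grabber.stop();
        grabber.release();
    }
}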

Example 2: show

import org.bytedeco.javacv.CanvasFrame; // import the required package/class
public void show(final Mat imageMat, final String title) {
  IplImage image = converterToIpl.convertToIplImage(converterToIpl.convert(imageMat));
  final IplImage image1 = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, image.nChannels());
  cvCopy(image, image1);
  CanvasFrame canvas = new CanvasFrame(title, 1);
  canvas.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
  canvas.showImage(converterToIpl.convert(image1));
}
 
Developer: MyRobotLab, Project: myrobotlab, Source: OpenCVFilterFaceRecognizer.java

Example 3: ShowImage

import org.bytedeco.javacv.CanvasFrame; // import the required package/class
public static void ShowImage(IplImage image, String caption, int width, int height)
{
    CanvasFrame canvas = new CanvasFrame(caption, 1);   // gamma=1
    canvas.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE);
    canvas.setCanvasSize(width, height);
    canvas.showImage(image);
}
 
Developer: duodecimo, Project: jmcv, Source: BlobDemo.java

Example 4: init

import org.bytedeco.javacv.CanvasFrame; // import the required package/class
public void init() {
	// canvas.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE);

	canvas = new CanvasFrame("Web Cam Live");
	path = new CanvasFrame("Detection");
	//path.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE);
	path.setContentPane(jp);
}
 
Developer: MeAnupSarkar, Project: ExoVisix, Source: ColoredObjectTracker.java

Example 5: ShowImageTransform

import org.bytedeco.javacv.CanvasFrame; // import the required package/class
/**
 * Constructs an instance of the ImageTransform with a new {@link CanvasFrame}.
 *
 * @param title of the new CanvasFrame to display images in
 * @param delay max time to wait in milliseconds (0 == infinity, negative == no wait)
 */
public ShowImageTransform(String title, int delay) {
    super(null);
    this.canvas = new CanvasFrame(title, 1.0);
    this.canvas.setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE);
    this.delay = delay;
}
 
Developer: deeplearning4j, Project: DataVec, Source: ShowImageTransform.java

Example 6: show

import org.bytedeco.javacv.CanvasFrame; // import the required package/class
public void show(final Mat imageMat, final String title) {
  IplImage image = converterToIpl.convertToIplImage(converterToIpl.convert(imageMat));
  final IplImage image1 = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, image.nChannels());
  cvCopy(image, image1);
  CanvasFrame canvas = new CanvasFrame(title, 1);
  canvas.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
  final OpenCVFrameConverter.ToIplImage converter = new OpenCVFrameConverter.ToIplImage();
  canvas.showImage(converter.convert(image1));
}
 
Developer: MyRobotLab, Project: myrobotlab, Source: OpenCVFilterFaceRecognizer.java
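Examples 2 and 6 route a Mat through an intermediate IplImage copy before display. If only display is needed, a shorter sketch can convert the Mat to a Frame directly; this assumes the newer org.bytedeco.opencv presets (older releases expose Mat under org.bytedeco.javacpp.opencv_core instead), and it is not how the original project does it.

import javax.swing.WindowConstants;
import org.bytedeco.javacv.CanvasFrame;
import org.bytedeco.javacv.OpenCVFrameConverter;
import org.bytedeco.opencv.opencv_core.Mat;

public class MatViewerSketch {
    // Display a Mat in its own window without an intermediate IplImage copy.
    public static void show(Mat mat, String title) {
        CanvasFrame canvas = new CanvasFrame(title, 1); // gamma = 1, as in the examples above
        // DISPOSE_ON_CLOSE so closing the viewer does not exit the whole application
        canvas.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE);
        OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat();
        canvas.showImage(converter.convert(mat));
    }
}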

Example 7: main

import org.bytedeco.javacv.CanvasFrame; // import the required package/class
public static void main(String[] args) throws Exception {
        CanvasFrame frame = new CanvasFrame("Virtual Ball Test");
        OpenCVFrameConverter.ToIplImage converter = new OpenCVFrameConverter.ToIplImage();
        IplImage image = IplImage.create(640, 960, IPL_DEPTH_8U, 3);
        cvSetZero(image);
        double[] roiPts = { 0,0, 640,0, 640,480, 0,400 };
        cvFillConvexPoly(image, new CvPoint(4).put((byte)16, roiPts), roiPts.length/2, CvScalar.WHITE, CV_AA, 16);
        VirtualBall virtualBall = new VirtualBall(new Settings(roiPts));

        for (int i = 0; i < 1000; i++) {
            Thread.sleep(100);
            cvSetZero(image);
            if (i == 50) {
                roiPts[5] -= 100;
            }
            if (i > 100 && i < 1200) {
                roiPts[3] += 1;
                roiPts[5] += 1;
            }
//            if (i > 103) {
//                System.out.println(i);
//            }
            cvFillConvexPoly(image, new CvPoint(4).put((byte)16, roiPts), roiPts.length/2, CvScalar.WHITE, CV_AA, 16);
            virtualBall.draw(image, roiPts);
            frame.showImage(converter.convert(image));
        }
    }
 
Developer: bytedeco, Project: procamtracker, Source: VirtualBall.java

Example 8: main

import org.bytedeco.javacv.CanvasFrame; // import the required package/class
public static void main(String[] args) throws Exception {
    CanvasFrame frame = new CanvasFrame("Chronometer Test");
    OpenCVFrameConverter.ToIplImage converter = new OpenCVFrameConverter.ToIplImage();
    IplImage image = IplImage.create(640, 480, IPL_DEPTH_8U, 3);
    cvSetZero(image);
    Chronometer chronometer = new Chronometer(new Rectangle(100, 100, 100, 100), image);

    for (int i = 0; i < 1000; i++) {
        Thread.sleep(100);
        cvSetZero(image);
        chronometer.draw(image);
        frame.showImage(converter.convert(image));
    }
}
 
Developer: bytedeco, Project: procamtracker, Source: Chronometer.java

Example 9: main

import org.bytedeco.javacv.CanvasFrame; // import the required package/class
public static void main(String[] args) throws Exception {
//        Logger.getLogger("org.bytedeco.javacv").setLevel(Level.OFF);

//        String objectFilename = args.length == 2 ? args[0] : "/home/jiii/sketchbook/libraries/PapARt/data/markers/dlink.png";
        String objectFilename = args.length == 2 ? args[0] : "/home/jiii/repos/Papart-github/papart-examples/Camera/ExtractPlanarObjectForTracking/ExtractedView.bmp";
        String sceneFilename = args.length == 2 ? args[1] : "/home/jiii/my_photo-7.jpg";

        IplImage object = cvLoadImage(objectFilename, CV_LOAD_IMAGE_GRAYSCALE);
        IplImage image = cvLoadImage(sceneFilename, CV_LOAD_IMAGE_GRAYSCALE);
        if (object == null || image == null) {
            System.err.println("Can not load " + objectFilename + " and/or " + sceneFilename);
            System.exit(-1);
        }

        IplImage objectColor = IplImage.create(object.width(), object.height(), 8, 3);
        cvCvtColor(object, objectColor, CV_GRAY2BGR);

        IplImage correspond = IplImage.create(image.width(), object.height() + image.height(), 8, 1);
        cvSetImageROI(correspond, cvRect(0, 0, object.width(), object.height()));
        cvCopy(object, correspond);
        cvSetImageROI(correspond, cvRect(0, object.height(), correspond.width(), correspond.height()));
        cvCopy(image, correspond);
        cvResetImageROI(correspond);

        ObjectFinder.Settings settings = new ObjectFinder.Settings();
        settings.objectImage = object;
        settings.useFLANN = true;
        settings.ransacReprojThreshold = 5;
        ObjectFinder finder = new ObjectFinder(settings);

        long start = System.currentTimeMillis();
        double[] dst_corners = finder.find(image);
//        System.out.println("Finding time = " + (System.currentTimeMillis() - start) + " ms");

        if (dst_corners != null) {
            for (int i = 0; i < 4; i++) {
                int j = (i + 1) % 4;
                int x1 = (int) Math.round(dst_corners[2 * i]);
                int y1 = (int) Math.round(dst_corners[2 * i + 1]);
                int x2 = (int) Math.round(dst_corners[2 * j]);
                int y2 = (int) Math.round(dst_corners[2 * j + 1]);
                line(cvarrToMat(correspond), new Point(x1, y1 + object.height()),
                        new Point(x2, y2 + object.height()),
                        Scalar.WHITE, 1, 8, 0);
            }
        }

        for (int i = 0; i < finder.ptpairs.size(); i += 2) {
            Point2f pt1 = finder.objectKeypoints.get(finder.ptpairs.get(i)).pt();
            Point2f pt2 = finder.imageKeypoints.get(finder.ptpairs.get(i + 1)).pt();
            line(cvarrToMat(correspond), new Point(Math.round(pt1.x()), Math.round(pt1.y())),
                    new Point(Math.round(pt2.x()), Math.round(pt2.y() + object.height())),
                    Scalar.WHITE, 1, 8, 0);
        }

        CanvasFrame objectFrame = new CanvasFrame("Object");
        CanvasFrame correspondFrame = new CanvasFrame("Object Correspond");
        OpenCVFrameConverter converter = new OpenCVFrameConverter.ToIplImage();

        correspondFrame.showImage(converter.convert(correspond));
        for (int i = 0; i < finder.objectKeypoints.size(); i++) {
            KeyPoint r = finder.objectKeypoints.get(i);
            Point center = new Point(Math.round(r.pt().x()), Math.round(r.pt().y()));
            int radius = Math.round(r.size() / 2);
            circle(cvarrToMat(objectColor), center, radius, Scalar.RED, 1, 8, 0);
        }
        objectFrame.showImage(converter.convert(objectColor));

        objectFrame.waitKey();

        objectFrame.dispose();
        correspondFrame.dispose();
    }
 
Developer: poqudrof, Project: PapARt, Source: ObjectFinder.java

Example 10: main

import org.bytedeco.javacv.CanvasFrame; // import the required package/class
public static void main(String[] args) throws Exception {
//        Logger.getLogger("org.bytedeco.javacv").setLevel(Level.OFF);

//        String objectFilename = args.length == 2 ? args[0] : "/home/jiii/sketchbook/libraries/PapARt/data/markers/dlink.png";
        String objectFilename = args.length == 2 ? args[0] : "/home/jiii/sketchbook/libraries/PapARt/data/markers/rocks.jpg";
        String sceneFilename  = args.length == 2 ? args[1] : "/home/jiii/my_photo-4.jpg";

        IplImage object = cvLoadImage(objectFilename, CV_LOAD_IMAGE_GRAYSCALE);
        IplImage image  = cvLoadImage(sceneFilename,  CV_LOAD_IMAGE_GRAYSCALE);
        if (object == null || image == null) {
            System.err.println("Can not load " + objectFilename + " and/or " + sceneFilename);
            System.exit(-1);
        }

        IplImage objectColor = IplImage.create(object.width(), object.height(), 8, 3);
        cvCvtColor(object, objectColor, CV_GRAY2BGR);

        IplImage correspond = IplImage.create(image.width(), object.height()+ image.height(), 8, 1);
        cvSetImageROI(correspond, cvRect(0, 0, object.width(), object.height()));
        cvCopy(object, correspond);
        cvSetImageROI(correspond, cvRect(0, object.height(), correspond.width(), correspond.height()));
        cvCopy(image, correspond);
        cvResetImageROI(correspond);

        ObjectFinder.Settings settings = new ObjectFinder.Settings();
        settings.objectImage = object;
        settings.useFLANN = true;
        settings.ransacReprojThreshold = 5;
        ObjectFinder finder = new ObjectFinder(settings);

        long start = System.currentTimeMillis();
        double[] dst_corners = finder.find(image);
        System.out.println("Finding time = " + (System.currentTimeMillis() - start) + " ms");

        if (dst_corners !=  null) {
            for (int i = 0; i < 4; i++) {
                int j = (i+1)%4;
                int x1 = (int)Math.round(dst_corners[2*i    ]);
                int y1 = (int)Math.round(dst_corners[2*i + 1]);
                int x2 = (int)Math.round(dst_corners[2*j    ]);
                int y2 = (int)Math.round(dst_corners[2*j + 1]);
                line(cvarrToMat(correspond), new Point(x1, y1 + object.height()),
                        new Point(x2, y2 + object.height()),
                        Scalar.WHITE, 1, 8, 0);
            }
        }

        for (int i = 0; i < finder.ptpairs.size(); i += 2) {
            Point2f pt1 = finder.objectKeypoints.get(finder.ptpairs.get(i)).pt();
            Point2f pt2 = finder.imageKeypoints.get(finder.ptpairs.get(i + 1)).pt();
            line(cvarrToMat(correspond), new Point(Math.round(pt1.x()), Math.round(pt1.y())),
                    new Point(Math.round(pt2.x()), Math.round(pt2.y() + object.height())),
                    Scalar.WHITE, 1, 8, 0);
        }

        CanvasFrame objectFrame = new CanvasFrame("Object");
        CanvasFrame correspondFrame = new CanvasFrame("Object Correspond");
        OpenCVFrameConverter converter = new OpenCVFrameConverter.ToIplImage();

        correspondFrame.showImage(converter.convert(correspond));
        for (int i = 0; i < finder.objectKeypoints.size(); i++) {
            KeyPoint r = finder.objectKeypoints.get(i);
            Point center = new Point(Math.round(r.pt().x()), Math.round(r.pt().y()));
            int radius = Math.round(r.size() / 2);
            circle(cvarrToMat(objectColor), center, radius, Scalar.RED, 1, 8, 0);
        }
        objectFrame.showImage(converter.convert(objectColor));

        objectFrame.waitKey();

        objectFrame.dispose();
        correspondFrame.dispose();
    }
 
Developer: poqudrof, Project: PapAR, Source: ObjectFinder.java

Example 11: init

import org.bytedeco.javacv.CanvasFrame; // import the required package/class
public void init() throws Exception {
    // create arrays and canvas frames on the Event Dispatcher Thread...
    CameraDevice.Settings[] cs = cameraSettings.toArray();
    if (cameraDevices == null) {
        cameraDevices = new CameraDevice[cs.length];
    } else {
        cameraDevices = Arrays.copyOf(cameraDevices, cs.length);
    }
    cameraCanvasFrames = new CanvasFrame[cs.length];
    frameGrabbers = new FrameGrabber[cs.length];
    cameraFrameConverters = new OpenCVFrameConverter.ToIplImage[cs.length];
    for (int i = 0; i < cs.length; i++) {
        if (cameraDevices[i] == null) {
            cameraDevices[i] = new CameraDevice(cs[i]);
        } else {
            cameraDevices[i].setSettings(cs[i]);
        }
        if (cameraSettings.getMonitorWindowsScale() > 0) {
            cameraCanvasFrames[i] = new CanvasFrame(cs[i].getName());
            cameraCanvasFrames[i].setCanvasScale(cameraSettings.getMonitorWindowsScale());
        }
    }

    ProjectorDevice.Settings[] ps = projectorSettings.toArray();
    if (projectorDevices == null) {
        projectorDevices = new ProjectorDevice[ps.length];
    } else {
        projectorDevices = Arrays.copyOf(projectorDevices, ps.length);
    }
    projectorCanvasFrames = new CanvasFrame[ps.length];
    projectorPlanes = new MarkedPlane[ps.length];
    projectorFrameConverters = new OpenCVFrameConverter.ToIplImage[ps.length];
    for (int i = 0; i < ps.length; i++) {
        if (projectorDevices[i] == null) {
            projectorDevices[i] = new ProjectorDevice(ps[i]);
        } else {
            projectorDevices[i].setSettings(ps[i]);
        }
        projectorCanvasFrames[i] = projectorDevices[i].createCanvasFrame();
        projectorCanvasFrames[i].showColor(Color.BLACK);
        projectorFrameConverters[i] = new OpenCVFrameConverter.ToIplImage();
        Dimension dim = projectorCanvasFrames[i].getSize();
        projectorPlanes[i] = new MarkedPlane(dim.width, dim.height, markers[1], true,
                cvScalarAll(((ProjectorDevice.CalibrationSettings)ps[0]).getBrightnessForeground()*255),
                cvScalarAll(((ProjectorDevice.CalibrationSettings)ps[0]).getBrightnessBackground()*255), 4);
    }
}
 
Developer: bytedeco, Project: procamcalib, Source: CalibrationWorker.java

Example 12: onState

import org.bytedeco.javacv.CanvasFrame; // import the required package/class
public void onState(final OpenCV opencv) {
	SwingUtilities.invokeLater(new Runnable() {
		@Override
		public void run() {

			VideoProcessor vp = opencv.videoProcessor;

			// seems pretty destructive :P
			currentFilterListModel.clear();
			// add new filters from service into gui
			for (OpenCVFilter f : opencv.getFiltersCopy()) {
			  ComboBoxModel2.removeSource(boundServiceName+"."+f.name);
				addFilterToGui(f);
			}

			currentFilters.repaint();

			for (int i = 0; i < grabberTypeSelect.getItemCount(); ++i) {
				String currentObject = prefixPath + grabberTypeSelect.getItemAt(i) + "FrameGrabber";
				if (currentObject.equals(vp.grabberType)) {
					grabberTypeSelect.setSelectedIndex(i);
					break;
				}
			}

			if (opencv.capturing) {
				capture.setText("stop");
			} else {
				capture.setText("capture");
			}

			inputFile.setText(vp.inputFile);
			cameraIndex.setSelectedIndex(vp.cameraIndex);
			String inputSource = opencv.videoProcessor.inputSource;
			if (OpenCV.INPUT_SOURCE_CAMERA.equals(inputSource)) {
				cameraRadio.setSelected(true);
			// Note: this condition repeats the INPUT_SOURCE_CAMERA check above and can
			// never be reached; a file input source constant was presumably intended here.
			} else if (OpenCV.INPUT_SOURCE_CAMERA.equals(inputSource)) {
				fileRadio.setSelected(true);
			} else if (OpenCV.INPUT_SOURCE_PIPELINE.equals(inputSource)) {
				// grabberTypeSelect.removeActionListener(grabberTypeListener);
				grabberTypeSelect.setSelectedItem("Pipeline");
				// grabberTypeSelect.addActionListener(grabberTypeListener);
				pipelineHook.setSelectedItem(vp.pipelineSelected);
			} else if (OpenCV.INPUT_SOURCE_IMAGE_FILE.equals(inputSource)
					|| OpenCV.INPUT_SOURCE_IMAGE_DIRECTORY.equals(inputSource)) {
				// the file input should be enabled if we are file or
				// directory.
				fileRadio.setSelected(true);
			}

			currentFilters.removeListSelectionListener(self);
			currentFilters.setSelectedValue(vp.displayFilterName, true);// .setSelectedIndex(index);
			currentFilters.addListSelectionListener(self);

			if (opencv.undockDisplay == true) {
				cframe = new CanvasFrame("canvas frame");
			} else {
				if (cframe != null) {
					cframe.dispose();
					cframe = null;
				}
			}

			// changing a filter "broadcastState()"
			// which might change dimension of video feed
			// which might need to re-pack & re-paint components ...
			myService.pack();
		} // end run()
	});

}
 
Developer: MyRobotLab, Project: myrobotlab, Source: OpenCVGui.java

Example 13: acquireRoi

import org.bytedeco.javacv.CanvasFrame; // import the required package/class
public double[] acquireRoi(CanvasFrame monitorWindow, double monitorWindowScale,
        IplImage cameraImage, int pyramidLevel) throws Exception {
    final int w = cameraImage.width();
    final int h = cameraImage.height();
    roiPts = null;
    markerError      = 0;
    markerErrorCount = 0;

    for (ObjectSettings os : settings.toArray()) {
        File f = os.textureImageFile;
        RoiAcquisitionMethod ram = os.roiAcquisitionMethod;
        if ((ram == RoiAcquisitionMethod.OBJECT_FINDER || ram == RoiAcquisitionMethod.MARKER_DETECTOR) &&
                (f == null || (textureImage = cvLoadImage(f.getAbsolutePath())) == null)) {
            throw new Exception("Error: Could not load the object image file \"" + f + "\" for " + ram + ".");
        }
        // in the grabbed camera images, acquire the region of interest
        switch (ram) {
            case MOUSE_CLICKS:    roiPts = acquireRoiFromMouseClicks(monitorWindow, monitorWindowScale); break;
            case OBJECT_FINDER:   roiPts = acquireRoiFromObjectFinder  (cameraImage); break;
            case MARKER_DETECTOR: roiPts = acquireRoiFromMarkerDetector(cameraImage); break;
            case WHOLE_FRAME:     roiPts = new double[] { 0.0, 0.0,  w, 0.0,  w, h,  0.0, h }; break;
            case HALF_FRAME:
                double dw = w*(2-JavaCV.SQRT2)/4;
                double dh = h*(2-JavaCV.SQRT2)/4;
                roiPts = new double[] { dw, dh,  w-dw, dh,  w-dw, h-dh,  dw, h-dh }; break;
            default: assert false;
        }

        if (roiPts != null) {
            if (pyramidLevel > 0) {
                for (int i = 0; i < roiPts.length; i++) {
                    roiPts[i] = roiPts[i]*(1<<pyramidLevel);
                }
            }
            objectSettings = os;
            virtualSettings = null;
            for (VirtualSettings vs : objectSettings.toArray()) {
                Rectangle r = vs.objectHotSpot;
                if (r == null || r.width <= 0 || r.height <= 0) {
                    setVirtualSettings(vs);
                    initVirtualSettings();
                }
            }
            break;
        }
    }

    return roiPts;
}
 
Developer: bytedeco, Project: procamtracker, Source: RealityAugmentor.java


Note: The org.bytedeco.javacv.CanvasFrame class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors, who retain copyright over the source code; distribution and use should follow each project's license. Please do not reproduce without permission.