This article collects typical usage examples of the Java method boofcv.gui.image.ShowImages.showWindow. If you are unsure what ShowImages.showWindow does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also read further about the enclosing class, boofcv.gui.image.ShowImages.
The 15 code examples below all call ShowImages.showWindow and are sorted by popularity by default.
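Before the examples, a minimal sketch of the simplest call may be useful. The snippet below is not taken from any of the examples; it assumes a hypothetical image file path ("test.jpg") and uses UtilImageIO.loadImage together with the showWindow(BufferedImage, String) overload, which opens a Swing window and returns the ImagePanel backing it. The examples that display an entire application component instead use the showWindow(JComponent, String, boolean) overload, as in Example 1 below.

import boofcv.gui.image.ImagePanel;
import boofcv.gui.image.ShowImages;
import boofcv.io.image.UtilImageIO;

import java.awt.image.BufferedImage;

public class ShowWindowSketch {
    public static void main(String[] args) {
        // load an image from disk; "test.jpg" is a placeholder path
        BufferedImage image = UtilImageIO.loadImage("test.jpg");

        // open a window titled "Demo"; the returned panel can be repainted later
        // if the underlying BufferedImage is modified
        ImagePanel panel = ShowImages.showWindow(image, "Demo");
        panel.repaint();
    }
}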
Example 1: run

import boofcv.gui.image.ShowImages; // import the package/class that this method depends on

@Override
public void run() {
    if (!streaming) {
        addMouseListener(panel);
    }
    JFrame window = ShowImages.showWindow(panel, PlaySet.class.getSimpleName(), true);
    window.addKeyListener(new KeyAdapter() {
        @Override
        public void keyTyped(KeyEvent e) {
            if (e.getKeyChar() == 'x' && previousSet != null) {
                System.out.println("'Not a Set!'");
                ImageSuppliers.WebcamSaverImageSupplier.save(image);
            }
        }
    });
    if (streaming) {
        while (true) {
            image = imageSupplier.get();
            newImage(image);
        }
    }
}
Example 2: view

import boofcv.gui.image.ShowImages; // import the package/class that this method depends on

public static void view(File directory) {
    ListDisplayPanel panel = new ListDisplayPanel();
    int numCols = 20;
    int numImagesPerLabel = -1;
    char prevNumLabel = ' ';
    for (File d : directory.listFiles((dir, name) -> !name.matches("\\..*"))) {
        String label = d.getName();
        BufferedImage[] bufferedImages = Arrays.stream(d.listFiles((dir, name) -> name.matches(".*\\.jpg")))
                .map(f -> UtilImageIO.loadImage(f.getAbsolutePath()))
                .map(bi -> resize(bi, bi.getWidth() / 3, bi.getHeight() / 3))
                .collect(Collectors.toList())
                .toArray(new BufferedImage[0]);
        panel.addItem(new ImageGridPanel((bufferedImages.length / numCols) + 1, numCols, bufferedImages), label);
        System.out.println(label + "\t" + bufferedImages.length);
        if (prevNumLabel != label.charAt(0)) {
            numImagesPerLabel = bufferedImages.length;
            prevNumLabel = label.charAt(0);
        } else if (numImagesPerLabel != bufferedImages.length) {
            throw new IllegalStateException("Expected " + numImagesPerLabel + " images, but only found " + bufferedImages.length + " for " + label);
        }
    }
    ShowImages.showWindow(panel, ViewLabelledImagesV2.class.getSimpleName(), true);
}
Example 3: processRgb

import boofcv.gui.image.ShowImages; // import the package/class that this method depends on

protected void processRgb(FrameMode mode, ByteBuffer frame, int timestamp) {
    if (mode.getVideoFormat() != VideoFormat.RGB) {
        System.out.println("Bad rgb format!");
    }

    if (outRgb == null) {
        rgb.reshape(mode.getWidth(), mode.getHeight());
        outRgb = new BufferedImage(rgb.width, rgb.height, BufferedImage.TYPE_INT_RGB);
        guiRgb = ShowImages.showWindow(outRgb, "RGB Image");
    }

    App.bufferRgbToMsU8(frame, rgb);
    ConvertBufferedImage.convertTo_U8(rgb, outRgb, true);

    drawButton(buttonMove, outRgb);
    drawButton(buttonStop, outRgb);
    drawButton(buttonLeft, outRgb);
    drawButton(buttonRight, outRgb);

    processButtonStatePhaseTwo(buttonMove, outRgb);
    processButtonStatePhaseTwo(buttonStop, outRgb);
    processButtonStatePhaseTwo(buttonLeft, outRgb);
    processButtonStatePhaseTwo(buttonRight, outRgb);

    guiRgb.repaint();
}
Example 4: processRgb

import boofcv.gui.image.ShowImages; // import the package/class that this method depends on

protected void processRgb(FrameMode mode, ByteBuffer frame, int timestamp) {
    if (mode.getVideoFormat() != VideoFormat.RGB) {
        System.out.println("Bad rgb format!");
    }
    System.out.println("Got rgb! " + timestamp);

    if (outRgb == null) {
        rgb.reshape(mode.getWidth(), mode.getHeight());
        outRgb = new BufferedImage(rgb.width, rgb.height, BufferedImage.TYPE_INT_RGB);
        guiRgb = ShowImages.showWindow(outRgb, "RGB Image");
    }

    UtilOpenKinect.bufferRgbToMsU8(frame, rgb);
    ConvertBufferedImage.convertTo_U8(rgb, outRgb, true);
    guiRgb.repaint();
}
Example 5: main

import boofcv.gui.image.ShowImages; // import the package/class that this method depends on

public static void main(String args[]) {
//    ShowImageBlurApp<ImageFloat32> app
//        = new ShowImageBlurApp<ImageFloat32>(ImageFloat32.class);
    ShowLensDistortion<ImageUInt8> app
        = new ShowLensDistortion<ImageUInt8>(ImageUInt8.class);

    List<PathLabel> inputs = new ArrayList<PathLabel>();
    inputs.add(new PathLabel("shapes","../data/evaluation/shapes01.png"));
    inputs.add(new PathLabel("beach","../data/evaluation/scale/beach02.jpg"));
    inputs.add(new PathLabel("sunflowers","../data/evaluation/sunflowers.png"));
    app.setInputList(inputs);

    // wait for it to process one image so that the size isn't all screwed up
    while( !app.getHasProcessedImage() ) {
        Thread.yield();
    }

    ShowImages.showWindow(app, "Lens Distortion");
}
Example 6: nogenerics

import boofcv.gui.image.ShowImages; // import the package/class that this method depends on

public static void nogenerics( ImageSingleBand input )
{
    Class inputType = input.getClass();
    Class derivType = GImageDerivativeOps.getDerivativeType(inputType);

    ImageSingleBand blurred = GeneralizedImageOps.createSingleBand(inputType, input.width, input.height);
    ImageSingleBand derivX = GeneralizedImageOps.createSingleBand(derivType, input.width, input.height);
    ImageSingleBand derivY = GeneralizedImageOps.createSingleBand(derivType, input.width, input.height);

    // Gaussian blur: Convolve a Gaussian kernel
    GBlurImageOps.gaussian(input, blurred, -1, blurRadius, null);

    // Calculate image's derivative
    GImageDerivativeOps.sobel(blurred, derivX, derivY, BorderType.EXTENDED);

    // display the results
    BufferedImage outputImage = VisualizeImageData.colorizeSign(derivX,null,-1);
    ShowImages.showWindow(outputImage,"Generalized "+inputType.getSimpleName());
}
Example 7: process

import boofcv.gui.image.ShowImages; // import the package/class that this method depends on

public void process( BufferedImage image ) {
    I input = GeneralizedImageOps.createSingleBand(imageType, image.getWidth(), image.getHeight());
    I blur = GeneralizedImageOps.createSingleBand(imageType, image.getWidth(), image.getHeight());

    ConvertBufferedImage.convertFromSingle(image, input, imageType);
    GBlurImageOps.gaussian(input, blur, -1, 2, null);

    DetectLineHoughFoot<I,D> alg = FactoryDetectLineAlgs.houghFoot(6, 12, 5, 25, 10, imageType, derivType);

    ImageLinePanel gui = new ImageLinePanel();
    gui.setBackground(image);
    gui.setLines(alg.detect(blur));
    gui.setPreferredSize(new Dimension(image.getWidth(),image.getHeight()));

    BufferedImage renderedTran = VisualizeImageData.grayMagnitude(alg.getTransform().getTransform(),null,-1);
    BufferedImage renderedBinary = VisualizeBinaryData.renderBinary(alg.getBinary(), null);

    ShowImages.showWindow(renderedBinary,"Detected Edges");
    ShowImages.showWindow(renderedTran,"Parameter Space");
    ShowImages.showWindow(gui,"Detected Lines");
}
Example 8: process

import boofcv.gui.image.ShowImages; // import the package/class that this method depends on

public void process() throws IOException {
    parseFrame(0);

    outRgb = new BufferedImage(rgb.getWidth(),rgb.getHeight(),BufferedImage.TYPE_INT_RGB);
    outDepth = new BufferedImage(depth.getWidth(),depth.getHeight(),BufferedImage.TYPE_INT_RGB);

    gui = new ImageGridPanel(1,2,outRgb,outDepth);
    ShowImages.showWindow(gui,"Kinect Data");

    int frame = 1;
    while( true ) {
        parseFrame(frame++);
        ConvertBufferedImage.convertTo_U8(rgb,outRgb);
        VisualizeImageData.disparity(depth, outDepth, 0, UtilOpenKinect.FREENECT_DEPTH_MM_MAX_VALUE, 0);
        gui.repaint();
        BoofMiscOps.pause(30);
    }
}
Example 9: process

import boofcv.gui.image.ShowImages; // import the package/class that this method depends on

public void process( BufferedImage image ) {
    I input = GeneralizedImageOps.createSingleBand(imageType, image.getWidth(), image.getHeight());
    I blur = GeneralizedImageOps.createSingleBand(imageType, image.getWidth(), image.getHeight());

    ConvertBufferedImage.convertFromSingle(image, input, imageType);
    GBlurImageOps.gaussian(input, blur, -1, 2, null);

    DetectLineHoughPolar<I,D> alg = FactoryDetectLineAlgs.houghPolar(5, 10, 2, Math.PI / 180, 25, 10, imageType, derivType);
    List<LineParametric2D_F32> lines = alg.detect(blur);

    ImageLinePanel gui = new ImageLinePanel();
    gui.setBackground(image);
    gui.setLines(lines);
    gui.setPreferredSize(new Dimension(image.getWidth(),image.getHeight()));

    BufferedImage renderedTran = VisualizeImageData.grayMagnitude(alg.getTransform().getTransform(),null,-1);
    BufferedImage renderedBinary = VisualizeBinaryData.renderBinary(alg.getBinary(), null);

    ShowImages.showWindow(renderedBinary,"Detected Edges");
    ShowImages.showWindow(renderedTran,"Parameter Space");
    ShowImages.showWindow(gui,"Detected Lines");
}
Example 10: main

import boofcv.gui.image.ShowImages; // import the package/class that this method depends on

public static void main( String args[] ) {
    Class type = ImageFloat32.class;
    Class derivType = type;

//    Class type = ImageUInt8.class;
//    Class derivType = ImageSInt16.class;

    VideoMosaicSequentialPointApp app = new VideoMosaicSequentialPointApp(type,derivType);

    List<PathLabel> inputs = new ArrayList<PathLabel>();
    inputs.add(new PathLabel("Plane 1", "../data/applet/mosaic/airplane01.mjpeg"));
    inputs.add(new PathLabel("Plane 2", "../data/applet/mosaic/airplane02.mjpeg"));
    inputs.add(new PathLabel("Shake", "../data/applet/shake.mjpeg"));
    app.setInputList(inputs);

    // wait for it to process one image so that the size isn't all screwed up
    while( !app.getHasProcessedImage() ) {
        Thread.yield();
    }

    ShowImages.showWindow(app, "Video Image Mosaic");
}
Example 11: main

import boofcv.gui.image.ShowImages; // import the package/class that this method depends on

public static void main( String args[] ) {
    DemoBinaryImageLabelOpsApp app = new DemoBinaryImageLabelOpsApp(ImageFloat32.class);

    List<PathLabel> inputs = new ArrayList<PathLabel>();
    inputs.add(new PathLabel("particles","../data/evaluation/particles01.jpg"));
    inputs.add(new PathLabel("shapes","../data/evaluation/shapes01.png"));
    app.setInputList(inputs);

    // wait for it to process one image so that the size isn't all screwed up
    while( !app.getHasProcessedImage() ) {
        Thread.yield();
    }

    ShowImages.showWindow(app, "Label Binary Blobs");
    System.out.println("Done");
}
Example 12: convertToGray

import boofcv.gui.image.ShowImages; // import the package/class that this method depends on

/**
 * There is no single perfect way, agreed on by everyone, to convert a color image into a gray scale
 * image. Two ways of converting a MultiSpectral image into a gray scale image are shown in this
 * example.
 */
public static void convertToGray( BufferedImage input ) {
    // convert the BufferedImage into a MultiSpectral
    MultiSpectral<ImageUInt8> image = ConvertBufferedImage.convertFromMulti(input,null,ImageUInt8.class);
    ImageUInt8 gray = new ImageUInt8( image.width,image.height);

    // creates a gray scale image by averaging intensity value across pixels
    GPixelMath.averageBand(image, gray);
    BufferedImage outputAve = ConvertBufferedImage.convertTo(gray,null);

    // create an output image just from the first band
    BufferedImage outputBand0 = ConvertBufferedImage.convertTo(image.getBand(0),null);

    ShowImages.showWindow(outputAve,"Average");
    ShowImages.showWindow(outputBand0,"Band 0");
}
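The Javadoc above notes that there is no universally agreed-upon conversion, so a third option worth sketching is a weighted average of the color bands. The method below is not part of the original example; it assumes the MultiSpectral bands produced by convertFromMulti are ordered red, green, blue, and applies the common 0.299/0.587/0.114 luminance weights by hand.

public static void convertToGrayWeighted( BufferedImage input ) {
    MultiSpectral<ImageUInt8> image = ConvertBufferedImage.convertFromMulti(input, null, ImageUInt8.class);
    ImageUInt8 gray = new ImageUInt8(image.width, image.height);

    // assumption: band 0 = red, band 1 = green, band 2 = blue
    ImageUInt8 r = image.getBand(0), g = image.getBand(1), b = image.getBand(2);

    for (int y = 0; y < image.height; y++) {
        for (int x = 0; x < image.width; x++) {
            // standard luminance weights; other weightings are just as valid
            int value = (int) (0.299 * r.get(x, y) + 0.587 * g.get(x, y) + 0.114 * b.get(x, y));
            gray.set(x, y, value);
        }
    }

    ShowImages.showWindow(ConvertBufferedImage.convertTo(gray, null), "Weighted");
}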
Example 13: updateGUI

import boofcv.gui.image.ShowImages; // import the package/class that this method depends on

@Override
public void updateGUI(BufferedImage guiImage, T origImage) {
    Graphics2D g2 = guiImage.createGraphics();

    for (int i = 0; i < corners.size(); i++) {
        Point2D_I16 pt = corners.get(i);
        g2.setColor(Color.BLACK);
        g2.fillOval(pt.x - 4, pt.y - 4, 9, 9);
        g2.setColor(Color.RED);
        g2.fillOval(pt.x - 2, pt.y - 2, 5, 5);
    }

    if (panel == null) {
        panel = ShowImages.showWindow(guiImage, "Image Sequence");
        addComponent(panel);
    } else {
        panel.setBufferedImage(guiImage);
        panel.repaint();
    }
}
Example 14: main

import boofcv.gui.image.ShowImages; // import the package/class that this method depends on

public static void main( String args[] ) {
    EvaluateInterpolateEnlargeApp app = new EvaluateInterpolateEnlargeApp(ImageFloat32.class);
//    EvaluateInterpolateEnlargeApp app = new EvaluateInterpolateEnlargeApp(ImageUInt8.class);

    app.setPreferredSize(new Dimension(500,500));
    app.setBaseDirectory("../data/applet/");
    app.loadInputData("../data/applet/interpolation.txt");

//    java.util.List<PathLabel> inputs = new ArrayList<PathLabel>();
//    inputs.add(new PathLabel("eye 1","../data/evaluation/eye01.jpg"));
//    inputs.add(new PathLabel("eye 2","../data/evaluation/eye02.jpg"));
//
//    app.setInputList(inputs);

    // wait for it to process one image so that the size isn't all screwed up
    while( !app.getHasProcessedImage() ) {
        Thread.yield();
    }

    ShowImages.showWindow(app,"Interpolation Enlarge");
}
Example 15: main

import boofcv.gui.image.ShowImages; // import the package/class that this method depends on

public static void main( String args[] ) {
    RemoveLensDistortionApp app = new RemoveLensDistortionApp();

    // camera config, image left, image right
    String calibDir = "../data/applet/calibration/mono/Sony_DSC-HX5V_Chess/";
    String imageDir = "../data/evaluation/structure/";
    String bumbleDir = "../data/evaluation/calibration/stereo/Bumblebee2_Chess/";

    java.util.List<PathLabel> inputs = new ArrayList<PathLabel>();
    inputs.add(new PathLabel("Sony HX5V",calibDir + "intrinsic.xml",imageDir + "dist_cyto_01.jpg"));
    inputs.add(new PathLabel("BumbleBee2",bumbleDir+"intrinsicLeft.xml",bumbleDir + "left01.jpg"));
    app.setInputList(inputs);

    // wait for it to process one image so that the size isn't all screwed up
    while( !app.getHasProcessedImage() ) {
        Thread.yield();
    }

    ShowImages.showWindow(app, "Remove Lens Distortion");
    System.out.println("Done");
}