本文整理汇总了Java中boofcv.io.image.UtilImageIO.loadImage方法的典型用法代码示例。如果您正苦于以下问题:Java UtilImageIO.loadImage方法的具体用法?Java UtilImageIO.loadImage怎么用?Java UtilImageIO.loadImage使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类boofcv.io.image.UtilImageIO
的用法示例。
在下文中一共展示了UtilImageIO.loadImage方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: main
import boofcv.io.image.UtilImageIO; //导入方法依赖的package包/类
/**
 * Extracts feature vectors from every training image in data/train-out and
 * writes one summary file per feature finder to data/train-out-&lt;suffix&gt;.
 *
 * @throws IOException if the training directory cannot be listed or a summary
 *                     file cannot be written
 */
public static void main(String[] args) throws IOException {
    FeatureFinder[] finders = new FeatureFinder[] {
        new FindCardColourFeatures(),
        new FindCardShadingFeatures(),
        new FindCardShapeFeatures()
    };
    for (FeatureFinder finder : finders) {
        List<String> summaries = new ArrayList<>();
        // File.listFiles() returns null (not an empty array) when the path is
        // missing or not a directory — guard to avoid an NPE in the for-loop
        File[] jpegs = new File("data/train-out").listFiles((dir, name) -> name.matches(".*\\.jpg"));
        if (jpegs == null) {
            throw new IOException("Cannot list directory data/train-out");
        }
        for (File file : jpegs) {
            BufferedImage image = UtilImageIO.loadImage(file.getAbsolutePath());
            if (image == null) {
                // loadImage() returns null for unreadable files; skip rather than NPE
                System.err.println("Skipping unreadable image " + file);
                continue;
            }
            double[] features = finder.find(image, false);
            if (features != null) {
                summaries.add(finder.getSummaryLine(file.getAbsolutePath(), features));
            }
        }
        Path p = Paths.get("data/train-out-" + finder.getFileSuffix());
        Files.write(p, summaries);
    }
}
示例2: main
import boofcv.io.image.UtilImageIO; //导入方法依赖的package包/类
/**
 * Extracts feature vectors from the v2 labelled training set (one
 * sub-directory per label) and writes one summary file per feature finder.
 *
 * @throws IOException if the labelled directory cannot be listed or a summary
 *                     file cannot be written
 */
public static void main(String[] args) throws IOException {
    FeatureFinder[] finders = new FeatureFinder[] {
        new FindCardColourFeatures(2),
        // new FindCardShadingFeatures(),
        // new FindCardShapeFeatures()
    };
    for (FeatureFinder finder : finders) {
        List<String> summaries = new ArrayList<>();
        // File.listFiles() returns null when the path is missing or not a
        // directory — guard to avoid an NPE in the for-loop
        File[] labelDirs = TrainingDataV2.LABELLED_DIRECTORY.listFiles((dir, name) -> !name.matches("\\..*"));
        if (labelDirs == null) {
            throw new IOException("Cannot list " + TrainingDataV2.LABELLED_DIRECTORY);
        }
        for (File d : labelDirs) {
            File[] jpegs = d.listFiles((dir, name) -> name.matches(".*\\.jpg"));
            if (jpegs == null) {
                continue; // entry is not a readable directory; skip it
            }
            for (File file : jpegs) {
                BufferedImage image = UtilImageIO.loadImage(file.getAbsolutePath());
                if (image == null) {
                    // loadImage() returns null for unreadable files; skip rather than NPE
                    System.err.println("Skipping unreadable image " + file);
                    continue;
                }
                double[] features = finder.find(image, false);
                if (features != null) {
                    summaries.add(finder.getSummaryLine(file.getAbsolutePath(), features));
                }
            }
        }
        Path p = Paths.get("data/train-out-" + finder.getFileSuffix());
        Files.write(p, summaries);
    }
}
示例3: main
import boofcv.io.image.UtilImageIO; //导入方法依赖的package包/类
/**
 * Loads a stereo pair, robustly estimates the fundamental matrix from feature
 * matches, displays the surviving inlier associations, then rectifies the pair.
 */
public static void main( String args[] ) {
    // Input images have already had lens distortion removed
    String dir = "../data/applet/stereo/";
    BufferedImage left = UtilImageIO.loadImage(dir + "mono_wall_01.jpg");
    BufferedImage right = UtilImageIO.loadImage(dir + "mono_wall_03.jpg");

    // Match point features between the two views
    List<AssociatedPair> candidates = ExampleFundamentalMatrix.computeMatches(left, right);

    // Robustly fit the fundamental matrix; matches satisfying the epipolar
    // constraint are collected into 'consistent'
    List<AssociatedPair> consistent = new ArrayList<AssociatedPair>();
    DenseMatrix64F F = ExampleFundamentalMatrix.robustFundamental(candidates, consistent);

    // Visualize which associations survived the pruning
    AssociationPanel display = new AssociationPanel(20);
    display.setAssociation(consistent);
    display.setImages(left, right);
    ShowImages.showWindow(display, "Inlier Pairs");

    rectify(F, consistent, left, right);
}
示例4: openImage
import boofcv.io.image.UtilImageIO; //导入方法依赖的package包/类
/**
 * Loads an image from disk, memoizing the decoded result, and returns a
 * defensive copy so callers may freely modify the returned image without
 * corrupting the cache.
 *
 * @param fileName path of the image to load
 * @return a fresh copy of the loaded image
 * @throws RuntimeException if the image cannot be found or decoded
 */
@Override
public BufferedImage openImage(String fileName) {
    BufferedImage b = cachedImage.get(fileName);
    if( b == null ) {
        b = UtilImageIO.loadImage(fileName);
        if( b == null )
            throw new RuntimeException("Image cannot be found! "+fileName);
        cachedImage.put(fileName,b);
    }
    // Return a copy so modifications by the caller don't affect the cached image.
    // The BufferedImage(int,int,int) constructor throws IllegalArgumentException
    // for TYPE_CUSTOM (0), which decoders can report — fall back to INT_RGB.
    int type = b.getType() == BufferedImage.TYPE_CUSTOM ? BufferedImage.TYPE_INT_RGB : b.getType();
    BufferedImage c = new BufferedImage(b.getWidth(), b.getHeight(), type);
    Graphics2D g2 = c.createGraphics();
    g2.drawImage(b,0,0,null);
    g2.dispose(); // release the native graphics context promptly
    return c;
}
示例5: main
import boofcv.io.image.UtilImageIO; //导入方法依赖的package包/类
/**
 * Launches the TLD detection visualization on a single frame from the
 * ValidationBoof tracking benchmark.
 */
public static void main(String[] args) {
    String framePath = "/home/pja/projects/ValidationBoof/data/track_rect/TLD/01_david/00001.jpg";
    BufferedImage frame = UtilImageIO.loadImage(framePath);
    new VisualizeTldDetectionApp(frame, ImageUInt8.class);
}
示例6: get
import boofcv.io.image.UtilImageIO; //导入方法依赖的package包/类
/**
 * Supplies the next image, stepping backwards through the file list and
 * wrapping around to the last entry once the first has been served.
 *
 * @return the loaded image, or null when there are no files at all
 */
@Override
public BufferedImage get() {
    // Nothing to serve when the directory yielded no files
    if (files.length == 0) {
        return null;
    }
    File current = files[index];
    // Walk backwards; after index 0 wrap around to the final file
    index = (index == 0) ? files.length - 1 : index - 1;
    System.out.println("Using " + current);
    return UtilImageIO.loadImage(current.getAbsolutePath());
}
示例7: run
import boofcv.io.image.UtilImageIO; //导入方法依赖的package包/类
/**
 * Runs the Canny edge detector on a card image and displays three views:
 * the binary edge image, the Canny trace graph, and contours re-extracted
 * from the binary edge image.
 *
 * @throws IOException if the input image cannot be read
 */
private void run() throws IOException {
    BufferedImage image = UtilImageIO.loadImage(UtilIO.pathExample("C:\\development\\readySET\\deck\\1221.png"));
    GrayU8 gray = ConvertBufferedImage.convertFrom(image,(GrayU8)null);
    GrayU8 edgeImage = gray.createSameShape();

    // Create a canny edge detector which will dynamically compute the threshold
    // based on maximum edge intensity. It has also been configured to save the
    // trace as a graph — the graph created while performing hysteresis thresholding.
    CannyEdge<GrayU8,GrayS16> canny = FactoryEdgeDetectors.canny(2,true, true, GrayU8.class, GrayS16.class);

    // The edge image is actually an optional parameter. If you don't need it just pass in null
    canny.process(gray,0.1f,0.3f,edgeImage);

    // First get the contour created by canny
    List<EdgeContour> edgeContours = canny.getContours();
    // The 'edgeContours' is a tree graph that can be difficult to process. An alternative is to
    // extract the contours from the binary image, which will produce a single loop for each
    // connected cluster of pixels. Note that you are only interested in external contours.
    List<Contour> contours = BinaryImageOps.contour(edgeImage, ConnectRule.EIGHT, null);

    // display the results
    BufferedImage visualBinary = VisualizeBinaryData.renderBinary(edgeImage, false, null);
    BufferedImage visualCannyContour = VisualizeBinaryData.renderContours(edgeContours,null,
        gray.width,gray.height,null);
    BufferedImage visualEdgeContour = new BufferedImage(gray.width, gray.height,BufferedImage.TYPE_INT_RGB);
    VisualizeBinaryData.render(contours, (int[]) null, visualEdgeContour);

    ListDisplayPanel panel = new ListDisplayPanel();
    panel.addImage(visualBinary,"Binary Edges from Canny");
    panel.addImage(visualCannyContour, "Canny Trace Graph");
    panel.addImage(visualEdgeContour,"Contour from Canny Binary");
    ShowImages.showWindow(panel,"Canny Edge", true);
}
示例8: main
import boofcv.io.image.UtilImageIO; //导入方法依赖的package包/类
/**
 * Adds synthetic Gaussian noise to an image, removes it with a Bayes-shrink
 * wavelet denoiser, and shows input/noisy/denoised side by side in one panel.
 */
public static void main( String args[] ) {
    // load the input image, declare data structures, create a noisy image
    Random rand = new Random(234);
    File file = new File("/Users/mehdibenchoufi/Downloads/Example_lena_denoise_noisy.jpg");
    ImageFloat32 input = UtilImageIO.loadImage(file.getAbsolutePath(), ImageFloat32.class);
    ImageFloat32 noisy = input.clone();
    GImageMiscOps.addGaussian(noisy, rand, 20, 0, 255);
    ImageFloat32 denoised = new ImageFloat32(input.width, input.height);

    // How many levels in wavelet transform
    int numLevels = 4;
    // Create the noise removal algorithm
    WaveletDenoiseFilter<ImageFloat32> denoiser =
        FactoryImageDenoise.waveletBayes(ImageFloat32.class, numLevels, 0, 255);

    // remove noise from the image
    denoiser.process(noisy, denoised);

    // display the results. The panel was previously populated but never shown
    // (dead code) while two raw images were shown instead; show the panel so
    // all three stages are visible together.
    ListDisplayPanel gui = new ListDisplayPanel();
    gui.addImage(ConvertBufferedImage.convertTo(input, null), "Input");
    gui.addImage(ConvertBufferedImage.convertTo(noisy, null), "Noisy");
    gui.addImage(ConvertBufferedImage.convertTo(denoised, null), "Denoised");
    ShowImages.showWindow(gui, "Wavelet Noise Removal", true);
}
示例9: parseFrame
import boofcv.io.image.UtilImageIO; //导入方法依赖的package包/类
/**
 * Loads the RGB and depth data for one recorded frame into the preallocated
 * 'rgb' and 'depth' buffers.
 *
 * @param frameNumber zero-padded frame index used to build the file names
 * @throws IOException if a frame file cannot be read
 */
private void parseFrame(int frameNumber ) throws IOException {
    // RGB frames are stored as zero-padded PPM files
    String rgbFile = String.format("%s/rgb%07d.ppm", directory, frameNumber);
    UtilImageIO.loadPPM_U8(rgbFile, rgb, data);

    if( depthIsPng ) {
        // depth saved as a PNG image
        String depthFile = String.format("%s/depth%07d.png", directory, frameNumber);
        ConvertBufferedImage.convertFrom(UtilImageIO.loadImage(depthFile), depth);
    } else {
        // depth saved in the raw .depth format read by UtilOpenKinect
        UtilOpenKinect.parseDepth(String.format("%s/depth%07d.depth", directory, frameNumber), depth, data);
    }
}
示例10: next
import boofcv.io.image.UtilImageIO; //导入方法依赖的package包/类
/**
 * Advances to the next image in the sequence.
 *
 * @return true if an image was loaded, false when the sequence is exhausted
 * @throws RuntimeException if the next file exists in the list but cannot be opened
 */
@Override
public boolean next() {
    // Sequence exhausted
    if (index >= fileNames.size()) {
        return false;
    }
    name = fileNames.get(index++);
    image = UtilImageIO.loadImage(prefix + name);
    if (image == null) {
        throw new RuntimeException("Couldn't open " + (prefix + name));
    }
    return true;
}
示例11: main
import boofcv.io.image.UtilImageIO; //导入方法依赖的package包/类
/**
 * Detects Canny edges in a sample image and displays the binary edge image,
 * the Canny trace graph, and the external contours of the binary image.
 */
public static void main( String args[] ) {
    BufferedImage input = UtilImageIO.loadImage("../data/applet/simple_objects.jpg");
    ImageUInt8 gray = ConvertBufferedImage.convertFrom(input, (ImageUInt8) null);
    ImageUInt8 edges = new ImageUInt8(gray.width, gray.height);

    // The detector computes its threshold dynamically from the maximum edge
    // intensity and is configured to record the trace graph built while
    // performing hysteresis thresholding.
    CannyEdge<ImageUInt8,ImageSInt16> canny =
        FactoryEdgeDetectors.canny(2, true, true, ImageUInt8.class, ImageSInt16.class);

    // The output edge image is optional; pass null when it is not needed
    canny.process(gray, 0.1f, 0.3f, edges);

    // Contours as traced by Canny itself (a tree graph, harder to process)
    List<EdgeContour> edgeContours = canny.getContours();
    // Contours re-extracted from the binary image: one loop per connected
    // pixel cluster. Only external contours are of interest here.
    List<Contour> contours = BinaryImageOps.contour(edges, 8, null);

    // render each result and show it in its own window
    BufferedImage visualBinary = VisualizeBinaryData.renderBinary(edges, null);
    BufferedImage visualCannyContour =
        VisualizeBinaryData.renderContours(edgeContours, null, gray.width, gray.height, null);
    BufferedImage visualEdgeContour =
        VisualizeBinaryData.renderExternal(contours, null, gray.width, gray.height, null);

    ShowImages.showWindow(visualBinary, "Binary Edges from Canny");
    ShowImages.showWindow(visualCannyContour, "Canny Trace Graph");
    ShowImages.showWindow(visualEdgeContour, "Contour from Canny Binary");
}
示例12: main
import boofcv.io.image.UtilImageIO; //导入方法依赖的package包/类
/**
 * Estimates the fundamental matrix between two views of the same scene with
 * both a robust and a simple estimator, prints both, then displays the
 * inlier matches found by the robust estimator.
 */
public static void main( String args[] ) {
    String dir = "../data/evaluation/structure/";
    BufferedImage viewA = UtilImageIO.loadImage(dir + "undist_cyto_01.jpg");
    BufferedImage viewB = UtilImageIO.loadImage(dir + "undist_cyto_02.jpg");

    List<AssociatedPair> matches = computeMatches(viewA, viewB);

    // Matches consistent with the robustly estimated model are collected here
    List<AssociatedPair> inliers = new ArrayList<AssociatedPair>();

    // The two estimates should differ since the raw matches contain many false
    // associations. Note the fundamental matrix is only defined up to a scale factor.
    DenseMatrix64F F = robustFundamental(matches, inliers);
    System.out.println("Robust");
    F.print();

    F = simpleFundamental(matches);
    System.out.println("Simple");
    F.print();

    // display the inlier matches found using the robust estimator
    AssociationPanel panel = new AssociationPanel(20);
    panel.setAssociation(inliers);
    panel.setImages(viewA, viewB);
    ShowImages.showWindow(panel, "Inlier Pairs");
}
示例13: process
import boofcv.io.image.UtilImageIO; //导入方法依赖的package包/类
/**
 * Detects the calibration target in each sorted left/right image pair, runs
 * planar stereo calibration, prints its error statistics, and saves the
 * resulting parameters to stereo.xml.
 */
public void process() {
    // Declare and configure the calibration algorithm
    CalibrateStereoPlanar calibrator = new CalibrateStereoPlanar(detector, flipY);
    calibrator.configure(target, true, 2);

    // Sorting both lists keeps left/right file names paired up by index
    Collections.sort(left);
    Collections.sort(right);

    for (int i = 0; i < left.size(); i++) {
        BufferedImage bufLeft = UtilImageIO.loadImage(left.get(i));
        BufferedImage bufRight = UtilImageIO.loadImage(right.get(i));
        ImageFloat32 grayLeft = ConvertBufferedImage.convertFrom(bufLeft, (ImageFloat32) null);
        ImageFloat32 grayRight = ConvertBufferedImage.convertFrom(bufRight, (ImageFloat32) null);
        if (!calibrator.addPair(grayLeft, grayRight)) {
            System.out.println("Failed to detect target in "+left.get(i)+" and/or "+right.get(i));
        }
    }

    // Run calibration over all accepted pairs
    StereoParameters stereoCalib = calibrator.process();

    // Report accuracy and error statistics
    calibrator.printStatistics();

    // Persist the result and echo it to stdout
    BoofMiscOps.saveXML(stereoCalib, "stereo.xml");
    stereoCalib.print();

    // Note that the stereo baseline translation will be specified in the same
    // units as the calibration grid — millimeters (mm) in this example.
}
示例14: main
import boofcv.io.image.UtilImageIO; //导入方法依赖的package包/类
/**
 * Demonstrates color-based selection on a sample image: first interactively
 * via mouse clicks, then with two pre-selected hue/saturation pairs.
 */
public static void main( String args[] ) {
    BufferedImage input = UtilImageIO.loadImage("../data/applet/sunflowers.jpg");

    // Let the user click on the image to select a color
    printClickedColor(input);

    // Display results for two pre-selected colors
    showSelectedColor("Yellow", input, 1f, 1f);
    showSelectedColor("Green", input, 1.5f, 0.65f);
}
示例15: main
import boofcv.io.image.UtilImageIO; //导入方法依赖的package包/类
/**
 * Detects chessboard calibration points in a single stereo calibration image
 * and draws the ordered detections on top of it.
 */
public static void main( String args[] ) {
    // load the test image
    String directory = "../data/evaluation/calibration/stereo/Bumblebee2_Chess";
    BufferedImage orig = UtilImageIO.loadImage(directory + "/left01.jpg");
    ImageFloat32 input = ConvertBufferedImage.convertFrom(orig, (ImageFloat32) null);

    // A chessboard target detector. For square-grid targets swap in
    // FactoryPlanarCalibrationTarget.detectorSquareGrid(...) and, for
    // chessboards, tune the RADIUS parameter for your images.
    PlanarCalibrationDetector detector =
        FactoryPlanarCalibrationTarget.detectorChessboard( new ConfigChessboard(5,7));

    // process the image and check for failure condition
    if( !detector.process(input) )
        throw new RuntimeException("Target detection failed!");

    // Ordered observations of calibration points on the target
    List<Point2D_F64> points = detector.getPoints();

    // render and display the results
    Graphics2D g2 = orig.createGraphics();
    for( Point2D_F64 p : points )
        VisualizeFeatures.drawPoint(g2, (int) p.x, (int) p.y, 3, Color.RED);
    ShowImages.showWindow(orig, "Calibration Points");
}