本文整理汇总了Java中boofcv.io.image.ConvertBufferedImage类的典型用法代码示例。如果您正苦于以下问题:Java ConvertBufferedImage类的具体用法?Java ConvertBufferedImage怎么用?Java ConvertBufferedImage使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
ConvertBufferedImage类属于boofcv.io.image包,在下文中一共展示了ConvertBufferedImage类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getResolvedImage
import boofcv.io.image.ConvertBufferedImage; //导入依赖的package包/类
/**
 * Removes the perspective distortion defined by the four {@code points} corners and
 * returns a rectified 300x300 view of the region, or {@code null} if the warp fails.
 */
public BufferedImage getResolvedImage(BufferedImage src)
{
    // Planar float image so each RGB band is warped independently.
    Planar<GrayF32> planarInput = ConvertBufferedImage.convertFromMulti(src, null, true, GrayF32.class);

    RemovePerspectiveDistortion<Planar<GrayF32>> distortionRemover =
            new RemovePerspectiveDistortion<>(300, 300, ImageType.pl(3, GrayF32.class));

    // The four corners of the quadrilateral to rectify, in the order stored in `points`.
    Point2D_F64 corner0 = new Point2D_F64(points[0].x, points[0].y);
    Point2D_F64 corner1 = new Point2D_F64(points[1].x, points[1].y);
    Point2D_F64 corner2 = new Point2D_F64(points[2].x, points[2].y);
    Point2D_F64 corner3 = new Point2D_F64(points[3].x, points[3].y);

    boolean success = distortionRemover.apply(planarInput, corner0, corner1, corner2, corner3);
    if (!success) {
        return null;
    }

    return ConvertBufferedImage.convertTo_F32(distortionRemover.getOutput(), null, true);
}
示例2: coupledHueSat
import boofcv.io.image.ConvertBufferedImage; //导入依赖的package包/类
/**
* HSV stores color information in Hue and Saturation while intensity is in Value. This computes a 2D histogram
* from hue and saturation only, which makes it lighting independent.
*/
/**
 * HSV stores color information in Hue and Saturation while intensity is in Value. This computes a 2D histogram
 * from hue and saturation only, which makes it lighting independent.
 */
public static double[] coupledHueSat(BufferedImage image) {
    int width = image.getWidth();
    int height = image.getHeight();

    Planar<GrayF32> rgbImage = new Planar<>(GrayF32.class, width, height, 3);
    Planar<GrayF32> hsvImage = new Planar<>(GrayF32.class, width, height, 3);

    ConvertBufferedImage.convertFrom(image, rgbImage, true);
    ColorHsv.rgbToHsv_F32(rgbImage, hsvImage);

    // Only bands 0 (hue) and 1 (saturation) are binned; value is dropped.
    Planar<GrayF32> hueSat = hsvImage.partialSpectrum(0, 1);

    // The number of bins is an important parameter. Try adjusting it
    Histogram_F64 histogram = new Histogram_F64(10, 10);
    histogram.setRange(0, 0, 2.0 * Math.PI); // range of hue is from 0 to 2PI
    histogram.setRange(1, 0, 1.0);           // range of saturation is from 0 to 1

    GHistogramFeatureOps.histogram(hueSat, histogram);

    histogram.value[0] = 0.0;           // remove black
    UtilFeature.normalizeL2(histogram); // normalize so that image size doesn't matter

    return histogram.value;
}
示例3: coupledRGB
import boofcv.io.image.ConvertBufferedImage; //导入依赖的package包/类
/**
* Constructs a 3D histogram using RGB. RGB is a popular color space, but the resulting histogram will
* depend on lighting conditions and might not produce the accurate results.
*/
/**
 * Constructs a 3D histogram using RGB. RGB is a popular color space, but the resulting histogram will
 * depend on lighting conditions and might not produce the accurate results.
 */
public static double[] coupledRGB(BufferedImage image) {
    // Allocate the planar image directly at the target size (equivalent to create-then-reshape).
    Planar<GrayF32> rgbImage = new Planar<>(GrayF32.class, image.getWidth(), image.getHeight(), 3);
    ConvertBufferedImage.convertFrom(image, rgbImage, true);

    // The number of bins is an important parameter. Try adjusting it
    Histogram_F64 histogram = new Histogram_F64(5, 5, 5);
    for (int band = 0; band < 3; band++) {
        histogram.setRange(band, 0, 255);
    }

    GHistogramFeatureOps.histogram(rgbImage, histogram);

    histogram.value[0] = 0.0;           // remove black
    UtilFeature.normalizeL2(histogram); // normalize so that image size doesn't matter

    return histogram.value;
}
示例4: removeGray
import boofcv.io.image.ConvertBufferedImage; //导入依赖的package包/类
/**
 * Copies only the non-gray pixels of {@code image} into a new TYPE_INT_RGB image;
 * pixels whose R/G/B channels are all within {@code GRAY_TOLERANCE} of each other
 * (i.e. near-gray) are left black in the output.
 *
 * @param image source image; not modified.
 * @return a new image containing only the colorful pixels of the input.
 */
public static BufferedImage removeGray(BufferedImage image) {
    Planar<GrayF32> input = ConvertBufferedImage.convertFromMulti(image, null, true, GrayF32.class);
    BufferedImage output = new BufferedImage(input.width, input.height, BufferedImage.TYPE_INT_RGB);

    // Hoist the loop-invariant band lookups and tolerance out of the per-pixel loop;
    // the original re-fetched all three bands for every pixel.
    final GrayF32 band0 = input.getBand(0);
    final GrayF32 band1 = input.getBand(1);
    final GrayF32 band2 = input.getBand(2);
    final int tol = 20; // max per-channel difference for a pixel to count as "gray"

    for (int y = 0; y < input.height; y++) {
        for (int x = 0; x < input.width; x++) {
            float v0 = band0.get(x, y);
            float v1 = band1.get(x, y);
            float v2 = band2.get(x, y);
            boolean isGray = Math.abs(v0 - v1) < tol
                    && Math.abs(v1 - v2) < tol
                    && Math.abs(v0 - v2) < tol;
            if (!isGray) {
                output.setRGB(x, y, image.getRGB(x, y));
            }
        }
    }
    return output;
}
示例5: getContours
import boofcv.io.image.ConvertBufferedImage; //导入依赖的package包/类
/**
* Applies a contour-detection algorithm on the provided image and returns a list of detected contours. First, the image
* is converted to a BinaryImage using a threshold algorithm (Otsu). Afterwards, blobs in the image are detected using
* an 8-connect rule.
*
* @param image BufferedImage in which contours should be detected.
* @return List of contours.
*/
/**
 * Applies a contour-detection algorithm on the provided image and returns a list of detected contours. First, the image
 * is converted to a BinaryImage using a threshold algorithm (Otsu). Afterwards, blobs in the image are detected using
 * an 8-connect rule.
 *
 * @param image BufferedImage in which contours should be detected.
 * @return List of contours.
 */
public static List<Contour> getContours(BufferedImage image) {
    /* Draw a black frame around the image so as to make sure that all detected contours are internal contours. */
    /* BufferedImage's (w,h,type) constructor throws IllegalArgumentException for TYPE_CUSTOM (0),
     * which e.g. ImageIO can produce; fall back to TYPE_INT_RGB in that case. */
    int type = image.getType();
    if (type == BufferedImage.TYPE_CUSTOM) {
        type = BufferedImage.TYPE_INT_RGB;
    }
    BufferedImage resized = new BufferedImage(image.getWidth() + 4, image.getHeight() + 4, type);
    Graphics g = resized.getGraphics();
    try {
        g.setColor(Color.BLACK);
        g.fillRect(0, 0, resized.getWidth(), resized.getHeight());
        g.drawImage(image, 2, 2, image.getWidth(), image.getHeight(), null);
    } finally {
        g.dispose(); // release the native graphics context
    }
    /* Convert the BufferedImage to a gray-scale image and prepare the binary image. */
    GrayF32 input = ConvertBufferedImage.convertFromSingle(resized, null, GrayF32.class);
    GrayU8 binary = new GrayU8(input.width, input.height);
    GrayS32 label = new GrayS32(input.width, input.height);
    /* Select a global threshold using Otsu's method and apply that threshold. */
    double threshold = GThresholdImageOps.computeOtsu(input, 0, 255);
    ThresholdImageOps.threshold(input, binary, (float) threshold, true);
    /* Remove small blobs through erosion and dilation; The null in the input indicates that it should internally
     * declare the work image it needs this is less efficient, but easier to code. */
    GrayU8 filtered = BinaryImageOps.erode8(binary, 1, null);
    filtered = BinaryImageOps.dilate8(filtered, 1, null);
    /* Detect blobs inside the image using an 8-connect rule. */
    return BinaryImageOps.contour(filtered, ConnectRule.EIGHT, label);
}
示例6: getEdgePixels
import boofcv.io.image.ConvertBufferedImage; //导入依赖的package包/类
/**
 * Marks every edge pixel of {@code img} in a boolean mask of size width*height.
 * Canny edge detection is skipped when the image is solid (uniform), in which case
 * the raw gray values decide the mask. The supplied {@code out} array is reused when
 * it has the right length; otherwise a fresh one is allocated.
 */
public static boolean[] getEdgePixels(MultiImage img, boolean[] out) {
    LOGGER.traceEntry();

    int pixelCount = img.getWidth() * img.getHeight();
    if (out == null || out.length != pixelCount) {
        out = new boolean[pixelCount];
    }

    GrayU8 gray = ConvertBufferedImage.convertFrom(img.getBufferedImage(), (GrayU8) null);
    if (!isSolid(gray)) {
        // In-place: the gray image is overwritten with the Canny edge map.
        getCanny().process(gray, THRESHOLD_LOW, THRESHOLD_HIGH, gray);
    }

    int i = 0;
    for (byte value : gray.data) {
        out[i++] = value != 0;
    }

    LOGGER.traceExit();
    return out;
}
示例7: processRgb
import boofcv.io.image.ConvertBufferedImage; //导入依赖的package包/类
/*
 * Callback for each RGB frame: converts the raw buffer into `outRgb`, overlays the four
 * UI buttons and their state, then repaints the window. Lazily creates the output image
 * and display window on the first frame.
 *
 * NOTE(review): on a non-RGB frame this only logs and then still processes the buffer as
 * RGB — confirm whether it should return early instead.
 */
protected void processRgb(FrameMode mode, ByteBuffer frame, int timestamp) {
if (mode.getVideoFormat() != VideoFormat.RGB) {
System.out.println("Bad rgb format!");
}
// First frame: size the working image and open the display window.
if (outRgb == null) {
rgb.reshape(mode.getWidth(), mode.getHeight());
outRgb = new BufferedImage(rgb.width, rgb.height, BufferedImage.TYPE_INT_RGB);
guiRgb = ShowImages.showWindow(outRgb, "RGB Image");
}
// Copy the raw frame bytes into `rgb`, then into the displayable BufferedImage.
App.bufferRgbToMsU8(frame, rgb);
ConvertBufferedImage.convertTo_U8(rgb, outRgb, true);
// Overlay the button graphics, then evaluate each button's state against this frame.
drawButton(buttonMove, outRgb);
drawButton(buttonStop, outRgb);
drawButton(buttonLeft, outRgb);
drawButton(buttonRight, outRgb);
processButtonStatePhaseTwo(buttonMove, outRgb);
processButtonStatePhaseTwo(buttonStop, outRgb);
processButtonStatePhaseTwo(buttonLeft, outRgb);
processButtonStatePhaseTwo(buttonRight, outRgb);
guiRgb.repaint();
}
示例8: countPills
import boofcv.io.image.ConvertBufferedImage; //导入依赖的package包/类
/**
* Count pills.
*
* @param image the image
* @return the int
* @throws Exception the exception
*/
/**
 * Estimates the number of pills in an image by thresholding it to a binary image,
 * counting the lit pixels, and comparing that count against per-pill pixel budgets.
 *
 * @param image the image
 * @return the estimated pill count, or -1 if the pixel count exceeds every candidate budget
 * @throws Exception the exception
 */
public static int countPills( BufferedImage image ) throws Exception {
    GrayF32 input = ConvertBufferedImage.convertFromSingle(image, null, GrayF32.class);
    GrayU8 binary = new GrayU8(input.width, input.height);
    int totPixels = 0;
    // Row-major traversal (y outer) matches the image's memory layout; the original
    // iterated x outer / y inner, which strides across rows on every access.
    for (int y = 0; y < input.height; y++) {
        for (int x = 0; x < input.width; x++) {
            int binout = input.get(x, y) < PIXEL_THRESHOLD ? 0 : 1;
            binary.set(x, y, binout);
            totPixels += binout;
        }
    }
    dumpImage(binary, input.width, input.height);
    int numPills = -1;
    // Smallest pill count whose pixel budget covers the observed lit-pixel total.
    for (int checkNumPills = 1; checkNumPills < CHECK_MAX_NUM_PILLS; checkNumPills++) {
        int checkMaxPixels = (int) (checkNumPills * PIXELS_PER_PILL * PIXELS_PER_PILL_FUDGE_FACTOR);
        if (totPixels <= checkMaxPixels) {
            numPills = checkNumPills;
            break;
        }
    }
    logger.info("NumPills found in image: {}", numPills);
    return numPills;
}
示例9: processRgb
import boofcv.io.image.ConvertBufferedImage; //导入依赖的package包/类
/*
 * Kinect RGB-frame callback: converts the raw frame buffer into `outRgb` and repaints.
 * Lazily creates the output image and display window on the first frame.
 *
 * NOTE(review): on a non-RGB frame this only logs and then still processes the buffer as
 * RGB — confirm whether it should return early instead.
 */
protected void processRgb(FrameMode mode, ByteBuffer frame, int timestamp) {
if (mode.getVideoFormat() != VideoFormat.RGB) {
System.out.println("Bad rgb format!");
}
System.out.println("Got rgb! " + timestamp);
// First frame: size the working image and open the display window.
if (outRgb == null) {
rgb.reshape(mode.getWidth(), mode.getHeight());
outRgb = new BufferedImage(rgb.width, rgb.height, BufferedImage.TYPE_INT_RGB);
guiRgb = ShowImages.showWindow(outRgb, "RGB Image");
}
// Copy the raw frame bytes into `rgb`, then into the displayable BufferedImage.
UtilOpenKinect.bufferRgbToMsU8(frame, rgb);
ConvertBufferedImage.convertTo_U8(rgb, outRgb, true);
guiRgb.repaint();
}
示例10: getCannyContours
import boofcv.io.image.ConvertBufferedImage; //导入依赖的package包/类
/**
 * Runs Canny edge detection (fixed thresholds 0.1/0.3) on the image and returns the
 * contours of the resulting edge map using an 8-connect rule.
 */
public static List<Contour> getCannyContours(BufferedImage image) {
    GrayU8 grayImage = ConvertBufferedImage.convertFrom(image, (GrayU8) null);
    // The edge map is written into a same-sized working image.
    GrayU8 edges = grayImage.createSameShape();
    canny.process(grayImage, 0.1f, 0.3f, edges);
    return BinaryImageOps.contour(edges, ConnectRule.EIGHT, null);
}
示例11: ImageDesc
import boofcv.io.image.ConvertBufferedImage; //导入依赖的package包/类
/*
 * Builds the descriptor for an image: computes a 2x2 average hash into the `hash` field
 * and fills the `desc` field by running describeImage on a gray-scale conversion.
 */
public ImageDesc(BufferedImage in)
{
// Lazily initialize the shared AverageHash state with a 2x2 grid.
if(!AverageHash.isInitiated())
{
AverageHash.init(2, 2);
}
hash = AverageHash.avgHash(in,2,2);
// Convert to single-band float gray-scale for feature description.
GrayF32 img = ConvertBufferedImage.convertFromSingle(in, null, GrayF32.class);
// Clear any previous descriptors before describing this image.
desc.reset();
describeImage(img,desc);
}
示例12: independentHueSat
import boofcv.io.image.ConvertBufferedImage; //导入依赖的package包/类
/**
* Computes two independent 1D histograms from hue and saturation. Less affects by sparsity, but can produce
* worse results since the basic assumption that hue and saturation are decoupled is most of the time false.
*/
/**
 * Computes two independent 1D histograms from hue and saturation. Less affected by sparsity, but can produce
 * worse results since the basic assumption that hue and saturation are decoupled is most of the time false.
 */
public static double[] independentHueSat(BufferedImage image) {
    // The number of bins is an important parameter. Try adjusting it
    TupleDesc_F64 histogramHue = new TupleDesc_F64(5);
    // Renamed from the original's misleading "histogramValue": band 1 of HSV is saturation, not value.
    TupleDesc_F64 histogramSaturation = new TupleDesc_F64(5);

    List<TupleDesc_F64> histogramList = new ArrayList<>();
    histogramList.add(histogramHue);
    histogramList.add(histogramSaturation);

    Planar<GrayF32> rgb = new Planar<>(GrayF32.class, 1, 1, 3);
    Planar<GrayF32> hsv = new Planar<>(GrayF32.class, 1, 1, 3);
    rgb.reshape(image.getWidth(), image.getHeight());
    hsv.reshape(image.getWidth(), image.getHeight());

    ConvertBufferedImage.convertFrom(image, rgb, true);
    ColorHsv.rgbToHsv_F32(rgb, hsv);

    // Band 0 = hue (0..2PI), band 1 = saturation (0..1).
    GHistogramFeatureOps.histogram(hsv.getBand(0), 0, 2 * Math.PI, histogramHue);
    GHistogramFeatureOps.histogram(hsv.getBand(1), 0, 1, histogramSaturation);

    // need to combine them into a single descriptor for processing later on
    TupleDesc_F64 imageHist = UtilFeature.combine(histogramList, null);
    UtilFeature.normalizeL2(imageHist); // normalize so that image size doesn't matter
    return imageHist.value;
}
示例13: filterBackgroundOut
import boofcv.io.image.ConvertBufferedImage; //导入依赖的package包/类
/**
 * Keeps only the pixels whose hue/saturation differ sufficiently from the color sampled
 * at pixel (1,1) — treated as the background color — and returns them on a black canvas.
 */
public static BufferedImage filterBackgroundOut(BufferedImage image) {
    Planar<GrayF32> input = ConvertBufferedImage.convertFromMulti(image, null, true, GrayF32.class);
    Planar<GrayF32> hsv = new Planar<>(GrayF32.class, input.getWidth(), input.getHeight(), 3);

    // Convert into HSV so intensity can be ignored.
    ColorHsv.rgbToHsv_F32(input, hsv);

    // Extract hue and saturation bands, which are independent of intensity.
    GrayF32 hueBand = hsv.getBand(0);
    GrayF32 satBand = hsv.getBand(1);

    // Reference (background) color sampled near the image corner.
    float refHue = hueBand.get(1, 1);
    float refSat = satBand.get(1, 1);

    // Euclidean distance squared threshold for deciding set membership.
    float maxDist2 = 0.4f * 0.4f;

    // Scale factor balancing the importance of Hue (0..2*PI) vs Saturation (0..1).
    float adjustUnits = (float) (Math.PI / 2.0);

    BufferedImage output = new BufferedImage(input.width, input.height, BufferedImage.TYPE_INT_RGB);
    for (int y = 0; y < hsv.height; y++) {
        for (int x = 0; x < hsv.width; x++) {
            // Hue is an angle in radians, so plain subtraction doesn't work.
            float dh = UtilAngle.dist(hueBand.unsafe_get(x, y), refHue);
            float ds = (satBand.unsafe_get(x, y) - refSat) * adjustUnits;
            // A naive distance measure, but good enough to demonstrate the concept.
            float dist2 = dh * dh + ds * ds;
            if (dist2 > maxDist2 * 4) {
                output.setRGB(x, y, image.getRGB(x, y));
            }
        }
    }
    return output;
}
示例14: maskBackground
import boofcv.io.image.ConvertBufferedImage; //导入依赖的package包/类
/**
 * Masks out the background of an image: thresholds the gray-scale conversion against the
 * pixel value at (1,1) (assumed to be background — sampling a single pixel; averaging a
 * region would be more robust) and applies the resulting mask via {@code mask(...)}.
 */
public static BufferedImage maskBackground(BufferedImage image) {
    GrayU8 grayImage = ConvertBufferedImage.convertFromSingle(image, null, GrayU8.class);
    // Sample the background level from a corner pixel.
    int backgroundLevel = grayImage.get(1, 1);
    GrayU8 binaryMask = ThresholdImageOps.threshold(grayImage, null, backgroundLevel, true);
    GrayF32 floatMask = ConvertImage.convert(binaryMask, (GrayF32) null);
    return mask(image, floatMask);
}
示例15: run
import boofcv.io.image.ConvertBufferedImage; //导入依赖的package包/类
/*
 * Demo entry point: loads a fixed example image, runs a dynamic-threshold Canny edge
 * detector, extracts both the Canny trace graph and the binary-image contours, and
 * shows all three visualizations in a list panel.
 *
 * NOTE(review): the image path is a hard-coded absolute Windows path — this will only
 * run on that machine.
 */
private void run() throws IOException {
BufferedImage image = UtilImageIO.loadImage(UtilIO.pathExample("C:\\development\\readySET\\deck\\1221.png"));
GrayU8 gray = ConvertBufferedImage.convertFrom(image,(GrayU8)null);
GrayU8 edgeImage = gray.createSameShape();
// Create a canny edge detector which will dynamically compute the threshold based on maximum edge intensity
// It has also been configured to save the trace as a graph. This is the graph created while performing
// hysteresis thresholding.
CannyEdge<GrayU8,GrayS16> canny = FactoryEdgeDetectors.canny(2,true, true, GrayU8.class, GrayS16.class);
// The edge image is actually an optional parameter. If you don't need it just pass in null
canny.process(gray,0.1f,0.3f,edgeImage);
// First get the contour created by canny
List<EdgeContour> edgeContours = canny.getContours();
// The 'edgeContours' is a tree graph that can be difficult to process. An alternative is to extract
// the contours from the binary image, which will produce a single loop for each connected cluster of pixels.
// Note that you are only interested in external contours. (original comment garbled: "verticesnal")
List<Contour> contours = BinaryImageOps.contour(edgeImage, ConnectRule.EIGHT, null);
// display the results
BufferedImage visualBinary = VisualizeBinaryData.renderBinary(edgeImage, false, null);
BufferedImage visualCannyContour = VisualizeBinaryData.renderContours(edgeContours,null,
gray.width,gray.height,null);
BufferedImage visualEdgeContour = new BufferedImage(gray.width, gray.height,BufferedImage.TYPE_INT_RGB);
VisualizeBinaryData.render(contours, (int[]) null, visualEdgeContour);
ListDisplayPanel panel = new ListDisplayPanel();
panel.addImage(visualBinary,"Binary Edges from Canny");
panel.addImage(visualCannyContour, "Canny Trace Graph");
panel.addImage(visualEdgeContour,"Contour from Canny Binary");
ShowImages.showWindow(panel,"Canny Edge", true);
}