本文整理汇总了Java中boofcv.core.image.ConvertBufferedImage.convertFrom方法的典型用法代码示例。如果您正苦于以下问题:Java ConvertBufferedImage.convertFrom方法的具体用法?Java ConvertBufferedImage.convertFrom怎么用?Java ConvertBufferedImage.convertFrom使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类boofcv.core.image.ConvertBufferedImage的用法示例。
在下文中一共展示了ConvertBufferedImage.convertFrom方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: next
import boofcv.core.image.ConvertBufferedImage; //导入方法依赖的package包/类
/**
 * Returns the next frame converted into the BoofCV image type T.
 * If a reduced-size buffer exists the input frame is first downscaled
 * by 'factor' into it; otherwise the full-resolution frame is used.
 */
@Override
public T next() {
    if (reducedImage != null) {
        // Downscale the current frame into the reusable reduced buffer
        Graphics2D g2 = reducedImage.createGraphics();
        try {
            g2.scale(1.0 / factor, 1.0 / factor);
            g2.drawImage(bufferedImage, 0, 0, null);
        } finally {
            // Graphics contexts hold native resources; the original code
            // never released this one — dispose() fixes the leak
            g2.dispose();
        }
        imageGUI = reducedImage;
    } else {
        imageGUI = bufferedImage;
    }
    // Resize the output to match the selected buffer, then convert
    image.reshape(imageGUI.getWidth(), imageGUI.getHeight());
    ConvertBufferedImage.convertFrom(imageGUI, image);
    return image;
}
示例2: sharpen
import boofcv.core.image.ConvertBufferedImage; //导入方法依赖的package包/类
/**
 * Demonstrates sharpening: edge intensity is exaggerated while flat regions
 * are left unchanged. Shows 4-connect and 8-connect sharpen results next to
 * the original image.
 */
public static void sharpen() {
    // alternative input image: "../data/applet/enhance/dull.jpg"
    BufferedImage loaded = UtilImageIO.loadImage("../data/applet/enhance/dark.jpg");

    ImageUInt8 original = ConvertBufferedImage.convertFrom(loaded, (ImageUInt8) null);
    ImageUInt8 enhanced = new ImageUInt8(original.width, original.height);

    ListDisplayPanel gui = new ListDisplayPanel();

    EnhanceImageOps.sharpen4(original, enhanced);
    gui.addImage(ConvertBufferedImage.convertTo(enhanced, null), "Sharpen-4");

    EnhanceImageOps.sharpen8(original, enhanced);
    gui.addImage(ConvertBufferedImage.convertTo(enhanced, null), "Sharpen-8");

    gui.addImage(ConvertBufferedImage.convertTo(original, null), "Original");

    gui.setPreferredSize(new Dimension(original.width, original.height));
    ShowImages.showWindow(gui, "Sharpen");
}
示例3: loadObservations
import boofcv.core.image.ConvertBufferedImage; //导入方法依赖的package包/类
/**
 * Detects calibration points in each calibration image and returns one list
 * of observations per image. Not the focus of this example.
 *
 * @return detected calibration points, one list per input image
 */
public static List<List<Point2D_F64>> loadObservations() {
    String directory = "../data/evaluation/calibration/stereo/Bumblebee2_Chess";
    List<String> imageNames = BoofMiscOps.directoryList(directory, "left");

    PlanarCalibrationDetector detector =
            FactoryPlanarCalibrationTarget.detectorChessboard(new ConfigChessboard(5, 7));

    List<List<Point2D_F64>> observations = new ArrayList<List<Point2D_F64>>();
    for (String fileName : imageNames) {
        BufferedImage buffered = UtilImageIO.loadImage(fileName);
        ImageFloat32 gray = ConvertBufferedImage.convertFrom(buffered, (ImageFloat32) null);

        if (!detector.process(gray))
            throw new RuntimeException("Detection failed!");

        observations.add(detector.getPoints());
    }
    return observations;
}
示例4: createTestImage
import boofcv.core.image.ConvertBufferedImage; //导入方法依赖的package包/类
/**
 * Renders a synthetic test image: white background with three black
 * rectangles (one axis aligned, two rotated), converts it into the BoofCV
 * work image, adds a small amount of noise, and precomputes the gradient.
 */
private void createTestImage() {
    BufferedImage workImg = new BufferedImage(width, height, BufferedImage.TYPE_INT_BGR);
    Graphics2D g2 = workImg.createGraphics();
    try {
        g2.setColor(Color.WHITE);
        g2.fillRect(0, 0, width, height);
        g2.setColor(Color.BLACK);
        addRectangle(g2, new AffineTransform(), 40, 50, 60, 50);
        AffineTransform tran = new AffineTransform();
        tran.setToRotation(0.5);
        addRectangle(g2, tran, 120, 140, 60, 50);
        tran.setToRotation(-1.2);
        addRectangle(g2, tran, -120, 200, 60, 40);
    } finally {
        // release native graphics resources (was leaked in the original)
        g2.dispose();
    }
    ConvertBufferedImage.convertFrom(workImg, image);
    // small uniform noise so the image is not perfectly synthetic
    ImageMiscOps.addUniform(image, rand, -2, 2);
    ImageBorder_I32<ImageUInt8> border = (ImageBorder_I32) FactoryImageBorder.general(image, BoofDefaults.DERIV_BORDER_TYPE);
    GradientSobel.process(image, derivX, derivY, border);
}
示例5: process
import boofcv.core.image.ConvertBufferedImage; //导入方法依赖的package包/类
/**
 * Converts the input image into gray scale, runs target detection, then
 * schedules a GUI refresh on the Swing event dispatch thread.
 */
public synchronized void process( final BufferedImage input ) {
    this.input = input;
    final int w = input.getWidth();
    final int h = input.getHeight();

    workImage = new BufferedImage(w, h, BufferedImage.TYPE_INT_RGB);
    gray.reshape(w, h);
    ConvertBufferedImage.convertFrom(input, gray);
    intensity.reshape(gray.width, gray.height);

    detectTarget();

    // GUI updates must happen on the EDT
    SwingUtilities.invokeLater(new Runnable() {
        @Override
        public void run() {
            gui.setPreferredSize(new Dimension(w, h));
            renderOutput();
        }
    });
}
示例6: process
import boofcv.core.image.ConvertBufferedImage; //导入方法依赖的package包/类
/**
 * Stores the new input image, resizes the gray and binary work images to
 * match, converts the input into gray scale, and reprocesses everything.
 */
public synchronized void process( final BufferedImage input ) {
    this.input = input;
    final int w = input.getWidth();
    final int h = input.getHeight();

    workImage = new BufferedImage(w, h, BufferedImage.TYPE_INT_RGB);
    gray.reshape(w, h);
    binary.reshape(gray.width, gray.height);
    ConvertBufferedImage.convertFrom(input, gray);

    doRefreshAll();
}
示例7: nextFrame
import boofcv.core.image.ConvertBufferedImage; //导入方法依赖的package包/类
/**
 * Callback invoked when a new video frame is available. Converts the frame
 * into the BoofCV image, forwards it to the client callback, recycles the
 * frame, and shuts down capture if the client requested a stop.
 */
@Override
public void nextFrame(VideoFrame frame) {
    // This method is called when a new frame is ready.
    // Don't forget to recycle it when done dealing with the frame.
    ConvertBufferedImage.convertFrom(frame.getBufferedImage(),imageBoof);
    callback.nextFrame(imageBoof,frame.getBufferedImage(),frame.getCaptureTime());
    // recycle the frame so the capture system can reuse its buffer
    frame.recycle();
    // honor a stop request from the client callback
    if( callback.stopRequested() ) {
        cleanupCapture();
    }
}
示例8: main
import boofcv.core.image.ConvertBufferedImage; //导入方法依赖的package包/类
public static void main( String args[] ){
BufferedImage input = UtilImageIO.loadImage(fileName);
ImageFloat32 inputF32 = ConvertBufferedImage.convertFrom(input,(ImageFloat32)null);
ImageFloat32 blurred = new ImageFloat32(inputF32.width,inputF32.height);
ImageFloat32 derivX = new ImageFloat32(inputF32.width,inputF32.height);
ImageFloat32 derivY = new ImageFloat32(inputF32.width,inputF32.height);
ImageFloat32 intensity = new ImageFloat32(inputF32.width,inputF32.height);
ImageFloat32 orientation = new ImageFloat32(inputF32.width,inputF32.height);
ImageFloat32 suppressed = new ImageFloat32(inputF32.width,inputF32.height);
ImageSInt8 direction = new ImageSInt8(inputF32.width,inputF32.height);
ImageUInt8 output = new ImageUInt8(inputF32.width,inputF32.height);
BlurStorageFilter<ImageFloat32> blur = FactoryBlurFilter.gaussian(ImageFloat32.class,-1,2);
ImageGradient<ImageFloat32,ImageFloat32> gradient = FactoryDerivative.sobel_F32();
blur.process(inputF32,blurred);
gradient.process(blurred,derivX,derivY);
float threshLow = 5;
float threshHigh = 40;
GradientToEdgeFeatures.intensityE(derivX,derivY,intensity);
GradientToEdgeFeatures.direction(derivX,derivY,orientation);
GradientToEdgeFeatures.discretizeDirection4(orientation,direction);
GradientToEdgeFeatures.nonMaxSuppression4(intensity,direction,suppressed);
ShowImages.showWindow(suppressed,"Suppressed Intensity",true);
BufferedImage renderedOrientation = VisualizeEdgeFeatures.renderOrientation4(direction,suppressed,threshLow,null);
HysteresisEdgeTraceMark hysteresis = new HysteresisEdgeTraceMark();
hysteresis.process(suppressed,direction,threshLow,threshHigh,output);
BufferedImage renderedLabel = VisualizeBinaryData.renderBinary(output, null);
ShowImages.showWindow(intensity,"Raw Intensity",true);
ShowImages.showWindow(renderedOrientation,"Orientation");
ShowImages.showWindow(renderedLabel,"Labeled Contours");
}
示例9: changeInputScale
import boofcv.core.image.ConvertBufferedImage; //导入方法依赖的package包/类
/**
 * Rescales the calibration parameters and the input stereo pair to the user
 * selected scale, recreates the algorithm and its work images, then
 * rectifies and reprocesses everything.
 */
@Override
public synchronized void changeInputScale() {
    calib = new StereoParameters(origCalib);

    double scale = control.inputScale;
    PerspectiveOps.scaleIntrinsic(calib.left, scale);
    PerspectiveOps.scaleIntrinsic(calib.right, scale);

    int w = (int)(origLeft.getWidth()*scale);
    int h = (int)(origLeft.getHeight()*scale);

    colorLeft = new BufferedImage(w, h, BufferedImage.TYPE_INT_BGR);
    colorRight = new BufferedImage(w, h, BufferedImage.TYPE_INT_BGR);

    // render the scaled originals; dispose the graphics contexts to avoid
    // leaking native resources (the original chained calls leaked them)
    Graphics2D gLeft = colorLeft.createGraphics();
    gLeft.drawImage(origLeft, AffineTransform.getScaleInstance(scale, scale), null);
    gLeft.dispose();
    Graphics2D gRight = colorRight.createGraphics();
    gRight.drawImage(origRight, AffineTransform.getScaleInstance(scale, scale), null);
    gRight.dispose();

    activeAlg = createAlg();

    inputLeft = GeneralizedImageOps.createSingleBand(activeAlg.getInputType(), w, h);
    inputRight = GeneralizedImageOps.createSingleBand(activeAlg.getInputType(), w, h);
    rectLeft = GeneralizedImageOps.createSingleBand(activeAlg.getInputType(), w, h);
    rectRight = GeneralizedImageOps.createSingleBand(activeAlg.getInputType(), w, h);

    ConvertBufferedImage.convertFrom(colorLeft, inputLeft);
    ConvertBufferedImage.convertFrom(colorRight, inputRight);

    rectifyInputImages();
    doRefreshAll();
}
示例10: main
import boofcv.core.image.ConvertBufferedImage; //导入方法依赖的package包/类
/**
 * Loads a calibrated stereo pair, rectifies the two views, computes dense
 * disparity, and displays the rectified left image plus the disparity map.
 */
public static void main( String[] args ) {
    String calibDir = "../data/applet/calibration/stereo/Bumblebee2_Chess/";
    String imageDir = "../data/applet/stereo/";

    StereoParameters param = BoofMiscOps.loadXML(calibDir + "stereo.xml");

    // load and convert images into a BoofCV format
    BufferedImage origLeft = UtilImageIO.loadImage(imageDir + "chair01_left.jpg");
    BufferedImage origRight = UtilImageIO.loadImage(imageDir + "chair01_right.jpg");

    ImageUInt8 leftDistorted = ConvertBufferedImage.convertFrom(origLeft, (ImageUInt8) null);
    ImageUInt8 rightDistorted = ConvertBufferedImage.convertFrom(origRight, (ImageUInt8) null);

    // remove lens distortion and align the epipolar lines
    ImageUInt8 leftRect = new ImageUInt8(leftDistorted.width, leftDistorted.height);
    ImageUInt8 rightRect = new ImageUInt8(rightDistorted.width, rightDistorted.height);
    rectify(leftDistorted, rightDistorted, param, leftRect, rightRect);

    // compute disparity
    // sub-pixel variant: denseDisparitySubpixel(leftRect, rightRect, 5, 10, 60)
    ImageUInt8 disparity = denseDisparity(leftRect, rightRect, 5, 10, 60);

    // show results
    BufferedImage visualized = VisualizeImageData.disparity(disparity, null, 10, 60, 0);
    ShowImages.showWindow(leftRect, "Rectified");
    ShowImages.showWindow(visualized, "Disparity");
}
示例11: visualize
import boofcv.core.image.ConvertBufferedImage; //导入方法依赖的package包/类
/**
 * Detects, describes, and associates point features between two images,
 * then displays the associations in a window.
 *
 * @param title window title
 * @param image1 first input image
 * @param image2 second input image
 * @param detector interest point detector
 * @param describe region descriptor
 * @param scorer measures similarity between two descriptions
 */
public static <TD extends TupleDesc>
void visualize( String title ,
                BufferedImage image1, BufferedImage image2,
                InterestPointDetector<ImageFloat32> detector ,
                DescribeRegionPoint<ImageFloat32,TD> describe ,
                ScoreAssociation<TD> scorer ) {
    // greedy association; Double.MAX_VALUE disables the score threshold
    AssociateDescription<TD> associate = FactoryAssociation.greedy(scorer, Double.MAX_VALUE, false);

    List<Point2D_F64> pointsA = new ArrayList<Point2D_F64>();
    List<Point2D_F64> pointsB = new ArrayList<Point2D_F64>();

    ImageFloat32 gray1 = ConvertBufferedImage.convertFrom(image1, (ImageFloat32) null);
    ImageFloat32 gray2 = ConvertBufferedImage.convertFrom(image2, (ImageFloat32) null);

    FastQueue<TD> descriptionsA = describeImage(gray1, detector, describe, pointsA);
    FastQueue<TD> descriptionsB = describeImage(gray2, detector, describe, pointsB);

    associate.setSource(descriptionsA);
    associate.setDestination(descriptionsB);
    associate.associate();
    FastQueue<AssociatedIndex> matches = associate.getMatches();

    AssociationPanel panel = new AssociationPanel(20);
    panel.setImages(image1, image2);
    panel.setAssociation(pointsA, pointsB, matches);
    ShowImages.showWindow(panel, title);
}
示例12: VisualizeTldDetectionApp
import boofcv.core.image.ConvertBufferedImage; //导入方法依赖的package包/类
/**
 * Converts the input into the specified gray image type, creates a TLD
 * tracker with on-line learning turned off, and shows this panel in a window.
 *
 * @param input image being processed and displayed
 * @param imageType gray scale image type used internally by the tracker
 */
public VisualizeTldDetectionApp( BufferedImage input , Class<T> imageType ) {
    super(new BorderLayout());
    this.input = input;
    gray = GeneralizedImageOps.createSingleBand(imageType,input.getWidth(),input.getHeight());
    ConvertBufferedImage.convertFrom(input,gray);
    // learning disabled: tracker only detects using its initial model
    tracker = new TldTracker<T,D>(new TldConfig<T,D>(imageType));
    tracker.setPerformLearning(false);
    // NOTE(review): this class handles its own mouse events — handler is
    // defined elsewhere in the class
    addMouseListener(this);
    requestFocus();
    setPreferredSize(new Dimension(gray.width,gray.height));
    ShowImages.showWindow(this,"Visualize Detection");
}
示例13: next
import boofcv.core.image.ConvertBufferedImage; //导入方法依赖的package包/类
/**
 * Returns the next image in the sequence. The frame that was read ahead of
 * time becomes the current frame and is converted into the BoofCV image,
 * then the following frame is loaded.
 */
@Override
public T next() {
    // promote the prefetched frame to the current one
    original = next;
    // resize the output image to match, then convert
    image.reshape(original.getWidth(),original.getHeight());
    ConvertBufferedImage.convertFrom(original,image);
    // load the following frame (see readNext)
    readNext();
    return image;
}
示例14: BufferedFileImageSequence
import boofcv.core.image.ConvertBufferedImage; //导入方法依赖的package包/类
/**
 * Creates a sequence backed by an array of already loaded buffered images.
 * Each input image is converted into the specified BoofCV image type.
 *
 * @param type image data type the sequence produces
 * @param orig source images, kept by reference
 */
public BufferedFileImageSequence(ImageDataType<T> type, BufferedImage[] orig) {
    this.type = type;
    this.orig = orig;

    images = type.createArray(orig.length);
    for (int index = 0; index < orig.length; index++) {
        BufferedImage buffered = orig[index];
        images[index] = type.createImage(buffered.getWidth(), buffered.getHeight());
        ConvertBufferedImage.convertFrom(buffered, images[index]);
    }
}
示例15: main
import boofcv.core.image.ConvertBufferedImage; //导入方法依赖的package包/类
/**
 * Computes dense optical flow between two consecutive frames, saves the
 * flow field to disk, and animates the input pair next to a colorized
 * visualization of the flow.
 */
public static void main(String[] args) throws FileNotFoundException {
    MediaManager media = DefaultMediaManager.INSTANCE;

    // other sequences that can be swapped in:
    //   "images/dogdance07.png" / "images/dogdance08.png"
    //   "images/Grove2_07.png"  / "images/Grove2_09.png"
    String fileName0 = "images/Urban2_07.png";
    String fileName1 = "images/Urban2_08.png";

    // alternative: new IpolHornSchunkPyramid_to_DenseOpticalFlow()
    DenseOpticalFlow<ImageFloat32> denseFlow =
            new IpolBroxSpacial_to_DenseOpticalFlow();

    BufferedImage frame0 = media.openImage(fileName0);
    BufferedImage frame1 = media.openImage(fileName1);

    // Dense optical flow is very computationally expensive. Just process the image at 1/2 resolution
    ImageFloat32 previous = new ImageFloat32(frame0.getWidth(), frame0.getHeight());
    ImageFloat32 current = new ImageFloat32(previous.width, previous.height);
    ImageFlow flow = new ImageFlow(previous.width, previous.height);

    ConvertBufferedImage.convertFrom(frame0, previous);
    ConvertBufferedImage.convertFrom(frame1, current);

    // compute dense motion and report the elapsed time
    long start = System.currentTimeMillis();
    denseFlow.process(previous, current, flow);
    long stop = System.currentTimeMillis();
    System.out.println(" elapsed "+(stop-start));

    UtilOpticalFlow.saveFlow(flow, "denseflow.bflow");

    // Visualize the results
    PanelGridPanel gui = new PanelGridPanel(1, 2);

    BufferedImage display0 = new BufferedImage(current.width, current.height, BufferedImage.TYPE_INT_RGB);
    BufferedImage display1 = new BufferedImage(current.width, current.height, BufferedImage.TYPE_INT_RGB);
    BufferedImage visualized = new BufferedImage(current.width, current.height, BufferedImage.TYPE_INT_RGB);

    ConvertBufferedImage.convertTo(previous, display0, true);
    ConvertBufferedImage.convertTo(current, display1, true);
    VisualizeOpticalFlow.colorized(flow, 10, visualized);

    AnimatePanel animate = new AnimatePanel(150, display0, display1);
    gui.add(animate);
    gui.add(visualized);
    animate.start();

    ShowImages.showWindow(gui, "Dense Optical Flow");
}