This article collects typical usage examples of the Java method org.openimaj.image.FImage.analyseWith. If you are wondering what FImage.analyseWith does, how to call it, or what real usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.openimaj.image.FImage.
The following shows 15 code examples of FImage.analyseWith, ordered by popularity by default.
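All of the examples follow the same pattern: construct an image analyser, pass it to FImage.analyseWith, then read the results back from the analyser object itself. A minimal sketch of that pattern, assuming a local file named input.png (a placeholder) and borrowing the SummedAreaTable analyser that appears in Example 2 below:

// Minimal sketch of the analyseWith pattern (input.png is a placeholder path)
FImage image = ImageUtilities.readF(new File("input.png"));
SummedAreaTable sat = new SummedAreaTable();                        // 1. construct the analyser
image.analyseWith(sat);                                             // 2. run it over the image
double total = sat.calculateArea(0, 0, image.width, image.height);  // 3. query the analyser, here for the sum over the whole image
System.out.println("Sum of all pixel values: " + total);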
Example 1: main
import org.openimaj.image.FImage; // import the package/class that the method depends on
/**
 * Test the distance transform
 * @param args
 * @throws IOException
 */
public static void main(String args[]) throws IOException {
    FImage i = ImageUtilities.readF(new File("/Users/ss/Desktop/tache.jpg"));
    EuclideanDistanceTransform etrans = new EuclideanDistanceTransform();
    // i.processInplace(new CannyEdgeDetector());
    i.inverse();
    for (int x = 0; x < i.width; x++)
        for (int y = 0; y < i.height; y++)
            if (i.pixels[y][x] == 1.0f)
                i.setPixel(x, y, Float.MAX_VALUE);
    DisplayUtilities.display(i);
    i.analyseWith(etrans);
    i = etrans.getDistances();
    i.normalise();
    DisplayUtilities.display(i);
}
Example 2: processCorrelationMap
import org.openimaj.image.FImage; // import the package/class that the method depends on
@Override
public void processCorrelationMap(FImage img, FImage template, FImage corr) {
    SummedAreaTable sum = new SummedAreaTable();
    img.analyseWith(sum);

    final float templateMean = FloatArrayStatsUtils.mean(template.pixels); // TODO: cache this

    final float[][] pix = corr.pixels;
    for (int y = 0; y < corr.height; y++) {
        for (int x = 0; x < corr.width; x++) {
            double num = pix[y][x];
            double t = sum.calculateArea(x, y, x + template.width, y + template.height);
            num -= t * templateMean;
            pix[y][x] = (float) num;
        }
    }
}
Example 3: main
import org.openimaj.image.FImage; // import the package/class that the method depends on
/**
 * Testing
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
    FImage image = ImageUtilities.readF(new File("/Users/jsh2/Desktop/image.png"));
    FImage template = image.extractROI(100, 100, 100, 100);
    image.fill(0f);
    image.drawImage(template, 100, 100);

    TemplateMatcher matcher = new TemplateMatcher(template, Mode.CORRELATION);
    matcher.setSearchBounds(new Rectangle(100, 100, 200, 200));
    image.analyseWith(matcher);
    DisplayUtilities.display(matcher.responseMap.normalise());

    MBFImage cimg = image.toRGB();
    for (FValuePixel p : matcher.getBestResponses(10)) {
        System.out.println(p);
        cimg.drawPoint(p, RGBColour.RED, 1);
    }

    cimg.drawShape(matcher.getSearchBounds(), RGBColour.BLUE);
    cimg.drawShape(new Rectangle(100, 100, 100, 100), RGBColour.GREEN);

    DisplayUtilities.display(cimg);
}
Example 4: testCircle
import org.openimaj.image.FImage; // import the package/class that the method depends on
/**
 * Test
 */
@Test
public void testCircle() {
    final int imgWidthHeight = 200;
    final FImage circleImage = new FImage(imgWidthHeight, imgWidthHeight);
    final Circle c = new Circle(imgWidthHeight / 2 + 3, imgWidthHeight / 2 + 1, imgWidthHeight / 4);
    circleImage.drawShapeFilled(c, 1f);

    final CannyEdgeDetector det = new CannyEdgeDetector();
    final FImage edgeImage = circleImage.process(det);

    final HoughCircles circ = new HoughCircles(5, imgWidthHeight, 5, 360);
    edgeImage.analyseWith(circ);

    final List<WeightedCircle> best = circ.getBest(1);
    final WeightedCircle b = best.get(0);
    assertTrue(b.equals(c));
}
Example 5: getImageVectorAsArray
import org.openimaj.image.FImage; // import the package/class that the method depends on
/**
 * From the given image, returns the edge direction coherence
 * vector as a double array.
 *
 * @param crgbimage The image to process
 * @return A 2-dimensional vector with only one row
 */
private static double[][] getImageVectorAsArray(FImage crgbimage)
{
    double[][] toReturn = new double[1][VECTOR_SIZE];

    // Calculate the edge direction coherence vector for the image.
    EdgeDirectionCoherenceVector edcv = new EdgeDirectionCoherenceVector();
    edcv.setNumberOfBins(VECTOR_SIZE / 2);

    // Process the image
    crgbimage.analyseWith(edcv);

    // Get the histogram
    double[] d = edcv.getLastHistogram().asDoubleFV().asDoubleVector();

    // Copy the histogram values and count the total number of edge pixels
    double[] edgeCounter = new double[1];
    for (int j = 0; j < VECTOR_SIZE; j++)
    {
        toReturn[0][j] = d[j];
        edgeCounter[0] += d[j];
    }

    // Normalise the vector by the total number of edge pixels
    CityLandscapeUtilities.normaliseVector(toReturn, edgeCounter);

    return toReturn;
}
Example 6: getImageVector
import org.openimaj.image.FImage; // import the package/class that the method depends on
/**
 * Returns an ArrayList<Double> in which each index holds one element of an
 * edge direction coherence vector.
 * @param imageName
 * @return the EDCV
 */
public static ArrayList<Double> getImageVector(String imageName) {
    ArrayList<Double> queryVector = new ArrayList<Double>();

    FImage crgbimage;
    try {
        crgbimage = ImageUtilities.readF(new File(imageName));
        EdgeDirectionCoherenceVector cldo = new EdgeDirectionCoherenceVector();
        crgbimage.analyseWith(cldo);

        double[][] vec = new double[][] {
                cldo.getLastHistogram().incoherentHistogram.values,
                cldo.getLastHistogram().coherentHistogram.values
        };
        int n = cldo.getNumberOfDirBins();
        double edgeCounter = 0;

        for (int j = 0; j < n; j++) {
            // Incoherent
            queryVector.add(vec[0][j]);
            edgeCounter += vec[0][j];
        }
        for (int j = 0; j < n; j++) {
            // Coherent
            queryVector.add(vec[1][j]);
            edgeCounter += vec[1][j];
        }
        queryVector.add(edgeCounter);
        normaliseVector(queryVector);
    } catch (IOException e) {
        System.out.println("File with path: " + imageName + " not found.");
        System.exit(1);
    }
    return queryVector;
}
Example 7: extractLTPSlicePixels
import org.openimaj.image.FImage; // import the package/class that the method depends on
protected static List<List<Pixel>> extractLTPSlicePixels(FImage image) {
    LocalTernaryPattern ltp = new LocalTernaryPattern(2, 8, 0.1f);
    image.analyseWith(ltp);

    List<List<Pixel>> positiveSlices = UniformBinaryPattern.extractPatternPixels(ltp.getPositivePattern(), 8);
    List<List<Pixel>> negativeSlices = UniformBinaryPattern.extractPatternPixels(ltp.getNegativePattern(), 8);

    positiveSlices.addAll(negativeSlices);

    return positiveSlices;
}
Example 8: computeCost
import org.openimaj.image.FImage; // import the package/class that the method depends on
@Override
public float computeCost(FImage image, Point2d point, PointList pointList) {
    FImage extracted = extractBlock(image, point, blockSize);

    if (matcher == null)
        matcher = new TemplateMatcher(average, Mode.NORM_SUM_SQUARED_DIFFERENCE);

    matcher.setSearchBounds(null);
    extracted.analyseWith(matcher);

    return matcher.getResponseMap().pixels[0][0];
}
Example 9: updatePosition
import org.openimaj.image.FImage; // import the package/class that the method depends on
@Override
public ObjectFloatPair<Point2d> updatePosition(FImage image, Point2d initial, PointList pointList) {
    Rectangle roi = getROI((int) initial.getX(), (int) initial.getY(), searchSize, searchSize);

    if (matcher == null)
        matcher = new TemplateMatcher(average, Mode.NORM_SUM_SQUARED_DIFFERENCE);

    matcher.setSearchBounds(roi);
    image.analyseWith(matcher);

    FValuePixel p = matcher.getBestResponses(1)[0];
    return new ObjectFloatPair<Point2d>(p, 0);
}
Example 10: writeHistograms
import org.openimaj.image.FImage; // import the package/class that the method depends on
/**
 * Write histograms to file
 *
 * @param numberOfImages
 * @param inputPath
 * @param outfile
 * @throws IOException
 */
public static void writeHistograms(int numberOfImages, String inputPath, String outfile) throws IOException {
    BufferedWriter bw = new BufferedWriter(new FileWriter(outfile));
    System.out.println("Input file: " + inputPath);
    System.out.println("Writing to: " + outfile);
    File dir = new File(inputPath);
    File[] array = dir.listFiles();

    int skipped = 0;
    for (int i = OFFSET; i - skipped < numberOfImages && i < array.length; i++) {
        FImage image = null;
        try {
            System.out.println("Attempting to write image " + array[i].getName() + " Number: " + (i + skipped));
            image = ImageUtilities.readF(array[i].getAbsoluteFile());
        } catch (Exception e) {
            System.out.println("Error reading image: " + array[i].getName() + ". Number: " + (i + skipped));
            skipped++;
            continue;
        }

        EdgeDirectionCoherenceVector cldo = new EdgeDirectionCoherenceVector();
        image.analyseWith(cldo);

        double[][] vec = new double[][] {
                cldo.getLastHistogram().incoherentHistogram.values,
                cldo.getLastHistogram().coherentHistogram.values
        };
        int n = cldo.getNumberOfDirBins();
        double edgeCounter = 0;

        for (int j = 0; j < n; j++) {
            // Incoherent
            bw.write(vec[0][j] + ",");
            edgeCounter += vec[0][j];
        }
        for (int j = 0; j < n; j++) {
            // Coherent
            bw.write(vec[1][j] + ",");
            edgeCounter += vec[1][j];
        }

        // edgeCounter holds the sum of all elements in the vector and is used for normalisation
        bw.write(Double.toString(edgeCounter));
        bw.newLine();
        edgeCounter = 0;
    }
    bw.flush();
    bw.close();
    System.out.println("Vector Write Completed");
}
Example 11: testHoughLines
import org.openimaj.image.FImage; // import the package/class that the method depends on
/**
 * Test Hough line detection
 */
@Test
public void testHoughLines()
{
    try
    {
        HoughLines hl = new HoughLines();

        FImage i = ImageUtilities.readF(
                HoughLinesTest.class.getResource( "/hough.jpg" ) );
        i.analyseWith( hl );

        MBFImage m = new MBFImage( i.getWidth(), i.getHeight(), 3 );
        MBFImageRenderer renderer = m.createRenderer();
        renderer.drawImage( i, 0, 0 );

        List<Line2d> lines = hl.getBestLines( 2 );
        Assert.assertEquals( 2, lines.size() );

        for( int j = 0; j < lines.size(); j++ )
        {
            Line2d l = lines.get(j);
            Assert.assertEquals( -2000, l.begin.getX(), 1d );
            Assert.assertEquals( 2000, l.end.getX(), 1d );

            l = l.lineWithinSquare(
                    new Rectangle( 0, 0, m.getWidth(), m.getHeight() ) );
            renderer.drawLine( l, 2, new Float[]{1f,0f,0f} );
            System.out.println( l );

            Assert.assertEquals( 0d, l.begin.getX(), 5d );
        }

        DisplayUtilities.display( m );
    }
    catch( IOException e )
    {
        e.printStackTrace();
    }

    // forceWait();
}
Example 12: processGridElement
import org.openimaj.image.FImage; // import the package/class that the method depends on
@Override
public Float processGridElement(FImage patch) {
    patch.analyseWith(bpp);
    return (float) bpp.getBlurredPixelProportion();
}
Example 13: analyseImage
import org.openimaj.image.FImage; // import the package/class that the method depends on
@Override
public void analyseImage(FImage src) {
    final FImage thresh = new FImage(src.width, src.height);
    final MinMaxAnalyser mma = new MinMaxAnalyser(FilterSupport.BLOCK_3x3);
    src.analyseWith(mma);

    final FImage white = mma.min;
    final FImage black = mma.max;

    result = false;
    for (float threshLevel = BLACK_LEVEL; threshLevel < WHITE_LEVEL && !result; threshLevel += (20.0f / 255f))
    {
        final List<FloatIntPair> quads = new ArrayList<FloatIntPair>();

        quickThresh(white, thresh, threshLevel + BLACK_WHITE_GAP, false);
        getQuadrangleHypotheses(SuzukiContourProcessor.findContours(thresh), quads, 1);

        quickThresh(black, thresh, threshLevel, true);
        getQuadrangleHypotheses(SuzukiContourProcessor.findContours(thresh), quads, 0);

        final int minQuadsCount = patternWidth * patternHeight / 2;
        Collections.sort(quads, FloatIntPair.FIRST_ITEM_ASCENDING_COMPARATOR);

        // now check if there are many hypotheses with similar sizes
        // do this by floodfill-style algorithm
        final float sizeRelDev = 0.4f;

        for (int i = 0; i < quads.size(); i++)
        {
            int j = i + 1;
            for (; j < quads.size(); j++)
            {
                if (quads.get(j).first / quads.get(i).first > 1.0f + sizeRelDev)
                {
                    break;
                }
            }

            if (j + 1 > minQuadsCount + i)
            {
                // check the number of black and white squares
                final int[] counts = new int[2];
                countClasses(quads, i, j, counts);
                final int blackCount = (int) Math.round(Math.ceil(patternWidth / 2.0)
                        * Math.ceil(patternHeight / 2.0));
                final int whiteCount = (int) Math.round(Math.floor(patternWidth / 2.0)
                        * Math.floor(patternHeight / 2.0));
                if (counts[0] < blackCount * 0.75 ||
                        counts[1] < whiteCount * 0.75)
                {
                    continue;
                }

                result = true;
                break;
            }
        }
    }
}
Example 14: processImage
import org.openimaj.image.FImage; // import the package/class that the method depends on
/**
 * {@inheritDoc}
 * @see org.openimaj.image.processor.ImageProcessor#processImage(org.openimaj.image.Image)
 */
@Override
public void processImage( final FImage image )
{
    final PyramidTextExtractor ped = new PyramidTextExtractor();

    // Unlike Lowe's SIFT DoG pyramid, we just need a basic pyramid
    final GaussianPyramidOptions<FImage> gpo = new GaussianPyramidOptions<FImage>();
    gpo.setScales( 1 );
    gpo.setExtraScaleSteps( 1 );
    gpo.setPyramidProcessor( ped );
    gpo.setDoubleInitialImage( this.doubleSizePyramid );

    // Create and process the pyramid
    final GaussianPyramid<FImage> gp = new GaussianPyramid<FImage>( gpo );
    image.analyseWith( gp );

    // -------------------------------------------------------------
    // This is not part of the Liu/Samarabandu algorithm:
    // Multiscale feature map
    FImage msFMap = ped.getFeatureMap();

    // Single scale feature map
    FImage fmap = this.basicTextExtractor.textRegionDetection( image );

    // Need to make it match the multiscale feature map
    if( this.doubleSizePyramid )
        fmap = ResizeProcessor.doubleSize( fmap );

    // Combine the two.
    msFMap = fmap.add( msFMap );
    // -------------------------------------------------------------

    if( LiuSamarabanduTextExtractorMultiscale.DEBUG )
        DisplayUtilities.display( msFMap.normalise(), "Fused Feature Map" );

    // Process the feature map
    this.basicTextExtractor.processFeatureMap( msFMap, image );

    // Store the regions
    this.extractedRegions = this.basicTextExtractor.getTextRegions();

    // If we doubled the feature map, we'll have to halve the size of the bounding boxes.
    if( this.doubleSizePyramid )
        for( final Rectangle r : this.extractedRegions.keySet() )
            r.scale( 0.5f );

    // The output of the processor is the feature map
    image.internalAssign( fmap );
}
Example 15: processImage
import org.openimaj.image.FImage; // import the package/class that the method depends on
/**
 * Apply non-max suppression and hysteresis thresholding, using the given
 * {@link FSobel} analyser to generate the gradients. The gradient maps held
 * by the {@link FSobel} object will be set to the gradients of the input
 * image after this method returns.
 *
 * @param image
 *            the image to process (and write the result to)
 * @param sobel
 *            the computed gradients
 */
public void processImage(FImage image, FSobel sobel) {
    image.analyseWith(sobel);
    processImage(image, sobel.dx, sobel.dy);
}
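A brief usage sketch for this overload, reflecting the javadoc above: the FSobel passed in can be queried for the gradient maps after the call. The names canny (the enclosing detector object, which is not shown above) and the no-argument FSobel constructor are assumptions here:

FSobel sobel = new FSobel();        // assumption: a no-argument constructor exists
canny.processImage(image, sobel);   // image now holds the thresholded edge map
FImage dx = sobel.dx;               // gradient maps computed during detection...
FImage dy = sobel.dy;               // ...remain available for further processing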