This article collects typical usage examples of the Java method org.openimaj.image.FImage.getWidth. If you are wondering what FImage.getWidth does, how to use it, or want to see concrete examples of it in use, the curated code samples below should help. You can also explore further usage examples of the containing class, org.openimaj.image.FImage.
Below are 15 code examples of the FImage.getWidth method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
Example 1: flagFingers
import org.openimaj.image.FImage; // import the package/class this method depends on
/**
* This method adds a detected finger candidate to the finger candidate
* list of the current scan if this candidate matches the requirements.
* @param res The scan image currently being analyzed
*/
public static void flagFingers(FImage res) {
    Iterator<Finger> itr = fingerList.iterator();
    int fingersCount = 0;
    while (itr.hasNext()) {
        Finger finger = itr.next();
        int plotCount = finger.getPlotMap().size();
        if (plotCount >= MIN_FINGER_POINTS && finger.getFingerHeight() <= res.getHeight()*MAX_FINGER_SIZE && finger.getFingerWidthMax() <= res.getWidth()*MAX_FINGER_SIZE) {
            // System.out.println("fingersCount: " + fingersCount + ", Finger x: " + finger.getxId() +
            //         ", y: " + finger.getyId() + ". Plot count: " + plotCount);
            fingersCount++;
            if (finger.getImageHight()*MAX_FINGER_SIZE/100 > finger.getFingerHeight() &&
                    finger.getImageWidth()*MAX_FINGER_SIZE/100 > finger.getFingerWidthMax() &&
                    finger.getFingerWidthMax() > DEVIATION_X) {
                checkFingerWidth(res, finger);
                checkFingerOnBorder(res, finger);
                resFingerList.add(finger);
            }
        }
    }
}
Example 2: displayQueryResults
import org.openimaj.image.FImage; // import the package/class this method depends on
/**
* Queries the face recogniser with the image at the given URL and displays
* the annotated results.
* @param resource The URL of the image to query
* @return The displayed image
* @throws Exception
*/
public static MBFImage displayQueryResults(final URL resource) throws Exception
{
    System.out.println("----------- QUERYING ----------- ");
    final FImage fi = ImageUtilities.readF(resource);
    final PersonMatcher pm = new PersonMatcher(new File(PersonMatcher.RECOGNISER_FILE));
    final List<? extends IndependentPair<? extends DetectedFace, ScoredAnnotation<String>>> l = pm.query(fi);
    final MBFImage m = new MBFImage(fi.getWidth(), fi.getHeight(), 3);
    m.addInplace(fi);
    int count = 1;
    for (final IndependentPair<? extends DetectedFace, ScoredAnnotation<String>> i : l)
    {
        final Rectangle b = i.firstObject().getBounds();
        m.drawShape(b, RGBColour.RED);
        final String name = count + " : " +
                (i.secondObject() == null ? "Unknown" : i.secondObject().annotation);
        m.drawText(name, (int) b.x, (int) b.y,
                HersheyFont.TIMES_MEDIUM, 12, RGBColour.GREEN);
        count++;
    }
    DisplayUtilities.display(m);
    return m;
}
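A hypothetical call site is sketched below; the URL and output path are placeholders, not values from the original project.

// Hypothetical usage: run the recogniser over an image fetched from a URL and save the annotated result.
public static void main(String[] args) throws Exception {
    MBFImage annotated = displayQueryResults(new URL("http://example.com/group-photo.jpg"));
    ImageUtilities.write(annotated, new File("annotated.png"));
}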
Example 3: computeTable
import org.openimaj.image.FImage; // import the package/class this method depends on
protected void computeTable(FImage image) {
    sum = new FImage(image.getWidth() + 1, image.getHeight() + 1);
    sqSum = new FImage(image.getWidth() + 1, image.getHeight() + 1);
    for (int y = 0; y < image.height; y++) {
        for (int x = 0; x < image.width; x++) {
            final float p = image.pixels[y][x];
            sum.pixels[y + 1][x + 1] = p +
                    sum.pixels[y + 1][x] +
                    sum.pixels[y][x + 1] -
                    sum.pixels[y][x];
            sqSum.pixels[y + 1][x + 1] = p * p +
                    sqSum.pixels[y + 1][x] +
                    sqSum.pixels[y][x + 1] -
                    sqSum.pixels[y][x];
        }
    }
}
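computeTable builds classic summed-area (integral image) tables, padded with an extra leading row and column of zeros, so the sum (and, via sqSum, the variance) of any rectangular window can later be read in constant time. A minimal sketch of that lookup, assuming access to the sum field populated above:

// Sum of the pixels in the half-open rectangle [x1, x2) x [y1, y2),
// read directly from the zero-padded integral image built by computeTable.
float rectSum(int x1, int y1, int x2, int y2) {
    return sum.pixels[y2][x2] - sum.pixels[y1][x2]
            - sum.pixels[y2][x1] + sum.pixels[y1][x1];
}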
Example 4: evaluateNextRightPos
import org.openimaj.image.FImage; // import the package/class this method depends on
/**
* This method finds the next non-zero pixel to the right of the given
* position along the X axis.
* @param image the image to scan
* @param xx the x coordinate to start scanning from
* @param yy the y coordinate of the row to scan
* @return the x coordinate of the next non-zero pixel, or 0 if none is found
*/
public static int evaluateNextRightPos (FImage image, int xx, int yy) {
    int res = 0;
    for (int x=xx; x < image.getWidth (); x++) { // column
        float pixel = image.getPixel(x, yy);
        if (pixel > 0.0) {
            res = x;
            break;
        }
    }
    return res;
}
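A minimal usage sketch, written as if from inside the same class; the image content is fabricated purely for illustration.

// Hypothetical usage: find the next non-zero pixel to the right of x=10 on row y=5.
FImage mask = new FImage(100, 50);
mask.setPixel(42, 5, 1f);
int next = evaluateNextRightPos(mask, 10, 5); // 42 here; 0 if no non-zero pixel is found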
Example 5: checkFingerOnBorder
import org.openimaj.image.FImage; // import the package/class this method depends on
/**
* This method checks whether a finger candidate crosses a scan text border.
* @param res
* @param finger
*/
public static void checkFingerOnBorder(FImage res, Finger finger) {
    if (!(finger.getxId() < MIN_BORDER_DISTANCE || finger.getLastX() > res.getWidth() - MIN_BORDER_DISTANCE ||
            finger.getyId() < MIN_BORDER_DISTANCE || finger.getLastY() > res.getHeight() - MIN_BORDER_DISTANCE)) {
        if (finger.getPlotMap().size() > 0) {
            for (Map.Entry<Pixel, Pixel> entry : finger.getPlotMap().entrySet()) {
                Pixel key = entry.getKey();
                Pixel value = entry.getValue();
                res.setPixel(key.x, key.y, 0.0f);
                res.setPixel(value.x, value.y, 0.0f);
            }
        }
    }
}
Example 6: analyse
import org.openimaj.image.FImage; // import the package/class this method depends on
/**
* This method analyzes a given scan image in order to detect finger candidates.
* @param res The scan image to analyze
*/
public static void analyse(FImage res) {
    // filter pixels
    for (int y=0; y < res.getHeight (); y++) { // row
        for (int x=0; x < res.getWidth (); x++) { // column
            float pixel = res.getPixel(x, y);
            if (pixel > 0.0) {
                int nextX = evaluateNextRightPos(res, x + 1, y);
                int diff = nextX - x;
                if (diff >= MIN_FINGER_WIDTH) {
                    Finger f = null;
                    f = isFingerExist(x, y);
                    if (f == null) {
                        // create possible finger
                        f = new Finger();
                        f.setxId(x);
                        f.setyId(y);
                        fingerList.add(f);
                    }
                    f.getPlotMap().put(new Pixel(x, y), new Pixel(nextX, y));
                    f.setLastX(x);
                    f.setLastY(y);
                    f.setImageHight(res.getHeight());
                    f.setImageWidth(res.getWidth());
                }
                // System.out.println("x: " + x + ", y: " + y + ", pixel value: " + pixel + "\n");
            }
        }
    }
    // System.out.println("finger list size: " + fingerList.size());
    flagFingers(res);
}
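A minimal driver for this routine might look like the sketch below; the file name and the binarisation step are assumptions, since the surrounding pipeline is not part of this snippet.

// Hypothetical driver: load a scan, binarise it, then search for finger candidates.
FImage scan = ImageUtilities.readF(new File("scan.png")); // placeholder path
scan.threshold(0.5f);   // stand-in for whatever segmentation the real pipeline uses
analyse(scan);          // populates fingerList/resFingerList as a side effect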
Example 7: diff
import org.openimaj.image.FImage; // import the package/class this method depends on
static MBFImage diff(FImage bg, FImage fg) {
    final FImage df = new FImage(bg.getWidth(), bg.getHeight());
    final float[][] dff = df.pixels;
    final float[][] bgfr = bg.pixels;
    final float[][] fgfr = fg.pixels;
    for (int y = 0; y < df.getHeight(); y++) {
        for (int x = 0; x < df.getWidth(); x++) {
            final float dr = bgfr[y][x] - fgfr[y][x];
            final float ssd = dr * dr;
            if (ssd < 0.03) {
                dff[y][x] = 0;
            } else {
                dff[y][x] = 1;
            }
        }
    }
    // Dilate.dilate(df, 1);
    // Erode.erode(df, 2);
    df.processInplace(new MedianFilter(FilterSupport.createBlockSupport(3, 3)));
    df.processInplace(new MedianFilter(FilterSupport.createBlockSupport(3, 3)));
    return df.toRGB();
}
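A hedged usage sketch, assuming two same-sized greyscale frames on disk (the paths are placeholders):

// Hypothetical usage: build a foreground mask from a background frame and a current frame.
FImage background = ImageUtilities.readF(new File("background.png"));
FImage current = ImageUtilities.readF(new File("frame-0001.png"));
MBFImage mask = diff(background, current); // white where the frames differ noticeably
DisplayUtilities.display(mask);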
Example 8: simpleSusan
import org.openimaj.image.FImage; // import the package/class this method depends on
/**
* Performs the simple SUSAN edge detection.
*
* @param img
* The image to find edges in
* @param thresh
* The threshold
* @param nmax
* The global threshold weighting
* @return Edge image
*/
public static FImage simpleSusan(FImage img, double thresh, double nmax)
{
    final FImage area = new FImage(img.getWidth(), img.getHeight());
    final double globalThresh = (3.0 * nmax) / 4.0;
    for (int y = 1; y < img.getHeight() - 1; y++)
    {
        for (int x = 1; x < img.getWidth() - 1; x++)
        {
            double a = 0;
            for (int x1 = x - 1; x1 < x + 2; x1++)
            {
                for (int y1 = y - 1; y1 < y + 2; y1++)
                {
                    if (Math.abs(img.getPixel(x1, y1) - img.getPixel(x, y)) < thresh)
                        a++;
                }
            }
            if (a < globalThresh)
                area.setPixel(x, y, (float) (globalThresh - a));
        }
    }
    return area;
}
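A hedged usage sketch; the path and the threshold values below are illustrative rather than tuned recommendations.

// Hypothetical usage: run the 3x3 SUSAN detector on a greyscale image.
FImage img = ImageUtilities.readF(new File("input.png")); // placeholder path
FImage edges = simpleSusan(img, 0.08, 9); // thresh and nmax chosen only for illustration
DisplayUtilities.display(edges.normalise());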
Example 9: smoothCircularSusan
import org.openimaj.image.FImage; // import the package/class this method depends on
/**
* Performs the smoothed, circular SUSAN edge detection.
*
* @param img
* The image to find edges in
* @param thresh
* The threshold
* @param nmax
* The global threshold weighting
* @param radius
* The radius of the circle (try 3.4)
* @return Edge image
*/
public static FImage smoothCircularSusan(FImage img, double thresh, double nmax, double radius)
{
    final FImage area = new FImage(img.getWidth(), img.getHeight());
    final double globalThresh = (3.0 * nmax) / 4.0;
    final int r = (int) Math.ceil(radius);
    for (int y = r; y < img.getHeight() - r; y++)
    {
        for (int x = r; x < img.getWidth() - r; x++)
        {
            final float[] pixelValues = getPixelsInCircle(x, y, radius, img);
            double a = 0;
            for (final float f : pixelValues)
                a += Math.exp(-Math.pow(Math.abs(f - img.getPixel(x, y)) / thresh, 6));
            if (a < globalThresh)
                area.setPixel(x, y, (float) (globalThresh - a));
        }
    }
    return area;
}
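Similarly, a hypothetical call to the circular variant, using the radius suggested in the Javadoc; the path, thresh and nmax are again only illustrative values.

// Hypothetical usage of the smoothed circular variant (radius 3.4 as suggested above).
FImage img = ImageUtilities.readF(new File("input.png")); // placeholder path
FImage edges = smoothCircularSusan(img, 0.08, 37, 3.4);
DisplayUtilities.display(edges.normalise());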
Example 10: analyseImage
import org.openimaj.image.FImage; // import the package/class this method depends on
@Override
public void analyseImage(FImage image) {
    int height = image.getHeight();
    int width = image.getWidth();
    magnitude = new FImage(width, height);
    angle = new FImage(width, height);
    int w = (kx.length - 1) / 2;
    for (int y=w+1; y<height-w; y++) {
        for (int x=w+1; x<width-w; x++) {
            //compute gradient
            double gx = 0;
            double gy = 0;
            for (int j=0; j<kx.length; j++) {
                for (int i=0; i<kx.length; i++) {
                    gx += image.pixels[y-w+j][x-w+i] * kx[j][i];
                    gy += image.pixels[y-w+j][x-w+i] * ky[j][i];
                }
            }
            magnitude.pixels[y][x] = (float) sqrt(gx*gx + gy*gy);
            if(gy!=0)
                angle.pixels[y][x] = (float) atan(gx/-gy);
            else
                angle.pixels[y][x] = 1.57f;
        }
    }
}
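A hedged usage sketch, assuming the enclosing class is an ImageAnalyser<FImage>; GradientAnalyser is a placeholder name, and the magnitude/angle fields are assumed to be readable after analysis, as the code above suggests.

// Hypothetical usage: run the analyser over a frame and inspect the per-pixel gradients.
FImage frame = ImageUtilities.readF(new File("frame.png")); // placeholder path
GradientAnalyser analyser = new GradientAnalyser();          // placeholder class name
frame.analyseWith(analyser);
DisplayUtilities.display(analyser.magnitude.normalise());    // gradient magnitude image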
Example 11: H_TO_H1H2_2
import org.openimaj.image.FImage; // import the package/class this method depends on
/**
* Convert Hue to the two-component H1H2 representation (variant 2) using the formulation from:
* http://ilab.usc.edu/wiki/index.php/HSV_And_H2SV_Color_Space
*
* @param in
* Hue image
* @return H1H2_2 image
*/
public static MBFImage H_TO_H1H2_2(final FImage in) {
    final int width = in.getWidth();
    final int height = in.getHeight();
    final MBFImage out = new MBFImage(width, height, ColourSpace.H1H2_2);
    final float[][] H = in.pixels;
    final float[][] H1 = out.getBand(0).pixels;
    final float[][] H2 = out.getBand(1).pixels;
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            if (H[y][x] > 0.3333333333F)
            {
                H2[y][x] = ((H[y][x] - 0.3333333333F) / 0.6666666666F);
                if (H[y][x] > 0.6666666666F)
                    H1[y][x] = ((H[y][x] - 0.6666666666F) / 0.5F);
                else
                    H1[y][x] = (1 - (H[y][x] - 0.1666666666F) / 0.5F);
            }
            else
            {
                H2[y][x] = (1 - H[y][x] / 0.3333333333F);
                if (H[y][x] > 0.1666666666F)
                    H1[y][x] = (1 - (H[y][x] - 0.1666666666F) / 0.5F);
                else
                    H1[y][x] = ((2.0F / 3.0F) + H[y][x] / 0.5F);
            }
        }
    }
    return out;
}
Example 12: WatershedProcessorAlgorithm
import org.openimaj.image.FImage; // import the package/class this method depends on
/**
* Default constructor
*
* @param bGreyscaleImage
* the image to apply the watershed transform to
* @param startPixel
* The pixel to start the process at
* @param featureClasses
* the features that should be created for each detected
* component
*/
@SafeVarargs
public WatershedProcessorAlgorithm(FImage bGreyscaleImage, IntValuePixel startPixel,
        Class<? extends ComponentFeature>... featureClasses)
{
    this(new int[bGreyscaleImage.getHeight()][bGreyscaleImage.getWidth()], startPixel, featureClasses);
    for (int j = 0; j < bGreyscaleImage.getHeight(); j++) {
        for (int i = 0; i < bGreyscaleImage.getWidth(); i++) {
            greyscaleImage[j][i] = (int) (bGreyscaleImage.pixels[j][i] * 255);
        }
    }
}
Example 13: calculateHorizontalProjection
import org.openimaj.image.FImage; // import the package/class this method depends on
/**
* Calculates a projection across the given accumulator space.
* Returns an image with the same width as the input and a height of 1.
* Effectively combines, for each angle, the responses across all distances
* from the origin in the space (via the root of the sum of squares), giving
* the strength of each angle in the image irrespective of where those
* lines occur.
*
* @param accum The accumulator space to project
* @return A horizontal projection on the accumulator space as an
* FImage with same width as input image but only 1 pixel high
*/
public FImage calculateHorizontalProjection( FImage accum )
{
    FImage proj = new FImage( accum.getWidth(), 1 );
    for( int x = 0; x < accum.getWidth(); x++ )
    {
        float acc = 0;
        for( int y = 0; y < accum.getHeight(); y++ )
            acc += accum.getPixel(x,y)*accum.getPixel(x,y);
        proj.setPixel(x,0, (float)Math.sqrt(acc) );
    }
    return proj;
}
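A minimal sketch of how this projection might be used, written as if from inside the same class; the accumulator here is a placeholder rather than a real Hough space.

// Hypothetical usage: collapse an accumulator (angle along x, distance along y)
// into a 1-pixel-high profile of per-angle strength.
FImage accumulator = new FImage(360, 200); // placeholder accumulator
// ... fill the accumulator ...
FImage profile = calculateHorizontalProjection(accumulator);
for (int x = 0; x < profile.getWidth(); x++)
    System.out.printf("bin %d -> strength %f%n", x, profile.getPixel(x, 0));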
Example 14: H_TO_H1H2
import org.openimaj.image.FImage; // import the package/class this method depends on
/**
* Convert Hue to the two-component H1H2 representation using the formulation from:
* http://ilab.usc.edu/wiki/index.php/HSV_And_H2SV_Color_Space
*
* @param in
* input image
* @return Two-component hue image
*/
public static MBFImage H_TO_H1H2(final FImage in) {
    final int width = in.getWidth();
    final int height = in.getHeight();
    final MBFImage out = new MBFImage(width, height, ColourSpace.H1H2);
    final float[][] H = in.pixels;
    final float[][] H1 = out.getBand(0).pixels;
    final float[][] H2 = out.getBand(1).pixels;
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            if (H[y][x] > 0.5F)
            {
                H2[y][x] = ((H[y][x] - 0.5F) / 0.5F);
                if (H[y][x] > 0.75)
                    H1[y][x] = ((H[y][x] - 0.75F) / 0.5F);
                else
                    H1[y][x] = (1 - (H[y][x] - 0.25F) / 0.5F);
            }
            else
            {
                H2[y][x] = (1F - H[y][x] / 0.5F);
                if (H[y][x] > 0.25F)
                    H1[y][x] = (1 - (H[y][x] - 0.25F) / 0.5F);
                else
                    H1[y][x] = (0.5F + H[y][x] / 0.5F);
            }
        }
    }
    return out;
}
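A hedged sketch of how such a conversion is typically driven: convert an RGB image to HSV and expand its hue band into the two-component representation. The input path is a placeholder, and the Transforms.RGB_TO_HSV step is an assumption about the surrounding pipeline rather than something shown in this snippet.

// Hypothetical usage: expand the hue band of an HSV image into H1H2.
MBFImage rgb = ImageUtilities.readMBF(new File("input.png"));
MBFImage hsv = Transforms.RGB_TO_HSV(rgb);
MBFImage h1h2 = H_TO_H1H2(hsv.getBand(0)); // band 0 of the HSV image is hue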
Example 15: inpaint
import org.openimaj.image.FImage; // import the package/class this method depends on
protected void inpaint(int x, int y, FImage input) {
    final int width = input.getWidth();
    final int height = input.getHeight();
    final float gradx_u = gradX(timeMap.pixels, x, y);
    final float grady_u = gradY(timeMap.pixels, x, y);
    float accum = 0;
    float norm = 0;
    for (final Pixel p : region) {
        final int xx = p.x + x;
        final int yy = p.y + y;
        if (xx <= 1 || xx >= width - 1 || yy <= 1 || yy >= height - 1)
            continue;
        if (flag[yy][xx] != KNOWN)
            continue;
        final int rx = x - xx;
        final int ry = y - yy;
        // geometric distance.
        final float geometricDistance = (float) (1. / ((rx * rx + ry * ry) * Math.sqrt((rx * rx + ry * ry))));
        // levelset distance.
        final float levelsetDistance = (float) (1. / (1 + Math.abs(timeMap.pixels[yy][xx] - timeMap.pixels[y][x])));
        // Dot product of final displacement and gradient vectors.
        float direction = Math.abs(rx * gradx_u + ry * grady_u);
        if (direction < 0.000001f)
            direction = 0.000001f;
        final float weight = geometricDistance * levelsetDistance * direction;
        accum += weight * input.pixels[yy][xx];
        norm += weight;
    }
    input.pixels[y][x] = accum / norm;
}
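To make the weighting in the loop above concrete, the sketch below evaluates the three factors for a single hypothetical neighbour; all numbers are illustrative only.

// Illustrative weight for a neighbour at offset (rx, ry) = (1, 2), assuming the
// arrival-time difference |T(neighbour) - T(centre)| is 0.5 and the gradient of
// the time map at the centre is (gradx_u, grady_u) = (0.6, 0.8).
int rx = 1, ry = 2;
double r2 = rx * rx + ry * ry;                    // squared distance = 5
double geometric = 1.0 / (r2 * Math.sqrt(r2));    // nearer pixels contribute more
double levelset = 1.0 / (1.0 + 0.5);              // similar arrival times contribute more
double direction = Math.abs(rx * 0.6 + ry * 0.8); // alignment with the propagation direction
System.out.println(geometric * levelset * direction); // combined weight for this neighbour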