本文整理汇总了Java中org.openimaj.image.FImage.extractROI方法的典型用法代码示例。如果您正苦于以下问题:Java FImage.extractROI方法的具体用法?Java FImage.extractROI怎么用?Java FImage.extractROI使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.openimaj.image.FImage
的用法示例。
在下文中一共展示了FImage.extractROI方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: main
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
 * Testing
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
	final FImage source = ImageUtilities.readF(new File("/Users/jsh2/Desktop/image.png"));

	// cut a 100x100 patch out of the image to use as the template
	final FImage patch = source.extractROI(100, 100, 100, 100);

	// blank the image and paste the patch back at a known position so the
	// matcher has an unambiguous ground-truth location to find
	source.fill(0f);
	source.drawImage(patch, 100, 100);

	final TemplateMatcher tm = new TemplateMatcher(patch, Mode.CORRELATION);
	tm.setSearchBounds(new Rectangle(100, 100, 200, 200));
	source.analyseWith(tm);
	DisplayUtilities.display(tm.responseMap.normalise());

	// visualise the best responses, the search bounds and the true location
	final MBFImage colour = source.toRGB();
	for (final FValuePixel peak : tm.getBestResponses(10)) {
		System.out.println(peak);
		colour.drawPoint(peak, RGBColour.RED, 1);
	}
	colour.drawShape(tm.getSearchBounds(), RGBColour.BLUE);
	colour.drawShape(new Rectangle(100, 100, 100, 100), RGBColour.GREEN);
	DisplayUtilities.display(colour);
}
示例2: CLMDetectedFace
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
 * Construct with the given bounds, shape and pose parameters and detection
 * image. The face patch is extracted automatically.
 *
 * @param bounds
 * @param shape
 * @param poseParameters
 * @param shapeParameters
 * @param visibility
 * @param fullImage
 */
public CLMDetectedFace(final Rectangle bounds, final Matrix shape, final Matrix poseParameters, final Matrix shapeParameters,
		final Matrix visibility, final FImage fullImage)
{
	super(bounds, fullImage.extractROI(bounds), 1);

	this.poseParameters = poseParameters;
	this.shapeParameters = shapeParameters;
	this.visibility = visibility;
	this.shape = shape;

	// Shift the shape points in place so they become relative to the
	// extracted patch rather than the full image. The matrix stacks all
	// x coordinates in the first half of the rows and all y coordinates
	// in the second half.
	final double[][] pts = shape.getArray();
	final int nPoints = shape.getRowDimension() / 2;
	for (int p = 0; p < nPoints; p++) {
		pts[p][0] -= bounds.x;
		pts[p + nPoints][0] -= bounds.y;
	}
}
示例3: extractClassifierTrainingPatches
import org.openimaj.image.FImage; //导入方法依赖的package包/类
void extractClassifierTrainingPatches(FImage image, FImage labels, int npatchesPerClass, int sz) {
bigPatchSize = sz;
urbanPatches = new FImage[npatchesPerClass];
ruralPatches = new FImage[npatchesPerClass];
int u = 0;
int r = 0;
while (u < npatchesPerClass || r < npatchesPerClass) {
final int x = rng.nextInt(image.width - sz - 1);
final int y = rng.nextInt(image.height - sz - 1);
final FImage ip = image.extractROI(x, y, sz, sz);
final float[] lp = labels.extractROI(x, y, sz, sz).getFloatPixelVector();
boolean same = true;
for (int i = 0; i < sz * sz; i++) {
if (lp[i] != lp[0]) {
same = false;
break;
}
}
if (same) {
if (lp[0] == 0 && r < npatchesPerClass) {
ruralPatches[r] = ip;
r++;
} else if (lp[0] == 1 && u < npatchesPerClass) {
// DisplayUtilities.display(ResizeProcessor.resample(ip,
// 128, 128).normalise());
urbanPatches[u] = ip;
u++;
}
}
}
}
示例4: gistGabor
import org.openimaj.image.FImage; //导入方法依赖的package包/类
private FloatFV gistGabor(MBFImage img) {
final int blocksPerFilter = computeNumberOfSamplingBlocks();
final int nFeaturesPerBand = gaborFilters.length * blocksPerFilter;
final int nFilters = this.gaborFilters.length;
// pad the image
img = img.paddingSymmetric(boundaryExtension, boundaryExtension, boundaryExtension, boundaryExtension);
final int cols = img.getCols();
final int rows = img.getRows();
final FloatFFT_2D fft = new FloatFFT_2D(rows, cols);
final float[][] workingSpace = new float[rows][cols * 2];
final FloatFV fv = new FloatFV(nFeaturesPerBand * img.numBands());
for (int b = 0; b < img.numBands(); b++) {
final FImage band = img.bands.get(b);
final float[][] preparedImage =
FourierTransform.prepareData(band.pixels, rows, cols, true);
fft.complexForward(preparedImage);
for (int i = 0; i < nFilters; i++) {
// convolve with the filter
FImage ig = performConv(fft, preparedImage, workingSpace, this.gaborFilters[i], rows, cols);
// remove padding
ig = ig.extractROI(boundaryExtension, boundaryExtension, band.width - 2 * boundaryExtension, band.height
- 2
* boundaryExtension);
sampleResponses(ig, fv.values, b * nFeaturesPerBand + i * blocksPerFilter);
}
}
return fv;
}
示例5: extractDateArea
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
 * Crop the fixed region of the frame that holds the date text.
 * NOTE(review): the coordinates are hard-coded for a specific frame
 * layout — presumably a fixed camera/overlay format; confirm against the
 * source footage.
 *
 * @param image the full frame
 * @return the date region sub-image
 */
private FImage extractDateArea(FImage image) {
	final int x = 664, y = 1024;
	final int width = 176, height = 16;
	return image.extractROI(x, y, width, height);
}
示例6: extractTimeArea
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
 * Crop the fixed region of the frame that holds the time text.
 * NOTE(review): the coordinates are hard-coded for a specific frame
 * layout — presumably a fixed camera/overlay format; confirm against the
 * source footage.
 *
 * @param image the full frame
 * @return the time region sub-image
 */
private FImage extractTimeArea(FImage image) {
	final int x = 840, y = 1024;
	final int width = 144, height = 16;
	return image.extractROI(x, y, width, height);
}
示例7: analyseImage
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
 * Perform template matching. If a bounds rectangle
 * has not been set or is null, then the whole
 * image will be searched. Otherwise the area of the image
 * which lies in the previously set search bounds will be
 * searched.
 *
 * @see org.openimaj.image.analyser.ImageAnalyser#analyseImage(org.openimaj.image.Image)
 */
@Override
public void analyseImage(FImage image) {
	FImage subImage;

	if (this.searchBounds != null) {
		// Expand the search bounds by half the template size on each side so
		// that a template centred anywhere inside the bounds fits completely,
		// then clip the expanded region to the image extents.
		final int halfWidth = templateWidth / 2;
		final int halfHeight = templateHeight / 2;

		final int x = (int) Math.max(searchBounds.x - halfWidth, 0);
		int width = (int) searchBounds.width + templateWidth;
		if (searchBounds.x - halfWidth < 0) {
			// region was clipped at the left edge; shrink width accordingly
			width += (searchBounds.x - halfWidth);
		}
		if (x + width > image.width)
			width = image.width - x; // FIX: clip relative to x (was image.width, which overshoots when x > 0)

		final int y = (int) Math.max(searchBounds.y - halfHeight, 0);
		int height = (int) searchBounds.height + templateHeight;
		if (searchBounds.y - halfHeight < 0) {
			// region was clipped at the top edge; shrink height accordingly
			height += (searchBounds.y - halfHeight);
		}
		if (y + height > image.height)
			height = image.height - y; // FIX: clip relative to y (was image.height)

		// FIXME: this is doing an additional copy; should be rolled into FFT data prep step in FourierTransform
		subImage = image.extractROI(x, y, width, height);
	} else {
		subImage = image.clone();
	}

	// Correlate, then trim the response map so it only covers positions where
	// the template fully overlaps the sub-image.
	responseMap = subImage.process(correlation);
	responseMap.height = responseMap.height - correlation.template.height + 1;
	responseMap.width = responseMap.width - correlation.template.width + 1;

	mode.processCorrelationMap(subImage, correlation.template, responseMap);
}
示例8: detectFaces
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
 * Detect faces using the inner {@code faceDetector}, then refine each
 * detection: extract a canonically-sized patch, locate facial keypoints in
 * it, map the keypoints back to image coordinates, and recompute the face
 * bounding box from the detected eye positions.
 *
 * @param image the image to search for faces
 * @return the detected faces with their keypoints
 */
@Override
public List<KEDetectedFace> detectFaces(FImage image) {
	final List<? extends DetectedFace> faces = faceDetector.detectFaces(image);

	final List<KEDetectedFace> descriptors = new ArrayList<KEDetectedFace>(faces.size());
	for (final DetectedFace df : faces) {
		final int canonicalSize = facialKeypointExtractor.getCanonicalImageDimension();
		final Rectangle r = df.getBounds();

		// calculate a scaled version of the image and extract a patch of
		// canonicalSize
		// (scale maps the canonical patch, minus its model border, onto the detection width)
		final float scale = (r.width / 2) / ((canonicalSize / 2) - facialKeypointExtractor.model.border);
		float tx = (r.x + (r.width / 2)) - scale * canonicalSize / 2;
		float ty = (r.y + (r.height / 2)) - scale * canonicalSize / 2;
		final Matrix T0 = new Matrix(new double[][] { { scale, 0, tx }, { 0, scale, ty }, { 0, 0, 1 } });
		// pyramidResize may modify T in place to account for the pyramid level chosen,
		// so keep T0 as the untouched original — TODO confirm against pyramidResize
		final Matrix T = (Matrix) T0.clone();
		final FImage subsampled = pyramidResize(image, T);
		final FImage smallpatch = extractPatch(subsampled, T, canonicalSize, 0);

		// extract the keypoints
		final FacialKeypoint[] kpts = facialKeypointExtractor.extractFacialKeypoints(smallpatch);

		// calculate the transform to take the canonical coordinates to the
		// roi coordinates
		tx = (r.width / 2) - scale * canonicalSize / 2;
		ty = (r.height / 2) - scale * canonicalSize / 2;
		final Matrix T1 = new Matrix(new double[][] { { scale, 0, tx }, { 0, scale, ty }, { 0, 0, 1 } });
		FacialKeypoint.updateImagePosition(kpts, T1);

		// recompute the bounding box based on the positions of the facial
		// keypoints
		final FacialKeypoint eyeLL = FacialKeypoint.getKeypoint(kpts,
				FacialKeypoint.FacialKeypointType.EYE_LEFT_LEFT);
		final FacialKeypoint eyeRR = FacialKeypoint.getKeypoint(kpts,
				FacialKeypoint.FacialKeypointType.EYE_RIGHT_RIGHT);
		final FacialKeypoint eyeLR = FacialKeypoint.getKeypoint(kpts,
				FacialKeypoint.FacialKeypointType.EYE_LEFT_RIGHT);
		final FacialKeypoint eyeRL = FacialKeypoint.getKeypoint(kpts,
				FacialKeypoint.FacialKeypointType.EYE_RIGHT_LEFT);

		// horizontal distance between the centres of the two eyes
		final float eyeSpace = (0.5f * (eyeRR.position.x + eyeRL.position.x))
				- (0.5f * (eyeLR.position.x + eyeLL.position.x));
		final float deltaX = (0.5f * (eyeLR.position.x + eyeLL.position.x)) - eyeSpace;
		r.x = r.x + deltaX;
		// box width = 3 eye-spacings; the 3x / 1.28 / 0.4 constants are presumably
		// empirically-chosen face proportions — TODO confirm in upstream docs
		r.width = eyeSpace * 3;

		// average vertical position of the two eye centres
		final float eyeVavg = 0.5f * ((0.5f * (eyeRR.position.y + eyeRL.position.y)) + (0.5f * (eyeLR.position.y + eyeLL.position.y)));

		r.height = 1.28f * r.width;

		// place the eye line 40% of the way down the new box
		final float deltaY = eyeVavg - 0.4f * r.height;
		r.y = r.y + deltaY;

		// remember the origin before scaling so keypoints can be shifted to match
		float dx = r.x;
		float dy = r.y;
		r.scaleCentroid(patchScale);
		dx = dx - r.x;
		dy = dy - r.y;
		// move keypoints into the coordinate frame of the final (recomputed, scaled) box
		FacialKeypoint.updateImagePosition(kpts, TransformUtilities.translateMatrix(-deltaX + dx, -deltaY + dy));

		final KEDetectedFace kedf = new KEDetectedFace(r, image.extractROI(r), kpts, df.getConfidence());
		descriptors.add(kedf);
	}
	return descriptors;
}
示例9: extractBlock
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
 * Extract an sz x sz pixel block of the image at the given point.
 * NOTE(review): whether the point is the block's centre or corner is
 * determined by {@code getROI}, which is defined elsewhere — confirm there.
 *
 * @param image the source image
 * @param pt    the reference point for the block
 * @param sz    the block side length in pixels
 * @return the extracted block
 */
protected FImage extractBlock(FImage image, Point2d pt, int sz) {
	final int px = (int) pt.getX();
	final int py = (int) pt.getY();
	return image.extractROI(getROI(px, py, sz, sz));
}