本文整理汇总了Java中org.openimaj.image.FImage.drawImage方法的典型用法代码示例。如果您正苦于以下问题:Java FImage.drawImage方法的具体用法?Java FImage.drawImage怎么用?Java FImage.drawImage使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.openimaj.image.FImage的用法示例。
在下文中一共展示了FImage.drawImage方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: main
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
 * Demo of {@code TemplateMatcher}: extracts a 100x100 patch from an image,
 * blanks the image, draws the patch back at (100,100), then searches for it
 * using correlation. Displays the normalised response map and the ten
 * strongest responses, together with the search window (blue) and the
 * expected template location (green).
 *
 * @param args optional; {@code args[0]} is the path of the image to load
 *            (defaults to the original hard-coded demo path)
 * @throws IOException if the image cannot be read
 */
public static void main(String[] args) throws IOException {
	// Generalised: take the input image from the command line when supplied,
	// falling back to the original hard-coded location.
	final String path = args.length > 0 ? args[0] : "/Users/jsh2/Desktop/image.png";
	final FImage image = ImageUtilities.readF(new File(path));

	// Cut a 100x100 template out of the image at (100,100)
	final FImage template = image.extractROI(100, 100, 100, 100);

	// Blank the image and paste the template back in the same place, so the
	// matcher has a known target to find
	image.fill(0f);
	image.drawImage(template, 100, 100);

	// Correlation-mode matcher, restricted to a 200x200 search window
	final TemplateMatcher matcher = new TemplateMatcher(template, Mode.CORRELATION);
	matcher.setSearchBounds(new Rectangle(100, 100, 200, 200));
	image.analyseWith(matcher);
	DisplayUtilities.display(matcher.responseMap.normalise());

	// Render the 10 strongest responses plus the search and template bounds
	final MBFImage cimg = image.toRGB();
	for (final FValuePixel p : matcher.getBestResponses(10)) {
		System.out.println(p);
		cimg.drawPoint(p, RGBColour.RED, 1);
	}
	cimg.drawShape(matcher.getSearchBounds(), RGBColour.BLUE);
	cimg.drawShape(new Rectangle(100, 100, 100, 100), RGBColour.GREEN);
	DisplayUtilities.display(cimg);
}
示例2: main
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
 * Builds a point-distribution model from the IMM face database and renders a
 * 4x4 montage of randomly-generated face shapes, displaying the result and
 * writing it to disk.
 */
public static void main(String[] args) throws IOException {
	// Load the annotated face dataset plus its point wiring
	final ShapeModelDataset<FImage> faceData = IMMFaceDatabase.load(ImageUtilities.FIMAGE_READER);
	final PointListConnections wiring = faceData.getConnections();
	final List<PointList> shapes = faceData.getPointLists();

	// Train a 15-component point distribution model on the shapes
	final PointDistributionModel pdm = new PointDistributionModel(shapes);
	pdm.setNumComponents(15);

	final int cellWidth = 200;
	final int cellHeight = 200;
	final FImage montage = new FImage(4 * cellWidth, 4 * cellHeight);

	// Fill a 4x4 grid; each cell shows one randomly-sampled shape
	for (int row = 0; row < 4; row++) {
		for (int col = 0; col < 4; col++) {
			// Random walk through model-parameter space (bounded by 3 std-devs)
			final ValueAnimator<double[]> animator = DoubleArrayValueAnimator.makeRandomLinear(0,
					pdm.getStandardDeviations(3));

			final FImage cell = new FImage(cellWidth, cellHeight);
			cell.fill(1);

			// Generate a shape and translate/scale it into the cell
			final PointList sampled = pdm.generateNewShape(animator.nextValue());
			final PointList placed = sampled.transform(TransformUtilities.translateMatrix(100, 100).times(
					TransformUtilities.scaleMatrix(50, 50)));

			final List<Line2d> edges = wiring.getLines(placed);
			cell.drawLines(edges, 1, 0f);

			montage.drawImage(cell, col * cellWidth, row * cellHeight);
		}
	}

	DisplayUtilities.display(montage);
	ImageUtilities.write(montage, new File("/Users/jsh2/Desktop/faces.png"));
}
示例3: shiftData
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
 * Checks whether the visualisation needs to be shifted to the left.
 * If not it draws the spectra from the left of the image. If it
 * reaches the right of the image, the image will be scrolled to the
 * left and the new spectra drawn at the right of the image.
 * <p>
 * Mutates the off-screen buffer {@code previousSpecImage} and (while the
 * buffer is still growing) {@code currentDrawPosition}; the buffer is
 * blatted to the visualisation in the update() method.
 */
private void shiftData()
{
// Once the number of frames exceeds the visible width, scroll: shiftLeft()
// frees up the rightmost column for the newest spectra.
// (our draw position is given in currentDrawPosition and it's
// not updated if we enter this if clause).
if( this.nFrames > this.visImage.getWidth() )
this.previousSpecImage = this.previousSpecImage.shiftLeft();
else
{
// Still growing: reallocate the buffer one frame wider, copy the previous
// spectrogram into it bottom-aligned, and advance the draw position.
if( this.nFrames > 0 )
{
final FImage t = new FImage( this.nFrames, this.visImage.getHeight() );
// previousSpecImage is null before the first frame has been drawn
if( this.previousSpecImage != null )
t.drawImage( this.previousSpecImage, 0, t.getHeight()-this.previousSpecImage.getHeight() );
this.previousSpecImage = t;
this.currentDrawPosition++;
}
}
// Draw the newest spectra. Note that we draw onto the "previousSpecImage"
// memory image and we blat this out in the update() method.
// NOTE(review): the lock suggests this.data is written by another thread
// (presumably the audio capture thread) -- confirm against the producer.
synchronized( this.data )
{
// Draw spectra onto image
this.drawSpectra( this.previousSpecImage, this.data, this.currentDrawPosition-1 );
}
}
示例4: main
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
 * Main method for the example.
 *
 * @param args
 *            Ignored.
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
	// Load the two photographs whose faces we want to compare
	final FImage queryImage = ImageUtilities.readF(new URL(
			"http://s3.amazonaws.com/rapgenius/fema_-_39841_-_official_portrait_of_president-elect_barack_obama_on_jan-_13.jpg"));
	final FImage testImage = ImageUtilities.readF(new URL(
			"http://nimg.sulekha.com/others/thumbnailfull/barack-obama-michelle-obama-mary-mcaleese-martin-mcaleese-2011-5-23-6-50-0.jpg"));

	// Face detection: a haar-cascade frontal-face detector, wrapped in a
	// keypoint-enhancing detector so facial keypoints are available to the
	// feature extractor. Many other detector/feature combinations exist.
	final FKEFaceDetector keypointDetector = new FKEFaceDetector(
			HaarCascadeDetector.BuiltInCascade.frontalface_alt2.load());

	// Feature: pixel patches sampled around prominent facial keypoints (mouth
	// corners etc.), concatenated into a single vector
	final Extractor patchExtractor = new FacePatchFeature.Extractor();

	// Features are compared by Euclidean distance between vectors
	final FaceFVComparator<FacePatchFeature, FloatFV> distance =
			new FaceFVComparator<FacePatchFeature, FloatFV>(FloatFVComparison.EUCLIDEAN);

	// The engine runs detection, feature extraction and comparison over every
	// pair of faces found in the two images
	final FaceSimilarityEngine<KEDetectedFace, FacePatchFeature, FImage> engine =
			new FaceSimilarityEngine<KEDetectedFace, FacePatchFeature, FImage>(keypointDetector, patchExtractor, distance);

	engine.setQuery(queryImage, "image1");
	engine.setTest(testImage, "image2");
	engine.performTest();

	// For each face in the query image, find the closest face in the test
	// image and display the matched pair side-by-side
	for (final Entry<String, Map<String, Double>> queryFace : engine.getSimilarityDictionary().entrySet()) {
		// Smallest distance wins; ties keep the first candidate encountered
		String bestMatch = null;
		double bestDistance = Double.MAX_VALUE;
		for (final Entry<String, Double> candidate : queryFace.getValue().entrySet()) {
			final double d = candidate.getValue();
			if (d < bestDistance) {
				bestDistance = d;
				bestMatch = candidate.getKey();
			}
		}

		// Composite the two originals next to each other and outline the pair
		final FImage composite = new FImage(queryImage.width + testImage.width,
				Math.max(queryImage.height, testImage.height));
		composite.drawImage(queryImage, 0, 0);
		composite.drawImage(testImage, queryImage.width, 0);
		composite.drawShape(engine.getBoundingBoxes().get(queryFace.getKey()), 1F);

		// Shift the match's box into the composite's coordinate frame
		final Rectangle matchBounds = engine.getBoundingBoxes().get(bestMatch);
		matchBounds.translate(queryImage.width, 0);
		composite.drawShape(matchBounds, 1F);

		DisplayUtilities.display(composite);
	}
}
示例5: main
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
 * Demo driver: samples patches from an aerial photo and its class map, learns
 * a 100-atom patch dictionary, and displays the learned atoms as a 10x10
 * grid. Input paths are hard-coded to the author's machine. The commented-out
 * sections sketch a liblinear-based classifier and a sliding-window
 * classification pass that were never wired in.
 */
public static void main(String[] args) throws IOException {
final TestImageClass tic = new TestImageClass();
// Load training photo and class map, downsampled to quarter size
final FImage trainPhoto = ResizeProcessor.halfSize(ResizeProcessor.halfSize(ImageUtilities.readF(new File(
"/Users/jon/Desktop/images50cm4band/sp7034.jpeg"))));
final FImage trainClass = ResizeProcessor.halfSize(ResizeProcessor.halfSize(ImageUtilities.readF(new File(
"/Users/jon/Desktop/images50cm4band/sp7034-classes.PNG"))));
// Sample patches and learn a 100-atom dictionary.
// (presumably 20000 feature patches of size 8, and 1000 classifier training
// patches of size 32 -- TODO confirm against TestImageClass)
tic.extractFeaturePatches(trainPhoto, 20000, 8);
tic.extractClassifierTrainingPatches(trainPhoto, trainClass, 1000, 32);
tic.learnDictionary(100);
// Note: should really use sparse version!!
/*
* final LiblinearAnnotator<FImage, Boolean> ann = new
* LiblinearAnnotator<FImage, Boolean>(tic, Mode.MULTICLASS,
* SolverType.L2R_L2LOSS_SVC, 1, 0.0001);
*
* final MapBackedDataset<Boolean, ListBackedDataset<FImage>, FImage>
* data = new MapBackedDataset<Boolean, ListBackedDataset<FImage>,
* FImage>(); data.add(true, new
* ListBackedDataset<FImage>(Arrays.asList(tic.ruralPatches)));
* data.add(false, new
* ListBackedDataset<FImage>(Arrays.asList(tic.urbanPatches)));
* ann.train(data);
*/
// NOTE(review): 'test' is only used by the commented-out classification
// loop below; the active code never reads it
final FImage test = ResizeProcessor.halfSize(ResizeProcessor.halfSize(ImageUtilities.readF(new File(
"/Users/jon/Desktop/images50cm4band/test.jpeg")))).normalise();
/*
* final FImage result = test.extractCenter(test.width - 32, test.height
* - 32); final FImage tmp = new FImage(32, 32); for (int y = 0; y <
* test.height - 32; y++) { for (int x = 0; x < test.width - 32; x++) {
* test.extractROI(x, y, tmp);
*
* final ClassificationResult<Boolean> r = ann.classify(tmp); final
* Boolean clz = r.getPredictedClasses().iterator().next();
*
* if (clz) result.pixels[y][x] = 1;
*
* DisplayUtilities.displayName(result, "result"); } }
*/
// Visualise the 100 dictionary atoms as a 10x10 grid of 8x8 tiles
final FImage tmp = new FImage(8 * 10, 8 * 10);
for (int i = 0, y = 0; y < 10; y++) {
for (int x = 0; x < 10; x++, i++) {
final FImage p = new FImage(tic.dictionary[i], 8, 8);
// rescale for display: divide by twice the maximum, then shift by 0.5
p.divideInplace(2 * Math.max(p.min(), p.max()));
p.addInplace(0.5f);
tmp.drawImage(p, x * 8, y * 8);
}
}
DisplayUtilities.display(tmp);
}
示例6: main
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
 * Demo: learns a spherical k-means dictionary (2500 centroids) from
 * ZCA-whitened 8x8 image patches and displays the centroids as a 50x50 tiled
 * image. Expects a pre-sampled patch file ("patches.bin") in the working
 * directory; the commented-out code shows how it was generated and some
 * earlier visualisation experiments, kept for reference.
 */
public static void main(String[] args) throws IOException {
final File patchesFile = new File("patches.bin");
// final RandomPatchSampler sampler =
// new
// RandomPatchSampler(Caltech101.getImages(ImageUtilities.FIMAGE_READER),
// 8, 8, 100000);
// sampler.save(patchesFile);
final List<FImage> patches = RandomPatchSampler.loadPatches(patchesFile);
// Flatten each patch into a row vector for whitening and clustering
final double[][] data = new double[patches.size()][];
for (int i = 0; i < data.length; i++)
data[i] = patches.get(i).getDoublePixelVector();
// final PCAWhitening whitening = new PCAWhitening();
// ZCA whitening with per-example mean centering / variance normalisation
final WhiteningTransform whitening = new ZCAWhitening(0.1, new PerExampleMeanCenterVar(10f / 255f));
whitening.train(data);
final double[][] wd = whitening.whiten(data);
// final double[][] comps =
// whitening.getTransform().transpose().getArray();
// for (int i = 0; i < comps.length; i++)
// DisplayUtilities.display(ResizeProcessor.resample(new
// FImage(comps[i], 8, 8).normalise(), 128, 128));
// final FImage tmp1 = new FImage(100 * 8, 100 * 8);
// final FImage tmp2 = new FImage(100 * 8, 100 * 8);
// final FImage tmp3 = new FImage(100 * 8, 100 * 8);
// for (int i = 0; i < 100; i++) {
// for (int j = 0; j < 100; j++) {
// final double[] d = new PerExampleMeanCenterVar(10f /
// 255f).normalise(patches.get(i * 100 + j)
// .getDoublePixelVector());
// FImage patch = new FImage(d, 8, 8);
// patch.divideInplace(2 * Math.max(patch.min(), patch.max()));
// patch.addInplace(0.5f);
// tmp2.drawImage(patch, i * 8, j * 8);
//
// tmp3.drawImage(patches.get(i * 100 + j), i * 8, j * 8);
//
// patch = new FImage(wd[i * 100 + j], 8, 8);
// patch.divideInplace(2 * Math.max(patch.min(), patch.max()));
// patch.addInplace(0.5f);
// tmp1.drawImage(patch, i * 8, j * 8);
// }
// }
// DisplayUtilities.display(tmp3);
// DisplayUtilities.display(tmp2);
// DisplayUtilities.display(tmp1);
// Cluster the whitened patches: 2500 centroids, 10 iterations
final SphericalKMeans skm = new SphericalKMeans(2500, 10);
final SphericalKMeansResult res = skm.cluster(wd);
// Tile the centroids into a 50x50 grid with a 1-pixel border between cells
final FImage tmp = new FImage(50 * (8 + 1) + 1, 50 * (8 + 1) + 1);
tmp.fill(1f);
for (int i = 0; i < 50; i++) {
for (int j = 0; j < 50; j++) {
// NOTE(review): resampling an 8x8 image to 8x8 is a no-op -- possibly
// left over from a version that displayed the atoms at a larger size
final FImage patch = ResizeProcessor
.resample(
new FImage(res.centroids[i * 50 + j], 8, 8),
8, 8);
// rescale into a displayable range: divide by twice the largest
// absolute value, then shift the zero point to 0.5
patch.divideInplace(2 * Math.max(Math.abs(patch.min()),
Math.abs(patch.max())));
patch.addInplace(0.5f);
tmp.drawImage(patch, i * (8 + 1) + 1, j * (8 + 1) + 1);
}
}
DisplayUtilities.display(tmp);
}