本文整理汇总了Java中org.openimaj.image.FImage.toRGB方法的典型用法代码示例。如果您正苦于以下问题:Java FImage.toRGB方法的具体用法?Java FImage.toRGB怎么用?Java FImage.toRGB使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.openimaj.image.FImage
的用法示例。
在下文中一共展示了FImage.toRGB方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: main
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
* Testing
* @param args
* @throws IOException
*/
/**
 * Demonstrates template matching: extracts a patch from an image, blanks the
 * image, re-draws the patch at a known location, then searches for it again
 * and visualises the response map and best matches.
 *
 * @param args ignored
 * @throws IOException if the image cannot be read
 */
public static void main(String[] args) throws IOException {
    final FImage source = ImageUtilities.readF(new File("/Users/jsh2/Desktop/image.png"));
    final FImage patch = source.extractROI(100, 100, 100, 100);

    // Blank the image and paste the patch back at a known position
    source.fill(0f);
    source.drawImage(patch, 100, 100);

    final TemplateMatcher tm = new TemplateMatcher(patch, Mode.CORRELATION);
    tm.setSearchBounds(new Rectangle(100, 100, 200, 200));
    source.analyseWith(tm);
    DisplayUtilities.display(tm.responseMap.normalise());

    // Overlay the ten strongest responses, the search bounds and the
    // true patch location on a colour copy of the image
    final MBFImage overlay = source.toRGB();
    for (final FValuePixel best : tm.getBestResponses(10)) {
        System.out.println(best);
        overlay.drawPoint(best, RGBColour.RED, 1);
    }
    overlay.drawShape(tm.getSearchBounds(), RGBColour.BLUE);
    overlay.drawShape(new Rectangle(100, 100, 100, 100), RGBColour.GREEN);
    DisplayUtilities.display(overlay);
}
示例2: main
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
* Main method
*
* @param args
* ignored
* @throws IOException
* if the image can't be read
*/
/**
 * Demonstrates ASIFT feature matching between two images loaded from the
 * classpath, using the parameters of the IPOL demo, and displays the
 * resulting correspondences.
 *
 * @param args ignored
 * @throws IOException if the image can't be read
 */
public static void main(String[] args) throws IOException {
    // Load both grayscale inputs from classpath resources
    final FImage first = ImageUtilities.readF(
            ASIFTMatchingExample.class.getResourceAsStream("/org/openimaj/examples/image/input_0.png"));
    final FImage second = ImageUtilities.readF(
            ASIFTMatchingExample.class.getResourceAsStream("/org/openimaj/examples/image/input_1.png"));

    // Engine configured with the parameters from the IPOL demo
    final ASIFTEngine engine = new ASIFTEngine(false, 7);

    // Extract keypoints from both images
    final LocalFeatureList<Keypoint> firstFeats = engine.findKeypoints(first);
    System.out.println("Extracted input1: " + firstFeats.size());
    final LocalFeatureList<Keypoint> secondFeats = engine.findKeypoints(second);
    System.out.println("Extracted input2: " + secondFeats.size());

    // A matcher that enforces homographic consistency; swap in
    // createFastBasicMatcher() for unconstrained matching.
    final LocalFeatureMatcher<Keypoint> matcher = createConsistentRANSACHomographyMatcher();
    matcher.setModelFeatures(firstFeats);
    matcher.findMatches(secondFeats);

    final List<Pair<Keypoint>> matches = matcher.getMatches();
    System.out.println("NMatches: " + matches.size());

    // Display the matched correspondences side by side
    DisplayUtilities.display(MatchingUtilities.drawMatches(first.toRGB(), second.toRGB(), matches, RGBColour.RED));
}
示例3: main
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
* Main method
*
* @param args
* ignored
* @throws IOException
* if image can't be loaded
*/
/**
 * Demonstrates Hough-transform line detection: finds the 50 strongest lines
 * in an edge-detected image, computes all pairwise intersections between
 * them, and renders the lines (red) and intersection points (green).
 *
 * @param args ignored
 * @throws IOException if image can't be loaded
 */
public static void main(String[] args) throws IOException {
    final FImage chessboard = ImageUtilities.readF(new URL("http://www.ugcs.caltech.edu/~rajan/REPORT/camera.jpg"));

    final HoughLines hlines = new HoughLines(1.f);
    chessboard.process(new CannyEdgeDetector()).analyseWith(hlines);
    final List<Line2d> lines = hlines.getBestLines(50);

    // Collect pairwise intersections. Iterate over unordered pairs (j > i)
    // so each intersection is recorded once — the original full cross
    // product added every intersection point twice.
    final List<Point2d> intersections = new ArrayList<Point2d>();
    for (int i = 0; i < lines.size(); i++) {
        for (int j = i + 1; j < lines.size(); j++) {
            final IntersectionResult intersect = lines.get(i).getIntersection(lines.get(j));
            if (intersect.type == IntersectionType.INTERSECTING) {
                intersections.add(intersect.intersectionPoint);
            }
        }
    }

    // draw result
    final MBFImage chessboardC = chessboard.toRGB();
    chessboardC.drawLines(lines, 1, RGBColour.RED);
    chessboardC.drawPoints(intersections, RGBColour.GREEN, 3);
    DisplayUtilities.display(chessboardC);
}
示例4: diff
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
 * Computes a denoised binary foreground mask from a background/foreground
 * image pair: pixels whose squared difference exceeds a threshold become
 * foreground (1), the rest background (0); the mask is then cleaned with
 * two passes of a 3x3 median filter to remove speckle noise.
 *
 * @param bg the background frame
 * @param fg the foreground frame; must have the same dimensions as {@code bg}
 * @return the binary mask converted to an RGB image
 * @throws IllegalArgumentException if the two images differ in size
 */
static MBFImage diff(FImage bg, FImage fg) {
    // Fail fast with a clear message instead of an opaque
    // ArrayIndexOutOfBoundsException deep inside the pixel loop.
    if (bg.getWidth() != fg.getWidth() || bg.getHeight() != fg.getHeight()) {
        throw new IllegalArgumentException("bg and fg must have the same dimensions: "
                + bg.getWidth() + "x" + bg.getHeight() + " vs " + fg.getWidth() + "x" + fg.getHeight());
    }

    // Squared-difference threshold below which a pixel counts as unchanged
    final double threshold = 0.03;

    final FImage mask = new FImage(bg.getWidth(), bg.getHeight());
    final float[][] maskPix = mask.pixels;
    final float[][] bgPix = bg.pixels;
    final float[][] fgPix = fg.pixels;
    for (int y = 0; y < mask.getHeight(); y++) {
        for (int x = 0; x < mask.getWidth(); x++) {
            final float d = bgPix[y][x] - fgPix[y][x];
            maskPix[y][x] = (d * d < threshold) ? 0 : 1;
        }
    }

    // Two median-filter passes clean isolated noise from the binary mask
    mask.processInplace(new MedianFilter(FilterSupport.createBlockSupport(3, 3)));
    mask.processInplace(new MedianFilter(FilterSupport.createBlockSupport(3, 3)));

    return mask.toRGB();
}
示例5: main
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
* @param args
* @throws IOException
*/
/**
 * Demonstrates fitting a multi-resolution Active Shape Model to a face
 * image from the IMM face database: the mean shape is projected into the
 * image as the initial guess, the model is fitted, and the initial shape
 * (red), fitted shape (green) and per-landmark normals (blue) are drawn.
 *
 * @param args ignored
 * @throws IOException if the dataset cannot be loaded
 */
public static void main(String[] args) throws IOException {
    final ShapeModelDataset<FImage> dataset = ShapeModelDatasets.loadASFDataset("/Users/jsh2/Downloads/imm_face_db",
            ImageUtilities.FIMAGE_READER);
    final PointListConnections connections = dataset.getConnections();

    // Scale factor passed to both the landmark-model factory and the
    // normal-line computation below
    final float sampleScale = 0.02f;
    final FNormalLandmarkModel.Factory landmarkFactory = new FNormalLandmarkModel.Factory(connections,
            FLineSampler.INTERPOLATED_DERIVATIVE, 5, 9, sampleScale);
    final MultiResolutionActiveShapeModel<FImage> asm = MultiResolutionActiveShapeModel.trainModel(3,
            new NumberComponentSelector(19), dataset, new PointDistributionModel.BoxConstraint(3), landmarkFactory);

    // Place the mean shape at (300, 300), scaled by 70, as the initial guess
    final Matrix pose = TransformUtilities.translateMatrix(300, 300).times(TransformUtilities.scaleMatrix(70, 70));
    PointList shape = asm.getPDM().getMean().transform(pose);

    final FImage img = dataset.get(0).secondObject();
    final MBFImage visualisation = img.toRGB();
    visualisation.drawLines(connections.getLines(shape), 1, RGBColour.RED);

    // Fit the model and time it
    final long start = System.currentTimeMillis();
    final IterationResult fitted = asm.fit(img, shape);
    final long end = System.currentTimeMillis();
    shape = fitted.shape;
    System.out.println(fitted.fit);
    System.out.println(end - start);

    visualisation.drawLines(connections.getLines(shape), 1, RGBColour.GREEN);

    // Draw the sampling normal at each landmark of the fitted shape
    final float shapeScale = shape.computeIntrinsicScale();
    for (final Point2d pt : shape) {
        final Line2d normal = connections.calculateNormalLine(pt, shape, sampleScale * shapeScale);
        if (normal != null) {
            visualisation.drawLine(normal, 1, RGBColour.BLUE);
        }
    }
    DisplayUtilities.display(visualisation);
}
示例6: extract
import org.openimaj.image.FImage; //导入方法依赖的package包/类
/**
 * Extracts one oriented gradient feature vector per dominant gradient
 * orientation of the given elliptical interest region. The ellipse content
 * is first sampled into a square rectified patch; dominant orientations
 * are estimated from the patch gradients, and a descriptor is built for
 * each, with gradient orientations rotated relative to it.
 *
 * @param image the image to sample from
 * @param ellipse the elliptical region of interest
 * @return one feature vector per detected dominant orientation
 */
public OrientedFeatureVector[] extract(FImage image, Ellipse ellipse) {
    final Matrix tf = ellipse.transformMatrix();
    final FImage patch = new FImage(patchSize, patchSize);
    // Fix: use float division. The original "patchSize / 2" divided two
    // ints before the float assignment, truncating for odd patch sizes and
    // skewing the normalised sampling grid off-centre.
    final float halfSize = patchSize / 2f;

    // Sample the ellipse content into the rectified patch: patch
    // coordinates are mapped to [-1, 1] and pushed through the ellipse
    // transform to find the source pixel (bilinear interpolation).
    for (int y = 0; y < patchSize; y++) {
        for (int x = 0; x < patchSize; x++) {
            final Point2dImpl pt = new Point2dImpl((x - halfSize) / halfSize, (y - halfSize) / halfSize);
            final Point2dImpl tpt = pt.transform(tf);
            patch.pixels[y][x] = image.getPixelInterpNative(tpt.x, tpt.y, 0);
        }
    }

    // Gradient magnitudes and orientations over the rectified patch
    final FImageGradients gmo = FImageGradients.getGradientMagnitudesAndOrientations(patch);

    final GradientScaleSpaceImageExtractorProperties<FImage> props = new GradientScaleSpaceImageExtractorProperties<FImage>();
    props.image = patch;
    props.magnitude = gmo.magnitudes;
    props.orientation = gmo.orientations;
    props.x = patch.width / 2;
    props.y = patch.height / 2;
    props.scale = patch.height / 2 / 3; // NOTE(review): intentional-looking integer division; heuristic unverified ("???" in original)
    final DominantOrientationExtractor doe = new DominantOrientationExtractor();
    final float[] oris = doe.extractFeatureRaw(props);

    // Debug visualisation of the detected dominant orientations
    // NOTE(review): consider removing this display call from production code
    final MBFImage p2 = patch.toRGB();
    for (final float o : oris) {
        p2.drawLine(p2.getWidth() / 2, p2.getHeight() / 2, o, 20, RGBColour.RED);
    }
    DisplayUtilities.display(p2);

    // Build one descriptor per dominant orientation
    final OrientedFeatureVector[] vectors = new OrientedFeatureVector[oris.length];
    for (int i = 0; i < oris.length; i++) {
        final float ori = oris[i];
        final GradientFeatureProvider provider = factory.newProvider();

        // Construct the feature, sampling every pixel in the patch.
        // Note: the descriptor is actually computed over a sub-patch; there
        // is a border used for oversampling and avoiding edge effects.
        final float overSample = provider.getOversamplingAmount();
        for (int y = 0; y < patchSize; y++) {
            final float yy = (y * (2 * overSample + 1) / patchSize) - overSample;
            for (int x = 0; x < patchSize; x++) {
                final float xx = (x * (2 * overSample + 1) / patchSize) - overSample;
                final float gradmag = gmo.magnitudes.pixels[y][x];
                final float gradori = gmo.orientations.pixels[y][x];
                // Rotate orientations relative to the dominant orientation
                provider.addSample(xx, yy, gradmag, gradori - ori);
            }
        }
        vectors[i] = provider.getFeatureVector();
    }
    return vectors;
}