本文整理汇总了Java中java.awt.image.renderable.RenderContext.getTransform方法的典型用法代码示例。如果您正苦于以下问题：Java RenderContext.getTransform方法的具体用法？Java RenderContext.getTransform怎么用？Java RenderContext.getTransform使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类java.awt.image.renderable.RenderContext
的用法示例。
在下文中一共展示了RenderContext.getTransform方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: createRendering
import java.awt.image.renderable.RenderContext; //导入方法依赖的package包/类
public RenderedImage createRendering(RenderContext rc) {
    // A degenerate (non-invertible) affine produces no output image.
    if (invAffine == null) return null;

    // Reuse the caller's hints, falling back to an empty set.
    RenderingHints rh = rc.getRenderingHints();
    if (rh == null) rh = new RenderingHints(null);

    // Map the area of interest back into our source's coordinate space.
    Shape aoi = rc.getAreaOfInterest();
    if (aoi != null)
        aoi = invAffine.createTransformedShape(aoi);

    // Fold our affine into the user-to-device transform and let the
    // source render with the combined context (it factors in our affine).
    AffineTransform usr2dev = rc.getTransform();
    usr2dev.concatenate(affine);
    return getSource().createRendering(new RenderContext(usr2dev, aoi, rh));
}
示例2: mapRenderContext
import java.awt.image.renderable.RenderContext; //导入方法依赖的package包/类
/**
 * Maps the output RenderContext into the RenderContext for the ith
 * source by appending a rotation to the user-to-device transform.
 * This method satisfies the implementation of CRIF.
 *
 * @param i             index of the source image (all sources map alike).
 * @param renderContext the render context applied to the operation.
 * @param paramBlock    parameters: [0]=x center, [1]=y center,
 *                      [2]=rotation angle in radians.
 * @param image         the RenderableImageOp that invoked this mapping.
 * @return a copy of {@code renderContext} whose transform includes
 *         the rotation.
 */
public RenderContext mapRenderContext(int i,
                                      RenderContext renderContext,
                                      ParameterBlock paramBlock,
                                      RenderableImage image) {
    // Build the rotation described by the parameter block.
    AffineTransform rotation = AffineTransform.getRotateInstance(
            paramBlock.getFloatParameter(2),   // angle (radians)
            paramBlock.getFloatParameter(0),   // x of rotation center
            paramBlock.getFloatParameter(1));  // y of rotation center

    // Never mutate the caller's context; work on a copy.
    RenderContext mapped = (RenderContext) renderContext.clone();
    mapped.concatenateTransform(rotation);
    return mapped;
}
示例3: mapRenderContext
import java.awt.image.renderable.RenderContext; //导入方法依赖的package包/类
/**
 * Maps the output RenderContext into the RenderContext for the ith
 * source by appending an axis-aligned scale to the user-to-device
 * transform. This method satisfies the implementation of CRIF.
 *
 * @param i             index of the source image (all sources map alike).
 * @param renderContext the render context applied to the operation.
 * @param paramBlock    parameters: [0]=x scale, [1]=y scale (doubles).
 * @param image         the RenderableImageOp that invoked this mapping.
 * @return a copy of {@code renderContext} whose transform includes
 *         the scale.
 */
public RenderContext mapRenderContext(int i,
                                      RenderContext renderContext,
                                      ParameterBlock paramBlock,
                                      RenderableImage image) {
    // Pure scale: equivalent to new AffineTransform(sx, 0, 0, sy, 0, 0).
    AffineTransform scaling = AffineTransform.getScaleInstance(
            paramBlock.getDoubleParameter(0),
            paramBlock.getDoubleParameter(1));

    // Never mutate the caller's context; work on a copy.
    RenderContext mapped = (RenderContext) renderContext.clone();
    mapped.concatenateTransform(scaling);
    return mapped;
}
示例4: mapRenderContext
import java.awt.image.renderable.RenderContext; //导入方法依赖的package包/类
/**
 * Maps the output RenderContext into the RenderContext for the ith
 * source by appending a scale-and-translate transform to the
 * user-to-device transform. This method satisfies the implementation
 * of CRIF.
 *
 * @param i             index of the source image (all sources map alike).
 * @param renderContext the render context applied to the operation.
 * @param paramBlock    parameters: [0]=x scale, [1]=y scale,
 *                      [2]=x translation, [3]=y translation (floats).
 * @param image         the RenderableImageOp that invoked this mapping.
 * @return a copy of {@code renderContext} whose transform includes
 *         the scale and translation.
 */
public RenderContext mapRenderContext(int i,
                                      RenderContext renderContext,
                                      ParameterBlock paramBlock,
                                      RenderableImage image) {
    // Build [sx 0 tx; 0 sy ty]: translation first, then axis scale.
    AffineTransform xform = AffineTransform.getTranslateInstance(
            paramBlock.getFloatParameter(2),
            paramBlock.getFloatParameter(3));
    xform.scale(paramBlock.getFloatParameter(0),
                paramBlock.getFloatParameter(1));

    // Never mutate the caller's context; work on a copy.
    RenderContext mapped = (RenderContext) renderContext.clone();
    mapped.concatenateTransform(xform);
    return mapped;
}
示例5: mapRenderContext
import java.awt.image.renderable.RenderContext; //导入方法依赖的package包/类
/**
 * Maps the output RenderContext into the RenderContext for the ith
 * source by appending an axis-aligned scale to the user-to-device
 * transform. This method satisfies the implementation of CRIF.
 *
 * @param i             index of the source image (all sources map alike).
 * @param renderContext the render context applied to the operation.
 * @param paramBlock    parameters: [0]=x scale, [1]=y scale (floats).
 * @param image         the RenderableImageOp that invoked this mapping.
 * @return a copy of {@code renderContext} whose transform includes
 *         the scale.
 */
public RenderContext mapRenderContext(int i,
                                      RenderContext renderContext,
                                      ParameterBlock paramBlock,
                                      RenderableImage image) {
    // Pure scale: equivalent to new AffineTransform(sx, 0, 0, sy, 0, 0).
    AffineTransform scaling = AffineTransform.getScaleInstance(
            paramBlock.getFloatParameter(0),
            paramBlock.getFloatParameter(1));

    // Never mutate the caller's context; work on a copy.
    RenderContext mapped = (RenderContext) renderContext.clone();
    mapped.concatenateTransform(scaling);
    return mapped;
}
示例6: createRendering
import java.awt.image.renderable.RenderContext; //导入方法依赖的package包/类
/**
 * Renders the source with a luminance-to-alpha conversion hint and
 * wraps the result so its data is in the alpha-convert color space.
 *
 * @param rc the render context (transform, area of interest, hints).
 * @return the alpha-converted rendering, or null when the source
 *         produces no image.
 */
public RenderedImage createRendering(RenderContext rc) {
    // Source gets my usr2dev transform.
    AffineTransform at = rc.getTransform();

    // Copy the hints before modifying them: getRenderingHints() returns
    // the caller's live hints object, so put() below would otherwise
    // leak the alpha-convert hint back into the caller's context.
    RenderingHints rh = rc.getRenderingHints();
    rh = (rh == null) ? new RenderingHints(null) : (RenderingHints) rh.clone();

    // If we didn't have an AOI, specify our bounds as the AOI.
    Shape aoi = rc.getAreaOfInterest();
    if (aoi == null) {
        aoi = getBounds2D();
    }

    // Ask the source to render already-converted (alpha) data.
    rh.put(RenderingHintsKeyExt.KEY_COLORSPACE,
           ColorSpaceHintKey.VALUE_COLORSPACE_ALPHA_CONVERT);

    RenderedImage ri =
        getSource().createRendering(new RenderContext(at, aoi, rh));
    if (ri == null)
        return null;

    CachableRed cr = RenderedImageCachableRed.wrap(ri);
    // If the source honored the hint we are done; otherwise convert here.
    Object val = cr.getProperty(ColorSpaceHintKey.PROPERTY_COLORSPACE);
    if (val == ColorSpaceHintKey.VALUE_COLORSPACE_ALPHA_CONVERT)
        return cr;
    return new FilterAsAlphaRed(cr);
}
示例7: createRendering
import java.awt.image.renderable.RenderContext; //导入方法依赖的package包/类
/**
 * Renders the source with an alpha-only color space hint and wraps the
 * result so only its alpha channel is used.
 *
 * @param rc the render context (transform, area of interest, hints).
 * @return the alpha-only rendering, or null when the source produces
 *         no image.
 */
public RenderedImage createRendering(RenderContext rc) {
    // Source gets my usr2dev transform.
    AffineTransform at = rc.getTransform();

    // Copy the hints before modifying them: getRenderingHints() returns
    // the caller's live hints object, so put() below would otherwise
    // leak the alpha hint back into the caller's context.
    RenderingHints rh = rc.getRenderingHints();
    rh = (rh == null) ? new RenderingHints(null) : (RenderingHints) rh.clone();

    // If we didn't have an AOI, specify our bounds as the AOI.
    Shape aoi = rc.getAreaOfInterest();
    if (aoi == null)
        aoi = getBounds2D();

    // We only want its alpha channel...
    rh.put(RenderingHintsKeyExt.KEY_COLORSPACE,
           ColorSpaceHintKey.VALUE_COLORSPACE_ALPHA);

    RenderedImage ri =
        getSource().createRendering(new RenderContext(at, aoi, rh));
    if (ri == null) {
        return null;
    }

    CachableRed cr = RenderedImageCachableRed.wrap(ri);
    // If the source honored the hint, its output is already alpha-only.
    Object val = cr.getProperty(ColorSpaceHintKey.PROPERTY_COLORSPACE);
    if (val == ColorSpaceHintKey.VALUE_COLORSPACE_ALPHA)
        return cr; // It listened to us...
    return new FilterAlphaRed(cr);
}
示例8: mapRenderContext
import java.awt.image.renderable.RenderContext; //导入方法依赖的package包/类
/**
 * Maps the output RenderContext into the RenderContext for the ith
 * source by appending an arbitrary affine transform supplied as the
 * first parameter. This method satisfies the implementation of CRIF.
 *
 * @param i             index of the source image (all sources map alike).
 * @param renderContext the render context applied to the operation.
 * @param paramBlock    parameters: [0]=the AffineTransform to append.
 * @param image         the RenderableImageOp that invoked this mapping.
 * @return a copy of {@code renderContext} whose transform includes
 *         the supplied affine.
 */
public RenderContext mapRenderContext(int i,
                                      RenderContext renderContext,
                                      ParameterBlock paramBlock,
                                      RenderableImage image) {
    // The single object parameter is the transform to append.
    AffineTransform xform =
        (AffineTransform) paramBlock.getObjectParameter(0);

    // Never mutate the caller's context; work on a copy.
    RenderContext mapped = (RenderContext) renderContext.clone();
    mapped.concatenateTransform(xform);
    return mapped;
}
示例9: mapRenderContext
import java.awt.image.renderable.RenderContext; //导入方法依赖的package包/类
/**
 * Maps the output RenderContext into the RenderContext for the ith
 * source by appending a translation to the user-to-device transform.
 * This method satisfies the implementation of CRIF.
 *
 * @param i             index of the source image (all sources map alike).
 * @param renderContext the render context applied to the operation.
 * @param paramBlock    parameters: [0]=x translation, [1]=y translation
 *                      (floats).
 * @param image         the RenderableImageOp that invoked this mapping.
 * @return a copy of {@code renderContext} whose transform includes
 *         the translation.
 */
public RenderContext mapRenderContext(int i,
                                      RenderContext renderContext,
                                      ParameterBlock paramBlock,
                                      RenderableImage image) {
    // Build the translation described by the parameter block.
    AffineTransform translation = AffineTransform.getTranslateInstance(
            paramBlock.getFloatParameter(0),
            paramBlock.getFloatParameter(1));

    // Never mutate the caller's context; work on a copy.
    RenderContext mapped = (RenderContext) renderContext.clone();
    mapped.concatenateTransform(translation);
    return mapped;
}
示例10: create
import java.awt.image.renderable.RenderContext; //导入方法依赖的package包/类
/**
 * Creates a <code>RenderedImage</code> from the renderable layer.
 *
 * @param renderContext The rendering information associated with
 *                      this rendering.
 * @param paramBlock    The parameters used to create the image.
 * @return A <code>RenderedImage</code> cropped to the rendered bounds.
 */
public RenderedImage create(RenderContext renderContext,
                            ParameterBlock paramBlock) {
    // Destination bounds in rendering-independent (user) coordinates.
    Rectangle2D userBounds = getBounds2D(paramBlock);

    // Project into rendered coordinates; any rotation or shear makes
    // the transformed bounding box cover extra data.
    AffineTransform usr2dev = renderContext.getTransform();
    Rectangle2D devBounds =
        usr2dev.createTransformedShape(userBounds).getBounds2D();

    // Guard against a degenerate (empty) rendered rectangle.
    if (devBounds.getWidth() < 1.0 || devBounds.getHeight() < 1.0) {
        devBounds.setRect(devBounds.getMinX(),
                          devBounds.getMinY(),
                          Math.max(devBounds.getWidth(), 1.0),
                          Math.max(devBounds.getHeight(), 1.0));
    }

    // Crop the rendered source to the computed rectangle.
    ParameterBlock cropParams = new ParameterBlock();
    cropParams.addSource(paramBlock.getRenderedSource(0));
    cropParams.set((float) devBounds.getMinX(), 0);
    cropParams.set((float) devBounds.getMinY(), 1);
    cropParams.set((float) devBounds.getWidth(), 2);
    cropParams.set((float) devBounds.getHeight(), 3);
    return JAI.create("crop", cropParams, renderContext.getRenderingHints());
}
示例11: createRendering
import java.awt.image.renderable.RenderContext; //导入方法依赖的package包/类
/**
 * Renders the turbulence pattern over the intersection of this
 * filter's bounds and the area of interest.
 *
 * @param rc the render context (transform, area of interest, hints).
 * @return the turbulence rendering, or null when the intersection or
 *         the device-space raster is empty.
 */
public RenderedImage createRendering(RenderContext rc){
    // Clip the user-space area of interest against our bounds.
    Shape aoi = rc.getAreaOfInterest();
    Rectangle2D aoiRect;
    if (aoi == null) {
        aoiRect = getBounds2D();
    } else {
        Rectangle2D bounds = getBounds2D();
        aoiRect = aoi.getBounds2D();
        if ( ! aoiRect.intersects(bounds) )
            return null;
        Rectangle2D.intersect(aoiRect, bounds, aoiRect);
    }

    // Compute the size of the raster image in device space.
    AffineTransform usr2dev = rc.getTransform();
    final Rectangle devRect =
        usr2dev.createTransformedShape(aoiRect).getBounds();
    if ((devRect.width <= 0) ||
        (devRect.height <= 0))
        return null;

    ColorSpace cs = getOperationColorSpace();

    // When stitching, pass a defensive copy of the tile region.
    Rectangle2D tile = stitched ? (Rectangle2D) region.clone() : null;

    // Pattern-space transform; falls back to identity when the device
    // transform cannot be inverted.
    AffineTransform patternTxf = new AffineTransform();
    try {
        patternTxf = usr2dev.createInverse();
    } catch (NoninvertibleTransformException e) {
        // non-invertible transform: keep the identity fallback
    }

    return new TurbulencePatternRed
        (baseFreqX, baseFreqY, numOctaves, seed, fractalNoise,
         tile, patternTxf, devRect, cs, true);
}
示例12: createRendering
import java.awt.image.renderable.RenderContext; //导入方法依赖的package包/类
/**
 * Renders the source over the intersection of this node's bounds and
 * the area of interest, then pads the result out to the requested
 * device-space region using {@code padMode}.
 *
 * @param rc the render context (transform, area of interest, hints).
 * @return the padded rendering, or null when the area of interest does
 *         not intersect this node's bounds.
 */
public RenderedImage createRendering(RenderContext rc) {
RenderingHints rh = rc.getRenderingHints();
if (rh == null) rh = new RenderingHints(null);
Filter src = getSource();
Shape aoi = rc.getAreaOfInterest();
if(aoi == null){
aoi = getBounds2D();
}
AffineTransform usr2dev = rc.getTransform();
// We only depend on our source for stuff that is inside
// our bounds and his bounds (remember our bounds may be
// tighter than his in one or both directions).
Rectangle2D srect = src.getBounds2D();
Rectangle2D rect = getBounds2D();
Rectangle2D arect = aoi.getBounds2D();
// System.out.println("Rects Src:" + srect +
// "My: " + rect +
// "AOI: " + arect);
// No overlap between the AOI and our bounds: nothing to render.
if ( ! arect.intersects(rect) )
return null;
Rectangle2D.intersect(arect, rect, arect);
RenderedImage ri = null;
if ( arect.intersects(srect) ) {
// Clone before intersecting in place: srect is owned by the source.
srect = (Rectangle2D)srect.clone();
Rectangle2D.intersect(srect, arect, srect);
RenderContext srcRC = new RenderContext(usr2dev, srect, rh);
ri = src.createRendering(srcRC);
// System.out.println("Pad filt: " + src + " R: " +
// src.getBounds2D());
}
// No source image so create a 1,1 transparent one...
if (ri == null)
ri = new BufferedImage(1, 1, BufferedImage.TYPE_INT_ARGB);
// org.apache.batik.test.gvt.ImageDisplay.showImage("Paded: ", ri);
// System.out.println("RI: " + ri + " R: " + srect);
CachableRed cr = GraphicsUtil.wrap(ri);
// Convert the rendered area to device space for the pad operation.
arect = usr2dev.createTransformedShape(arect).getBounds2D();
// System.out.println("Pad rect : " + arect);
// Use arect (my bounds intersect area of interest)
cr = new PadRed(cr, arect.getBounds(), padMode, rh);
return cr;
}
示例13: createRendering
import java.awt.image.renderable.RenderContext; //导入方法依赖的package包/类
/**
 * Renders the source clipped to {@code clipPath}: the clip shape is
 * rasterized into a grayscale mask and multiplied into the source's
 * alpha channel, then the result is padded back out to the device-space
 * area of interest.
 *
 * @param rc the render context (transform, area of interest, hints).
 * @return the clipped rendering, or null when the bounds, clip and area
 *         of interest have no common region or the source renders nothing.
 */
public RenderedImage createRendering(RenderContext rc) {
    AffineTransform usr2dev = rc.getTransform();

    // Just copy over the rendering hints.
    RenderingHints rh = rc.getRenderingHints();
    if (rh == null) rh = new RenderingHints(null);

    Shape aoi = rc.getAreaOfInterest();
    if (aoi == null) aoi = getBounds2D();

    // Intersect our bounds with the clip path and the area of
    // interest; an empty intersection means nothing to render.
    Rectangle2D rect = getBounds2D();
    Rectangle2D clipRect = clipPath.getBounds2D();
    Rectangle2D aoiRect = aoi.getBounds2D();
    if ( ! rect.intersects(clipRect) )
        return null;
    Rectangle2D.intersect(rect, clipRect, rect);
    if ( ! rect.intersects(aoiRect) )
        return null;
    Rectangle2D.intersect(rect, aoi.getBounds2D(), rect);

    Rectangle devR = usr2dev.createTransformedShape(rect).getBounds();
    if ((devR.width == 0) || (devR.height == 0))
        return null;

    // Rasterize the clip shape into a grayscale coverage mask.
    BufferedImage bi = new BufferedImage(devR.width, devR.height,
                                         BufferedImage.TYPE_BYTE_GRAY);
    Shape devShape = usr2dev.createTransformedShape(getClipPath());
    Rectangle devAOIR = usr2dev.createTransformedShape(aoi).getBounds();
    Graphics2D g2d = GraphicsUtil.createGraphics(bi, rh);
    g2d.translate(-devR.x, -devR.y);
    g2d.setPaint(Color.white);
    g2d.fill(devShape);
    g2d.dispose();

    // Render the source and multiply its alpha by the clip mask.
    RenderedImage ri =
        getSource().createRendering(new RenderContext(usr2dev, rect, rh));
    if (ri == null)
        return null;
    CachableRed cr = RenderedImageCachableRed.wrap(ri);
    CachableRed clipCr = new BufferedImageCachableRed(bi, devR.x, devR.y);
    CachableRed ret = new MultiplyAlphaRed(cr, clipCr);

    // Pad back out to the proper size...
    return new PadRed(ret, devAOIR, PadMode.ZERO_PAD, rh);
}
示例14: createRendering
import java.awt.image.renderable.RenderContext; //导入方法依赖的package包/类
/**
 * Create a RenderedImage that is filled with the current
 * flood fill paint.
 *
 * @param rc The current render context
 * @return A RenderedImage with the flood fill, or null when the area
 *         of interest does not intersect this filter's bounds
 */
public RenderedImage createRendering(RenderContext rc) {
    // User-to-device transform; treat a missing one as identity.
    AffineTransform usr2dev = rc.getTransform();
    if (usr2dev == null) {
        usr2dev = new AffineTransform();
    }

    Rectangle2D bounds = getBounds2D();

    // Clip the user-space area of interest against our bounds.
    Shape aoi = rc.getAreaOfInterest();
    Rectangle2D userAOI;
    if (aoi == null) {
        aoi = bounds;
        userAOI = bounds;
    } else {
        userAOI = aoi.getBounds2D();
        // No intersection with the area of interest: nothing to flood.
        if ( ! bounds.intersects(userAOI) )
            return null;
        Rectangle2D.intersect(bounds, userAOI, userAOI);
    }

    // Device-space pixels actually being rendered.
    final Rectangle renderedArea =
        usr2dev.createTransformedShape(userAOI).getBounds();
    if (renderedArea.width <= 0 || renderedArea.height <= 0) {
        return null;
    }

    // FloodRed advertises renderedArea as its bounds but will actually
    // provide flood data anywhere, so pad to pin down the extent.
    CachableRed flood = new FloodRed(renderedArea, getFloodPaint());
    return new PadRed(flood, renderedArea, PadMode.ZERO_PAD, null);
}
示例15: createRendering
import java.awt.image.renderable.RenderContext; //导入方法依赖的package包/类
/**
 * Renders this filter at a capped resolution: when the device-space
 * bounds exceed the filter resolution hints, the source is rendered
 * through a fixed-resolution intermediate image and scaled up to the
 * device transform; otherwise the source renders directly.
 *
 * @param renderContext the render context for this rendering.
 * @return the rendering, or null when the x resolution is &lt;= 0 or
 *         the y resolution is 0 (per the comments below, a zero
 *         resolution produces nothing).
 */
public RenderedImage createRendering(RenderContext renderContext) {
// Get user space to device space transform
AffineTransform usr2dev = renderContext.getTransform();
if(usr2dev == null){
usr2dev = new AffineTransform();
}
RenderingHints hints = renderContext.getRenderingHints();
// As per specification, a value of zero for the
// x-axis or y-axis causes the filter to produce
// nothing.
// The processing is done as follows:
// + if the x resolution is zero, this is a no-op
// else compute the x scale.
// + if the y resolution is zero, this is a no-op
// else compute the y resolution from the x scale
// and compute the corresponding y scale.
// + if the y or x scale is less than one, insert
// an AffineRable.
// Else, return the source as is.
int filterResolutionX = getFilterResolutionX();
int filterResolutionY = getFilterResolutionY();
// System.out.println("FilterResRable: " + filterResolutionX + "x" +
// filterResolutionY);
if ((filterResolutionX <= 0) || (filterResolutionY == 0))
return null;
// Find out the renderable area
Rectangle2D imageRect = getBounds2D();
Rectangle devRect;
devRect = usr2dev.createTransformedShape(imageRect).getBounds();
// Now, compare the devRect with the filter
// resolution hints
float scaleX = 1;
if(filterResolutionX < devRect.width)
scaleX = filterResolutionX / (float)devRect.width;
// A negative y resolution means "derive the y scale from the x scale".
float scaleY = 1;
if(filterResolutionY < 0)
scaleY = scaleX;
else if(filterResolutionY < devRect.height)
scaleY = filterResolutionY / (float)devRect.height;
// Only resample if either scaleX or scaleY is
// smaller than 1
if ((scaleX >= 1) && (scaleY >= 1))
return getSource().createRendering(renderContext);
// System.out.println("Using Fixed Resolution...");
// Using fixed resolution image since we need an image larger
// than this.
RenderedImage resRed = getResRed(hints);
float resScale = getResScale();
// Residual transform: the device transform's scale/shear divided by
// the fixed-resolution scale, with the translation kept as-is.
AffineTransform residualAT;
residualAT = new AffineTransform(usr2dev.getScaleX()/resScale,
usr2dev.getShearY()/resScale,
usr2dev.getShearX()/resScale,
usr2dev.getScaleY()/resScale,
usr2dev.getTranslateX(),
usr2dev.getTranslateY());
// org.ImageDisplay.showImage("AT: " + newUsr2Dev, result);
return new AffineRed(GraphicsUtil.wrap(resRed), residualAT, hints);
}