This article collects typical usage examples of the C++ method AffineTransform::inverse. If you are wondering what AffineTransform::inverse does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the enclosing class, AffineTransform.
The following shows 15 code examples of AffineTransform::inverse, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ examples.
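Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: check isInvertible() (or a returned Optional) before calling inverse(), then map a point through the inverse transform to go from device/parent space back into local space. The MiniAffine struct below is hypothetical and only stands in for AffineTransform; it is not Blink/WebKit's actual class, and its member layout (a, b, c, d, e, f) simply mirrors the usual 2x3 affine convention.

#include <cmath>
#include <cstdio>
#include <optional>

// Minimal stand-in for an affine transform: maps (x, y) to
// (a*x + c*y + e, b*x + d*y + f). Illustrative only, not Blink's AffineTransform.
struct MiniAffine {
    double a = 1, b = 0, c = 0, d = 1, e = 0, f = 0;

    double det() const { return a * d - b * c; }
    bool isInvertible() const { return std::abs(det()) > 1e-12; }

    // Returns the inverse transform, or std::nullopt if the matrix is singular.
    std::optional<MiniAffine> inverse() const {
        if (!isInvertible())
            return std::nullopt;
        double inv = 1.0 / det();
        MiniAffine r;
        r.a =  d * inv;  r.b = -b * inv;
        r.c = -c * inv;  r.d =  a * inv;
        r.e = (c * f - d * e) * inv;
        r.f = (b * e - a * f) * inv;
        return r;
    }

    // Maps the point (x, y) through this transform.
    void mapPoint(double x, double y, double* ox, double* oy) const {
        *ox = a * x + c * y + e;
        *oy = b * x + d * y + f;
    }
};

int main() {
    MiniAffine scale;            // scale by 2 in x, 3 in y
    scale.a = 2;
    scale.d = 3;

    // Typical pattern from the examples: bail out if the transform is not
    // invertible, otherwise map a device-space point back into local space.
    if (auto inv = scale.inverse()) {
        double lx, ly;
        inv->mapPoint(10, 9, &lx, &ly);
        std::printf("local point: (%g, %g)\n", lx, ly); // prints (5, 3)
    }
    return 0;
}

Built as a standalone C++17 program, this prints "local point: (5, 3)", i.e. the device point mapped back through the inverse of the scale.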
Example 1: setupNonScalingStrokeContext
static bool setupNonScalingStrokeContext(AffineTransform& strokeTransform, GraphicsContextStateSaver& stateSaver)
{
    if (!strokeTransform.isInvertible())
        return false;

    stateSaver.save();
    stateSaver.context()->concatCTM(strokeTransform.inverse());
    return true;
}
Example 2: transformToUserSpaceAndCheckClipping
bool SVGLayoutSupport::transformToUserSpaceAndCheckClipping(
    const LayoutObject& object,
    const AffineTransform& localTransform,
    const FloatPoint& pointInParent,
    FloatPoint& localPoint) {
  if (!localTransform.isInvertible())
    return false;
  localPoint = localTransform.inverse().mapPoint(pointInParent);
  return pointInClippingArea(object, localPoint);
}
Example 3: setupNonScalingStrokeContext
bool RenderSVGShape::setupNonScalingStrokeContext(AffineTransform& strokeTransform, GraphicsContextStateSaver& stateSaver)
{
    Optional<AffineTransform> inverse = strokeTransform.inverse();
    if (!inverse)
        return false;

    stateSaver.save();
    stateSaver.context()->concatCTM(inverse.value());
    return true;
}
Example 4: paintFilteredContent
static void paintFilteredContent(const LayoutObject& object, GraphicsContext& context, FilterData* filterData)
{
    ASSERT(filterData->m_state == FilterData::ReadyToPaint);
    ASSERT(filterData->filter->sourceGraphic());

    filterData->m_state = FilterData::PaintingFilter;

    SkiaImageFilterBuilder builder;
    RefPtr<SkImageFilter> imageFilter = builder.build(filterData->filter->lastEffect(), ColorSpaceDeviceRGB);
    FloatRect boundaries = filterData->filter->filterRegion();
    context.save();

    // Clip drawing of filtered image to the minimum required paint rect.
    FilterEffect* lastEffect = filterData->filter->lastEffect();
    context.clipRect(lastEffect->determineAbsolutePaintRect(lastEffect->maxEffectRect()));

#ifdef CHECK_CTM_FOR_TRANSFORMED_IMAGEFILTER
    // TODO: Remove this workaround once skew/rotation support is added in Skia
    // (https://code.google.com/p/skia/issues/detail?id=3288, crbug.com/446935).
    // If the CTM contains rotation or shearing, apply the filter to
    // the unsheared/unrotated matrix, and do the shearing/rotation
    // as a final pass.
    AffineTransform ctm = SVGLayoutSupport::deprecatedCalculateTransformToLayer(&object);
    if (ctm.b() || ctm.c()) {
        AffineTransform scaleAndTranslate;
        scaleAndTranslate.translate(ctm.e(), ctm.f());
        scaleAndTranslate.scale(ctm.xScale(), ctm.yScale());
        ASSERT(scaleAndTranslate.isInvertible());
        AffineTransform shearAndRotate = scaleAndTranslate.inverse();
        shearAndRotate.multiply(ctm);
        context.concatCTM(shearAndRotate.inverse());
        imageFilter = builder.buildTransform(shearAndRotate, imageFilter.get());
    }
#endif

    context.beginLayer(1, SkXfermode::kSrcOver_Mode, &boundaries, ColorFilterNone, imageFilter.get());
    context.endLayer();
    context.restore();

    filterData->m_state = FilterData::ReadyToPaint;
}
Example 5: point
bool CanvasRenderingContext2D::isPointInPath(const float x, const float y)
{
    GraphicsContext* c = drawingContext();
    if (!c)
        return false;
    if (!state().m_invertibleCTM)
        return false;

    FloatPoint point(x, y);
    AffineTransform ctm = state().m_transform;
    FloatPoint transformedPoint = ctm.inverse().mapPoint(point);
    return m_path.contains(transformedPoint);
}
Example 6: drawDeferredFilter
static void drawDeferredFilter(GraphicsContext* context, FilterData* filterData, SVGFilterElement* filterElement)
{
    SkiaImageFilterBuilder builder(context);
    SourceGraphic* sourceGraphic = static_cast<SourceGraphic*>(filterData->builder->getEffectById(SourceGraphic::effectName()));
    ASSERT(sourceGraphic);
    builder.setSourceGraphic(sourceGraphic);
    RefPtr<ImageFilter> imageFilter = builder.build(filterData->builder->lastEffect(), ColorSpaceDeviceRGB);
    FloatRect boundaries = filterData->boundaries;
    context->save();

    // Clip drawing of filtered image to the minimum required paint rect.
    FilterEffect* lastEffect = filterData->builder->lastEffect();
    context->clipRect(lastEffect->determineAbsolutePaintRect(lastEffect->maxEffectRect()));

    if (filterElement->hasAttribute(SVGNames::filterResAttr)) {
        // Get boundaries in device coords.
        // FIXME: See crbug.com/382491. Is the use of getCTM OK here, given it does not include device
        // zoom or High DPI adjustments?
        FloatSize size = context->getCTM().mapSize(boundaries.size());
        // Compute the scale amount required so that the resulting offscreen is exactly filterResX by filterResY pixels.
        float filterResScaleX = filterElement->filterResX()->currentValue()->value() / size.width();
        float filterResScaleY = filterElement->filterResY()->currentValue()->value() / size.height();
        // Scale the CTM so the primitive is drawn to filterRes.
        context->scale(filterResScaleX, filterResScaleY);
        // Create a resize filter with the inverse scale.
        AffineTransform resizeMatrix;
        resizeMatrix.scale(1 / filterResScaleX, 1 / filterResScaleY);
        imageFilter = builder.buildTransform(resizeMatrix, imageFilter.get());
    }

    // See crbug.com/382491.
    if (!RuntimeEnabledFeatures::slimmingPaintEnabled()) {
        // If the CTM contains rotation or shearing, apply the filter to
        // the unsheared/unrotated matrix, and do the shearing/rotation
        // as a final pass.
        AffineTransform ctm = context->getCTM();
        if (ctm.b() || ctm.c()) {
            AffineTransform scaleAndTranslate;
            scaleAndTranslate.translate(ctm.e(), ctm.f());
            scaleAndTranslate.scale(ctm.xScale(), ctm.yScale());
            ASSERT(scaleAndTranslate.isInvertible());
            AffineTransform shearAndRotate = scaleAndTranslate.inverse();
            shearAndRotate.multiply(ctm);
            context->setCTM(scaleAndTranslate);
            imageFilter = builder.buildTransform(shearAndRotate, imageFilter.get());
        }
    }

    context->beginLayer(1, CompositeSourceOver, &boundaries, ColorFilterNone, imageFilter.get());
    context->endLayer();
    context->restore();
}
Example 7: point
bool CanvasRenderingContext2D::isPointInPath(const float x, const float y)
{
    GraphicsContext* c = drawingContext();
    if (!c)
        return false;

    FloatPoint point(x, y);

    // We have to invert the current transform to ensure we correctly handle the
    // transforms applied to the current path.
    AffineTransform ctm = state().m_transform;
    if (!ctm.isInvertible())
        return false;

    FloatPoint transformedPoint = ctm.inverse().mapPoint(point);
    return m_path.contains(transformedPoint);
}
Example 8: nodeAtPoint
bool RenderSVGText::nodeAtPoint(const HitTestRequest& request, HitTestResult& result, int _x, int _y, int _tx, int _ty, HitTestAction hitTestAction)
{
    PointerEventsHitRules hitRules(PointerEventsHitRules::SVG_TEXT_HITTESTING, style()->svgStyle()->pointerEvents());
    bool isVisible = (style()->visibility() == VISIBLE);
    if (isVisible || !hitRules.requireVisible) {
        if ((hitRules.canHitStroke && (style()->svgStyle()->hasStroke() || !hitRules.requireStroke))
            || (hitRules.canHitFill && (style()->svgStyle()->hasFill() || !hitRules.requireFill))) {
            AffineTransform totalTransform = absoluteTransform();
            double localX, localY;
            totalTransform.inverse().map(_x, _y, &localX, &localY);

            FloatPoint hitPoint(_x, _y);
            return RenderBlock::nodeAtPoint(request, result, (int)localX, (int)localY, _tx, _ty, hitTestAction);
        }
    }

    return false;
}
Example 9: clipToImageBuffer
void SVGRenderingContext::clipToImageBuffer(GraphicsContext& context, const AffineTransform& absoluteTransform, const FloatRect& targetRect, std::unique_ptr<ImageBuffer>& imageBuffer, bool safeToClear)
{
    if (!imageBuffer)
        return;

    FloatRect absoluteTargetRect = calculateImageBufferRect(targetRect, absoluteTransform);

    // The mask image has been created in the absolute coordinate space, as the image should not be scaled.
    // So the actual masking process has to be done in the absolute coordinate space as well.
    context.concatCTM(absoluteTransform.inverse().valueOr(AffineTransform()));
    context.clipToImageBuffer(*imageBuffer, absoluteTargetRect);
    context.concatCTM(absoluteTransform);

    // When nesting resources, with objectBoundingBox as content unit types, there's no use in caching the
    // resulting image buffer as the parent resource already caches the result.
    if (safeToClear && !currentContentTransformation().isIdentity())
        imageBuffer.reset();
}
Example 10: addEllipse
void Path::addEllipse(const FloatPoint& p, float radiusX, float radiusY, float rotation, float startAngle, float endAngle, bool anticlockwise)
{
    ASSERT(ellipseIsRenderable(startAngle, endAngle));
    ASSERT(startAngle >= 0 && startAngle < twoPiFloat);
    ASSERT((anticlockwise && (startAngle - endAngle) >= 0) || (!anticlockwise && (endAngle - startAngle) >= 0));

    if (!rotation) {
        addEllipse(FloatPoint(p.x(), p.y()), radiusX, radiusY, startAngle, endAngle, anticlockwise);
        return;
    }

    // Add an arc after the relevant transform.
    AffineTransform ellipseTransform = AffineTransform::translation(p.x(), p.y()).rotateRadians(rotation);
    ASSERT(ellipseTransform.isInvertible());
    AffineTransform inverseEllipseTransform = ellipseTransform.inverse();
    transform(inverseEllipseTransform);
    addEllipse(FloatPoint::zero(), radiusX, radiusY, startAngle, endAngle, anticlockwise);
    transform(ellipseTransform);
}
Example 11: drawingContext
void CanvasRenderingContext2D::setTransform(float m11, float m12, float m21, float m22, float dx, float dy)
{
    GraphicsContext* c = drawingContext();
    if (!c)
        return;

    if (!isfinite(m11) | !isfinite(m21) | !isfinite(dx) |
        !isfinite(m12) | !isfinite(m22) | !isfinite(dy))
        return;

    AffineTransform ctm = state().m_transform;
    if (!ctm.isInvertible())
        return;

    c->concatCTM(c->getCTM().inverse());
    c->concatCTM(canvas()->baseTransform());
    state().m_transform.multiply(ctm.inverse());
    m_path.transform(ctm);

    state().m_invertibleCTM = true;
    transform(m11, m12, m21, m22, dx, dy);
}
Example 12: nodeAtFloatPoint
bool RenderSVGForeignObject::nodeAtFloatPoint(const HitTestRequest& request, HitTestResult& result, const FloatPoint& pointInParent, HitTestAction hitTestAction)
{
    // Embedded content is drawn in the foreground phase.
    if (hitTestAction != HitTestForeground)
        return false;

    AffineTransform localTransform = this->localTransform();
    if (!localTransform.isInvertible())
        return false;

    FloatPoint localPoint = localTransform.inverse().mapPoint(pointInParent);

    // Early exit if local point is not contained in clipped viewport area
    if (SVGRenderSupport::isOverflowHidden(this) && !m_viewport.contains(localPoint))
        return false;

    // FOs establish a stacking context, so we need to hit-test all layers.
    HitTestLocation hitTestLocation(roundedLayoutPoint(localPoint));
    return RenderBlock::nodeAtPoint(request, result, hitTestLocation, LayoutPoint(), HitTestForeground)
        || RenderBlock::nodeAtPoint(request, result, hitTestLocation, LayoutPoint(), HitTestFloat)
        || RenderBlock::nodeAtPoint(request, result, hitTestLocation, LayoutPoint(), HitTestChildBlockBackgrounds);
}
Example 13: prepareFilterEffect
bool FilterEffectRendererHelper::prepareFilterEffect(RenderLayer* renderLayer, const LayoutRect& filterBoxRect, const LayoutRect& dirtyRect, const LayoutRect& layerRepaintRect)
{
    ASSERT(m_haveFilterEffect && renderLayer->filterRenderer());
    m_renderLayer = renderLayer;
    m_repaintRect = dirtyRect;

    FilterEffectRenderer* filter = renderLayer->filterRenderer();
    LayoutRect filterSourceRect = filter->computeSourceImageRectForDirtyRect(filterBoxRect, dirtyRect);

    if (filterSourceRect.isEmpty()) {
        // The dirty rect is not in view, just bail out.
        m_haveFilterEffect = false;
        return false;
    }

    // Get the zoom factor to scale the filterSourceRect input
    const RenderLayerModelObject* renderer = renderLayer->renderer();
    const RenderStyle* style = renderer ? renderer->style() : 0;
    float zoom = style ? style->effectiveZoom() : 1.0f;

    AffineTransform absoluteTransform;
    absoluteTransform.translate(filterBoxRect.x(), filterBoxRect.y());
    filter->setAbsoluteTransform(absoluteTransform);
    filter->setAbsoluteFilterRegion(AffineTransform().scale(zoom).mapRect(filterSourceRect));
    filter->setFilterRegion(absoluteTransform.inverse().mapRect(filterSourceRect));
    filter->lastEffect()->determineFilterPrimitiveSubregion();

    bool hasUpdatedBackingStore = filter->updateBackingStoreRect(filterSourceRect);
    if (filter->hasFilterThatMovesPixels()) {
        if (hasUpdatedBackingStore)
            m_repaintRect = filterSourceRect;
        else {
            m_repaintRect.unite(layerRepaintRect);
            m_repaintRect.intersect(filterSourceRect);
        }
    }
    return true;
}
Example 14: calculateStrokeBoundingBox
FloatRect LayoutSVGShape::calculateStrokeBoundingBox() const
{
    ASSERT(m_path);
    FloatRect strokeBoundingBox = m_fillBoundingBox;

    if (style()->svgStyle().hasStroke()) {
        StrokeData strokeData;
        SVGLayoutSupport::applyStrokeStyleToStrokeData(strokeData, styleRef(), *this);
        if (hasNonScalingStroke()) {
            AffineTransform nonScalingTransform = nonScalingStrokeTransform();
            if (nonScalingTransform.isInvertible()) {
                Path* usePath = nonScalingStrokePath(m_path.get(), nonScalingTransform);
                FloatRect strokeBoundingRect = usePath->strokeBoundingRect(strokeData);
                strokeBoundingRect = nonScalingTransform.inverse().mapRect(strokeBoundingRect);
                strokeBoundingBox.unite(strokeBoundingRect);
            }
        } else {
            strokeBoundingBox.unite(path().strokeBoundingRect(strokeData));
        }
    }

    return strokeBoundingBox;
}
Example 15:
SDFRigidTransform::SDFRigidTransform(const AffineTransform &t, const SDFNodePtr &n)
    // Store the inverse so that world-space points can be mapped into this node's local space.
    : _worldToLocal(t.inverse())
{
    this->add(n);
}