

C++ AffineTransform::isInvertible Method Code Examples

This article collects typical usage examples of the C++ method AffineTransform::isInvertible, gathered from open-source projects. If you are unsure what AffineTransform::isInvertible does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse further usage examples of the AffineTransform class to which this method belongs.


A total of 15 code examples of AffineTransform::isInvertible are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better C++ code examples.
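All of the examples share the same defensive pattern: a transform is only inverted after isInvertible() confirms that it actually has an inverse (a singular transform, such as one containing a zero scale, does not). The minimal sketch below, modeled on Example 1 and using only the AffineTransform calls that appear in these examples (isInvertible, inverse, mapPoint), distills that pattern; the helper name is illustrative and does not come from any of the projects listed.

// Minimal sketch of the common guard pattern shown in the examples below.
// Assumes a WebKit/Blink-style AffineTransform with isInvertible(), inverse()
// and mapPoint(); the function name is illustrative only.
static bool mapThroughInverse(const AffineTransform& transform,
                              const FloatPoint& pointInParent,
                              FloatPoint& localPoint)
{
    // A singular transform (e.g. a zero scale) cannot be inverted; bail out
    // early instead of calling inverse() on it.
    if (!transform.isInvertible())
        return false;
    localPoint = transform.inverse().mapPoint(pointInParent);
    return true;
}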

Example 1: transformToUserSpaceAndCheckClipping

bool SVGLayoutSupport::transformToUserSpaceAndCheckClipping(LayoutObject* object, const AffineTransform& localTransform, const FloatPoint& pointInParent, FloatPoint& localPoint)
{
    if (!localTransform.isInvertible())
        return false;
    localPoint = localTransform.inverse().mapPoint(pointInParent);
    return pointInClippingArea(object, localPoint);
}
Developer ID: kingysu, Project: blink-crosswalk, Lines: 7, Source: SVGLayoutSupport.cpp

Example 2: nodeAtFloatPoint

bool LayoutSVGForeignObject::nodeAtFloatPoint(HitTestResult& result,
                                              const FloatPoint& pointInParent,
                                              HitTestAction hitTestAction) {
  // Embedded content is drawn in the foreground phase.
  if (hitTestAction != HitTestForeground)
    return false;

  AffineTransform localTransform = this->localSVGTransform();
  if (!localTransform.isInvertible())
    return false;

  FloatPoint localPoint = localTransform.inverse().mapPoint(pointInParent);

  // Early exit if local point is not contained in clipped viewport area
  if (SVGLayoutSupport::isOverflowHidden(this) &&
      !m_viewport.contains(localPoint))
    return false;

  // FOs establish a stacking context, so we need to hit-test all layers.
  HitTestLocation hitTestLocation(localPoint);
  return LayoutBlock::nodeAtPoint(result, hitTestLocation, LayoutPoint(),
                                  HitTestForeground) ||
         LayoutBlock::nodeAtPoint(result, hitTestLocation, LayoutPoint(),
                                  HitTestFloat) ||
         LayoutBlock::nodeAtPoint(result, hitTestLocation, LayoutPoint(),
                                  HitTestChildBlockBackgrounds);
}
Developer ID: mirror, Project: chromium, Lines: 27, Source: LayoutSVGForeignObject.cpp

Example 3: paintContents

void OpaqueRectTrackingContentLayerDelegate::paintContents(SkCanvas* canvas, const WebRect& clip, bool canPaintLCDText, WebFloatRect& opaque)
{
    static const unsigned char* annotationsEnabled = 0;
    if (UNLIKELY(!annotationsEnabled))
        annotationsEnabled = EventTracer::getTraceCategoryEnabledFlag(TRACE_DISABLED_BY_DEFAULT("blink.graphics_context_annotations"));

    GraphicsContext context(canvas);
    context.setTrackOpaqueRegion(!m_opaque);
    context.setCertainlyOpaque(m_opaque);
    context.setShouldSmoothFonts(canPaintLCDText);

    if (*annotationsEnabled)
        context.setAnnotationMode(AnnotateAll);

    // Record transform prior to painting, as all opaque tracking will be
    // relative to this current value.
    AffineTransform canvasToContentTransform = context.getCTM().inverse();

    m_painter->paint(context, clip);

    // Transform tracked opaque paints back to our layer's content space.
    ASSERT(canvasToContentTransform.isInvertible());
    ASSERT(canvasToContentTransform.preservesAxisAlignment());
    opaque = canvasToContentTransform.mapRect(context.opaqueRegion().asRect());
}
Developer ID: Igalia, Project: blink, Lines: 25, Source: OpaqueRectTrackingContentLayerDelegate.cpp

Example 4: hitTestClipContent

bool LayoutSVGResourceClipper::hitTestClipContent(const FloatRect& objectBoundingBox, const FloatPoint& nodeAtPoint)
{
    FloatPoint point = nodeAtPoint;
    if (!SVGLayoutSupport::pointInClippingArea(this, point))
        return false;

    if (clipPathUnits() == SVGUnitTypes::SVG_UNIT_TYPE_OBJECTBOUNDINGBOX) {
        AffineTransform transform;
        transform.translate(objectBoundingBox.x(), objectBoundingBox.y());
        transform.scaleNonUniform(objectBoundingBox.width(), objectBoundingBox.height());
        point = transform.inverse().mapPoint(point);
    }

    AffineTransform animatedLocalTransform = toSVGClipPathElement(element())->calculateAnimatedLocalTransform();
    if (!animatedLocalTransform.isInvertible())
        return false;

    point = animatedLocalTransform.inverse().mapPoint(point);

    for (SVGElement* childElement = Traversal<SVGElement>::firstChild(*element()); childElement; childElement = Traversal<SVGElement>::nextSibling(*childElement)) {
        LayoutObject* layoutObject = childElement->layoutObject();
        if (!layoutObject)
            continue;
        if (!layoutObject->isSVGShape() && !layoutObject->isSVGText() && !isSVGUseElement(*childElement))
            continue;
        IntPoint hitPoint;
        HitTestResult result(HitTestRequest::SVGClipContent, hitPoint);
        if (layoutObject->nodeAtFloatPoint(result, point, HitTestForeground))
            return true;
    }

    return false;
}
Developer ID: howardroark2018, Project: chromium, Lines: 33, Source: LayoutSVGResourceClipper.cpp

Example 5: addEllipse

void Path::addEllipse(const FloatPoint& p,
                      float radiusX,
                      float radiusY,
                      float rotation,
                      float startAngle,
                      float endAngle,
                      bool anticlockwise) {
  ASSERT(ellipseIsRenderable(startAngle, endAngle));
  ASSERT(startAngle >= 0 && startAngle < twoPiFloat);
  ASSERT((anticlockwise && (startAngle - endAngle) >= 0) ||
         (!anticlockwise && (endAngle - startAngle) >= 0));

  if (!rotation) {
    addEllipse(FloatPoint(p.x(), p.y()), radiusX, radiusY, startAngle, endAngle,
               anticlockwise);
    return;
  }

  // Add an arc after the relevant transform.
  AffineTransform ellipseTransform =
      AffineTransform::translation(p.x(), p.y()).rotateRadians(rotation);
  ASSERT(ellipseTransform.isInvertible());
  AffineTransform inverseEllipseTransform = ellipseTransform.inverse();
  transform(inverseEllipseTransform);
  addEllipse(FloatPoint::zero(), radiusX, radiusY, startAngle, endAngle,
             anticlockwise);
  transform(ellipseTransform);
}
Developer ID: mirror, Project: chromium, Lines: 28, Source: Path.cpp

Example 6: drawDeferredFilter

static void drawDeferredFilter(GraphicsContext* context, FilterData* filterData, SVGFilterElement* filterElement)
{
    SkiaImageFilterBuilder builder(context);
    SourceGraphic* sourceGraphic = static_cast<SourceGraphic*>(filterData->builder->getEffectById(SourceGraphic::effectName()));
    ASSERT(sourceGraphic);
    builder.setSourceGraphic(sourceGraphic);
    RefPtr<ImageFilter> imageFilter = builder.build(filterData->builder->lastEffect(), ColorSpaceDeviceRGB);
    FloatRect boundaries = filterData->boundaries;
    context->save();

    FloatSize deviceSize = context->getCTM().mapSize(boundaries.size());
    float scaledArea = deviceSize.width() * deviceSize.height();

    // If area of scaled size is bigger than the upper limit, adjust the scale
    // to fit. Note that this only really matters in the non-impl-side painting
    // case, since the impl-side case never allocates a full-sized backing
    // store, only tile-sized.
    // FIXME: remove this once all platforms are using impl-side painting.
    // crbug.com/169282.
    if (scaledArea > FilterEffect::maxFilterArea()) {
        float scale = sqrtf(FilterEffect::maxFilterArea() / scaledArea);
        context->scale(scale, scale);
    }
    // Clip drawing of filtered image to the minimum required paint rect.
    FilterEffect* lastEffect = filterData->builder->lastEffect();
    context->clipRect(lastEffect->determineAbsolutePaintRect(lastEffect->maxEffectRect()));
    if (filterElement->hasAttribute(SVGNames::filterResAttr)) {
        // Get boundaries in device coords.
        // FIXME: See crbug.com/382491. Is the use of getCTM OK here, given it does not include device
        // zoom or High DPI adjustments?
        FloatSize size = context->getCTM().mapSize(boundaries.size());
        // Compute the scale amount required so that the resulting offscreen is exactly filterResX by filterResY pixels.
        float filterResScaleX = filterElement->filterResX()->currentValue()->value() / size.width();
        float filterResScaleY = filterElement->filterResY()->currentValue()->value() / size.height();
        // Scale the CTM so the primitive is drawn to filterRes.
        context->scale(filterResScaleX, filterResScaleY);
        // Create a resize filter with the inverse scale.
        AffineTransform resizeMatrix;
        resizeMatrix.scale(1 / filterResScaleX, 1 / filterResScaleY);
        imageFilter = builder.buildTransform(resizeMatrix, imageFilter.get());
    }
    // If the CTM contains rotation or shearing, apply the filter to
    // the unsheared/unrotated matrix, and do the shearing/rotation
    // as a final pass.
    AffineTransform ctm = context->getCTM();
    if (ctm.b() || ctm.c()) {
        AffineTransform scaleAndTranslate;
        scaleAndTranslate.translate(ctm.e(), ctm.f());
        scaleAndTranslate.scale(ctm.xScale(), ctm.yScale());
        ASSERT(scaleAndTranslate.isInvertible());
        AffineTransform shearAndRotate = scaleAndTranslate.inverse();
        shearAndRotate.multiply(ctm);
        context->setCTM(scaleAndTranslate);
        imageFilter = builder.buildTransform(shearAndRotate, imageFilter.get());
    }
    context->beginLayer(1, CompositeSourceOver, &boundaries, ColorFilterNone, imageFilter.get());
    context->endLayer();
    context->restore();
}
Developer ID: 335969568, Project: Blink-1, Lines: 59, Source: RenderSVGResourceFilter.cpp

Example 7: setupNonScalingStrokeContext

static bool setupNonScalingStrokeContext(AffineTransform& strokeTransform, GraphicsContextStateSaver& stateSaver)
{
    if (!strokeTransform.isInvertible())
        return false;

    stateSaver.save();
    stateSaver.context().concatCTM(strokeTransform.inverse());
    return true;
}
Developer ID: astojilj, Project: chromium-crosswalk, Lines: 9, Source: SVGShapePainter.cpp

Example 8: point

bool CanvasRenderingContext2D::isPointInPath(const float x, const float y)
{
    GraphicsContext* c = drawingContext();
    if (!c)
        return false;
    FloatPoint point(x, y);
    // We have to invert the current transform to ensure we correctly handle the
    // transforms applied to the current path.
    AffineTransform ctm = state().m_transform;
    if (!ctm.isInvertible())
        return false;
    FloatPoint transformedPoint = ctm.inverse().mapPoint(point);
    return m_path.contains(transformedPoint);
}
Developer ID: Gin-Rye, Project: duibrowser, Lines: 14, Source: CanvasRenderingContext2D.cpp

Example 9: paintFilteredContent

static void paintFilteredContent(const LayoutObject& object, GraphicsContext& context, FilterData* filterData)
{
    ASSERT(filterData->m_state == FilterData::ReadyToPaint);
    ASSERT(filterData->filter->sourceGraphic());

    filterData->m_state = FilterData::PaintingFilter;

    SkiaImageFilterBuilder builder;
    RefPtr<SkImageFilter> imageFilter = builder.build(filterData->filter->lastEffect(), ColorSpaceDeviceRGB);
    FloatRect boundaries = filterData->filter->filterRegion();
    context.save();

    // Clip drawing of filtered image to the minimum required paint rect.
    FilterEffect* lastEffect = filterData->filter->lastEffect();
    context.clipRect(lastEffect->determineAbsolutePaintRect(lastEffect->maxEffectRect()));

#ifdef CHECK_CTM_FOR_TRANSFORMED_IMAGEFILTER
    // TODO: Remove this workaround once skew/rotation support is added in Skia
    // (https://code.google.com/p/skia/issues/detail?id=3288, crbug.com/446935).
    // If the CTM contains rotation or shearing, apply the filter to
    // the unsheared/unrotated matrix, and do the shearing/rotation
    // as a final pass.
    AffineTransform ctm = SVGLayoutSupport::deprecatedCalculateTransformToLayer(&object);
    if (ctm.b() || ctm.c()) {
        AffineTransform scaleAndTranslate;
        scaleAndTranslate.translate(ctm.e(), ctm.f());
        scaleAndTranslate.scale(ctm.xScale(), ctm.yScale());
        ASSERT(scaleAndTranslate.isInvertible());
        AffineTransform shearAndRotate = scaleAndTranslate.inverse();
        shearAndRotate.multiply(ctm);
        context.concatCTM(shearAndRotate.inverse());
        imageFilter = builder.buildTransform(shearAndRotate, imageFilter.get());
    }
#endif

    context.beginLayer(1, SkXfermode::kSrcOver_Mode, &boundaries, ColorFilterNone, imageFilter.get());
    context.endLayer();
    context.restore();

    filterData->m_state = FilterData::ReadyToPaint;
}
Developer ID: joone, Project: chromium-crosswalk, Lines: 41, Source: SVGFilterPainter.cpp

Example 10: drawingContext

void CanvasRenderingContext2D::setTransform(float m11, float m12, float m21, float m22, float dx, float dy)
{
    GraphicsContext* c = drawingContext();
    if (!c)
        return;
    
    if (!isfinite(m11) | !isfinite(m21) | !isfinite(dx) | 
        !isfinite(m12) | !isfinite(m22) | !isfinite(dy))
        return;

    AffineTransform ctm = state().m_transform;
    if (!ctm.isInvertible())
        return;
    c->concatCTM(c->getCTM().inverse());
    c->concatCTM(canvas()->baseTransform());
    state().m_transform.multiply(ctm.inverse());
    m_path.transform(ctm);

    state().m_invertibleCTM = true;
    transform(m11, m12, m21, m22, dx, dy);
}
Developer ID: ShouqingZhang, Project: webkitdriver, Lines: 21, Source: CanvasRenderingContext2D.cpp

Example 11: calculateStrokeBoundingBox

FloatRect LayoutSVGShape::calculateStrokeBoundingBox() const
{
    ASSERT(m_path);
    FloatRect strokeBoundingBox = m_fillBoundingBox;

    if (style()->svgStyle().hasStroke()) {
        StrokeData strokeData;
        SVGLayoutSupport::applyStrokeStyleToStrokeData(strokeData, styleRef(), *this);
        if (hasNonScalingStroke()) {
            AffineTransform nonScalingTransform = nonScalingStrokeTransform();
            if (nonScalingTransform.isInvertible()) {
                Path* usePath = nonScalingStrokePath(m_path.get(), nonScalingTransform);
                FloatRect strokeBoundingRect = usePath->strokeBoundingRect(strokeData);
                strokeBoundingRect = nonScalingTransform.inverse().mapRect(strokeBoundingRect);
                strokeBoundingBox.unite(strokeBoundingRect);
            }
        } else {
            strokeBoundingBox.unite(path().strokeBoundingRect(strokeData));
        }
    }

    return strokeBoundingBox;
}
Developer ID: shaoboyan, Project: chromium-crosswalk, Lines: 23, Source: LayoutSVGShape.cpp

Example 12: transform

void CanvasRenderingContext2D::transform(float m11, float m12, float m21, float m22, float dx, float dy)
{
    GraphicsContext* c = drawingContext();
    if (!c)
        return;
    if (!state().m_invertibleCTM)
        return;

    if (!isfinite(m11) | !isfinite(m21) | !isfinite(dx) | 
        !isfinite(m12) | !isfinite(m22) | !isfinite(dy))
        return;

    AffineTransform transform(m11, m12, m21, m22, dx, dy);
    AffineTransform newTransform = transform * state().m_transform;
    if (!newTransform.isInvertible()) {
        state().m_invertibleCTM = false;
        return;
    }

    state().m_transform = newTransform;
    c->concatCTM(transform);
    m_path.transform(transform.inverse());
}
Developer ID: ShouqingZhang, Project: webkitdriver, Lines: 23, Source: CanvasRenderingContext2D.cpp

Example 13: drawPattern

void Image::drawPattern(GraphicsContext* ctxt, const FloatRect& tileRect, const AffineTransform& patternTransform,
    const FloatPoint& phase, ColorSpace styleColorSpace, CompositeOperator op, const FloatRect& destRect, BlendMode blendMode)
{
    if (!nativeImageForCurrentFrame())
        return;

    if (!patternTransform.isInvertible())
        return;

    CGContextRef context = ctxt->platformContext();
    GraphicsContextStateSaver stateSaver(*ctxt);
    CGContextClipToRect(context, destRect);
    ctxt->setCompositeOperation(op, blendMode);
    CGContextTranslateCTM(context, destRect.x(), destRect.y() + destRect.height());
    CGContextScaleCTM(context, 1, -1);
    
    // Compute the scaled tile size.
    float scaledTileHeight = tileRect.height() * narrowPrecisionToFloat(patternTransform.d());
    
    // We have to adjust the phase to deal with the fact we're in Cartesian space now (with the bottom left corner of destRect being
    // the origin).
    float adjustedX = phase.x() - destRect.x() + tileRect.x() * narrowPrecisionToFloat(patternTransform.a()); // We translated the context so that destRect.x() is the origin, so subtract it out.
    float adjustedY = destRect.height() - (phase.y() - destRect.y() + tileRect.y() * narrowPrecisionToFloat(patternTransform.d()) + scaledTileHeight);

    CGImageRef tileImage = nativeImageForCurrentFrame();
    float h = CGImageGetHeight(tileImage);

    RetainPtr<CGImageRef> subImage;
    if (tileRect.size() == size())
        subImage = tileImage;
    else {
        // Copying a sub-image out of a partially-decoded image stops the decoding of the original image. It should never happen
        // because sub-images are only used for border-image, which only renders when the image is fully decoded.
        ASSERT(h == height());
        subImage = adoptCF(CGImageCreateWithImageInRect(tileImage, tileRect));
    }

    // Adjust the color space.
    subImage = Image::imageWithColorSpace(subImage.get(), styleColorSpace);

    // Leopard has an optimized call for the tiling of image patterns, but we can only use it if the image has been decoded enough that
    // its buffer is the same size as the overall image.  Because a partially decoded CGImageRef with a smaller width or height than the
    // overall image buffer needs to tile with "gaps", we can't use the optimized tiling call in that case.
    // FIXME: We cannot use CGContextDrawTiledImage with scaled tiles on Leopard, because it suffers from rounding errors.  Snow Leopard is ok.
    float scaledTileWidth = tileRect.width() * narrowPrecisionToFloat(patternTransform.a());
    float w = CGImageGetWidth(tileImage);
    if (w == size().width() && h == size().height() && !spaceSize().width() && !spaceSize().height())
        CGContextDrawTiledImage(context, FloatRect(adjustedX, adjustedY, scaledTileWidth, scaledTileHeight), subImage.get());
    else {
        // On Leopard and newer, this code now only runs for partially decoded images whose buffers do not yet match the overall size of the image.
        static const CGPatternCallbacks patternCallbacks = { 0, drawPatternCallback, patternReleaseCallback };
        CGAffineTransform matrix = CGAffineTransformMake(narrowPrecisionToCGFloat(patternTransform.a()), 0, 0, narrowPrecisionToCGFloat(patternTransform.d()), adjustedX, adjustedY);
        matrix = CGAffineTransformConcat(matrix, CGContextGetCTM(context));
        // The top of a partially-decoded image is drawn at the bottom of the tile. Map it to the top.
        matrix = CGAffineTransformTranslate(matrix, 0, size().height() - h);
#if PLATFORM(IOS)
        matrix = CGAffineTransformScale(matrix, 1, -1);
        matrix = CGAffineTransformTranslate(matrix, 0, -h);
#endif
        CGImageRef platformImage = CGImageRetain(subImage.get());
        RetainPtr<CGPatternRef> pattern = adoptCF(CGPatternCreate(platformImage, CGRectMake(0, 0, tileRect.width(), tileRect.height()), matrix,
            tileRect.width() + spaceSize().width() * (1 / narrowPrecisionToFloat(patternTransform.a())),
            tileRect.height() + spaceSize().height() * (1 / narrowPrecisionToFloat(patternTransform.d())),
            kCGPatternTilingConstantSpacing, true, &patternCallbacks));
        
        if (!pattern)
            return;

        RetainPtr<CGColorSpaceRef> patternSpace = adoptCF(CGColorSpaceCreatePattern(0));

        CGFloat alpha = 1;
        RetainPtr<CGColorRef> color = adoptCF(CGColorCreateWithPattern(patternSpace.get(), pattern.get(), &alpha));
        CGContextSetFillColorSpace(context, patternSpace.get());

        // FIXME: Really want a public API for this. It is just CGContextSetBaseCTM(context, CGAffineTransformIdentiy).
        wkSetBaseCTM(context, CGAffineTransformIdentity);
        CGContextSetPatternPhase(context, CGSizeZero);

        CGContextSetFillColorWithColor(context, color.get());
        CGContextFillRect(context, CGContextGetClipBoundingBox(context));
    }

    stateSaver.restore();

    if (imageObserver())
        imageObserver()->didDraw(this);
}
Developer ID: JefferyJeffery, Project: webkit, Lines: 87, Source: ImageCG.cpp

Example 14:

void CanvasRenderingContext2DState::setTransform(const AffineTransform& transform)
{
    m_isTransformInvertible = transform.isInvertible();
    m_transform = transform;
}
Developer ID: mtucker6784, Project: chromium, Lines: 5, Source: CanvasRenderingContext2DState.cpp

Example 15: applyResource

bool RenderSVGResourceFilter::applyResource(RenderElement& renderer, const RenderStyle&, GraphicsContext*& context, unsigned short resourceMode)
{
    ASSERT(context);
    ASSERT_UNUSED(resourceMode, resourceMode == ApplyToDefaultMode);

    if (m_filter.contains(&renderer)) {
        FilterData* filterData = m_filter.get(&renderer);
        if (filterData->state == FilterData::PaintingSource || filterData->state == FilterData::Applying)
            filterData->state = FilterData::CycleDetected;
        return false; // Already built, or we're in a cycle, or we're marked for removal. Regardless, just do nothing more now.
    }

    auto filterData = std::make_unique<FilterData>();
    FloatRect targetBoundingBox = renderer.objectBoundingBox();

    filterData->boundaries = SVGLengthContext::resolveRectangle<SVGFilterElement>(&filterElement(), filterElement().filterUnits(), targetBoundingBox);
    if (filterData->boundaries.isEmpty())
        return false;

    // Determine absolute transformation matrix for filter.
    AffineTransform absoluteTransform;
    SVGRenderingContext::calculateTransformationToOutermostCoordinateSystem(renderer, absoluteTransform);
    if (!absoluteTransform.isInvertible())
        return false;

    // Eliminate shear of the absolute transformation matrix, to be able to produce unsheared tile images for feTile.
    filterData->shearFreeAbsoluteTransform = AffineTransform(absoluteTransform.xScale(), 0, 0, absoluteTransform.yScale(), 0, 0);

    // Determine absolute boundaries of the filter and the drawing region.
    FloatRect absoluteFilterBoundaries = filterData->shearFreeAbsoluteTransform.mapRect(filterData->boundaries);
    filterData->drawingRegion = renderer.strokeBoundingBox();
    filterData->drawingRegion.intersect(filterData->boundaries);
    FloatRect absoluteDrawingRegion = filterData->shearFreeAbsoluteTransform.mapRect(filterData->drawingRegion);

    // Create the SVGFilter object.
    bool primitiveBoundingBoxMode = filterElement().primitiveUnits() == SVGUnitTypes::SVG_UNIT_TYPE_OBJECTBOUNDINGBOX;
    filterData->filter = SVGFilter::create(filterData->shearFreeAbsoluteTransform, absoluteDrawingRegion, targetBoundingBox, filterData->boundaries, primitiveBoundingBoxMode);

    // Create all relevant filter primitives.
    filterData->builder = buildPrimitives(filterData->filter.get());
    if (!filterData->builder)
        return false;

    // Calculate the scale factor for the use of filterRes.
    // Also see http://www.w3.org/TR/SVG/filters.html#FilterEffectsRegion
    FloatSize scale(1, 1);
    if (filterElement().hasAttribute(SVGNames::filterResAttr)) {
        scale.setWidth(filterElement().filterResX() / absoluteFilterBoundaries.width());
        scale.setHeight(filterElement().filterResY() / absoluteFilterBoundaries.height());
    }

    if (scale.isEmpty())
        return false;

    // Determine scale factor for filter. The size of intermediate ImageBuffers shouldn't be bigger than kMaxFilterSize.
    FloatRect tempSourceRect = absoluteDrawingRegion;
    tempSourceRect.scale(scale.width(), scale.height());
    fitsInMaximumImageSize(tempSourceRect.size(), scale);

    // Set the scale level in SVGFilter.
    filterData->filter->setFilterResolution(scale);

    static const unsigned maxTotalOfEffectInputs = 100;
    FilterEffect* lastEffect = filterData->builder->lastEffect();
    if (!lastEffect || lastEffect->totalNumberOfEffectInputs() > maxTotalOfEffectInputs)
        return false;

    RenderSVGResourceFilterPrimitive::determineFilterPrimitiveSubregion(*lastEffect);
    FloatRect subRegion = lastEffect->maxEffectRect();
    // At least one FilterEffect has a too big image size,
    // recalculate the effect sizes with new scale factors.
    if (!fitsInMaximumImageSize(subRegion.size(), scale)) {
        filterData->filter->setFilterResolution(scale);
        RenderSVGResourceFilterPrimitive::determineFilterPrimitiveSubregion(*lastEffect);
    }

    // If the drawingRegion is empty, we have something like <g filter=".."/>.
    // Even if the target objectBoundingBox() is empty, we still have to draw the last effect result image in postApplyResource.
    if (filterData->drawingRegion.isEmpty()) {
        ASSERT(!m_filter.contains(&renderer));
        filterData->savedContext = context;
        m_filter.set(&renderer, WTF::move(filterData));
        return false;
    }

    // Change the coordinate transformation applied to the filtered element to reflect the resolution of the filter.
    AffineTransform effectiveTransform;
    effectiveTransform.scale(scale.width(), scale.height());
    effectiveTransform.multiply(filterData->shearFreeAbsoluteTransform);

    std::unique_ptr<ImageBuffer> sourceGraphic;
    RenderingMode renderingMode = renderer.frame().settings().acceleratedFiltersEnabled() ? Accelerated : Unaccelerated;
    if (!SVGRenderingContext::createImageBuffer(filterData->drawingRegion, effectiveTransform, sourceGraphic, ColorSpaceLinearRGB, renderingMode)) {
        ASSERT(!m_filter.contains(&renderer));
        filterData->savedContext = context;
        m_filter.set(&renderer, WTF::move(filterData));
        return false;
    }

    // Set the rendering mode from the page's settings.
//......... remainder of this function omitted .........
Developer ID: houzhenggang, Project: webkit, Lines: 101, Source: RenderSVGResourceFilter.cpp


Note: The AffineTransform::isInvertible examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.