This article collects and summarizes typical usage examples of the C++ GrGpu class. If you have been wondering what GrGpu is for and how to use it, the curated class code examples here may help.
Below are 15 code examples of the GrGpu class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
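Note: these snippets were taken from different revisions of the Skia source tree, so the same method can appear with different APIs from one example to the next (for instance, buffers expose lock()/unlock() in Example 5 but map()/unmap() in Example 1, and fatal errors go through GrCrash in older snippets but SkFAIL in newer ones). Each example is internally consistent with the revision it came from.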
Example 1: SkSafeUnref
const GrIndexBuffer* GrGpu::getQuadIndexBuffer() const {
    if (NULL == fQuadIndexBuffer || fQuadIndexBuffer->wasDestroyed()) {
        SkSafeUnref(fQuadIndexBuffer);
        static const int SIZE = sizeof(uint16_t) * 6 * MAX_QUADS;
        GrGpu* me = const_cast<GrGpu*>(this);
        fQuadIndexBuffer = me->createIndexBuffer(SIZE, false);
        if (fQuadIndexBuffer) {
            uint16_t* indices = (uint16_t*)fQuadIndexBuffer->map();
            if (indices) {
                fill_indices(indices, MAX_QUADS);
                fQuadIndexBuffer->unmap();
            } else {
                indices = (uint16_t*)sk_malloc_throw(SIZE);
                fill_indices(indices, MAX_QUADS);
                if (!fQuadIndexBuffer->updateData(indices, SIZE)) {
                    fQuadIndexBuffer->unref();
                    fQuadIndexBuffer = NULL;
                    SkFAIL("Can't get indices into buffer!");
                }
                sk_free(indices);
            }
        }
    }
    return fQuadIndexBuffer;
}
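The fill_indices helper used above (and again in Example 5) is not part of the snippet. Here is a minimal sketch of what it plausibly looks like, assuming the two-triangles-per-quad layout implied by the 6 * MAX_QUADS sizing; this is an illustration, not the actual Skia implementation:

static void fill_indices(uint16_t* indices, int quadCount) {
    // Each quad contributes 4 vertices and is drawn as two triangles,
    // so it needs 6 indices: {0,1,2} and {0,2,3}, offset per quad.
    for (int i = 0; i < quadCount; ++i) {
        indices[6 * i + 0] = (uint16_t)(4 * i + 0);
        indices[6 * i + 1] = (uint16_t)(4 * i + 1);
        indices[6 * i + 2] = (uint16_t)(4 * i + 2);
        indices[6 * i + 3] = (uint16_t)(4 * i + 0);
        indices[6 * i + 4] = (uint16_t)(4 * i + 2);
        indices[6 * i + 5] = (uint16_t)(4 * i + 3);
    }
}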
Example 2: SkASSERT
GrSemaphoresSubmitted GrDrawingManager::prepareSurfaceForExternalIO(
        GrSurfaceProxy* proxy, int numSemaphores, GrBackendSemaphore backendSemaphores[]) {
    if (this->wasAbandoned()) {
        return GrSemaphoresSubmitted::kNo;
    }
    SkASSERT(proxy);
    GrGpu* gpu = fContext->contextPriv().getGpu();
    if (!gpu) {
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }
    GrSemaphoresSubmitted result = GrSemaphoresSubmitted::kNo;
    if (proxy->priv().hasPendingIO() || numSemaphores) {
        result = this->flush(proxy, numSemaphores, backendSemaphores);
    }
    if (!proxy->instantiate(fContext->contextPriv().resourceProvider())) {
        return result;
    }
    GrSurface* surface = proxy->priv().peekSurface();
    if (surface->asRenderTarget()) {
        gpu->resolveRenderTarget(surface->asRenderTarget());
    }
    return result;
}
Example 3: sizeof
GrIndexBuffer* GrGpu::createInstancedIndexBuffer(const uint16_t* pattern,
                                                 int patternSize,
                                                 int reps,
                                                 int vertCount,
                                                 bool isDynamic) {
    size_t bufferSize = patternSize * reps * sizeof(uint16_t);
    GrGpu* me = const_cast<GrGpu*>(this);
    GrIndexBuffer* buffer = me->createIndexBuffer(bufferSize, isDynamic);
    if (buffer) {
        uint16_t* data = (uint16_t*) buffer->map();
        bool useTempData = (NULL == data);
        if (useTempData) {
            data = SkNEW_ARRAY(uint16_t, reps * patternSize);
        }
        for (int i = 0; i < reps; ++i) {
            int baseIdx = i * patternSize;
            uint16_t baseVert = (uint16_t)(i * vertCount);
            for (int j = 0; j < patternSize; ++j) {
                data[baseIdx + j] = baseVert + pattern[j];
            }
        }
        if (useTempData) {
            if (!buffer->updateData(data, bufferSize)) {
                SkFAIL("Can't get indices into buffer!");
            }
            SkDELETE_ARRAY(data);
        } else {
            buffer->unmap();
        }
    }
    return buffer;
}
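A hypothetical call site for the method above (kQuadPattern and kNumQuads are illustrative names, not identifiers from the Skia source): replicating a 6-index quad pattern kNumQuads times, with each repetition offset by 4 vertices, produces one index buffer that can draw many quads out of a single vertex buffer.

// Illustrative only: build an index buffer for kNumQuads quads,
// 6 indices per quad, 4 vertices per quad, static (non-dynamic) data.
static const uint16_t kQuadPattern[] = { 0, 1, 2, 0, 2, 3 };
static const int kNumQuads = 256;
GrIndexBuffer* quadIndices =
        gpu->createInstancedIndexBuffer(kQuadPattern, 6, kNumQuads, 4, false);

Example 9 below shows the call GrGpu itself makes with its own gQuadIndexPattern.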
Example 4: GrPoint
const GrVertexBuffer* GrGpu::getUnitSquareVertexBuffer() const {
    if (NULL == fUnitSquareVertexBuffer) {
        static const GrPoint DATA[] = {
            { 0,          0 },
            { GR_Scalar1, 0 },
            { GR_Scalar1, GR_Scalar1 },
            { 0,          GR_Scalar1 }
#if 0
            GrPoint(0,          0),
            GrPoint(GR_Scalar1, 0),
            GrPoint(GR_Scalar1, GR_Scalar1),
            GrPoint(0,          GR_Scalar1)
#endif
        };
        static const size_t SIZE = sizeof(DATA);
        GrGpu* me = const_cast<GrGpu*>(this);
        fUnitSquareVertexBuffer = me->createVertexBuffer(SIZE, false);
        if (NULL != fUnitSquareVertexBuffer) {
            if (!fUnitSquareVertexBuffer->updateData(DATA, SIZE)) {
                fUnitSquareVertexBuffer->unref();
                fUnitSquareVertexBuffer = NULL;
                GrCrash("Can't get vertices into buffer!");
            }
        }
    }
    return fUnitSquareVertexBuffer;
}
Example 5: sizeof
const GrIndexBuffer* GrGpu::getQuadIndexBuffer() const {
    if (NULL == fQuadIndexBuffer) {
        static const int SIZE = sizeof(uint16_t) * 6 * MAX_QUADS;
        GrGpu* me = const_cast<GrGpu*>(this);
        fQuadIndexBuffer = me->createIndexBuffer(SIZE, false);
        if (NULL != fQuadIndexBuffer) {
            uint16_t* indices = (uint16_t*)fQuadIndexBuffer->lock();
            if (NULL != indices) {
                fill_indices(indices, MAX_QUADS);
                fQuadIndexBuffer->unlock();
            } else {
                indices = (uint16_t*)GrMalloc(SIZE);
                fill_indices(indices, MAX_QUADS);
                if (!fQuadIndexBuffer->updateData(indices, SIZE)) {
                    fQuadIndexBuffer->unref();
                    fQuadIndexBuffer = NULL;
                    GrCrash("Can't get indices into buffer!");
                }
                GrFree(indices);
            }
        }
    }
    return fQuadIndexBuffer;
}
Example 6: SkASSERT
void GrPathRendererChain::init() {
    SkASSERT(!fInit);
    GrGpu* gpu = fOwner->getGpu();
    bool twoSided = gpu->caps()->twoSidedStencilSupport();
    bool wrapOp = gpu->caps()->stencilWrapOpsSupport();
    GrPathRenderer::AddPathRenderers(fOwner, this);
    this->addPathRenderer(SkNEW_ARGS(GrDefaultPathRenderer,
                                     (twoSided, wrapOp)))->unref();
    fInit = true;
}
Example 7: make_backend
static sk_sp<GrSurfaceProxy> make_backend(GrContext* context, const ProxyParams& p,
                                          GrBackendTexture* backendTex) {
    GrProxyProvider* proxyProvider = context->contextPriv().proxyProvider();
    GrGpu* gpu = context->contextPriv().getGpu();
    *backendTex = gpu->createTestingOnlyBackendTexture(nullptr, p.fSize, p.fSize,
                                                       p.fConfig, false,
                                                       GrMipMapped::kNo);
    return proxyProvider->createWrappedTextureProxy(*backendTex, p.fOrigin);
}
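Note that the proxy created here merely wraps the backend texture; it does not take ownership. The calling test is expected to destroy the texture itself once it is done with it (GrGpu exposes a matching testing-only deletion entry point for this; its exact name and signature vary by Skia revision).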
Example 8: SkASSERT
const GrGpu::MultisampleSpecs&
GrRenderTargetPriv::getMultisampleSpecs(const GrPipeline& pipeline) const {
    SkASSERT(fRenderTarget == pipeline.getRenderTarget()); // TODO: remove RT from pipeline.
    GrGpu* gpu = fRenderTarget->getGpu();
    if (auto id = fRenderTarget->fMultisampleSpecsID) {
        SkASSERT(gpu->queryMultisampleSpecs(pipeline).fUniqueID == id);
        return gpu->getMultisampleSpecs(id);
    }
    const GrGpu::MultisampleSpecs& specs = gpu->queryMultisampleSpecs(pipeline);
    fRenderTarget->fMultisampleSpecsID = specs.fUniqueID;
    return specs;
}
Example 9: SkSafeUnref
const GrIndexBuffer* GrGpu::getQuadIndexBuffer() const {
    if (NULL == fQuadIndexBuffer || fQuadIndexBuffer->wasDestroyed()) {
        SkSafeUnref(fQuadIndexBuffer);
        GrGpu* me = const_cast<GrGpu*>(this);
        fQuadIndexBuffer = me->createInstancedIndexBuffer(gQuadIndexPattern,
                                                          6,
                                                          MAX_QUADS,
                                                          4);
    }
    return fQuadIndexBuffer;
}
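gQuadIndexPattern is defined elsewhere in the same file. Given the arguments (6 indices per repetition, 4 vertices per quad), it is presumably the usual two-triangle quad pattern; a sketch of what it plausibly contains:

// Presumed contents (an assumption, not copied from the source):
// two triangles covering one quad of 4 vertices.
static const uint16_t gQuadIndexPattern[] = { 0, 1, 2, 0, 2, 3 };

Also note that this call passes four arguments while Example 3's signature takes five (isDynamic); the two snippets come from different Skia revisions.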
Example 10: Create
GrPathRenderer* GrAAHairLinePathRenderer::Create(GrContext* context) {
    const GrIndexBuffer* lIdxBuffer = context->getQuadIndexBuffer();
    if (NULL == lIdxBuffer) {
        return NULL;
    }
    GrGpu* gpu = context->getGpu();
    GrIndexBuffer* qIdxBuf = gpu->createIndexBuffer(kQuadIdxSBufize, false);
    SkAutoTUnref<GrIndexBuffer> qIdxBuffer(qIdxBuf);
    if (NULL == qIdxBuf ||
        !push_quad_index_data(qIdxBuf)) {
        return NULL;
    }
    return SkNEW_ARGS(GrAAHairLinePathRenderer,
                      (context, lIdxBuffer, qIdxBuf));
}
Example 11: Create
GrPathRenderer* GrAAHairLinePathRenderer::Create(GrContext* context) {
    GrGpu* gpu = context->getGpu();
    GrIndexBuffer* qIdxBuf = gpu->createInstancedIndexBuffer(kQuadIdxBufPattern,
                                                             kIdxsPerQuad,
                                                             kQuadsNumInIdxBuffer,
                                                             kQuadNumVertices);
    SkAutoTUnref<GrIndexBuffer> qIdxBuffer(qIdxBuf);
    GrIndexBuffer* lIdxBuf = gpu->createInstancedIndexBuffer(kLineSegIdxBufPattern,
                                                             kIdxsPerLineSeg,
                                                             kLineSegsNumInIdxBuffer,
                                                             kLineSegNumVertices);
    SkAutoTUnref<GrIndexBuffer> lIdxBuffer(lIdxBuf);
    return SkNEW_ARGS(GrAAHairLinePathRenderer,
                      (context, lIdxBuf, qIdxBuf));
}
Example 12: Create
GrPathRenderer* GrAAHairLinePathRenderer::Create(GrContext* context) {
    if (CanBeUsed(context)) {
        const GrIndexBuffer* lIdxBuffer = context->getQuadIndexBuffer();
        if (NULL == lIdxBuffer) {
            return NULL;
        }
        GrGpu* gpu = context->getGpu();
        GrIndexBuffer* qIdxBuf = gpu->createIndexBuffer(kQuadIdxSBufize, false);
        SkAutoTUnref<GrIndexBuffer> qIdxBuffer(qIdxBuf); // cons will take a ref
        if (NULL == qIdxBuf ||
            !push_quad_index_data(qIdxBuffer.get())) {
            return NULL;
        }
        return new GrAAHairLinePathRenderer(context,
                                            lIdxBuffer,
                                            qIdxBuf);
    } else {
        return NULL;
    }
}
Example 13: genIter
void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
    if (fCmdBuffer.empty()) {
        return;
    }
    GrGpu* gpu = iodb->getGpu();
    // Loop over all batches and generate geometry
    CmdBuffer::Iter genIter(fCmdBuffer);
    while (genIter.next()) {
        if (Cmd::kDrawBatch_CmdType == genIter->type()) {
            DrawBatch* db = reinterpret_cast<DrawBatch*>(genIter.get());
            fBatchTarget.resetNumberOfDraws();
            db->fBatch->generateGeometry(&fBatchTarget, db->fState->getPipeline());
            db->fBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
        }
    }
    fBatchTarget.preFlush();
    CmdBuffer::Iter iter(fCmdBuffer);
    while (iter.next()) {
        GrGpuTraceMarker newMarker("", -1);
        SkString traceString;
        if (iter->isTraced()) {
            traceString = iodb->getCmdString(iter->markerID());
            newMarker.fMarker = traceString.c_str();
            gpu->addGpuTraceMarker(&newMarker);
        }
        iter->execute(gpu);
        if (iter->isTraced()) {
            gpu->removeGpuTraceMarker(&newMarker);
        }
    }
    fBatchTarget.postFlush();
}
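This flush makes two passes over the recorded command buffer: the first pass generates geometry for every draw batch up front, so all geometry is prepared before any execution, and the second pass executes each command in order, bracketing traced commands with GPU trace markers so they can be attributed in GPU debugging tools.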
Example 14: DEF_GPUTEST_FOR_RENDERING_CONTEXTS
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(PromiseImageTest, reporter, ctxInfo) {
    const int kWidth = 10;
    const int kHeight = 10;
    GrContext* ctx = ctxInfo.grContext();
    GrGpu* gpu = ctx->contextPriv().getGpu();
    for (bool releaseImageEarly : {true, false}) {
        GrBackendTexture backendTex = gpu->createTestingOnlyBackendTexture(
                nullptr, kWidth, kHeight, kRGBA_8888_GrPixelConfig, true, GrMipMapped::kNo);
        REPORTER_ASSERT(reporter, backendTex.isValid());
        GrBackendFormat backendFormat = backendTex.format();
        REPORTER_ASSERT(reporter, backendFormat.isValid());
        PromiseTextureChecker promiseChecker(backendTex);
        GrSurfaceOrigin texOrigin = kTopLeft_GrSurfaceOrigin;
        sk_sp<SkImage> refImg(
                SkImage_Gpu::MakePromiseTexture(ctx, backendFormat, kWidth, kHeight,
                                                GrMipMapped::kNo, texOrigin,
                                                kRGBA_8888_SkColorType, kPremul_SkAlphaType,
                                                nullptr,
                                                PromiseTextureChecker::Fulfill,
                                                PromiseTextureChecker::Release,
                                                PromiseTextureChecker::Done,
                                                &promiseChecker));
        SkImageInfo info = SkImageInfo::MakeN32Premul(kWidth, kHeight);
        sk_sp<SkSurface> surface = SkSurface::MakeRenderTarget(ctx, SkBudgeted::kNo, info);
        SkCanvas* canvas = surface->getCanvas();
        int expectedFulfillCnt = 0;
        int expectedReleaseCnt = 0;
        int expectedDoneCnt = 0;
        canvas->drawImage(refImg, 0, 0);
        REPORTER_ASSERT(reporter, check_fulfill_and_release_cnts(promiseChecker,
                                                                 true,
                                                                 expectedFulfillCnt,
                                                                 expectedReleaseCnt,
                                                                 true,
                                                                 expectedDoneCnt,
                                                                 reporter));
        bool isVulkan = kVulkan_GrBackend == ctx->contextPriv().getBackend();
        canvas->flush();
        expectedFulfillCnt++;
        expectedReleaseCnt++;
        REPORTER_ASSERT(reporter, check_fulfill_and_release_cnts(promiseChecker,
                                                                 !isVulkan,
                                                                 expectedFulfillCnt,
                                                                 expectedReleaseCnt,
                                                                 !isVulkan,
                                                                 expectedDoneCnt,
                                                                 reporter));
        gpu->testingOnly_flushGpuAndSync();
        REPORTER_ASSERT(reporter, check_fulfill_and_release_cnts(promiseChecker,
                                                                 true,
                                                                 expectedFulfillCnt,
                                                                 expectedReleaseCnt,
                                                                 true,
                                                                 expectedDoneCnt,
                                                                 reporter));
        canvas->drawImage(refImg, 0, 0);
        canvas->drawImage(refImg, 0, 0);
        canvas->flush();
        expectedFulfillCnt++;
        expectedReleaseCnt++;
        gpu->testingOnly_flushGpuAndSync();
        REPORTER_ASSERT(reporter, check_fulfill_and_release_cnts(promiseChecker,
                                                                 true,
                                                                 expectedFulfillCnt,
                                                                 expectedReleaseCnt,
                                                                 true,
                                                                 expectedDoneCnt,
                                                                 reporter));
        // Now test code path on Vulkan where we released the texture, but the GPU isn't done
        // with resource yet and we do another draw. We should only call fulfill on the first
        // draw and use the cached GrBackendTexture on the second. Release should only be
        // called after the second draw is finished.
        canvas->drawImage(refImg, 0, 0);
        canvas->flush();
        expectedFulfillCnt++;
        expectedReleaseCnt++;
        REPORTER_ASSERT(reporter, check_fulfill_and_release_cnts(promiseChecker,
                                                                 !isVulkan,
                                                                 expectedFulfillCnt,
                                                                 expectedReleaseCnt,
                                                                 !isVulkan,
                                                                 expectedDoneCnt,
                                                                 reporter));
        canvas->drawImage(refImg, 0, 0);
        if (releaseImageEarly) {
//......... (rest of the code omitted here) .........
Example 15: switch
GrContext* GrContextFactory::get(GLContextType type, GrGLStandard forcedGpuAPI) {
    for (int i = 0; i < fContexts.count(); ++i) {
        if (forcedGpuAPI != kNone_GrGLStandard &&
            forcedGpuAPI != fContexts[i].fGLContext->gl()->fStandard)
            continue;
        if (fContexts[i].fType == type) {
            fContexts[i].fGLContext->makeCurrent();
            return fContexts[i].fGrContext;
        }
    }
    SkAutoTUnref<SkGLContext> glCtx;
    SkAutoTUnref<GrContext> grCtx;
    switch (type) {
        case kNVPR_GLContextType: // fallthru
        case kNative_GLContextType:
            glCtx.reset(SkCreatePlatformGLContext(forcedGpuAPI));
            break;
#ifdef SK_ANGLE
        case kANGLE_GLContextType:
            glCtx.reset(SkANGLEGLContext::Create(forcedGpuAPI));
            break;
#endif
#ifdef SK_MESA
        case kMESA_GLContextType:
            glCtx.reset(SkMesaGLContext::Create(forcedGpuAPI));
            break;
#endif
        case kNull_GLContextType:
            glCtx.reset(SkNullGLContext::Create(forcedGpuAPI));
            break;
        case kDebug_GLContextType:
            glCtx.reset(SkDebugGLContext::Create(forcedGpuAPI));
            break;
    }
    if (NULL == glCtx.get()) {
        return NULL;
    }
    SkASSERT(glCtx->isValid());
    // Block NVPR from non-NVPR types.
    SkAutoTUnref<const GrGLInterface> glInterface(SkRef(glCtx->gl()));
    if (kNVPR_GLContextType != type) {
        glInterface.reset(GrGLInterfaceRemoveNVPR(glInterface));
        if (!glInterface) {
            return NULL;
        }
    } else {
        if (!glInterface->hasExtension("GL_NV_path_rendering")) {
            return NULL;
        }
    }
    glCtx->makeCurrent();
    GrBackendContext p3dctx = reinterpret_cast<GrBackendContext>(glInterface.get());
#ifdef SK_VULKAN
    grCtx.reset(GrContext::Create(kVulkan_GrBackend, p3dctx, fGlobalOptions));
#else
    grCtx.reset(GrContext::Create(kOpenGL_GrBackend, p3dctx, fGlobalOptions));
#endif
    if (!grCtx.get()) {
        return NULL;
    }
    // Warn if path rendering support is not available for the NVPR type.
    if (kNVPR_GLContextType == type) {
        if (!grCtx->caps()->shaderCaps()->pathRenderingSupport()) {
            GrGpu* gpu = grCtx->getGpu();
            const GrGLContext* ctx = gpu->glContextForTesting();
            if (ctx) {
                const GrGLubyte* verUByte;
                GR_GL_CALL_RET(ctx->interface(), verUByte, GetString(GR_GL_VERSION));
                const char* ver = reinterpret_cast<const char*>(verUByte);
                SkDebugf("\nWARNING: nvprmsaa config requested, but driver path rendering "
                         "support not available. Maybe update the driver? Your driver version "
                         "string: \"%s\"\n", ver);
            } else {
                SkDebugf("\nWARNING: nvprmsaa config requested, but driver path rendering "
                         "support not available.\n");
            }
        }
    }
    GPUContext& ctx = fContexts.push_back();
    ctx.fGLContext = glCtx.get();
    ctx.fGLContext->ref();
    ctx.fGrContext = grCtx.get();
    ctx.fGrContext->ref();
    ctx.fType = type;
    return ctx.fGrContext;
}