This article collects typical usage examples of the C++ CALLOC_STRUCT function. If you are wondering what exactly CALLOC_STRUCT does, how to call it, or what real-world uses look like, the hand-picked code samples below may help.
The following shows 15 code examples of the CALLOC_STRUCT function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
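Before diving into the examples, here is a minimal sketch of what CALLOC_STRUCT typically expands to in the Mesa/Gallium tree (based on u_memory.h; the exact definition may differ between versions or wrap a debug allocator, so treat this as an assumption rather than the authoritative macro), together with the allocate / NULL-check / free-on-error pattern that recurs in every example below:

#include <stdlib.h>

/* Assumed definition: allocate one zero-initialized instance of struct T.
 * Because the memory comes from calloc, every field starts out as 0/NULL. */
#define CALLOC_STRUCT(T)   ((struct T *) calloc(1, sizeof(struct T)))

/* my_state and my_state_create are hypothetical names used only to show the
 * recurring usage pattern. */
struct my_state { int flags; void *priv; };

static struct my_state *
my_state_create(void)
{
   struct my_state *state = CALLOC_STRUCT(my_state);
   if (!state)
      return NULL;            /* propagate the allocation failure */
   state->flags = 1;          /* other fields were already zeroed by calloc */
   return state;              /* the caller releases it with free()/FREE() */
}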
Example 1: intelCreateBuffer
/**
* This is called when we need to set up GL rendering to a new X window.
*/
static GLboolean
intelCreateBuffer(__DRIscreen * driScrnPriv,
__DRIdrawable * driDrawPriv,
const struct gl_config * mesaVis, GLboolean isPixmap)
{
struct intel_renderbuffer *rb;
mesa_format rgbFormat;
struct gl_framebuffer *fb;
if (isPixmap)
return false;
fb = CALLOC_STRUCT(gl_framebuffer);
if (!fb)
return false;
_mesa_initialize_window_framebuffer(fb, mesaVis);
if (mesaVis->redBits == 5)
rgbFormat = MESA_FORMAT_B5G6R5_UNORM;
else if (mesaVis->sRGBCapable)
rgbFormat = MESA_FORMAT_B8G8R8A8_SRGB;
else if (mesaVis->alphaBits == 0)
rgbFormat = MESA_FORMAT_B8G8R8X8_UNORM;
else
rgbFormat = MESA_FORMAT_B8G8R8A8_UNORM;
/* set up the hardware-based renderbuffers */
rb = intel_create_renderbuffer(rgbFormat);
_mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
if (mesaVis->doubleBufferMode) {
rb = intel_create_renderbuffer(rgbFormat);
_mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
}
/*
* Assert here that the gl_config has an expected depth/stencil bit
* combination: one of d24/s8, d16/s0, d0/s0. (See intelInitScreen2(),
* which constructs the advertised configs.)
*/
if (mesaVis->depthBits == 24) {
assert(mesaVis->stencilBits == 8);
/*
* Use combined depth/stencil. Note that the renderbuffer is
* attached to two attachment points.
*/
rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_S8_UINT);
_mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
_mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
}
else if (mesaVis->depthBits == 16) {
assert(mesaVis->stencilBits == 0);
rb = intel_create_private_renderbuffer(MESA_FORMAT_Z_UNORM16);
_mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
}
else {
assert(mesaVis->depthBits == 0);
assert(mesaVis->stencilBits == 0);
}
/* now add any/all software-based renderbuffers we may need */
_swrast_add_soft_renderbuffers(fb,
false, /* never sw color */
false, /* never sw depth */
false, /* never sw stencil */
mesaVis->accumRedBits > 0,
false, /* never sw alpha */
false /* never sw aux */ );
driDrawPriv->driverPrivate = fb;
return true;
}
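The gl_framebuffer allocated with CALLOC_STRUCT above is not freed directly by the driver; it is released later through core Mesa's reference counting when the drawable is destroyed. A simplified, hedged sketch of what the matching DRI destroy hook usually looks like (the hook name mirrors intelCreateBuffer and is an assumption here, not taken from the example):

static void
intelDestroyBuffer(__DRIdrawable * driDrawPriv)
{
   struct gl_framebuffer *fb = driDrawPriv->driverPrivate;

   /* Dropping the last reference frees the CALLOC_STRUCT'ed framebuffer,
    * together with its renderbuffers, once nothing else holds it. */
   _mesa_reference_framebuffer(&fb, NULL);
}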
Example 2: nv30_screen_create
struct pipe_screen *
nv30_screen_create(struct nouveau_device *dev)
{
struct nv30_screen *screen = CALLOC_STRUCT(nv30_screen);
struct pipe_screen *pscreen;
struct nouveau_pushbuf *push;
struct nv04_fifo *fifo;
unsigned oclass = 0;
int ret, i;
if (!screen)
return NULL;
switch (dev->chipset & 0xf0) {
case 0x30:
if (RANKINE_0397_CHIPSET & (1 << (dev->chipset & 0x0f)))
oclass = NV30_3D_CLASS;
else
if (RANKINE_0697_CHIPSET & (1 << (dev->chipset & 0x0f)))
oclass = NV34_3D_CLASS;
else
if (RANKINE_0497_CHIPSET & (1 << (dev->chipset & 0x0f)))
oclass = NV35_3D_CLASS;
break;
case 0x40:
if (CURIE_4097_CHIPSET & (1 << (dev->chipset & 0x0f)))
oclass = NV40_3D_CLASS;
else
if (CURIE_4497_CHIPSET & (1 << (dev->chipset & 0x0f)))
oclass = NV44_3D_CLASS;
break;
case 0x60:
if (CURIE_4497_CHIPSET6X & (1 << (dev->chipset & 0x0f)))
oclass = NV44_3D_CLASS;
break;
default:
break;
}
if (!oclass) {
NOUVEAU_ERR("unknown 3d class for 0x%02x\n", dev->chipset);
FREE(screen);
return NULL;
}
pscreen = &screen->base.base;
pscreen->destroy = nv30_screen_destroy;
pscreen->get_param = nv30_screen_get_param;
pscreen->get_paramf = nv30_screen_get_paramf;
pscreen->get_shader_param = nv30_screen_get_shader_param;
pscreen->context_create = nv30_context_create;
pscreen->is_format_supported = nv30_screen_is_format_supported;
nv30_resource_screen_init(pscreen);
nouveau_screen_init_vdec(&screen->base);
screen->base.fence.emit = nv30_screen_fence_emit;
screen->base.fence.update = nv30_screen_fence_update;
ret = nouveau_screen_init(&screen->base, dev);
if (ret)
FAIL_SCREEN_INIT("nv30_screen_init failed: %d\n", ret);
screen->base.vidmem_bindings |= PIPE_BIND_VERTEX_BUFFER;
screen->base.sysmem_bindings |= PIPE_BIND_VERTEX_BUFFER;
if (oclass == NV40_3D_CLASS) {
screen->base.vidmem_bindings |= PIPE_BIND_INDEX_BUFFER;
screen->base.sysmem_bindings |= PIPE_BIND_INDEX_BUFFER;
}
fifo = screen->base.channel->data;
push = screen->base.pushbuf;
push->rsvd_kick = 16;
ret = nouveau_object_new(screen->base.channel, 0x00000000, NV01_NULL_CLASS,
NULL, 0, &screen->null);
if (ret)
FAIL_SCREEN_INIT("error allocating null object: %d\n", ret);
/* DMA_FENCE refuses to accept DMA objects with "adjust" filled in,
 * which means that the address pointed at by the DMA object must
* be 4KiB aligned, which means this object needs to be the first
* one allocated on the channel.
*/
ret = nouveau_object_new(screen->base.channel, 0xbeef1e00,
NOUVEAU_NOTIFIER_CLASS, &(struct nv04_notify) {
.length = 32 }, sizeof(struct nv04_notify),
Example 3: gbm_gallium_drm_bo_import
static struct gbm_bo *
gbm_gallium_drm_bo_import(struct gbm_device *gbm,
uint32_t type, void *buffer, uint32_t usage)
{
struct gbm_gallium_drm_device *gdrm = gbm_gallium_drm_device(gbm);
struct gbm_gallium_drm_bo *bo;
struct winsys_handle whandle;
struct pipe_resource *resource;
switch (type) {
#if HAVE_WAYLAND_PLATFORM
case GBM_BO_IMPORT_WL_BUFFER:
{
struct wl_drm_buffer *wb = (struct wl_drm_buffer *) buffer;
resource = wb->driver_buffer;
break;
}
#endif
case GBM_BO_IMPORT_EGL_IMAGE:
if (!gdrm->lookup_egl_image)
return NULL;
resource = gdrm->lookup_egl_image(gdrm->lookup_egl_image_data, buffer);
if (resource == NULL)
return NULL;
break;
default:
return NULL;
}
bo = CALLOC_STRUCT(gbm_gallium_drm_bo);
if (bo == NULL)
return NULL;
bo->base.base.gbm = gbm;
bo->base.base.width = resource->width0;
bo->base.base.height = resource->height0;
switch (resource->format) {
case PIPE_FORMAT_B8G8R8X8_UNORM:
bo->base.base.format = GBM_BO_FORMAT_XRGB8888;
break;
case PIPE_FORMAT_B8G8R8A8_UNORM:
bo->base.base.format = GBM_BO_FORMAT_ARGB8888;
break;
default:
FREE(bo);
return NULL;
}
pipe_resource_reference(&bo->resource, resource);
memset(&whandle, 0, sizeof(whandle));
whandle.type = DRM_API_HANDLE_TYPE_KMS;
gdrm->screen->resource_get_handle(gdrm->screen, bo->resource, &whandle);
bo->base.base.handle.u32 = whandle.handle;
bo->base.base.stride = whandle.stride;
return &bo->base.base;
}
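For symmetry, a hedged sketch of the teardown side: the import above takes a reference on the pipe_resource, so a destroy callback would drop that reference and then free the CALLOC_STRUCT allocation (the function name and the direct cast are assumptions for illustration):

static void
gbm_gallium_drm_bo_destroy(struct gbm_bo *_bo)
{
   /* gbm_bo is the first member chain of gbm_gallium_drm_bo (bo->base.base
    * above), so a plain cast back is assumed to be valid here. */
   struct gbm_gallium_drm_bo *bo = (struct gbm_gallium_drm_bo *) _bo;

   pipe_resource_reference(&bo->resource, NULL);  /* drop the reference taken on import */
   FREE(bo);                                      /* pairs with CALLOC_STRUCT */
}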
Example 4: XMesaCreateContext
/**
* Create a new XMesaContext.
* \param v the XMesaVisual
* \param share_list another XMesaContext with which to share display
* lists or NULL if no sharing is wanted.
* \return an XMesaContext or NULL if error.
*/
PUBLIC
XMesaContext XMesaCreateContext( XMesaVisual v, XMesaContext share_list,
GLuint major, GLuint minor,
GLuint profileMask, GLuint contextFlags)
{
XMesaDisplay xmdpy = xmesa_init_display(v->display);
struct st_context_attribs attribs;
enum st_context_error ctx_err = 0;
XMesaContext c;
if (!xmdpy)
return NULL;
/* Note: the XMesaContext contains a Mesa struct gl_context (inheritance) */
c = (XMesaContext) CALLOC_STRUCT(xmesa_context);
if (!c)
return NULL;
c->xm_visual = v;
c->xm_buffer = NULL; /* set later by XMesaMakeCurrent */
c->xm_read_buffer = NULL;
memset(&attribs, 0, sizeof(attribs));
attribs.visual = v->stvis;
attribs.major = major;
attribs.minor = minor;
if (contextFlags & GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB)
attribs.flags |= ST_CONTEXT_FLAG_FORWARD_COMPATIBLE;
if (contextFlags & GLX_CONTEXT_DEBUG_BIT_ARB)
attribs.flags |= ST_CONTEXT_FLAG_DEBUG;
if (contextFlags & GLX_CONTEXT_ROBUST_ACCESS_BIT_ARB)
attribs.flags |= ST_CONTEXT_FLAG_ROBUST_ACCESS;
/* There are no profiles before OpenGL 3.2. The
* GLX_ARB_create_context_profile spec says:
*
* "If the requested OpenGL version is less than 3.2,
* GLX_CONTEXT_PROFILE_MASK_ARB is ignored and the functionality of the
* context is determined solely by the requested version."
*
* The spec also says:
*
* "The default value for GLX_CONTEXT_PROFILE_MASK_ARB is
* GLX_CONTEXT_CORE_PROFILE_BIT_ARB."
*/
attribs.profile = ST_PROFILE_DEFAULT;
if ((major > 3 || (major == 3 && minor >= 2))
&& ((profileMask & GLX_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB) == 0))
attribs.profile = ST_PROFILE_OPENGL_CORE;
c->st = stapi->create_context(stapi, xmdpy->smapi, &attribs,
&ctx_err, (share_list) ? share_list->st : NULL);
if (c->st == NULL)
goto fail;
c->st->st_manager_private = (void *) c;
return c;
fail:
if (c->st)
c->st->destroy(c->st);
free(c);
return NULL;
}
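For context when reading the fail: path above, a simplified, hedged sketch of the matching destroy routine (buffer and display cleanup omitted; only the parts that mirror the allocation are shown):

PUBLIC
void XMesaDestroyContext( XMesaContext c )
{
   /* Tear down the state-tracker context first, then release the
    * CALLOC_STRUCT'ed wrapper itself, in the same order as the fail: path. */
   if (c->st)
      c->st->destroy(c->st);
   free(c);
}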
Example 5: r300_create_context
struct pipe_context* r300_create_context(struct pipe_screen* screen,
void *priv)
{
struct r300_context* r300 = CALLOC_STRUCT(r300_context);
struct r300_screen* r300screen = r300_screen(screen);
struct radeon_winsys *rws = r300screen->rws;
if (!r300)
return NULL;
r300->rws = rws;
r300->screen = r300screen;
r300->context.screen = screen;
r300->context.priv = priv;
r300->context.destroy = r300_destroy_context;
util_slab_create(&r300->pool_transfers,
sizeof(struct pipe_transfer), 64,
UTIL_SLAB_SINGLETHREADED);
r300->cs = rws->cs_create(rws, RING_GFX, NULL);
if (r300->cs == NULL)
goto fail;
if (!r300screen->caps.has_tcl) {
/* Create a Draw. This is used for SW TCL. */
r300->draw = draw_create(&r300->context);
if (r300->draw == NULL)
goto fail;
/* Enable our renderer. */
draw_set_rasterize_stage(r300->draw, r300_draw_stage(r300));
/* Disable converting points/lines to triangles. */
draw_wide_line_threshold(r300->draw, 10000000.f);
draw_wide_point_threshold(r300->draw, 10000000.f);
draw_wide_point_sprites(r300->draw, FALSE);
draw_enable_line_stipple(r300->draw, TRUE);
draw_enable_point_sprites(r300->draw, FALSE);
}
if (!r300_setup_atoms(r300))
goto fail;
r300_init_blit_functions(r300);
r300_init_flush_functions(r300);
r300_init_query_functions(r300);
r300_init_state_functions(r300);
r300_init_resource_functions(r300);
r300_init_render_functions(r300);
r300_init_states(&r300->context);
r300->context.create_video_decoder = vl_create_decoder;
r300->context.create_video_buffer = vl_video_buffer_create;
if (r300screen->caps.has_tcl) {
r300->uploader = u_upload_create(&r300->context, 256 * 1024, 4,
PIPE_BIND_INDEX_BUFFER);
}
r300->blitter = util_blitter_create(&r300->context);
if (r300->blitter == NULL)
goto fail;
r300->blitter->draw_rectangle = r300_blitter_draw_rectangle;
rws->cs_set_flush_callback(r300->cs, r300_flush_callback, r300);
/* The KIL opcode needs the first texture unit to be enabled
* on r3xx-r4xx. In order to calm down the CS checker, we bind this
* dummy texture there. */
if (!r300->screen->caps.is_r500) {
struct pipe_resource *tex;
struct pipe_resource rtempl = {{0}};
struct pipe_sampler_view vtempl = {{0}};
rtempl.target = PIPE_TEXTURE_2D;
rtempl.format = PIPE_FORMAT_I8_UNORM;
rtempl.usage = PIPE_USAGE_IMMUTABLE;
rtempl.width0 = 1;
rtempl.height0 = 1;
rtempl.depth0 = 1;
tex = screen->resource_create(screen, &rtempl);
u_sampler_view_default_template(&vtempl, tex, tex->format);
r300->texkill_sampler = (struct r300_sampler_view*)
r300->context.create_sampler_view(&r300->context, tex, &vtempl);
pipe_resource_reference(&tex, NULL);
}
if (r300screen->caps.has_tcl) {
struct pipe_resource vb;
memset(&vb, 0, sizeof(vb));
vb.target = PIPE_BUFFER;
vb.format = PIPE_FORMAT_R8_UNORM;
vb.usage = PIPE_USAGE_STATIC;
vb.width0 = sizeof(float) * 16;
vb.height0 = 1;
vb.depth0 = 1;
//......... part of the code omitted here .........
Example 6: fd4_zsa_state_create
void *
fd4_zsa_state_create(struct pipe_context *pctx,
const struct pipe_depth_stencil_alpha_state *cso)
{
struct fd4_zsa_stateobj *so;
so = CALLOC_STRUCT(fd4_zsa_stateobj);
if (!so)
return NULL;
so->base = *cso;
so->rb_depth_control |=
A4XX_RB_DEPTH_CONTROL_ZFUNC(cso->depth.func); /* maps 1:1 */
if (cso->depth.enabled)
so->rb_depth_control |=
A4XX_RB_DEPTH_CONTROL_Z_ENABLE |
A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE;
if (cso->depth.writemask)
so->rb_depth_control |= A4XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE;
if (cso->stencil[0].enabled) {
const struct pipe_stencil_state *s = &cso->stencil[0];
so->rb_stencil_control |=
A4XX_RB_STENCIL_CONTROL_STENCIL_READ |
A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
A4XX_RB_STENCIL_CONTROL_FUNC(s->func) | /* maps 1:1 */
A4XX_RB_STENCIL_CONTROL_FAIL(fd_stencil_op(s->fail_op)) |
A4XX_RB_STENCIL_CONTROL_ZPASS(fd_stencil_op(s->zpass_op)) |
A4XX_RB_STENCIL_CONTROL_ZFAIL(fd_stencil_op(s->zfail_op));
so->rb_stencil_control2 |=
A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER;
so->rb_stencilrefmask |=
0xff000000 | /* ??? */
A4XX_RB_STENCILREFMASK_STENCILWRITEMASK(s->writemask) |
A4XX_RB_STENCILREFMASK_STENCILMASK(s->valuemask);
if (cso->stencil[1].enabled) {
const struct pipe_stencil_state *bs = &cso->stencil[1];
so->rb_stencil_control |=
A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF |
A4XX_RB_STENCIL_CONTROL_FUNC_BF(bs->func) | /* maps 1:1 */
A4XX_RB_STENCIL_CONTROL_FAIL_BF(fd_stencil_op(bs->fail_op)) |
A4XX_RB_STENCIL_CONTROL_ZPASS_BF(fd_stencil_op(bs->zpass_op)) |
A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(fd_stencil_op(bs->zfail_op));
so->rb_stencilrefmask_bf |=
0xff000000 | /* ??? */
A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(bs->writemask) |
A4XX_RB_STENCILREFMASK_BF_STENCILMASK(bs->valuemask);
}
}
if (cso->alpha.enabled) {
uint32_t ref = cso->alpha.ref_value * 255.0;
so->gras_alpha_control =
A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE;
so->rb_alpha_control =
A4XX_RB_ALPHA_CONTROL_ALPHA_TEST |
A4XX_RB_ALPHA_CONTROL_ALPHA_REF(ref) |
A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(cso->alpha.func);
so->rb_depth_control |=
A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE;
}
return so;
}
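Gallium hands CSOs like this back to the driver as opaque pointers, and they are normally released by a matching delete callback that simply frees them. A minimal sketch (the function name is illustrative rather than taken from the source above):

static void
fd4_zsa_state_delete(struct pipe_context *pctx, void *hwcso)
{
   /* The CSO is plain CALLOC_STRUCT memory with no owned sub-allocations,
    * so deletion is a single FREE of the opaque pointer. */
   FREE(hwcso);
}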
Example 7: svga_texture_get_transfer
/* XXX: Still implementing this as if it was a screen function, but
* can now modify it to queue transfers on the context.
*/
static struct pipe_transfer *
svga_texture_get_transfer(struct pipe_context *pipe,
struct pipe_resource *texture,
unsigned level,
unsigned usage,
const struct pipe_box *box)
{
struct svga_context *svga = svga_context(pipe);
struct svga_screen *ss = svga_screen(pipe->screen);
struct svga_winsys_screen *sws = ss->sws;
struct svga_transfer *st;
unsigned nblocksx = util_format_get_nblocksx(texture->format, box->width);
unsigned nblocksy = util_format_get_nblocksy(texture->format, box->height);
/* We can't map texture storage directly */
if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
return NULL;
assert(box->depth == 1);
st = CALLOC_STRUCT(svga_transfer);
if (!st)
return NULL;
pipe_resource_reference(&st->base.resource, texture);
st->base.level = level;
st->base.usage = usage;
st->base.box = *box;
st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
st->base.layer_stride = 0;
st->hw_nblocksy = nblocksy;
st->hwbuf = svga_winsys_buffer_create(svga,
1,
0,
st->hw_nblocksy*st->base.stride);
while(!st->hwbuf && (st->hw_nblocksy /= 2)) {
st->hwbuf = svga_winsys_buffer_create(svga,
1,
0,
st->hw_nblocksy*st->base.stride);
}
if(!st->hwbuf)
goto no_hwbuf;
if(st->hw_nblocksy < nblocksy) {
/* We couldn't allocate a hardware buffer big enough for the transfer,
* so allocate regular malloc memory instead */
if (0) {
debug_printf("%s: failed to allocate %u KB of DMA, "
"splitting into %u x %u KB DMA transfers\n",
__FUNCTION__,
(nblocksy*st->base.stride + 1023)/1024,
(nblocksy + st->hw_nblocksy - 1)/st->hw_nblocksy,
(st->hw_nblocksy*st->base.stride + 1023)/1024);
}
st->swbuf = MALLOC(nblocksy*st->base.stride);
if(!st->swbuf)
goto no_swbuf;
}
if (usage & PIPE_TRANSFER_READ) {
SVGA3dSurfaceDMAFlags flags;
memset(&flags, 0, sizeof flags);
svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
}
return &st->base;
no_swbuf:
sws->buffer_destroy(sws, st->hwbuf);
no_hwbuf:
FREE(st);
return NULL;
}
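A hedged sketch of the corresponding transfer-destroy path, which releases everything allocated above in reverse order (the function name and exact ordering are assumptions; a real implementation would also flush pending writes first):

static void
svga_texture_transfer_destroy(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   struct svga_transfer *st = (struct svga_transfer *) transfer;
   struct svga_winsys_screen *sws = svga_screen(pipe->screen)->sws;

   if (st->swbuf)
      FREE(st->swbuf);                        /* the malloc'ed fallback buffer */
   sws->buffer_destroy(sws, st->hwbuf);       /* the DMA staging buffer */
   pipe_resource_reference(&st->base.resource, NULL);
   FREE(st);                                  /* pairs with CALLOC_STRUCT */
}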
Example 8: etna_screen_resource_create
/* Allocate 2D texture or render target resource
*/
static struct pipe_resource * etna_screen_resource_create(struct pipe_screen *screen,
const struct pipe_resource *templat)
{
struct etna_screen *priv = etna_screen(screen);
assert(templat);
unsigned element_size = util_format_get_blocksize(templat->format);
if(!element_size)
return NULL;
/* Check input */
if(templat->target == PIPE_TEXTURE_CUBE)
{
assert(templat->array_size == 6);
} else if (templat->target == PIPE_BUFFER)
{
assert(templat->format == PIPE_FORMAT_R8_UNORM); /* bytes; want TYPELESS or similar */
assert(templat->array_size == 1);
assert(templat->height0 == 1);
assert(templat->depth0 == 1);
assert(templat->array_size == 1);
assert(templat->last_level == 0);
} else
{
assert(templat->array_size == 1);
}
assert(templat->width0 != 0);
assert(templat->height0 != 0);
assert(templat->depth0 != 0);
assert(templat->array_size != 0);
/* Figure out what tiling to use -- for now, assume that textures cannot be supertiled, and cannot be linear.
* There is a feature flag SUPERTILED_TEXTURE (not supported on any known hw) that may allow this, as well
* as LINEAR_TEXTURE_SUPPORT (supported on gc880 and gc2000 at least), but not sure how it works.
* Buffers always have LINEAR layout.
*/
unsigned layout = ETNA_LAYOUT_LINEAR;
if(templat->target != PIPE_BUFFER)
{
if(!(templat->bind & PIPE_BIND_SAMPLER_VIEW) && priv->specs.can_supertile &&
!DBG_ENABLED(ETNA_DBG_NO_SUPERTILE))
layout = ETNA_LAYOUT_SUPER_TILED;
else
layout = ETNA_LAYOUT_TILED;
}
/* XXX multi tiled formats */
/* Determine scaling for antialiasing, allow override using debug flag */
int nr_samples = templat->nr_samples;
if((templat->bind & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) &&
!(templat->bind & PIPE_BIND_SAMPLER_VIEW))
{
if(DBG_ENABLED(ETNA_DBG_MSAA_2X))
nr_samples = 2;
if(DBG_ENABLED(ETNA_DBG_MSAA_4X))
nr_samples = 4;
}
int msaa_xscale = 1, msaa_yscale = 1;
if(!translate_samples_to_xyscale(nr_samples, &msaa_xscale, &msaa_yscale, NULL))
{
/* Number of samples not supported */
assert(0);
}
/* Determine needed padding (alignment of height/width) */
unsigned paddingX = 0, paddingY = 0;
unsigned halign = TEXTURE_HALIGN_FOUR;
etna_layout_multiple(layout,
priv->dev->chip.pixel_pipes,
(templat->bind & PIPE_BIND_SAMPLER_VIEW) && !VIV_FEATURE(priv->dev, chipMinorFeatures1, TEXTURE_HALIGN),
&paddingX, &paddingY, &halign);
assert(paddingX && paddingY);
/* determine mipmap levels */
struct etna_resource *resource = CALLOC_STRUCT(etna_resource);
int max_mip_level = templat->last_level;
if(unlikely(max_mip_level >= ETNA_NUM_LOD)) /* max LOD supported by hw */
max_mip_level = ETNA_NUM_LOD - 1;
/* Take care with DXTx formats, which have a divSize other than 1x1;
 * also: lower mipmaps are still 4x4 due to tiling. In a sense, compressed formats are already tiled.
* XXX UYVY formats?
*/
unsigned divSizeX = util_format_get_blockwidth(templat->format);
unsigned divSizeY = util_format_get_blockheight(templat->format);
unsigned ix = 0;
unsigned x = templat->width0, y = templat->height0;
unsigned offset = 0;
while(true)
{
struct etna_resource_level *mip = &resource->levels[ix];
mip->width = x;
mip->height = y;
mip->padded_width = align(x * msaa_xscale, paddingX);
mip->padded_height = align(y * msaa_yscale, paddingY);
mip->stride = align(mip->padded_width, divSizeX)/divSizeX * element_size;
mip->offset = offset;
mip->layer_stride = align(mip->padded_width, divSizeX)/divSizeX *
align(mip->padded_height, divSizeY)/divSizeY * element_size;
//......... part of the code omitted here .........
Example 9: svga_get_tex_sampler_view
struct svga_sampler_view *
svga_get_tex_sampler_view(struct pipe_context *pipe,
struct pipe_resource *pt,
unsigned min_lod, unsigned max_lod)
{
struct svga_context *svga = svga_context(pipe);
struct svga_screen *ss = svga_screen(pipe->screen);
struct svga_texture *tex = svga_texture(pt);
struct svga_sampler_view *sv = NULL;
SVGA3dSurface1Flags flags = SVGA3D_SURFACE_HINT_TEXTURE;
SVGA3dSurfaceFormat format = svga_translate_format(ss, pt->format,
PIPE_BIND_SAMPLER_VIEW);
boolean view = TRUE;
assert(pt);
assert(min_lod <= max_lod);
assert(max_lod <= pt->last_level);
assert(!svga_have_vgpu10(svga));
/* Is a view needed */
{
/*
* Can't control max lod. For first level views and when we only
 * look at one level, we disable mip filtering to achieve the same
* results as a view.
*/
if (min_lod == 0 && max_lod >= pt->last_level)
view = FALSE;
if (ss->debug.no_sampler_view)
view = FALSE;
if (ss->debug.force_sampler_view)
view = TRUE;
}
/* First try the cache */
if (view) {
mtx_lock(&ss->tex_mutex);
if (tex->cached_view &&
tex->cached_view->min_lod == min_lod &&
tex->cached_view->max_lod == max_lod) {
svga_sampler_view_reference(&sv, tex->cached_view);
mtx_unlock(&ss->tex_mutex);
SVGA_DBG(DEBUG_VIEWS, "svga: Sampler view: reuse %p, %u %u, last %u\n",
pt, min_lod, max_lod, pt->last_level);
svga_validate_sampler_view(svga_context(pipe), sv);
return sv;
}
mtx_unlock(&ss->tex_mutex);
}
sv = CALLOC_STRUCT(svga_sampler_view);
if (!sv)
return NULL;
pipe_reference_init(&sv->reference, 1);
/* Note: we're not refcounting the texture resource here to avoid
* a circular dependency.
*/
sv->texture = pt;
sv->min_lod = min_lod;
sv->max_lod = max_lod;
/* No view needed; just use the whole texture */
if (!view) {
SVGA_DBG(DEBUG_VIEWS,
"svga: Sampler view: no %p, mips %u..%u, nr %u, size (%ux%ux%u), last %u\n",
pt, min_lod, max_lod,
max_lod - min_lod + 1,
pt->width0,
pt->height0,
pt->depth0,
pt->last_level);
sv->key.cachable = 0;
sv->handle = tex->handle;
debug_reference(&sv->reference,
(debug_reference_descriptor)svga_debug_describe_sampler_view, 0);
return sv;
}
SVGA_DBG(DEBUG_VIEWS,
"svga: Sampler view: yes %p, mips %u..%u, nr %u, size (%ux%ux%u), last %u\n",
pt, min_lod, max_lod,
max_lod - min_lod + 1,
pt->width0,
pt->height0,
pt->depth0,
pt->last_level);
sv->age = tex->age;
sv->handle = svga_texture_view_surface(svga, tex,
PIPE_BIND_SAMPLER_VIEW,
flags, format,
min_lod,
max_lod - min_lod + 1,
-1, 1, -1, FALSE,
&sv->key);
//......... part of the code omitted here .........
Example 10: CALLOC_STRUCT
struct pipe_query *r600_create_batch_query(struct pipe_context *ctx,
unsigned num_queries,
unsigned *query_types)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
struct r600_common_screen *screen = rctx->screen;
struct r600_perfcounters *pc = screen->perfcounters;
struct r600_perfcounter_block *block;
struct r600_pc_group *group;
struct r600_query_pc *query;
unsigned base_gid, sub_gid, sub_index;
unsigned i, j;
if (!pc)
return NULL;
query = CALLOC_STRUCT(r600_query_pc);
if (!query)
return NULL;
query->b.b.ops = &batch_query_ops;
query->b.ops = &batch_query_hw_ops;
query->num_counters = num_queries;
/* Collect selectors per group */
for (i = 0; i < num_queries; ++i) {
unsigned sub_gid;
if (query_types[i] < R600_QUERY_FIRST_PERFCOUNTER)
goto error;
block = lookup_counter(pc, query_types[i] - R600_QUERY_FIRST_PERFCOUNTER,
&base_gid, &sub_index);
if (!block)
goto error;
sub_gid = sub_index / block->num_selectors;
sub_index = sub_index % block->num_selectors;
group = get_group_state(screen, query, block, sub_gid);
if (!group)
goto error;
if (group->num_counters >= block->num_counters) {
fprintf(stderr,
"perfcounter group %s: too many selected\n",
block->basename);
goto error;
}
group->selectors[group->num_counters] = sub_index;
++group->num_counters;
}
/* Compute result bases and CS size per group */
query->b.num_cs_dw_begin = pc->num_start_cs_dwords;
query->b.num_cs_dw_end = pc->num_stop_cs_dwords;
query->b.num_cs_dw_begin += pc->num_instance_cs_dwords; /* conservative */
query->b.num_cs_dw_end += pc->num_instance_cs_dwords;
i = 0;
for (group = query->groups; group; group = group->next) {
struct r600_perfcounter_block *block = group->block;
unsigned select_dw, read_dw;
unsigned instances = 1;
if ((block->flags & R600_PC_BLOCK_SE) && group->se < 0)
instances = rctx->screen->info.max_se;
if (group->instance < 0)
instances *= block->num_instances;
group->result_base = i;
query->b.result_size += sizeof(uint64_t) * instances * group->num_counters;
i += instances * group->num_counters;
pc->get_size(block, group->num_counters, group->selectors,
&select_dw, &read_dw);
query->b.num_cs_dw_begin += select_dw;
query->b.num_cs_dw_end += instances * read_dw;
query->b.num_cs_dw_begin += pc->num_instance_cs_dwords; /* conservative */
query->b.num_cs_dw_end += instances * pc->num_instance_cs_dwords;
}
if (query->shaders) {
if (query->shaders == R600_PC_SHADERS_WINDOWING)
query->shaders = 0xffffffff;
query->b.num_cs_dw_begin += pc->num_shaders_cs_dwords;
}
/* Map user-supplied query array to result indices */
query->counters = CALLOC(num_queries, sizeof(*query->counters));
for (i = 0; i < num_queries; ++i) {
struct r600_pc_counter *counter = &query->counters[i];
struct r600_perfcounter_block *block;
block = lookup_counter(pc, query_types[i] - R600_QUERY_FIRST_PERFCOUNTER,
&base_gid, &sub_index);
sub_gid = sub_index / block->num_selectors;
//......... part of the code omitted here .........
Example 11: vmw_ioctl_gb_surface_create
uint32
vmw_ioctl_gb_surface_create(struct vmw_winsys_screen *vws,
SVGA3dSurfaceFlags flags,
SVGA3dSurfaceFormat format,
unsigned usage,
SVGA3dSize size,
uint32_t numFaces,
uint32_t numMipLevels,
uint32_t buffer_handle,
struct vmw_region **p_region)
{
union drm_vmw_gb_surface_create_arg s_arg;
struct drm_vmw_gb_surface_create_req *req = &s_arg.req;
struct drm_vmw_gb_surface_create_rep *rep = &s_arg.rep;
struct vmw_region *region = NULL;
int ret;
vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);
if (p_region) {
region = CALLOC_STRUCT(vmw_region);
if (!region)
return SVGA3D_INVALID_ID;
}
memset(&s_arg, 0, sizeof(s_arg));
if (flags & SVGA3D_SURFACE_HINT_SCANOUT) {
req->svga3d_flags = (uint32_t) (flags & ~SVGA3D_SURFACE_HINT_SCANOUT);
req->drm_surface_flags = drm_vmw_surface_flag_scanout;
} else {
req->svga3d_flags = (uint32_t) flags;
}
req->format = (uint32_t) format;
if (usage & SVGA_SURFACE_USAGE_SHARED)
req->drm_surface_flags |= drm_vmw_surface_flag_shareable;
req->drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
DRM_VMW_MAX_MIP_LEVELS);
req->base_size.width = size.width;
req->base_size.height = size.height;
req->base_size.depth = size.depth;
req->mip_levels = numMipLevels;
req->multisample_count = 0;
req->autogen_filter = SVGA3D_TEX_FILTER_NONE;
if (buffer_handle)
req->buffer_handle = buffer_handle;
else
req->buffer_handle = SVGA3D_INVALID_ID;
ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_CREATE,
&s_arg, sizeof(s_arg));
if (ret)
goto out_fail_create;
if (p_region) {
region->handle = rep->buffer_handle;
region->map_handle = rep->buffer_map_handle;
region->drm_fd = vws->ioctl.drm_fd;
region->size = rep->backup_size;
*p_region = region;
}
vmw_printf("Surface id is %d\n", rep->sid);
return rep->handle;
out_fail_create:
if (region)
FREE(region);
return SVGA3D_INVALID_ID;
}
Example 12: while
static struct r600_pc_group *get_group_state(struct r600_common_screen *screen,
struct r600_query_pc *query,
struct r600_perfcounter_block *block,
unsigned sub_gid)
{
struct r600_pc_group *group = query->groups;
while (group) {
if (group->block == block && group->sub_gid == sub_gid)
return group;
group = group->next;
}
group = CALLOC_STRUCT(r600_pc_group);
if (!group)
return NULL;
group->block = block;
group->sub_gid = sub_gid;
if (block->flags & R600_PC_BLOCK_SHADER) {
unsigned sub_gids = block->num_instances;
unsigned shader_id;
unsigned shaders;
unsigned query_shaders;
if (block->flags & R600_PC_BLOCK_SE_GROUPS)
sub_gids = sub_gids * screen->info.max_se;
shader_id = sub_gid / sub_gids;
sub_gid = sub_gid % sub_gids;
shaders = screen->perfcounters->shader_type_bits[shader_id];
query_shaders = query->shaders & ~R600_PC_SHADERS_WINDOWING;
if (query_shaders && query_shaders != shaders) {
fprintf(stderr, "r600_perfcounter: incompatible shader groups\n");
FREE(group);
return NULL;
}
query->shaders = shaders;
}
if (block->flags & R600_PC_BLOCK_SHADER_WINDOWED && !query->shaders) {
// A non-zero value in query->shaders ensures that the shader
// masking is reset unless the user explicitly requests one.
query->shaders = R600_PC_SHADERS_WINDOWING;
}
if (block->flags & R600_PC_BLOCK_SE_GROUPS) {
group->se = sub_gid / block->num_instances;
sub_gid = sub_gid % block->num_instances;
} else {
group->se = -1;
}
if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS) {
group->instance = sub_gid;
} else {
group->instance = -1;
}
group->next = query->groups;
query->groups = group;
return group;
}
Example 13: _swrast_new_texture_image
/**
* Allocate a new swrast_texture_image (a subclass of gl_texture_image).
* Called via ctx->Driver.NewTextureImage().
*/
struct gl_texture_image *
_swrast_new_texture_image( struct gl_context *ctx )
{
(void) ctx;
return (struct gl_texture_image *) CALLOC_STRUCT(swrast_texture_image);
}
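This tiny allocator only does something useful because core Mesa calls it through the driver function table; a hedged wiring sketch (the helper name is hypothetical, dd_function_table's NewTextureImage hook is the assumed interface):

/* Hypothetical wiring: a swrast-based driver points the core-Mesa hook at
 * the allocator above during driver initialization. */
static void
init_texture_image_hooks(struct dd_function_table *driver)
{
   driver->NewTextureImage = _swrast_new_texture_image;
}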
Example 14: fd5_zsa_state_create
void *
fd5_zsa_state_create(struct pipe_context *pctx,
const struct pipe_depth_stencil_alpha_state *cso)
{
struct fd5_zsa_stateobj *so;
so = CALLOC_STRUCT(fd5_zsa_stateobj);
if (!so)
return NULL;
so->base = *cso;
switch (cso->depth.func) {
case PIPE_FUNC_LESS:
case PIPE_FUNC_LEQUAL:
so->gras_lrz_cntl = A5XX_GRAS_LRZ_CNTL_ENABLE;
break;
case PIPE_FUNC_GREATER:
case PIPE_FUNC_GEQUAL:
so->gras_lrz_cntl = A5XX_GRAS_LRZ_CNTL_ENABLE | A5XX_GRAS_LRZ_CNTL_GREATER;
break;
default:
/* LRZ not enabled */
so->gras_lrz_cntl = 0;
break;
}
if (!(cso->stencil->enabled || cso->alpha.enabled || !cso->depth.writemask))
so->lrz_write = true;
so->rb_depth_cntl |=
A5XX_RB_DEPTH_CNTL_ZFUNC(cso->depth.func); /* maps 1:1 */
if (cso->depth.enabled)
so->rb_depth_cntl |=
A5XX_RB_DEPTH_CNTL_Z_ENABLE |
A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE;
if (cso->depth.writemask)
so->rb_depth_cntl |= A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE;
if (cso->stencil[0].enabled) {
const struct pipe_stencil_state *s = &cso->stencil[0];
so->rb_stencil_control |=
A5XX_RB_STENCIL_CONTROL_STENCIL_READ |
A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
A5XX_RB_STENCIL_CONTROL_FUNC(s->func) | /* maps 1:1 */
A5XX_RB_STENCIL_CONTROL_FAIL(fd_stencil_op(s->fail_op)) |
A5XX_RB_STENCIL_CONTROL_ZPASS(fd_stencil_op(s->zpass_op)) |
A5XX_RB_STENCIL_CONTROL_ZFAIL(fd_stencil_op(s->zfail_op));
so->rb_stencilrefmask |=
A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(s->writemask) |
A5XX_RB_STENCILREFMASK_STENCILMASK(s->valuemask);
if (cso->stencil[1].enabled) {
const struct pipe_stencil_state *bs = &cso->stencil[1];
so->rb_stencil_control |=
A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF |
A5XX_RB_STENCIL_CONTROL_FUNC_BF(bs->func) | /* maps 1:1 */
A5XX_RB_STENCIL_CONTROL_FAIL_BF(fd_stencil_op(bs->fail_op)) |
A5XX_RB_STENCIL_CONTROL_ZPASS_BF(fd_stencil_op(bs->zpass_op)) |
A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(fd_stencil_op(bs->zfail_op));
so->rb_stencilrefmask_bf |=
A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(bs->writemask) |
A5XX_RB_STENCILREFMASK_BF_STENCILMASK(bs->valuemask);
}
}
if (cso->alpha.enabled) {
uint32_t ref = cso->alpha.ref_value * 255.0;
so->rb_alpha_control =
A5XX_RB_ALPHA_CONTROL_ALPHA_TEST |
A5XX_RB_ALPHA_CONTROL_ALPHA_REF(ref) |
A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(cso->alpha.func);
// so->rb_depth_control |=
// A5XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE;
}
return so;
}
Example 15: st_translate_geometry_program
/**
* Translate a geometry program to create a new variant.
*/
static struct st_gp_variant *
st_translate_geometry_program(struct st_context *st,
struct st_geometry_program *stgp,
const struct st_gp_variant_key *key)
{
GLuint inputMapping[GEOM_ATTRIB_MAX];
GLuint outputMapping[GEOM_RESULT_MAX];
struct pipe_context *pipe = st->pipe;
GLuint attr;
GLbitfield64 inputsRead;
GLuint vslot = 0;
GLuint num_generic = 0;
uint gs_num_inputs = 0;
uint gs_builtin_inputs = 0;
uint gs_array_offset = 0;
ubyte gs_output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
ubyte gs_output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
uint gs_num_outputs = 0;
GLint i;
GLuint maxSlot = 0;
struct ureg_program *ureg;
struct st_gp_variant *gpv;
gpv = CALLOC_STRUCT(st_gp_variant);
if (!gpv)
return NULL;
_mesa_remove_output_reads(&stgp->Base.Base, PROGRAM_OUTPUT);
_mesa_remove_output_reads(&stgp->Base.Base, PROGRAM_VARYING);
ureg = ureg_create( TGSI_PROCESSOR_GEOMETRY );
if (ureg == NULL) {
free(gpv);
return NULL;
}
/* which vertex output goes to the first geometry input */
vslot = 0;
memset(inputMapping, 0, sizeof(inputMapping));
memset(outputMapping, 0, sizeof(outputMapping));
/*
* Convert Mesa program inputs to TGSI input register semantics.
*/
inputsRead = stgp->Base.Base.InputsRead;
for (attr = 0; attr < GEOM_ATTRIB_MAX; attr++) {
if ((inputsRead & BITFIELD64_BIT(attr)) != 0) {
const GLuint slot = gs_num_inputs;
gs_num_inputs++;
inputMapping[attr] = slot;
stgp->input_map[slot + gs_array_offset] = vslot - gs_builtin_inputs;
stgp->input_to_index[attr] = vslot;
stgp->index_to_input[vslot] = attr;
++vslot;
if (attr != GEOM_ATTRIB_PRIMITIVE_ID) {
gs_array_offset += 2;
} else
++gs_builtin_inputs;
#if 0
debug_printf("input map at %d = %d\n",
slot + gs_array_offset, stgp->input_map[slot + gs_array_offset]);
#endif
switch (attr) {
case GEOM_ATTRIB_PRIMITIVE_ID:
stgp->input_semantic_name[slot] = TGSI_SEMANTIC_PRIMID;
stgp->input_semantic_index[slot] = 0;
break;
case GEOM_ATTRIB_POSITION:
stgp->input_semantic_name[slot] = TGSI_SEMANTIC_POSITION;
stgp->input_semantic_index[slot] = 0;
break;
case GEOM_ATTRIB_COLOR0:
stgp->input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
stgp->input_semantic_index[slot] = 0;
break;
case GEOM_ATTRIB_COLOR1:
stgp->input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
stgp->input_semantic_index[slot] = 1;
break;
case GEOM_ATTRIB_FOG_FRAG_COORD:
stgp->input_semantic_name[slot] = TGSI_SEMANTIC_FOG;
stgp->input_semantic_index[slot] = 0;
break;
case GEOM_ATTRIB_TEX_COORD:
stgp->input_semantic_name[slot] = TGSI_SEMANTIC_GENERIC;
stgp->input_semantic_index[slot] = num_generic++;
//......... part of the code omitted here .........