This article collects typical usage examples of the C++ function p_atomic_inc. If you are wondering what p_atomic_inc does, how to use it, or what real-world callers look like, the hand-picked code examples below should help.
The following shows 15 code examples of the p_atomic_inc function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
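Before the examples, here is a minimal sketch of the pattern they all share, assuming the p_atomic_* macros from Mesa's util/u_atomic.h (p_atomic_inc, p_atomic_dec_zero, p_atomic_read): p_atomic_inc() atomically increments the integer its argument points to, which is why it appears wherever reference counts or invalidation stamps are bumped without holding a lock. The struct and helper names below are made up for illustration.

#include <stdint.h>
#include <stdlib.h>
#include "util/u_atomic.h"

/* Hypothetical object used only to illustrate the refcount pattern. */
struct demo_object {
   int32_t refcount;   /* starts at 1 when the object is created */
};

static void demo_object_ref(struct demo_object *obj)
{
   p_atomic_inc(&obj->refcount);   /* atomic ++, no lock needed */
}

static void demo_object_unref(struct demo_object *obj)
{
   /* p_atomic_dec_zero() decrements and returns true if the result is 0 */
   if (p_atomic_dec_zero(&obj->refcount))
      free(obj);
}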
Example 1: CALLED
void
GalliumContext::Invalidate(uint32 width, uint32 height)
{
   CALLED();

   assert(fContext[fCurrentContext]);

   // Update st_context dimensions
   fContext[fCurrentContext]->width = width;
   fContext[fCurrentContext]->height = height;

   // Is this the best way to invalidate?
   p_atomic_inc(&fContext[fCurrentContext]->read->stfbi->stamp);
   p_atomic_inc(&fContext[fCurrentContext]->draw->stfbi->stamp);
}
Example 2: _mesa_reference_shader_program_data
void
_mesa_reference_shader_program_data(struct gl_context *ctx,
                                    struct gl_shader_program_data **ptr,
                                    struct gl_shader_program_data *data)
{
   if (*ptr == data)
      return;

   if (*ptr) {
      struct gl_shader_program_data *oldData = *ptr;

      assert(oldData->RefCount > 0);

      if (p_atomic_dec_zero(&oldData->RefCount)) {
         assert(ctx);
         ralloc_free(oldData);
      }

      *ptr = NULL;
   }

   if (data)
      p_atomic_inc(&data->RefCount);

   *ptr = data;
}
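A hedged usage sketch of how a helper like the one above is typically called: pointing the slot at an object takes a reference (p_atomic_inc on RefCount), clearing the slot drops one. The wrapper functions and the name prog_data are assumptions for illustration, not code from Mesa.

/* Hypothetical callers, for illustration only. */
static void
cache_shader_program_data(struct gl_context *ctx,
                          struct gl_shader_program_data *prog_data,
                          struct gl_shader_program_data **cache_slot)
{
   /* Point the slot at prog_data: its RefCount is incremented atomically. */
   _mesa_reference_shader_program_data(ctx, cache_slot, prog_data);
}

static void
drop_cached_shader_program_data(struct gl_context *ctx,
                                struct gl_shader_program_data **cache_slot)
{
   /* RefCount is decremented; the data is freed when it reaches zero. */
   _mesa_reference_shader_program_data(ctx, cache_slot, NULL);
}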
Example 3: amdgpu_cs_create
static struct radeon_winsys_cs *
amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
                 enum ring_type ring_type,
                 void (*flush)(void *ctx, unsigned flags,
                               struct pipe_fence_handle **fence),
                 void *flush_ctx,
                 struct radeon_winsys_cs_handle *trace_buf)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   struct amdgpu_cs *cs;

   cs = CALLOC_STRUCT(amdgpu_cs);
   if (!cs) {
      return NULL;
   }

   cs->ctx = ctx;
   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;
   cs->base.ring_type = ring_type;

   if (!amdgpu_init_cs_context(cs, ring_type)) {
      FREE(cs);
      return NULL;
   }

   if (!amdgpu_get_new_ib(cs)) {
      amdgpu_destroy_cs_context(cs);
      FREE(cs);
      return NULL;
   }

   p_atomic_inc(&ctx->ws->num_cs);
   return &cs->base;
}
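The num_cs counter bumped at the end of the create path has a matching decrement when the command stream is destroyed, so num_cs always tracks how many command streams are alive. The teardown below is a sketch modeled on the create function above, not the exact Mesa code.

static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;

   amdgpu_destroy_cs_context(cs);
   /* undo the p_atomic_inc() done in amdgpu_cs_create() */
   p_atomic_dec(&cs->ctx->ws->num_cs);
   FREE(cs);
}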
Example 4: radeon_drm_winsys
static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws)
{
   struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
   struct radeon_drm_cs *cs;

   cs = CALLOC_STRUCT(radeon_drm_cs);
   if (!cs) {
      return NULL;
   }

   pipe_semaphore_init(&cs->flush_queued, 0);
   pipe_semaphore_init(&cs->flush_completed, 0);

   cs->ws = ws;

   if (!radeon_init_cs_context(&cs->csc1, cs->ws)) {
      FREE(cs);
      return NULL;
   }
   if (!radeon_init_cs_context(&cs->csc2, cs->ws)) {
      radeon_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

   /* Set the first command buffer as current. */
   cs->csc = &cs->csc1;
   cs->cst = &cs->csc2;
   cs->base.buf = cs->csc->buf;

   p_atomic_inc(&ws->num_cs);

   if (cs->ws->num_cpus > 1 && debug_get_option_thread())
      cs->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, cs);

   return &cs->base;
}
Example 5: radeon_drm_winsys
static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws,
                                                     enum ring_type ring_type,
                                                     struct radeon_winsys_cs_handle *trace_buf)
{
   struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
   struct radeon_drm_cs *cs;

   cs = CALLOC_STRUCT(radeon_drm_cs);
   if (!cs) {
      return NULL;
   }

   pipe_semaphore_init(&cs->flush_completed, 0);

   cs->ws = ws;
   cs->trace_buf = (struct radeon_bo*)trace_buf;

   if (!radeon_init_cs_context(&cs->csc1, cs->ws)) {
      FREE(cs);
      return NULL;
   }
   if (!radeon_init_cs_context(&cs->csc2, cs->ws)) {
      radeon_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

   /* Set the first command buffer as current. */
   cs->csc = &cs->csc1;
   cs->cst = &cs->csc2;
   cs->base.buf = cs->csc->buf;
   cs->base.ring_type = ring_type;

   p_atomic_inc(&ws->num_cs);
   return &cs->base;
}
Example 6: radeon_drm_cs_flush
static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs, unsigned flags)
{
   struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
   struct radeon_cs_context *tmp;

   if (rcs->cdw > RADEON_MAX_CMDBUF_DWORDS) {
      fprintf(stderr, "radeon: command stream overflowed\n");
   }

   radeon_drm_cs_sync_flush(cs);

   /* Flip command streams. */
   tmp = cs->csc;
   cs->csc = cs->cst;
   cs->cst = tmp;

   /* If the CS is not empty or overflowed, emit it in a separate thread. */
   if (cs->base.cdw && cs->base.cdw <= RADEON_MAX_CMDBUF_DWORDS) {
      unsigned i, crelocs = cs->cst->crelocs;

      cs->cst->chunks[0].length_dw = cs->base.cdw;

      for (i = 0; i < crelocs; i++) {
         /* Update the number of active asynchronous CS ioctls for the buffer. */
         p_atomic_inc(&cs->cst->relocs_bo[i]->num_active_ioctls);
      }

      cs->cst->flags[0] = 0;
      cs->cst->flags[1] = RADEON_CS_RING_GFX;
      cs->cst->cs.num_chunks = 2;
      if (flags & RADEON_FLUSH_KEEP_TILING_FLAGS) {
         cs->cst->flags[0] |= RADEON_CS_KEEP_TILING_FLAGS;
         cs->cst->cs.num_chunks = 3;
      }
      if (cs->ws->info.r600_virtual_address) {
         cs->cst->flags[0] |= RADEON_CS_USE_VM;
         cs->cst->cs.num_chunks = 3;
      }
      if (flags & RADEON_FLUSH_COMPUTE) {
         cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
         cs->cst->cs.num_chunks = 3;
      }

      if (cs->thread &&
          (flags & RADEON_FLUSH_ASYNC)) {
         cs->flush_started = 1;
         pipe_semaphore_signal(&cs->flush_queued);
      } else {
         radeon_drm_cs_emit_ioctl_oneshot(cs->cst);
      }
   } else {
      radeon_cs_context_cleanup(cs->cst);
   }

   /* Prepare a new CS. */
   cs->base.buf = cs->csc->buf;
   cs->base.cdw = 0;
}
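num_active_ioctls is incremented above so that other threads can tell a buffer is still referenced by an in-flight CS ioctl; once the ioctl finishes, the emit path drops the count again. The sketch below shows that matching decrement; the function name and body are assumptions based on the fields used in this example.

/* Sketch: release the per-buffer ioctl counts after the CS ioctl returns. */
static void radeon_cs_release_active_ioctls(struct radeon_cs_context *csc)
{
   unsigned i;

   for (i = 0; i < csc->crelocs; i++)
      p_atomic_dec(&csc->relocs_bo[i]->num_active_ioctls);
}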
Example 7: drisw_invalidate_drawable
static inline void
drisw_invalidate_drawable(__DRIdrawable *dPriv)
{
   struct dri_drawable *drawable = dri_drawable(dPriv);

   drawable->texture_stamp = dPriv->lastStamp - 1;

   p_atomic_inc(&drawable->base.stamp);
}
Example 8: amdgpu_add_buffer
static unsigned amdgpu_add_buffer(struct amdgpu_cs *cs,
                                  struct amdgpu_winsys_bo *bo,
                                  enum radeon_bo_usage usage,
                                  enum radeon_bo_domain domains,
                                  unsigned priority,
                                  enum radeon_bo_domain *added_domains)
{
   struct amdgpu_cs_buffer *buffer;
   unsigned hash = bo->unique_id & (Elements(cs->buffer_indices_hashlist)-1);
   int i = -1;

   assert(priority < 64);
   *added_domains = 0;

   i = amdgpu_lookup_buffer(cs, bo);

   if (i >= 0) {
      buffer = &cs->buffers[i];
      buffer->priority_usage |= 1llu << priority;
      buffer->usage |= usage;
      *added_domains = domains & ~buffer->domains;
      buffer->domains |= domains;
      cs->flags[i] = MAX2(cs->flags[i], priority / 4);
      return i;
   }

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_buffers >= cs->max_num_buffers) {
      uint32_t size;
      cs->max_num_buffers += 10;

      size = cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer);
      cs->buffers = realloc(cs->buffers, size);

      size = cs->max_num_buffers * sizeof(amdgpu_bo_handle);
      cs->handles = realloc(cs->handles, size);

      cs->flags = realloc(cs->flags, cs->max_num_buffers);
   }

   /* Initialize the new buffer. */
   cs->buffers[cs->num_buffers].bo = NULL;
   amdgpu_winsys_bo_reference(&cs->buffers[cs->num_buffers].bo, bo);
   cs->handles[cs->num_buffers] = bo->bo;
   cs->flags[cs->num_buffers] = priority / 4;
   p_atomic_inc(&bo->num_cs_references);
   buffer = &cs->buffers[cs->num_buffers];
   buffer->bo = bo;
   buffer->priority_usage = 1llu << priority;
   buffer->usage = usage;
   buffer->domains = domains;

   cs->buffer_indices_hashlist[hash] = cs->num_buffers;

   *added_domains = domains;
   return cs->num_buffers++;
}
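The fast path above depends on amdgpu_lookup_buffer(), which consults the one-entry-per-bucket buffer_indices_hashlist before falling back to a linear scan. The version below is a sketch reconstructed from the fields used in this example, not the exact Mesa implementation.

static int amdgpu_lookup_buffer(struct amdgpu_cs *cs, struct amdgpu_winsys_bo *bo)
{
   unsigned hash = bo->unique_id & (Elements(cs->buffer_indices_hashlist)-1);
   int i = cs->buffer_indices_hashlist[hash];
   int j;

   /* The hash list caches only the most recently used index per bucket. */
   if (i >= 0 && i < (int)cs->num_buffers && cs->buffers[i].bo == bo)
      return i;

   /* Slow path: scan the whole list and refresh the cached index. */
   for (j = 0; j < (int)cs->num_buffers; j++) {
      if (cs->buffers[j].bo == bo) {
         cs->buffer_indices_hashlist[hash] = j;
         return j;
      }
   }
   return -1;
}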
Example 9: radeon_lookup_or_add_real_buffer
static unsigned radeon_lookup_or_add_real_buffer(struct radeon_drm_cs *cs,
                                                 struct radeon_bo *bo)
{
   struct radeon_cs_context *csc = cs->csc;
   struct drm_radeon_cs_reloc *reloc;
   unsigned hash = bo->hash & (ARRAY_SIZE(csc->reloc_indices_hashlist)-1);
   int i = -1;

   i = radeon_lookup_buffer(csc, bo);

   if (i >= 0) {
      /* For async DMA, every add_buffer call must add a buffer to the list
       * no matter how many duplicates there are. This is due to the fact
       * the DMA CS checker doesn't use NOP packets for offset patching,
       * but always uses the i-th buffer from the list to patch the i-th
       * offset. If there are N offsets in a DMA CS, there must also be N
       * buffers in the relocation list.
       *
       * This doesn't have to be done if virtual memory is enabled,
       * because there is no offset patching with virtual memory.
       */
      if (cs->ring_type != RING_DMA || cs->ws->info.r600_has_virtual_memory) {
         return i;
      }
   }

   /* New relocation, check if the backing array is large enough. */
   if (csc->num_relocs >= csc->max_relocs) {
      uint32_t size;
      csc->max_relocs = MAX2(csc->max_relocs + 16, (unsigned)(csc->max_relocs * 1.3));

      size = csc->max_relocs * sizeof(csc->relocs_bo[0]);
      csc->relocs_bo = realloc(csc->relocs_bo, size);

      size = csc->max_relocs * sizeof(struct drm_radeon_cs_reloc);
      csc->relocs = realloc(csc->relocs, size);

      csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
   }

   /* Initialize the new relocation. */
   csc->relocs_bo[csc->num_relocs].bo = NULL;
   csc->relocs_bo[csc->num_relocs].u.real.priority_usage = 0;
   radeon_bo_reference(&csc->relocs_bo[csc->num_relocs].bo, bo);
   p_atomic_inc(&bo->num_cs_references);
   reloc = &csc->relocs[csc->num_relocs];
   reloc->handle = bo->handle;
   reloc->read_domains = 0;
   reloc->write_domain = 0;
   reloc->flags = 0;

   csc->reloc_indices_hashlist[hash] = csc->num_relocs;

   csc->chunks[1].length_dw += RELOC_DWORDS;

   return csc->num_relocs++;
}
Example 10: dri2_invalidate_drawable
static void
dri2_invalidate_drawable(__DRIdrawable *dPriv)
{
   struct dri_drawable *drawable = dri_drawable(dPriv);

   dri2InvalidateDrawable(dPriv);
   drawable->dPriv->lastStamp = drawable->dPriv->dri2.stamp;

   p_atomic_inc(&drawable->base.stamp);
}
Example 11: amdgpu_add_reloc
static unsigned amdgpu_add_reloc(struct amdgpu_cs *cs,
                                 struct amdgpu_winsys_bo *bo,
                                 enum radeon_bo_usage usage,
                                 enum radeon_bo_domain domains,
                                 unsigned priority,
                                 enum radeon_bo_domain *added_domains)
{
   struct amdgpu_cs_buffer *reloc;
   unsigned hash = bo->unique_id & (Elements(cs->buffer_indices_hashlist)-1);
   int i = -1;

   priority = MIN2(priority, 15);
   *added_domains = 0;

   i = amdgpu_get_reloc(cs, bo);

   if (i >= 0) {
      reloc = &cs->buffers[i];
      reloc->usage |= usage;
      *added_domains = domains & ~reloc->domains;
      reloc->domains |= domains;
      cs->flags[i] = MAX2(cs->flags[i], priority);
      return i;
   }

   /* New relocation, check if the backing array is large enough. */
   if (cs->num_buffers >= cs->max_num_buffers) {
      uint32_t size;
      cs->max_num_buffers += 10;

      size = cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer);
      cs->buffers = realloc(cs->buffers, size);

      size = cs->max_num_buffers * sizeof(amdgpu_bo_handle);
      cs->handles = realloc(cs->handles, size);

      cs->flags = realloc(cs->flags, cs->max_num_buffers);
   }

   /* Initialize the new relocation. */
   cs->buffers[cs->num_buffers].bo = NULL;
   amdgpu_winsys_bo_reference(&cs->buffers[cs->num_buffers].bo, bo);
   cs->handles[cs->num_buffers] = bo->bo;
   cs->flags[cs->num_buffers] = priority;
   p_atomic_inc(&bo->num_cs_references);
   reloc = &cs->buffers[cs->num_buffers];
   reloc->bo = bo;
   reloc->usage = usage;
   reloc->domains = domains;

   cs->buffer_indices_hashlist[hash] = cs->num_buffers;

   *added_domains = domains;
   return cs->num_buffers++;
}
Example 12: egl_g3d_invalid_surface
static void
egl_g3d_invalid_surface(struct native_display *ndpy,
                        struct native_surface *nsurf,
                        unsigned int seq_num)
{
   /* XXX not thread safe? */
   struct egl_g3d_surface *gsurf = egl_g3d_surface(nsurf->user_data);

   if (gsurf && gsurf->stfbi)
      p_atomic_inc(&gsurf->stfbi->stamp);
}
Example 13: thread_function
static int
thread_function(void *thread_data)
{
   int thread_id = *((int *) thread_data);

   LOG("thread %d starting\n", thread_id);
   os_time_sleep(thread_id * 100 * 1000);
   LOG("thread %d before barrier\n", thread_id);

   CHECK(p_atomic_read(&proceeded) == 0);
   p_atomic_inc(&waiting);

   pipe_barrier_wait(&barrier);

   CHECK(p_atomic_read(&waiting) == NUM_THREADS);
   p_atomic_inc(&proceeded);

   LOG("thread %d exiting\n", thread_id);
   return 0;
}
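For context, a sketch of the harness that would drive thread_function(): the barrier is initialized for NUM_THREADS waiters and both counters start at zero, which is what makes the CHECK() assertions above hold. Helper names follow the old os_thread.h / u_atomic.h style used by the example; the exact test body is an assumption.

static void run_barrier_test(void)
{
   pipe_thread threads[NUM_THREADS];
   int ids[NUM_THREADS];
   int i;

   p_atomic_set(&waiting, 0);
   p_atomic_set(&proceeded, 0);
   pipe_barrier_init(&barrier, NUM_THREADS);

   for (i = 0; i < NUM_THREADS; i++) {
      ids[i] = i;
      threads[i] = pipe_thread_create(thread_function, &ids[i]);
   }

   for (i = 0; i < NUM_THREADS; i++)
      pipe_thread_wait(threads[i]);

   pipe_barrier_destroy(&barrier);
}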
Example 14: radeon_drm_ws_queue_cs
void radeon_drm_ws_queue_cs(struct radeon_drm_winsys *ws, struct radeon_drm_cs *cs)
{
retry:
   pipe_mutex_lock(ws->cs_stack_lock);
   if (p_atomic_read(&ws->ncs) >= RING_LAST) {
      /* no room left for a flush */
      pipe_mutex_unlock(ws->cs_stack_lock);
      goto retry;
   }
   ws->cs_stack[p_atomic_read(&ws->ncs)] = cs;
   p_atomic_inc(&ws->ncs);
   pipe_mutex_unlock(ws->cs_stack_lock);
   pipe_semaphore_signal(&ws->cs_queued);
}
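The producer above busy-retries while ncs is at its limit, so there must be a consumer that pops entries and decrements ncs under the same lock; pipe_semaphore_signal(&ws->cs_queued) is what wakes it up. The dequeue below is a sketch inferred from the fields used here, not the actual winsys thread code.

static struct radeon_drm_cs *radeon_drm_ws_dequeue_cs(struct radeon_drm_winsys *ws)
{
   struct radeon_drm_cs *cs = NULL;
   unsigned i, ncs;

   pipe_semaphore_wait(&ws->cs_queued);   /* sleep until work is queued */

   pipe_mutex_lock(ws->cs_stack_lock);
   ncs = p_atomic_read(&ws->ncs);
   if (ncs) {
      cs = ws->cs_stack[0];
      for (i = 1; i < ncs; i++)            /* shift the stack down */
         ws->cs_stack[i - 1] = ws->cs_stack[i];
      ws->cs_stack[ncs - 1] = NULL;
      p_atomic_dec(&ws->ncs);              /* makes room for the producer */
   }
   pipe_mutex_unlock(ws->cs_stack_lock);

   return cs;
}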
Example 15: vmw_swc_shader_relocation
static void
vmw_swc_shader_relocation(struct svga_winsys_context *swc,
                          uint32 *shid,
                          uint32 *mobid,
                          uint32 *offset,
                          struct svga_winsys_gb_shader *shader,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_winsys_screen *vws = vswc->vws;
   struct vmw_svga_winsys_shader *vshader;
   struct vmw_ctx_validate_item *ishader;

   if (!shader) {
      *shid = SVGA3D_INVALID_ID;
      return;
   }

   vshader = vmw_svga_winsys_shader(shader);

   if (!vws->base.have_vgpu10) {
      assert(vswc->shader.staged < vswc->shader.reserved);
      ishader = util_hash_table_get(vswc->hash, vshader);

      if (ishader == NULL) {
         ishader = &vswc->shader.items[vswc->shader.used + vswc->shader.staged];
         vmw_svga_winsys_shader_reference(&ishader->vshader, vshader);
         ishader->referenced = FALSE;

         /*
          * Note that a failure here may just fall back to unhashed behavior
          * and potentially cause unnecessary flushing, so ignore the
          * return code.
          */
         (void) util_hash_table_set(vswc->hash, vshader, ishader);
         ++vswc->shader.staged;
      }

      if (!ishader->referenced) {
         ishader->referenced = TRUE;
         p_atomic_inc(&vshader->validated);
      }
   }

   if (shid)
      *shid = vshader->shid;

   if (vshader->buf)
      vmw_swc_mob_relocation(swc, mobid, offset, vshader->buf,
                             0, SVGA_RELOC_READ);
}