This article collects and summarizes typical usage examples of the C++ function KGSL_MEMSTORE_OFFSET. If you have been wondering what exactly KGSL_MEMSTORE_OFFSET does and how it is used in practice, the curated examples here should help.
The following presents 15 code examples of the KGSL_MEMSTORE_OFFSET function, sorted by popularity by default.
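Before the examples, a quick orientation: KGSL_MEMSTORE_OFFSET computes the byte offset of one field inside a per-context slot of the "memstore", a shared-memory block that the KGSL driver and the GPU command processor both read and write (timestamps, wait flags, the id of the running context). A minimal sketch of the macro and its backing structure, modeled on the msm-3.4-era kgsl.h (the field layout may differ between kernel versions):

struct kgsl_devmemstore {
	volatile unsigned int soptimestamp;	/* start-of-pipeline timestamp */
	unsigned int sbz;
	volatile unsigned int eoptimestamp;	/* end-of-pipeline timestamp */
	unsigned int sbz2;
	volatile unsigned int ts_cmp_enable;	/* arms the CP_COND_EXEC compare */
	unsigned int sbz3;
	volatile unsigned int ref_wait_ts;	/* reference timestamp for waits */
	unsigned int sbz4;
	unsigned int current_context;
	unsigned int sbz5;
};

/* byte offset of "field" in the slot for context ctxt_id;
 * KGSL_MEMSTORE_GLOBAL is a reserved slot id for device-global values */
#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
	((ctxt_id) * sizeof(struct kgsl_devmemstore) + \
	 offsetof(struct kgsl_devmemstore, field))

#define KGSL_INIT_REFTIMESTAMP 0x7FFFFFFF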
Example 1: adreno_drawctxt_create
/**
* adreno_drawctxt_create - create a new adreno draw context
* @device - KGSL device to create the context on
* @pagetable - Pagetable for the context
* @context - Generic KGSL context structure
* @flags - flags for the context (passed from user space)
*
* Create a new draw context for the 3D core. Return 0 on success,
* or error code on failure.
*/
int adreno_drawctxt_create(struct kgsl_device *device,
struct kgsl_pagetable *pagetable,
struct kgsl_context *context, uint32_t flags)
{
struct adreno_context *drawctxt;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
int ret;
drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
if (drawctxt == NULL)
return -ENOMEM;
drawctxt->pagetable = pagetable;
drawctxt->bin_base_offset = 0;
drawctxt->id = context->id;
rb->timestamp[context->id] = 0;
if (flags & KGSL_CONTEXT_PREAMBLE)
drawctxt->flags |= CTXT_FLAGS_PREAMBLE;
if (flags & KGSL_CONTEXT_NO_GMEM_ALLOC)
drawctxt->flags |= CTXT_FLAGS_NOGMEMALLOC;
if (flags & KGSL_CONTEXT_PER_CONTEXT_TS)
drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
if (flags & KGSL_CONTEXT_USER_GENERATED_TS) {
if (!(flags & KGSL_CONTEXT_PER_CONTEXT_TS)) {
ret = -EINVAL;
goto err;
}
drawctxt->flags |= CTXT_FLAGS_USER_GENERATED_TS;
}
ret = adreno_dev->gpudev->ctxt_create(adreno_dev, drawctxt);
if (ret)
goto err;
kgsl_sharedmem_writel(&device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->id, ref_wait_ts),
KGSL_INIT_REFTIMESTAMP);
kgsl_sharedmem_writel(&device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->id, ts_cmp_enable), 0);
kgsl_sharedmem_writel(&device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->id, soptimestamp), 0);
kgsl_sharedmem_writel(&device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->id, eoptimestamp), 0);
context->devctxt = drawctxt;
return 0;
err:
kfree(drawctxt);
return ret;
}
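The slots seeded here are polled from the host whenever user space waits on a timestamp: the ringbuffer code advances eoptimestamp as commands retire (see Example 11), and a waiter compares it against the target. A readback sketch, assuming the same 3-argument kgsl_sharedmem_readl that matches the writel form used above, with timestamp_cmp being the driver's wraparound-safe comparator:

unsigned int ts_processed;
int retired;

kgsl_sharedmem_readl(&device->memstore, &ts_processed,
		KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp));
/* >= 0 once the GPU has reached or passed the target timestamp */
retired = (timestamp_cmp(ts_processed, timestamp) >= 0);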
Example 2: adreno_drawctxt_invalidate
/**
* adreno_drawctxt_invalidate() - Invalidate an adreno draw context
* @device: Pointer to the KGSL device structure for the GPU
* @context: Pointer to the KGSL context structure
*
* Invalidate the context and remove all queued commands and cancel any pending
* waiters
*/
void adreno_drawctxt_invalidate(struct kgsl_device *device,
struct kgsl_context *context)
{
struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
trace_adreno_drawctxt_invalidate(drawctxt);
drawctxt->state = ADRENO_CONTEXT_STATE_INVALID;
/* Clear the pending queue */
mutex_lock(&drawctxt->mutex);
/*
* set the timestamp to the last value since the context is invalidated
* and we want the pending events for this context to go away
*/
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
drawctxt->timestamp);
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
drawctxt->timestamp);
while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
struct kgsl_cmdbatch *cmdbatch =
drawctxt->cmdqueue[drawctxt->cmdqueue_head];
drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
ADRENO_CONTEXT_CMDQUEUE_SIZE;
mutex_unlock(&drawctxt->mutex);
kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
kgsl_cancel_events_timestamp(device, &context->events,
cmdbatch->timestamp);
kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
kgsl_cmdbatch_destroy(cmdbatch);
mutex_lock(&drawctxt->mutex);
}
mutex_unlock(&drawctxt->mutex);
/* Give the bad news to everybody waiting around */
wake_up_all(&drawctxt->waiting);
wake_up_all(&drawctxt->wq);
}
Example 3: adreno_context_restore
/**
* adreno_context_restore() - generic context restore handler
* @adreno_dev: the device
* @context: the context
*
* Basic context restore handler that writes the context identifier
* to the ringbuffer and issues pagetable switch commands if necessary.
*/
static int adreno_context_restore(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device;
unsigned int cmds[8];
if (adreno_dev == NULL || context == NULL)
return -EINVAL;
device = &adreno_dev->dev;
/* write the context identifier to the ringbuffer */
cmds[0] = cp_nop_packet(1);
cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
cmds[3] = device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
cmds[4] = context->base.id;
/* Flush the UCHE for new context */
cmds[5] = cp_type0_packet(
adreno_getreg(adreno_dev, ADRENO_REG_UCHE_INVALIDATE0), 2);
cmds[6] = 0;
if (adreno_is_a4xx(adreno_dev))
cmds[7] = 0x12;
else if (adreno_is_a3xx(adreno_dev))
cmds[7] = 0x90000000;
return adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE, cmds, 8);
}
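The cp_*_packet helpers used above assemble PM4 packet headers for the CP. Their usual shape, after adreno_pm4types.h (a sketch; the masks can differ slightly per kernel version):

#define CP_NOP		0x10		/* type-3 no-op opcode */
#define CP_TYPE0_PKT	(0 << 30)	/* write a run of registers */
#define CP_TYPE3_PKT	(3 << 30)	/* opcode with payload */

#define cp_type0_packet(regindx, cnt) \
	(CP_TYPE0_PKT | (((cnt) - 1) << 16) | ((regindx) & 0x7FFF))
#define cp_type3_packet(opcode, cnt) \
	(CP_TYPE3_PKT | (((cnt) - 1) << 16) | (((opcode) & 0xFF) << 8))
#define cp_nop_packet(cnt) \
	(CP_TYPE3_PKT | (((cnt) - 1) << 16) | (CP_NOP << 8))

Read this way, cmds[2..4] above form a single CP_MEM_WRITE that stores the context id into the global current_context slot of the memstore, so the host can always tell which context the GPU was running.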
Example 4: adreno_drawctxt_detach
/**
* adreno_drawctxt_detach(): detach a context from the GPU
* @context: Generic KGSL context container for the context
*
*/
int adreno_drawctxt_detach(struct kgsl_context *context)
{
struct kgsl_device *device;
struct adreno_device *adreno_dev;
struct adreno_context *drawctxt;
struct adreno_ringbuffer *rb;
int ret;
if (context == NULL)
return 0;
device = context->device;
adreno_dev = ADRENO_DEVICE(device);
drawctxt = ADRENO_CONTEXT(context);
rb = drawctxt->rb;
/* deactivate context */
if (rb->drawctxt_active == drawctxt)
adreno_drawctxt_switch(adreno_dev, rb, NULL, 0);
spin_lock(&drawctxt->lock);
while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
struct kgsl_cmdbatch *cmdbatch =
drawctxt->cmdqueue[drawctxt->cmdqueue_head];
drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
ADRENO_CONTEXT_CMDQUEUE_SIZE;
spin_unlock(&drawctxt->lock);
/*
* If the context is detached while we are waiting for
* the next command in GFT SKIP CMD, print the context
* detached status here.
*/
adreno_fault_skipcmd_detached(device, drawctxt, cmdbatch);
/*
* Don't hold the drawctxt mutex while the cmdbatch is being
* destroyed because the cmdbatch destroy takes the device
* mutex and the world falls in on itself
*/
kgsl_cmdbatch_destroy(cmdbatch);
spin_lock(&drawctxt->lock);
}
spin_unlock(&drawctxt->lock);
/*
* internal_timestamp is set in adreno_ringbuffer_addcmds,
* which holds the device mutex. The entire context destroy
* process requires the device mutex as well. But let's
* make sure we notice if the locking changes.
*/
BUG_ON(!mutex_is_locked(&device->mutex));
/*
* Wait for the last global timestamp to pass before continuing.
* The maximum wait time is 30s; some large IBs can take longer
* than 10s, and if a hang happens then the time for the context's
* commands to retire will be greater than 10s. 30s should be
* sufficient time to wait for the commands even if a hang happens.
*/
ret = adreno_drawctxt_wait_rb(adreno_dev, context,
drawctxt->internal_timestamp, 30 * 1000);
/*
* If the wait for global fails due to timeout then nothing after this
* point is likely to work very well - BUG_ON() so we can take advantage
* of the debug tools to figure out what the h - e - double hockey
* sticks happened. If an -EAGAIN error is returned then recovery will
* kick in and there will be no more commands in the RB pipe from this
* context, which is what we are waiting for, so ignore the -EAGAIN error.
*/
if (-EAGAIN == ret)
ret = 0;
BUG_ON(ret);
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
drawctxt->timestamp);
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
drawctxt->timestamp);
adreno_profile_process_results(adreno_dev);
/* wake threads waiting to submit commands from this context */
wake_up_all(&drawctxt->waiting);
wake_up_all(&drawctxt->wq);
return ret;
}
Example 5: adreno_drawctxt_invalidate
/**
* adreno_drawctxt_invalidate() - Invalidate an adreno draw context
* @device: Pointer to the KGSL device structure for the GPU
* @context: Pointer to the KGSL context structure
*
* Invalidate the context and remove all queued commands and cancel any pending
* waiters
*/
void adreno_drawctxt_invalidate(struct kgsl_device *device,
struct kgsl_context *context)
{
struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
trace_adreno_drawctxt_invalidate(drawctxt);
spin_lock(&drawctxt->lock);
set_bit(KGSL_CONTEXT_PRIV_INVALID, &context->priv);
/*
* set the timestamp to the last value since the context is invalidated
* and we want the pending events for this context to go away
*/
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
drawctxt->timestamp);
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
drawctxt->timestamp);
/* Get rid of commands still waiting in the queue */
while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
struct kgsl_cmdbatch *cmdbatch =
drawctxt->cmdqueue[drawctxt->cmdqueue_head];
drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
ADRENO_CONTEXT_CMDQUEUE_SIZE;
kgsl_cancel_events_timestamp(device, &context->events,
cmdbatch->timestamp);
kgsl_cmdbatch_destroy(cmdbatch);
}
spin_unlock(&drawctxt->lock);
/* Make sure all "retired" events are processed */
kgsl_process_event_group(device, &context->events);
/* Give the bad news to everybody waiting around */
wake_up_all(&drawctxt->waiting);
wake_up_all(&drawctxt->wq);
}
Example 6: adreno_drawctxt_invalidate
void adreno_drawctxt_invalidate(struct kgsl_device *device,
struct kgsl_context *context)
{
struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
trace_adreno_drawctxt_invalidate(drawctxt);
drawctxt->state = ADRENO_CONTEXT_STATE_INVALID;
mutex_lock(&drawctxt->mutex);
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
drawctxt->timestamp);
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
drawctxt->timestamp);
while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
struct kgsl_cmdbatch *cmdbatch =
drawctxt->cmdqueue[drawctxt->cmdqueue_head];
drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
ADRENO_CONTEXT_CMDQUEUE_SIZE;
mutex_unlock(&drawctxt->mutex);
mutex_lock(&device->mutex);
kgsl_cancel_events_timestamp(device, context,
cmdbatch->timestamp);
mutex_unlock(&device->mutex);
kgsl_cmdbatch_destroy(cmdbatch);
mutex_lock(&drawctxt->mutex);
}
mutex_unlock(&drawctxt->mutex);
wake_up_all(&drawctxt->waiting);
wake_up_all(&drawctxt->wq);
}
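Examples 2, 5 and 6 show the same function across kernel revisions: two variants drop the context mutex around each cancel so the device mutex can be taken (Examples 2 and 6, differing mainly in the mutex wrapper API and the events argument), while Example 5 marks the context invalid via a priv bit, holds a spinlock across the whole queue drain, and flushes retired events with kgsl_process_event_group afterwards.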
Example 7: _adreno_context_restore_cpu
static void _adreno_context_restore_cpu(struct adreno_ringbuffer *rb,
struct adreno_context *drawctxt)
{
kgsl_sharedmem_writel(rb->device, &(rb->device->memstore),
KGSL_MEMSTORE_RB_OFFSET(rb, current_context),
drawctxt ? drawctxt->base.id : 0);
kgsl_sharedmem_writel(rb->device, &(rb->device->memstore),
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
current_context),
drawctxt ? drawctxt->base.id : 0);
}
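KGSL_MEMSTORE_RB_OFFSET addresses a per-ringbuffer memstore slot, placed after the per-context slots; on multi-ringbuffer kernels it is defined along these lines (a sketch, and the exact index arithmetic is version-dependent):

#define KGSL_MEMSTORE_RB_OFFSET(rb, field) \
	KGSL_MEMSTORE_OFFSET((rb)->id + KGSL_MEMSTORE_MAX, field)

Writing both the RB slot and the global slot here mirrors what the GPU-side restore in Example 8 does, so this CPU fallback leaves the memstore in the same state as the ringbuffer path.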
Example 8: adreno_context_restore
/**
* adreno_context_restore() - generic context restore handler
* @rb: The RB in which context is to be restored
*
* Basic context restore handler that writes the context identifier
* to the ringbuffer and issues pagetable switch commands if necessary.
*/
static void adreno_context_restore(struct adreno_ringbuffer *rb)
{
struct kgsl_device *device = rb->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_context *drawctxt = rb->drawctxt_active;
unsigned int cmds[11];
int ret;
if (!drawctxt)
return;
/*
* write the context identifier to the ringbuffer, write to both
* the global index and the index of the RB in which the context
* operates. The global values will always be reliable since we
* could be in the middle of an RB switch, in which case the RB value
* may not be accurate
*/
cmds[0] = cp_nop_packet(1);
cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
cmds[3] = device->memstore.gpuaddr +
KGSL_MEMSTORE_RB_OFFSET(rb, current_context);
cmds[4] = drawctxt->base.id;
cmds[5] = cp_type3_packet(CP_MEM_WRITE, 2);
cmds[6] = device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
current_context);
cmds[7] = drawctxt->base.id;
/* Flush the UCHE for new context */
cmds[8] = cp_type0_packet(
adreno_getreg(adreno_dev, ADRENO_REG_UCHE_INVALIDATE0), 2);
cmds[9] = 0;
if (adreno_is_a4xx(adreno_dev))
cmds[10] = 0x12;
else if (adreno_is_a3xx(adreno_dev))
cmds[10] = 0x90000000;
ret = adreno_ringbuffer_issuecmds(rb, KGSL_CMD_FLAGS_NONE, cmds, 11);
if (ret) {
/*
* A failure to submit commands to ringbuffer means RB may
* be full, in this case wait for idle and use CPU
*/
ret = adreno_idle(device);
BUG_ON(ret);
_adreno_context_restore_cpu(rb, drawctxt);
}
}
Example 9: adreno_context_restore
int adreno_context_restore(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device;
unsigned int cmds[5];
if (adreno_dev == NULL || context == NULL)
return -EINVAL;
device = &adreno_dev->dev;
cmds[0] = cp_nop_packet(1);
cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
cmds[3] = device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
cmds[4] = context->base.id;
return adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE, cmds, 5);
}
Example 10: adreno_drawctxt_create
/**
* adreno_drawctxt_create - create a new adreno draw context
* @device - KGSL device to create the context on
* @pagetable - Pagetable for the context
* @context - Generic KGSL context structure
* @flags - flags for the context (passed from user space)
*
* Create a new draw context for the 3D core. Return 0 on success,
* or error code on failure.
*/
int adreno_drawctxt_create(struct kgsl_device *device,
struct kgsl_pagetable *pagetable,
struct kgsl_context *context, uint32_t flags)
{
struct adreno_context *drawctxt;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int ret;
drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
if (drawctxt == NULL)
return -ENOMEM;
drawctxt->pagetable = pagetable;
drawctxt->bin_base_offset = 0;
drawctxt->id = context->id;
if (flags & KGSL_CONTEXT_PREAMBLE)
drawctxt->flags |= CTXT_FLAGS_PREAMBLE;
if (flags & KGSL_CONTEXT_NO_GMEM_ALLOC)
drawctxt->flags |= CTXT_FLAGS_NOGMEMALLOC;
if (flags & KGSL_CONTEXT_PER_CONTEXT_TS)
drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
ret = adreno_dev->gpudev->ctxt_create(adreno_dev, drawctxt);
if (ret)
goto err;
kgsl_sharedmem_writel(&device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->id, ref_wait_ts),
KGSL_INIT_REFTIMESTAMP);
context->devctxt = drawctxt;
return 0;
err:
kfree(drawctxt);
return ret;
}
Example 11: adreno_ringbuffer_addcmds
//......... part of the code omitted here .........
+ sizeof(uint)*(rb->wptr-total_sizedwords);
GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
if (flags & KGSL_CMD_FLAGS_PMODE) {
/* disable protected mode error checking */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
}
for (i = 0; i < sizedwords; i++) {
GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
cmds++;
}
if (flags & KGSL_CMD_FLAGS_PMODE) {
/* re-enable protected mode error checking */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
}
/* always increment the global timestamp. once. */
rb->timestamp[KGSL_MEMSTORE_GLOBAL]++;
if (context) {
if (context_id == KGSL_MEMSTORE_GLOBAL)
rb->timestamp[context_id] =
rb->timestamp[KGSL_MEMSTORE_GLOBAL];
else
rb->timestamp[context_id]++;
}
timestamp = rb->timestamp[context_id];
/* HW Workaround for MMU Page fault
* due to memory getting freed early before
* the GPU completes it.
*/
if (adreno_is_a2xx(adreno_dev)) {
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
}
/* scratchpad ts for recovery */
GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) {
/* start-of-pipeline timestamp */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_MEM_WRITE, 2));
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
KGSL_MEMSTORE_OFFSET(context->id, soptimestamp)));
GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
/* end-of-pipeline timestamp */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_EVENT_WRITE, 3));
GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp)));
GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_MEM_WRITE, 2));
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
eoptimestamp)));
GSL_RB_WRITE(ringcmds, rcmd_gpu,
rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
} else {
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_EVENT_WRITE, 3));
GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
eoptimestamp)));
GSL_RB_WRITE(ringcmds, rcmd_gpu,
rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
}
if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
/* Conditional execution based on memory values */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_COND_EXEC, 4));
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
KGSL_MEMSTORE_OFFSET(
context_id, ts_cmp_enable)) >> 2);
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
KGSL_MEMSTORE_OFFSET(
context_id, ref_wait_ts)) >> 2);
GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
/* # of conditional command DWORDs */
GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_INTERRUPT, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
}
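GSL_RB_WRITE, used throughout this example, stores one dword into the mapped ringbuffer and advances both the write cursor and the shadow GPU address; roughly (per the older adreno_ringbuffer.h, with the CFF-dump hook omitted):

#define GSL_RB_WRITE(ring, gpuaddr, data) \
	do { \
		*ring = data; \
		wmb(); \
		ring++; \
		gpuaddr += sizeof(uint); \
	} while (0)

Note also that the CP_COND_EXEC operands are written as (gpuaddr + offset) >> 2: that packet takes dword-granular addresses, so the byte offsets produced by KGSL_MEMSTORE_OFFSET are shifted down by two.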
Example 12: adreno_ringbuffer_start
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
int status;
/*cp_rb_cntl_u cp_rb_cntl; */
union reg_cp_rb_cntl cp_rb_cntl;
unsigned int rb_cntl;
struct kgsl_device *device = rb->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
if (rb->flags & KGSL_FLAGS_STARTED)
return 0;
if (init_ram)
rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;
kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
sizeof(struct kgsl_rbmemptrs));
kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
(rb->sizedwords << 2));
if (adreno_is_a2xx(adreno_dev)) {
adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
(rb->memptrs_desc.gpuaddr
+ GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));
/* setup WPTR delay */
adreno_regwrite(device, REG_CP_RB_WPTR_DELAY,
0 /*0x70000010 */);
}
/*setup REG_CP_RB_CNTL */
adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
cp_rb_cntl.val = rb_cntl;
/*
* The size of the ringbuffer in the hardware is the log2
* representation of the size in quadwords (sizedwords / 2)
*/
cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
/*
* Specify the quadwords to read before updating mem RPTR.
* Like above, pass the log2 representation of the blocksize
* in quadwords.
*/
cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);
if (adreno_is_a2xx(adreno_dev)) {
/* WPTR polling */
cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
}
/* mem RPTR writebacks */
cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;
adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);
adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);
adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
rb->memptrs_desc.gpuaddr +
GSL_RB_MEMPTRS_RPTR_OFFSET);
if (adreno_is_a2xx(adreno_dev)) {
/* explicitly clear all cp interrupts */
adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
}
/* setup scratch/timestamp */
adreno_regwrite(device, REG_SCRATCH_ADDR, device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
soptimestamp));
adreno_regwrite(device, REG_SCRATCH_UMSK,
GSL_RB_MEMPTRS_SCRATCH_MASK);
/* load the CP ucode */
status = adreno_ringbuffer_load_pm4_ucode(device);
if (status != 0)
return status;
/* load the prefetch parser ucode */
status = adreno_ringbuffer_load_pfp_ucode(device);
if (status != 0)
return status;
rb->rptr = 0;
rb->wptr = 0;
/* clear ME_HALT to start micro engine */
adreno_regwrite(device, REG_CP_ME_CNTL, 0);
/* ME init is GPU specific, so jump into the sub-function */
adreno_dev->gpudev->rb_init(adreno_dev, rb);
/* idle device to validate ME INIT */
status = adreno_idle(device);
//......... part of the code omitted here .........
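A quick worked instance of the size encoding above, for an assumed (hypothetical) 8192-dword ringbuffer and an assumed KGSL_RB_BLKSIZE of 32 bytes:

/* 8192 dwords = 4096 quadwords */
cp_rb_cntl.f.rb_bufsz = ilog2(8192 >> 1);	/* = ilog2(4096) = 12 */
/* 32-byte blocks = 4 quadwords */
cp_rb_cntl.f.rb_blksz = ilog2(32 >> 3);		/* = ilog2(4) = 2 */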
Example 13: adreno_ringbuffer_start
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
int status;
union reg_cp_rb_cntl cp_rb_cntl;
unsigned int rb_cntl;
struct kgsl_device *device = rb->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
if (rb->flags & KGSL_FLAGS_STARTED)
return 0;
if (init_ram)
rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;
kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
sizeof(struct kgsl_rbmemptrs));
kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
(rb->sizedwords << 2));
if (adreno_is_a2xx(adreno_dev)) {
adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
(rb->memptrs_desc.gpuaddr
+ GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));
adreno_regwrite(device, REG_CP_RB_WPTR_DELAY,
0 );
}
adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
cp_rb_cntl.val = rb_cntl;
cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);
if (adreno_is_a2xx(adreno_dev)) {
cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
}
cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;
adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);
adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);
adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
rb->memptrs_desc.gpuaddr +
GSL_RB_MEMPTRS_RPTR_OFFSET);
if (adreno_is_a3xx(adreno_dev)) {
adreno_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);
adreno_regwrite(device, A3XX_CP_PROTECT_REG_0, 0x63000040);
adreno_regwrite(device, A3XX_CP_PROTECT_REG_1, 0x62000080);
adreno_regwrite(device, A3XX_CP_PROTECT_REG_2, 0x600000CC);
adreno_regwrite(device, A3XX_CP_PROTECT_REG_3, 0x60000108);
adreno_regwrite(device, A3XX_CP_PROTECT_REG_4, 0x64000140);
adreno_regwrite(device, A3XX_CP_PROTECT_REG_5, 0x66000400);
adreno_regwrite(device, A3XX_CP_PROTECT_REG_6, 0x65000700);
adreno_regwrite(device, A3XX_CP_PROTECT_REG_7, 0x610007D8);
adreno_regwrite(device, A3XX_CP_PROTECT_REG_8, 0x620007E0);
adreno_regwrite(device, A3XX_CP_PROTECT_REG_9, 0x61001178);
adreno_regwrite(device, A3XX_CP_PROTECT_REG_A, 0x64001180);
adreno_regwrite(device, A3XX_CP_PROTECT_REG_B, 0x60003300);
adreno_regwrite(device, A3XX_CP_PROTECT_REG_C, 0x6B00C000);
}
if (adreno_is_a2xx(adreno_dev)) {
adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
}
adreno_regwrite(device, REG_SCRATCH_ADDR, device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
soptimestamp));
adreno_regwrite(device, REG_SCRATCH_UMSK,
GSL_RB_MEMPTRS_SCRATCH_MASK);
status = adreno_ringbuffer_load_pm4_ucode(device);
if (status != 0)
return status;
//......... part of the code omitted here .........
Example 14: adreno_drawctxt_create
/**
* adreno_drawctxt_create - create a new adreno draw context
* @dev_priv: the owner of the context
* @flags: flags for the context (passed from user space)
*
* Create and return a new draw context for the 3D core.
*/
struct kgsl_context *
adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
uint32_t *flags)
{
struct adreno_context *drawctxt;
struct kgsl_device *device = dev_priv->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int ret;
unsigned long local;
local = *flags & (KGSL_CONTEXT_PREAMBLE |
KGSL_CONTEXT_NO_GMEM_ALLOC |
KGSL_CONTEXT_PER_CONTEXT_TS |
KGSL_CONTEXT_USER_GENERATED_TS |
KGSL_CONTEXT_NO_FAULT_TOLERANCE |
KGSL_CONTEXT_CTX_SWITCH |
KGSL_CONTEXT_PRIORITY_MASK |
KGSL_CONTEXT_TYPE_MASK |
KGSL_CONTEXT_PWR_CONSTRAINT |
KGSL_CONTEXT_IFH_NOP |
KGSL_CONTEXT_SECURE);
/* Check for errors before trying to initialize */
/* We no longer support legacy context switching */
if ((local & KGSL_CONTEXT_PREAMBLE) == 0 ||
(local & KGSL_CONTEXT_NO_GMEM_ALLOC) == 0) {
KGSL_DEV_ERR_ONCE(device,
"legacy context switch not supported\n");
return ERR_PTR(-EINVAL);
}
/* Make sure that our target can support secure contexts if requested */
if (!kgsl_mmu_is_secured(&dev_priv->device->mmu) &&
(local & KGSL_CONTEXT_SECURE)) {
KGSL_DEV_ERR_ONCE(device, "Secure context not supported\n");
return ERR_PTR(-EINVAL);
}
drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
if (drawctxt == NULL)
return ERR_PTR(-ENOMEM);
ret = kgsl_context_init(dev_priv, &drawctxt->base);
if (ret != 0) {
kfree(drawctxt);
return ERR_PTR(ret);
}
drawctxt->timestamp = 0;
drawctxt->base.flags = local;
/* Always enable per-context timestamps */
drawctxt->base.flags |= KGSL_CONTEXT_PER_CONTEXT_TS;
drawctxt->type = (drawctxt->base.flags & KGSL_CONTEXT_TYPE_MASK)
>> KGSL_CONTEXT_TYPE_SHIFT;
spin_lock_init(&drawctxt->lock);
init_waitqueue_head(&drawctxt->wq);
init_waitqueue_head(&drawctxt->waiting);
/* Set the context priority */
_set_context_priority(drawctxt);
/* set the context ringbuffer */
drawctxt->rb = adreno_ctx_get_rb(adreno_dev, drawctxt);
/*
* Set up the plist node for the dispatcher. Insert the node into the
* drawctxt pending list based on priority.
*/
plist_node_init(&drawctxt->pending, drawctxt->base.priority);
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp),
0);
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->base.id, eoptimestamp),
0);
adreno_context_debugfs_init(ADRENO_DEVICE(device), drawctxt);
/* copy back whatever flags we decided were valid */
*flags = drawctxt->base.flags;
return &drawctxt->base;
}
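A hypothetical caller sketch for the flags contract (the real caller is the KGSL context-create ioctl path; this fragment is illustrative only): the caller passes the requested flags by pointer and reads back the set the driver actually granted.

uint32_t flags = KGSL_CONTEXT_PREAMBLE | KGSL_CONTEXT_NO_GMEM_ALLOC |
		 KGSL_CONTEXT_PER_CONTEXT_TS;
struct kgsl_context *context;

context = adreno_drawctxt_create(dev_priv, &flags);
if (IS_ERR(context))
	return PTR_ERR(context);
/* flags now holds the validated set, e.g. KGSL_CONTEXT_PER_CONTEXT_TS
 * is forced on even if the caller did not request it */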
Example 15: adreno_drawctxt_create
struct kgsl_context *
adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
uint32_t *flags)
{
struct adreno_context *drawctxt;
struct kgsl_device *device = dev_priv->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int ret;
drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
if (drawctxt == NULL)
return ERR_PTR(-ENOMEM);
ret = kgsl_context_init(dev_priv, &drawctxt->base);
if (ret != 0) {
kfree(drawctxt);
return ERR_PTR(ret);
}
drawctxt->bin_base_offset = 0;
drawctxt->timestamp = 0;
drawctxt->base.flags = *flags & (KGSL_CONTEXT_PREAMBLE |
KGSL_CONTEXT_NO_GMEM_ALLOC |
KGSL_CONTEXT_PER_CONTEXT_TS |
KGSL_CONTEXT_USER_GENERATED_TS |
KGSL_CONTEXT_NO_FAULT_TOLERANCE |
KGSL_CONTEXT_TYPE_MASK);
drawctxt->base.flags |= KGSL_CONTEXT_PER_CONTEXT_TS;
drawctxt->type = (drawctxt->base.flags & KGSL_CONTEXT_TYPE_MASK)
>> KGSL_CONTEXT_TYPE_SHIFT;
mutex_init(&drawctxt->mutex);
init_waitqueue_head(&drawctxt->wq);
init_waitqueue_head(&drawctxt->waiting);
plist_node_init(&drawctxt->pending, ADRENO_CONTEXT_DEFAULT_PRIORITY);
if (adreno_dev->gpudev->ctxt_create) {
ret = adreno_dev->gpudev->ctxt_create(adreno_dev, drawctxt);
if (ret)
goto err;
} else if ((drawctxt->base.flags & KGSL_CONTEXT_PREAMBLE) == 0 ||
(drawctxt->base.flags & KGSL_CONTEXT_NO_GMEM_ALLOC) == 0) {
KGSL_DEV_ERR_ONCE(device,
"legacy context switch not supported\n");
ret = -EINVAL;
goto err;
} else {
drawctxt->ops = &adreno_preamble_ctx_ops;
}
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp),
0);
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->base.id, eoptimestamp),
0);
*flags = drawctxt->base.flags;
return &drawctxt->base;
err:
kgsl_context_detach(&drawctxt->base);
return ERR_PTR(ret);
}