This article collects and summarizes typical usage examples of the ADRENO_DEVICE function in C++. If you have been wondering what exactly ADRENO_DEVICE does, how to use it, or what real calls look like, the selected examples below should help.
The following presents 15 code examples of ADRENO_DEVICE, ordered by popularity by default.
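None of the examples below show the definition of ADRENO_DEVICE itself. In the KGSL driver it follows the usual container_of pattern: the generic struct kgsl_device is embedded inside struct adreno_device, and the macro recovers the enclosing structure from a pointer to the embedded member. The following is a minimal sketch of that pattern, assuming the embedded member is named dev; the stand-in structures and the local container_of definition are illustrative, not the driver's real headers.
#include <stddef.h>
/* Minimal stand-ins; the real definitions live in kgsl.h / adreno.h. */
struct kgsl_device {
        int id;
        /* ... generic KGSL state ... */
};
struct adreno_device {
        struct kgsl_device dev;   /* embedded generic KGSL device */
        unsigned int istore_size; /* ... Adreno-specific state ... */
};
/* container_of() as in the kernel, repeated so the sketch is self-contained. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
/* ADRENO_DEVICE(): map a kgsl_device pointer back to its adreno_device. */
#define ADRENO_DEVICE(device) \
        container_of((device), struct adreno_device, dev)
With this in place, every ADRENO_DEVICE(device) call in the examples is a constant-time pointer adjustment rather than a lookup.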
Example 1: adreno_coresight_disable
void adreno_coresight_disable(struct coresight_device *csdev)
{
struct kgsl_device *device = dev_get_drvdata(csdev->dev.parent);
struct adreno_device *adreno_dev;
if (device == NULL)
return;
adreno_dev = ADRENO_DEVICE(device);
if (adreno_dev->gpudev->coresight_disable)
return adreno_dev->gpudev->coresight_disable(device);
}
Example 2: a2xx_cp_intrcallback
static void a2xx_cp_intrcallback(struct kgsl_device *device)
{
unsigned int status = 0, num_reads = 0, master_status = 0;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
int i;
adreno_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
adreno_regread(device, REG_CP_INT_STATUS, &status);
adreno_regread(device, REG_MASTER_INT_SIGNAL,
&master_status);
num_reads++;
}
if (num_reads > 1)
KGSL_DRV_WARN(device,
"Looped %d times to read REG_CP_INT_STATUS\n",
num_reads);
trace_kgsl_a2xx_irq_status(device, master_status, status);
if (!status) {
if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
wake_up_interruptible_all(&device->wait_queue);
} else
KGSL_DRV_WARN(device, "Spurious interrput detected\n");
return;
}
for (i = 0; i < ARRAY_SIZE(kgsl_cp_error_irqs); i++) {
if (status & kgsl_cp_error_irqs[i].mask) {
KGSL_CMD_CRIT(rb->device, "%s\n",
kgsl_cp_error_irqs[i].message);
kgsl_pwrctrl_irq(rb->device, KGSL_PWRFLAGS_OFF);
}
}
status &= CP_INT_MASK;
adreno_regwrite(device, REG_CP_INT_ACK, status);
if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
queue_work(device->work_queue, &device->ts_expired_ws);
wake_up_interruptible_all(&device->wait_queue);
}
}
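Example 2 iterates over a kgsl_cp_error_irqs table that the excerpt does not include. Judging from the fields it accesses (.mask and .message), it is presumably a static array pairing CP interrupt-status bits with log strings; the sketch below is hypothetical, and the bit names, values, and entries are assumptions rather than the driver's actual list.
/* Illustrative bit positions only; the real values come from the a2xx register headers. */
#define CP_INT_CNTL__OPCODE_ERROR_MASK (1u << 28)
#define CP_INT_CNTL__IB_ERROR_MASK     (1u << 27)
static const struct {
        unsigned int mask;   /* bit in REG_CP_INT_STATUS */
        const char *message; /* text logged when that bit is set */
} kgsl_cp_error_irqs[] = {
        { CP_INT_CNTL__OPCODE_ERROR_MASK, "ringbuffer opcode error interrupt" },
        { CP_INT_CNTL__IB_ERROR_MASK, "ringbuffer IB error interrupt" },
        /* ... one entry per fatal CP error condition ... */
};
Whenever one of these bits is set, the loop in Example 2 logs the message and turns the GPU interrupt path off with kgsl_pwrctrl_irq(rb->device, KGSL_PWRFLAGS_OFF).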
Example 3: kgsl_istore_read
static ssize_t kgsl_istore_read(
struct file *file,
char __user *buff,
size_t buff_count,
loff_t *ppos)
{
int i, count, remaining, pos = 0, tot = 0;
struct kgsl_device *device = file->private_data;
const int rowc = 8;
struct adreno_device *adreno_dev;
if (!ppos || !device)
return 0;
adreno_dev = ADRENO_DEVICE(device);
count = adreno_dev->istore_size * adreno_dev->instruction_size;
remaining = count;
for (i = 0; i < count; i += rowc) {
unsigned int vals[rowc];
int j, ss;
int linec = min(remaining, rowc);
remaining -= rowc;
if (pos >= *ppos) {
for (j = 0; j < linec; ++j)
kgsl_regread_nolock(device,
ADRENO_ISTORE_START + i + j,
vals + j);
} else
memset(vals, 0, sizeof(vals));
ss = kgsl_hex_dump("IS: %04x: ", i, (uint8_t *)vals, rowc*4,
linec*4, buff);
if (ss < 0)
return ss;
if (pos >= *ppos) {
if (tot+ss >= buff_count)
return tot;
tot += ss;
buff += ss;
*ppos += ss;
}
pos += ss;
}
return tot;
}
Example 4: kgsl_pwrctrl_sleep
/* Caller must hold the device mutex. */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
KGSL_PWR_INFO(device, "sleep device %d\n", device->id);
/* Work through the legal state transitions */
if (device->requested_state == KGSL_STATE_NAP) {
if (device->ftbl->isidle(device))
goto nap;
} else if (device->requested_state == KGSL_STATE_SLEEP) {
if (device->state == KGSL_STATE_NAP ||
device->ftbl->isidle(device))
goto sleep;
}
device->requested_state = KGSL_STATE_NONE;
return -EBUSY;
sleep:
device->ftbl->suspend_context(device);
device->ftbl->stop(device);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
if (pwr->pwrlevels[0].gpu_freq > 0)
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->num_pwrlevels - 1].
gpu_freq);
device->pwrctrl.time = 0;
kgsl_pwrscale_sleep(device);
goto clk_off;
nap:
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
clk_off:
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
device->state = device->requested_state;
device->requested_state = KGSL_STATE_NONE;
wake_unlock(&device->idle_wakelock);
pm_qos_update_request(&device->pm_qos_req_dma,
PM_QOS_DEFAULT_VALUE);
KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d ts 0x%x\n",
device->state, device->id, adreno_dev->ringbuffer.timestamp);
return 0;
}
Example 5: adreno_drawctxt_create
/**
* adreno_drawctxt_create - create a new adreno draw context
* @device - KGSL device to create the context on
* @pagetable - Pagetable for the context
* @context - Generic KGSL context structure
* @flags - flags for the context (passed from user space)
*
* Create a new draw context for the 3D core. Return 0 on success,
* or an error code on failure.
*/
int adreno_drawctxt_create(struct kgsl_device *device,
struct kgsl_pagetable *pagetable,
struct kgsl_context *context, uint32_t flags)
{
struct adreno_context *drawctxt;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
int ret;
drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
if (drawctxt == NULL)
return -ENOMEM;
drawctxt->pagetable = pagetable;
drawctxt->bin_base_offset = 0;
drawctxt->id = context->id;
rb->timestamp[context->id] = 0;
if (flags & KGSL_CONTEXT_PREAMBLE)
drawctxt->flags |= CTXT_FLAGS_PREAMBLE;
if (flags & KGSL_CONTEXT_NO_GMEM_ALLOC)
drawctxt->flags |= CTXT_FLAGS_NOGMEMALLOC;
if (flags & KGSL_CONTEXT_PER_CONTEXT_TS)
drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
ret = adreno_dev->gpudev->ctxt_create(adreno_dev, drawctxt);
if (ret)
goto err;
kgsl_sharedmem_writel(&device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->id, ref_wait_ts),
KGSL_INIT_REFTIMESTAMP);
kgsl_sharedmem_writel(&device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->id, ts_cmp_enable), 0);
kgsl_sharedmem_writel(&device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->id, soptimestamp), 0);
kgsl_sharedmem_writel(&device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->id, eoptimestamp), 0);
context->devctxt = drawctxt;
return 0;
err:
kfree(drawctxt);
return ret;
}
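Example 5 initializes several per-context fields in device->memstore through KGSL_MEMSTORE_OFFSET(), and Example 8 later writes current_context the same way. The macro is not shown in the excerpts; the sketch below illustrates the likely addressing scheme, one fixed-size record per context with offsetof() used to locate a field inside it. The field list is limited to the members these examples touch, and the layout is an assumption, not the driver's actual struct kgsl_devmemstore.
#include <stddef.h>
/* Illustrative per-context record; the real struct has more members and padding. */
struct kgsl_devmemstore {
        unsigned int soptimestamp;    /* start-of-pipeline timestamp */
        unsigned int eoptimestamp;    /* end-of-pipeline timestamp */
        unsigned int ts_cmp_enable;   /* timestamp-compare interrupt enable */
        unsigned int ref_wait_ts;     /* reference timestamp for waits */
        unsigned int current_context; /* id of the context currently on the GPU */
};
/* Byte offset of "field" inside context ctxt_id's memstore record. */
#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
        ((ctxt_id) * sizeof(struct kgsl_devmemstore) + \
         offsetof(struct kgsl_devmemstore, field))
KGSL_MEMSTORE_RB_OFFSET() in Example 8 presumably applies the same idea to a per-ringbuffer slot rather than a per-context one.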
Example 6: _ringbuffer_setup_common
/**
* _ringbuffer_setup_common() - Ringbuffer start
* @rb: Pointer to adreno ringbuffer
*
* Set up the ringbuffers for the GPU.
*/
static void _ringbuffer_setup_common(struct adreno_ringbuffer *rb)
{
struct kgsl_device *device = rb->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb_temp;
int i;
FOR_EACH_RINGBUFFER(adreno_dev, rb_temp, i) {
kgsl_sharedmem_set(rb_temp->device,
&(rb_temp->buffer_desc), 0,
0xAA, KGSL_RB_SIZE);
rb_temp->wptr = 0;
rb_temp->rptr = 0;
adreno_iommu_set_pt_generate_rb_cmds(rb_temp,
device->mmu.defaultpagetable);
}
Example 7: adreno_ringbuffer_load_pm4_ucode
static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
const char *fwfile;
int i, ret = 0;
if (adreno_is_a220(adreno_dev)) {
fwfile = A220_PM4_470_FW;
} else if (adreno_is_a225(adreno_dev)) {
fwfile = A225_PM4_FW;
} else if (adreno_is_a20x(adreno_dev)) {
fwfile = A200_PM4_FW;
} else {
KGSL_DRV_ERR(device, "Could not load PM4 file\n");
return -EINVAL;
}
if (adreno_dev->pm4_fw == NULL) {
int len;
unsigned int *ptr;
ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
if (ret)
goto err;
/* PM4 size must be a multiple of 3 dwords, plus 1 dword of version */
if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
ret = -EINVAL;
goto err;
}
adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
adreno_dev->pm4_fw = ptr;
}
KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
adreno_dev->pm4_fw[0]);
adreno_regwrite(device, REG_CP_DEBUG, 0x02000000);
adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
for (i = 1; i < adreno_dev->pm4_fw_size; i++)
adreno_regwrite(device, REG_CP_ME_RAM_DATA,
adreno_dev->pm4_fw[i]);
err:
return ret;
}
Example 8: adreno_context_restore
/**
* adreno_context_restore() - generic context restore handler
* @rb: The RB in which context is to be restored
*
* Basic context restore handler that writes the context identifier
* to the ringbuffer and issues pagetable switch commands if necessary.
*/
static void adreno_context_restore(struct adreno_ringbuffer *rb)
{
struct kgsl_device *device = rb->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_context *drawctxt = rb->drawctxt_active;
unsigned int cmds[11];
int ret;
if (!drawctxt)
return;
/*
* Write the context identifier to the ringbuffer, targeting both
* the global index and the index of the RB in which the context
* operates. The global value will always be reliable, since we
* could be in the middle of an RB switch, in which case the
* RB-specific value may not be accurate.
*/
cmds[0] = cp_nop_packet(1);
cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
cmds[3] = device->memstore.gpuaddr +
KGSL_MEMSTORE_RB_OFFSET(rb, current_context);
cmds[4] = drawctxt->base.id;
cmds[5] = cp_type3_packet(CP_MEM_WRITE, 2);
cmds[6] = device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
current_context);
cmds[7] = drawctxt->base.id;
/* Flush the UCHE for new context */
cmds[8] = cp_type0_packet(
adreno_getreg(adreno_dev, ADRENO_REG_UCHE_INVALIDATE0), 2);
cmds[9] = 0;
if (adreno_is_a4xx(adreno_dev))
cmds[10] = 0x12;
else if (adreno_is_a3xx(adreno_dev))
cmds[10] = 0x90000000;
ret = adreno_ringbuffer_issuecmds(rb, KGSL_CMD_FLAGS_NONE, cmds, 11);
if (ret) {
/*
* A failure to submit commands to ringbuffer means RB may
* be full, in this case wait for idle and use CPU
*/
ret = adreno_idle(device);
BUG_ON(ret);
_adreno_context_restore_cpu(rb, drawctxt);
}
}
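The cmds[] array in Example 8 is built from PM4 packet headers (cp_nop_packet, cp_type0_packet, cp_type3_packet) that are defined in the driver's headers, not in the excerpt. The sketch below shows the standard Adreno PM4 header encoding as commonly documented; treat the exact bit layout as an assumption and the driver's own macros as authoritative.
#define CP_TYPE0_PKT (0u << 30) /* packet type lives in bits [31:30] */
#define CP_TYPE3_PKT (3u << 30)
#define CP_NOP       0x10       /* type-3 no-op opcode */
/* Type-0 packet: write "cnt" consecutive registers starting at "reg". */
#define cp_type0_packet(reg, cnt) \
        (CP_TYPE0_PKT | (((cnt) - 1) << 16) | ((reg) & 0x7FFF))
/* Type-3 packet: opcode followed by "cnt" payload dwords. */
#define cp_type3_packet(opcode, cnt) \
        (CP_TYPE3_PKT | (((cnt) - 1) << 16) | (((opcode) & 0xFF) << 8))
/* A NOP packet is a type-3 packet with the CP_NOP opcode. */
#define cp_nop_packet(cnt) cp_type3_packet(CP_NOP, (cnt))
Under this reading, cmds[2..4] form a CP_MEM_WRITE packet that stores the context id (cmds[4]) at the memstore address in cmds[3], and cmds[5..7] repeat the write at the global slot.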
Example 9: gfx_store_reg
static ssize_t gfx_store_reg(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct kgsl_device *device = dev_get_drvdata(dev->parent);
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct coresight_attr *csight_attr = container_of(attr,
struct coresight_attr, attr);
unsigned int regval = 0;
regval = coresight_convert_reg(buf);
if (adreno_dev->gpudev->coresight_config_debug_reg)
adreno_dev->gpudev->coresight_config_debug_reg(device,
csight_attr->regname, regval);
return size;
}
Example 10: adreno_ringbuffer_load_pfp_ucode
static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
const char *fwfile;
int i, ret = 0;
if (adreno_is_a220(adreno_dev)) {
fwfile = A220_PFP_470_FW;
} else if (adreno_is_a225(adreno_dev)) {
fwfile = A225_PFP_FW;
} else if (adreno_is_a20x(adreno_dev)) {
fwfile = A200_PFP_FW;
} else {
KGSL_DRV_ERR(device, "Could not load PFP firmware\n");
return -EINVAL;
}
if (adreno_dev->pfp_fw == NULL) {
int len;
unsigned int *ptr;
ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
if (ret)
goto err;
/* PFP size should be dword aligned */
if (len % sizeof(uint32_t) != 0) {
KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
ret = -EINVAL;
goto err;
}
adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
adreno_dev->pfp_fw = ptr;
}
KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
adreno_dev->pfp_fw[0]);
adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0);
for (i = 1; i < adreno_dev->pfp_fw_size; i++)
adreno_regwrite(device, REG_CP_PFP_UCODE_DATA,
adreno_dev->pfp_fw[i]);
err:
return ret;
}
Example 11: snapshot_freeze_obj_list
/*
* snapshot_freeze_obj_list() - Take a list of ib objects and freeze their
* memory for snapshot
* @device: Device being snapshotted
* @ptbase: The pagetable base of the process to which IB belongs
* @ib_obj_list: List of the IB objects
*
* Returns 0 on success, or an error code otherwise
*/
static int snapshot_freeze_obj_list(struct kgsl_device *device,
phys_addr_t ptbase, struct adreno_ib_object_list *ib_obj_list)
{
int ret = 0;
struct adreno_ib_object *ib_objs;
unsigned int ib2base;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int i;
adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BASE, &ib2base);
for (i = 0; i < ib_obj_list->num_objs; i++) {
int temp_ret;
int index;
int freeze = 1;
ib_objs = &(ib_obj_list->obj_list[i]);
/* Make sure this object is not going to be saved statically */
for (index = 0; index < objbufptr; index++) {
if ((objbuf[index].gpuaddr <= ib_objs->gpuaddr) &&
((objbuf[index].gpuaddr +
(objbuf[index].dwords << 2)) >=
(ib_objs->gpuaddr + ib_objs->size)) &&
(objbuf[index].ptbase == ptbase)) {
freeze = 0;
break;
}
}
if (freeze) {
/* Save current IB2 statically */
if (ib2base == ib_objs->gpuaddr) {
push_object(device, SNAPSHOT_OBJ_TYPE_IB,
ptbase, ib_objs->gpuaddr, ib_objs->size >> 2);
} else {
temp_ret = kgsl_snapshot_get_object(device,
ptbase, ib_objs->gpuaddr, ib_objs->size,
ib_objs->snapshot_obj_type);
if (temp_ret < 0) {
if (ret >= 0)
ret = temp_ret;
} else {
snapshot_frozen_objsize += temp_ret;
}
}
}
Example 12: adreno_compat_ioctl
long adreno_compat_ioctl(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_device *device = dev_priv->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int result = 0;
switch (cmd) {
case IOCTL_KGSL_PERFCOUNTER_QUERY_COMPAT: {
struct kgsl_perfcounter_query_compat *query32 = data;
struct kgsl_perfcounter_query query;
query.groupid = query32->groupid;
query.countables = (unsigned int __user *)(uintptr_t)
query32->countables;
query.count = query32->count;
query.max_counters = query32->max_counters;
result = adreno_perfcounter_query_group(adreno_dev,
query.groupid, query.countables,
query.count, &query.max_counters);
query32->max_counters = query.max_counters;
break;
}
case IOCTL_KGSL_PERFCOUNTER_READ_COMPAT: {
struct kgsl_perfcounter_read_compat *read32 = data;
struct kgsl_perfcounter_read read;
read.reads = (struct kgsl_perfcounter_read_group __user *)
(uintptr_t)read32->reads;
read.count = read32->count;
result = kgsl_active_count_get(device);
if (result)
break;
result = adreno_perfcounter_read_group(adreno_dev,
read.reads, read.count);
kgsl_active_count_put(device);
break;
}
default:
KGSL_DRV_INFO(dev_priv->device,
"invalid ioctl code %08x\n", cmd);
result = -ENOIOCTLCMD;
break;
}
return result;
}
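The compat structures in Example 12 exist because a 32-bit user space passes pointers as 32-bit values, so they cannot be overlaid directly onto the native structures; the handler copies each field across and widens the pointer members through uintptr_t. A hypothetical layout for one of them (field order and padding are assumptions):
#include <linux/compat.h>
struct kgsl_perfcounter_query_compat {
        unsigned int groupid;
        compat_uptr_t countables; /* user pointer stored as a 32-bit value */
        unsigned int count;
        unsigned int max_counters;
};
The cast (unsigned int __user *)(uintptr_t)query32->countables in the example then turns that 32-bit value back into a proper user-space pointer on the kernel side.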
Example 13: adreno_coresight_show_register
ssize_t adreno_coresight_show_register(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned int val = 0;
struct kgsl_device *device = dev_get_drvdata(dev->parent);
struct adreno_device *adreno_dev;
struct adreno_coresight_attr *cattr = TO_ADRENO_CORESIGHT_ATTR(attr);
if (device == NULL)
return -EINVAL;
adreno_dev = ADRENO_DEVICE(device);
if (cattr->reg == NULL)
return -EINVAL;
/*
* Return the current value of the register if coresight is enabled,
* otherwise report 0
*/
mutex_lock(&device->mutex);
if (test_bit(ADRENO_DEVICE_CORESIGHT, &adreno_dev->priv)) {
/*
* If the device isn't power collapsed read the actual value
* from the hardware - otherwise return the cached value
*/
if (device->state == KGSL_STATE_ACTIVE ||
device->state == KGSL_STATE_NAP) {
if (!kgsl_active_count_get(device)) {
kgsl_regread(device, cattr->reg->offset,
&cattr->reg->value);
kgsl_active_count_put(device);
}
}
val = cattr->reg->value;
}
mutex_unlock(&device->mutex);
return snprintf(buf, PAGE_SIZE, "0x%X", val);
}
Example 14: adreno_coresight_store_register
ssize_t adreno_coresight_store_register(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct kgsl_device *device = dev_get_drvdata(dev->parent);
struct adreno_device *adreno_dev;
struct adreno_coresight_attr *cattr = TO_ADRENO_CORESIGHT_ATTR(attr);
unsigned long val;
int ret;
if (device == NULL)
return -EINVAL;
adreno_dev = ADRENO_DEVICE(device);
if (cattr->reg == NULL)
return -EINVAL;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
mutex_lock(&device->mutex);
/* Ignore writes while coresight is off */
if (!test_bit(ADRENO_DEVICE_CORESIGHT, &adreno_dev->priv))
goto out;
cattr->reg->value = val;
/* Program the hardware if it is not power collapsed */
if (device->state == KGSL_STATE_ACTIVE ||
device->state == KGSL_STATE_NAP) {
if (!kgsl_active_count_get(device)) {
kgsl_regwrite(device, cattr->reg->offset,
cattr->reg->value);
kgsl_active_count_put(device);
}
}
out:
mutex_unlock(&device->mutex);
return size;
}
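Examples 13 and 14 use TO_ADRENO_CORESIGHT_ATTR() and the reg->offset / reg->value fields without showing their definitions. They match the common sysfs idiom of embedding a struct device_attribute next to driver-private data and recovering the wrapper with container_of(); a sketch under that assumption, with the register structure reduced to the fields the examples actually touch:
#include <linux/device.h>
#include <linux/kernel.h>
/* Debug register exposed through a coresight sysfs file (illustrative fields). */
struct adreno_coresight_register {
        unsigned int offset; /* register offset passed to kgsl_regread/regwrite */
        unsigned int value;  /* cached value, used while the GPU is power-collapsed */
};
/* Wrapper binding one sysfs attribute to one debug register. */
struct adreno_coresight_attr {
        struct device_attribute attr;          /* embedded sysfs attribute */
        struct adreno_coresight_register *reg; /* register behind this file */
};
#define TO_ADRENO_CORESIGHT_ATTR(_attr) \
        container_of(_attr, struct adreno_coresight_attr, attr)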
Example 15: adreno_debugfs_init
void adreno_debugfs_init(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
if (!device->d_debugfs || IS_ERR(device->d_debugfs))
return;
debugfs_create_file("istore", 0400, device->d_debugfs, device,
&kgsl_istore_fops);
debugfs_create_file("sx_debug", 0400, device->d_debugfs, device,
&kgsl_sx_debug_fops);
debugfs_create_file("cp_debug", 0400, device->d_debugfs, device,
&kgsl_cp_debug_fops);
debugfs_create_file("mh_debug", 0400, device->d_debugfs, device,
&kgsl_mh_debug_fops);
debugfs_create_file("cff_dump", 0644, device->d_debugfs, device,
&kgsl_cff_dump_enable_fops);
debugfs_create_u32("wait_timeout", 0644, device->d_debugfs,
&adreno_dev->wait_timeout);
debugfs_create_u32("ib_check", 0644, device->d_debugfs,
&adreno_dev->ib_check_level);
/* By default, enable fast hang detection */
adreno_dev->fast_hang_detect = 1;
debugfs_create_u32("fast_hang_detect", 0644, device->d_debugfs,
&adreno_dev->fast_hang_detect);
/* Create post mortem control files */
pm_d_debugfs = debugfs_create_dir("postmortem", device->d_debugfs);
if (IS_ERR(pm_d_debugfs))
return;
debugfs_create_file("dump", 0600, pm_d_debugfs, device,
&pm_dump_fops);
debugfs_create_file("regs_enabled", 0644, pm_d_debugfs, device,
&pm_regs_enabled_fops);
debugfs_create_file("ib_dump_on_pagefault", 0644, device->d_debugfs,
device, &ib_dump_on_pagef_enabled_fops);
}