This article collects typical usage examples of the queue_work function in C (Linux kernel code). If you are wondering what exactly queue_work does, how to call it, or what real-world uses look like, the hand-picked code examples here should help.
A total of 15 code examples of queue_work are shown below, sorted by popularity by default.
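All 15 examples below follow the same basic pattern: a work item is initialized once with INIT_WORK() and later handed to queue_work(), which schedules its handler to run in process context on a worker thread of the given workqueue. As a quick orientation, here is a minimal, self-contained sketch of that pattern; the module and the demo_wq / demo_work names are hypothetical and do not come from any of the examples below.
#include <linux/module.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

/* Runs later, in process context, on one of demo_wq's worker threads. */
static void demo_work_func(struct work_struct *work)
{
	pr_info("demo: deferred work executed\n");
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo_wq", WQ_UNBOUND, 0);
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_work_func);

	/* Safe to call from atomic context; returns false if the item was already pending. */
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo_work);	/* wait for a running handler to finish */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
queue_work() returns true when it actually queued the item and false when the item was already pending, which is why Example 13 below asserts on its return value; many callers simply ignore it.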
Example 1: rx_complete
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
struct sk_buff *skb = req->context;
struct eth_dev *dev = ep->driver_data;
int status = req->status;
	bool queue = false;
switch (status) {
/* normal completion */
case 0:
skb_put(skb, req->actual);
if (dev->unwrap) {
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
if (dev->port_usb) {
status = dev->unwrap(dev->port_usb,
skb,
&dev->rx_frames);
if (status == -EINVAL)
dev->net->stats.rx_errors++;
else if (status == -EOVERFLOW)
dev->net->stats.rx_over_errors++;
} else {
dev_kfree_skb_any(skb);
status = -ENOTCONN;
}
spin_unlock_irqrestore(&dev->lock, flags);
} else {
skb_queue_tail(&dev->rx_frames, skb);
}
if (!status)
			queue = true;
break;
/* software-driven interface shutdown */
case -ECONNRESET: /* unlink */
case -ESHUTDOWN: /* disconnect etc */
VDBG(dev, "rx shutdown, code %d\n", status);
goto quiesce;
/* for hardware automagic (such as pxa) */
case -ECONNABORTED: /* endpoint reset */
DBG(dev, "rx %s reset\n", ep->name);
defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
dev_kfree_skb_any(skb);
goto clean;
/* data overrun */
case -EOVERFLOW:
dev->net->stats.rx_over_errors++;
/* FALLTHROUGH */
default:
		queue = true;
dev_kfree_skb_any(skb);
dev->net->stats.rx_errors++;
DBG(dev, "rx status %d\n", status);
break;
}
clean:
spin_lock(&dev->req_lock);
list_add(&req->list, &dev->rx_reqs);
spin_unlock(&dev->req_lock);
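	/* This completion callback typically runs in atomic context; further RX
	 * processing is deferred to the driver's rx_work handler (not shown here). */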
if (queue)
queue_work(uether_wq, &dev->rx_work);
}
Example 2: enable_stm_feature
static void enable_stm_feature(struct diag_smd_info *smd_info)
{
driver->peripheral_supports_stm[smd_info->peripheral] = ENABLE_STM;
smd_info->general_context = UPDATE_PERIPHERAL_STM_STATE;
queue_work(driver->diag_cntl_wq, &(smd_info->diag_general_smd_work));
}
Example 3: rrpc_move_valid_pages
//......... part of the code omitted here .........
BUG_ON(list_empty(prio_list));
max = list_first_entry(prio_list, struct rrpc_block, prio);
list_for_each_entry(rblock, prio_list, prio)
max = rblock_max_invalid(max, rblock);
return max;
}
static void rrpc_lun_gc(struct work_struct *work)
{
struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
struct rrpc *rrpc = rlun->rrpc;
struct nvm_lun *lun = rlun->parent;
struct rrpc_block_gc *gcb;
unsigned int nr_blocks_need;
nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;
if (nr_blocks_need < rrpc->nr_luns)
nr_blocks_need = rrpc->nr_luns;
spin_lock(&rlun->lock);
while (nr_blocks_need > lun->nr_free_blocks &&
!list_empty(&rlun->prio_list)) {
struct rrpc_block *rblock = block_prio_find_max(rlun);
struct nvm_block *block = rblock->parent;
if (!rblock->nr_invalid_pages)
break;
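		/* rlun->lock is held here, so the allocation must not sleep (GFP_ATOMIC). */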
gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
if (!gcb)
break;
list_del_init(&rblock->prio);
BUG_ON(!block_is_full(rrpc, rblock));
pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
gcb->rrpc = rrpc;
gcb->rblk = rblock;
INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
queue_work(rrpc->kgc_wq, &gcb->ws_gc);
nr_blocks_need--;
}
spin_unlock(&rlun->lock);
/* TODO: Hint that request queue can be started again */
}
static void rrpc_gc_queue(struct work_struct *work)
{
struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
ws_gc);
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
struct nvm_lun *lun = rblk->parent->lun;
struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
spin_lock(&rlun->lock);
list_add_tail(&rblk->prio, &rlun->prio_list);
spin_unlock(&rlun->lock);
mempool_free(gcb, rrpc->gcb_pool);
pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
rblk->parent->id);
}
static const struct block_device_operations rrpc_fops = {
.owner = THIS_MODULE,
};
static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
unsigned int i;
struct rrpc_lun *rlun, *max_free;
if (!is_gc)
return get_next_lun(rrpc);
/* during GC, we don't care about RR, instead we want to make
* sure that we maintain evenness between the block luns.
*/
max_free = &rrpc->luns[0];
/* prevent GC-ing lun from devouring pages of a lun with
* little free blocks. We don't take the lock as we only need an
* estimate.
*/
rrpc_for_each_lun(rrpc, rlun, i) {
if (rlun->parent->nr_free_blocks >
max_free->parent->nr_free_blocks)
max_free = rlun;
}
return max_free;
}
Example 4: gs_rx_push
//......... part of the code omitted here .........
while (!list_empty(queue)) {
struct usb_request *req;
req = list_first_entry(queue, struct usb_request, list);
/* discard data if tty was closed */
if (!tty)
goto recycle;
/* leave data queued if tty was rx throttled */
if (test_bit(TTY_THROTTLED, &tty->flags))
break;
switch (req->status) {
case -ESHUTDOWN:
disconnect = true;
pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
break;
default:
/* presumably a transient fault */
pr_warning(PREFIX "%d: unexpected RX status %d\n",
port->port_num, req->status);
/* FALLTHROUGH */
case 0:
/* normal completion */
break;
}
/* push data to (open) tty */
if (req->actual) {
char *packet = req->buf;
unsigned size = req->actual;
unsigned n;
int count;
/* we may have pushed part of this packet already... */
n = port->n_read;
if (n) {
packet += n;
size -= n;
}
count = tty_insert_flip_string(tty, packet, size);
port->nbytes_to_tty += count;
if (count)
do_push = true;
if (count != size) {
/* stop pushing; TTY layer can't handle more */
port->n_read += count;
pr_vdebug(PREFIX "%d: rx block %d/%d\n",
port->port_num,
count, req->actual);
break;
}
port->n_read = 0;
}
recycle:
list_move(&req->list, &port->read_pool);
port->read_started--;
}
/* Push from tty to ldisc; this is immediate with low_latency, and
* may trigger callbacks to this driver ... so drop the spinlock.
*/
if (tty && do_push) {
spin_unlock_irq(&port->port_lock);
tty_flip_buffer_push(tty);
wake_up_interruptible(&tty->read_wait);
spin_lock_irq(&port->port_lock);
/* tty may have been closed */
tty = port->port_tty;
}
/* We want our data queue to become empty ASAP, keeping data
* in the tty and ldisc (not here). If we couldn't push any
* this time around, there may be trouble unless there's an
* implicit tty_unthrottle() call on its way...
*
* REVISIT we should probably add a timer to keep the work queue
* from starving ... but it's not clear that case ever happens.
*/
if (!list_empty(queue) && tty) {
if (!test_bit(TTY_THROTTLED, &tty->flags)) {
if (do_push)
queue_work(gserial_wq, &port->push);
else
pr_warning(PREFIX "%d: RX not scheduled?\n",
port->port_num);
}
}
/* If we're still connected, refill the USB RX queue. */
if (!disconnect && port->port_usb)
gs_start_rx(port);
spin_unlock_irq(&port->port_lock);
}
Example 5: md32_irq_handler
irqreturn_t md32_irq_handler(int irq, void *dev_id)
{
struct reg_md32_to_host_ipc *md32_irq;
int reboot = 0;
md32_irq = (struct reg_md32_to_host_ipc *)MD32_TO_HOST_ADDR;
if(md32_irq->wdt_int)
{
md32_wdt_handler();
md32_aee_stop();
#if 0
md32_prepare_aed("md32 wdt", &work_md32_reboot.aed);
mt_reg_sync_writel(0x0, MD32_BASE);
#endif
md32_aee_status.m2h_irq = MD32_TO_HOST_REG;
md32_irq->wdt_int = 0;
reboot = 1;
}
if(md32_irq->pmem_disp_int)
{
md32_pmem_abort_handler();
md32_aee_stop();
#if 0
md32_prepare_aed("md32 pmem abort", &work_md32_reboot.aed);
mt_reg_sync_writel(0x0, MD32_BASE);
#endif
md32_aee_status.m2h_irq = MD32_TO_HOST_REG;
md32_irq->pmem_disp_int = 0;
reboot = 1;
}
if(md32_irq->dmem_disp_int)
{
md32_dmem_abort_handler();
md32_aee_stop();
#if 0
md32_prepare_aed("md32 dmem abort", &work_md32_reboot.aed);
mt_reg_sync_writel(0x0, MD32_BASE);
#endif
md32_aee_status.m2h_irq = MD32_TO_HOST_REG;
md32_irq->dmem_disp_int = 0;
reboot = 1;
}
if(md32_irq->md32_ipc_int)
{
md32_ipi_handler();
md32_irq->ipc_md2host = 0;
md32_irq->md32_ipc_int = 0;
}
MD32_TO_HOST_REG = 0x0;
if(reboot)
{
queue_work(wq_md32_reboot, (struct work_struct *)&work_md32_reboot);
}
return IRQ_HANDLED;
}
Example 6: baseband_xmm_power2_work_func
static void baseband_xmm_power2_work_func(struct work_struct *work)
{
struct baseband_xmm_power_work_t *bbxmm_work
= (struct baseband_xmm_power_work_t *) work;
int err;
pr_debug("%s bbxmm_work->state=%d\n", __func__, bbxmm_work->state);
switch (bbxmm_work->state) {
case BBXMM_WORK_UNINIT:
pr_debug("BBXMM_WORK_UNINIT\n");
/* free baseband irq(s) */
if (free_ipc_ap_wake_irq) {
free_irq(gpio_to_irq(baseband_power2_driver_data
->modem.xmm.ipc_ap_wake), NULL);
free_ipc_ap_wake_irq = 0;
}
break;
case BBXMM_WORK_INIT:
pr_debug("BBXMM_WORK_INIT\n");
/* request baseband irq(s) */
ipc_ap_wake_state = IPC_AP_WAKE_UNINIT;
err = request_threaded_irq(
gpio_to_irq(baseband_power2_driver_data->
modem.xmm.ipc_ap_wake),
NULL,
(modem_ver < XMM_MODEM_VER_1130)
? baseband_xmm_power2_ver_lt_1130_ipc_ap_wake_irq2
: baseband_xmm_power2_ver_ge_1130_ipc_ap_wake_irq2,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"BBXMM_POWER2_IPC_AP_WAKE_IRQ",
NULL);
if (err < 0) {
pr_err("%s - request irq IPC_AP_WAKE_IRQ failed\n",
__func__);
return;
}
free_ipc_ap_wake_irq = 1;
ipc_ap_wake_state = IPC_AP_WAKE_IRQ_READY;
/* go to next state */
bbxmm_work->state = (modem_flash && !modem_pm)
? BBXMM_WORK_INIT_FLASH_STEP1
: (modem_flash && modem_pm)
? BBXMM_WORK_INIT_FLASH_PM_STEP1
: (!modem_flash && modem_pm)
? BBXMM_WORK_INIT_FLASHLESS_PM_STEP1
: BBXMM_WORK_UNINIT;
queue_work(workqueue, work);
break;
case BBXMM_WORK_INIT_FLASH_STEP1:
pr_debug("BBXMM_WORK_INIT_FLASH_STEP1\n");
break;
case BBXMM_WORK_INIT_FLASH_PM_STEP1:
pr_debug("BBXMM_WORK_INIT_FLASH_PM_STEP1\n");
/* go to next state */
bbxmm_work->state = (modem_ver < XMM_MODEM_VER_1130)
? BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1
: BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1;
queue_work(workqueue, work);
break;
case BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1:
pr_debug("BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1\n");
break;
case BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1:
pr_debug("BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1\n");
break;
case BBXMM_WORK_INIT_FLASHLESS_PM_STEP1:
pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_STEP1\n");
/* go to next state */
bbxmm_work->state = (modem_ver < XMM_MODEM_VER_1130)
? BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_WAIT_IRQ
: BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1;
queue_work(workqueue, work);
break;
case BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_WAIT_IRQ:
pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_WAIT_IRQ"
" - waiting for IPC_AP_WAKE_IRQ to trigger step1\n");
break;
case BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP1:
pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP1\n");
baseband_xmm_power2_flashless_pm_ver_lt_1130_step1(work);
break;
case BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP2:
pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP2\n");
baseband_xmm_power2_flashless_pm_ver_lt_1130_step2(work);
break;
case BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1:
pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1\n");
baseband_xmm_power2_flashless_pm_ver_ge_1130_step1(work);
break;
case BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP2:
pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP2\n");
baseband_xmm_power2_flashless_pm_ver_ge_1130_step2(work);
break;
case BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP3:
pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP3\n");
baseband_xmm_power2_flashless_pm_ver_ge_1130_step3(work);
break;
case BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP4:
pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP4\n");
//......... part of the code omitted here .........
Example 7: flash_timer_callback
void flash_timer_callback(unsigned long data)
{
	queue_work(flash_wq, (struct work_struct *)work);
mod_timer(&flash_timer, jiffies + msecs_to_jiffies(10000));
}
Example 8: afs_wake_up_async_call
/*
* wake up an asynchronous call
*/
static void afs_wake_up_async_call(struct afs_call *call)
{
_enter("");
queue_work(afs_async_calls, &call->async_work);
}
Example 9: touch_led_timedout
void touch_led_timedout(unsigned long ptr)
{
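	/* Timer callbacks run in atomic (softirq) context, so the actual handling
	 * is deferred to the driver's workqueue. */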
queue_work(tkey_i2c_local->wq, &tkey_i2c_local->work);
}
Example 10: queue_up_suspend_work
void queue_up_suspend_work(void)
{
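	/* queue_work() is already a no-op for an item that is still pending;
	 * the explicit work_pending() test just skips the call cheaply. */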
if (!work_pending(&suspend_work) && autosleep_state > PM_SUSPEND_ON)
queue_work(autosleep_wq, &suspend_work);
}
Example 11: debug_flag_store
static ssize_t debug_flag_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long state = 0;
HS_DBG();
if (strncmp(buf, "enable", count - 1) == 0) {
if (hi->debug_flag & DEBUG_FLAG_ADC) {
HS_LOG("Debug work is already running");
return count;
}
if (!debug_wq) {
debug_wq = create_workqueue("debug");
if (!debug_wq) {
HS_LOG("Failed to create debug workqueue");
return count;
}
}
HS_LOG("Enable headset debug");
mutex_lock(&hi->mutex_lock);
hi->debug_flag |= DEBUG_FLAG_ADC;
mutex_unlock(&hi->mutex_lock);
queue_work(debug_wq, &debug_work);
} else if (strncmp(buf, "disable", count - 1) == 0) {
if (!(hi->debug_flag & DEBUG_FLAG_ADC)) {
HS_LOG("Debug work has been stopped");
return count;
}
HS_LOG("Disable headset debug");
mutex_lock(&hi->mutex_lock);
hi->debug_flag &= ~DEBUG_FLAG_ADC;
mutex_unlock(&hi->mutex_lock);
if (debug_wq) {
flush_workqueue(debug_wq);
destroy_workqueue(debug_wq);
debug_wq = NULL;
}
} else if (strncmp(buf, "debug_log_enable", count - 1) == 0) {
HS_LOG("Enable headset debug log");
hi->debug_flag |= DEBUG_FLAG_LOG;
} else if (strncmp(buf, "debug_log_disable", count - 1) == 0) {
HS_LOG("Disable headset debug log");
hi->debug_flag &= ~DEBUG_FLAG_LOG;
} else if (strncmp(buf, "no_headset", count - 1) == 0) {
HS_LOG("Headset simulation: no_headset");
state = BIT_HEADSET | BIT_HEADSET_NO_MIC | BIT_35MM_HEADSET |
BIT_USB_AUDIO_OUT;
switch_send_event(state, 0);
} else if (strncmp(buf, "35mm_mic", count - 1) == 0) {
HS_LOG("Headset simulation: 35mm_mic");
state = BIT_HEADSET | BIT_35MM_HEADSET;
switch_send_event(state, 1);
} else if (strncmp(buf, "35mm_no_mic", count - 1) == 0) {
HS_LOG("Headset simulation: 35mm_no_mic");
state = BIT_HEADSET_NO_MIC | BIT_35MM_HEADSET;
switch_send_event(state, 1);
} else if (strncmp(buf, "usb_audio", count - 1) == 0) {
HS_LOG("Headset simulation: usb_audio");
state = BIT_USB_AUDIO_OUT;
switch_send_event(state, 1);
} else {
HS_LOG("Invalid parameter");
return count;
}
return count;
}
Example 12: baseband_xmm_power2_ver_lt_1130_ipc_ap_wake_irq2
static irqreturn_t baseband_xmm_power2_ver_lt_1130_ipc_ap_wake_irq2
(int irq, void *dev_id)
{
int value;
pr_debug("%s\n", __func__);
/* check for platform data */
if (!baseband_power2_driver_data)
return IRQ_HANDLED;
value = gpio_get_value(baseband_power2_driver_data->
modem.xmm.ipc_ap_wake);
/* IPC_AP_WAKE state machine */
if (ipc_ap_wake_state < IPC_AP_WAKE_IRQ_READY) {
pr_err("%s - spurious irq\n", __func__);
} else if (ipc_ap_wake_state == IPC_AP_WAKE_IRQ_READY) {
if (!value) {
pr_debug("%s - IPC_AP_WAKE_INIT1"
" - got falling edge\n",
__func__);
/* go to IPC_AP_WAKE_INIT1 state */
ipc_ap_wake_state = IPC_AP_WAKE_INIT1;
/* queue work */
baseband_xmm_power2_work->state =
BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP1;
queue_work(workqueue, (struct work_struct *)
baseband_xmm_power2_work);
} else {
pr_debug("%s - IPC_AP_WAKE_INIT1"
" - wait for falling edge\n",
__func__);
}
} else if (ipc_ap_wake_state == IPC_AP_WAKE_INIT1) {
if (!value) {
pr_debug("%s - IPC_AP_WAKE_INIT2"
" - wait for rising edge\n",
__func__);
} else {
pr_debug("%s - IPC_AP_WAKE_INIT2"
" - got rising edge\n",
__func__);
/* go to IPC_AP_WAKE_INIT2 state */
ipc_ap_wake_state = IPC_AP_WAKE_INIT2;
/* queue work */
baseband_xmm_power2_work->state =
BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP2;
queue_work(workqueue, (struct work_struct *)
baseband_xmm_power2_work);
}
} else {
if (!value) {
pr_debug("%s - falling\n", __func__);
ipc_ap_wake_state = IPC_AP_WAKE_L;
} else {
pr_debug("%s - rising\n", __func__);
ipc_ap_wake_state = IPC_AP_WAKE_H;
}
return baseband_xmm_power_ipc_ap_wake_irq(irq, dev_id);
}
return IRQ_HANDLED;
}
Example 13: kbase_instr_hwcnt_enable_internal_sec
mali_error kbase_instr_hwcnt_enable_internal_sec(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_uk_hwcnt_setup *setup, bool firstcall)
{
unsigned long flags, pm_flags;
mali_error err = MALI_ERROR_FUNCTION_FAILED;
u32 irq_mask;
int ret;
u64 shader_cores_needed;
KBASE_DEBUG_ASSERT(NULL != kctx);
KBASE_DEBUG_ASSERT(NULL != kbdev);
KBASE_DEBUG_ASSERT(NULL != setup);
KBASE_DEBUG_ASSERT(NULL == kbdev->hwcnt.suspended_kctx);
if (firstcall) {
shader_cores_needed = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER);
/* Override core availability policy to ensure all cores are available */
kbase_pm_ca_instr_enable(kbdev);
/* Mark the context as active so the GPU is kept turned on */
kbase_pm_context_active(kbdev);
/* Request the cores early on synchronously - we'll release them on any errors
* (e.g. instrumentation already active) */
kbase_pm_request_cores_sync(kbdev, MALI_TRUE, shader_cores_needed);
}
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
/* GPU is being reset */
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
wait_event_timeout(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0, kbdev->hwcnt.timeout);
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
}
if (kbdev->hwcnt.state != KBASE_INSTR_STATE_DISABLED) {
/* Instrumentation is already enabled */
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
if (firstcall)
goto out_unrequest_cores;
else
goto out_err;
}
/* Enable interrupt */
spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags);
irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask | PRFCNT_SAMPLE_COMPLETED, NULL);
spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags);
/* In use, this context is the owner */
kbdev->hwcnt.kctx = kctx;
/* Remember the dump address so we can reprogram it later */
kbdev->hwcnt.addr = setup->dump_buffer;
if (firstcall) {
/* Remember all the settings for suspend/resume */
if (&kbdev->hwcnt.suspended_state != setup)
memcpy(&kbdev->hwcnt.suspended_state, setup, sizeof(kbdev->hwcnt.suspended_state));
/* Request the clean */
kbdev->hwcnt.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
kbdev->hwcnt.triggered = 0;
/* Clean&invalidate the caches so we're sure the mmu tables for the dump buffer is valid */
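		/* queue_work() returns false if the item was already pending; the
		 * assert below checks that the cache clean was really scheduled. */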
ret = queue_work(kbdev->hwcnt.cache_clean_wq, &kbdev->hwcnt.cache_clean_work);
KBASE_DEBUG_ASSERT(ret);
}
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
if (firstcall) {
/* Wait for cacheclean to complete */
wait_event_timeout(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0, kbdev->hwcnt.timeout);
}
KBASE_DEBUG_ASSERT(kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE);
if (firstcall) {
/* Schedule the context in */
kbasep_js_schedule_privileged_ctx(kbdev, kctx);
kbase_pm_context_idle(kbdev);
} else {
kbase_mmu_update(kctx);
}
/* Configure */
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_OFF, kctx);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO), setup->dump_buffer & 0xFFFFFFFF, kctx);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI), setup->dump_buffer >> 32, kctx);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN), setup->jm_bm, kctx);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN), setup->shader_bm, kctx);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_L3_CACHE_EN), setup->l3_cache_bm, kctx);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN), setup->mmu_l2_bm, kctx);
/* Due to PRLAM-8186 we need to disable the Tiler before we enable the HW counter dump. */
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0, kctx);
else
//......... part of the code omitted here .........
Example 14: tdmb_fc8080_spi_write_read
int tdmb_fc8080_spi_write_read(uint8* tx_data, int tx_length, uint8 *rx_data, int rx_length)
{
int rc;
struct spi_transfer t = {
.tx_buf = tx_data,
.rx_buf = rx_data,
.len = tx_length+rx_length,
};
struct spi_message m;
if (fc8080_ctrl_info.spi_ptr == NULL)
{
printk("tdmb_fc8080_spi_write_read error txdata=0x%x, length=%d\n", (unsigned int)tx_data, tx_length+rx_length);
return FALSE;
}
mutex_lock(&fc8080_ctrl_info.mutex);
spi_message_init(&m);
spi_message_add_tail(&t, &m);
rc = spi_sync(fc8080_ctrl_info.spi_ptr, &m);
if ( rc < 0 )
{
printk("tdmb_fc8080_spi_read_burst result(%d), actual_len=%d\n",rc, m.actual_length);
}
mutex_unlock(&fc8080_ctrl_info.mutex);
return TRUE;
}
#ifdef FEATURE_DMB_USE_WORKQUEUE
static irqreturn_t broadcast_tdmb_spi_isr(int irq, void *handle)
{
struct tdmb_fc8080_ctrl_blk* fc8080_info_p;
fc8080_info_p = (struct tdmb_fc8080_ctrl_blk *)handle;
if ( fc8080_info_p && fc8080_info_p->TdmbPowerOnState )
{
unsigned long flag;
if (fc8080_info_p->spi_irq_status)
{
printk("######### spi read function is so late skip #########\n");
return IRQ_HANDLED;
}
// printk("***** broadcast_tdmb_spi_isr coming *******\n");
spin_lock_irqsave(&fc8080_info_p->spin_lock, flag);
queue_work(fc8080_info_p->spi_wq, &fc8080_info_p->spi_work);
spin_unlock_irqrestore(&fc8080_info_p->spin_lock, flag);
}
else
{
printk("broadcast_tdmb_spi_isr is called, but device is off state\n");
}
return IRQ_HANDLED;
}
static void broacast_tdmb_spi_work(struct work_struct *tdmb_work)
{
struct tdmb_fc8080_ctrl_blk *pTdmbWorkData;
pTdmbWorkData = container_of(tdmb_work, struct tdmb_fc8080_ctrl_blk, spi_work);
if ( pTdmbWorkData )
{
tunerbb_drv_fc8080_isr_control(0);
pTdmbWorkData->spi_irq_status = TRUE;
broadcast_fc8080_drv_if_isr();
pTdmbWorkData->spi_irq_status = FALSE;
tunerbb_drv_fc8080_isr_control(1);
}
else
{
printk("~~~~~~~broadcast_tdmb_spi_work call but pTdmbworkData is NULL ~~~~~~~\n");
}
}
#else
static irqreturn_t broadcast_tdmb_spi_event_handler(int irq, void *handle)
{
struct tdmb_fc8080_ctrl_blk* fc8080_info_p;
fc8080_info_p = (struct tdmb_fc8080_ctrl_blk *)handle;
if ( fc8080_info_p && fc8080_info_p->TdmbPowerOnState )
{
if (fc8080_info_p->spi_irq_status)
{
printk("######### spi read function is so late skip ignore #########\n");
return IRQ_HANDLED;
}
tunerbb_drv_fc8080_isr_control(0);
fc8080_info_p->spi_irq_status = TRUE;
broadcast_fc8080_drv_if_isr();
fc8080_info_p->spi_irq_status = FALSE;
tunerbb_drv_fc8080_isr_control(1);
}
else
//......... part of the code omitted here .........
Example 15: diag_smd_read
static int diag_smd_read(void *ctxt, unsigned char *buf, int buf_len)
{
int pkt_len = 0;
int err = 0;
int total_recd_partial = 0;
int total_recd = 0;
uint8_t buf_full = 0;
unsigned char *temp_buf = NULL;
uint32_t read_len = 0;
struct diag_smd_info *smd_info = NULL;
if (!ctxt || !buf || buf_len <= 0)
return -EIO;
smd_info = (struct diag_smd_info *)ctxt;
if (!smd_info->hdl || !smd_info->inited ||
!atomic_read(&smd_info->opened))
return -EIO;
/*
* Always try to read the data if notification is received from smd
* In case if packet size is 0 release the wake source hold earlier
*/
err = wait_event_interruptible(smd_info->read_wait_q,
(smd_info->hdl != NULL) &&
(atomic_read(&smd_info->opened) == 1));
if (err) {
diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, 0);
return -ERESTARTSYS;
}
/*
* Reset the buffers. Also release the wake source hold earlier.
*/
if (atomic_read(&smd_info->diag_state) == 0) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"%s closing read thread. diag state is closed\n",
smd_info->name);
diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, 0);
return 0;
}
if (!smd_info->hdl || !atomic_read(&smd_info->opened)) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"%s stopping read, hdl: %pK, opened: %d\n",
smd_info->name, smd_info->hdl,
atomic_read(&smd_info->opened));
goto fail_return;
}
do {
total_recd_partial = 0;
temp_buf = buf + total_recd;
pkt_len = smd_cur_packet_size(smd_info->hdl);
if (pkt_len <= 0)
break;
if (total_recd + pkt_len > buf_len) {
buf_full = 1;
break;
}
while (total_recd_partial < pkt_len) {
read_len = smd_read_avail(smd_info->hdl);
if (!read_len) {
wait_event_interruptible(smd_info->read_wait_q,
((atomic_read(&smd_info->opened)) &&
smd_read_avail(smd_info->hdl)));
if (!smd_info->hdl ||
!atomic_read(&smd_info->opened)) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"%s exiting from wait",
smd_info->name);
goto fail_return;
}
}
if (pkt_len < read_len)
goto fail_return;
smd_read(smd_info->hdl, temp_buf, read_len);
total_recd_partial += read_len;
total_recd += read_len;
temp_buf += read_len;
}
} while (pkt_len > 0);
if ((smd_info->type == TYPE_DATA && pkt_len) || buf_full)
err = queue_work(smd_info->wq, &(smd_info->read_work));
if (total_recd > 0) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s read total bytes: %d\n",
smd_info->name, total_recd);
diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, total_recd);
} else {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s error in read, err: %d\n",
smd_info->name, total_recd);
goto fail_return;
}
//......... part of the code omitted here .........