This article collects typical usage examples of the C/C++ INIT_COMPLETION function (a macro from the Linux kernel completion API). If you have been wondering what INIT_COMPLETION does, how to call it, or what it looks like in real code, the curated examples below may help.
The following presents 15 code examples of the INIT_COMPLETION function, sorted by popularity by default.
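Before the examples, here is a minimal, self-contained sketch of the pattern almost all of them follow: re-arm the completion with INIT_COMPLETION() right before kicking off the hardware, signal it with complete() from the interrupt handler, and block the caller with a timeout. This is illustrative only; struct my_dev, my_start_hw() and MY_TIMEOUT_MS are hypothetical placeholders, not taken from any driver below. INIT_COMPLETION() merely resets the completion's done counter (unlike init_completion(), it does not re-initialize the wait-queue head), which is why it is the right call for re-using an already-initialized completion; kernels 3.13 and later replace the macro with reinit_completion().

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define MY_TIMEOUT_MS	100			/* hypothetical timeout */

struct my_dev {
	struct completion done;			/* init_completion() once at probe time */
};

/* Hypothetical helper: program the device registers and enable its IRQ. */
static void my_start_hw(struct my_dev *dev)
{
	/* device-specific register writes go here */
}

/* The IRQ handler signals the waiter once the hardware has finished. */
static irqreturn_t my_irq(int irq, void *data)
{
	struct my_dev *dev = data;

	complete(&dev->done);
	return IRQ_HANDLED;
}

static int my_do_transfer(struct my_dev *dev)
{
	unsigned long left;

	INIT_COMPLETION(dev->done);		/* re-arm; reinit_completion() on 3.13+ */
	my_start_hw(dev);

	left = wait_for_completion_timeout(&dev->done,
					   msecs_to_jiffies(MY_TIMEOUT_MS));
	if (!left)
		return -ETIMEDOUT;		/* hardware never signalled completion */

	return 0;
}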
Example 1: rtc6213n_set_chan
/*
* rtc6213n_set_chan - set the channel
*/
static int rtc6213n_set_chan(struct rtc6213n_device *radio, unsigned short chan)
{
int retval;
unsigned long timeout;
bool timed_out = false;
unsigned short current_chan =
radio->registers[CHANNEL] & CHANNEL_CSR0_CH;
dev_info(&radio->videodev->dev, "======== rtc6213n_set_chan ========\n");
dev_info(&radio->videodev->dev, "RTC6213n tuning process is starting\n");
dev_info(&radio->videodev->dev, "CHAN=0x%4.4hx SKCFG1=0x%4.4hx STATUS=0x%4.4hx chan=0x%4.4hx\n",
radio->registers[CHANNEL], radio->registers[SEEKCFG1],
radio->registers[STATUS], chan);
/* start tuning */
radio->registers[CHANNEL] &= ~CHANNEL_CSR0_CH;
radio->registers[CHANNEL] |= CHANNEL_CSR0_TUNE | chan;
retval = rtc6213n_set_register(radio, CHANNEL);
if (retval < 0) {
radio->registers[CHANNEL] = current_chan;
goto done;
}
/* currently I2C driver only uses interrupt way to tune */
if (radio->stci_enabled) {
INIT_COMPLETION(radio->completion);
/* wait till tune operation has completed */
retval = wait_for_completion_timeout(&radio->completion,
msecs_to_jiffies(tune_timeout));
if (!retval) {
dev_info(&radio->videodev->dev, "rtc6213n_set_chan : timed_out\n");
timed_out = true;
}
retval = rtc6213n_get_register(radio, STATUS);
if (retval < 0)
goto stop;
} else {
/* wait till tune operation has completed */
timeout = jiffies + msecs_to_jiffies(tune_timeout);
do {
retval = rtc6213n_get_all_registers(radio);
if (retval < 0)
goto stop;
timed_out = time_after(jiffies, timeout);
} while (((radio->registers[STATUS] & STATUS_STD) == 0)
&& (!timed_out));
}
dev_info(&radio->videodev->dev, "RTC6213n tuning process is done\n");
dev_info(&radio->videodev->dev, "CHAN=0x%4.4hx SKCFG1=0x%4.4hx STATUS=0x%4.4hx, STD = %d, SF = %d, RSSI = %d\n",
radio->registers[CHANNEL], radio->registers[SEEKCFG1],
radio->registers[STATUS],
(radio->registers[STATUS] & STATUS_STD) >> 14,
(radio->registers[STATUS] & STATUS_SF) >> 13,
(radio->registers[RSSI] & RSSI_RSSI));
if ((radio->registers[STATUS] & STATUS_STD) == 0)
dev_info(&radio->videodev->dev, "tune does not complete\n");
if (timed_out)
dev_info(&radio->videodev->dev, "tune timed out after %u ms\n",
tune_timeout);
stop:
/* stop tuning */
current_chan = radio->registers[CHANNEL] & CHANNEL_CSR0_CH;
radio->registers[CHANNEL] &= ~CHANNEL_CSR0_TUNE;
retval = rtc6213n_set_register(radio, CHANNEL);
if (retval < 0) {
radio->registers[CHANNEL] = current_chan;
goto done;
}
retval = rtc6213n_get_register(radio, STATUS);
if (retval < 0)
goto done;
done:
dev_info(&radio->videodev->dev, "rtc6213n_set_chans is done\n");
dev_info(&radio->videodev->dev, "CHANNEL=0x%4.4hx SEEKCFG1=0x%4.4hx STATUS=0x%4.4hx\n",
radio->registers[CHANNEL], radio->registers[SEEKCFG1],
radio->registers[STATUS]);
dev_info(&radio->videodev->dev, "========= rtc6213n_set_chan End ==========\n");
return retval;
}
Example 2: mdm_modem_ioctl
long mdm_modem_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int status, ret = 0;
if (_IOC_TYPE(cmd) != CHARM_CODE) {
pr_err("%s: invalid ioctl code\n", __func__);
return -EINVAL;
}
pr_debug("%s: Entering ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
switch (cmd) {
case WAKE_CHARM:
pr_info("%s: Powering on mdm\n", __func__);
mdm_drv->mdm_ready = 0;
mdm_drv->mdm_hsic_reconnectd = 0;
mdm_drv->ops->power_on_mdm_cb(mdm_drv);
break;
case CHECK_FOR_BOOT:
if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
put_user(1, (unsigned long __user *) arg);
else
put_user(0, (unsigned long __user *) arg);
break;
case NORMAL_BOOT_DONE:
{
int ret_mdm_hsic_reconnectd = 0;
pr_debug("%s: check if mdm is booted up\n", __func__);
get_user(status, (unsigned long __user *) arg);
if (status) {
pr_debug("%s: normal boot failed\n", __func__);
mdm_drv->mdm_boot_status = -EIO;
} else {
pr_info("%s: normal boot done\n", __func__);
mdm_drv->mdm_boot_status = 0;
}
mdm_status_change_notified = false;
queue_work_on(0, mdm_gpio_monitor_queue, &mdm_status_check_work);
mdm_drv->mdm_ready = 1;
if (mdm_drv->ops->normal_boot_done_cb != NULL)
mdm_drv->ops->normal_boot_done_cb(mdm_drv);
ret_mdm_hsic_reconnectd = mdm_hsic_reconnectd_check_fn();
if (ret_mdm_hsic_reconnectd == 1) {
pr_info("%s: ret_mdm_hsic_reconnectd == 1\n", __func__);
} else {
pr_info("%s: ret_mdm_hsic_reconnectd == 0\n", __func__);
}
if (!first_boot)
complete(&mdm_boot);
else
first_boot = 0;
}
break;
case RAM_DUMP_DONE:
pr_debug("%s: mdm done collecting RAM dumps\n", __func__);
get_user(status, (unsigned long __user *) arg);
if (status)
mdm_drv->mdm_ram_dump_status = -EIO;
else {
pr_info("%s: ramdump collection completed\n", __func__);
mdm_drv->mdm_ram_dump_status = 0;
}
complete(&mdm_ram_dumps);
break;
case WAIT_FOR_RESTART:
pr_debug("%s: wait for mdm to need images reloaded\n",
__func__);
if (mdm_drv) {
dump_gpio("MDM2AP_STATUS", mdm_drv->mdm2ap_status_gpio);
dump_gpio("MDM2AP_ERRFATAL", mdm_drv->mdm2ap_errfatal_gpio);
}
ret = wait_for_completion_interruptible(&mdm_needs_reload);
if (!ret && mdm_drv) {
put_user(mdm_drv->boot_type,
(unsigned long __user *) arg);
pr_err("%s: mdm_drv->boot_type:%d\n", __func__, mdm_drv->boot_type);
}
INIT_COMPLETION(mdm_needs_reload);
break;
case GET_MFG_MODE:
pr_info("%s: board_mfg_mode()=%d\n", __func__, board_mfg_mode());
put_user(board_mfg_mode(),
(unsigned long __user *) arg);
break;
case SET_MODEM_ERRMSG:
pr_info("%s: Set modem fatal errmsg\n", __func__);
ret = set_mdm_errmsg((void __user *) arg);
break;
case GET_RADIO_FLAG:
pr_info("%s:get_radio_flag()=%x\n", __func__, get_radio_flag());
//......... part of the code omitted here .........
Example 3: omap34xx_bridge_probe
//......... part of the code omitted here .........
GT_create(&driverTrace, "LD");
#ifdef CONFIG_BRIDGE_DEBUG
if (GT_str)
GT_set(GT_str);
#elif defined(DDSP_DEBUG_PRODUCT) && GT_TRACE
GT_set("**=67");
#endif
#ifdef CONFIG_PM
/* Initialize the wait queue */
bridge_suspend_data.suspended = 0;
init_waitqueue_head(&bridge_suspend_data.suspend_wq);
#endif
SERVICES_Init();
/* Autostart flag. This should be set to true if the DSP image should
* be loaded and run during bridge module initialization */
if (base_img) {
temp = true;
REG_SetValue(AUTOSTART, (u8 *)&temp, sizeof(temp));
REG_SetValue(DEFEXEC, (u8 *)base_img, strlen(base_img) + 1);
} else {
temp = false;
REG_SetValue(AUTOSTART, (u8 *)&temp, sizeof(temp));
REG_SetValue(DEFEXEC, (u8 *) "\0", (u32)2);
}
if (shm_size >= 0x10000) { /* 64 KB */
initStatus = REG_SetValue(SHMSIZE, (u8 *)&shm_size,
sizeof(shm_size));
} else {
initStatus = DSP_EINVALIDARG;
status = -1;
pr_err("%s: SHM size must be at least 64 KB\n", __func__);
}
GT_1trace(driverTrace, GT_7CLASS,
"requested shm_size = 0x%x\n", shm_size);
if (pdata->phys_mempool_base && pdata->phys_mempool_size) {
phys_mempool_base = pdata->phys_mempool_base;
phys_mempool_size = pdata->phys_mempool_size;
}
GT_1trace(driverTrace, GT_7CLASS, "phys_mempool_base = 0x%x \n",
phys_mempool_base);
GT_1trace(driverTrace, GT_7CLASS, "phys_mempool_size = 0x%x\n",
phys_mempool_base);
if ((phys_mempool_base > 0x0) && (phys_mempool_size > 0x0))
MEM_ExtPhysPoolInit(phys_mempool_base, phys_mempool_size);
if (tc_wordswapon) {
GT_0trace(driverTrace, GT_7CLASS, "TC Word Swap is enabled\n");
REG_SetValue(TCWORDSWAP, (u8 *)&tc_wordswapon,
sizeof(tc_wordswapon));
} else {
GT_0trace(driverTrace, GT_7CLASS, "TC Word Swap is disabled\n");
REG_SetValue(TCWORDSWAP, (u8 *)&tc_wordswapon,
sizeof(tc_wordswapon));
}
if (DSP_SUCCEEDED(initStatus)) {
#ifdef CONFIG_BRIDGE_DVFS
clk_handle = clk_get(NULL, "iva2_ck");
if (!clk_handle)
pr_err("%s: clk_get failed to get iva2_ck\n", __func__);
if (clk_notifier_register(clk_handle, &iva_clk_notifier))
pr_err("%s: clk_notifier_register failed for iva2_ck\n",
__func__);
if (!min_dsp_freq)
min_dsp_freq = pdata->mpu_min_speed;
#endif
driverContext = DSP_Init(&initStatus);
if (DSP_FAILED(initStatus)) {
status = -1;
pr_err("DSP Bridge driver initialization failed\n");
} else {
pr_info("DSP Bridge driver loaded\n");
}
}
#ifdef CONFIG_BRIDGE_RECOVERY
bridge_rec_queue = create_workqueue("bridge_rec_queue");
INIT_WORK(&bridge_recovery_work, bridge_recover);
INIT_COMPLETION(bridge_comp);
#endif
DBC_Assert(status == 0);
DBC_Assert(DSP_SUCCEEDED(initStatus));
return 0;
err2:
unregister_chrdev_region(dev, 1);
err1:
return result;
}
Example 4: mdp4_dsi_cmd_do_blt
static void mdp4_dsi_cmd_do_blt(struct msm_fb_data_type *mfd, int enable)
{
unsigned long flags;
int cndx = 0;
struct vsycn_ctrl *vctrl;
struct mdp4_overlay_pipe *pipe;
int need_wait = 0;
vctrl = &vsync_ctrl_db[cndx];
pipe = vctrl->base_pipe;
mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
if (mfd->ov0_wb_buf->write_addr == 0) {
pr_err("%s: no blt_base assigned\n", __func__);
return;
}
spin_lock_irqsave(&vctrl->spin_lock, flags);
if (enable && pipe->ov_blt_addr == 0) {
vctrl->blt_change++;
if (vctrl->dmap_koff != vctrl->dmap_done) {
INIT_COMPLETION(vctrl->dmap_comp);
need_wait = 1;
}
} else if (enable == 0 && pipe->ov_blt_addr) {
vctrl->blt_change++;
if (vctrl->ov_koff != vctrl->dmap_done) {
INIT_COMPLETION(vctrl->dmap_comp);
need_wait = 1;
}
}
spin_unlock_irqrestore(&vctrl->spin_lock, flags);
if (need_wait)
mdp4_dsi_cmd_wait4dmap(0);
spin_lock_irqsave(&vctrl->spin_lock, flags);
if (enable && pipe->ov_blt_addr == 0) {
pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
pipe->ov_cnt = 0;
pipe->dmap_cnt = 0;
vctrl->ov_koff = vctrl->dmap_koff;
vctrl->ov_done = vctrl->dmap_done;
vctrl->blt_free = 0;
vctrl->blt_wait = 0;
vctrl->blt_end = 0;
mdp4_stat.blt_dsi_video++;
} else if (enable == 0 && pipe->ov_blt_addr) {
pipe->ov_blt_addr = 0;
pipe->dma_blt_addr = 0;
vctrl->blt_end = 1;
vctrl->blt_free = 4; /* 4 commits to free wb buf */
}
pr_debug("%s: changed=%d enable=%d ov_blt_addr=%x\n", __func__,
vctrl->blt_change, enable, (int)pipe->ov_blt_addr);
spin_unlock_irqrestore(&vctrl->spin_lock, flags);
}
Example 5: mdm_modem_ioctl
long mdm_modem_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int status, ret = 0;
if (_IOC_TYPE(cmd) != CHARM_CODE) {
pr_err("%s: invalid ioctl code\n", __func__);
return -EINVAL;
}
pr_debug("%s: Entering ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
switch (cmd) {
case WAKE_CHARM:
pr_info("%s: Powering on mdm\n", __func__);
mdm_drv->ops->power_on_mdm_cb(mdm_drv);
break;
case CHECK_FOR_BOOT:
if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
put_user(1, (unsigned long __user *) arg);
else
put_user(0, (unsigned long __user *) arg);
break;
case NORMAL_BOOT_DONE:
pr_info("%s: check if mdm is booted up\n", __func__);
get_user(status, (unsigned long __user *) arg);
if (status) {
pr_debug("%s: normal boot failed\n", __func__);
mdm_drv->mdm_boot_status = -EIO;
} else {
pr_info("%s: normal boot done\n", __func__);
mdm_drv->mdm_boot_status = 0;
}
mdm_drv->mdm_ready = 1;
if (mdm_drv->ops->normal_boot_done_cb != NULL)
mdm_drv->ops->normal_boot_done_cb(mdm_drv);
if (!first_boot)
complete(&mdm_boot);
else
first_boot = 0;
complete(&mdm_boot);
break;
case RAM_DUMP_DONE:
pr_info("%s: mdm done collecting RAM dumps\n", __func__);
get_user(status, (unsigned long __user *) arg);
if (status)
mdm_drv->mdm_ram_dump_status = -EIO;
else {
pr_info("%s: ramdump collection completed\n", __func__);
mdm_drv->mdm_ram_dump_status = 0;
msleep(1000);
panic("CP Crash %s", mdm_read_err_report());
}
complete(&mdm_ram_dumps);
break;
case WAIT_FOR_RESTART:
pr_info("%s: wait for mdm to need images reloaded\n",
__func__);
ret = wait_for_completion_interruptible(&mdm_needs_reload);
if (!ret)
put_user(mdm_drv->boot_type,
(unsigned long __user *) arg);
INIT_COMPLETION(mdm_needs_reload);
break;
case SILENT_RESET_CONTROL:
pr_info("%s: mdm doing silent reset\n", __func__);
mdm_drv->mdm_ram_dump_status = 0;
complete(&mdm_ram_dumps);
break;
default:
pr_err("%s: invalid ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
ret = -EINVAL;
break;
}
return ret;
}
Example 6: omap3_bridge_startup
/**
* omap3_bridge_startup() - perform low lever initializations
* @pdev: pointer to platform device
*
* Initializes recovery, PM and DVFS required data, before calling
* clk and memory init routines.
*/
static int omap3_bridge_startup(struct platform_device *pdev)
{
struct omap_dsp_platform_data *pdata = pdev->dev.platform_data;
struct drv_data *drv_datap = NULL;
u32 phys_membase, phys_memsize;
int err;
#ifdef CONFIG_TIDSPBRIDGE_DVFS
int i;
#endif
#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
bridge_rec_queue = create_workqueue("bridge_rec_queue");
INIT_WORK(&bridge_recovery_work, bridge_recover);
INIT_COMPLETION(bridge_comp);
#endif
#ifdef CONFIG_PM
/* Initialize the wait queue */
bridge_suspend_data.suspended = 0;
init_waitqueue_head(&bridge_suspend_data.suspend_wq);
#ifdef CONFIG_TIDSPBRIDGE_DVFS
for (i = 0; i < 6; i++)
pdata->mpu_speed[i] = vdd1_rate_table_bridge[i].rate;
err = cpufreq_register_notifier(&iva_clk_notifier,
CPUFREQ_TRANSITION_NOTIFIER);
if (err)
pr_err("%s: clk_notifier_register failed for iva2_ck\n",
__func__);
#endif
#endif
dsp_clk_init();
drv_datap = kzalloc(sizeof(struct drv_data), GFP_KERNEL);
if (!drv_datap) {
err = -ENOMEM;
goto err1;
}
drv_datap->shm_size = shm_size;
drv_datap->tc_wordswapon = tc_wordswapon;
if (base_img) {
drv_datap->base_img = kmalloc(strlen(base_img) + 1, GFP_KERNEL);
if (!drv_datap->base_img) {
err = -ENOMEM;
goto err2;
}
strncpy(drv_datap->base_img, base_img, strlen(base_img) + 1);
}
dev_set_drvdata(bridge, drv_datap);
if (shm_size < 0x10000) { /* 64 KB */
err = -EINVAL;
pr_err("%s: shm size must be at least 64 KB\n", __func__);
goto err3;
}
dev_dbg(bridge, "%s: requested shm_size = 0x%x\n", __func__, shm_size);
phys_membase = pdata->phys_mempool_base;
phys_memsize = pdata->phys_mempool_size;
if (phys_membase > 0 && phys_memsize > 0)
mem_ext_phys_pool_init(phys_membase, phys_memsize);
if (tc_wordswapon)
dev_dbg(bridge, "%s: TC Word Swap is enabled\n", __func__);
driver_context = dsp_init(&err);
if (err) {
pr_err("DSP Bridge driver initialization failed\n");
goto err4;
}
return 0;
err4:
mem_ext_phys_pool_release();
err3:
kfree(drv_datap->base_img);
err2:
kfree(drv_datap);
err1:
#ifdef CONFIG_TIDSPBRIDGE_DVFS
cpufreq_unregister_notifier(&iva_clk_notifier,
CPUFREQ_TRANSITION_NOTIFIER);
#endif
dsp_clk_exit();
return err;
}
Example 7: at91_do_twi_transfer
static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
int ret;
bool has_unre_flag = dev->pdata->has_unre_flag;
dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
INIT_COMPLETION(dev->cmd_complete);
dev->transfer_status = 0;
if (!dev->buf_len) {
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
} else if (dev->msg->flags & I2C_M_RD) {
unsigned start_flags = AT91_TWI_START;
if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
dev_err(dev->dev, "RXRDY still set!");
at91_twi_read(dev, AT91_TWI_RHR);
}
/* if only one byte is to be read, immediately stop transfer */
if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN))
start_flags |= AT91_TWI_STOP;
at91_twi_write(dev, AT91_TWI_CR, start_flags);
/*
* When using dma, the last byte has to be read manually in
* order to not send the stop command too late and then
* to receive extra data. In practice, there are some issues
* if you use the dma to read n-1 bytes because of latency.
* Reading n-2 bytes with dma and the two last ones manually
* seems to be the best solution.
*/
if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
at91_twi_read_data_dma(dev);
/*
* It is important to enable TXCOMP irq here because
* doing it only when transferring the last two bytes
* will mask NACK errors since TXCOMP is set when a
* NACK occurs.
*/
at91_twi_write(dev, AT91_TWI_IER,
AT91_TWI_TXCOMP);
} else
at91_twi_write(dev, AT91_TWI_IER,
AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
} else {
if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
at91_twi_write_data_dma(dev);
at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
} else {
at91_twi_write_next_byte(dev);
at91_twi_write(dev, AT91_TWI_IER,
AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
}
}
ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
dev->adapter.timeout);
if (ret == 0) {
dev_err(dev->dev, "controller timed out\n");
at91_init_twi_bus(dev);
ret = -ETIMEDOUT;
goto error;
}
if (dev->transfer_status & AT91_TWI_NACK) {
dev_dbg(dev->dev, "received nack\n");
ret = -EREMOTEIO;
goto error;
}
if (dev->transfer_status & AT91_TWI_OVRE) {
dev_err(dev->dev, "overrun while reading\n");
ret = -EIO;
goto error;
}
if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
dev_err(dev->dev, "underrun while writing\n");
ret = -EIO;
goto error;
}
dev_dbg(dev->dev, "transfer complete\n");
return 0;
error:
at91_twi_dma_cleanup(dev);
return ret;
}
Example 8: mdss_dsi_cmd_dma_tx
static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
struct dsi_buf *tp)
{
int len, ret = 0;
int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
char *bp;
unsigned long size, addr;
struct mdss_dsi_ctrl_pdata *mctrl = NULL;
bp = tp->data;
len = ALIGN(tp->len, 4);
size = ALIGN(tp->len, SZ_4K);
if (is_mdss_iommu_attached()) {
ret = msm_iommu_map_contig_buffer(tp->dmap,
mdss_get_iommu_domain(domain), 0,
size, SZ_4K, 0, &(addr));
if (IS_ERR_VALUE(ret)) {
pr_err("unable to map dma memory to iommu(%d)\n", ret);
return -ENOMEM;
}
} else {
addr = tp->dmap;
}
INIT_COMPLETION(ctrl->dma_comp);
/* Ensure that for slave controller, master is also configured */
if (mdss_dsi_is_slave_ctrl(ctrl)) {
mctrl = mdss_dsi_get_master_ctrl();
if (mctrl) {
MIPI_OUTP(mctrl->ctrl_base + 0x048, addr);
MIPI_OUTP(mctrl->ctrl_base + 0x04c, len);
} else {
pr_warn("%s: Unable to get master control\n",
__func__);
}
}
MIPI_OUTP((ctrl->ctrl_base) + 0x048, addr);
MIPI_OUTP((ctrl->ctrl_base) + 0x04c, len);
wmb();
/* Trigger on master controller as well */
if (mctrl)
MIPI_OUTP(mctrl->ctrl_base + 0x090, 0x01);
MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01);
wmb();
ret = wait_for_completion_timeout(&ctrl->dma_comp,
msecs_to_jiffies(DMA_TX_TIMEOUT));
if (ret == 0)
ret = -ETIMEDOUT;
else
ret = tp->len;
if (is_mdss_iommu_attached())
msm_iommu_unmap_contig_buffer(addr,
mdss_get_iommu_domain(domain), 0, size);
return ret;
}
Example 9: mhl_send_msc_command
int mhl_send_msc_command(struct mhl_tx_ctrl *mhl_ctrl,
struct msc_command_struct *req)
{
int timeout;
u8 start_bit = 0x00;
u8 *burst_data;
int i;
struct i2c_client *client = mhl_ctrl->i2c_handle;
if (mhl_ctrl->cur_state != POWER_STATE_D0_MHL) {
pr_debug("%s: power_state:%02x CBUS(0x0A):%02x\n",
__func__,
mhl_ctrl->cur_state,
MHL_SII_REG_NAME_RD(REG_CBUS_BUS_STATUS));
return -EFAULT;
}
if (!req)
return -EFAULT;
pr_debug("%s: command=0x%02x offset=0x%02x %02x %02x",
__func__,
req->command,
req->offset,
req->payload.data[0],
req->payload.data[1]);
/* REG_CBUS_PRI_ADDR_CMD = REQ CBUS CMD or OFFSET */
MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->offset);
MHL_SII_REG_NAME_WR(REG_CBUS_PRI_WR_DATA_1ST,
req->payload.data[0]);
switch (req->command) {
case MHL_SET_INT:
case MHL_WRITE_STAT:
start_bit = MSC_START_BIT_WRITE_REG;
break;
case MHL_READ_DEVCAP:
start_bit = MSC_START_BIT_READ_REG;
break;
case MHL_GET_STATE:
case MHL_GET_VENDOR_ID:
case MHL_SET_HPD:
case MHL_CLR_HPD:
case MHL_GET_SC1_ERRORCODE:
case MHL_GET_DDC_ERRORCODE:
case MHL_GET_MSC_ERRORCODE:
case MHL_GET_SC3_ERRORCODE:
start_bit = MSC_START_BIT_MSC_CMD;
MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->command);
break;
case MHL_MSC_MSG:
start_bit = MSC_START_BIT_VS_CMD;
MHL_SII_REG_NAME_WR(REG_CBUS_PRI_WR_DATA_2ND,
req->payload.data[1]);
MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->command);
break;
case MHL_WRITE_BURST:
start_bit = MSC_START_BIT_WRITE_BURST;
MHL_SII_REG_NAME_WR(REG_MSC_WRITE_BURST_LEN, req->length - 1);
if (!(req->payload.burst_data)) {
pr_err("%s: burst data is null!\n", __func__);
goto cbus_send_fail;
}
burst_data = req->payload.burst_data;
for (i = 0; i < req->length; i++, burst_data++)
MHL_SII_REG_NAME_WR(REG_CBUS_SCRATCHPAD_0 + i,
*burst_data);
break;
default:
pr_err("%s: unknown command! (%02x)\n",
__func__, req->command);
goto cbus_send_fail;
}
INIT_COMPLETION(mhl_ctrl->msc_cmd_done);
MHL_SII_REG_NAME_WR(REG_CBUS_PRI_START, start_bit);
timeout = wait_for_completion_interruptible_timeout
(&mhl_ctrl->msc_cmd_done, msecs_to_jiffies(T_ABORT_NEXT));
if (!timeout) {
pr_err("%s: cbus_command_send timed out!\n", __func__);
goto cbus_send_fail;
}
switch (req->command) {
case MHL_READ_DEVCAP:
req->retval = MHL_SII_REG_NAME_RD(REG_CBUS_PRI_RD_DATA_1ST);
break;
case MHL_MSC_MSG:
/* check if MSC_MSG NACKed */
if (MHL_SII_REG_NAME_RD(REG_MSC_WRITE_BURST_LEN) & BIT6)
return -EAGAIN;
default:
req->retval = 0;
break;
}
mhl_msc_command_done(mhl_ctrl, req);
pr_debug("%s: msc cmd done\n", __func__);
return 0;
//......... part of the code omitted here .........
Example 10: mdp4_dsi_video_do_blt
static void mdp4_dsi_video_do_blt(struct msm_fb_data_type *mfd, int enable)
{
unsigned long flag;
int cndx = 0;
struct vsycn_ctrl *vctrl;
struct mdp4_overlay_pipe *pipe;
vctrl = &vsync_ctrl_db[cndx];
pipe = vctrl->base_pipe;
mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
if (mfd->ov0_wb_buf->write_addr == 0) {
pr_info("%s: no blt_base assigned\n", __func__);
return;
}
spin_lock_irqsave(&vctrl->spin_lock, flag);
if (enable && pipe->ov_blt_addr == 0) {
pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
pipe->ov_cnt = 0;
pipe->dmap_cnt = 0;
vctrl->ov_koff = 0;
vctrl->ov_done = 0;
vctrl->blt_free = 0;
mdp4_stat.blt_dsi_video++;
vctrl->blt_change++;
} else if (enable == 0 && pipe->ov_blt_addr) {
pipe->ov_blt_addr = 0;
pipe->dma_blt_addr = 0;
vctrl->blt_free = 4;
vctrl->blt_change++;
}
pr_info("%s: changed=%d enable=%d ov_blt_addr=%x\n", __func__,
vctrl->blt_change, enable, (int)pipe->ov_blt_addr);
if (!vctrl->blt_change) {
spin_unlock_irqrestore(&vctrl->spin_lock, flag);
return;
}
spin_unlock_irqrestore(&vctrl->spin_lock, flag);
if (mdp_ov0_blt_ctl == MDP4_BLT_SWITCH_TG_OFF) {
int tg_enabled;
pr_debug("%s: blt enabled by switching TG off\n", __func__);
tg_enabled = inpdw(MDP_BASE + DSI_VIDEO_BASE) & 0x01;
if (tg_enabled) {
mdp4_dsi_video_wait4dmap_done(0);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
msleep(20);
}
mdp4_overlayproc_cfg(pipe);
mdp4_overlay_dmap_xy(pipe);
if (tg_enabled) {
if (pipe->ov_blt_addr) {
spin_lock_irqsave(&vctrl->spin_lock, flag);
pipe->ov_cnt++;
vctrl->ov_koff++;
mdp4_stat.kickoff_ov0++;
INIT_COMPLETION(vctrl->ov_comp);
vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
outpdw(MDP_BASE + 0x0004, 0);
spin_unlock_irqrestore(&vctrl->spin_lock, flag);
mdp4_dsi_video_wait4ov(0);
}
mipi_dsi_sw_reset();
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
}
vctrl->blt_change = 0;
}
}
Example 11: mdp4_dsi_video_pipe_commit
int mdp4_dsi_video_pipe_commit(int cndx, int wait)
{
int i, undx;
int mixer = 0;
struct vsycn_ctrl *vctrl;
struct vsync_update *vp;
struct mdp4_overlay_pipe *pipe;
struct mdp4_overlay_pipe *real_pipe;
unsigned long flags;
int cnt = 0;
vctrl = &vsync_ctrl_db[cndx];
mutex_lock(&vctrl->update_lock);
undx = vctrl->update_ndx;
vp = &vctrl->vlist[undx];
pipe = vctrl->base_pipe;
mixer = pipe->mixer_num;
if (vp->update_cnt == 0) {
mutex_unlock(&vctrl->update_lock);
return cnt;
}
vctrl->update_ndx++;
vctrl->update_ndx &= 0x01;
vp->update_cnt = 0;
if (vctrl->blt_free) {
vctrl->blt_free--;
if (vctrl->blt_free == 0)
mdp4_free_writeback_buf(vctrl->mfd, mixer);
}
mutex_unlock(&vctrl->update_lock);
mdp4_overlay_iommu_unmap_freelist(mixer);
spin_lock_irqsave(&vctrl->spin_lock, flags);
if (vctrl->ov_koff != vctrl->ov_done) {
spin_unlock_irqrestore(&vctrl->spin_lock, flags);
pr_err("%s: Error, frame dropped %d %d\n", __func__,
vctrl->ov_koff, vctrl->ov_done);
return 0;
}
spin_unlock_irqrestore(&vctrl->spin_lock, flags);
if (vctrl->blt_change) {
pipe = vctrl->base_pipe;
spin_lock_irqsave(&vctrl->spin_lock, flags);
INIT_COMPLETION(vctrl->dmap_comp);
INIT_COMPLETION(vctrl->ov_comp);
vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
spin_unlock_irqrestore(&vctrl->spin_lock, flags);
mdp4_dsi_video_wait4dmap(0);
if (pipe->ov_blt_addr)
mdp4_dsi_video_wait4ov(0);
}
pipe = vp->plist;
for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
if (pipe->pipe_used) {
cnt++;
real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
if (real_pipe && real_pipe->pipe_used) {
mdp4_overlay_vsync_commit(pipe);
}
mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
pipe->pipe_used = 0;
}
}
mdp4_mixer_stage_commit(mixer);
pipe = vctrl->base_pipe;
spin_lock_irqsave(&vctrl->spin_lock, flags);
if (pipe->ov_blt_addr) {
mdp4_dsi_video_blt_ov_update(pipe);
pipe->ov_cnt++;
INIT_COMPLETION(vctrl->ov_comp);
vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
mb();
vctrl->ov_koff++;
mdp4_stat.kickoff_ov0++;
outpdw(MDP_BASE + 0x0004, 0);
} else {
INIT_COMPLETION(vctrl->dmap_comp);
vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
}
spin_unlock_irqrestore(&vctrl->spin_lock, flags);
mdp4_stat.overlay_commit[pipe->mixer_num]++;
if (wait) {
if (pipe->ov_blt_addr)
mdp4_dsi_video_wait4ov(cndx);
//......... part of the code omitted here .........
Example 12: omap2_onenand_wait
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
struct onenand_chip *this = mtd->priv;
unsigned int intr = 0;
unsigned int ctrl, ctrl_mask;
unsigned long timeout;
u32 syscfg;
if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
state == FL_VERIFYING_ERASE) {
int i = 21;
unsigned int intr_flags = ONENAND_INT_MASTER;
switch (state) {
case FL_RESETING:
intr_flags |= ONENAND_INT_RESET;
break;
case FL_PREPARING_ERASE:
intr_flags |= ONENAND_INT_ERASE;
break;
case FL_VERIFYING_ERASE:
i = 101;
break;
}
while (--i) {
udelay(1);
intr = read_reg(c, ONENAND_REG_INTERRUPT);
if (intr & ONENAND_INT_MASTER)
break;
}
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
if (ctrl & ONENAND_CTRL_ERROR) {
wait_err("controller error", state, ctrl, intr);
return -EIO;
}
if ((intr & intr_flags) == intr_flags)
return 0;
/* Continue in wait for interrupt branch */
}
if (state != FL_READING) {
int result;
/* Turn interrupts on */
syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
syscfg |= ONENAND_SYS_CFG1_IOBE;
write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
if (cpu_is_omap34xx())
/* Add a delay to let GPIO settle */
syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
}
INIT_COMPLETION(c->irq_done);
if (c->gpio_irq) {
result = gpio_get_value(c->gpio_irq);
if (result == -1) {
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
intr = read_reg(c, ONENAND_REG_INTERRUPT);
wait_err("gpio error", state, ctrl, intr);
return -EIO;
}
} else
result = 0;
if (result == 0) {
int retry_cnt = 0;
retry:
result = wait_for_completion_timeout(&c->irq_done,
msecs_to_jiffies(20));
if (result == 0) {
/* Timeout after 20ms */
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
if (ctrl & ONENAND_CTRL_ONGO &&
!this->ongoing) {
/*
* The operation seems to be still going
* so give it some more time.
*/
retry_cnt += 1;
if (retry_cnt < 3)
goto retry;
intr = read_reg(c,
ONENAND_REG_INTERRUPT);
wait_err("timeout", state, ctrl, intr);
return -EIO;
}
intr = read_reg(c, ONENAND_REG_INTERRUPT);
if ((intr & ONENAND_INT_MASTER) == 0)
wait_warn("timeout", state, ctrl, intr);
}
}
} else {
int retry_cnt = 0;
/* Turn interrupts off */
syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
syscfg &= ~ONENAND_SYS_CFG1_IOBE;
write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
//......... part of the code omitted here .........
Example 13: mdp_pipe_kickoff
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
/* complete all the writes before starting */
wmb();
/* kick off PPP engine */
if (term == MDP_PPP_TERM) {
if (mdp_debug[MDP_PPP_BLOCK])
jiffies_to_timeval(jiffies, &mdp_ppp_timeval);
/* let's turn on PPP block */
mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
mdp_enable_irq(term);
INIT_COMPLETION(mdp_ppp_comp);
mdp_ppp_waiting = TRUE;
outpdw(MDP_BASE + 0x30, 0x1000);
wait_for_completion_killable(&mdp_ppp_comp);
mdp_disable_irq(term);
if (mdp_debug[MDP_PPP_BLOCK]) {
struct timeval now;
jiffies_to_timeval(jiffies, &now);
mdp_ppp_timeval.tv_usec =
now.tv_usec - mdp_ppp_timeval.tv_usec;
MSM_FB_INFO("MDP-PPP: %d\n",
(int)mdp_ppp_timeval.tv_usec);
}
} else if (term == MDP_DMA2_TERM) {
if (mdp_debug[MDP_DMA2_BLOCK]) {
MSM_FB_INFO("MDP-DMA2: %d\n",
(int)mdp_dma2_timeval.tv_usec);
jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
}
/* DMA update timestamp */
mdp_dma2_last_update_time = ktime_get_real();
/* let's turn on DMA2 block */
#if 0
mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#endif
#ifdef CONFIG_FB_MSM_MDP22
outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
#else
mdp_lut_enable();
#ifdef CONFIG_FB_MSM_MDP40
outpdw(MDP_BASE + 0x000c, 0x0); /* start DMA */
#else
outpdw(MDP_BASE + 0x0044, 0x0); /* start DMA */
#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
} else if (term == MDP_DMA_S_TERM) {
mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
outpdw(MDP_BASE + 0x0010, 0x0); /* start DMA */
} else if (term == MDP_DMA_E_TERM) {
mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
outpdw(MDP_BASE + 0x0014, 0x0); /* start DMA */
} else if (term == MDP_OVERLAY0_TERM) {
mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
mdp_lut_enable();
outpdw(MDP_BASE + 0x0004, 0);
} else if (term == MDP_OVERLAY1_TERM) {
mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
mdp_lut_enable();
outpdw(MDP_BASE + 0x0008, 0);
}
#else
} else if (term == MDP_DMA_S_TERM) {
Example 14: mdm_modem_ioctl
long mdm_modem_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int status, ret = 0;
if (_IOC_TYPE(cmd) != CHARM_CODE) {
pr_err("%s: invalid ioctl code\n", __func__);
return -EINVAL;
}
pr_debug("%s: Entering ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
switch (cmd) {
case WAKE_CHARM:
pr_info("%s: Powering on mdm\n", __func__);
mdm_drv->ops->power_on_mdm_cb(mdm_drv);
break;
case CHECK_FOR_BOOT:
if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
put_user(1, (unsigned long __user *) arg);
else
put_user(0, (unsigned long __user *) arg);
break;
case NORMAL_BOOT_DONE:
pr_debug("%s: check if mdm is booted up\n", __func__);
get_user(status, (unsigned long __user *) arg);
if (status) {
pr_debug("%s: normal boot failed\n", __func__);
mdm_drv->mdm_boot_status = -EIO;
} else {
pr_info("%s: normal boot done\n", __func__);
mdm_drv->mdm_boot_status = 0;
}
mdm_drv->mdm_ready = 1;
if (mdm_drv->ops->normal_boot_done_cb != NULL)
mdm_drv->ops->normal_boot_done_cb(mdm_drv);
if (!first_boot)
complete(&mdm_boot);
else
first_boot = 0;
/* If bootup succeeded, start a timer to check that the
* mdm2ap_status gpio goes high.
*/
if (!status && gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
schedule_delayed_work(&mdm2ap_status_check_work,
msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
break;
case RAM_DUMP_DONE:
pr_debug("%s: mdm done collecting RAM dumps\n", __func__);
get_user(status, (unsigned long __user *) arg);
if (status)
mdm_drv->mdm_ram_dump_status = -EIO;
else {
pr_info("%s: ramdump collection completed\n", __func__);
mdm_drv->mdm_ram_dump_status = 0;
}
complete(&mdm_ram_dumps);
break;
case WAIT_FOR_RESTART:
pr_debug("%s: wait for mdm to need images reloaded\n",
__func__);
ret = wait_for_completion_interruptible(&mdm_needs_reload);
if (!ret)
put_user(mdm_drv->boot_type,
(unsigned long __user *) arg);
INIT_COMPLETION(mdm_needs_reload);
break;
case GET_DLOAD_STATUS:
pr_debug("getting status of mdm2ap_errfatal_gpio\n");
if (gpio_get_value(mdm_drv->mdm2ap_errfatal_gpio) == 1 &&
!mdm_drv->mdm_ready)
put_user(1, (unsigned long __user *) arg);
else
put_user(0, (unsigned long __user *) arg);
break;
default:
pr_err("%s: invalid ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
ret = -EINVAL;
break;
}
return ret;
}
Example 15: mdp4_lcdc_pipe_commit
int mdp4_lcdc_pipe_commit(int cndx, int wait)
{
int i, undx;
int mixer = 0;
struct vsycn_ctrl *vctrl;
struct vsync_update *vp;
struct mdp4_overlay_pipe *pipe;
struct mdp4_overlay_pipe *real_pipe;
unsigned long flags;
int cnt = 0;
vctrl = &vsync_ctrl_db[cndx];
mutex_lock(&vctrl->update_lock);
undx = vctrl->update_ndx;
vp = &vctrl->vlist[undx];
pipe = vctrl->base_pipe;
if (pipe == NULL) {
pr_err("%s: NO base pipe\n", __func__);
mutex_unlock(&vctrl->update_lock);
return 0;
}
mixer = pipe->mixer_num;
mdp_update_pm(vctrl->mfd, vctrl->vsync_time);
/*
* allow stage_commit without pipes queued
* (vp->update_cnt == 0) to unstage pipes after
* overlay_unset
*/
vctrl->update_ndx++;
vctrl->update_ndx &= 0x01;
vp->update_cnt = 0; /* reset */
if (vctrl->blt_free) {
vctrl->blt_free--;
if (vctrl->blt_free == 0)
mdp4_free_writeback_buf(vctrl->mfd, mixer);
}
mutex_unlock(&vctrl->update_lock);
/* free previous committed iommu back to pool */
mdp4_overlay_iommu_unmap_freelist(mixer);
spin_lock_irqsave(&vctrl->spin_lock, flags);
if (vctrl->ov_koff != vctrl->ov_done) {
spin_unlock_irqrestore(&vctrl->spin_lock, flags);
pr_err("%s: Error, frame dropped %d %d\n", __func__,
vctrl->ov_koff, vctrl->ov_done);
return 0;
}
spin_unlock_irqrestore(&vctrl->spin_lock, flags);
mdp4_overlay_mdp_perf_upd(vctrl->mfd, 1);
if (vctrl->blt_change) {
pipe = vctrl->base_pipe;
spin_lock_irqsave(&vctrl->spin_lock, flags);
INIT_COMPLETION(vctrl->dmap_comp);
INIT_COMPLETION(vctrl->ov_comp);
vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
spin_unlock_irqrestore(&vctrl->spin_lock, flags);
mdp4_lcdc_wait4dmap(0);
if (pipe->ov_blt_addr)
mdp4_lcdc_wait4ov(0);
}
pipe = vp->plist;
for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
if (pipe->pipe_used) {
cnt++;
real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
if (real_pipe && real_pipe->pipe_used) {
/* pipe not unset */
mdp4_overlay_vsync_commit(pipe);
}
/* free previous iommu to freelist
* which will be freed at next
* pipe_commit
*/
mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
pipe->pipe_used = 0; /* clear */
}
}
mdp4_mixer_stage_commit(mixer);
/* start timing generator & mmu if they are not started yet */
mdp4_overlay_lcdc_start();
pipe = vctrl->base_pipe;
spin_lock_irqsave(&vctrl->spin_lock, flags);
if (pipe->ov_blt_addr) {
mdp4_lcdc_blt_ov_update(pipe);
pipe->ov_cnt++;
INIT_COMPLETION(vctrl->ov_comp);
vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
mb();
//......... part of the code omitted here .........