This article collects typical usage examples of DECLARE_COMPLETION_ONSTACK, a C macro from the Linux kernel's <linux/completion.h>. If you have been asking yourself how DECLARE_COMPLETION_ONSTACK is used, what it is for, or where to find real call sites, then you are in luck: the hand-picked examples below should help.
The following shows 15 code examples of DECLARE_COMPLETION_ONSTACK, sorted by popularity by default.
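All of these call sites follow the same pattern: DECLARE_COMPLETION_ONSTACK declares an already-initialized struct completion on the caller's stack, its address is handed to an asynchronous context (an IRQ handler, a bio end_io callback, a work item, ...), and the caller blocks in wait_for_completion() until that context calls complete(). As a minimal self-contained sketch of the pattern (the demo_* names are invented for illustration and do not exist in the kernel tree):

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_work {
	struct work_struct work;
	struct completion *done;	/* points into demo_sync()'s stack frame */
};

static void demo_work_fn(struct work_struct *work)
{
	struct demo_work *dw = container_of(work, struct demo_work, work);

	/* ... do the actual asynchronous work here ... */
	complete(dw->done);		/* wake the waiter */
}

static void demo_sync(void)
{
	DECLARE_COMPLETION_ONSTACK(done);	/* valid only while this frame lives */
	struct demo_work dw = { .done = &done };

	INIT_WORK_ONSTACK(&dw.work, demo_work_fn);
	schedule_work(&dw.work);
	wait_for_completion(&done);	/* must not return before complete() runs */
	destroy_work_on_stack(&dw.work);
}

The recurring constraint, visible in every example below, is lifetime: because the completion lives on the stack, the waiting function must not return while another context still holds a pointer to it (note how examples 8 and 12 clear wl->elp_compl under the spinlock on the timeout path).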
Example 1: blk_execute_rq
/**
* blk_execute_rq - insert a request into queue for execution
* @q: queue to insert the request in
* @bd_disk: matching gendisk
* @rq: request to insert
* @at_head: insert request at head or tail of queue
*
* Description:
* Insert a fully prepared request at the back of the I/O scheduler queue
* for execution and wait for completion.
*/
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
struct request *rq, int at_head)
{
DECLARE_COMPLETION_ONSTACK(wait);
char sense[SCSI_SENSE_BUFFERSIZE];
int err = 0;
unsigned long hang_check;
/*
* we need an extra reference to the request, so we can look at
* it after io completion
*/
rq->ref_count++;
if (!rq->sense) {
memset(sense, 0, sizeof(sense));
rq->sense = sense;
rq->sense_len = 0;
}
rq->end_io_data = &wait;
blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
/* Prevent hang_check timer from firing at us during very long I/O */
hang_check = sysctl_hung_task_timeout_secs;
if (hang_check)
while (!wait_for_completion_timeout(&wait, hang_check * (HZ/2)));
else
wait_for_completion(&wait);
if (rq->errors)
err = -EIO;
return err;
}
Example 2: hfsplus_submit_bio
int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
void *data, int rw)
{
DECLARE_COMPLETION_ONSTACK(wait);
struct bio *bio;
bio = bio_alloc(GFP_NOIO, 1);
bio->bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_end_io = hfsplus_end_io_sync;
bio->bi_private = &wait;
/*
* We always submit one sector at a time, so bio_add_page must not fail.
*/
if (bio_add_page(bio, virt_to_page(data), HFSPLUS_SECTOR_SIZE,
offset_in_page(data)) != HFSPLUS_SECTOR_SIZE)
BUG();
submit_bio(rw, bio);
wait_for_completion(&wait);
if (!bio_flagged(bio, BIO_UPTODATE))
return -EIO;
return 0;
}
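The bi_end_io callback wired up here is not part of the listing; in the hfsplus code of this era it was essentially the following (a sketch assuming the old-style bio completion signature):

static void hfsplus_end_io_sync(struct bio *bio, int err)
{
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	complete(bio->bi_private);	/* wake hfsplus_submit_bio() */
}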
Example 3: sysfs_deactivate
/**
* sysfs_deactivate - deactivate sysfs_dirent
* @sd: sysfs_dirent to deactivate
*
* Deny new active references and drain existing ones.
*/
static void sysfs_deactivate(struct sysfs_dirent *sd)
{
DECLARE_COMPLETION_ONSTACK(wait);
int v;
BUG_ON(sd->s_sibling || !(sd->s_flags & SYSFS_FLAG_REMOVED));
if (!(sysfs_type(sd) & SYSFS_ACTIVE_REF))
return;
sd->s_sibling = (void *)&wait;
rwsem_acquire(&sd->dep_map, 0, 0, _RET_IP_);
/* atomic_add_return() is a mb(), put_active() will always see
* the updated sd->s_sibling.
*/
v = atomic_add_return(SD_DEACTIVATED_BIAS, &sd->s_active);
if (v != SD_DEACTIVATED_BIAS) {
lock_contended(&sd->dep_map, _RET_IP_);
wait_for_completion(&wait);
}
sd->s_sibling = NULL;
lock_acquired(&sd->dep_map, _RET_IP_);
rwsem_release(&sd->dep_map, 1, _RET_IP_);
}
Example 4: qup_i2c_xfer
static int
qup_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
DECLARE_COMPLETION_ONSTACK(complete);
struct qup_i2c_dev *dev = i2c_get_adapdata(adap);
int ret;
int rem = num;
long timeout;
int err;
unsigned long flags;
del_timer_sync(&dev->pwr_timer);
mutex_lock(&dev->mlock);
if (dev->suspended) {
mutex_unlock(&dev->mlock);
dev_err(dev->dev, "qup_i2c_xfer: dev suspended, return Error!\n");
return -EIO;
}
if (dev->clk_state == 0) {
if (dev->clk_ctl == 0) {
if (dev->pdata->src_clk_rate > 0)
clk_set_rate(dev->clk,
dev->pdata->src_clk_rate);
else
dev->pdata->src_clk_rate = 19200000;
}
qup_i2c_pwr_mgmt(dev, 1);
}
/* Initialize QUP registers during first transfer */
if (dev->clk_ctl == 0) {
int fs_div;
int hs_div;
uint32_t fifo_reg;
if (dev->gsbi) {
writel(0x2 << 4, dev->gsbi);
/* GSBI memory is not in the same 1K region as other
* QUP registers. dsb() here ensures that the GSBI
* register is updated in correct order and that the
* write has gone through before programming QUP core
* registers
*/
dsb();
}
fs_div = ((dev->pdata->src_clk_rate
/ dev->pdata->clk_freq) / 2) - 3;
hs_div = 3;
dev->clk_ctl = ((hs_div & 0x7) << 8) | (fs_div & 0xff);
fifo_reg = readl(dev->base + QUP_IO_MODE);
if (fifo_reg & 0x3)
dev->out_blk_sz = (fifo_reg & 0x3) * 16;
else
dev->out_blk_sz = 16;
if (fifo_reg & 0x60)
dev->in_blk_sz = ((fifo_reg & 0x60) >> 5) * 16;
		else
			dev->in_blk_sz = 16;
	/* ... the remainder of this example is truncated in the original listing ... */
Example 5: blkdev_issue_write_same
/**
* blkdev_issue_write_same - queue a write same operation
* @bdev: target blockdev
* @sector: start sector
* @nr_sects: number of sectors to write
* @gfp_mask: memory allocation flags (for bio_alloc)
* @page: page containing data to write
*
* Description:
* Issue a write same request for the sectors in question.
*/
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask,
struct page *page)
{
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
unsigned int max_write_same_sectors;
struct bio_batch bb;
struct bio *bio;
int ret = 0;
if (!q)
return -ENXIO;
/* Ensure that max_write_same_sectors doesn't overflow bi_size */
max_write_same_sectors = UINT_MAX >> 9;
atomic_set(&bb.done, 1);
bb.error = 0;
bb.wait = &wait;
while (nr_sects) {
bio = bio_alloc(gfp_mask, 1);
if (!bio) {
ret = -ENOMEM;
break;
}
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
bio->bi_private = &bb;
bio->bi_vcnt = 1;
bio->bi_io_vec->bv_page = page;
bio->bi_io_vec->bv_offset = 0;
bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
if (nr_sects > max_write_same_sectors) {
bio->bi_iter.bi_size = max_write_same_sectors << 9;
nr_sects -= max_write_same_sectors;
sector += max_write_same_sectors;
} else {
bio->bi_iter.bi_size = nr_sects << 9;
nr_sects = 0;
}
atomic_inc(&bb.done);
submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
}
/* Wait for bios in-flight */
if (!atomic_dec_and_test(&bb.done))
wait_for_completion_io(&wait);
if (bb.error)
return bb.error;
return ret;
}
Example 6: smu_set_fan
static int smu_set_fan(int pwm, u8 id, u16 value)
{
struct smu_cmd cmd;
u8 buffer[16];
DECLARE_COMPLETION_ONSTACK(comp);
int rc;
/* Fill SMU command structure */
cmd.cmd = SMU_CMD_FAN_COMMAND;
/* The SMU has an "old" and a "new" way of setting the fan speed.
 * Unfortunately, I found no reliable way to know which one works
 * on a given machine model. After some investigation it appears
 * that MacOS X just tries the new one, and if it fails falls back
 * to the old one ... Ugh.
 */
retry:
if (smu_supports_new_fans_ops) {
buffer[0] = 0x30;
buffer[1] = id;
*((u16 *)(&buffer[2])) = value;
cmd.data_len = 4;
} else {
if (id > 7)
return -EINVAL;
/* Fill argument buffer */
memset(buffer, 0, 16);
buffer[0] = pwm ? 0x10 : 0x00;
buffer[1] = 0x01 << id;
*((u16 *)&buffer[2 + id * 2]) = value;
cmd.data_len = 14;
}
cmd.reply_len = 16;
cmd.data_buf = cmd.reply_buf = buffer;
cmd.status = 0;
cmd.done = smu_done_complete;
cmd.misc = &comp;
rc = smu_queue_cmd(&cmd);
if (rc)
return rc;
wait_for_completion(&comp);
/* Handle fallback (see comment above) */
if (cmd.status != 0 && smu_supports_new_fans_ops) {
#ifdef CONFIG_DEBUG_PRINTK
printk(KERN_WARNING "windfarm: SMU failed new fan command "
"falling back to old method\n");
#else
;
#endif
smu_supports_new_fans_ops = 0;
goto retry;
}
return cmd.status;
}
Example 7: dispc_mgr_disable_digit_out
static void dispc_mgr_disable_digit_out(void)
{
DECLARE_COMPLETION_ONSTACK(framedone_compl);
int r, i;
u32 irq_mask;
int num_irqs;
if (dispc_mgr_is_enabled(OMAP_DSS_CHANNEL_DIGIT) == false)
return;
/*
* When we disable the digit output, we need to wait for FRAMEDONE to
* know that DISPC has finished with the output.
*/
irq_mask = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_DIGIT);
num_irqs = 1;
if (!irq_mask) {
/*
* omap 2/3 don't have framedone irq for TV, so we need to use
* vsyncs for this.
*/
irq_mask = dispc_mgr_get_vsync_irq(OMAP_DSS_CHANNEL_DIGIT);
/*
* We need to wait for both even and odd vsyncs. Note that this
* is not totally reliable, as we could get a vsync interrupt
* before we disable the output, which leads to timeout in the
* wait_for_completion.
*/
num_irqs = 2;
}
r = omap_dispc_register_isr(dispc_mgr_disable_isr, &framedone_compl,
irq_mask);
if (r)
DSSERR("failed to register %x isr\n", irq_mask);
dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, false);
/* if we couldn't register the irq, just sleep and exit */
if (r) {
msleep(100);
return;
}
for (i = 0; i < num_irqs; ++i) {
if (!wait_for_completion_timeout(&framedone_compl,
msecs_to_jiffies(100)))
DSSERR("timeout waiting for digit out to stop\n");
}
r = omap_dispc_unregister_isr(dispc_mgr_disable_isr, &framedone_compl,
irq_mask);
if (r)
DSSERR("failed to unregister %x isr\n", irq_mask);
}
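The registered ISR is expected to do nothing except signal the on-stack completion. A sketch of the likely shape of dispc_mgr_disable_isr, assuming the OMAP DSS ISR convention of passing the registered data pointer plus the IRQ status mask:

static void dispc_mgr_disable_isr(void *data, u32 mask)
{
	struct completion *compl = data;

	complete(compl);
}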
Example 8: wl1271_ps_elp_wakeup
int wl1271_ps_elp_wakeup(struct wl1271 *wl)
{
DECLARE_COMPLETION_ONSTACK(compl);
unsigned long flags;
int ret;
u32 start_time = jiffies;
bool pending = false;
if (!test_and_clear_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))
return 0;
cancel_delayed_work(&wl->elp_work);
if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
return 0;
wl1271_debug(DEBUG_PSM, "waking up chip from elp");
spin_lock_irqsave(&wl->wl_lock, flags);
if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
pending = true;
else
wl->elp_compl = &compl;
spin_unlock_irqrestore(&wl->wl_lock, flags);
wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
if (!pending) {
ret = wait_for_completion_timeout(
&compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
if (ret == 0) {
wl1271_error("ELP wakeup timeout!");
wl12xx_queue_recovery_work(wl);
ret = -ETIMEDOUT;
goto err;
} else if (ret < 0) {
wl1271_error("ELP wakeup completion error.");
goto err;
}
}
clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
jiffies_to_msecs(jiffies - start_time));
goto out;
err:
spin_lock_irqsave(&wl->wl_lock, flags);
wl->elp_compl = NULL;
spin_unlock_irqrestore(&wl->wl_lock, flags);
return ret;
out:
return 0;
}
Example 9: pm8001_phy_control
/**
 * pm8001_phy_control - phy control callback registered in the
 * sas_domain_function_template for libsas to use. Note that this only
 * controls the HBA phys; to control a phy behind an expander, an SMP
 * command must be used instead.
 * @sas_phy: which phy in the HBA phys.
 * @func: the operation to perform.
 * @funcdata: always NULL.
 */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata)
{
int rc = 0, phy_id = sas_phy->id;
struct pm8001_hba_info *pm8001_ha = NULL;
struct sas_phy_linkrates *rates;
DECLARE_COMPLETION_ONSTACK(completion);
pm8001_ha = sas_phy->ha->lldd_ha;
pm8001_ha->phy[phy_id].enable_completion = &completion;
switch (func) {
case PHY_FUNC_SET_LINK_RATE:
rates = funcdata;
if (rates->minimum_linkrate) {
pm8001_ha->phy[phy_id].minimum_linkrate =
rates->minimum_linkrate;
}
if (rates->maximum_linkrate) {
pm8001_ha->phy[phy_id].maximum_linkrate =
rates->maximum_linkrate;
}
if (pm8001_ha->phy[phy_id].phy_state == 0) {
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
PHY_LINK_RESET);
break;
case PHY_FUNC_HARD_RESET:
if (pm8001_ha->phy[phy_id].phy_state == 0) {
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
PHY_HARD_RESET);
break;
case PHY_FUNC_LINK_RESET:
if (pm8001_ha->phy[phy_id].phy_state == 0) {
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
PHY_LINK_RESET);
break;
case PHY_FUNC_RELEASE_SPINUP_HOLD:
PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
PHY_LINK_RESET);
break;
case PHY_FUNC_DISABLE:
PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
break;
default:
rc = -EOPNOTSUPP;
}
msleep(300);
return rc;
}
Example 10: msm_rpm_set_exclusive
static int msm_rpm_set_exclusive(int ctx,
uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
DECLARE_COMPLETION_ONSTACK(ack);
unsigned long flags;
uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
uint32_t ctx_mask_ack = 0;
uint32_t sel_masks_ack[SEL_MASK_SIZE];
int i;
msm_rpm_request_irq_mode.req = req;
msm_rpm_request_irq_mode.count = count;
msm_rpm_request_irq_mode.ctx_mask_ack = &ctx_mask_ack;
msm_rpm_request_irq_mode.sel_masks_ack = sel_masks_ack;
msm_rpm_request_irq_mode.done = &ack;
spin_lock_irqsave(&msm_rpm_lock, flags);
spin_lock(&msm_rpm_irq_lock);
BUG_ON(msm_rpm_request);
msm_rpm_request = &msm_rpm_request_irq_mode;
for (i = 0; i < count; i++) {
BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
msm_rpm_write(MSM_RPM_PAGE_REQ,
target_enum(req[i].id), req[i].value);
}
msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
sel_masks, msm_rpm_sel_mask_size);
msm_rpm_write(MSM_RPM_PAGE_CTRL,
target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);
/* Ensure RPM data is written before sending the interrupt */
mb();
msm_rpm_send_req_interrupt();
spin_unlock(&msm_rpm_irq_lock);
spin_unlock_irqrestore(&msm_rpm_lock, flags);
wait_for_completion(&ack);
BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
!= ctx_mask);
BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));
if (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)) {
//pr_warn("[K] %s: following request is rejected by rpm\n", __func__);
for (i = 0; i < count; i++)
/*pr_warn("[K] %s: id: %d, value: %d\n", __func__, req[i].id, req[i].value)*/;
return -ENOSPC;
} else {
return 0;
}
}
Example 11: msm_rpm_set_exclusive
/* Upon return, the <req> array will contain values from the ack page.
*
* Note: assumes caller has acquired <msm_rpm_mutex>.
*
* Return value:
* 0: success
* -ENOSPC: request rejected
*/
static int msm_rpm_set_exclusive(int ctx,
uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
DECLARE_COMPLETION_ONSTACK(ack);
unsigned long flags;
uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
uint32_t ctx_mask_ack = 0;
uint32_t sel_masks_ack[SEL_MASK_SIZE];
int i;
msm_rpm_request_irq_mode.req = req;
msm_rpm_request_irq_mode.count = count;
msm_rpm_request_irq_mode.ctx_mask_ack = &ctx_mask_ack;
msm_rpm_request_irq_mode.sel_masks_ack = sel_masks_ack;
msm_rpm_request_irq_mode.done = &ack;
spin_lock_irqsave(&msm_rpm_lock, flags);
spin_lock(&msm_rpm_irq_lock);
BUG_ON(msm_rpm_request);
msm_rpm_request = &msm_rpm_request_irq_mode;
for (i = 0; i < count; i++) {
BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
msm_rpm_write(MSM_RPM_PAGE_REQ,
target_enum(req[i].id), req[i].value);
}
msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
sel_masks, msm_rpm_sel_mask_size);
msm_rpm_write(MSM_RPM_PAGE_CTRL,
target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);
/* Ensure RPM data is written before sending the interrupt */
mb();
#if defined(CONFIG_PANTECH_DEBUG)
#if defined(CONFIG_PANTECH_DEBUG_RPM_LOG) //p14291_121102
pantech_debug_rpm_log(1, req->id, req->value);
#endif
#endif
msm_rpm_send_req_interrupt();
spin_unlock(&msm_rpm_irq_lock);
spin_unlock_irqrestore(&msm_rpm_lock, flags);
wait_for_completion(&ack);
BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
!= ctx_mask);
BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));
return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
? -ENOSPC : 0;
}
Example 12: wl1271_ps_elp_wakeup
int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
{
DECLARE_COMPLETION_ONSTACK(compl);
unsigned long flags;
int ret;
u32 start_time = jiffies;
bool pending = false;
if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
return 0;
wl1271_debug(DEBUG_PSM, "waking up chip from elp");
/*
* The spinlock is required here to synchronize both the work and
* the completion variable in one entity.
*/
spin_lock_irqsave(&wl->wl_lock, flags);
if (work_pending(&wl->irq_work) || chip_awake)
pending = true;
else
wl->elp_compl = &compl;
spin_unlock_irqrestore(&wl->wl_lock, flags);
wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
if (!pending) {
ret = wait_for_completion_timeout(
&compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
if (ret == 0) {
wl1271_error("ELP wakeup timeout!");
ieee80211_queue_work(wl->hw, &wl->recovery_work);
ret = -ETIMEDOUT;
goto err;
} else if (ret < 0) {
wl1271_error("ELP wakeup completion error.");
goto err;
}
}
clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
jiffies_to_msecs(jiffies - start_time));
goto out;
err:
spin_lock_irqsave(&wl->wl_lock, flags);
wl->elp_compl = NULL;
spin_unlock_irqrestore(&wl->wl_lock, flags);
return ret;
out:
return 0;
}
Example 13: mmc_wait_for_req
/**
* mmc_wait_for_req - start a request and wait for completion
* @host: MMC host to start command
* @mrq: MMC request to start
*
* Start a new MMC custom command request for a host, and wait
* for the command to complete. Does not attempt to parse the
* response.
*/
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
DECLARE_COMPLETION_ONSTACK(complete);
mrq->done_data = &complete;
mrq->done = mmc_wait_done;
mmc_start_request(host, mrq);
wait_for_completion(&complete);
}
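The done callback is not shown in the listing; in kernels that still had the done_data field, mmc_wait_done() was essentially a one-liner (sketch):

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(mrq->done_data);	/* wake mmc_wait_for_req() */
}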
Example 14: hfsplus_submit_bio
/*
* hfsplus_submit_bio - Perform block I/O
* @sb: super block of volume for I/O
* @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
* @buf: buffer for I/O
* @data: output pointer for location of requested data
* @rw: direction of I/O
*
* The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
* HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
* @data will return a pointer to the start of the requested sector,
* which may not be the same location as @buf.
*
* If @sector is not aligned to the bdev logical block size it will
* be rounded down. For writes this means that @buf should contain data
* that starts at the rounded-down address. As long as the data was
* read using hfsplus_submit_bio() and the same buffer is used things
* will work correctly.
*/
int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
void *buf, void **data, int rw)
{
DECLARE_COMPLETION_ONSTACK(wait);
struct bio *bio;
int ret = 0;
u64 io_size;
loff_t start;
int offset;
/*
* Align sector to hardware sector size and find offset. We
* assume that io_size is a power of two, which _should_
* be true.
*/
io_size = hfsplus_min_io_size(sb);
start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
offset = start & (io_size - 1);
sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
bio = bio_alloc(GFP_NOIO, 1);
bio->bi_sector = sector;
bio->bi_bdev = sb->s_bdev;
bio->bi_end_io = hfsplus_end_io_sync;
bio->bi_private = &wait;
if (!(rw & WRITE) && data)
*data = (u8 *)buf + offset;
while (io_size > 0) {
unsigned int page_offset = offset_in_page(buf);
unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,
io_size);
ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
if (ret != len) {
ret = -EIO;
goto out;
}
io_size -= len;
buf = (u8 *)buf + len;
}
submit_bio(rw, bio);
wait_for_completion(&wait);
if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;
out:
bio_put(bio);
return ret < 0 ? ret : 0;
}
Example 15: dit4192_spi_read_device
static int dit4192_spi_read_device(struct dit4192 *dit4192, u8 reg, int bytes, u8 *buf)
{
int ret;
unsigned char header[2];
struct spi_transfer spi_transfer_w;
struct spi_transfer spi_transfer_r;
struct spi_message spi_message;
DECLARE_COMPLETION_ONSTACK(context);
memset(&spi_transfer_w, 0, sizeof(struct spi_transfer));
memset(&spi_transfer_r, 0, sizeof(struct spi_transfer));
memset(&spi_message, 0, sizeof(struct spi_message));
spi_setup(dit4192->spi);
spi_message_init(&spi_message);
header[DIT4192_HEADER_0] = DIT4192_CMD_R | DIT4192_IO_STEP_1 | reg; //0x80
header[DIT4192_HEADER_1] = 0;
spi_transfer_w.tx_buf = header;
spi_transfer_w.len = 2;
spi_message_add_tail(&spi_transfer_w, &spi_message);
spi_transfer_r.rx_buf = buf;
spi_transfer_r.len = bytes;
spi_message_add_tail(&spi_transfer_r, &spi_message);
spi_message.complete = dit4192_spi_completion_cb;
spi_message.context = &context;
/* must use spi_async in a context that may sleep */
ret = spi_async(dit4192->spi, &spi_message);
if (ret == 0) {
wait_for_completion(&context);
		if (spi_message.status == 0) {
			/* spi_message.actual_length should contain the number
			 * of bytes actually read and ret should be updated to
			 * that length, but since this driver doesn't use it,
			 * assume all requested bytes were read.
			 */
			ret = bytes;
		} else {
			ret = -EFAULT;	/* transfer completed with an error */
		}
} else {
pr_err("%s: Error calling spi_async, ret = %d\n", __func__, ret);
}
return ret;
}
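The dit4192_spi_completion_cb routine assigned to spi_message.complete is not shown in the listing. Since spi_async() invokes the completion callback with the message's context pointer, it is presumably just (sketch):

static void dit4192_spi_completion_cb(void *arg)
{
	complete(arg);	/* arg is the on-stack completion declared above */
}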