This article collects typical usage examples of the rq_is_sync function from the Linux block layer (C). If you are unsure what rq_is_sync does or how to call it, the curated examples below may help.
The following presents 10 code examples of rq_is_sync, ordered by popularity.
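For orientation before the examples: rq_is_sync() reports whether a request counts as synchronous I/O, which the block layer uses to prioritize reads and explicitly flagged writes over background writeback. A minimal sketch of its definition as it appeared in older (pre-blk-mq) kernels; newer kernels express the same test via op_is_sync():

static inline bool rw_is_sync(unsigned int rw_flags)
{
	/* Reads are always sync; writes only when REQ_SYNC is set. */
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}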
Example 1: sio_latter_request
static struct request *
sio_latter_request(struct request_queue *q, struct request *rq)
{
struct sio_data *sd = q->elevator->elevator_data;
const int sync = rq_is_sync(rq);
const int data_dir = rq_data_dir(rq);
if (rq->queuelist.next == &sd->fifo_list[sync][data_dir])
return NULL;
/* Return latter request */
return list_entry(rq->queuelist.next, struct request, queuelist);
}
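SIO keeps one FIFO per (sync, direction) pair, and elevators usually expose this helper together with a mirror image that walks the list backwards. A sketch of that counterpart, assuming the same sio_data layout as above:

static struct request *
sio_former_request(struct request_queue *q, struct request *rq)
{
	struct sio_data *sd = q->elevator->elevator_data;
	const int sync = rq_is_sync(rq);
	const int data_dir = rq_data_dir(rq);

	/* rq is the first entry in its FIFO: no former request */
	if (rq->queuelist.prev == &sd->fifo_list[sync][data_dir])
		return NULL;

	/* Return former request */
	return list_entry(rq->queuelist.prev, struct request, queuelist);
}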
Example 2: vr_add_request
/*
* add rq to rbtree and fifo
*/
static void
vr_add_request(struct request_queue *q, struct request *rq)
{
struct vr_data *vd = vr_get_data(q);
const int dir = rq_is_sync(rq);
vr_add_rq_rb(vd, rq);
if (vd->fifo_expire[dir]) {
rq_set_fifo_time(rq, jiffies + vd->fifo_expire[dir]);
list_add_tail(&rq->queuelist, &vd->fifo_list[dir]);
}
}
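The deadline stamped here is consumed on the dispatch side, where the head of each FIFO is compared against jiffies to decide whether an aged request must be served ahead of the rbtree order. A hedged sketch of that check (the helper name vr_check_fifo and its exact shape are assumptions; the vd fields match the example above):

static struct request *
vr_check_fifo(struct vr_data *vd, int sync)
{
	struct request *rq;

	if (list_empty(&vd->fifo_list[sync]))
		return NULL;

	/* The oldest request sits at the head of the FIFO */
	rq = rq_entry_fifo(vd->fifo_list[sync].next);
	if (time_after_eq(jiffies, rq_fifo_time(rq)))
		return rq;

	return NULL;
}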
Example 3: row_get_queue_prio
/*
 * row_get_queue_prio() - Get queue priority for a given request
 *
 * This is a helper function whose purpose is to determine which
 * ROW queue the given request should be added to (and dispatched
 * from later on).
 */
static enum row_queue_prio row_get_queue_prio(struct request *rq,
struct row_data *rd)
{
const int data_dir = rq_data_dir(rq);
const bool is_sync = rq_is_sync(rq);
enum row_queue_prio q_type = ROWQ_MAX_PRIO;
int ioprio_class = IOPRIO_PRIO_CLASS(rq->elv.icq->ioc->ioprio);
if (unlikely(row_get_current()->flags & PF_MUTEX_GC)) {
if (data_dir == READ)
q_type = ROWQ_PRIO_HIGH_READ;
else
q_type = ROWQ_PRIO_HIGH_SWRITE;
return q_type;
}
switch (ioprio_class) {
case IOPRIO_CLASS_RT:
if (data_dir == READ)
q_type = ROWQ_PRIO_HIGH_READ;
else if (is_sync)
q_type = ROWQ_PRIO_HIGH_SWRITE;
else {
q_type = ROWQ_PRIO_REG_WRITE;
}
break;
case IOPRIO_CLASS_IDLE:
if (data_dir == READ)
q_type = ROWQ_PRIO_LOW_READ;
else if (is_sync)
q_type = ROWQ_PRIO_LOW_SWRITE;
else {
pr_err("%s:%s(): got a simple write from IDLE_CLASS. How???",/*lint !e585*/
rq->rq_disk->disk_name, __func__);
q_type = ROWQ_PRIO_REG_WRITE;
}
break;
case IOPRIO_CLASS_NONE:
case IOPRIO_CLASS_BE:
default:
if (data_dir == READ)
q_type = ROWQ_PRIO_REG_READ;
else if (is_sync)
q_type = ROWQ_PRIO_REG_SWRITE;
else
q_type = ROWQ_PRIO_REG_WRITE;
break;
}
return q_type;
}
Example 4: fiops_completed_request
static void fiops_completed_request(struct request_queue *q, struct request *rq)
{
struct fiops_data *fiopsd = q->elevator->elevator_data;
struct fiops_ioc *ioc = RQ_CIC(rq);
fiopsd->in_flight[rq_is_sync(rq)]--;
ioc->in_flight--;
fiops_log_ioc(fiopsd, ioc, "in_flight %d, busy queues %d",
ioc->in_flight, fiopsd->busy_queues);
if (fiopsd->in_flight[0] + fiopsd->in_flight[1] == 0)
fiops_schedule_dispatch(fiopsd);
}
Developer: Minia89, Project: Note-3-AEL-Kernel-SM-N9005_EUR_LL_Opensource_Update2., Lines: 14, Source: fiops-iosched.c
Example 5: fiops_scaled_vios
static u64 fiops_scaled_vios(struct fiops_data *fiopsd,
struct fiops_ioc *ioc, struct request *rq)
{
int vios = VIOS_SCALE;
if (rq_data_dir(rq) == WRITE)
vios = vios * fiopsd->write_scale / fiopsd->read_scale;
if (!rq_is_sync(rq))
vios = vios * fiopsd->async_scale / fiopsd->sync_scale;
vios += vios * (ioc->ioprio - IOPRIO_NORM) / VIOS_PRIO_SCALE;
return vios;
}
Developer: Minia89, Project: Note-3-AEL-Kernel-SM-N9005_EUR_LL_Opensource_Update2., Lines: 15, Source: fiops-iosched.c
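To make the compounding concrete with hypothetical tunables (these values are illustrative, not FIOPS defaults): with write_scale/read_scale = 2/1 and async_scale/sync_scale = 3/1, an asynchronous write is charged 2 × 3 = 6 × VIOS_SCALE. The final line then adds vios × (ioprio − IOPRIO_NORM) / VIOS_PRIO_SCALE, so contexts with numerically higher (i.e. lower-priority) ioprio values pay more vios per request and are dispatched less often.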
Example 6: blk_queue_start_tag
/**
* blk_queue_start_tag - find a free tag and assign it
* @q: the request queue for the device
* @rq: the block request that needs tagging
*
* Description:
* This can either be used as a stand-alone helper, or possibly be
* assigned as the queue &prep_rq_fn (in which case &struct request
* automagically gets a tag assigned). Note that this function
 * assumes that any type of request can be queued! If this is not
 * true for your device, you must check the request type before
 * calling this function. The request will also be removed from
 * the request queue, so it's the driver's responsibility to re-add
 * it if it should need to be restarted for some reason.
*
* Notes:
* queue lock must be held.
**/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
unsigned max_depth;
int tag;
if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
printk(KERN_ERR
"%s: request %p for device [%s] already tagged %d",
__func__, rq,
rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
BUG();
}
/*
* Protect against shared tag maps, as we may not have exclusive
* access to the tag map.
*
* We reserve a few tags just for sync IO, since we don't want
* to starve sync IO on behalf of flooding async IO.
*/
max_depth = bqt->max_depth;
if (!rq_is_sync(rq) && max_depth > 1) {
max_depth -= 2;
if (!max_depth)
max_depth = 1;
if (q->in_flight[0] > max_depth)
return 1;
}
do {
tag = find_first_zero_bit(bqt->tag_map, max_depth);
if (tag >= max_depth)
return 1;
} while (test_and_set_bit_lock(tag, bqt->tag_map));
/*
* We need lock ordering semantics given by test_and_set_bit_lock.
* See blk_queue_end_tag for details.
*/
rq->cmd_flags |= REQ_QUEUED;
rq->tag = tag;
bqt->tag_index[tag] = rq;
blk_start_request(rq);
list_add(&rq->queuelist, &q->tag_busy_list);
return 0;
}
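A tag taken here is handed back through blk_queue_end_tag(), which unwinds each step above. A condensed sketch of that release path (error-path printks omitted; field names follow the same blk_queue_tag structure):

void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned tag = rq->tag;

	BUG_ON(tag >= bqt->real_max_depth);

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;
	bqt->tag_index[tag] = NULL;

	/*
	 * Pairs with test_and_set_bit_lock() in blk_queue_start_tag():
	 * the release ordering makes the tag_index update visible
	 * before the tag can be reallocated.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}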
Example 7: fiops_dispatch_request
/* return vios dispatched */
static u64 fiops_dispatch_request(struct fiops_data *fiopsd,
struct fiops_ioc *ioc)
{
struct request *rq;
struct request_queue *q = fiopsd->queue;
rq = rq_entry_fifo(ioc->fifo.next);
fiops_remove_request(rq);
elv_dispatch_add_tail(q, rq);
fiopsd->in_flight[rq_is_sync(rq)]++;
ioc->in_flight++;
return fiops_scaled_vios(fiopsd, ioc, rq);
}
Developer: Minia89, Project: Note-3-AEL-Kernel-SM-N9005_EUR_LL_Opensource_Update2., Lines: 17, Source: fiops-iosched.c
Example 8: row_get_queue_prio
/*
 * row_get_queue_prio() - Get queue priority for a given request
 *
 * This is a helper function whose purpose is to determine which
 * ROW queue the given request should be added to (and dispatched
 * from later on).
 */
static enum row_queue_prio row_get_queue_prio(struct request *rq)
{
const int data_dir = rq_data_dir(rq);
const bool is_sync = rq_is_sync(rq);
enum row_queue_prio q_type = ROWQ_MAX_PRIO;
int ioprio_class = IOPRIO_PRIO_CLASS(rq->elv.icq->ioc->ioprio);
switch (ioprio_class) {
case IOPRIO_CLASS_RT:
if (data_dir == READ)
q_type = ROWQ_PRIO_HIGH_READ;
else if (is_sync)
q_type = ROWQ_PRIO_HIGH_SWRITE;
else {
pr_err("%s:%s(): got a simple write from RT_CLASS. How???",
rq->rq_disk->disk_name, __func__);
q_type = ROWQ_PRIO_REG_WRITE;
}
rq->cmd_flags |= REQ_URGENT;
break;
case IOPRIO_CLASS_IDLE:
if (data_dir == READ)
q_type = ROWQ_PRIO_LOW_READ;
else if (is_sync)
q_type = ROWQ_PRIO_LOW_SWRITE;
else {
pr_err("%s:%s(): got a simple write from IDLE_CLASS. How???",
rq->rq_disk->disk_name, __func__);
q_type = ROWQ_PRIO_REG_WRITE;
}
break;
case IOPRIO_CLASS_NONE:
case IOPRIO_CLASS_BE:
default:
if (data_dir == READ)
q_type = ROWQ_PRIO_REG_READ;
else if (is_sync)
q_type = ROWQ_PRIO_REG_SWRITE;
else
q_type = ROWQ_PRIO_REG_WRITE;
break;
}
return q_type;
}
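For context, a hypothetical sketch of where this helper slots into the elevator's add_request path (the row_queues[].fifo layout is an assumption for illustration, not the actual ROW structure):

/* Hypothetical caller sketch; row_queues[].fifo is assumed for illustration */
static void row_add_request(struct request_queue *q, struct request *rq)
{
	struct row_data *rd = q->elevator->elevator_data;
	const enum row_queue_prio prio = row_get_queue_prio(rq);

	list_add_tail(&rq->queuelist, &rd->row_queues[prio].fifo);
}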
Example 9: gp_sdcard_xfer_request
/**
* @brief Request service function.
* @param sd[in]: Card information.
 * @param req[in]: The block request to service.
* @return SUCCESS/ERROR_ID.
*/
static int gp_sdcard_xfer_request(gpSDInfo_t *sd, struct request *req)
{
int ret = 1;
while (ret)
{
unsigned int ln;
unsigned int retry = 0;
ln = blk_rq_map_sg(sd->queue, req, sd->sg);
#if 0 /* This is used for usb disk check */
{
bool do_sync = (rq_is_sync(req) && rq_data_dir(req) == WRITE);
if (do_sync)
{
DEBUG("[Jerry] detect do write sync\n");
}
}
#endif
while(1)
{
ret = gp_sdcard_transfer_scatter(sd, blk_rq_pos(req), sd->sg, ln, rq_data_dir(req));
/* ----- Re-try procedure ----- */
if(ret<0)
{
unsigned int cid[4];
unsigned int capacity;
if((retry>=SD_RETRY)||(gp_sdcard_ckinsert(sd)==0)||sd->fremove)
goto out_error;
/* ----- Re-initialize sd card ----- */
memcpy(cid, sd->CID, sizeof(cid));
capacity = sd->capacity;
if(gp_sdcard_cardinit(sd)!=0)
{
DERROR("[%d]: Re-initialize fail\n",sd->device_id);
goto out_error;
}
else if((cid[0]!=sd->CID[0])||(cid[1]!=sd->CID[1])||(cid[2]!=sd->CID[2])||(cid[3]!=sd->CID[3])||(capacity!=sd->capacity))
{
DERROR("[%d]: Different card insert\n",sd->device_id);
goto out_error;
}
retry ++;
}
else
break;
}
/* ----- End of request ----- */
spin_lock_irq(&sd->lock);
ret = __blk_end_request(req, 0, ret<<9);
spin_unlock_irq(&sd->lock);
}
return 1;
out_error:
spin_lock_irq(&sd->lock);
DEBUG("[%d]: txrx fail %d\n", sd->device_id, ret);
__blk_end_request_all(req, ret);
spin_unlock_irq(&sd->lock);
return -ENXIO;
}
Example 10: blk_queue_start_tag
/**
* blk_queue_start_tag - find a free tag and assign it
* @q: the request queue for the device
* @rq: the block request that needs tagging
*
* Description:
* This can either be used as a stand-alone helper, or possibly be
* assigned as the queue &prep_rq_fn (in which case &struct request
* automagically gets a tag assigned). Note that this function
 * assumes that any type of request can be queued! If this is not
 * true for your device, you must check the request type before
 * calling this function. The request will also be removed from
 * the request queue, so it's the driver's responsibility to re-add
 * it if it should need to be restarted for some reason.
*
* Notes:
* queue lock must be held.
**/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
unsigned max_depth;
int tag;
if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
printk(KERN_ERR
"%s: request %p for device [%s] already tagged %d",
__func__, rq,
rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
BUG();
}
/*
* Protect against shared tag maps, as we may not have exclusive
* access to the tag map.
*
* We reserve a few tags just for sync IO, since we don't want
* to starve sync IO on behalf of flooding async IO.
*/
max_depth = bqt->max_depth;
if (!rq_is_sync(rq) && max_depth > 1) {
switch (max_depth) {
case 2:
max_depth = 1;
break;
case 3:
max_depth = 2;
break;
default:
max_depth -= 2;
}
if (q->in_flight[BLK_RW_ASYNC] > max_depth)
return 1;
}
do {
if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
tag = find_first_zero_bit(bqt->tag_map, max_depth);
if (tag >= max_depth)
return 1;
} else {
int start = bqt->next_tag;
int size = min_t(int, bqt->max_depth, max_depth + start);
tag = find_next_zero_bit(bqt->tag_map, size, start);
if (tag >= size && start + size > bqt->max_depth) {
size = start + size - bqt->max_depth;
tag = find_first_zero_bit(bqt->tag_map, size);
}
if (tag >= size)
return 1;
}
} while (test_and_set_bit_lock(tag, bqt->tag_map));
/*
* We need lock ordering semantics given by test_and_set_bit_lock.
* See blk_queue_end_tag for details.
*/
bqt->next_tag = (tag + 1) % bqt->max_depth;
rq->cmd_flags |= REQ_QUEUED;
rq->tag = tag;
bqt->tag_index[tag] = rq;
blk_start_request(rq);
list_add(&rq->queuelist, &q->tag_busy_list);
return 0;
}
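The FIFO-versus-round-robin branch above is selected by the driver when it sets up its tag map. A hedged usage sketch (the depth of 64 is illustrative, not from the original):

/* Driver init sketch: opt into round-robin tag allocation. */
if (blk_queue_init_tags(q, 64, NULL, BLK_TAG_ALLOC_RR))
	return -ENOMEM;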