This page collects typical usage examples of the rq_data_dir function in C/C++ (it comes from the Linux kernel block layer). If you have been wondering what exactly rq_data_dir does and how to use it, the curated code examples here may help.
The 15 code examples below are drawn from real projects and are ordered by popularity by default.
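Before diving in, a quick orientation: rq_data_dir() reports whether a struct request moves data from the device (READ, 0) or to the device (WRITE, 1). As a hedged sketch, its definition in <linux/blkdev.h> looks roughly like this, depending on kernel version:

/* recent kernels derive the direction from the request's operation: */
#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
/* older kernels kept the direction in bit 0 of cmd_flags: */
/* #define rq_data_dir(rq) ((rq)->cmd_flags & 1) */

Either way it evaluates to READ (0) or WRITE (1), which is why the examples below can switch on it or compare it against READ and WRITE.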
Example 1: htifblk_segment
static int htifblk_segment(struct htifblk_device *dev,
struct request *req)
{
/* a single static packet, reused for every request; this assumes only one request is in flight at a time */
static struct htifblk_request pkt __aligned(HTIF_ALIGN);
u64 offset, size, end;
unsigned long cmd;
offset = (blk_rq_pos(req) << SECTOR_SIZE_SHIFT);
size = (blk_rq_cur_sectors(req) << SECTOR_SIZE_SHIFT);
end = offset + size;
if (unlikely(end < offset || end > dev->size)) {
dev_err(&dev->dev->dev, "out-of-bounds access:"
" offset=%llu size=%llu\n", offset, size);
return -EINVAL;
}
rmb();
pkt.addr = __pa(req->buffer);
pkt.offset = offset;
pkt.size = size;
pkt.tag = dev->tag;
switch (rq_data_dir(req)) {
case READ:
cmd = HTIF_CMD_READ;
break;
case WRITE:
cmd = HTIF_CMD_WRITE;
break;
default:
return -EINVAL;
}
dev->req = req;
htif_tohost(dev->dev->index, cmd, __pa(&pkt));
return 0;
}
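Note that htifblk_segment() only submits the command to the host interface; it stores the request in dev->req so that, presumably, the driver's HTIF interrupt/completion path can end the request once the transfer finishes.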
Example 2: do_ldm_req
// Fetch request objects from the request queue, read the operation
// parameters out of each one (starting sector and byte count of the
// read/write), and carry the requested transfer out on the hardware.
// This function is invoked automatically by the block driver framework;
// when it runs is decided by the elevator (I/O scheduler).
static void do_ldm_req(struct request_queue *q)
{
// take one request object off the request queue
struct request *req = blk_fetch_request(q);
while (req) {
// byte offset of the first sector this chunk operates on
u32 start = blk_rq_pos(req) * SECTOR_SIZE;
// number of bytes in the current chunk of the request
u32 len = blk_rq_cur_bytes(req);
// check whether this chunk would run past the end of the device
int err = 0;
if (start + len > DEV_SIZE) {
printk(KERN_ERR "request region is out of device capacity\n");
err = -EIO;
goto err_request;
}
// rq_data_dir() yields the direction of the current request
// (tip: add printks around each memcpy to observe when reads and
// writes actually get scheduled)
// READ: data moves from the kernel to the application
if (rq_data_dir(req) == READ) {
memcpy(req->buffer, (u8*)ldm.addr + start, len);
printk("read from %u, size %u\n", start, len);
} else { // WRITE: data moves from the application into the kernel
memcpy((u8*)ldm.addr + start, req->buffer, len);
printk("write from %u, size %u\n", start, len);
}
// __blk_end_request_cur() returns false once every chunk of the current
// request has completed; below we then call blk_fetch_request() to take a
// new request off the queue, and if none is pending req becomes NULL and
// the loop exits. A true return means the current request still has
// chunks left, so the loop keeps working on it.
// The err argument can override the return value on its own: with err < 0
// the function returns false, so on an error we can finish the current
// request early and move on to the next one on the queue.
err_request:
if (!__blk_end_request_cur(req, err)) {
req = blk_fetch_request(q);
}
}
}
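For context, here is a minimal sketch of how a request function such as do_ldm_req() is hooked up with the legacy single-queue block API. DEV_SIZE, SECTOR_SIZE, and the ldm object come from the example above; the queue/disk fields, lock name, and error handling are assumptions:

static DEFINE_SPINLOCK(ldm_lock);

static int __init ldm_init(void)
{
    /* the block framework will invoke do_ldm_req() under ldm_lock */
    ldm.queue = blk_init_queue(do_ldm_req, &ldm_lock);
    if (!ldm.queue)
        return -ENOMEM;
    ldm.disk = alloc_disk(1);    /* one minor, no partitions */
    if (!ldm.disk) {
        blk_cleanup_queue(ldm.queue);
        return -ENOMEM;
    }
    /* ... fill in ldm.disk->major, ->first_minor, ->fops, ->disk_name ... */
    ldm.disk->queue = ldm.queue;
    set_capacity(ldm.disk, DEV_SIZE / SECTOR_SIZE);  /* capacity in sectors */
    add_disk(ldm.disk);
    return 0;
}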
Example 3: sbull_request
/*
* The simple form of the request function.
*/
static void sbull_request(struct request_queue *q)
{
struct request *req;
req = blk_fetch_request(q);
while (req != NULL) {
struct sbull_dev *dev = req->rq_disk->private_data;
if (!blk_fs_request(req)) {
printk(KERN_NOTICE "Skip non-fs request\n");
__blk_end_request_all(req, -EIO);
// the request is fully ended: fetch the next one, or this loop spins forever
req = blk_fetch_request(q);
continue;
}
// printk (KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx\n",
// dev - Devices, rq_data_dir(req),
// req->sector, req->current_nr_sectors,
// req->flags);
sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer, rq_data_dir(req));
/* end_request(req, 1); */
if(!__blk_end_request_cur(req, 0)) {
req = blk_fetch_request(q);
}
}
}
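The sbull_transfer() helper called above comes from LDD3's sbull driver and looks roughly like this; KERNEL_SECTOR_SIZE is 512 and the write argument receives the value of rq_data_dir():

static void sbull_transfer(struct sbull_dev *dev, unsigned long sector,
        unsigned long nsect, char *buffer, int write)
{
    unsigned long offset = sector * KERNEL_SECTOR_SIZE;
    unsigned long nbytes = nsect * KERNEL_SECTOR_SIZE;

    if ((offset + nbytes) > dev->size) {
        printk(KERN_NOTICE "Beyond-end write (%ld %ld)\n", offset, nbytes);
        return;
    }
    if (write)    /* WRITE: copy into the in-memory backing store */
        memcpy(dev->data + offset, buffer, nbytes);
    else          /* READ: copy out to the request buffer */
        memcpy(buffer, dev->data + offset, nbytes);
}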
Example 4: htifbd_request
static void htifbd_request(struct request_queue *q)
{
struct request *req;
req = blk_fetch_request(q);
while (req != NULL) {
struct htifbd_dev *dev;
dev = req->rq_disk->private_data;
if (req->cmd_type != REQ_TYPE_FS) {
pr_notice(DRIVER_NAME ": ignoring non-fs request for %s\n",
req->rq_disk->disk_name);
__blk_end_request_all(req, -EIO);
// the request is fully ended: fetch the next one before continuing
req = blk_fetch_request(q);
continue;
}
htifbd_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
req->buffer, rq_data_dir(req));
if (!__blk_end_request_cur(req, 0)) {
req = blk_fetch_request(q);
}
}
}
Example 5: sd_do_request
/*
* Request dispatcher.
*/
static int sd_do_request(struct sd_host *host, struct request *req)
{
int nr_sectors = 0;
int error;
error = sd_check_request(host, req);
if (error) {
nr_sectors = error;
goto out;
}
switch (rq_data_dir(req)) {
case WRITE:
nr_sectors = sd_write_request(host, req);
break;
case READ:
nr_sectors = sd_read_request(host, req);
break;
}
out:
return nr_sectors;
}
Example 6: flash_merged_requests
/*
This function performs three tasks:
1. if next expires before req, set req's expire time to that of next
2. delete next from the async FIFO queue
3. if the merged req's size >= bundle_size, delete req from the async
FIFO queue, reinitialize it, and insert it into the bundle queue
*/
static void
flash_merged_requests(struct request_queue *q, struct request *req,
struct request *next)
{
struct flash_data *fd = q->elevator->elevator_data;
// const int data_type = !rq_is_sync(req);
// FIXME:
const int data_type = rq_data_dir(req);
/*
* if next expires before rq, assign its expire time to rq
* and move into next position (next will be deleted) in fifo
*/
// TODO: why need to check if async queue is empty here?
if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
list_move(&req->queuelist, &next->queuelist);
rq_set_fifo_time(req, rq_fifo_time(next));
}
}
/* delete next */
rq_fifo_clear(next);
/* task 3 only kick into bundle queue if req is async */
if(req->__data_len >= fd->bundle_size && data_type == 1)
{
/* did both delete and init */
rq_fifo_clear(req);
list_add_tail(&req->queuelist, &fd->bundle_list);
#ifdef DEBUG_FLASH
printk("req of type %d of size %d is inserted to bundle queue\n", data_type, req->__data_len);
#endif
}
}
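As a hedged sketch, a merged_requests hook like this one is plugged into the legacy elevator framework roughly as follows; the scheduler name and the other callbacks are placeholders, not taken from the original code:

static struct elevator_type iosched_flash = {
    .ops = {
        .elevator_merge_req_fn = flash_merged_requests,
        /* .elevator_add_req_fn, .elevator_dispatch_fn, ... */
    },
    .elevator_name = "flash",
    .elevator_owner = THIS_MODULE,
};

static int __init flash_iosched_init(void)
{
    return elv_register(&iosched_flash);
}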
Example 7: osprd_process_request
/*
* osprd_process_request(d, req)
* Called when the user reads or writes a sector.
* Should perform the read or write, as appropriate.
*/
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
if (!blk_fs_request(req)) {
end_request(req, 0);
return;
}
// EXERCISE: Perform the read or write request by copying data between
// our data array and the request's buffer.
// Hint: The 'struct request' argument tells you what kind of request
// this is, and which sectors are being read or written.
// Read about 'struct request' in <linux/blkdev.h>.
// Consider the 'req->sector', 'req->current_nr_sectors', and
// 'req->buffer' members, and the rq_data_dir() function.
// Your code here.
if(req->sector+req->current_nr_sectors <= nsectors) {
switch(rq_data_dir(req)) {
case READ:
memcpy(req->buffer, d->data+req->sector*SECTOR_SIZE, req->current_nr_sectors*SECTOR_SIZE);
break;
case WRITE:
memcpy(d->data+req->sector*SECTOR_SIZE, req->buffer, req->current_nr_sectors*SECTOR_SIZE);
break;
default:
eprintk("Failed to process request...\n");
end_request(req, 0);
return; // don't fall through to the success completion below
}
}
else {
eprintk("Sector overflow...\n");
end_request(req, 0);
return; // already completed with failure; avoid a double completion
}
end_request(req, 1);
}
Example 8: osprd_process_request
// implement this function first; the test cases that do not involve locks can then pass
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
if (!blk_fs_request(req)) {
end_request(req, 0);
return;
}
// EXERCISE: Perform the read or write request by copying data between
// our data array and the request's buffer.
// Hint: The 'struct request' argument tells you what kind of request
// this is, and which sectors are being read or written.
// Read about 'struct request' in <linux/blkdev.h>.
// Consider the 'req->sector', 'req->current_nr_sectors', and
// 'req->buffer' members, and the rq_data_dir() function.
// Your code here.
// determine whether the request is a read or a write
unsigned int requestType = rq_data_dir(req);
// compute the offset: a pointer to the first byte of the osprd data
// region this request touches
uint8_t *data_ptr = d->data + (req->sector) * SECTOR_SIZE;
if (requestType == READ)
{
memcpy((void *)req->buffer, (void *)data_ptr, req->current_nr_sectors * SECTOR_SIZE);
}
else if (requestType == WRITE)
{
memcpy((void *)data_ptr, (void *)req->buffer, req->current_nr_sectors * SECTOR_SIZE);
}
else
{
eprintk("Error: unknown read/write direction.\n");
end_request(req, 0);
return; // already completed with failure; avoid a double completion
}
end_request(req, 1); // the minimum read/write unit is one sector
}
Example 9: blk_rq_merge_ok
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
struct request_queue *q = rq->q;
if (!rq_mergeable(rq) || !bio_mergeable(bio))
return false;
if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
return false;
/* different data direction or already started, don't merge */
if (bio_data_dir(bio) != rq_data_dir(rq))
return false;
/* must be same device and not a special request */
if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
return false;
/* only merge integrity protected bio into ditto rq */
if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
return false;
/* must be using the same buffer */
if (rq->cmd_flags & REQ_WRITE_SAME &&
!blk_write_same_mergeable(rq->bio, bio))
return false;
if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
struct bio_vec *bprev;
/* index the last bvec of the request's tail bio, not of the new bio */
bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
return false;
}
return true;
}
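blk_rq_merge_ok() only decides whether a merge is permissible at all. Its companion blk_try_merge() in block/blk-merge.c (from the same kernel era) then picks which kind of merge applies; it looks roughly like this:

int blk_try_merge(struct request *rq, struct bio *bio)
{
    if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
        return ELEVATOR_BACK_MERGE;    /* bio starts right where rq ends */
    else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
        return ELEVATOR_FRONT_MERGE;   /* bio ends right where rq starts */
    return ELEVATOR_NO_MERGE;
}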
Example 10: lkl_disk_request
static void lkl_disk_request(struct request_queue *q)
{
struct request *req;
while ((req = elv_next_request(q)) != NULL) {
struct lkl_disk_dev *dev = req->rq_disk->private_data;
struct lkl_disk_cs cs;
if (! blk_fs_request(req)) {
printk (KERN_NOTICE "lkl_disk_request: skip non-fs request\n");
__blk_end_request(req, -EIO, req->hard_cur_sectors << 9);
continue;
}
cs.linux_cookie=req;
lkl_disk_do_rw(dev->data, req->sector, req->current_nr_sectors,
req->buffer, rq_data_dir(req), &cs);
/*
* Async is broken.
*/
BUG_ON (cs.sync == 0);
blk_end_request(req, cs.error ? -EIO : 0, blk_rq_bytes(req));
}
}
Example 11: pd_next_buf
static void pd_next_buf(int unit)
{
long saved_flags;
spin_lock_irqsave(&pd_lock, saved_flags);
end_request(1);
if (!pd_run) {
spin_unlock_irqrestore(&pd_lock, saved_flags);
return;
}
/* paranoia */
if (QUEUE_EMPTY ||
(rq_data_dir(CURRENT) != pd_cmd) ||
(minor(CURRENT->rq_dev) != pd_dev) ||
(CURRENT->rq_status == RQ_INACTIVE) ||
(CURRENT->sector != pd_block))
printk("%s: OUCH: request list changed unexpectedly\n",
PD.name);
pd_count = CURRENT->current_nr_sectors;
pd_buf = CURRENT->buffer;
spin_unlock_irqrestore(&pd_lock, saved_flags);
}
Example 12: sbull_request
/*
* The simple form of the request function.
*/
static void sbull_request(struct request_queue *q)
{
struct request *req;
while ((req = blk_fetch_request(q)) != NULL) {
do {
struct sbull_dev *dev = req->rq_disk->private_data;
if (req->cmd_type != REQ_TYPE_FS) {
printk (KERN_NOTICE "Skip non-fs request\n");
if (!__blk_end_request_cur(req, -1))
req = NULL;
continue;
}
// printk (KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx\n",
// dev - Devices, rq_data_dir(req),
// req->sector, req->current_nr_sectors,
// req->flags);
sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
req->buffer, rq_data_dir(req));
if (!__blk_end_request_cur(req, 0))
req = NULL;
} while(req != NULL);
}
}
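Unlike Example 3, this variant clears req when __blk_end_request_cur() reports that the request is done, and only fetches a new request at the top of the outer loop, which sidesteps the infinite-loop pitfall in the non-fs branch.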
Example 13: do_z2_request
static void do_z2_request(struct request_queue *q)
{
struct request *req;
req = blk_fetch_request(q);
while (req) {
unsigned long start = blk_rq_pos(req) << 9;
unsigned long len = blk_rq_cur_bytes(req);
int err = 0;
if (start + len > z2ram_size) {
pr_err(DEVICE_NAME ": bad access: block=%llu, "
"count=%u\n",
(unsigned long long)blk_rq_pos(req),
blk_rq_cur_sectors(req));
err = -EIO;
goto done;
}
while (len) {
unsigned long addr = start & Z2RAM_CHUNKMASK;
unsigned long size = Z2RAM_CHUNKSIZE - addr;
if (len < size)
size = len;
addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
if (rq_data_dir(req) == READ)
memcpy(req->buffer, (char *)addr, size);
else
memcpy((char *)addr, req->buffer, size);
start += size;
len -= size;
}
done:
if (!__blk_end_request_cur(req, err))
req = blk_fetch_request(q);
}
}
Example 14: sg_io
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
struct sg_io_hdr *hdr, fmode_t mode)
{
unsigned long start_time;
ssize_t ret = 0;
int writing = 0;
int at_head = 0;
struct request *rq;
char sense[SCSI_SENSE_BUFFERSIZE];
struct bio *bio;
if (hdr->interface_id != 'S')
return -EINVAL;
if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
return -EIO;
if (hdr->dxfer_len)
switch (hdr->dxfer_direction) {
default:
return -EINVAL;
case SG_DXFER_TO_DEV:
writing = 1;
break;
case SG_DXFER_TO_FROM_DEV:
case SG_DXFER_FROM_DEV:
break;
}
if (hdr->flags & SG_FLAG_Q_AT_HEAD)
at_head = 1;
ret = -ENOMEM;
rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
if (IS_ERR(rq))
return PTR_ERR(rq);
blk_rq_set_block_pc(rq);
if (hdr->cmd_len > BLK_MAX_CDB) {
rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
if (!rq->cmd)
goto out_put_request;
}
ret = blk_fill_sghdr_rq(q, rq, hdr, mode);
if (ret < 0)
goto out_free_cdb;
ret = 0;
if (hdr->iovec_count) {
struct iov_iter i;
struct iovec *iov = NULL;
ret = import_iovec(rq_data_dir(rq),
hdr->dxferp, hdr->iovec_count,
0, &iov, &i);
if (ret < 0)
goto out_free_cdb;
/* SG_IO howto says that the shorter of the two wins */
iov_iter_truncate(&i, hdr->dxfer_len);
ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
kfree(iov);
} else if (hdr->dxfer_len)
ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
GFP_KERNEL);
if (ret)
goto out_free_cdb;
bio = rq->bio;
memset(sense, 0, sizeof(sense));
rq->sense = sense;
rq->sense_len = 0;
rq->retries = 0;
start_time = jiffies;
/* ignore return value. All information is passed back to caller
* (if he doesn't check that is his problem).
* N.B. a non-zero SCSI status is _not_ necessarily an error.
*/
blk_execute_rq(q, bd_disk, rq, at_head);
hdr->duration = jiffies_to_msecs(jiffies - start_time);
ret = blk_complete_sghdr_rq(rq, hdr, bio);
out_free_cdb:
if (rq->cmd != rq->__cmd)
kfree(rq->cmd);
out_put_request:
blk_put_request(rq);
return ret;
}
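To see what drives this path from userspace, here is a hedged sketch that issues a SCSI INQUIRY through the SG_IO ioctl serviced by sg_io() above; the /dev/sda device path is an assumption and error handling is minimal:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
    unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };  /* INQUIRY, 96 bytes */
    unsigned char buf[96], sense[32];
    struct sg_io_hdr hdr;
    int fd = open("/dev/sda", O_RDONLY);  /* assumed device node */

    if (fd < 0)
        return 1;
    memset(&hdr, 0, sizeof(hdr));
    hdr.interface_id = 'S';                   /* checked first by sg_io() */
    hdr.cmd_len = sizeof(cdb);
    hdr.cmdp = cdb;
    hdr.dxfer_direction = SG_DXFER_FROM_DEV;  /* device-to-host transfer */
    hdr.dxferp = buf;
    hdr.dxfer_len = sizeof(buf);
    hdr.sbp = sense;
    hdr.mx_sb_len = sizeof(sense);
    hdr.timeout = 5000;                       /* milliseconds */
    if (ioctl(fd, SG_IO, &hdr) < 0)
        perror("SG_IO");
    else
        printf("vendor: %.8s\n", buf + 8);    /* INQUIRY vendor id field */
    return 0;
}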
Example 15: virtio_queue_rq
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct virtio_blk *vblk = hctx->queue->queuedata;
struct request *req = bd->rq;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
unsigned long flags;
unsigned int num;
int qid = hctx->queue_num;
int err;
bool notify = false;
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
vbr->req = req;
if (req->cmd_flags & REQ_FLUSH) {
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
} else {
switch (req->cmd_type) {
case REQ_TYPE_FS:
vbr->out_hdr.type = 0;
vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req));
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
break;
case REQ_TYPE_BLOCK_PC:
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
break;
case REQ_TYPE_DRV_PRIV:
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
break;
default:
/* We don't put anything else in the queue. */
BUG();
}
}
blk_mq_start_request(req);
num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
if (num) {
if (rq_data_dir(vbr->req) == WRITE)
vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
else
vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
}
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
/* Out of mem doesn't actually happen, since we fall back
* to direct descriptors */
if (err == -ENOMEM || err == -ENOSPC)
return BLK_MQ_RQ_QUEUE_BUSY;
return BLK_MQ_RQ_QUEUE_ERROR;
}
if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
notify = true;
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
if (notify)
virtqueue_notify(vblk->vqs[qid].vq);
return BLK_MQ_RQ_QUEUE_OK;
}
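virtio_queue_rq() is installed as the blk-mq queue_rq hook. In the virtio-blk driver of this era the wiring looks roughly like the following (virtblk_request_done and virtblk_init_request are the driver's completion and per-request init callbacks); the ops table is then registered with blk_mq_alloc_tag_set() and turned into a queue with blk_mq_init_queue():

static struct blk_mq_ops virtio_mq_ops = {
    .queue_rq = virtio_queue_rq,        /* the function shown above */
    .map_queue = blk_mq_map_queue,      /* default ctx-to-hw-queue mapping */
    .complete = virtblk_request_done,
    .init_request = virtblk_init_request,
};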