This article collects typical usage examples of the C function queue_flag_set_unlocked from the Linux kernel block layer. If you have been wondering what exactly queue_flag_set_unlocked does and how to use it, the curated examples below should help.
The following 15 code examples of queue_flag_set_unlocked are shown, sorted by popularity.
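Before the examples, here is a minimal sketch of the pattern most of them follow (an illustration under stated assumptions, not code from any of the drivers below): on older kernels that still provide the queue_flag_*_unlocked helpers, a driver tweaks queue flags with the unlocked variants while the queue is being set up and is not yet visible via add_disk(); once the queue is live, the locked queue_flag_set()/queue_flag_clear() variants are used with q->queue_lock held. The function mydrv_tune_queue() is hypothetical.

#include <linux/blkdev.h>

/* Minimal sketch (hypothetical driver): called right after blk_alloc_queue()
 * or blk_init_queue(), before add_disk(), so no queue locking is needed yet. */
static void mydrv_tune_queue(struct request_queue *q)
{
    /* Non-rotational device, no seek penalty (cf. the SD/MMC, pmem and
     * null_blk examples below). */
    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
    /* Don't let completions from this device feed the entropy pool. */
    queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
}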
Example 1: card_blk_remove

static void card_blk_remove(struct memory_card *card)
{
    struct card_blk_data *card_data = card_get_drvdata(card);

    if (card_data) {
        int devidx;

        del_gendisk(card_data->disk);

        /*
         * I think this is needed.
         */
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, card_data->queue.queue);
        queue_flag_set_unlocked(QUEUE_FLAG_STOPPED, card_data->queue.queue);
        card_data->queue.queue->queuedata = NULL;
        card_cleanup_queue(&card_data->queue);
        //card_data->disk->queue = NULL;

        devidx = card_data->disk->first_minor >> CARD_SHIFT;
        __clear_bit(devidx, dev_use);

        card_blk_put(card_data);
    }
    card_set_drvdata(card, NULL);
}
Example 2: blk_queue_init_tags

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q: the request queue for the device
 * @depth: the maximum queue depth supported
 * @tags: the tag to use
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
                        struct blk_queue_tag *tags)
{
    int rc;

    BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

    if (!tags && !q->queue_tags) {
        tags = __blk_queue_init_tags(q, depth);

        if (!tags)
            goto fail;
    } else if (q->queue_tags) {
        rc = blk_queue_resize_tags(q, depth);
        if (rc)
            return rc;
        queue_flag_set(QUEUE_FLAG_QUEUED, q);
        return 0;
    } else
        atomic_inc(&tags->refcnt);

    /*
     * assign it, all done
     */
    q->queue_tags = tags;
    queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
    INIT_LIST_HEAD(&q->tag_busy_list);
    return 0;
fail:
    kfree(tags);
    return -ENOMEM;
}
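A hedged sketch of how a legacy (pre-blk-mq) driver might call the three-argument form shown above during queue setup. The helper and the depth are made up; since this is the initial call rather than a resize, the queue-lock requirement from the comment above does not apply.

#include <linux/blkdev.h>

/* Hypothetical caller: enable tagged queueing with a private 64-entry tag
 * map on a queue that is still being initialized. */
static int mydrv_enable_tagging(struct request_queue *q)
{
    return blk_queue_init_tags(q, 64, NULL);    /* 0 on success, -ENOMEM on failure */
}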
Example 3: tier_attr_discard_store

static ssize_t tier_attr_discard_store(struct tier_device *dev,
                                       const char *buf, size_t s)
{
    if ('0' != buf[0] && '1' != buf[0])
        return s;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
    return -EOPNOTSUPP;
#endif
    if ('0' == buf[0]) {
        if (dev->discard) {
            dev->discard = 0;
            pr_info("discard_to_devices is disabled\n");
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)
            /* dev->discard is already 0 at this point, so drop the queue
             * flag unconditionally. */
            queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, dev->rqueue);
#endif
        }
    } else {
        if (!dev->discard) {
            dev->discard = 1;
            pr_info("discard is enabled\n");
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)
            queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, dev->rqueue);
#endif
        }
    }
    return s;
}
Example 4: mmc_queue_setup_discard

static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
    unsigned max_discard;

    max_discard = mmc_calc_max_discard(card);
    if (!max_discard)
        return;

    queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
    blk_queue_max_discard_sectors(q, max_discard);
    q->limits.discard_granularity = card->pref_erase << 9;

    /* granularity must not be greater than max. discard */
    if (card->pref_erase > max_discard)
        q->limits.discard_granularity = 0;

    if (mmc_can_secure_erase_trim(card))
        queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
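For completeness, a small sketch of the consumer side (hypothetical helper, same kernel era as the example): once QUEUE_FLAG_DISCARD has been set this way, the flag can be queried with the blk_queue_discard() helper before discard requests are advertised or handled.

#include <linux/blkdev.h>

/* Hypothetical helper: true if mmc_queue_setup_discard() (or any other code)
 * has marked the queue as supporting discard. */
static bool mydrv_discard_enabled(struct request_queue *q)
{
    return blk_queue_discard(q);    /* tests QUEUE_FLAG_DISCARD */
}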
Example 5: blk_register_queue

int blk_register_queue(struct gendisk *disk)
{
    int ret;
    struct device *dev = disk_to_dev(disk);
    struct request_queue *q = disk->queue;

    add_my_disk(disk);

    if (WARN_ON(!q))
        return -ENXIO;

    /*
     * SCSI probing may synchronously create and destroy a lot of
     * request_queues for non-existent devices.  Shutting down a fully
     * functional queue takes measurable wallclock time as RCU grace
     * periods are involved.  To avoid excessive latency in these
     * cases, a request_queue starts out in a degraded mode which is
     * faster to shut down and is made fully functional here as
     * request_queues for non-existent devices never get registered.
     */
    if (!blk_queue_init_done(q)) {
        queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
        blk_queue_bypass_end(q);
        if (q->mq_ops)
            blk_mq_finish_init(q);
    }

    ret = blk_trace_init_sysfs(dev);
    if (ret)
        return ret;

    ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
    if (ret < 0) {
        blk_trace_remove_sysfs(dev);
        return ret;
    }

    kobject_uevent(&q->kobj, KOBJ_ADD);

    if (q->mq_ops)
        blk_mq_register_disk(disk);

    if (!q->request_fn)
        return 0;

    ret = elv_register_queue(q);
    if (ret) {
        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(dev);
        kobject_put(&dev->kobj);
        return ret;
    }

    return 0;
}
Example 6: mmc_queue_setup_discard

static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
    unsigned max_discard;

    max_discard = mmc_calc_max_discard(card);
    if (!max_discard)
        return;

    queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
    q->limits.max_discard_sectors = max_discard;
    if (card->erased_byte == 0)
        q->limits.discard_zeroes_data = 1;
    q->limits.discard_granularity = card->pref_erase << 9;

    /* granularity must not be greater than max. discard */
    if (card->pref_erase > max_discard)
        q->limits.discard_granularity = 0;

    if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
        queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}
Example 7: card_queue_thread

static int card_queue_thread(void *d)
{
    struct card_queue *cq = d;
    struct request_queue *q = cq->queue;
    // unsigned char rewait;

    /*
     * Set iothread to ensure that we aren't put to sleep by
     * the process freezing.  We handle suspension ourselves.
     */
    current->flags |= PF_MEMALLOC;

    down(&cq->thread_sem);
    do {
        struct request *req = NULL;

        /* wait for the sdio handler to take the irq & transfer the data */
        //for(rewait=3;(!sdio_irq_handled)&&(rewait--);)
        //    schedule();

        spin_lock_irq(q->queue_lock);
        set_current_state(TASK_INTERRUPTIBLE);
        q = cq->queue;
        if (!blk_queue_plugged(q))
            req = blk_fetch_request(q);
        cq->req = req;
        spin_unlock_irq(q->queue_lock);

        if (!req) {
            if (kthread_should_stop()) {
                set_current_state(TASK_RUNNING);
                break;
            }
            up(&cq->thread_sem);
            schedule();
            down(&cq->thread_sem);
            continue;
        }
        set_current_state(TASK_RUNNING);

        cq->issue_fn(cq, req);

        cond_resched();
    } while (1);

    /* Stop the queue.  q->queue_lock is held here, so the locked variant
     * queue_flag_set() would be the conventional choice; the unlocked helper
     * differs only in skipping the lock assertion. */
    spin_lock_irq(q->queue_lock);
    queue_flag_set_unlocked(QUEUE_FLAG_STOPPED, cq->queue);
    spin_unlock_irq(q->queue_lock);

    up(&cq->thread_sem);
    cq->thread = NULL;

    return 0;
}
Example 8: sd_init_blk_dev

/*
 * Initializes the block layer interfaces.
 */
static int sd_init_blk_dev(struct sd_host *host)
{
    struct gendisk *disk;
    struct request_queue *queue;
    int channel;
    int retval;

    channel = to_channel(exi_get_exi_channel(host->exi_device));

    /* queue */
    retval = -ENOMEM;
    spin_lock_init(&host->queue_lock);
    queue = blk_init_queue(sd_request_func, &host->queue_lock);
    if (!queue) {
        sd_printk(KERN_ERR, "error initializing queue\n");
        goto err_blk_init_queue;
    }
    blk_queue_dma_alignment(queue, EXI_DMA_ALIGN);
    blk_queue_max_phys_segments(queue, 1);
    blk_queue_max_hw_segments(queue, 1);
    blk_queue_max_sectors(queue, 8);
    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, queue);
    queue->queuedata = host;
    host->queue = queue;

    /* disk */
    disk = alloc_disk(1 << MMC_SHIFT);
    if (!disk) {
        sd_printk(KERN_ERR, "error allocating disk\n");
        goto err_alloc_disk;
    }
    disk->major = SD_MAJOR;
    disk->first_minor = channel << MMC_SHIFT;
    disk->fops = &sd_fops;
    sprintf(disk->disk_name, "%s%c", SD_NAME, 'a' + channel);
    disk->private_data = host;
    disk->queue = host->queue;
    host->disk = disk;

    retval = 0;
    goto out;

err_alloc_disk:
    blk_cleanup_queue(host->queue);
    host->queue = NULL;
err_blk_init_queue:
out:
    return retval;
}
Example 9: blk_register_queue

int blk_register_queue(struct gendisk *disk)
{
    int ret;
    struct device *dev = disk_to_dev(disk);
    struct request_queue *q = disk->queue;

    if (WARN_ON(!q))
        return -ENXIO;

    /*
     * Initialization must be complete by now.  Finish the initial
     * bypass from queue allocation.
     */
    blk_queue_bypass_end(q);
    queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);

    ret = blk_trace_init_sysfs(dev);
    if (ret)
        return ret;

    ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
    if (ret < 0) {
        blk_trace_remove_sysfs(dev);
        return ret;
    }

    kobject_uevent(&q->kobj, KOBJ_ADD);

    if (q->mq_ops)
        blk_mq_register_disk(disk);

    if (!q->request_fn)
        return 0;

    ret = elv_register_queue(q);
    if (ret) {
        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(dev);
        kobject_put(&dev->kobj);
        return ret;
    }

    return 0;
}
Example 10: bsg_setup_queue

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @q: request queue setup by caller
 * @name: device to give bsg device
 *
 * The caller should have setup the request queue with bsg_request_fn
 * as the request_fn.
 */
int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name)
{
    int ret;

    q->queuedata = dev;
    queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
    blk_queue_softirq_done(q, bsg_softirq_done);
    blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

    ret = bsg_register_queue(q, dev, name, NULL);
    if (ret) {
        printk(KERN_ERR "%s: bsg interface failed to "
               "initialize - register queue\n", dev->kobj.name);
        return ret;
    }

    return 0;
}
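A sketch of a possible caller, based only on the three-argument form shown in this example (mainline bsg-lib has grown extra parameters over time, so treat this purely as an illustration). It assumes q was created with bsg_request_fn as its request_fn, as the comment above requires; the wrapper and node name are made up.

#include <linux/blkdev.h>

static char mydrv_bsg_name[] = "mydrv_bsg0";    /* hypothetical node name */

/* Hypothetical caller: expose a bsg node for a transport device. */
static int mydrv_attach_bsg(struct device *dev, struct request_queue *q)
{
    return bsg_setup_queue(dev, q, mydrv_bsg_name);
}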
Example 11: mmc_setup_queue

static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
    struct mmc_host *host = card->host;
    u64 limit = BLK_BOUNCE_HIGH;

    if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
        limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
    queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
    if (mmc_can_erase(card))
        mmc_queue_setup_discard(mq->queue, card);

    blk_queue_bounce_limit(mq->queue, limit);
    blk_queue_max_hw_sectors(mq->queue,
        min(host->max_blk_count, host->max_req_size / 512));
    blk_queue_max_segments(mq->queue, host->max_segs);
    blk_queue_max_segment_size(mq->queue, host->max_seg_size);

    /* Initialize thread_sem even if it is not used */
    sema_init(&mq->thread_sem, 1);
}
Example 12: pmem_attach_disk

static int pmem_attach_disk(struct device *dev,
                            struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
    struct gendisk *disk;

    pmem->pmem_queue = blk_alloc_queue(GFP_KERNEL);
    if (!pmem->pmem_queue)
        return -ENOMEM;

    blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
    blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
    blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
    blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

    disk = alloc_disk(0);
    if (!disk) {
        blk_cleanup_queue(pmem->pmem_queue);
        return -ENOMEM;
    }

    disk->major = pmem_major;
    disk->first_minor = 0;
    disk->fops = &pmem_fops;
    disk->private_data = pmem;
    disk->queue = pmem->pmem_queue;
    disk->flags = GENHD_FL_EXT_DEVT;
    nvdimm_namespace_disk_name(ndns, disk->disk_name);
    disk->driverfs_dev = dev;
    set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
    pmem->pmem_disk = disk;

    add_disk(disk);
    revalidate_disk(disk);

    return 0;
}
Example 13: mmc_init_queue

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
    struct mmc_host *host = card->host;
    u64 limit = BLK_BOUNCE_HIGH;
    int ret;
    struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
    struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

    if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
        limit = *mmc_dev(host)->dma_mask;

    mq->card = card;
    mq->queue = blk_init_queue(mmc_request, lock);
    if (!mq->queue)
        return -ENOMEM;

    memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
    memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
    mq->mqrq_cur = mqrq_cur;
    mq->mqrq_prev = mqrq_prev;
    mq->queue->queuedata = mq;

    blk_queue_prep_rq(mq->queue, mmc_prep_request);
    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
    if (mmc_can_erase(card))
        mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
    if (host->max_segs == 1) {
        unsigned int bouncesz;

        if (!mmc_card_sd(card))
            bouncesz = MMC_QUEUE_BOUNCESZ;
        else
            bouncesz = MMC_QUEUE_SD_BOUNCESZ;

        if (bouncesz > host->max_req_size)
            bouncesz = host->max_req_size;
        if (bouncesz > host->max_seg_size)
            bouncesz = host->max_seg_size;
        if (bouncesz > (host->max_blk_count * 512))
            bouncesz = host->max_blk_count * 512;

        if (bouncesz > 512) {
            if (!mmc_card_sd(card))
                mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
            else
                mqrq_cur->bounce_buf = mmc_queue_cur_bounce_buf;
            if (!mqrq_cur->bounce_buf) {
                printk(KERN_WARNING "%s: unable to "
                       "allocate bounce cur buffer\n",
                       mmc_card_name(card));
            }
            if (!mmc_card_sd(card))
                mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
            else
                mqrq_prev->bounce_buf = mmc_queue_prev_bounce_buf;
            if (!mqrq_prev->bounce_buf) {
                printk(KERN_WARNING "%s: unable to "
                       "allocate bounce prev buffer\n",
                       mmc_card_name(card));
                kfree(mqrq_cur->bounce_buf);
                mqrq_cur->bounce_buf = NULL;
            }
        }

        if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
            blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
            blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
            blk_queue_max_segments(mq->queue, bouncesz / 512);
            blk_queue_max_segment_size(mq->queue, bouncesz);

            mqrq_cur->sg = mmc_alloc_sg(1, &ret);
            if (ret)
                goto cleanup_queue;

            mqrq_cur->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
            if (ret)
                goto cleanup_queue;

            mqrq_prev->sg = mmc_alloc_sg(1, &ret);
            if (ret)
                goto cleanup_queue;

            mqrq_prev->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
            if (ret)
                goto cleanup_queue;
        }
//......... part of the code omitted here .........
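A hedged caller sketch based only on the signature documented above (in the real mmc_block driver the caller is the block probe path; here the wrapper name is made up, and NULL is passed as the subname for the main partition).

#include "queue.h"    /* drivers/mmc/card/queue.h: struct mmc_queue, mmc_init_queue() */

/* Hypothetical wrapper around the documented entry point. */
static int mydrv_setup_mmc_queue(struct mmc_queue *mq, struct mmc_card *card,
                                 spinlock_t *lock)
{
    return mmc_init_queue(mq, card, lock, NULL);    /* NULL: no partition subname */
}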
Example 14: gp_sdcard_work_init

/**
 * @brief Card initial function.
 * @param work[in]: Work structure.
 * @return None.
 */
static void gp_sdcard_work_init(struct work_struct *work)
{
    gpSDInfo_t *sd = container_of(work, gpSDInfo_t, init);
    int pin_handle;
    int ret = 0, i = 0;
    int pin_id;

    if (sd->device_id == 0)
        pin_id = GP_PIN_SD0;
    else if (sd->device_id == 1)
        pin_id = GP_PIN_SD1;
    else
        pin_id = GP_PIN_SD2;

    pin_handle = gp_board_pin_func_request(pin_id, GP_BOARD_WAIT_FOREVER);
    if (pin_handle < 0) {
        DERROR("[%d]: can't get pin handle\n", sd->device_id);
        goto init_work_end;
    }

    /* ----- chris: Set Pin state for SD before power on ----- */
    sd->sd_func->set_power(1);
    /* ----- chris: delay 250ms after card power on ----- */
    msleep(250);

    /* ----- Initial SD card ----- */
    ret = gp_sdcard_cardinit(sd);
    if (ret != 0) {
        DERROR("[%d]: initial fail\n", sd->device_id);
        gp_board_pin_func_release(pin_handle);
        goto init_work_end;
    }
    gp_board_pin_func_release(pin_handle);

    if (sd->present == 1) {
        if (sd->card_type == SDIO) {
            sd->pin_handle = gp_board_pin_func_request(pin_id, GP_BOARD_WAIT_FOREVER);
            if (sd->pin_handle < 0) {
                DERROR("[%d]: can't get pin handle\n", sd->device_id);
                goto init_work_end;
            }
            DEBUG("SDIO card detected\n");
            gp_sdio_insert_device(sd->device_id, sd->RCA);
        } else {
            unsigned int cnt = 0;

            /* ----- Wait 30 second for all process close handle ----- */
            while ((sd->users) && cnt < 120) {
                msleep(250);
                cnt++;
            }
            if (sd->users)
                DERROR("Some handle do not free\n");

            if (sd->status) {
                gp_sdcard_blk_put(sd);
                sd->status = 0;
            }

            sd->handle_dma = gp_apbdma0_request(1000);
            if (sd->handle_dma == 0)
                goto init_work_end;

            sd->queue = blk_init_queue(gp_sdcard_request, &sd->lock);
            if (sd->queue == NULL) {
                DERROR("NO MEMORY: queue\n");
                goto fail_queue;
            }
            blk_queue_ordered(sd->queue, QUEUE_ORDERED_DRAIN, NULL);
            queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sd->queue);
            blk_queue_logical_block_size(sd->queue, 512);
            blk_queue_max_sectors(sd->queue, SD_MAX_SECTORS);
            blk_queue_max_phys_segments(sd->queue, SD_MAX_PHY_SEGMENTS);
            blk_queue_max_hw_segments(sd->queue, SD_MAX_HW_SEGMENTS);
            blk_queue_max_segment_size(sd->queue, SD_MAX_PHY_SEGMENTS_SIZE);

            /* ----- Initial scatter list ----- */
            sd->sg = kmalloc(sizeof(struct scatterlist) * SD_MAX_PHY_SEGMENTS, GFP_KERNEL);
            if (!sd->sg) {
                DERROR("NO MEMORY: queue\n");
                goto fail_thread;
            }
            sg_init_table(sd->sg, SD_MAX_PHY_SEGMENTS);

            init_MUTEX(&sd->thread_sem);
            /* ----- Enable thread ----- */
            sd->thread = kthread_run(gp_sdcard_queue_thread, sd, "sd-qd");
            if (IS_ERR(sd->thread)) {
                goto fail_thread;
//......... part of the code omitted here .........
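As a hedged illustration of how a work handler like this is typically wired up (inferred only from the container_of(work, gpSDInfo_t, init) line above; the detect-path function name is hypothetical):

#include <linux/workqueue.h>

/* Hypothetical detect/probe path: initialize the work item once, then kick
 * it whenever a card-insert event is seen. */
static void mydrv_card_detected(gpSDInfo_t *sd)
{
    INIT_WORK(&sd->init, gp_sdcard_work_init);
    schedule_work(&sd->init);
}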
Example 15: null_add_dev

static int null_add_dev(void)
{
    struct gendisk *disk;
    struct nullb *nullb;
    sector_t size;
    int rv;

    nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
    if (!nullb) {
        rv = -ENOMEM;
        goto out;
    }

    spin_lock_init(&nullb->lock);

    if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
        submit_queues = nr_online_nodes;

    rv = setup_queues(nullb);
    if (rv)
        goto out_free_nullb;

    if (queue_mode == NULL_Q_MQ) {
        nullb->tag_set.ops = &null_mq_ops;
        nullb->tag_set.nr_hw_queues = submit_queues;
        nullb->tag_set.queue_depth = hw_queue_depth;
        nullb->tag_set.numa_node = home_node;
        nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
        nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        nullb->tag_set.driver_data = nullb;

        rv = blk_mq_alloc_tag_set(&nullb->tag_set);
        if (rv)
            goto out_cleanup_queues;

        nullb->q = blk_mq_init_queue(&nullb->tag_set);
        if (IS_ERR(nullb->q)) {
            rv = -ENOMEM;
            goto out_cleanup_tags;
        }
    } else if (queue_mode == NULL_Q_BIO) {
        nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
        if (!nullb->q) {
            rv = -ENOMEM;
            goto out_cleanup_queues;
        }
        blk_queue_make_request(nullb->q, null_queue_bio);
        rv = init_driver_queues(nullb);
        if (rv)
            goto out_cleanup_blk_queue;
    } else {
        nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
        if (!nullb->q) {
            rv = -ENOMEM;
            goto out_cleanup_queues;
        }
        blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
        blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
        rv = init_driver_queues(nullb);
        if (rv)
            goto out_cleanup_blk_queue;
    }

    nullb->q->queuedata = nullb;
    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
    queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

    disk = nullb->disk = alloc_disk_node(1, home_node);
    if (!disk) {
        rv = -ENOMEM;
        goto out_cleanup_blk_queue;
    }

    mutex_lock(&lock);
    list_add_tail(&nullb->list, &nullb_list);
    nullb->index = nullb_indexes++;
    mutex_unlock(&lock);

    blk_queue_logical_block_size(nullb->q, bs);
    blk_queue_physical_block_size(nullb->q, bs);

    size = gb * 1024 * 1024 * 1024ULL;
    sector_div(size, bs);
    set_capacity(disk, size);

    disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
    disk->major = null_major;
    disk->first_minor = nullb->index;
    disk->fops = &null_fops;
    disk->private_data = nullb;
    disk->queue = nullb->q;
    sprintf(disk->disk_name, "nullb%d", nullb->index);
    add_disk(disk);

    return 0;

out_cleanup_blk_queue:
    blk_cleanup_queue(nullb->q);
out_cleanup_tags:
    if (queue_mode == NULL_Q_MQ)
        blk_mq_free_tag_set(&nullb->tag_set);
//......... part of the code omitted here .........