This article collects typical usage examples of the prepare_to_wait() function, drawn from Linux kernel C code. If you have been wondering what prepare_to_wait() does, how to call it, and what real call sites look like, the curated examples below should help.
Fifteen code examples of prepare_to_wait() are shown, ordered roughly by popularity. All of them follow the same discipline: queue the task on a wait queue and set its scheduling state first, test the wait condition second, and sleep only if the condition still does not hold.
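As a reference point, here is the canonical shape of that loop, reduced to a minimal sketch (my_wq and my_condition are placeholder names, not taken from any of the examples below):

DEFINE_WAIT(wait);                      /* wait-queue entry using autoremove_wake_function */

for (;;) {
    /* Queue on my_wq and set the task state BEFORE testing the condition. */
    prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
    if (my_condition)
        break;                          /* what we were waiting for already happened */
    if (signal_pending(current))
        break;                          /* interruptible sleeps must honor signals */
    schedule();                         /* actually sleep; returns in TASK_RUNNING */
}
finish_wait(&my_wq, &wait);             /* restore TASK_RUNNING and unlink the entry */

Because the task state is set before the test, a wake_up() that fires between the test and schedule() merely makes the task runnable again, so schedule() returns immediately and the wakeup cannot be lost. Every example below is a variation on this skeleton.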
Example 1: sk_stream_wait_close
void sk_stream_wait_close(struct sock *sk, long timeout)
{
    if (timeout) {
        DEFINE_WAIT(wait);

        do {
            prepare_to_wait(sk->sk_sleep, &wait,
                            TASK_INTERRUPTIBLE);
            if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk)))
                break;
        } while (!signal_pending(current) && timeout);

        finish_wait(sk->sk_sleep, &wait);
    }
}
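Note the ordering: prepare_to_wait() puts the task in TASK_INTERRUPTIBLE before sk_wait_event(), a helper macro that releases the socket lock, tests the condition, sleeps via schedule_timeout() and re-takes the lock. A wakeup arriving between the condition test and the sleep just leaves the task runnable, so it cannot be lost; the loop ends on success, on a pending signal, or once the timeout is used up.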
Example 2: svc_dropparty
static int svc_dropparty(struct socket *sock, int ep_ref)
{
    DEFINE_WAIT(wait);
    struct atm_vcc *vcc = ATM_SD(sock);
    int error;

    lock_sock(vcc->sk);
    set_bit(ATM_VF_WAITING, &vcc->flags);
    prepare_to_wait(vcc->sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
    sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref);
    while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
        schedule();
        prepare_to_wait(vcc->sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
    }
    finish_wait(vcc->sk->sk_sleep, &wait);
    if (!sigd) {
        error = -EUNATCH;
        goto out;
    }
    error = xchg(&vcc->sk->sk_err_soft, 0);
out:
    release_sock(vcc->sk);
    return error;
}
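prepare_to_wait() is re-armed on every iteration because schedule() returns with the task back in TASK_RUNNING; the state has to be set again before ATM_VF_WAITING is re-tested. The flag is cleared once the ATM signaling daemon has answered the sigd_enq2() request, and if the daemon disappears the wait is abandoned with -EUNATCH.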
Example 3: speakup_thread
int speakup_thread(void *data)
{
    unsigned long flags;
    int should_break;
    struct bleep our_sound;

    our_sound.active = 0;
    our_sound.freq = 0;
    our_sound.jiffies = 0;

    mutex_lock(&spk_mutex);
    while (1) {
        DEFINE_WAIT(wait);

        while (1) {
            spk_lock(flags);
            our_sound = unprocessed_sound;
            unprocessed_sound.active = 0;
            prepare_to_wait(&speakup_event, &wait,
                            TASK_INTERRUPTIBLE);
            should_break = kthread_should_stop() ||
                our_sound.active ||
                (synth && synth->catch_up && synth->alive &&
                 (speakup_info.flushing ||
                  !synth_buffer_empty()));
            spk_unlock(flags);
            if (should_break)
                break;
            mutex_unlock(&spk_mutex);
            schedule();
            mutex_lock(&spk_mutex);
        }
        finish_wait(&speakup_event, &wait);
        if (kthread_should_stop())
            break;
        if (our_sound.active)
            kd_mksound(our_sound.freq, our_sound.jiffies);
        if (synth && synth->catch_up && synth->alive) {
            /*
             * It is up to the callee to take the lock, so that it
             * can sleep whenever it likes.
             */
            synth->catch_up(synth);
        }
        speakup_start_ttys();
    }
    mutex_unlock(&spk_mutex);
    return 0;
}
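The wakeup condition is computed under the speakup spinlock after prepare_to_wait(), and the outer spk_mutex is dropped only around schedule(), so an event posted by a producer right after the check still finds the kthread queued and wakes it immediately. The pending sound and synthesizer state are snapshotted inside the lock and acted on only after finish_wait().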
Example 4: __wait_on_atomic_t
/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
 * the actions of __wait_on_atomic_t() are permitted return codes. Nonzero
 * return codes halt waiting and return.
 */
static __sched
int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
                       int (*action)(atomic_t *), unsigned mode)
{
    atomic_t *val;
    int ret = 0;

    do {
        prepare_to_wait(wq, &q->wait, mode);
        val = q->key.flags;
        if (atomic_read(val) == 0)
            break;
        ret = (*action)(val);
    } while (!ret && atomic_read(val) != 0);
    finish_wait(wq, &q->wait);
    return ret;
}
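This is the wait-on-atomic_t flavor of the pattern: waiting ends as soon as the counter reads zero, the caller-supplied action callback does the actual sleeping, and, per the comment above, a nonzero return from the callback aborts the wait early. Re-testing the counter in the loop condition covers a decrement that lands while the callback was running.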
Example 5: fusion_sleep_on
void
fusion_sleep_on(wait_queue_head_t *q, struct semaphore *lock, signed long *timeout)
{
    DEFINE_WAIT(wait);

    prepare_to_wait(q, &wait, TASK_INTERRUPTIBLE);
    up(lock);

    if (timeout)
        *timeout = schedule_timeout(*timeout);
    else
        schedule();

    finish_wait(q, &wait);
}
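fusion_sleep_on() bundles the race-free ordering for its callers: the task is queued and marked TASK_INTERRUPTIBLE before up(lock) releases the semaphore, so a wakeup issued by another party right after the release cannot be missed. The helper returns without re-taking the lock, so that is the caller's job. A hypothetical caller could look like the sketch below (struct fusion_dev, its wait/lock/count fields, and wait_for_count() are invented names for illustration, not part of any real API):

/* Hypothetical caller sketch; fusion_dev and its fields are invented names.
 * Entered with dev->lock held; returns with dev->lock held. */
static int wait_for_count(struct fusion_dev *dev, signed long *timeout)
{
    while (!dev->count) {                   /* condition tested under the lock */
        fusion_sleep_on(&dev->wait, &dev->lock, timeout);
        down(&dev->lock);                   /* the helper released it before sleeping */
        if (signal_pending(current))
            return -EINTR;                  /* the TASK_INTERRUPTIBLE sleep was aborted */
        if (timeout && *timeout <= 0)
            return -ETIME;                  /* the whole sleep budget was consumed */
    }
    return 0;
}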
Example 6: congestion_wait
/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
    long ret;
    unsigned long start = jiffies;
    DEFINE_WAIT(wait);
    wait_queue_head_t *wqh = &congestion_wqh[sync];

    prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
    ret = io_schedule_timeout(timeout);
    finish_wait(wqh, &wait);

    trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
                                    jiffies_to_usecs(jiffies - start));

    return ret;
}
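congestion_wait() sleeps unconditionally: there is no condition re-check loop because the caller merely wants to back off for up to @timeout jiffies or until a writeback completion wakes the congestion queue. TASK_UNINTERRUPTIBLE means signals are ignored, and io_schedule_timeout() accounts the sleep as I/O wait and returns however many jiffies of the timeout were left.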
Example 7: pipe_wait
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
    DEFINE_WAIT(wait);

    /*
     * Pipes are system-local resources, so sleeping on them
     * is considered a noninteractive wait:
     */
    prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
    if (pipe->inode)
        mutex_unlock(&pipe->inode->i_mutex);
    schedule();
    finish_wait(&pipe->wait, &wait);
    if (pipe->inode)
        mutex_lock(&pipe->inode->i_mutex);
}
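The "atomically" in the comment is precisely the prepare_to_wait() guarantee: the task is already on &pipe->wait when i_mutex is dropped, so a writer that grabs the mutex immediately afterwards and issues a wakeup cannot slip into the gap between the unlock and the sleep.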
Example 8: wait_iff_congested
/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * In the event of a congested backing_dev (any backing_dev) and the given
 * @zone has experienced recent congestion, this waits for up to @timeout
 * jiffies for either a BDI to exit congestion of the given @sync queue
 * or a write to complete.
 *
 * In the absence of zone congestion, a short sleep or a cond_resched is
 * performed to yield the processor and to allow other subsystems to make
 * forward progress.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
    long ret;
    unsigned long start = jiffies;
    DEFINE_WAIT(wait);
    wait_queue_head_t *wqh = &congestion_wqh[sync];

    /*
     * If there is no congestion, or heavy congestion is not being
     * encountered in the current zone, yield if necessary instead
     * of sleeping on the congestion queue.
     */
    if (atomic_read(&nr_wb_congested[sync]) == 0 ||
        !test_bit(ZONE_CONGESTED, &zone->flags)) {
        /*
         * Memory allocation/reclaim might be called from a WQ
         * context and the current implementation of the WQ
         * concurrency control doesn't recognize that a particular
         * WQ is congested if the worker thread is looping without
         * ever sleeping. Therefore we have to do a short sleep
         * here rather than calling cond_resched().
         */
        if (current->flags & PF_WQ_WORKER)
            schedule_timeout_uninterruptible(1);
        else
            cond_resched();

        /* In case we scheduled, work out time remaining */
        ret = timeout - (jiffies - start);
        if (ret < 0)
            ret = 0;

        goto out;
    }

    /* Sleep until uncongested or a write happens */
    prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
    ret = io_schedule_timeout(timeout);
    finish_wait(wqh, &wait);

out:
    trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
                                       jiffies_to_usecs(jiffies - start));

    return ret;
}
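The slow path at the bottom is the same unconditional sleep as in congestion_wait(); the interesting part is the fast path, which avoids touching the wait queue at all when the zone is not congested. The PF_WQ_WORKER special case substitutes a one-jiffy uninterruptible sleep for cond_resched() because, as the comment explains, the workqueue concurrency control only notices a stalled worker when it actually sleeps.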
Example 9: cpu_hotplug_begin
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
    DEFINE_WAIT(wait);

    cpu_hotplug.active_writer = current;

    cpuhp_lock_acquire();
    for (;;) {
        mutex_lock(&cpu_hotplug.lock);
        prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
        if (likely(!atomic_read(&cpu_hotplug.refcount)))
            break;
        mutex_unlock(&cpu_hotplug.lock);
        schedule();
    }
    finish_wait(&cpu_hotplug.wq, &wait);
}
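Each iteration takes cpu_hotplug.lock before prepare_to_wait(), tests the refcount under the lock, and releases the lock only around schedule(); when the refcount finally hits zero the writer breaks out still holding the mutex, which is what blocks new readers for the duration of the hotplug operation. The livelock scenario in the comment is the direct consequence of having to drop the lock while sleeping.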
Example 10: mct_u232_ioctl
static int mct_u232_ioctl(struct tty_struct *tty,
                          unsigned int cmd, unsigned long arg)
{
    DEFINE_WAIT(wait);
    struct usb_serial_port *port = tty->driver_data;
    struct mct_u232_private *mct_u232_port = usb_get_serial_port_data(port);
    struct async_icount cnow, cprev;
    unsigned long flags;

    dbg("%s - port %d, cmd = 0x%x", __func__, port->number, cmd);

    switch (cmd) {
    case TIOCMIWAIT:
        dbg("%s (%d) TIOCMIWAIT", __func__, port->number);

        spin_lock_irqsave(&mct_u232_port->lock, flags);
        cprev = mct_u232_port->icount;
        spin_unlock_irqrestore(&mct_u232_port->lock, flags);
        for ( ; ; ) {
            prepare_to_wait(&mct_u232_port->msr_wait,
                            &wait, TASK_INTERRUPTIBLE);
            schedule();
            finish_wait(&mct_u232_port->msr_wait, &wait);
            /* see if a signal did it */
            if (signal_pending(current))
                return -ERESTARTSYS;
            spin_lock_irqsave(&mct_u232_port->lock, flags);
            cnow = mct_u232_port->icount;
            spin_unlock_irqrestore(&mct_u232_port->lock, flags);
            if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
                cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
                return -EIO; /* no change => error */
            if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
                ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
                ((arg & TIOCM_CD)  && (cnow.dcd != cprev.dcd)) ||
                ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
                return 0;
            }
            cprev = cnow;
        }
    }
    return -ENOIOCTLCMD;
}
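Nothing is tested before the first sleep: the TIOCMIWAIT loop relies on the driver's interrupt path updating icount and waking msr_wait whenever a modem status line changes, then compares the fresh snapshot against cprev under the spinlock. Returning -ERESTARTSYS on a pending signal lets the kernel restart the ioctl transparently.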
Example 11: jiq_read_wq_delayed
static int jiq_read_wq_delayed(char *buf, char **start, off_t offset,
                               int len, int *eof, void *data)
{
    DEFINE_WAIT(wait);

    jiq_data.len = 0;                /* nothing printed, yet */
    jiq_data.buf = buf;              /* print in this place */
    jiq_data.jiffies = jiffies;      /* initial time */
    jiq_data.delay = delay;

    prepare_to_wait(&jiq_wait, &wait, TASK_INTERRUPTIBLE);
    schedule_delayed_work(&jiq_work, delay);
    schedule();
    finish_wait(&jiq_wait, &wait);

    *eof = 1;
    return jiq_data.len;
}
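This is the jiq ("just in queue") sample from the Linux Device Drivers book. prepare_to_wait() is deliberately issued before schedule_delayed_work(): if the delayed work were queued first and ran to completion before this thread reached schedule(), its wake_up() would still find the task already queued and runnable, so schedule() falls straight through instead of sleeping forever.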
Example 12: wait_for_commit
/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
{
    DEFINE_WAIT(wait);

    mutex_lock(&root->fs_info->trans_mutex);
    while (!commit->commit_done) {
        prepare_to_wait(&commit->commit_wait, &wait,
                        TASK_UNINTERRUPTIBLE);
        if (commit->commit_done)
            break;
        mutex_unlock(&root->fs_info->trans_mutex);
        schedule();
        mutex_lock(&root->fs_info->trans_mutex);
    }
    mutex_unlock(&root->fs_info->trans_mutex);
    finish_wait(&commit->commit_wait, &wait);
    return 0;
}
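A textbook double-check: commit_done is re-tested right after prepare_to_wait() and the function breaks out before ever dropping trans_mutex. As long as the committing side sets commit_done and issues its wakeup while coordinating through the same mutex, the completion cannot fall between the test and the sleep.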
Example 13: atf_log_read
static ssize_t atf_log_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
    ssize_t ret;
    unsigned int write_pos;
    unsigned int read_pos;
    DEFINE_WAIT(wait);

start:
    while (1) {
        atf_log_lock();
        write_pos = atf_buf_vir_ctl->info.atf_write_pos;
        read_pos = atf_buf_vir_ctl->info.atf_read_pos;
        /* pr_notice("atf_log_read: wait in wq\n"); */
        prepare_to_wait(&atf_log_wq, &wait, TASK_INTERRUPTIBLE);
        ret = (write_pos == read_pos);
        atf_log_unlock();
        if (!ret)
            break;
        if (file->f_flags & O_NONBLOCK) {
            ret = -EAGAIN;
            break;
        }
        if (signal_pending(current)) {
            ret = -EINTR;
            break;
        }
        schedule();
    }
    finish_wait(&atf_log_wq, &wait);
    /* pr_notice("atf_log_read: finish wait\n"); */
    if (ret)
        return ret;

    atf_log_lock();
    if (unlikely(write_pos == read_pos)) {
        atf_log_unlock();
        goto start;
    }
    ret = do_read_log_to_usr(buf, count);
    atf_buf_vir_ctl->info.atf_read_pos = index_to_pos(read_index);
    atf_buf_vir_ctl->info.atf_read_seq += ret;
    atf_log_unlock();
    return ret;
}
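The value of ret encodes three ways out of the wait loop: data became available (ret == 0 from the position comparison), the file is non-blocking (-EAGAIN), or a signal arrived (-EINTR). After the loop the function re-takes the log lock and re-reads the ring positions; if a concurrent reader drained the buffer in the meantime, it jumps back to start and waits again.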
Example 14: mempool_alloc
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
    void *element;
    unsigned long flags;
    wait_queue_t wait;
    gfp_t gfp_temp;

    might_sleep_if(gfp_mask & __GFP_WAIT);

    gfp_mask |= __GFP_NOMEMALLOC;   /* don't allocate emergency reserves */
    gfp_mask |= __GFP_NORETRY;      /* don't loop in __alloc_pages */
    gfp_mask |= __GFP_NOWARN;       /* failures are OK */

    gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:
    element = pool->alloc(gfp_temp, pool->pool_data);
    if (likely(element != NULL))
        return element;

    spin_lock_irqsave(&pool->lock, flags);
    if (likely(pool->curr_nr)) {
        element = remove_element(pool);
        spin_unlock_irqrestore(&pool->lock, flags);
        return element;
    }
    spin_unlock_irqrestore(&pool->lock, flags);

    /* We must not sleep in the GFP_ATOMIC case */
    if (!(gfp_mask & __GFP_WAIT))
        return NULL;

    /* Now start performing page reclaim */
    gfp_temp = gfp_mask;
    init_wait(&wait);
    prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
    smp_mb();
    if (!pool->curr_nr)
        io_schedule();
    finish_wait(&pool->wait, &wait);

    goto repeat_alloc;
}
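Once the pool is found empty, the task queues itself with prepare_to_wait() and only then re-reads pool->curr_nr; the explicit smp_mb() keeps that re-read from being satisfied by a stale value from before the task was visibly queued, so a concurrent mempool_free() either sees the waiter to wake or leaves an element for the re-check to find. io_schedule() rather than schedule() is used because mempool waits almost always sit on the I/O path.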
Example 15: scull_getwritespace
static int scull_getwritespace(struct scull_pipe *dev, struct file *filp)
{
    while (spacefree(dev) == 0) { /* full */
        DEFINE_WAIT(wait);

        up(&dev->sem);
        if (filp->f_flags & O_NONBLOCK)
            return -EAGAIN;
        prepare_to_wait(&dev->outq, &wait, TASK_INTERRUPTIBLE);
        if (spacefree(dev) == 0)
            schedule();
        finish_wait(&dev->outq, &wait);
        if (signal_pending(current))
            return -ERESTARTSYS; /* signal: tell the fs layer to handle it */
        if (down_interruptible(&dev->sem))
            return -ERESTARTSYS;
    }
    return 0;
}
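This is the scull pipe driver from the Linux Device Drivers book. The device semaphore must be dropped before sleeping, otherwise the reader that frees space could never take it to do so; spacefree() is then re-tested after prepare_to_wait(), and if a reader already drained the buffer in that window, the wakeup it issued keeps schedule() from blocking. Both the signal check and the interruptible re-acquisition of the semaphore bail out with -ERESTARTSYS, and the outer while re-validates the condition once the lock is held again.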