This article collects typical usage examples of the C++ signal_pending function. If you are wondering what signal_pending does, how it is called, or what real-world uses look like, the curated code examples below should help.
A total of 15 code examples of the signal_pending function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
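Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: the task marks itself TASK_INTERRUPTIBLE, re-checks its wake-up condition, sleeps with schedule(), and uses signal_pending(current) to back out with -ERESTARTSYS when a signal arrives. This sketch is not taken from any example below; my_waitq, my_event_flag and my_wait_for_event are placeholder names used only for illustration.
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);  /* hypothetical wait queue */
static int my_event_flag;                  /* hypothetical wake-up condition */

static int my_wait_for_event(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		/* mark ourselves interruptible before re-checking the condition */
		prepare_to_wait(&my_waitq, &wait, TASK_INTERRUPTIBLE);
		if (my_event_flag)
			break;
		/* a pending signal aborts the sleep; the syscall can be restarted */
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(&my_waitq, &wait);
	return ret;
}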
Example 1: key_wait_bit_intr
static int key_wait_bit_intr(void *flags)
{
schedule();
return signal_pending(current) ? -ERESTARTSYS : 0;
}
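For context, an action function of this shape is normally handed to the kernel's bit-wait helpers rather than called directly. The snippet below is a hypothetical caller assuming the older wait_on_bit(word, bit, action, mode) form that accepted an action callback; my_wait_until_ready and MY_BUSY_BIT are placeholder names, not part of this example.
/*
 * Hypothetical caller: sleep until MY_BUSY_BIT is cleared, letting
 * key_wait_bit_intr do the actual schedule(); returns -ERESTARTSYS
 * if the wait is interrupted by a signal.
 */
static int my_wait_until_ready(unsigned long *flags)
{
	return wait_on_bit(flags, MY_BUSY_BIT, key_wait_bit_intr,
			   TASK_INTERRUPTIBLE);
}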
Example 2: sys_wait4
//......... some code omitted here .........
current->state = TASK_INTERRUPTIBLE;
HW2_DBG("a");
read_lock(&tasklist_lock);
HW2_DBG("a");
tsk = current;
HW2_DBG("a");
do {
struct task_struct *p;
HW2_DBG("b");
for (p = tsk->p_cptr ; p ; p = p->p_osptr) {
if (pid>0) {
if (p->pid != pid)
continue;
} else if (!pid) {
if (p->pgrp != current->pgrp)
continue;
} else if (pid != -1) {
if (p->pgrp != -pid)
continue;
}
/* Wait for all children (clone and not) if __WALL is set;
* otherwise, wait for clone children *only* if __WCLONE is
* set; otherwise, wait for non-clone children *only*. (Note:
* A "clone" child here is one that reports to its parent
* using a signal other than SIGCHLD.) */
if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
&& !(options & __WALL))
continue;
flag = 1;
switch (p->state) {
case TASK_STOPPED:
if (!p->exit_code)
continue;
if (!(options & WUNTRACED) && !(p->ptrace & PT_PTRACED))
continue;
read_unlock(&tasklist_lock);
retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
if (!retval && stat_addr)
retval = put_user((p->exit_code << 8) | 0x7f, stat_addr);
if (!retval) {
p->exit_code = 0;
retval = p->pid;
}
goto end_wait4;
case TASK_ZOMBIE:
current->times.tms_cutime += p->times.tms_utime + p->times.tms_cutime;
current->times.tms_cstime += p->times.tms_stime + p->times.tms_cstime;
read_unlock(&tasklist_lock);
retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
if (!retval && stat_addr)
retval = put_user(p->exit_code, stat_addr);
if (retval)
goto end_wait4;
retval = p->pid;
if (p->p_opptr != p->p_pptr) {
write_lock_irq(&tasklist_lock);
REMOVE_LINKS(p);
p->p_pptr = p->p_opptr;
SET_LINKS(p);
do_notify_parent(p, SIGCHLD);
write_unlock_irq(&tasklist_lock);
} else
release_task(p);
goto end_wait4;
default:
continue;
}
}
HW2_DBG("c");
if (options & __WNOTHREAD)
break;
HW2_DBG("c");
tsk = next_thread(tsk);
HW2_DBG("c");
} while (tsk != current);
HW2_DBG("d");
read_unlock(&tasklist_lock);
HW2_DBG("d flag=%d\n", flag);
if (flag) {
retval = 0;
if (options & WNOHANG)
goto end_wait4;
retval = -ERESTARTSYS;
if (signal_pending(current))
goto end_wait4;
HW2_DBG("e");
schedule();
HW2_DBG("f");
goto repeat;
}
HW2_DBG("g");
retval = -ECHILD;
end_wait4:
HW2_DBG("h");
current->state = TASK_RUNNING;
HW2_DBG("h");
remove_wait_queue(&current->wait_chldexit,&wait);
HW2_DBG("h");
return retval;
}
Example 3: run_guest
/*H:030
* Let's jump straight to the main loop which runs the Guest.
* Remember, this is called by the Launcher reading /dev/lguest, and we keep
* going around and around until something interesting happens.
*/
int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
{
/* We stop running once the Guest is dead. */
while (!cpu->lg->dead) {
unsigned int irq;
bool more;
/* First we run any hypercalls the Guest wants done. */
if (cpu->hcall)
do_hypercalls(cpu);
/*
* It's possible the Guest did a NOTIFY hypercall to the
* Launcher.
*/
if (cpu->pending_notify) {
/*
* Does it just need to write to a registered
* eventfd (i.e. the appropriate virtqueue thread)?
*/
if (!send_notify_to_eventfd(cpu)) {
/* OK, we tell the main Launcher. */
if (put_user(cpu->pending_notify, user))
return -EFAULT;
return sizeof(cpu->pending_notify);
}
}
/*
* All long-lived kernel loops need to check with this horrible
* thing called the freezer. If the Host is trying to suspend,
* it stops us.
*/
try_to_freeze();
/* Check for signals */
if (signal_pending(current))
return -ERESTARTSYS;
/*
* Check if there are any interrupts which can be delivered now:
* if so, this sets up the handler to be executed when we next
* run the Guest.
*/
irq = interrupt_pending(cpu, &more);
if (irq < LGUEST_IRQS)
try_deliver_interrupt(cpu, irq, more);
/*
* Just make absolutely sure the Guest is still alive. One of
* those hypercalls could have been fatal, for example.
*/
if (cpu->lg->dead)
break;
/*
* If the Guest asked to be stopped, we sleep. The Guest's
* clock timer will wake us.
*/
if (cpu->halted) {
set_current_state(TASK_INTERRUPTIBLE);
/*
* Just before we sleep, make sure no interrupt snuck in
* which we should be doing.
*/
if (interrupt_pending(cpu, &more) < LGUEST_IRQS)
set_current_state(TASK_RUNNING);
else
schedule();
continue;
}
/*
* OK, now we're ready to jump into the Guest. First we put up
* the "Do Not Disturb" sign:
*/
local_irq_disable();
/* Actually run the Guest until something happens. */
lguest_arch_run_guest(cpu);
/* Now we're ready to be interrupted or moved to other CPUs */
local_irq_enable();
/* Now we deal with whatever happened to the Guest. */
lguest_arch_handle_trap(cpu);
}
/* Special case: Guest is 'dead' but wants a reboot. */
if (cpu->lg->dead == ERR_PTR(-ERESTART))
return -ERESTART;
/* The Guest is dead => "No such file or directory" */
return -ENOENT;
}
Example 4: adu_write
static ssize_t adu_write(struct file *file, const __user char *buffer,
size_t count, loff_t *ppos)
{
DECLARE_WAITQUEUE(waita, current);
struct adu_device *dev;
size_t bytes_written = 0;
size_t bytes_to_write;
size_t buffer_size;
unsigned long flags;
int retval;
dbg(2," %s : enter, count = %Zd", __func__, count);
dev = file->private_data;
retval = mutex_lock_interruptible(&dev->mtx);
if (retval)
goto exit_nolock;
/* verify that the device wasn't unplugged */
if (dev->udev == NULL) {
retval = -ENODEV;
printk(KERN_ERR "adutux: No device or device unplugged %d\n",
retval);
goto exit;
}
/* verify that we actually have some data to write */
if (count == 0) {
dbg(1," %s : write request of 0 bytes", __func__);
goto exit;
}
while (count > 0) {
add_wait_queue(&dev->write_wait, &waita);
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&dev->buflock, flags);
if (!dev->out_urb_finished) {
spin_unlock_irqrestore(&dev->buflock, flags);
mutex_unlock(&dev->mtx);
if (signal_pending(current)) {
dbg(1," %s : interrupted", __func__);
set_current_state(TASK_RUNNING);
retval = -EINTR;
goto exit_onqueue;
}
if (schedule_timeout(COMMAND_TIMEOUT) == 0) {
dbg(1, "%s - command timed out.", __func__);
retval = -ETIMEDOUT;
goto exit_onqueue;
}
remove_wait_queue(&dev->write_wait, &waita);
retval = mutex_lock_interruptible(&dev->mtx);
if (retval) {
retval = bytes_written ? bytes_written : retval;
goto exit_nolock;
}
dbg(4," %s : in progress, count = %Zd", __func__, count);
} else {
spin_unlock_irqrestore(&dev->buflock, flags);
set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->write_wait, &waita);
dbg(4," %s : sending, count = %Zd", __func__, count);
/* write the data into interrupt_out_buffer from userspace */
buffer_size = le16_to_cpu(dev->interrupt_out_endpoint->wMaxPacketSize);
bytes_to_write = count > buffer_size ? buffer_size : count;
dbg(4," %s : buffer_size = %Zd, count = %Zd, bytes_to_write = %Zd",
__func__, buffer_size, count, bytes_to_write);
if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write) != 0) {
retval = -EFAULT;
goto exit;
}
/* send off the urb */
usb_fill_int_urb(
dev->interrupt_out_urb,
dev->udev,
usb_sndintpipe(dev->udev, dev->interrupt_out_endpoint->bEndpointAddress),
dev->interrupt_out_buffer,
bytes_to_write,
adu_interrupt_out_callback,
dev,
dev->interrupt_out_endpoint->bInterval);
dev->interrupt_out_urb->actual_length = bytes_to_write;
dev->out_urb_finished = 0;
retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
if (retval < 0) {
dev->out_urb_finished = 1;
dev_err(&dev->udev->dev, "Couldn't submit "
"interrupt_out_urb %d\n", retval);
goto exit;
}
buffer += bytes_to_write;
count -= bytes_to_write;
//......... some code omitted here .........
Example 5: sock_xmit
/*
* Send or receive packet.
*/
static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
int msg_flags)
{
struct socket *sock = lo->sock;
int result;
struct msghdr msg;
struct kvec iov;
sigset_t blocked, oldset;
if (unlikely(!sock)) {
dev_err(disk_to_dev(lo->disk),
"Attempted %s on closed socket in sock_xmit\n",
(send ? "send" : "recv"));
return -EINVAL;
}
/* Allow interception of SIGKILL only
* Don't allow other signals to interrupt the transmission */
siginitsetinv(&blocked, sigmask(SIGKILL));
sigprocmask(SIG_SETMASK, &blocked, &oldset);
do {
sock->sk->sk_allocation = GFP_NOIO;
iov.iov_base = buf;
iov.iov_len = size;
msg.msg_name = NULL;
msg.msg_namelen = 0;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
if (send) {
struct timer_list ti;
if (lo->xmit_timeout) {
init_timer(&ti);
ti.function = nbd_xmit_timeout;
ti.data = (unsigned long)current;
ti.expires = jiffies + lo->xmit_timeout;
add_timer(&ti);
}
result = kernel_sendmsg(sock, &msg, &iov, 1, size);
if (lo->xmit_timeout)
del_timer_sync(&ti);
} else
result = kernel_recvmsg(sock, &msg, &iov, 1, size,
msg.msg_flags);
if (signal_pending(current)) {
siginfo_t info;
printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
task_pid_nr(current), current->comm,
dequeue_signal_lock(current, &current->blocked, &info));
result = -EINTR;
sock_shutdown(lo, !send);
break;
}
if (result <= 0) {
if (result == 0)
result = -EPIPE; /* short read */
break;
}
size -= result;
buf += result;
} while (size > 0);
sigprocmask(SIG_SETMASK, &oldset, NULL);
return result;
}
Example 6: do_msgsnd
long do_msgsnd(int msqid, long mtype, void __user *mtext,
size_t msgsz, int msgflg)
{
struct msg_queue *msq;
struct msg_msg *msg;
int err;
struct ipc_namespace *ns;
ns = current->nsproxy->ipc_ns;
if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
return -EINVAL;
if (mtype < 1)
return -EINVAL;
msg = load_msg(mtext, msgsz);
if (IS_ERR(msg))
return PTR_ERR(msg);
msg->m_type = mtype;
msg->m_ts = msgsz;
msq = msg_lock_check(ns, msqid);
if (IS_ERR(msq)) {
err = PTR_ERR(msq);
goto out_free;
}
for (;;) {
struct msg_sender s;
err = -EACCES;
if (ipcperms(&msq->q_perm, S_IWUGO))
goto out_unlock_free;
err = security_msg_queue_msgsnd(msq, msg, msgflg);
if (err)
goto out_unlock_free;
if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
1 + msq->q_qnum <= msq->q_qbytes) {
break;
}
/* queue full, wait: */
if (msgflg & IPC_NOWAIT) {
err = -EAGAIN;
goto out_unlock_free;
}
ss_add(msq, &s);
ipc_rcu_getref(msq);
msg_unlock(msq);
schedule();
ipc_lock_by_ptr(&msq->q_perm);
ipc_rcu_putref(msq);
if (msq->q_perm.deleted) {
err = -EIDRM;
goto out_unlock_free;
}
ss_del(&s);
if (signal_pending(current)) {
err = -ERESTARTNOHAND;
goto out_unlock_free;
}
}
msq->q_lspid = task_tgid_vnr(current);
msq->q_stime = get_seconds();
if (!pipelined_send(msq, msg)) {
/* no one is waiting for this message, enqueue it */
list_add_tail(&msg->m_list, &msq->q_messages);
msq->q_cbytes += msgsz;
msq->q_qnum++;
atomic_add(msgsz, &ns->msg_bytes);
atomic_inc(&ns->msg_hdrs);
}
err = 0;
msg = NULL;
out_unlock_free:
msg_unlock(msq);
out_free:
if (msg != NULL)
free_msg(msg);
return err;
}
Example 7: osprd_ioctl
/*
* osprd_ioctl(inode, filp, cmd, arg)
* Called to perform an ioctl on the named file.
*/
int osprd_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
int r = 0;
// is file open for writing?
int filp_writable = (filp->f_mode & FMODE_WRITE) != 0;
osprd_info_t *d = file2osprd(filp); // device info
DEFINE_WAIT(wait); // wait queue entry in case we block
wait.func = &default_wake_function;
// This line avoids compiler warnings; you may remove it.
(void) filp_writable, (void) d;
// Set 'r' to the ioctl's return value: 0 on success, negative on error
if (cmd == OSPRDIOCACQUIRE) {
// EXERCISE: Lock the ramdisk.
//
// If *filp is a writable file, then attempt to write-lock
// the ramdisk; otherwise attempt to read-lock the ramdisk.
//
// This lock request must block using 'd->blockq' until:
// 1) no other process holds a write lock;
// 2) either the request is for a read lock, or no other process
// holds a read lock; and
// 3) lock requests should be serviced in order, so no process
// that blocked earlier is still blocked waiting for the
// lock.
//
// If a process acquires a lock, mark this fact by setting
// 'filp->f_flags |= F_OSPRD_LOCKED'. You may also need to
// keep track of how many read and write locks are held:
// change the 'osprd_info_t' structure to do this.
//
// Also wake up processes waiting on 'd->blockq' as needed.
//
// If the lock request would cause a deadlock, return -EDEADLK.
// If the lock request blocks and is awoken by a signal, then
// return -ERESTARTSYS.
// Otherwise, if we can grant the lock request, return 0.
// Your code here (instead of the next two lines).
if(filp_writable){
// Attempt to take write lock
if(d->num_ramdisks_open){
d->num_ramdisks_open = 0;
r = -EDEADLK;
return r;
}
if(waitqueue_active(&d->blockq) || d->write_lock_count ||
d->read_lock_count || (filp->f_flags & F_OSPRD_LOCKED)) {
/* Enqueue writer process and call scheduler if
* i. Wait queue is not empty
* ii. No. of readers > 0
* iii. No. of writers > 0
* iv. Ramdisk has been locked
*/
osp_spin_lock(&d->mutex);
prepare_to_wait_exclusive(&d->blockq,&wait,TASK_INTERRUPTIBLE);
osp_spin_unlock(&d->mutex);
do{
schedule();
/* if a signal has occurred, return -ERESTARTSYS to the caller */
if(signal_pending(current)){
r = -ERESTARTSYS;
return r;
}
}while(d->write_lock_count || d->read_lock_count ||
(filp->f_flags & F_OSPRD_LOCKED));
/* All condtions for locking satisfied; unblock (dequeue) */
finish_wait(&d->blockq, &wait);
}
/* Acquire write lock */
osp_spin_lock(&d->mutex);
filp->f_flags |= F_OSPRD_LOCKED;
d->write_lock_count++;
osp_spin_unlock(&d->mutex);
} else {
// Attempt to take read lock
/* Enqueue reader process and call scheduler if
* i. Wait queue is not empty
* ii. No. of writers > 0
* iii. Ramdisk has been locked
*/
if(waitqueue_active(&d->blockq) || d->write_lock_count ||
(filp->f_flags & F_OSPRD_LOCKED)) {
osp_spin_lock(&d->mutex);
prepare_to_wait_exclusive(&d->blockq,&wait,TASK_INTERRUPTIBLE);
osp_spin_unlock(&d->mutex);
do{
//......... some code omitted here .........
Example 8: pp_read
static ssize_t pp_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
unsigned int minor = iminor(file_inode(file));
struct pp_struct *pp = file->private_data;
char *kbuffer;
ssize_t bytes_read = 0;
struct parport *pport;
int mode;
if (!(pp->flags & PP_CLAIMED)) {
/* Don't have the port claimed */
pr_debug(CHRDEV "%x: claim the port first\n", minor);
return -EINVAL;
}
/* Trivial case. */
if (count == 0)
return 0;
kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
if (!kbuffer)
return -ENOMEM;
pport = pp->pdev->port;
mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
parport_set_timeout(pp->pdev,
(file->f_flags & O_NONBLOCK) ?
PARPORT_INACTIVITY_O_NONBLOCK :
pp->default_inactivity);
while (bytes_read == 0) {
ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE);
if (mode == IEEE1284_MODE_EPP) {
/* various specials for EPP mode */
int flags = 0;
size_t (*fn)(struct parport *, void *, size_t, int);
if (pp->flags & PP_W91284PIC)
flags |= PARPORT_W91284PIC;
if (pp->flags & PP_FASTREAD)
flags |= PARPORT_EPP_FAST;
if (pport->ieee1284.mode & IEEE1284_ADDR)
fn = pport->ops->epp_read_addr;
else
fn = pport->ops->epp_read_data;
bytes_read = (*fn)(pport, kbuffer, need, flags);
} else {
bytes_read = parport_read(pport, kbuffer, need);
}
if (bytes_read != 0)
break;
if (file->f_flags & O_NONBLOCK) {
bytes_read = -EAGAIN;
break;
}
if (signal_pending(current)) {
bytes_read = -ERESTARTSYS;
break;
}
cond_resched();
}
parport_set_timeout(pp->pdev, pp->default_inactivity);
if (bytes_read > 0 && copy_to_user(buf, kbuffer, bytes_read))
bytes_read = -EFAULT;
kfree(kbuffer);
pp_enable_irq(pp);
return bytes_read;
}
Example 9: pp_write
static ssize_t pp_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned int minor = iminor(file_inode(file));
struct pp_struct *pp = file->private_data;
char *kbuffer;
ssize_t bytes_written = 0;
ssize_t wrote;
int mode;
struct parport *pport;
if (!(pp->flags & PP_CLAIMED)) {
/* Don't have the port claimed */
pr_debug(CHRDEV "%x: claim the port first\n", minor);
return -EINVAL;
}
kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
if (!kbuffer)
return -ENOMEM;
pport = pp->pdev->port;
mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
parport_set_timeout(pp->pdev,
(file->f_flags & O_NONBLOCK) ?
PARPORT_INACTIVITY_O_NONBLOCK :
pp->default_inactivity);
while (bytes_written < count) {
ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE);
if (copy_from_user(kbuffer, buf + bytes_written, n)) {
bytes_written = -EFAULT;
break;
}
if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) {
/* do a fast EPP write */
if (pport->ieee1284.mode & IEEE1284_ADDR) {
wrote = pport->ops->epp_write_addr(pport,
kbuffer, n, PARPORT_EPP_FAST);
} else {
wrote = pport->ops->epp_write_data(pport,
kbuffer, n, PARPORT_EPP_FAST);
}
} else {
wrote = parport_write(pp->pdev->port, kbuffer, n);
}
if (wrote <= 0) {
if (!bytes_written)
bytes_written = wrote;
break;
}
bytes_written += wrote;
if (file->f_flags & O_NONBLOCK) {
if (!bytes_written)
bytes_written = -EAGAIN;
break;
}
if (signal_pending(current))
break;
cond_resched();
}
parport_set_timeout(pp->pdev, pp->default_inactivity);
kfree(kbuffer);
pp_enable_irq(pp);
return bytes_written;
}
Example 10: DEFINE_WAIT
static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block, int *err)
{
struct ivtv *itv = s->itv;
struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
struct ivtv_buffer *buf;
DEFINE_WAIT(wait);
*err = 0;
while (1) {
if (s->type == IVTV_ENC_STREAM_TYPE_MPG) {
/* Process pending program info updates and pending VBI data */
ivtv_update_pgm_info(itv);
if (time_after(jiffies,
itv->dualwatch_jiffies +
msecs_to_jiffies(1000))) {
itv->dualwatch_jiffies = jiffies;
ivtv_dualwatch(itv);
}
if (test_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
!test_bit(IVTV_F_S_APPL_IO, &s_vbi->s_flags)) {
while ((buf = ivtv_dequeue(s_vbi, &s_vbi->q_full))) {
/* byteswap and process VBI data */
ivtv_process_vbi_data(itv, buf, s_vbi->dma_pts, s_vbi->type);
ivtv_enqueue(s_vbi, buf, &s_vbi->q_free);
}
}
buf = &itv->vbi.sliced_mpeg_buf;
if (buf->readpos != buf->bytesused) {
return buf;
}
}
/* do we have leftover data? */
buf = ivtv_dequeue(s, &s->q_io);
if (buf)
return buf;
/* do we have new data? */
buf = ivtv_dequeue(s, &s->q_full);
if (buf) {
if ((buf->b_flags & IVTV_F_B_NEED_BUF_SWAP) == 0)
return buf;
buf->b_flags &= ~IVTV_F_B_NEED_BUF_SWAP;
if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
/* byteswap MPG data */
ivtv_buf_swap(buf);
else if (s->type != IVTV_DEC_STREAM_TYPE_VBI) {
/* byteswap and process VBI data */
ivtv_process_vbi_data(itv, buf, s->dma_pts, s->type);
}
return buf;
}
/* return if end of stream */
if (s->type != IVTV_DEC_STREAM_TYPE_VBI && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
IVTV_DEBUG_INFO("EOS %s\n", s->name);
return NULL;
}
/* return if file was opened with O_NONBLOCK */
if (non_block) {
*err = -EAGAIN;
return NULL;
}
/* wait for more data to arrive */
mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
/* New buffers might have become available before we were added to the waitqueue */
if (!s->q_full.buffers)
schedule();
finish_wait(&s->waitq, &wait);
mutex_lock(&itv->serialize_lock);
if (signal_pending(current)) {
/* return if a signal was received */
IVTV_DEBUG_INFO("User stopped %s\n", s->name);
*err = -EINTR;
return NULL;
}
}
}
Example 11: ivtv_write
static ssize_t ivtv_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
{
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
struct yuv_playback_info *yi = &itv->yuv_info;
struct ivtv_buffer *buf;
struct ivtv_queue q;
int bytes_written = 0;
int mode;
int rc;
DEFINE_WAIT(wait);
IVTV_DEBUG_HI_FILE("write %zd bytes to %s\n", count, s->name);
if (s->type != IVTV_DEC_STREAM_TYPE_MPG &&
s->type != IVTV_DEC_STREAM_TYPE_YUV &&
s->type != IVTV_DEC_STREAM_TYPE_VOUT)
/* not decoder streams */
return -EINVAL;
/* Try to claim this stream */
if (ivtv_claim_stream(id, s->type))
return -EBUSY;
/* This stream does not need to start any decoding */
if (s->type == IVTV_DEC_STREAM_TYPE_VOUT) {
int elems = count / sizeof(struct v4l2_sliced_vbi_data);
set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
return ivtv_write_vbi_from_user(itv,
(const struct v4l2_sliced_vbi_data __user *)user_buf, elems);
}
mode = s->type == IVTV_DEC_STREAM_TYPE_MPG ? OUT_MPG : OUT_YUV;
if (ivtv_set_output_mode(itv, mode) != mode) {
ivtv_release_stream(s);
return -EBUSY;
}
ivtv_queue_init(&q);
set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
/* Start decoder (returns 0 if already started) */
rc = ivtv_start_decoding(id, itv->speed);
if (rc) {
IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name);
/* failure, clean up */
clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
clear_bit(IVTV_F_S_APPL_IO, &s->s_flags);
return rc;
}
retry:
/* If possible, just DMA the entire frame - Check the data transfer size
since we may get here before the stream has been fully set-up */
if (mode == OUT_YUV && s->q_full.length == 0 && itv->dma_data_req_size) {
while (count >= itv->dma_data_req_size) {
rc = ivtv_yuv_udma_stream_frame(itv, (void __user *)user_buf);
if (rc < 0)
return rc;
bytes_written += itv->dma_data_req_size;
user_buf += itv->dma_data_req_size;
count -= itv->dma_data_req_size;
}
if (count == 0) {
IVTV_DEBUG_HI_FILE("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused);
return bytes_written;
}
}
for (;;) {
/* Gather buffers */
while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_io)))
ivtv_enqueue(s, buf, &q);
while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_free))) {
ivtv_enqueue(s, buf, &q);
}
if (q.buffers)
break;
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
/* New buffers might have become free before we were added to the waitqueue */
if (!s->q_free.buffers)
schedule();
finish_wait(&s->waitq, &wait);
mutex_lock(&itv->serialize_lock);
if (signal_pending(current)) {
IVTV_DEBUG_INFO("User stopped %s\n", s->name);
return -EINTR;
}
}
/* copy user data into buffers */
while ((buf = ivtv_dequeue(s, &q))) {
//......... some code omitted here .........
Example 12: jffs2_garbage_collect_thread
static int jffs2_garbage_collect_thread(void *_c)
{
struct jffs2_sb_info *c = _c;
daemonize("jffs2_gcd_mtd%d", c->mtd->index);
allow_signal(SIGKILL);
allow_signal(SIGSTOP);
allow_signal(SIGCONT);
c->gc_task = current;
up(&c->gc_thread_start);
set_user_nice(current, 10);
for (;;) {
allow_signal(SIGHUP);
if (!jffs2_thread_should_wake(c)) {
set_current_state (TASK_INTERRUPTIBLE);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
/* Yes, there's a race here; we checked jffs2_thread_should_wake()
before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
matter - We don't care if we miss a wakeup, because the GC thread
is only an optimisation anyway. */
schedule();
}
if (current->flags & PF_FREEZE) {
refrigerator(0);
/* refrigerator() should recalc sigpending for us
but doesn't. No matter - allow_signal() will. */
continue;
}
cond_resched();
/* Put_super will send a SIGKILL and then wait on the sem.
*/
while (signal_pending(current)) {
siginfo_t info;
unsigned long signr;
signr = dequeue_signal_lock(current, &current->blocked, &info);
switch(signr) {
case SIGSTOP:
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n"));
set_current_state(TASK_STOPPED);
schedule();
break;
case SIGKILL:
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n"));
die:
spin_lock(&c->erase_completion_lock);
c->gc_task = NULL;
spin_unlock(&c->erase_completion_lock);
complete_and_exit(&c->gc_thread_exit, 0);
case SIGHUP:
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));
break;
default:
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr));
}
}
/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
disallow_signal(SIGHUP);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n");
goto die;
}
}
}
Example 13: xts_thread
/*
* This task waits until at least one touchscreen is touched. It then loops
* digitizing and generating events until no touchscreens are being touched.
*/
static int
xts_thread(void *arg)
{
int any_pens_down;
struct xts_dev *dev;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
xts_task = tsk;
daemonize();
reparent_to_init();
strcpy(xts_task->comm, XTS_NAME);
xts_task->tty = NULL;
/* only want to receive SIGKILL */
spin_lock_irq(&xts_task->sigmask_lock);
siginitsetinv(&xts_task->blocked, sigmask(SIGKILL));
recalc_sigpending(xts_task);
spin_unlock_irq(&xts_task->sigmask_lock);
complete(&task_sync);
add_wait_queue(&irq_wait, &wait);
any_pens_down = 0;
for (;;) {
/*
* Block waiting for interrupt or if any pens are down, either
* an interrupt or timeout to sample again.
*/
set_current_state(TASK_INTERRUPTIBLE);
if (any_pens_down)
schedule_timeout(HZ / 100);
while (signal_pending(tsk)) {
siginfo_t info;
/* Only honor the signal if we're cleaning up */
if (task_shutdown)
goto exit;
/*
* Someone else sent us a kill (probably the
* shutdown scripts "Sending all processes the
* KILL signal"). Just dequeue it and ignore
* it.
*/
spin_lock_irq(&current->sigmask_lock);
(void)dequeue_signal(&current->blocked, &info);
spin_unlock_irq(&current->sigmask_lock);
}
schedule();
any_pens_down = 0;
for (dev = dev_list; dev; dev = dev->next_dev) {
if (dev->pen_is_down) {
u32 x, y;
XTouchscreen_GetPosition_2D(&dev->Touchscreen,
&x, &y);
event_add(dev, 255, (u16) x, (u16) y);
dev->pen_was_down = 1;
any_pens_down = 1;
} else if (dev->pen_was_down) {
event_add(dev, 0, 0, 0);
dev->pen_was_down = 0;
}
}
}
exit:
remove_wait_queue(&irq_wait, &wait);
xts_task = NULL;
complete_and_exit(&task_sync, 0);
}
Example 14: svc_accept
static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
struct atmsvc_msg *msg;
struct atm_vcc *old_vcc = ATM_SD(sock);
struct atm_vcc *new_vcc;
int error;
lock_sock(sk);
error = svc_create(newsock,0);
if (error)
goto out;
new_vcc = ATM_SD(newsock);
DPRINTK("svc_accept %p -> %p\n",old_vcc,new_vcc);
while (1) {
DEFINE_WAIT(wait);
prepare_to_wait(old_vcc->sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
while (!(skb = skb_dequeue(&old_vcc->sk->sk_receive_queue)) &&
sigd) {
if (test_bit(ATM_VF_RELEASED,&old_vcc->flags)) break;
if (test_bit(ATM_VF_CLOSE,&old_vcc->flags)) {
error = -sk->sk_err;
break;
}
if (flags & O_NONBLOCK) {
error = -EAGAIN;
break;
}
release_sock(sk);
schedule();
lock_sock(sk);
if (signal_pending(current)) {
error = -ERESTARTSYS;
break;
}
prepare_to_wait(old_vcc->sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
}
finish_wait(old_vcc->sk->sk_sleep, &wait);
if (error)
goto out;
if (!skb) {
error = -EUNATCH;
goto out;
}
msg = (struct atmsvc_msg *) skb->data;
new_vcc->qos = msg->qos;
set_bit(ATM_VF_HASQOS,&new_vcc->flags);
new_vcc->remote = msg->svc;
new_vcc->local = msg->local;
new_vcc->sap = msg->sap;
error = vcc_connect(newsock, msg->pvc.sap_addr.itf,
msg->pvc.sap_addr.vpi, msg->pvc.sap_addr.vci);
dev_kfree_skb(skb);
old_vcc->sk->sk_ack_backlog--;
if (error) {
sigd_enq2(NULL,as_reject,old_vcc,NULL,NULL,
&old_vcc->qos,error);
error = error == -EAGAIN ? -EBUSY : error;
goto out;
}
/* wait should be short, so we ignore the non-blocking flag */
set_bit(ATM_VF_WAITING, &new_vcc->flags);
prepare_to_wait(new_vcc->sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
sigd_enq(new_vcc,as_accept,old_vcc,NULL,NULL);
while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) {
release_sock(sk);
schedule();
lock_sock(sk);
prepare_to_wait(new_vcc->sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
}
finish_wait(new_vcc->sk->sk_sleep, &wait);
if (!sigd) {
error = -EUNATCH;
goto out;
}
if (!new_vcc->sk->sk_err)
break;
if (new_vcc->sk->sk_err != ERESTARTSYS) {
error = -new_vcc->sk->sk_err;
goto out;
}
}
newsock->state = SS_CONNECTED;
out:
release_sock(sk);
return error;
}
Example 15: __mutex_lock_common
/*
* Lock a mutex (possibly interruptible), slowpath:
*/
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
{
struct task_struct *task = current;
struct mutex_waiter waiter;
unsigned int old_val;
unsigned long flags;
spin_lock_mutex(&lock->wait_lock, flags);
debug_mutex_lock_common(lock, &waiter);
mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
debug_mutex_add_waiter(lock, &waiter, task->thread_info);
/* add waiting tasks to the end of the waitqueue (FIFO): */
list_add_tail(&waiter.list, &lock->wait_list);
waiter.task = task;
for (;;) {
/*
* Let's try to take the lock again - this is needed even if
* we get here for the first time (shortly after failing to
* acquire the lock), to make sure that we get a wakeup once
* it's unlocked. Later on, if we sleep, this is the
* operation that gives us the lock. We xchg it to -1, so
* that when we release the lock, we properly wake up the
* other waiters:
*/
old_val = atomic_xchg(&lock->count, -1);
if (old_val == 1)
break;
/*
* got a signal? (This code gets eliminated in the
* TASK_UNINTERRUPTIBLE case.)
*/
if (unlikely(state == TASK_INTERRUPTIBLE &&
signal_pending(task))) {
mutex_remove_waiter(lock, &waiter, task->thread_info);
mutex_release(&lock->dep_map, 1, _RET_IP_);
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
return -EINTR;
}
__set_task_state(task, state);
/* didn't get the lock, go to sleep: */
spin_unlock_mutex(&lock->wait_lock, flags);
schedule();
spin_lock_mutex(&lock->wait_lock, flags);
}
/* got the lock - rejoice! */
mutex_remove_waiter(lock, &waiter, task->thread_info);
debug_mutex_set_owner(lock, task->thread_info);
/* set it to 0 if there are no waiters left: */
if (likely(list_empty(&lock->wait_list)))
atomic_set(&lock->count, 0);
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
return 0;
}