本文整理汇总了C语言中spin_unlock_irq函数的典型用法代码示例。如果您正苦于以下问题:C spin_unlock_irq函数的具体用法?C spin_unlock_irq怎么用?C spin_unlock_irq使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了spin_unlock_irq函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C语言代码示例。
示例1: probe_irq_on
/*
 * probe_irq_on - begin an IRQ autoprobe sequence.
 *
 * Starts up every unassigned interrupt line twice (once to flush any
 * long-pending interrupt, once armed with IRQ_AUTODETECT|IRQ_WAITING),
 * waits for spurious interrupts to fire, and shuts down any line that
 * triggered.  Returns a bitmask of IRQs 0..31 that stayed quiet and are
 * therefore valid probe candidates.
 *
 * Holds probe_sem across the whole probe; the matching probe_irq_off()
 * releases it.  Each descriptor is inspected under its own lock with
 * local interrupts disabled.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	down(&probe_sem);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		/* Access via desc (== &irq_desc[i]) for consistency with
		 * the loops below. */
		if (!desc->action)
			desc->handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ synchronize_irq();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			/* Nonzero startup() means the line is already
			 * pending. */
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ synchronize_irq();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				/* Only the first 32 lines fit the result
				 * mask. */
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}
示例2: do_syslog
int do_syslog(int type, char __user *buf, int len, bool from_file)
{
unsigned i, j, limit, count;
int do_clear = 0;
char c;
int error;
error = check_syslog_permissions(type, from_file);
if (error)
goto out;
error = security_syslog(type);
if (error)
return error;
switch (type) {
case SYSLOG_ACTION_CLOSE: /* Close log */
break;
case SYSLOG_ACTION_OPEN: /* Open log */
break;
case SYSLOG_ACTION_READ: /* Read from log */
error = -EINVAL;
if (!buf || len < 0)
goto out;
error = 0;
if (!len)
goto out;
if (!access_ok(VERIFY_WRITE, buf, len)) {
error = -EFAULT;
goto out;
}
error = wait_event_interruptible(log_wait,
(log_start - log_end));
if (error)
goto out;
i = 0;
spin_lock_irq(&logbuf_lock);
while (!error && (log_start != log_end) && i < len) {
c = LOG_BUF(log_start);
log_start++;
spin_unlock_irq(&logbuf_lock);
error = __put_user(c,buf);
buf++;
i++;
cond_resched();
spin_lock_irq(&logbuf_lock);
}
spin_unlock_irq(&logbuf_lock);
if (!error)
error = i;
break;
/* Read/clear last kernel messages */
case SYSLOG_ACTION_READ_CLEAR:
do_clear = 1;
/* FALL THRU */
/* Read last kernel messages */
case SYSLOG_ACTION_READ_ALL:
error = -EINVAL;
if (!buf || len < 0)
goto out;
error = 0;
if (!len)
goto out;
if (!access_ok(VERIFY_WRITE, buf, len)) {
error = -EFAULT;
goto out;
}
count = len;
if (count > log_buf_len)
count = log_buf_len;
spin_lock_irq(&logbuf_lock);
if (count > logged_chars)
count = logged_chars;
if (do_clear)
logged_chars = 0;
limit = log_end;
/*
* __put_user() could sleep, and while we sleep
* printk() could overwrite the messages
* we try to copy to user space. Therefore
* the messages are copied in reverse. <manfreds>
*/
for (i = 0; i < count && !error; i++) {
j = limit-1-i;
if (j + log_buf_len < log_end)
break;
c = LOG_BUF(j);
spin_unlock_irq(&logbuf_lock);
error = __put_user(c,&buf[count-1-i]);
cond_resched();
spin_lock_irq(&logbuf_lock);
}
spin_unlock_irq(&logbuf_lock);
if (error)
break;
error = i;
if (i != count) {
int offset = count-error;
/* buffer overflow during copy, correct user buffer. */
for (i = 0; i < error; i++) {
//.........这里部分代码省略.........
示例3: rtc_mrst_do_shutdown
/*
 * rtc_mrst_do_shutdown - mask all Moorestown RTC interrupt sources.
 *
 * Clears RTC_IRQMASK via mrst_irq_disable() under rtc_lock with local
 * interrupts off, so the update cannot race with the RTC IRQ handler.
 */
static void rtc_mrst_do_shutdown(void)
{
	spin_lock_irq(&rtc_lock);
	mrst_irq_disable(&mrst_rtc, RTC_IRQMASK);
	spin_unlock_irq(&rtc_lock);
}
示例4: nilfs_btnode_prepare_change_key
/**
 * nilfs_btnode_prepare_change_key - prepare to move the contents of the
 * block for the old key to one of the new key.
 * @btnc: page cache holding the B-tree node blocks
 * @ctxt: change-key context carrying oldkey, newkey and the buffer head
 *
 * The old buffer will not be removed, but might be reused for the new
 * buffer.  It might return -ENOMEM because of memory allocation errors,
 * and might return -EIO because of disk read errors.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh, *nbh;
	struct inode *inode = NILFS_BTNC_I(btnc);
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	int err;

	/* Same key: nothing to move. */
	if (oldkey == newkey)
		return 0;

	obh = ctxt->bh;
	ctxt->newbh = NULL;

	if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
		/* Block size == page size: re-key the existing page in place. */
		lock_page(obh->b_page);
		/*
		 * We cannot call radix_tree_preload for the kernels older
		 * than 2.6.23, because it is not exported for modules.
		 */
retry:
		err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
		if (err)
			goto failed_unlock;
		/* BUG_ON(oldkey != obh->b_page->index); */
		if (unlikely(oldkey != obh->b_page->index))
			NILFS_PAGE_BUG(obh->b_page,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);

		/* Insert the same page under newkey; the oldkey entry stays
		 * until the commit step removes it. */
		spin_lock_irq(&btnc->tree_lock);
		err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
		spin_unlock_irq(&btnc->tree_lock);
		/*
		 * Note: page->index will not change to newkey until
		 * nilfs_btnode_commit_change_key() will be called.
		 * To protect the page in intermediate state, the page lock
		 * is held.
		 */
		radix_tree_preload_end();
		if (!err)
			return 0;
		else if (err != -EEXIST)
			goto failed_unlock;

		/* -EEXIST: a stale page occupies newkey; evict it and retry. */
		err = invalidate_inode_pages2_range(btnc, newkey, newkey);
		if (!err)
			goto retry;

		/* fallback to copy mode */
		unlock_page(obh->b_page);
	}

	/* Copy mode: allocate a fresh block for newkey; contents are copied
	 * at commit time. */
	nbh = nilfs_btnode_create_block(btnc, newkey);
	if (!nbh)
		return -ENOMEM;

	BUG_ON(nbh == obh);
	ctxt->newbh = nbh;
	return 0;

failed_unlock:
	unlock_page(obh->b_page);
	return err;
}
示例5: acm_port_activate
/*
 * acm_port_activate - tty_port activate callback for a CDC-ACM device.
 *
 * Wakes the USB interface, starts the control interrupt URB, raises
 * DTR/RTS, unthrottles the port and submits the read URBs.  Runs under
 * acm->mutex to serialize against disconnect.  Returns 0 on success or a
 * negative errno (-ENODEV if the device is already disconnected).
 */
static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
{
	struct acm *acm = container_of(port, struct acm, port);
	int retval = -ENODEV;

	dev_dbg(&acm->control->dev, "%s\n", __func__);

	mutex_lock(&acm->mutex);
	if (acm->disconnected)
		goto disconnected;

	/* Resume the interface; balanced by the put below or on error. */
	retval = usb_autopm_get_interface(acm->control);
	if (retval)
		goto error_get_interface;

	/*
	 * FIXME: Why do we need this? Allocating 64K of physically contiguous
	 * memory is really nasty...
	 */
	set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
	acm->control->needs_remote_wakeup = 1;

	acm->ctrlurb->dev = acm->dev;
	if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
		dev_err(&acm->control->dev,
			"%s - usb_submit_urb(ctrl irq) failed\n", __func__);
		goto error_submit_urb;
	}

	/* Assert DTR/RTS; failure is fatal only if the device advertises
	 * line-control support. */
	acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS;
	if (acm_set_control(acm, acm->ctrlout) < 0 &&
	    (acm->ctrl_caps & USB_CDC_CAP_LINE))
		goto error_set_control;

	usb_autopm_put_interface(acm->control);

	/*
	 * Unthrottle device in case the TTY was closed while throttled.
	 */
	spin_lock_irq(&acm->read_lock);
	acm->throttled = 0;
	acm->throttle_req = 0;
	spin_unlock_irq(&acm->read_lock);

	if (acm_submit_read_urbs(acm, GFP_KERNEL))
		goto error_submit_read_urbs;

	mutex_unlock(&acm->mutex);

	return 0;

	/* Error unwinding, in reverse order of acquisition. */
error_submit_read_urbs:
	acm->ctrlout = 0;
	acm_set_control(acm, acm->ctrlout);
error_set_control:
	usb_kill_urb(acm->ctrlurb);
error_submit_urb:
	usb_autopm_put_interface(acm->control);
error_get_interface:
disconnected:
	mutex_unlock(&acm->mutex);
	return retval;
}
示例6: s5p_ehci_resume
/*
 * s5p_ehci_resume - system-resume handler for the S5P/Exynos EHCI host.
 *
 * Re-enables the controller clock and PHY.  If the controller kept its
 * configuration across suspend (FLAG_CF still set) only the interrupt
 * mask is restored; otherwise the controller lost power and is halted,
 * reset and fully reprogrammed, after which the runtime-PM state is
 * refreshed.  Always returns 0.
 */
static int s5p_ehci_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_ehci_hcd *s5p_ehci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ehci->hcd;
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);

	clk_enable(s5p_ehci->clk);

	s5p_ehci_phy_init(pdev);

	/* if EHCI was off, hcd was removed */
	if (!s5p_ehci->power_on) {
		dev_info(dev, "Nothing to do for the device (power off)\n");
		return 0;
	}

	/* Honour the minimum interval between bus-state changes. */
	if (time_before(jiffies, ehci->next_statechange))
		usleep_range(10000, 11000);

	/* Mark hardware accessible again as we are out of D3 state by now */
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);

	/* Configuration survived: just restore the IRQ mask and bail out. */
	if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
		int mask = INTR_MASK;

		if (!hcd->self.root_hub->do_remote_wakeup)
			mask &= ~STS_PCD;
		ehci_writel(ehci, mask, &ehci->regs->intr_enable);
		ehci_readl(ehci, &ehci->regs->intr_enable);
		return 0;
	}

	ehci_dbg(ehci, "lost power, restarting\n");
	usb_root_hub_lost_power(hcd->self.root_hub);

	(void) ehci_halt(ehci);
	(void) ehci_reset(ehci);

	/* emptying the schedule aborts any urbs */
	spin_lock_irq(&ehci->lock);
	if (ehci->reclaim)
		end_unlink_async(ehci);
	ehci_work(ehci);
	spin_unlock_irq(&ehci->lock);

	ehci_writel(ehci, ehci->command, &ehci->regs->command);
	ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
	ehci_readl(ehci, &ehci->regs->command);	/* unblock posted writes */

	/* here we "know" root ports should always stay powered */
	ehci_port_power(ehci, 1);

	hcd->state = HC_STATE_SUSPENDED;

	/* Update runtime PM status and clear runtime_error */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	/* Prevent device from runtime suspend during resume time */
	pm_runtime_get_sync(dev);

#ifdef CONFIG_MDM_HSIC_PM
	set_host_stat(hsic_pm_dev, POWER_ON);
	wait_dev_pwr_stat(hsic_pm_dev, POWER_ON);
#endif
#if defined(CONFIG_LINK_DEVICE_HSIC) || defined(CONFIG_LINK_DEVICE_USB) \
	|| defined(CONFIG_MDM_HSIC_PM)
	pm_runtime_mark_last_busy(&hcd->self.root_hub->dev);
#endif

	return 0;
}
示例7: nilfs_copy_back_pages
/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i, n;
	pgoff_t index = 0;
	int err;

	pagevec_init(&pvec, 0);
repeat:
	/* Fetch the next batch of source pages; empty lookup means done. */
	n = pagevec_lookup(&pvec, smap, index, PAGEVEC_SIZE);
	if (!n)
		return;
	index = pvec.pages[n - 1]->index + 1;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;
		pgoff_t offset = page->index;

		lock_page(page);
		dpage = find_lock_page(dmap, offset);
		if (dpage) {
			/* override existing page on the destination cache */
			WARN_ON(PageDirty(dpage));
			nilfs_copy_page(dpage, page, 0);
			unlock_page(dpage);
			page_cache_release(dpage);
		} else {
			struct page *page2;

			/* move the page to the destination cache */
			spin_lock_irq(&smap->tree_lock);
			page2 = radix_tree_delete(&smap->page_tree, offset);
			WARN_ON(page2 != page);

			smap->nrpages--;
			spin_unlock_irq(&smap->tree_lock);

			spin_lock_irq(&dmap->tree_lock);
			err = radix_tree_insert(&dmap->page_tree, offset, page);
			if (unlikely(err < 0)) {
				/* Insertion failed: detach and drop the
				 * cache reference. */
				WARN_ON(err == -EEXIST);
				page->mapping = NULL;
				page_cache_release(page); /* for cache */
			} else {
				page->mapping = dmap;
				dmap->nrpages++;
				/* Carry the dirty tag over to the new tree. */
				if (PageDirty(page))
					radix_tree_tag_set(&dmap->page_tree,
							   offset,
							   PAGECACHE_TAG_DIRTY);
			}
			spin_unlock_irq(&dmap->tree_lock);
		}
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	goto repeat;
}
示例8: pnx4008_dma_unlock
/*
 * pnx4008_dma_unlock - release the global DMA spinlock and re-enable
 * local interrupts; pairs with the matching lock helper.
 */
static inline void pnx4008_dma_unlock(void)
{
	spin_unlock_irq(&dma_lock);
}
示例9: snd_mixart_send_msg
/*
 * snd_mixart_send_msg - send a message to the miXart board and wait for
 * its answer.
 * @mgr: board manager
 * @request: message to send; its message_id is checked against the reply
 * @max_resp_size: capacity of @resp_data in bytes
 * @resp_data: buffer receiving the response payload
 *
 * Sleeps uninterruptibly until the answer arrives or MSG_TIMEOUT_JIFFIES
 * elapses.  Returns 0 on success, -EIO on timeout, or the error returned
 * by send_msg()/get_msg().  Serialized by mgr->msg_mutex.
 */
int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request, int max_resp_size, void *resp_data)
{
	struct mixart_msg resp;
	u32 msg_frame = 0; /* set to 0, so it's no notification to wait for, but the answer */
	int err;
	wait_queue_t wait;
	long timeout;

	mutex_lock(&mgr->msg_mutex);

	init_waitqueue_entry(&wait, current);

	spin_lock_irq(&mgr->msg_lock);
	/* send the message */
	err = send_msg(mgr, request, max_resp_size, 1, &msg_frame);	/* send and mark the answer pending */
	if (err) {
		spin_unlock_irq(&mgr->msg_lock);
		mutex_unlock(&mgr->msg_mutex);
		return err;
	}

	/* Queue ourselves before dropping msg_lock so a fast answer cannot
	 * slip in between send_msg() and schedule_timeout(). */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&mgr->msg_sleep, &wait);
	spin_unlock_irq(&mgr->msg_lock);
	timeout = schedule_timeout(MSG_TIMEOUT_JIFFIES);
	remove_wait_queue(&mgr->msg_sleep, &wait);

	if (! timeout) {
		/* error - no ack */
		mutex_unlock(&mgr->msg_mutex);
		snd_printk(KERN_ERR "error: no response on msg %x\n", msg_frame);
		return -EIO;
	}

	/* retrieve the answer into the same struct mixart_msg */
	resp.message_id = 0;
	resp.uid = (struct mixart_uid){0,0};
	resp.data = resp_data;
	resp.size = max_resp_size;

	err = get_msg(mgr, &resp, msg_frame);

	if( request->message_id != resp.message_id )
		snd_printk(KERN_ERR "RESPONSE ERROR!\n");

	mutex_unlock(&mgr->msg_mutex);
	return err;
}
/*
 * snd_mixart_send_msg_wait_notif - send a message and wait for a specific
 * notification event instead of a direct answer.
 * @mgr: board manager
 * @request: message to send
 * @notif_event: notification event to wait for; must be a non-zero
 *	MSG_TYPE_NOTIFY event without MSG_CANCEL_NOTIFY_MASK set
 *
 * Sleeps uninterruptibly until the notification arrives or
 * MSG_TIMEOUT_JIFFIES elapses.  Returns 0 on success, -EINVAL for an
 * invalid @notif_event, -EIO on timeout, or the error from send_msg().
 *
 * Fix: the original text was corrupted by HTML-entity mangling — the
 * send_msg() argument read "¬if_event" ("&not" eaten from
 * "&notif_event"), which does not compile; restored to &notif_event.
 */
int snd_mixart_send_msg_wait_notif(struct mixart_mgr *mgr,
				   struct mixart_msg *request, u32 notif_event)
{
	int err;
	wait_queue_t wait;
	long timeout;

	if (snd_BUG_ON(!notif_event))
		return -EINVAL;
	if (snd_BUG_ON((notif_event & MSG_TYPE_MASK) != MSG_TYPE_NOTIFY))
		return -EINVAL;
	if (snd_BUG_ON(notif_event & MSG_CANCEL_NOTIFY_MASK))
		return -EINVAL;

	mutex_lock(&mgr->msg_mutex);

	init_waitqueue_entry(&wait, current);

	spin_lock_irq(&mgr->msg_lock);
	/* send the message */
	err = send_msg(mgr, request, MSG_DEFAULT_SIZE, 1, &notif_event);	/* send and mark the notification event pending */
	if (err) {
		spin_unlock_irq(&mgr->msg_lock);
		mutex_unlock(&mgr->msg_mutex);
		return err;
	}

	/* Queue before dropping msg_lock so the notification cannot be
	 * missed between send_msg() and schedule_timeout(). */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&mgr->msg_sleep, &wait);
	spin_unlock_irq(&mgr->msg_lock);
	timeout = schedule_timeout(MSG_TIMEOUT_JIFFIES);
	remove_wait_queue(&mgr->msg_sleep, &wait);

	if (! timeout) {
		/* error - no ack */
		mutex_unlock(&mgr->msg_mutex);
		snd_printk(KERN_ERR "error: notification %x not received\n", notif_event);
		return -EIO;
	}

	mutex_unlock(&mgr->msg_mutex);
	return 0;
}
示例10: netx_set_termios
/*
 * netx_set_termios - apply new termios settings to a netx UART port.
 *
 * Translates the c_cflag word-size/stop/parity/flow-control bits into
 * the LINE_CR and RTS_CR registers, computes the baud-rate divisor, and
 * rebuilds the receive-path status masks.  While the registers are
 * rewritten, interrupts are masked, the transmitter drained, and the
 * UART briefly disabled; everything runs under port->lock.
 */
static void
netx_set_termios(struct uart_port *port, struct termios *termios,
		 struct termios *old)
{
	unsigned int baud, quot;
	unsigned char old_cr;
	unsigned char line_cr = LINE_CR_FEN;
	unsigned char rts_cr = 0;

	/* Word length. */
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		line_cr |= LINE_CR_5BIT;
		break;
	case CS6:
		line_cr |= LINE_CR_6BIT;
		break;
	case CS7:
		line_cr |= LINE_CR_7BIT;
		break;
	case CS8:
		line_cr |= LINE_CR_8BIT;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		line_cr |= LINE_CR_STP2;

	/* Parity enable; even parity unless PARODD. */
	if (termios->c_cflag & PARENB) {
		line_cr |= LINE_CR_PEN;
		if (!(termios->c_cflag & PARODD))
			line_cr |= LINE_CR_EPS;
	}

	/* Hardware RTS/CTS flow control. */
	if (termios->c_cflag & CRTSCTS)
		rts_cr = RTS_CR_AUTO | RTS_CR_CTS_CTR | RTS_CR_RTS_POL;

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
	/* Divisor computed stepwise to avoid 32-bit overflow. */
	quot = baud * 4096;
	quot /= 1000;
	quot *= 256;
	quot /= 100000;

	spin_lock_irq(&port->lock);

	uart_update_timeout(port, termios->c_cflag, baud);

	old_cr = readl(port->membase + UART_CR);

	/* disable interrupts */
	writel(old_cr & ~(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE),
	       port->membase + UART_CR);

	/* drain transmitter */
	while (readl(port->membase + UART_FR) & FR_BUSY);

	/* disable UART */
	writel(old_cr & ~CR_UART_EN, port->membase + UART_CR);

	/* modem status interrupts */
	old_cr &= ~CR_MSIE;
	if (UART_ENABLE_MS(port, termios->c_cflag))
		old_cr |= CR_MSIE;

	writel((quot>>8) & 0xff, port->membase + UART_BAUDDIV_MSB);
	writel(quot & 0xff, port->membase + UART_BAUDDIV_LSB);
	writel(line_cr, port->membase + UART_LINE_CR);

	writel(rts_cr, port->membase + UART_RTS_CR);

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= SR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= SR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 *
		 * NOTE(review): despite the comment, this ORs in SR_PE
		 * (already set above) rather than an overrun status bit —
		 * looks like it should be the SR overrun flag; confirm
		 * against the netx SR_* register definitions.
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= SR_PE;
	}

	/* Status bits the receive path should report. */
	port->read_status_mask = 0;
	if (termios->c_iflag & (BRKINT | PARMRK))
		port->read_status_mask |= SR_BE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= SR_PE | SR_FE;

	/* Re-enable the UART with the (possibly updated) control word. */
	writel(old_cr, port->membase + UART_CR);

	spin_unlock_irq(&port->lock);
}
示例11: msm_hsic_resume_thread
static int msm_hsic_resume_thread(void *data)
{
struct msm_hsic_hcd *mehci = data;
struct usb_hcd *hcd = hsic_to_hcd(mehci);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
u32 temp;
unsigned long resume_needed = 0;
int retry_cnt = 0;
int tight_resume = 0;
struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
dbg_log_event(NULL, "Resume RH", 0);
/* keep delay between bus states */
if (time_before(jiffies, ehci->next_statechange))
usleep_range(5000, 5000);
spin_lock_irq(&ehci->lock);
if (!HCD_HW_ACCESSIBLE(hcd)) {
spin_unlock_irq(&ehci->lock);
mehci->resume_status = -ESHUTDOWN;
complete(&mehci->rt_completion);
return 0;
}
if (unlikely(ehci->debug)) {
if (!dbgp_reset_prep())
ehci->debug = NULL;
else
dbgp_external_startup();
}
/* at least some APM implementations will try to deliver
* IRQs right away, so delay them until we're ready.
*/
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
/* re-init operational registers */
ehci_writel(ehci, 0, &ehci->regs->segment);
ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next);
/*CMD_RUN will be set after, PORT_RESUME gets cleared*/
if (ehci->resume_sof_bug)
ehci->command &= ~CMD_RUN;
/* restore CMD_RUN, framelist size, and irq threshold */
ehci_writel(ehci, ehci->command, &ehci->regs->command);
/* manually resume the ports we suspended during bus_suspend() */
resume_again:
if (retry_cnt >= RESUME_RETRY_LIMIT) {
pr_info("retry count(%d) reached max, resume in tight loop\n",
retry_cnt);
tight_resume = 1;
}
temp = ehci_readl(ehci, &ehci->regs->port_status[0]);
temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
if (test_bit(0, &ehci->bus_suspended) && (temp & PORT_SUSPEND)) {
temp |= PORT_RESUME;
set_bit(0, &resume_needed);
}
dbg_log_event(NULL, "FPR: Set", temp);
ehci_writel(ehci, temp, &ehci->regs->port_status[0]);
/* HSIC controller has a h/w bug due to which it can try to send SOFs
* (start of frames) during port resume resulting in phy lockup. HSIC hw
* controller in MSM clears FPR bit after driving the resume signal for
* 20ms. Workaround is to stop SOFs before driving resume and then start
* sending SOFs immediately. Need to send SOFs within 3ms of resume
* completion otherwise peripheral may enter undefined state. As
* usleep_range does not gurantee exact sleep time, GPTimer is used to
* to time the resume sequence. If driver exceeds allowable time SOFs,
* repeat the resume process.
*/
if (ehci->resume_sof_bug && resume_needed) {
if (!tight_resume) {
mehci->resume_again = 0;
ehci_writel(ehci, GPT_LD(RESUME_SIGNAL_TIME_MS),
&mehci->timer->gptimer0_ld);
ehci_writel(ehci, GPT_RESET | GPT_RUN,
&mehci->timer->gptimer0_ctrl);
ehci_writel(ehci, INTR_MASK | STS_GPTIMER0_INTERRUPT,
&ehci->regs->intr_enable);
ehci_writel(ehci, GPT_LD(RESUME_SIGNAL_TIME_SOF_MS),
&mehci->timer->gptimer1_ld);
ehci_writel(ehci, GPT_RESET | GPT_RUN,
&mehci->timer->gptimer1_ctrl);
spin_unlock_irq(&ehci->lock);
if (pdata && pdata->swfi_latency)
pm_qos_update_request(&mehci->pm_qos_req_dma,
pdata->swfi_latency + 1);
wait_for_completion(&mehci->gpt0_completion);
if (pdata && pdata->swfi_latency)
pm_qos_update_request(&mehci->pm_qos_req_dma,
PM_QOS_DEFAULT_VALUE);
//.........这里部分代码省略.........
示例12: irix_sigreturn
/*
 * irix_sigreturn - IRIX-compatible sigreturn syscall (MIPS).
 *
 * Restores CPU state (epc, general registers, hi/lo, optionally the FPU
 * registers) from the user-space sigcontext saved at signal delivery,
 * restores the blocked-signal mask, then jumps straight to syscall_exit.
 * On a bad or inaccessible frame the task is killed with SIGSEGV.
 *
 * Fix: the original text was corrupted by HTML-entity mangling —
 * "&current" had collapsed to "¤t" and "&regs" to "®s",
 * which does not compile; restored here.
 */
asmlinkage void
irix_sigreturn(struct pt_regs *regs)
{
	struct sigctx_irix5 *context, *magic;
	unsigned long umask, mask;
	u64 *fregs;
	int sig, i, base = 0;
	sigset_t blocked;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* Syscall number 1000 shifts the argument registers by one. */
	if (regs->regs[2] == 1000)
		base = 1;

	context = (struct sigctx_irix5 *) regs->regs[base + 4];
	magic = (struct sigctx_irix5 *) regs->regs[base + 5];
	sig = (int) regs->regs[base + 6];
#ifdef DEBUG_SIG
	printk("[%s:%d] IRIX sigreturn(scp[%p],ucp[%p],sig[%d])\n",
	       current->comm, current->pid, context, magic, sig);
#endif
	if (!context)
		context = magic;
	if (!access_ok(VERIFY_READ, context, sizeof(struct sigctx_irix5)))
		goto badframe;

#ifdef DEBUG_SIG
	dump_irix5_sigctx(context);
#endif

	/* Restore pc and every general register flagged in rmask. */
	__get_user(regs->cp0_epc, &context->pc);
	umask = context->rmask; mask = 2;
	for (i = 1; i < 32; i++, mask <<= 1) {
		if (umask & mask)
			__get_user(regs->regs[i], &context->regs[i]);
	}
	__get_user(regs->hi, &context->hi);
	__get_user(regs->lo, &context->lo);

	/* Bit 0 of rmask plus usedfp means FPU state was saved too. */
	if ((umask & 1) && context->usedfp) {
		fregs = (u64 *) &current->thread.fpu;
		for (i = 0; i < 32; i++)
			fregs[i] = (u64) context->fpregs[i];
		__get_user(current->thread.fpu.hard.fcr31, &context->fpcsr);
	}

	/* XXX do sigstack crapola here... XXX */

	if (__copy_from_user(&blocked, &context->sigset, sizeof(blocked)))
		goto badframe;

	sigdelsetmask(&blocked, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = blocked;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * Don't let your children do this ...
	 */
	if (current_thread_info()->flags & TIF_SYSCALL_TRACE)
		do_syscall_trace(regs, 1);
	__asm__ __volatile__(
		"move\t$29,%0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
示例13: virtio_config_disable
/*
 * virtio_config_disable - suppress delivery of config-change callbacks.
 *
 * Clears dev->config_enabled under config_lock with local interrupts
 * off, so the update cannot race with a config-change interrupt.
 */
static void virtio_config_disable(struct virtio_device *dev)
{
	spin_lock_irq(&dev->config_lock);
	dev->config_enabled = false;
	spin_unlock_irq(&dev->config_lock);
}
示例14: do_signal
/* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
unsigned long orig_i0, int restart_syscall)
{
unsigned long signr;
siginfo_t info;
struct k_sigaction *ka;
if (!oldset)
oldset = ¤t->blocked;
#ifdef CONFIG_SPARC32_COMPAT
if (current->thread.flags & SPARC_FLAG_32BIT) {
extern asmlinkage int do_signal32(sigset_t *, struct pt_regs *,
unsigned long, int);
return do_signal32(oldset, regs, orig_i0, restart_syscall);
}
#endif
for (;;) {
spin_lock_irq(¤t->sigmask_lock);
signr = dequeue_signal(¤t->blocked, &info);
spin_unlock_irq(¤t->sigmask_lock);
if (!signr) break;
if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
current->exit_code = signr;
current->state = TASK_STOPPED;
notify_parent(current, SIGCHLD);
schedule();
if (!(signr = current->exit_code))
continue;
current->exit_code = 0;
if (signr == SIGSTOP)
continue;
/* Update the siginfo structure. Is this good? */
if (signr != info.si_signo) {
info.si_signo = signr;
info.si_errno = 0;
info.si_code = SI_USER;
info.si_pid = current->p_pptr->pid;
info.si_uid = current->p_pptr->uid;
}
/* If the (new) signal is now blocked, requeue it. */
if (sigismember(¤t->blocked, signr)) {
send_sig_info(signr, &info, current);
continue;
}
}
ka = ¤t->sig->action[signr-1];
if(ka->sa.sa_handler == SIG_IGN) {
if(signr != SIGCHLD)
continue;
/* sys_wait4() grabs the master kernel lock, so
* we need not do so, that sucker should be
* threaded and would not be that difficult to
* do anyways.
*/
while(sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
;
continue;
}
if(ka->sa.sa_handler == SIG_DFL) {
unsigned long exit_code = signr;
if(current->pid == 1)
continue;
switch(signr) {
case SIGCONT: case SIGCHLD: case SIGWINCH:
continue;
case SIGTSTP: case SIGTTIN: case SIGTTOU:
if (is_orphaned_pgrp(current->pgrp))
continue;
case SIGSTOP:
if (current->ptrace & PT_PTRACED)
continue;
current->state = TASK_STOPPED;
current->exit_code = signr;
if(!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags &
SA_NOCLDSTOP))
notify_parent(current, SIGCHLD);
schedule();
continue;
case SIGQUIT: case SIGILL: case SIGTRAP:
case SIGABRT: case SIGFPE: case SIGSEGV:
case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
if (do_coredump(signr, regs))
exit_code |= 0x80;
#ifdef DEBUG_SIGNALS
//.........这里部分代码省略.........
示例15: handle_signal
/*
 * handle_signal - set up delivery of one signal to the current task (SH).
 *
 * Adjusts the saved registers for system-call restart semantics, rolls
 * back an interrupted gUSA atomic sequence, builds the (rt_)signal frame
 * on the user stack via setup_rt_frame()/setup_frame(), and blocks the
 * signal per sa_mask unless SA_NODEFER is set.
 *
 * Fix: the original text was corrupted by HTML-entity mangling —
 * "&current" had collapsed to "¤t" in the k_sigaction lookup and
 * the siglock/sigorsets/sigaddset calls, which does not compile;
 * restored here.
 */
static void
handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
	      struct pt_regs * regs)
{
	struct k_sigaction *ka = &current->sighand->action[sig-1];

	/* Are we from a system call? */
	if (regs->tra >= 0) {
		/* If so, check system call restarting.. */
		switch (regs->regs[0]) {
		case -ERESTARTNOHAND:
			regs->regs[0] = -EINTR;
			break;

		case -ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->regs[0] = -EINTR;
				break;
			}
		/* fallthrough */
		case -ERESTARTNOINTR:
			/* Back up pc to re-execute the trap instruction. */
			regs->pc -= 2;
		}
	} else {
		/* gUSA handling */
#ifdef CONFIG_PREEMPT
		unsigned long flags;

		local_irq_save(flags);
#endif
		if (regs->regs[15] >= 0xc0000000) {
			int offset = (int)regs->regs[15];

			/* Reset stack pointer: clear critical region mark */
			regs->regs[15] = regs->regs[1];
			if (regs->pc < regs->regs[0])
				/* Go to rewind point #1 */
				regs->pc = regs->regs[0] + offset - 2;
		}
#ifdef CONFIG_PREEMPT
		local_irq_restore(flags);
#endif
	}

	/* Set up the stack frame */
	if (ka->sa.sa_flags & SA_SIGINFO)
		setup_rt_frame(sig, ka, info, oldset, regs);
	else
		setup_frame(sig, ka, oldset, regs);

	if (ka->sa.sa_flags & SA_ONESHOT)
		ka->sa.sa_handler = SIG_DFL;

	/* Block the handler's sa_mask plus the signal itself. */
	if (!(ka->sa.sa_flags & SA_NODEFER)) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
		sigaddset(&current->blocked, sig);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
}