This article collects typical usage examples of the BUG function in C++. If you have been wondering what exactly BUG does, how to call it, or what real-world uses of BUG look like, the hand-picked code examples below should help.
The following shows 15 code examples of the BUG function, ordered by popularity by default.
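Most of the examples below come from Linux kernel code, where BUG() marks a state the author considers impossible: it prints a diagnostic and kills the current task, and BUG_ON(cond) is its conditional form. The following is only a minimal user-space sketch of that calling pattern; the real macros are provided by <linux/bug.h> and expand to an architecture-specific trap rather than abort(), but the sketch shows the shape the examples below rely on.

/* Minimal user-space sketch of the BUG()/BUG_ON() pattern. The real kernel
 * macros (<linux/bug.h>) trap into the architecture's fault handler; abort()
 * here is only a stand-in so the sketch can run outside the kernel. */
#include <stdio.h>
#include <stdlib.h>

#define BUG() do {                                              \
        fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__);  \
        abort();                                                \
    } while (0)

#define BUG_ON(cond) do { if (cond) BUG(); } while (0)

struct buffer {
    void   *data;
    size_t  len;
};

/* Callers must never pass NULL; that would be a logic error, not an I/O error. */
static void consume(const struct buffer *buf)
{
    BUG_ON(buf == NULL);        /* precondition that must always hold */
    if (buf->len == 0)
        return;                 /* empty input is legal, just nothing to do */
    /* ... process buf->data ... */
}

int main(void)
{
    struct buffer b = { .data = NULL, .len = 0 };
    consume(&b);    /* fine */
    consume(NULL);  /* fires BUG() and aborts the process */
    return 0;
}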
Example 1: do_page_fault
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
unsigned long address)
{
struct vm_area_struct *vma = NULL;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
const int field = sizeof(unsigned long) * 2;
unsigned long flags = 0;
siginfo_t info;
int fault;
info.si_code = SEGV_MAPERR;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
goto vmalloc_fault;
#ifdef MODULE_START
if (unlikely(address >= MODULE_START && address < MODULE_END))
goto vmalloc_fault;
#endif
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto bad_area_nosemaphore;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
info.si_code = SEGV_ACCERR;
if (write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
flags |= FAULT_FLAG_WRITE;
} else {
if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
goto bad_area;
}
survive:
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
//... (the rest of this function is omitted) ...
Example 2: setup_local_APIC
void __devinit setup_local_APIC(void)
{
unsigned long oldvalue, value, ver, maxlvt;
int i, j;
/* Pound the ESR really hard over the head with a big hammer - mbligh */
if (esr_disable) {
apic_write(APIC_ESR, 0);
apic_write(APIC_ESR, 0);
apic_write(APIC_ESR, 0);
apic_write(APIC_ESR, 0);
}
value = apic_read(APIC_LVR);
ver = GET_APIC_VERSION(value);
BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);
/*
* Double-check whether this APIC is really registered.
*/
if (!apic_id_registered())
BUG();
/*
* Intel recommends to set DFR, LDR and TPR before enabling
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
init_apic_ldr();
/*
* Set Task Priority to reject any interrupts below FIRST_DYNAMIC_VECTOR.
*/
apic_write_around(APIC_TASKPRI, (FIRST_DYNAMIC_VECTOR & 0xF0) - 0x10);
/*
* After a crash, we no longer service the interrupts and a pending
* interrupt from previous kernel might still have ISR bit set.
*
* Most probably by now CPU has serviced that pending interrupt and
* it might not have done the ack_APIC_irq() because it thought the
* interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
* does not clear the ISR bit and the CPU thinks it has already serviced
* the interrupt. Hence a vector might get locked. It was noticed
* for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
*/
for (i = APIC_ISR_NR - 1; i >= 0; i--) {
value = apic_read(APIC_ISR + i*0x10);
for (j = 31; j >= 0; j--) {
if (value & (1<<j))
ack_APIC_irq();
}
}
/*
* Now that we are all set up, enable the APIC
*/
value = apic_read(APIC_SPIV);
value &= ~APIC_VECTOR_MASK;
/*
* Enable APIC
*/
value |= APIC_SPIV_APIC_ENABLED;
/*
* Some unknown Intel IO/APIC (or APIC) errata is biting us with
* certain networking cards. If high frequency interrupts are
* happening on a particular IOAPIC pin, plus the IOAPIC routing
* entry is masked/unmasked at a high rate as well then sooner or
* later IOAPIC line gets 'stuck', no more interrupts are received
* from the device. If focus CPU is disabled then the hang goes
* away, oh well :-(
*
* [ This bug can be reproduced easily with level-triggered
* PCI Ne2000 networking cards and PII/PIII processors, dual
* BX chipset. ]
*/
/*
* Actually disabling the focus CPU check just makes the hang less
* frequent as it makes the interrupt distribution model be more
* like LRU than MRU (the short-term load is more even across CPUs).
* See also the comment in end_level_ioapic_irq(). --macro
*/
#if 1
/* Enable focus processor (bit==0) */
value &= ~APIC_SPIV_FOCUS_DISABLED;
#else
/* Disable focus processor (bit==1) */
value |= APIC_SPIV_FOCUS_DISABLED;
#endif
/*
* Set spurious IRQ vector
*/
value |= SPURIOUS_APIC_VECTOR;
/*
* Enable directed EOI
*/
if ( directed_eoi_enabled )
//... (the rest of this function is omitted) ...
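Example 2 also uses the compile-time sibling of BUG(): BUILD_BUG_ON(cond) makes the build fail whenever the compiler can prove cond is true, so an impossible configuration is caught before it could ever reach a runtime BUG(). Below is a rough stand-alone sketch of the idea; the macro body is a simplified stand-in for the kernel's version, and the struct and its 8-byte layout are made-up illustrations that assume a typical ABI.

/* Simplified stand-in for the kernel's BUILD_BUG_ON(): a negative array size
 * makes compilation fail whenever the condition is true. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct packet_header {
    unsigned int   magic;    /* 4 bytes */
    unsigned char  flags;    /* 1 byte  */
    unsigned char  version;  /* 1 byte  */
    unsigned short len;      /* 2 bytes */
};

void check_layout(void)
{
    /* Refuses to compile if the on-wire header ever stops being 8 bytes
     * (assuming a typical ABI with no extra padding here). */
    BUILD_BUG_ON(sizeof(struct packet_header) != 8);
}

int main(void)
{
    check_layout();
    return 0;
}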
Example 3: play_dead
static inline void play_dead(void)
{
BUG();
}
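Example 3 is the degenerate use: play_dead() exists only so that calling it at all is the bug. Because BUG() never returns (the kernel declares it so the compiler knows control stops there), it can also terminate the default: arm of a switch in a non-void function, as Examples 7, 9 and 11 do for cases that should be unreachable. A hedged sketch of that pattern, again with abort() standing in for the real macro and with made-up names:

/* Sketch only: abort() stands in for the kernel's BUG(), and the enum and
 * function names are invented for illustration. */
#include <stdlib.h>

#define BUG() do { abort(); } while (0)

enum mem_type { MEM_GMR, MEM_MOB };

int bind_memory(enum mem_type type)
{
    switch (type) {
    case MEM_GMR:
        return 1;
    case MEM_MOB:
        return 2;
    default:
        BUG();      /* unreachable: every valid mem_type is handled above */
    }
    return 0;       /* not reached; silences warnings on toolchains that do
                     * not model abort() as noreturn */
}

int main(void)
{
    return bind_memory(MEM_GMR);
}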
Example 4: do_check
static int
do_check( PKT_secret_key *sk, const char *tryagain_text, int mode,
int *canceled )
{
gpg_error_t err;
u16 csum=0;
int i, res;
size_t nbytes;
if( sk->is_protected ) { /* remove the protection */
DEK *dek = NULL;
u32 keyid[4]; /* 4! because we need two of them */
gcry_cipher_hd_t cipher_hd=NULL;
PKT_secret_key *save_sk;
if( sk->protect.s2k.mode == 1001 ) {
log_info(_("secret key parts are not available\n"));
return G10ERR_UNU_SECKEY;
}
if( sk->protect.algo == CIPHER_ALGO_NONE )
BUG();
if( openpgp_cipher_test_algo( sk->protect.algo ) ) {
log_info(_("protection algorithm %d%s is not supported\n"),
sk->protect.algo,sk->protect.algo==1?" (IDEA)":"" );
if (sk->protect.algo==CIPHER_ALGO_IDEA)
{
write_status (STATUS_RSA_OR_IDEA);
idea_cipher_warn (0);
}
return G10ERR_CIPHER_ALGO;
}
if(gcry_md_test_algo (sk->protect.s2k.hash_algo))
{
log_info(_("protection digest %d is not supported\n"),
sk->protect.s2k.hash_algo);
return G10ERR_DIGEST_ALGO;
}
keyid_from_sk( sk, keyid );
keyid[2] = keyid[3] = 0;
if( !sk->is_primary ) {
keyid[2] = sk->main_keyid[0];
keyid[3] = sk->main_keyid[1];
}
dek = passphrase_to_dek( keyid, sk->pubkey_algo, sk->protect.algo,
&sk->protect.s2k, mode,
tryagain_text, canceled );
if (!dek && canceled && *canceled)
return GPG_ERR_CANCELED;
err = openpgp_cipher_open (&cipher_hd, sk->protect.algo,
GCRY_CIPHER_MODE_CFB,
(GCRY_CIPHER_SECURE
| (sk->protect.algo >= 100 ?
0 : GCRY_CIPHER_ENABLE_SYNC)));
if (err)
log_fatal ("cipher open failed: %s\n", gpg_strerror (err) );
err = gcry_cipher_setkey (cipher_hd, dek->key, dek->keylen);
if (err)
log_fatal ("set key failed: %s\n", gpg_strerror (err) );
xfree(dek);
save_sk = copy_secret_key( NULL, sk );
gcry_cipher_setiv ( cipher_hd, sk->protect.iv, sk->protect.ivlen );
csum = 0;
if( sk->version >= 4 ) {
int ndata;
unsigned int ndatabits;
byte *p, *data;
u16 csumc = 0;
i = pubkey_get_npkey(sk->pubkey_algo);
assert ( gcry_mpi_get_flag (sk->skey[i], GCRYMPI_FLAG_OPAQUE ));
p = gcry_mpi_get_opaque ( sk->skey[i], &ndatabits );
ndata = (ndatabits+7)/8;
if ( ndata > 1 && p )
csumc = p[ndata-2] << 8 | p[ndata-1];
data = xmalloc_secure ( ndata );
if (p)
gcry_cipher_decrypt ( cipher_hd, data, ndata, p, ndata );
else
memset (data, 0, ndata);
gcry_mpi_release (sk->skey[i]); sk->skey[i] = NULL ;
p = data;
if (sk->protect.sha1chk) {
/* This is the new SHA1 checksum method to detect
tampering with the key as used by the Klima/Rosa
attack */
sk->csum = 0;
csum = 1;
if( ndata < 20 )
log_error("not enough bytes for SHA-1 checksum\n");
else {
gcry_md_hd_t h;
//... (the rest of this function is omitted) ...
Example 5: write_mft_record_nolock
/**
* write_mft_record_nolock - write out a mapped (extent) mft record
* @ni: ntfs inode describing the mapped (extent) mft record
* @m: mapped (extent) mft record to write
* @sync: if true, wait for i/o completion
*
* Write the mapped (extent) mft record @m described by the (regular or extent)
* ntfs inode @ni to backing store. If the mft record @m has a counterpart in
* the mft mirror, that is also updated.
*
* On success, clean the mft record and return 0. On error, leave the mft
* record dirty and return -errno. The caller should call make_bad_inode() on
* the base inode to ensure no more access happens to this inode. We do not do
* it here as the caller may want to finish writing other extent mft records
* first to minimize on-disk metadata inconsistencies.
*
* NOTE: We always perform synchronous i/o and ignore the @sync parameter.
* However, if the mft record has a counterpart in the mft mirror and @sync is
* true, we write the mft record, wait for i/o completion, and only then write
* the mft mirror copy. This ensures that if the system crashes either the mft
* or the mft mirror will contain a self-consistent mft record @m. If @sync is
* false on the other hand, we start i/o on both and then wait for completion
* on them. This provides a speedup but no longer guarantees that you will end
* up with a self-consistent mft record in the case of a crash but if you asked
* for asynchronous writing you probably do not care about that anyway.
*
* TODO: If @sync is false, want to do truly asynchronous i/o, i.e. just
* schedule i/o via ->writepage or do it via kntfsd or whatever.
*/
int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
{
ntfs_volume *vol = ni->vol;
struct page *page = ni->page;
unsigned int blocksize = vol->sb->s_blocksize;
int max_bhs = vol->mft_record_size / blocksize;
struct buffer_head *bhs[max_bhs];
struct buffer_head *bh, *head;
unsigned int block_start, block_end, m_start, m_end;
int i_bhs, nr_bhs, err = 0;
ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
BUG_ON(NInoAttr(ni));
BUG_ON(!max_bhs);
BUG_ON(!PageLocked(page));
/*
* If the ntfs_inode is clean no need to do anything. If it is dirty,
* mark it as clean now so that it can be redirtied later on if needed.
* There is no danger of races since the caller is holding the locks
* for the mft record @m and the page it is in.
*/
if (!NInoTestClearDirty(ni))
goto done;
/* Make sure we have mapped buffers. */
if (!page_has_buffers(page)) {
no_buffers_err_out:
ntfs_error(vol->sb, "Writing mft records without existing "
"buffers is not implemented yet. %s",
ntfs_please_email);
err = -EOPNOTSUPP;
goto err_out;
}
bh = head = page_buffers(page);
if (!bh)
goto no_buffers_err_out;
nr_bhs = 0;
block_start = 0;
m_start = ni->page_ofs;
m_end = m_start + vol->mft_record_size;
do {
block_end = block_start + blocksize;
/*
* If the buffer is outside the mft record, just skip it,
* clearing it if it is dirty to make sure it is not written
* out. It should never be marked dirty but better be safe.
*/
if ((block_end <= m_start) || (block_start >= m_end)) {
if (buffer_dirty(bh)) {
ntfs_warning(vol->sb, "Clearing dirty mft "
"record page buffer. %s",
ntfs_please_email);
clear_buffer_dirty(bh);
}
continue;
}
if (!buffer_mapped(bh)) {
ntfs_error(vol->sb, "Writing mft records without "
"existing mapped buffers is not "
"implemented yet. %s",
ntfs_please_email);
err = -EOPNOTSUPP;
continue;
}
if (!buffer_uptodate(bh)) {
ntfs_error(vol->sb, "Writing mft records without "
"existing uptodate buffers is not "
"implemented yet. %s",
ntfs_please_email);
err = -EOPNOTSUPP;
continue;
}
//... (the rest of this function is omitted) ...
Example 6: rxrpc_recvmsg
/*
* receive a message from an RxRPC socket
* - we need to be careful about two or more threads calling recvmsg
* simultaneously
*/
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct rxrpc_skb_priv *sp;
struct rxrpc_call *call = NULL, *continue_call = NULL;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
struct sk_buff *skb;
long timeo;
int copy, ret, ullen, offset, copied = 0;
u32 abort_code;
DEFINE_WAIT(wait);
_enter(",,,%zu,%d", len, flags);
if (flags & (MSG_OOB | MSG_TRUNC))
return -EOPNOTSUPP;
ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);
timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
msg->msg_flags |= MSG_MORE;
lock_sock(&rx->sk);
for (;;) {
/* return immediately if a client socket has no outstanding
* calls */
if (RB_EMPTY_ROOT(&rx->calls)) {
if (copied)
goto out;
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
release_sock(&rx->sk);
if (continue_call)
rxrpc_put_call(continue_call);
return -ENODATA;
}
}
/* get the next message on the Rx queue */
skb = skb_peek(&rx->sk.sk_receive_queue);
if (!skb) {
/* nothing remains on the queue */
if (copied &&
(msg->msg_flags & MSG_PEEK || timeo == 0))
goto out;
/* wait for a message to turn up */
release_sock(&rx->sk);
prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
TASK_INTERRUPTIBLE);
ret = sock_error(&rx->sk);
if (ret)
goto wait_error;
if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
if (signal_pending(current))
goto wait_interrupted;
timeo = schedule_timeout(timeo);
}
finish_wait(sk_sleep(&rx->sk), &wait);
lock_sock(&rx->sk);
continue;
}
peek_next_packet:
sp = rxrpc_skb(skb);
call = sp->call;
ASSERT(call != NULL);
_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);
/* make sure we wait for the state to be updated in this call */
spin_lock_bh(&call->lock);
spin_unlock_bh(&call->lock);
if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
_debug("packet from released call");
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
BUG();
rxrpc_free_skb(skb);
continue;
}
/* determine whether to continue last data receive */
if (continue_call) {
_debug("maybe cont");
if (call != continue_call ||
skb->mark != RXRPC_SKB_MARK_DATA) {
release_sock(&rx->sk);
rxrpc_put_call(continue_call);
_leave(" = %d [noncont]", copied);
return copied;
}
}
//... (the rest of this function is omitted) ...
Example 7: bcm63xx_init_irq
static void bcm63xx_init_irq(void)
{
int irq_bits;
irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);
switch (bcm63xx_get_cpu_id()) {
case BCM3368_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
irq_stat_addr[1] = 0;
irq_mask_addr[1] = 0;
irq_bits = 32;
ext_irq_count = 4;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
break;
case BCM6328_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
irq_bits = 64;
ext_irq_count = 4;
is_ext_irq_cascaded = 1;
ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
break;
case BCM6338_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
irq_stat_addr[1] = 0;
irq_mask_addr[1] = 0;
irq_bits = 32;
ext_irq_count = 4;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
break;
case BCM6345_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
irq_stat_addr[1] = 0;
irq_mask_addr[1] = 0;
irq_bits = 32;
ext_irq_count = 4;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
break;
case BCM6348_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
irq_stat_addr[1] = 0;
irq_mask_addr[1] = 0;
irq_bits = 32;
ext_irq_count = 4;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
break;
case BCM6358_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
irq_bits = 32;
ext_irq_count = 4;
is_ext_irq_cascaded = 1;
ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
break;
case BCM6362_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
irq_bits = 64;
ext_irq_count = 4;
is_ext_irq_cascaded = 1;
ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
break;
case BCM6368_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
irq_bits = 64;
ext_irq_count = 6;
is_ext_irq_cascaded = 1;
ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
break;
default:
BUG();
}
if (irq_bits == 32) {
//... (the rest of this function is omitted) ...
Example 8: kprobe_handler
/*
* Called with IRQs disabled. IRQs must remain disabled from that point
* all the way until processing this kprobe is complete. The current
* kprobes implementation cannot process more than one nested level of
* kprobe, and that level is reserved for user kprobe handlers, so we can't
* risk encountering a new kprobe in an interrupt handler.
*/
void __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p, *cur;
struct kprobe_ctlblk *kcb;
kcb = get_kprobe_ctlblk();
cur = kprobe_running();
#ifdef CONFIG_THUMB2_KERNEL
/*
* First look for a probe which was registered using an address with
* bit 0 set, this is the usual situation for pointers to Thumb code.
* If not found, fallback to looking for one with bit 0 clear.
*/
p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
if (!p)
p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
#else /* ! CONFIG_THUMB2_KERNEL */
p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
#endif
if (p) {
if (cur) {
/* Kprobe is pending, so we're recursing. */
switch (kcb->kprobe_status) {
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/* A pre- or post-handler probe got us here. */
kprobes_inc_nmissed_count(p);
save_previous_kprobe(kcb);
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_REENTER;
singlestep(p, regs, kcb);
restore_previous_kprobe(kcb);
break;
default:
/* impossible cases */
BUG();
}
} else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
/* Probe hit and conditional execution check ok. */
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
/*
* If we have no pre-handler or it returned 0, we
* continue with normal processing. If we have a
* pre-handler and it returned non-zero, it prepped
* for calling the break_handler below on re-entry,
* so get out doing nothing more here.
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
kcb->kprobe_status = KPROBE_HIT_SS;
singlestep(p, regs, kcb);
if (p->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
reset_current_kprobe();
}
} else {
/*
* Probe hit but conditional execution check failed,
* so just skip the instruction and continue as if
* nothing had happened.
*/
singlestep_skip(p, regs);
}
} else if (cur) {
/* We probably hit a jprobe. Call its break handler. */
if (cur->break_handler && cur->break_handler(cur, regs)) {
kcb->kprobe_status = KPROBE_HIT_SS;
singlestep(cur, regs, kcb);
if (cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
}
reset_current_kprobe();
} else {
/*
* The probe was removed and a race is in progress.
* There is nothing we can do about it. Let's restart
* the instruction. By the time we can restart, the
* real instruction will be there.
*/
}
}
Example 9: vmw_ttm_map_dma
//... (the beginning of this function is omitted) ...
return &vmw_tt->vsgt;
}
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
int ret;
ret = vmw_ttm_map_dma(vmw_be);
if (unlikely(ret != 0))
return ret;
vmw_be->gmr_id = bo_mem->start;
vmw_be->mem_type = bo_mem->mem_type;
switch (bo_mem->mem_type) {
case VMW_PL_GMR:
return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
ttm->num_pages, vmw_be->gmr_id);
case VMW_PL_MOB:
if (unlikely(vmw_be->mob == NULL)) {
vmw_be->mob =
vmw_mob_create(ttm->num_pages);
if (unlikely(vmw_be->mob == NULL))
return -ENOMEM;
}
return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
&vmw_be->vsgt, ttm->num_pages,
vmw_be->gmr_id);
default:
BUG();
}
return 0;
}
static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
switch (vmw_be->mem_type) {
case VMW_PL_GMR:
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
break;
case VMW_PL_MOB:
vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
break;
default:
BUG();
}
if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
vmw_ttm_unmap_dma(vmw_be);
return 0;
}
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
Example 10: __build_packet_message
//... (the beginning of this function is omitted) ...
goto nla_put_failure;
} else {
/* Case 2: indev is a bridge group, we need to look
* for physical device (when called from ipv4) */
if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
htonl(outdev->ifindex)))
goto nla_put_failure;
if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
htonl(skb->nf_bridge->physoutdev->ifindex)))
goto nla_put_failure;
}
#endif
}
if (skb->mark &&
nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
goto nla_put_failure;
if (indev && skb->dev &&
skb->mac_header != skb->network_header) {
struct nfulnl_msg_packet_hw phw;
int len = dev_parse_header(skb, phw.hw_addr);
if (len > 0) {
phw.hw_addrlen = htons(len);
if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
goto nla_put_failure;
}
}
if (indev && skb_mac_header_was_set(skb)) {
if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
nla_put_be16(inst->skb, NFULA_HWLEN,
htons(skb->dev->hard_header_len)) ||
nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
skb_mac_header(skb)))
goto nla_put_failure;
}
if (skb->tstamp.tv64) {
struct nfulnl_msg_packet_timestamp ts;
struct timeval tv = ktime_to_timeval(skb->tstamp);
ts.sec = cpu_to_be64(tv.tv_sec);
ts.usec = cpu_to_be64(tv.tv_usec);
if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
goto nla_put_failure;
}
/* UID */
if (skb->sk) {
read_lock_bh(&skb->sk->sk_callback_lock);
if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
struct file *file = skb->sk->sk_socket->file;
__be32 uid = htonl(file->f_cred->fsuid);
__be32 gid = htonl(file->f_cred->fsgid);
read_unlock_bh(&skb->sk->sk_callback_lock);
if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
nla_put_be32(inst->skb, NFULA_GID, gid))
goto nla_put_failure;
} else
read_unlock_bh(&skb->sk->sk_callback_lock);
}
/* local sequence number */
if ((inst->flags & NFULNL_CFG_F_SEQ) &&
nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
goto nla_put_failure;
/* global sequence number */
if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
htonl(atomic_inc_return(&global_seq))))
goto nla_put_failure;
if (data_len) {
struct nlattr *nla;
int size = nla_attr_size(data_len);
if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
goto nlmsg_failure;
}
nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
nla->nla_type = NFULA_PAYLOAD;
nla->nla_len = size;
if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
BUG();
}
nlh->nlmsg_len = inst->skb->tail - old_tail;
return 0;
nlmsg_failure:
nla_put_failure:
PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
return -1;
}
Example 11: v9fs_file_do_lock
static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
{
struct p9_flock flock;
struct p9_fid *fid;
uint8_t status;
int res = 0;
unsigned char fl_type;
fid = filp->private_data;
BUG_ON(fid == NULL);
if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
BUG();
res = posix_lock_file_wait(filp, fl);
if (res < 0)
goto out;
/* convert posix lock to p9 tlock args */
memset(&flock, 0, sizeof(flock));
/* map the lock type */
switch (fl->fl_type) {
case F_RDLCK:
flock.type = P9_LOCK_TYPE_RDLCK;
break;
case F_WRLCK:
flock.type = P9_LOCK_TYPE_WRLCK;
break;
case F_UNLCK:
flock.type = P9_LOCK_TYPE_UNLCK;
break;
}
flock.start = fl->fl_start;
if (fl->fl_end == OFFSET_MAX)
flock.length = 0;
else
flock.length = fl->fl_end - fl->fl_start + 1;
flock.proc_id = fl->fl_pid;
flock.client_id = fid->clnt->name;
if (IS_SETLKW(cmd))
flock.flags = P9_LOCK_FLAGS_BLOCK;
/*
* if it's a blocked request and we get P9_LOCK_BLOCKED as the status
* for lock request, keep on trying
*/
for (;;) {
res = p9_client_lock_dotl(fid, &flock, &status);
if (res < 0)
break;
if (status != P9_LOCK_BLOCKED)
break;
if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
break;
if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
break;
}
/* map 9p status to VFS status */
switch (status) {
case P9_LOCK_SUCCESS:
res = 0;
break;
case P9_LOCK_BLOCKED:
res = -EAGAIN;
break;
case P9_LOCK_ERROR:
case P9_LOCK_GRACE:
res = -ENOLCK;
break;
default:
BUG();
}
/*
* in case the server returned an error for the lock request, revert
* it locally
*/
if (res < 0 && fl->fl_type != F_UNLCK) {
fl_type = fl->fl_type;
fl->fl_type = F_UNLCK;
res = posix_lock_file_wait(filp, fl);
fl->fl_type = fl_type;
}
out:
return res;
}
Example 12: relay_crypto_init
/** Initialize <b>crypto</b> from the key material in key_data.
*
* If <b>is_hs_v3</b> is set, this cpath will be used for next gen hidden
* service circuits and <b>key_data</b> must be at least
* HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN bytes in length.
*
* If <b>is_hs_v3</b> is not set, key_data must contain CPATH_KEY_MATERIAL_LEN
* bytes, which are used as follows:
* - 20 to initialize f_digest
* - 20 to initialize b_digest
* - 16 to key f_crypto
* - 16 to key b_crypto
*
* (If 'reverse' is true, then f_XX and b_XX are swapped.)
*
* Return 0 if init was successful, else -1 if it failed.
*/
int
relay_crypto_init(relay_crypto_t *crypto,
const char *key_data, size_t key_data_len,
int reverse, int is_hs_v3)
{
crypto_digest_t *tmp_digest;
crypto_cipher_t *tmp_crypto;
size_t digest_len = 0;
size_t cipher_key_len = 0;
tor_assert(crypto);
tor_assert(key_data);
tor_assert(!(crypto->f_crypto || crypto->b_crypto ||
crypto->f_digest || crypto->b_digest));
/* Basic key size validation */
if (is_hs_v3 && BUG(key_data_len != HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN)) {
goto err;
} else if (!is_hs_v3 && BUG(key_data_len != CPATH_KEY_MATERIAL_LEN)) {
goto err;
}
/* If we are using this crypto for next gen onion services use SHA3-256,
otherwise use good ol' SHA1 */
if (is_hs_v3) {
digest_len = DIGEST256_LEN;
cipher_key_len = CIPHER256_KEY_LEN;
crypto->f_digest = crypto_digest256_new(DIGEST_SHA3_256);
crypto->b_digest = crypto_digest256_new(DIGEST_SHA3_256);
} else {
digest_len = DIGEST_LEN;
cipher_key_len = CIPHER_KEY_LEN;
crypto->f_digest = crypto_digest_new();
crypto->b_digest = crypto_digest_new();
}
tor_assert(digest_len != 0);
tor_assert(cipher_key_len != 0);
const int cipher_key_bits = (int) cipher_key_len * 8;
crypto_digest_add_bytes(crypto->f_digest, key_data, digest_len);
crypto_digest_add_bytes(crypto->b_digest, key_data+digest_len, digest_len);
crypto->f_crypto = crypto_cipher_new_with_bits(key_data+(2*digest_len),
cipher_key_bits);
if (!crypto->f_crypto) {
log_warn(LD_BUG,"Forward cipher initialization failed.");
goto err;
}
crypto->b_crypto = crypto_cipher_new_with_bits(
key_data+(2*digest_len)+cipher_key_len,
cipher_key_bits);
if (!crypto->b_crypto) {
log_warn(LD_BUG,"Backward cipher initialization failed.");
goto err;
}
if (reverse) {
tmp_digest = crypto->f_digest;
crypto->f_digest = crypto->b_digest;
crypto->b_digest = tmp_digest;
tmp_crypto = crypto->f_crypto;
crypto->f_crypto = crypto->b_crypto;
crypto->b_crypto = tmp_crypto;
}
return 0;
err:
relay_crypto_clear(crypto);
return -1;
}
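Example 12 does not use the kernel macro at all: in Tor, BUG(cond) is a nonfatal assertion that logs the failure and evaluates to the truth value of cond, which is why it can sit directly inside an if condition and steer execution to an error path instead of crashing. The sketch below captures only that shape; the real macro in Tor's util_bug.h also records a stack trace and can be configured to be fatal.

/* Sketch of Tor's BUG(cond) usage pattern only: the expression logs when the
 * condition is true and yields the condition's value, so it composes with if. */
#include <stddef.h>
#include <stdio.h>

#define BUG(cond) \
    ((cond) ? (fprintf(stderr, "Non-fatal assertion !(%s) failed at %s:%d\n", \
                       #cond, __FILE__, __LINE__), 1) : 0)

static int check_key_len(size_t key_len, size_t expected_len)
{
    if (BUG(key_len != expected_len))   /* log and take the error path, no abort */
        return -1;
    return 0;
}

int main(void)
{
    return check_key_len(16, 32);   /* logs the assertion and returns -1 */
}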
Example 13: early_init_dt_add_memory_arch
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
BUG();
}
Example 14: early_init_dt_scan_chosen_arch
void __init early_init_dt_scan_chosen_arch(unsigned long node)
{
BUG();
}
Example 15: keyserver_get
static gpg_error_t
keyserver_get (ctrl_t ctrl, KEYDB_SEARCH_DESC *desc, int ndesc,
struct keyserver_spec *keyserver)
{
gpg_error_t err = 0;
char **pattern;
int idx, npat;
estream_t datastream;
/* Create an array filled with a search pattern for each key. The
array is delimited by a NULL entry. */
pattern = xtrycalloc (ndesc+1, sizeof *pattern);
if (!pattern)
return gpg_error_from_syserror ();
for (npat=idx=0; idx < ndesc; idx++)
{
int quiet = 0;
if (desc[idx].mode == KEYDB_SEARCH_MODE_FPR20
|| desc[idx].mode == KEYDB_SEARCH_MODE_FPR16)
{
pattern[npat] = xtrymalloc (2+2*20+1);
if (!pattern[npat])
err = gpg_error_from_syserror ();
else
{
strcpy (pattern[npat], "0x");
bin2hex (desc[idx].u.fpr,
desc[idx].mode == KEYDB_SEARCH_MODE_FPR20? 20 : 16,
pattern[npat]+2);
npat++;
}
}
else if(desc[idx].mode == KEYDB_SEARCH_MODE_LONG_KID)
{
pattern[npat] = xtryasprintf ("0x%08lX%08lX",
(ulong)desc[idx].u.kid[0],
(ulong)desc[idx].u.kid[1]);
if (!pattern[npat])
err = gpg_error_from_syserror ();
else
npat++;
}
else if(desc[idx].mode == KEYDB_SEARCH_MODE_SHORT_KID)
{
pattern[npat] = xtryasprintf ("0x%08lX", (ulong)desc[idx].u.kid[1]);
if (!pattern[npat])
err = gpg_error_from_syserror ();
else
npat++;
}
else if(desc[idx].mode == KEYDB_SEARCH_MODE_EXACT)
{
/* FIXME: We don't need this. It is used as a dummy by
keyserver_fetch which passes an entire URL. Better use a
separate function here. */
pattern[npat] = xtrystrdup ("0x0000000000000000");
if (!pattern[npat])
err = gpg_error_from_syserror ();
else
{
npat++;
quiet = 1;
}
}
else if (desc[idx].mode == KEYDB_SEARCH_MODE_NONE)
continue;
else
BUG();
if (err)
{
for (idx=0; idx < npat; idx++)
xfree (pattern[idx]);
xfree (pattern);
return err;
}
if (!quiet && keyserver)
{
if (keyserver->host)
log_info (_("requesting key %s from %s server %s\n"),
keystr_from_desc (&desc[idx]),
keyserver->scheme, keyserver->host);
else
log_info (_("requesting key %s from %s\n"),
keystr_from_desc (&desc[idx]), keyserver->uri);
}
}
err = gpg_dirmngr_ks_get (ctrl, pattern, &datastream);
for (idx=0; idx < npat; idx++)
xfree (pattern[idx]);
xfree (pattern);
if (!err)
{
void *stats_handle;
//... (the rest of this function is omitted) ...