This article collects typical usage examples of the Debugger function in C++. If you are wondering what Debugger does, how to call it, or want concrete usage examples, the hand-picked code snippets below may help.
15 code examples of the Debugger function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
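Before the examples: Debugger() is the traditional BSD entry point into the in-kernel debugger (DDB). Its signature varies by kernel, as the examples show: NetBSD and OpenBSD declare it with no arguments, while DragonFly BSD and XNU variants take a message string. Because DDB is an optional kernel feature, nearly every call site wraps the call in #ifdef DDB or a similar option. A minimal userspace sketch of that guard pattern, with an assumed stub standing in for the real DDB entry point:

/*
 * Sketch only, NOT kernel code: the Debugger() stub below is an
 * assumption standing in for the real DDB entry point, which only
 * exists when the kernel is built with "options DDB".
 */
#include <stdio.h>

#define DDB                     /* pretend the kernel option is configured */

#ifdef DDB
static void
Debugger(void)                  /* stand-in; NetBSD/OpenBSD take no argument */
{
    printf("-- dropped into debugger --\n");
}
#endif

int
main(void)
{
#ifdef DDB
    Debugger();                 /* call sites guard the call exactly like this */
#endif
    return 0;
}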
Example 1: prom_cngetc

int
prom_cngetc(dev_t dev)
{
    unsigned char ch = '\0';
    int l;
#ifdef DDB
    static int nplus = 0;
#endif

    while ((l = OF_read(stdin, &ch, 1)) != 1)
        /* void */;
#ifdef DDB
    if (ch == '+') {
        if (nplus++ > 3)
            Debugger();
    } else
        nplus = 0;
#endif
    if (ch == '\r')
        ch = '\n';
    if (ch == '\b')
        ch = '\177';
    return ch;
}
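The pattern worth noting here: the console read loop counts consecutive '+' characters and drops into DDB once more than three have arrived, giving the operator a keyboard escape hatch into the debugger. A standalone sketch of just that counter (the function name is illustrative, not kernel API):

#include <stdio.h>

static int nplus;               /* consecutive '+' characters seen */

static int
saw_break_sequence(int ch)
{
    if (ch == '+')
        return nplus++ > 3;     /* fires on the fifth '+' in a row */
    nplus = 0;                  /* any other character resets the count */
    return 0;
}

int
main(void)
{
    const char *input = "ab+++++c";
    const char *p;

    for (p = input; *p != '\0'; p++)
        if (saw_break_sequence(*p))
            printf("break sequence at offset %ld\n", (long)(p - input));
    return 0;
}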
Example 2: hammer_critical_error

/*
 * Report critical errors.  ip may be NULL.
 */
void
hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
                      int error, const char *msg)
{
    hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;

    krateprintf(&hmp->krate,
        "HAMMER(%s): Critical error inode=%jd error=%d %s\n",
        hmp->mp->mnt_stat.f_mntfromname,
        (intmax_t)(ip ? ip->obj_id : -1),
        error, msg);

    if (hmp->ronly == 0) {
        hmp->ronly = 2;         /* special errored read-only mode */
        hmp->mp->mnt_flag |= MNT_RDONLY;
        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
            hammer_adjust_volume_mode, NULL);
        kprintf("HAMMER(%s): Forcing read-only mode\n",
            hmp->mp->mnt_stat.f_mntfromname);
    }
    hmp->error = error;
    if (hammer_debug_critical)
        Debugger("Entering debugger");
}
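krateprintf() is DragonFly's rate-limited kprintf(), used here so a storm of critical errors cannot flood the console. A rough userspace sketch of such a per-second throttle; the struct layout and field names are assumptions for illustration, not the real kernel structure:

#include <stdarg.h>
#include <stdio.h>
#include <time.h>

struct krate { int freq, count; time_t last; };     /* assumed layout */

static void
krateprintf(struct krate *kr, const char *fmt, ...)
{
    va_list ap;
    time_t now = time(NULL);

    if (now != kr->last) {      /* new second: reset the message budget */
        kr->last = now;
        kr->count = 0;
    }
    if (kr->count++ >= kr->freq)
        return;                 /* over budget, silently drop the message */
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

int
main(void)
{
    struct krate kr = { .freq = 2 };
    int i;

    for (i = 0; i < 5; i++)
        krateprintf(&kr, "critical error %d\n", i);  /* prints only 0 and 1 */
    return 0;
}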
Example 3: hammer_lock_sh

/*
 * Obtain a shared lock.
 *
 * We do not give pending exclusive locks priority over shared locks as
 * doing so could lead to a deadlock.
 */
void
hammer_lock_sh(struct hammer_lock *lock)
{
    thread_t td = curthread;
    u_int lv;
    u_int nlv;
    const char *ident = "hmrlck";

    KKASSERT(lock->refs);
    for (;;) {
        lv = lock->lockval;

        if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
            nlv = (lv + 1);
            if (atomic_cmpset_int(&lock->lockval, lv, nlv))
                break;
        } else if (lock->lowner == td) {
            /*
             * Disallowed case, drop into kernel debugger for
             * now.  A cont continues w/ an exclusive lock.
             */
            nlv = (lv + 1);
            if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                if (hammer_debug_critical)
                    Debugger("hammer_lock_sh: holding ex");
                break;
            }
        } else {
            nlv = lv | HAMMER_LOCKF_WANTED;
            ++hammer_contention_count;
            tsleep_interlock(&lock->lockval, 0);
            if (atomic_cmpset_int(&lock->lockval, lv, nlv))
                tsleep(&lock->lockval, PINTERLOCKED, ident, 0);
        }
    }
}
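The core of this function is the classic compare-and-swap retry loop: read the lock word, compute the desired new value, and commit it only if nobody changed the word in between. A minimal userspace sketch of the uncontested shared-acquire path, using C11 atomics in place of the kernel's atomic_cmpset_int(); the flag layout is an assumption:

#include <stdatomic.h>
#include <stdio.h>

#define LOCKF_EXCLUSIVE 0x80000000u     /* assumed bit layout */

static _Atomic unsigned int lockval;    /* low bits: shared holder count */

static int
try_lock_shared(void)
{
    unsigned int lv = atomic_load(&lockval);

    while ((lv & LOCKF_EXCLUSIVE) == 0) {
        /* commit lv+1 only if the word is unchanged; on failure lv is
         * reloaded and the exclusive bit is rechecked */
        if (atomic_compare_exchange_weak(&lockval, &lv, lv + 1))
            return 1;           /* shared count bumped, lock held */
    }
    return 0;                   /* exclusively held, caller must wait */
}

int
main(void)
{
    printf("acquired=%d lockval=%u\n", try_lock_shared(),
        atomic_load(&lockval));
    return 0;
}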
Example 4: cpu_boot_secondary

void
cpu_boot_secondary(struct cpu_info *ci)
{
    struct pcb *pcb;
    int i;
    struct pmap *kpm = pmap_kernel();
    extern u_int32_t mp_pdirpa;

    if (mp_verbose)
        printf("%s: starting", ci->ci_dev.dv_xname);

    /* XXX move elsewhere, not per CPU. */
    mp_pdirpa = kpm->pm_pdirpa;

    pcb = ci->ci_idle_pcb;
    if (mp_verbose)
        printf(", init idle stack ptr is 0x%x\n", pcb->pcb_esp);

    CPU_STARTUP(ci);

    /*
     * Wait for it to become ready.
     */
    for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i > 0; i--) {
        delay(10);
    }
    if (!(ci->ci_flags & CPUF_RUNNING)) {
        printf("%s failed to become ready\n", ci->ci_dev.dv_xname);
#ifdef DDB
        Debugger();
#endif
    }

    CPU_START_CLEANUP(ci);
}
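The boot path polls CPUF_RUNNING up to 100000 times with a 10-microsecond delay before giving up and, on DDB kernels, dropping into the debugger. A self-contained sketch of the same bounded spin-wait, with a thread standing in for the secondary CPU and usleep() for the kernel's delay():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int cpu_running;  /* stands in for ci->ci_flags & CPUF_RUNNING */

static void *
secondary(void *arg)
{
    (void)arg;
    usleep(50 * 1000);          /* pretend startup work */
    atomic_store(&cpu_running, 1);
    return NULL;
}

int
main(void)
{
    pthread_t t;
    int i;

    pthread_create(&t, NULL, secondary, NULL);
    for (i = 100000; !atomic_load(&cpu_running) && i > 0; i--)
        usleep(10);             /* the kernel uses delay(10) */
    printf(atomic_load(&cpu_running) ?
        "ready\n" : "failed to become ready\n");
    pthread_join(t, NULL);
    return 0;
}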
Example 5: lck_rw_lock_shared_to_exclusive

boolean_t
lck_rw_lock_shared_to_exclusive(
    lck_rw_t *lck)
{
    int i;
    boolean_t do_wakeup = FALSE;
    wait_result_t res;
#if MACH_LDEBUG
    int decrementer;
#endif /* MACH_LDEBUG */
    boolean_t istate;
#if CONFIG_DTRACE
    uint64_t wait_interval = 0;
    int slept = 0;
    int readers_at_sleep = 0;
#endif

    istate = lck_interlock_lock(lck);

    lck->lck_rw_shared_count--;

    if (lck->lck_rw_want_upgrade) {
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
            (int)lck, lck->lck_rw_shared_count, lck->lck_rw_want_upgrade, 0, 0);

        /*
         * Someone else has requested upgrade.
         * Since we've released a read lock, wake
         * him up.
         */
        if (lck->lck_w_waiting && (lck->lck_rw_shared_count == 0)) {
            lck->lck_w_waiting = FALSE;
            do_wakeup = TRUE;
        }

        lck_interlock_unlock(lck, istate);

        if (do_wakeup)
            thread_wakeup(RW_LOCK_WRITER_EVENT(lck));

        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
            (int)lck, lck->lck_rw_shared_count, lck->lck_rw_want_upgrade, 0, 0);

        return (FALSE);
    }

    lck->lck_rw_want_upgrade = TRUE;

#if MACH_LDEBUG
    decrementer = DECREMENTER_TIMEOUT;
#endif /* MACH_LDEBUG */
    while (lck->lck_rw_shared_count != 0) {
#if CONFIG_DTRACE
        if (lockstat_probemap[LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN] && wait_interval == 0) {
            wait_interval = mach_absolute_time();
            readers_at_sleep = lck->lck_rw_shared_count;
        } else {
            wait_interval = -1;
        }
#endif
        i = lock_wait_time[lck->lck_rw_can_sleep ? 1 : 0];

        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
            (int)lck, lck->lck_rw_shared_count, i, 0, 0);

        if (i != 0) {
            lck_interlock_unlock(lck, istate);
#if MACH_LDEBUG
            if (!--decrementer)
                Debugger("timeout - lck_rw_shared_count");
#endif /* MACH_LDEBUG */
            while (--i != 0 && lck->lck_rw_shared_count != 0)
                lck_rw_lock_pause(istate);
            istate = lck_interlock_lock(lck);
        }

        if (lck->lck_rw_can_sleep && lck->lck_rw_shared_count != 0) {
            lck->lck_w_waiting = TRUE;
            res = assert_wait(RW_LOCK_WRITER_EVENT(lck), THREAD_UNINT);
            if (res == THREAD_WAITING) {
                lck_interlock_unlock(lck, istate);
                res = thread_block(THREAD_CONTINUE_NULL);
#if CONFIG_DTRACE
                slept = 1;
#endif
                istate = lck_interlock_lock(lck);
            }
        }
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
            (int)lck, lck->lck_rw_shared_count, 0, 0, 0);
    }

    lck_interlock_unlock(lck, istate);

#if CONFIG_DTRACE
    /*
     * We infer whether we took the sleep/spin path above by checking readers_at_sleep.
     */
    if (wait_interval != 0 && wait_interval != (unsigned) -1 && readers_at_sleep) {
        if (slept == 0) {
            LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, lck, mach_absolute_time() - wait_interval, 0);
//......... some code omitted here .........
Example 6: Stop

void Stop()
{
    Debugging = 1;
    Debugger();
}
Example 7: panic

//......... some code omitted here .........
        if (xgd && xgd != gd) {
            crit_enter();
            ++mycpu->gd_trap_nesting_level;
            if (mycpu->gd_trap_nesting_level < 25) {
                kprintf("SECONDARY PANIC ON CPU %d THREAD %p\n",
                    mycpu->gd_cpuid, td);
            }
            td->td_release = NULL;      /* be a grinch */
            for (;;) {
                lwkt_deschedule_self(td);
                lwkt_switch();
            }
            /* NOT REACHED */
            /* --mycpu->gd_trap_nesting_level */
            /* crit_exit() */
        }

        /*
         * Reentrant panic
         */
        if (xgd && xgd == gd)
            break;

        /*
         * We got it
         */
        if (atomic_cmpset_ptr(&panic_cpu_gd, NULL, gd))
            break;
    }
#else
    panic_cpu_gd = gd;
#endif

    /*
     * Try to get the system into a working state.  Save information
     * we are about to destroy.
     */
    kvcreinitspin();
    if (panicstr == NULL) {
        bcopy(td->td_toks_array, panic_tokens, sizeof(panic_tokens));
        panic_tokens_count = td->td_toks_stop - &td->td_toks_base;
    }
    lwkt_relalltokens(td);
    td->td_toks_stop = &td->td_toks_base;

    /*
     * Setup
     */
    bootopt = RB_AUTOBOOT | RB_DUMP;
    if (sync_on_panic == 0)
        bootopt |= RB_NOSYNC;
    newpanic = 0;
    if (panicstr) {
        bootopt |= RB_NOSYNC;
    } else {
        panicstr = fmt;
        newpanic = 1;
    }

    /*
     * Format the panic string.
     */
    __va_start(ap, fmt);
    kvsnprintf(buf, sizeof(buf), fmt, ap);
    if (panicstr == fmt)
        panicstr = buf;
    __va_end(ap);
    kprintf("panic: %s\n", buf);
#ifdef SMP
    /* two separate prints in case of an unmapped page and trap */
    kprintf("cpuid = %d\n", mycpu->gd_cpuid);
#endif
#if (NGPIO > 0) && defined(ERROR_LED_ON_PANIC)
    led_switch("error", 1);
#endif
#if defined(WDOG_DISABLE_ON_PANIC) && defined(WATCHDOG_ENABLE)
    wdog_disable();
#endif

    /*
     * Enter the debugger or fall through & dump.  Entering the
     * debugger will stop cpus.  If not entering the debugger stop
     * cpus here.
     */
#if defined(DDB)
    if (newpanic && trace_on_panic)
        print_backtrace(-1);
    if (debugger_on_panic)
        Debugger("panic");
    else
#endif
#ifdef SMP
    if (newpanic)
        stop_cpus(mycpu->gd_other_cpus);
#else
    ;
#endif
    boot(bootopt);
}
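Before doing anything destructive, panic() elects a single owner: the first CPU to compare-and-swap the global panic_cpu_gd pointer from NULL to itself wins, a CPU that already owns it is treated as a reentrant panic, and every other CPU deschedules itself. A compact userspace sketch of that handshake; the return codes are illustrative:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(void *) panic_owner;     /* stands in for panic_cpu_gd */

static int
claim_panic(void *self)
{
    void *expected = NULL;

    if (atomic_compare_exchange_strong(&panic_owner, &expected, self))
        return 1;               /* we won: this CPU owns the panic */
    if (expected == self)
        return 0;               /* reentrant panic on the owning CPU */
    return -1;                  /* another CPU is already panicking */
}

int
main(void)
{
    int cpu0, cpu1;             /* addresses stand in for per-CPU globaldata */

    printf("cpu0 first: %d\n", claim_panic(&cpu0));     /* 1  */
    printf("cpu0 again: %d\n", claim_panic(&cpu0));     /* 0  */
    printf("cpu1:       %d\n", claim_panic(&cpu1));     /* -1 */
    return 0;
}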
Example 8: hammer2_hardlink_shiftup

/*
 * Shift *chainp up to the specified directory, change the filename
 * to "0xINODENUMBER", and adjust the key.  The chain becomes the
 * invisible hardlink target.
 *
 * The original *chainp has already been marked deleted.
 */
static
void
hammer2_hardlink_shiftup(hammer2_trans_t *trans, hammer2_chain_t **chainp,
    hammer2_inode_t *dip, hammer2_chain_t **dchainp,
    int nlinks, int *errorp)
{
    hammer2_inode_data_t *nipdata;
    hammer2_chain_t *chain;
    hammer2_chain_t *xchain;
    hammer2_key_t key_dummy;
    hammer2_key_t lhc;
    hammer2_blockref_t bref;
    int cache_index = -1;

    chain = *chainp;
    lhc = chain->data->ipdata.inum;
    KKASSERT((lhc & HAMMER2_DIRHASH_VISIBLE) == 0);

    /*
     * Locate the inode or indirect block to create the new
     * entry in.  lhc represents the inode number so there is
     * no collision iteration.
     *
     * There should be no key collisions with invisible inode keys.
     *
     * WARNING! Must use inode_lock_ex() on dip to handle a stale
     *          dip->chain cache.
     */
retry:
    *errorp = 0;
    xchain = hammer2_chain_lookup(dchainp, &key_dummy,
        lhc, lhc, &cache_index, 0);
    if (xchain) {
        kprintf("X3 chain %p dip %p dchain %p dip->chain %p\n",
            xchain, dip, *dchainp, dip->chain);
        hammer2_chain_unlock(xchain);
        xchain = NULL;
        *errorp = ENOSPC;
#if 0
        Debugger("X3");
#endif
    }

    /*
     * Create entry in common parent directory using the seek position
     * calculated above.
     *
     * We must refactor chain because it might have been shifted into
     * an indirect chain by the create.
     */
    if (*errorp == 0) {
        KKASSERT(xchain == NULL);
#if 0
        *errorp = hammer2_chain_create(trans, dchainp, &xchain,
            lhc, 0,
            HAMMER2_BREF_TYPE_INODE,    /* n/a */
            HAMMER2_INODE_BYTES);       /* n/a */
#endif
        /* XXX this somehow isn't working on chain XXX */
        /* KKASSERT(xxx) */
    }

    /*
     * Cleanup and handle retries.
     */
    if (*errorp == EAGAIN) {
        kprintf("R");
        hammer2_chain_wait(*dchainp);
        hammer2_chain_drop(*dchainp);
        goto retry;
    }

    /*
     * Handle the error case
     */
    if (*errorp) {
        panic("error2");
        KKASSERT(xchain == NULL);
        return;
    }

    /*
     * Use xchain as a placeholder for (lhc).  Duplicate chain to the
     * same target bref as xchain and then delete xchain.  The duplication
     * occurs after xchain in flush order even though xchain is deleted
     * after the duplication.  XXX
     *
     * WARNING! Duplications (to a different parent) can cause indirect
     *          blocks to be inserted, refactor xchain.
     */
    bref = chain->bref;
    bref.key = lhc;             /* invisible dir entry key */
    bref.keybits = 0;
//......... some code omitted here .........
Example 9: compat_16_netbsd32___sigreturn14

/* ARGSUSED */
int
compat_16_netbsd32___sigreturn14(struct lwp *l, const struct compat_16_netbsd32___sigreturn14_args *uap, register_t *retval)
{
    /* {
        syscallarg(struct sigcontext *) sigcntxp;
    } */
    struct netbsd32_sigcontext sc, *scp;
    struct trapframe64 *tf;
    struct proc *p = l->l_proc;

    /* First ensure consistent stack state (see sendsig). */
    write_user_windows();
    if (rwindow_save(l)) {
#ifdef DEBUG
        printf("netbsd32_sigreturn14: rwindow_save(%p) failed, sending SIGILL\n", p);
        Debugger();
#endif
        mutex_enter(p->p_lock);
        sigexit(l, SIGILL);
    }
#ifdef DEBUG
    if (sigdebug & SDB_FOLLOW) {
        printf("netbsd32_sigreturn14: %s[%d], sigcntxp %p\n",
            p->p_comm, p->p_pid, SCARG(uap, sigcntxp));
        if (sigdebug & SDB_DDB) Debugger();
    }
#endif

    scp = (struct netbsd32_sigcontext *)(u_long)SCARG(uap, sigcntxp);
    if ((vaddr_t)scp & 3 || (copyin((void *)scp, &sc, sizeof sc) != 0))
    {
#ifdef DEBUG
        printf("netbsd32_sigreturn14: copyin failed: scp=%p\n", scp);
        Debugger();
#endif
        return (EINVAL);
    }
    scp = &sc;

    tf = l->l_md.md_tf;
    /*
     * Only the icc bits in the psr are used, so it need not be
     * verified.  pc and npc must be multiples of 4.  This is all
     * that is required; if it holds, just do it.
     */
    if (((sc.sc_pc | sc.sc_npc) & 3) != 0 || (sc.sc_pc == 0) || (sc.sc_npc == 0))
#ifdef DEBUG
    {
        printf("netbsd32_sigreturn14: pc %p or npc %p invalid\n", sc.sc_pc, sc.sc_npc);
        Debugger();
        return (EINVAL);
    }
#else
        return (EINVAL);
#endif

    /* take only psr ICC field */
    tf->tf_tstate = (int64_t)(tf->tf_tstate & ~TSTATE_CCR) | PSRCC_TO_TSTATE(sc.sc_psr);
    tf->tf_pc = (int64_t)sc.sc_pc;
    tf->tf_npc = (int64_t)sc.sc_npc;
    tf->tf_global[1] = (int64_t)sc.sc_g1;
    tf->tf_out[0] = (int64_t)sc.sc_o0;
    tf->tf_out[6] = (int64_t)sc.sc_sp;
#ifdef DEBUG
    if (sigdebug & SDB_FOLLOW) {
        printf("netbsd32_sigreturn14: return trapframe pc=%p sp=%p tstate=%llx\n",
            (vaddr_t)tf->tf_pc, (vaddr_t)tf->tf_out[6], tf->tf_tstate);
        if (sigdebug & SDB_DDB) Debugger();
    }
#endif

    /* Restore signal stack. */
    mutex_enter(p->p_lock);
    if (sc.sc_onstack & SS_ONSTACK)
        l->l_sigstk.ss_flags |= SS_ONSTACK;
    else
        l->l_sigstk.ss_flags &= ~SS_ONSTACK;
    /* Restore signal mask. */
    (void) sigprocmask1(l, SIG_SETMASK, &sc.sc_mask, 0);
    mutex_exit(p->p_lock);

    return (EJUSTRETURN);
}
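The checks this sigreturn performs before trusting a user-supplied context are the interesting part: the saved pc and npc must both be non-zero and 4-byte aligned (SPARC instructions are word sized), otherwise the syscall fails with EINVAL, and DEBUG kernels drop into DDB first. The predicate in isolation:

#include <stdint.h>
#include <stdio.h>

static int
pc_pair_valid(uint32_t pc, uint32_t npc)
{
    if (((pc | npc) & 3) != 0)      /* either one misaligned? */
        return 0;
    if (pc == 0 || npc == 0)        /* null entry points are bogus */
        return 0;
    return 1;
}

int
main(void)
{
    printf("%d %d %d\n",
        pc_pair_valid(0x1000, 0x1004),      /* 1: valid */
        pc_pair_valid(0x1002, 0x1004),      /* 0: misaligned pc */
        pc_pair_valid(0, 0x1004));          /* 0: null pc */
    return 0;
}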
Example 10: mips_init

//......... some code omitted here .........
    /*
     * Last chance to call the BIOS.  Wiping the TLB means the BIOS' data
     * areas are demapped on most systems.
     */
    delay(20*1000);             /* Let any UART FIFO drain... */

    sys_config.cpu[0].tlbwired = UPAGES / 2;
    tlb_set_wired(0);
    tlb_flush(sys_config.cpu[0].tlbsize);
    tlb_set_wired(sys_config.cpu[0].tlbwired);

    /*
     * Get a console, very early but after initial mapping setup.
     */
    consinit();
    printf("Initial setup done, switching console.\n");

    /*
     * Init message buffer.
     */
    msgbufbase = (caddr_t)pmap_steal_memory(MSGBUFSIZE, NULL, NULL);
    initmsgbuf(msgbufbase, MSGBUFSIZE);

    /*
     * Allocate U page(s) for proc[0], pm_tlbpid 1.
     */
    proc0.p_addr = proc0paddr = curprocpaddr =
        (struct user *)pmap_steal_memory(USPACE, NULL, NULL);
    proc0.p_md.md_regs = (struct trap_frame *)&proc0paddr->u_pcb.pcb_regs;
    tlb_set_pid(1);

    /*
     * Allocate system data structures.
     */
    i = (vsize_t)allocsys(NULL);
    sd = (caddr_t)pmap_steal_memory(i, NULL, NULL);
    allocsys(sd);

    /*
     * Bootstrap VM system.
     */
    pmap_bootstrap();

    /*
     * Copy down exception vector code.
     */
    bcopy(exception, (char *)CACHE_ERR_EXC_VEC, e_exception - exception);
    bcopy(exception, (char *)GEN_EXC_VEC, e_exception - exception);

    /*
     * Build proper TLB refill handler trampolines.
     */
    switch (cputype) {
    case MIPS_R5000:
        /*
         * R5000 processors need a specific chip bug workaround
         * in their tlb handlers.  Theoretically only revision 1
         * of the processor need it, but there is evidence
         * later versions also need it.
         *
         * This is also necessary on RM52x0; we test on the `rounded'
         * cputype value instead of sys_config.cpu[0].type; this
         * causes RM7k and RM9k to be included, just to be on the
         * safe side.
         */
        tlb_handler = (vaddr_t)&tlb_miss_err_r5k;
        xtlb_handler = (vaddr_t)&xtlb_miss_err_r5k;
        break;
    default:
        tlb_handler = (vaddr_t)&tlb_miss;
        xtlb_handler = (vaddr_t)&xtlb_miss;
        break;
    }

    build_trampoline(TLB_MISS_EXC_VEC, tlb_handler);
    build_trampoline(XTLB_MISS_EXC_VEC, xtlb_handler);

    /*
     * Turn off bootstrap exception vectors.
     */
    setsr(getsr() & ~SR_BOOT_EXC_VEC);
    proc0.p_md.md_regs->sr = getsr();

    /*
     * Clear out the I and D caches.
     */
    Mips_SyncCache();

#ifdef DDB
    db_machine_init();
    if (boothowto & RB_KDB)
        Debugger();
#endif

    /*
     * Return new stack pointer.
     */
    return ((caddr_t)proc0paddr + USPACE - 64);
}
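The cputype switch above is selecting which TLB-refill routine gets stamped into the exception vector: a function-pointer choice made once at boot. Reduced to its essence, with all names invented for the demo:

#include <stdio.h>

typedef void (*handler_t)(void);

static void tlb_miss(void)         { puts("generic tlb_miss"); }
static void tlb_miss_err_r5k(void) { puts("R5000 workaround tlb_miss"); }

enum cputype { MIPS_R4000, MIPS_R5000 };

static handler_t
pick_tlb_handler(enum cputype cputype)
{
    switch (cputype) {
    case MIPS_R5000:
        /* chip bug workaround, see the comment in mips_init() */
        return tlb_miss_err_r5k;
    default:
        return tlb_miss;
    }
}

int
main(void)
{
    pick_tlb_handler(MIPS_R5000)();     /* R5000 workaround tlb_miss */
    pick_tlb_handler(MIPS_R4000)();     /* generic tlb_miss */
    return 0;
}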
Example 11: mach_init

//......... some code omitted here .........
                physmem += btoc(((int) len));
                mem_clusters[mem_cluster_cnt].start =
                    (long) start;
                mem_clusters[mem_cluster_cnt].size =
                    (long) len;
                mem_cluster_cnt++;
                added = 1;
            }
        }
        if (added)
            printf("added to map\n");
        else
            printf("not added to map\n");
        idx++;
    }
    } else {
        /*
         * Handle the case of not being called from the firmware.
         */
        /* XXX hardwire to 32MB; should be kernel config option */
        physmem = 32 * 1024 * 1024 / 4096;
        mem_clusters[0].start = 0;
        mem_clusters[0].size = ctob(physmem);
        mem_cluster_cnt = 1;
    }

    for (i = 0; i < sizeof(bootinfo.boot_flags); i++) {
        switch (bootinfo.boot_flags[i]) {
        case '\0':
            break;
        case ' ':
            continue;
        case '-':
            while (bootinfo.boot_flags[i] != ' ' &&
                bootinfo.boot_flags[i] != '\0') {
                switch (bootinfo.boot_flags[i]) {
                case 'a':
                    boothowto |= RB_ASKNAME;
                    break;
                case 'd':
                    boothowto |= RB_KDB;
                    break;
                case 's':
                    boothowto |= RB_SINGLE;
                    break;
                }
                i++;
            }
        }
    }

    /*
     * Load the rest of the available pages into the VM system.
     * The first chunk is tricky because we have to avoid the
     * kernel, but the rest are easy.
     */
    first = round_page(MIPS_KSEG0_TO_PHYS(kernend));
    last = mem_clusters[0].start + mem_clusters[0].size;
    uvm_page_physload(atop(first), atop(last), atop(first), atop(last),
        VM_FREELIST_DEFAULT);

    for (i = 1; i < mem_cluster_cnt; i++) {
        first = round_page(mem_clusters[i].start);
        last = mem_clusters[i].start + mem_clusters[i].size;
        uvm_page_physload(atop(first), atop(last), atop(first),
            atop(last), VM_FREELIST_DEFAULT);
    }

    /*
     * Initialize error message buffer (at end of core).
     */
    mips_init_msgbuf();

    /*
     * Allocate space for proc0's USPACE
     */
    p0 = (void *)pmap_steal_memory(USPACE, NULL, NULL);
    lwp0.l_addr = proc0paddr = (struct user *)p0;
    lwp0.l_md.md_regs = (struct frame *)((char *)p0 + USPACE) - 1;
    proc0paddr->u_pcb.pcb_context[11] =
        MIPS_INT_MASK | MIPS_SR_INT_IE;     /* SR */

    pmap_bootstrap();

    /*
     * Initialize debuggers, and break into them, if appropriate.
     */
#if NKSYMS || defined(DDB) || defined(LKM)
    ksyms_init(((uintptr_t)ksym_end - (uintptr_t)ksym_start),
        ksym_start, ksym_end);
#endif

    if (boothowto & RB_KDB) {
#if defined(DDB)
        Debugger();
#endif
    }
}
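The flag scan in the middle of mach_init() is a reusable idiom: walk the boot string, and for each '-' group OR in an RB_* bit per option letter. A standalone version; the flag values are made up for the demo:

#include <stdio.h>

#define RB_ASKNAME 0x01
#define RB_KDB     0x02
#define RB_SINGLE  0x04

static int
parse_boot_flags(const char *s)
{
    int howto = 0;

    for (; *s != '\0'; s++) {
        if (*s != '-')
            continue;           /* skip anything outside a '-' group */
        while (*s != ' ' && *s != '\0') {
            switch (*s) {
            case 'a': howto |= RB_ASKNAME; break;
            case 'd': howto |= RB_KDB;     break;
            case 's': howto |= RB_SINGLE;  break;
            }
            s++;
        }
        if (*s == '\0')
            break;              /* don't run past the terminator */
    }
    return howto;
}

int
main(void)
{
    printf("0x%x\n", parse_boot_flags("root=wd0a -ds"));  /* 0x6 */
    return 0;
}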
Example 12: trap_pfault

//......... some code omitted here .........
        if (vm == NULL) {
            fault_flags = -1;
            ftype = -1;
            goto nogo;
        }

        /*
         * Debugging, try to catch kernel faults on the user address
         * space when not inside on onfault (e.g. copyin/copyout)
         * routine.
         */
        if (usermode == 0 && (td->td_pcb == NULL ||
            td->td_pcb->pcb_onfault == NULL)) {
#ifdef DDB
            if (freeze_on_seg_fault) {
                kprintf("trap_pfault: user address fault from kernel mode "
                    "%016lx\n", (long)frame->tf_addr);
                while (freeze_on_seg_fault)
                    tsleep(&freeze_on_seg_fault, 0, "frzseg", hz * 20);
            }
#endif
        }
        map = &vm->vm_map;
    }

    /*
     * PGEX_I is defined only if the execute disable bit capability is
     * supported and enabled.
     */
    if (frame->tf_err & PGEX_W)
        ftype = VM_PROT_WRITE;
    else if (frame->tf_err & PGEX_I)
        ftype = VM_PROT_EXECUTE;
    else
        ftype = VM_PROT_READ;

    if (map != &kernel_map) {
        /*
         * Keep swapout from messing with us during this
         * critical time.
         */
        PHOLD(lp->lwp_proc);

        /*
         * Issue fault
         */
        fault_flags = 0;
        if (usermode)
            fault_flags |= VM_FAULT_BURST | VM_FAULT_USERMODE;
        if (ftype & VM_PROT_WRITE)
            fault_flags |= VM_FAULT_DIRTY;
        else
            fault_flags |= VM_FAULT_NORMAL;
        rv = vm_fault(map, va, ftype, fault_flags);

        PRELE(lp->lwp_proc);
    } else {
        /*
         * Don't have to worry about process locking or stacks in the
         * kernel.
         */
        fault_flags = VM_FAULT_NORMAL;
        rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
    }
    if (rv == KERN_SUCCESS)
        return (0);
nogo:
    if (!usermode) {
        /*
         * NOTE: in 64-bit mode traps push rsp/ss
         * even if no ring change occurs.
         */
        if (td->td_pcb->pcb_onfault &&
            td->td_pcb->pcb_onfault_sp == frame->tf_rsp &&
            td->td_gd->gd_intr_nesting_level == 0) {
            frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
            return (0);
        }
        trap_fatal(frame, frame->tf_addr);
        return (-1);
    }

    /*
     * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
     * kludge is needed to pass the fault address to signal handlers.
     */
    p = td->td_proc;
#ifdef DDB
    if (td->td_lwp->lwp_vkernel == NULL) {
        while (freeze_on_seg_fault) {
            tsleep(p, 0, "freeze", hz * 20);
        }
        if (ddb_on_seg_fault)
            Debugger("ddb_on_seg_fault");
    }
#endif

    return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
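The pcb_onfault dance in the nogo: path is how copyin()/copyout() survive bad user pointers: if a recovery address is registered (and the saved stack pointer matches), the fault handler rewrites the return rip instead of calling trap_fatal(). A userspace analogue of the same idea built on setjmp/longjmp; all names here are illustrative:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf onfault;         /* stands in for pcb_onfault */
static int onfault_set;

static void
fault(void)                     /* stands in for the page-fault trap */
{
    if (onfault_set)
        longjmp(onfault, 1);    /* resume at the registered recovery point */
    printf("fatal trap\n");     /* no handler: the kernel calls trap_fatal() */
}

static int
copyin_like(int bad_pointer)
{
    if (setjmp(onfault) != 0) {
        onfault_set = 0;
        return -1;              /* EFAULT-style failure instead of a panic */
    }
    onfault_set = 1;
    if (bad_pointer)
        fault();                /* simulated faulting access */
    onfault_set = 0;
    return 0;                   /* copy succeeded */
}

int
main(void)
{
    printf("good copy: %d, bad copy: %d\n", copyin_like(0), copyin_like(1));
    return 0;
}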
Example 13: trap_pfault

//......... some code omitted here .........
    if (!usermode) {
        if (td->td_gd->gd_intr_nesting_level == 0 &&
            td->td_pcb->pcb_onfault) {
            frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
            return (0);
        }
        trap_fatal(frame, frame->tf_addr);
        return (-1);
    }

    /*
     * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
     * kludge is needed to pass the fault address to signal handlers.
     */
    p = td->td_proc;
    if (td->td_lwp->lwp_vkernel == NULL) {
#ifdef DDB
        if (bootverbose || freeze_on_seg_fault || ddb_on_seg_fault) {
#else
        if (bootverbose) {
#endif
            kprintf("seg-fault ft=%04x ff=%04x addr=%p rip=%p "
                "pid=%d cpu=%d p_comm=%s\n",
                ftype, fault_flags,
                (void *)frame->tf_addr,
                (void *)frame->tf_rip,
                p->p_pid, mycpu->gd_cpuid, p->p_comm);
        }
#ifdef DDB
        while (freeze_on_seg_fault) {
            tsleep(p, 0, "freeze", hz * 20);
        }
        if (ddb_on_seg_fault)
            Debugger("ddb_on_seg_fault");
#endif
    }

    return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
    int code, ss;
    u_int type;
    long rsp;
    struct soft_segment_descriptor softseg;
    char *msg;

    code = frame->tf_err;
    type = frame->tf_trapno;
    sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

    if (type <= MAX_TRAP_MSG)
        msg = trap_msg[type];
    else
        msg = "UNKNOWN";
    kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
        ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
    /* three separate prints in case of a trap on an unmapped page */
    kprintf("cpuid = %d; ", mycpu->gd_cpuid);
    kprintf("lapic->id = %08x\n", lapic->id);
#endif
    if (type == T_PAGEFLT) {
        kprintf("fault virtual address = 0x%lx\n", eva);
Example 14: vinumioctl

/* ioctl routine */
int
vinumioctl(dev_t dev,
    u_long cmd,
    caddr_t data,
    int flag,
    struct thread *td)
{
    unsigned int objno;
    int error = 0;
    struct sd *sd;
    struct plex *plex;
    struct volume *vol;
    unsigned int index;         /* for transferring config info */
    unsigned int sdno;          /* for transferring config info */
    int fe;                     /* free list element number */
    struct _ioctl_reply *ioctl_reply = (struct _ioctl_reply *) data;   /* struct to return */

    /* First, decide what we're looking at */
    switch (DEVTYPE(dev)) {
    case VINUM_SUPERDEV_TYPE:   /* ordinary super device */
        ioctl_reply = (struct _ioctl_reply *) data; /* save the address to reply to */
        switch (cmd) {
#ifdef VINUMDEBUG
        case VINUM_DEBUG:
            if (((struct debuginfo *) data)->changeit)  /* change debug settings */
                debug = (((struct debuginfo *) data)->param);
            else {
                if (debug & DEBUG_REMOTEGDB)
                    boothowto |= RB_GDB;    /* serial debug line */
                else
                    boothowto &= ~RB_GDB;   /* local ddb */
                Debugger("vinum debug");
            }
            ioctl_reply = (struct _ioctl_reply *) data; /* reinstate the address to reply to */
            ioctl_reply->error = 0;
            return 0;
#endif

        case VINUM_CREATE:      /* create a vinum object */
            error = lock_config();          /* get the config for us alone */
            if (error)                      /* can't do it, */
                return error;               /* give up */
            error = setjmp(command_fail);   /* come back here on error */
            if (error == 0)                 /* first time, */
                ioctl_reply->error = parse_user_config((char *) data,   /* update the config */
                    &keyword_set);
            else if (ioctl_reply->error == 0) { /* longjmp, but no error status */
                ioctl_reply->error = EINVAL;    /* note that something's up */
                ioctl_reply->msg[0] = '\0';     /* no message? */
            }
            unlock_config();
            return 0;           /* must be 0 to return the real error info */

        case VINUM_GETCONFIG:   /* get the configuration information */
            bcopy(&vinum_conf, data, sizeof(vinum_conf));
            return 0;

            /* start configuring the subsystem */
        case VINUM_STARTCONFIG:
            return start_config(*(int *) data); /* just lock it.  Parameter is 'force' */

            /*
             * Move the individual parts of the config to user space.
             *
             * Specify the index of the object in the first word of data,
             * and return the object there
             */
        case VINUM_DRIVECONFIG:
            index = *(int *) data;          /* get the index */
            if (index >= (unsigned) vinum_conf.drives_allocated)    /* can't do it */
                return ENXIO;               /* bang */
            bcopy(&DRIVE[index], data, sizeof(struct _drive));  /* copy the config item out */
            return 0;

        case VINUM_SDCONFIG:
            index = *(int *) data;          /* get the index */
            if (index >= (unsigned) vinum_conf.subdisks_allocated)  /* can't do it */
                return ENXIO;               /* bang */
            bcopy(&SD[index], data, sizeof(struct _sd));        /* copy the config item out */
            return 0;

        case VINUM_PLEXCONFIG:
            index = *(int *) data;          /* get the index */
            if (index >= (unsigned) vinum_conf.plexes_allocated)    /* can't do it */
                return ENXIO;               /* bang */
            bcopy(&PLEX[index], data, sizeof(struct _plex));    /* copy the config item out */
            return 0;

        case VINUM_VOLCONFIG:
            index = *(int *) data;          /* get the index */
            if (index >= (unsigned) vinum_conf.volumes_allocated)   /* can't do it */
                return ENXIO;               /* bang */
            bcopy(&VOL[index], data, sizeof(struct _volume));   /* copy the config item out */
            return 0;

        case VINUM_PLEXSDCONFIG:
            index = *(int *) data;          /* get the plex index */
            sdno = ((int *) data)[1];       /* and the sd index */
            if ((index >= (unsigned) vinum_conf.plexes_allocated)   /* plex doesn't exist */
//......... some code omitted here .........
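All of the VINUM_*CONFIG cases above share one shape: read an object index out of the ioctl buffer, bounds-check it against the allocated count, and copy the selected object back out through the same buffer. A generic userspace sketch of that pattern (all names invented for the demo):

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct item { unsigned int id; };

/* Bounds-check an index read from the request buffer, then copy the
 * selected table entry back out through the same buffer. */
static int
copyout_indexed(void *data, const void *table, size_t nelem, size_t elsize)
{
    unsigned int index = *(unsigned int *)data;

    if (index >= nelem)
        return ENXIO;           /* "bang", as the original comments put it */
    memcpy(data, (const char *)table + (size_t)index * elsize, elsize);
    return 0;
}

int
main(void)
{
    struct item table[4] = { {10}, {11}, {12}, {13} };
    union { unsigned int index; struct item item; } req;

    req.index = 2;
    printf("rc=%d id=%u\n",
        copyout_indexed(&req, table, 4, sizeof(struct item)),
        req.item.id);           /* rc=0 id=12 */
    return 0;
}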
Example 15: initarm

//......... some code omitted here .........
     * and second level page tables.  Pages of memory will be allocated
     * and mapped for other structures that are required for system
     * operation.  When it returns, physical_freestart and free_pages will
     * have been updated to reflect the allocations that were made.  In
     * addition, kernel_l1pt, kernel_pt_table[], systempage, irqstack,
     * abtstack, undstack, kernelstack, msgbufphys will be set to point to
     * the memory that was allocated for them.
     */
    setup_real_page_tables();

    /*
     * Moved from cpu_startup() as data_abort_handler() references
     * this during uvm init.
     */
    proc0paddr = (struct user *)kernelstack.pv_va;
    lwp0.l_addr = proc0paddr;

#ifdef VERBOSE_INIT_ARM
    printf("bootstrap done.\n");
#endif

    arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

    /*
     * Pages were allocated during the secondary bootstrap for the
     * stacks for different CPU modes.
     * We must now set the r13 registers in the different CPU modes to
     * point to these stacks.
     * Since the ARM stacks use STMFD etc. we must set r13 to the top end
     * of the stack memory.
     */
#ifdef VERBOSE_INIT_ARM
    printf("init subsystems: stacks ");
#endif
    set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
    set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
    set_stackptr(PSR_UND32_MODE, undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

    /*
     * Well we should set a data abort handler.
     * Once things get going this will change as we will need a proper
     * handler.
     * Until then we will use a handler that just panics but tells us
     * why.
     * Initialisation of the vectors will just panic on a data abort.
     * This just fills in a slightly better one.
     */
#ifdef VERBOSE_INIT_ARM
    printf("vectors ");
#endif
    data_abort_handler_address = (u_int)data_abort_handler;
    prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
    undefined_handler_address = (u_int)undefinedinstruction_bounce;

    /* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
    printf("undefined ");
#endif
    undefined_init();

    /* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
    printf("page ");
#endif
    uvm_setpagesize();          /* initialize PAGE_SIZE-dependent variables */
    uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
        atop(physical_freestart), atop(physical_freeend),
        VM_FREELIST_DEFAULT);

    /* Boot strap pmap telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
    printf("pmap ");
#endif
    pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

#ifdef VERBOSE_INIT_ARM
    printf("done.\n");
#endif

#ifdef KGDB
    if (boothowto & RB_KDB) {
        kgdb_debug_init = 1;
        kgdb_connect(1);
    }
#endif

#ifdef DDB
    db_machine_init();

    /* Firmware doesn't load symbols. */
    ddb_init(0, NULL, NULL);

    if (boothowto & RB_KDB)
        Debugger();
#endif

    /* We return the new stack pointer address */
    return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}