本文整理汇总了C++中PCPU_SET函数的典型用法代码示例。如果您正苦于以下问题:C++ PCPU_SET函数的具体用法?C++ PCPU_SET怎么用?C++ PCPU_SET使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了PCPU_SET函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: mptable_setup_local
/*
 * Perform BSP-side local APIC setup for the mptable case: record zero
 * as both the APIC ID and the vCPU ID in the BSP's per-CPU data.
 * Always succeeds.
 */
static int
mptable_setup_local(void)
{

	PCPU_SET(vcpu_id, 0);
	PCPU_SET(apic_id, 0);
	return (0);
}
示例2: pcpu_initclock
/*
 * Start the clock interrupt source on the current CPU: reset per-CPU
 * clock bookkeeping, arm the ia64 interval timer one reload period
 * past the current ITC value, and route it to CLOCK_VECTOR.
 */
void
pcpu_initclock(void)
{

	/* No clock adjustment has accumulated yet on this CPU. */
	PCPU_SET(clockadj, 0);
	/* Use the current interval time counter value as the clock base. */
	PCPU_SET(clock, ia64_get_itc());
	/* Arm the interval timer match register one period from now. */
	ia64_set_itm(PCPU_GET(clock) + ia64_clock_reload);
	ia64_set_itv(CLOCK_VECTOR);	/* highest priority class */
	ia64_srlz_d();	/* data serialize so the register writes take effect */
}
示例3: lapic_init
/*
 * Map the local APIC and setup necessary interrupt vectors.
 * addr must be the page-aligned physical address of the LAPIC
 * register window.  Runs on the BSP.
 */
void
lapic_init(vm_paddr_t addr)
{
	u_int regs[4];
	int i, arat;

	/* Map the local APIC and setup the spurious interrupt handler. */
	KASSERT(trunc_page(addr) == addr,
	    ("local APIC not aligned on a page boundary"));
	lapic_paddr = addr;
	lapic = pmap_mapdev(addr, sizeof(lapic_t));
	setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
	    GSEL_APIC);

	/* Perform basic initialization of the BSP's local APIC. */
	lapic_enable();

	/* Set BSP's per-CPU local APIC ID. */
	PCPU_SET(apic_id, lapic_id());

	/* Local APIC timer interrupt. */
	setidt(APIC_TIMER_INT, IDTVEC(timerint), SDT_APIC, SEL_KPL, GSEL_APIC);

	/* Local APIC error interrupt. */
	setidt(APIC_ERROR_INT, IDTVEC(errorint), SDT_APIC, SEL_KPL, GSEL_APIC);

	/* XXX: Thermal interrupt */

	/* Local APIC CMCI. */
	setidt(APIC_CMC_INT, IDTVEC(cmcint), SDT_APICT, SEL_KPL, GSEL_APIC);

	/*
	 * Register the LAPIC timer as an event timer, unless disabled
	 * by setting the "apic.clock" resource to 0.
	 */
	if ((resource_int_value("apic", 0, "clock", &i) != 0 || i != 0)) {
		arat = 0;
		/* Intel CPUID 0x06 EAX[2] set if APIC timer runs in C3. */
		if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_high >= 6) {
			do_cpuid(0x06, regs);
			if ((regs[0] & CPUTPM1_ARAT) != 0)
				arat = 1;
		}
		bzero(&lapic_et, sizeof(lapic_et));
		lapic_et.et_name = "LAPIC";
		lapic_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
		    ET_FLAGS_PERCPU;
		lapic_et.et_quality = 600;
		if (!arat) {
			/* Timer stops in C3: mark it and lower its rating. */
			lapic_et.et_flags |= ET_FLAGS_C3STOP;
			lapic_et.et_quality -= 200;
		}
		lapic_et.et_frequency = 0;
		/* We don't know the frequency yet, so guess at the limits. */
		lapic_et.et_min_period.sec = 0;
		lapic_et.et_min_period.frac = 0x00001000LL << 32;
		lapic_et.et_max_period.sec = 1;
		lapic_et.et_max_period.frac = 0;
		lapic_et.et_start = lapic_et_start;
		lapic_et.et_stop = lapic_et_stop;
		lapic_et.et_priv = NULL;
		et_register(&lapic_et);
	}
}
示例4: efi_arch_enter
/*
* Create an environment for the EFI runtime code call. The most
* important part is creating the required 1:1 physical->virtual
mappings for the runtime segments. To do that, we manually create a
page table that unmaps userspace but provides correct kernel mappings.
* The 1:1 mappings for runtime segments usually occupy low 4G of the
* physical address map.
*
* The 1:1 mappings were chosen over the SetVirtualAddressMap() EFI RT
service, because there are some BIOSes which fail to correctly
relocate themselves on the call, requiring both 1:1 and virtual
mappings. As a result, we must provide the 1:1 mapping anyway, so no
* reason to bother with the virtual map, and no need to add a
* complexity into loader.
*
* The fpu_kern_enter() call allows firmware to use FPU, as mandated
* by the specification. In particular, CR0.TS bit is cleared. Also
it enters a critical section, giving us the necessary protection against
* context switch.
*
* There is no need to disable interrupts around the change of %cr3,
* the kernel mappings are correct, while we only grabbed the
* userspace portion of VA. Interrupts handlers must not access
* userspace. Having interrupts enabled fixes the issue with
* firmware/SMM long operation, which would negatively affect IPIs,
* esp. TLB shootdown requests.
*/
int
efi_arch_enter(void)
{
	pmap_t curpmap;

	/* Caller must hold the current pmap's lock (see efi_arch_leave()). */
	curpmap = PCPU_GET(curpmap);
	PMAP_LOCK_ASSERT(curpmap, MA_OWNED);

	/*
	 * IPI TLB shootdown handler invltlb_pcid_handler() reloads
	 * %cr3 from the curpmap->pm_cr3, which would disable runtime
	 * segments mappings.  Block the handler's action by setting
	 * curpmap to impossible value.  See also comment in
	 * pmap.c:pmap_activate_sw().
	 */
	if (pmap_pcid_enabled && !invpcid_works)
		PCPU_SET(curpmap, NULL);

	/*
	 * Switch to the EFI page table, keeping the current PCID when
	 * PCIDs are in use.
	 */
	load_cr3(VM_PAGE_TO_PHYS(efi_pml4_page) | (pmap_pcid_enabled ?
	    curpmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid : 0));

	/*
	 * If PCID is enabled, the cleared CR3_PCID_SAVE bit in the loaded
	 * %cr3 causes TLB invalidation; otherwise flush explicitly.
	 */
	if (!pmap_pcid_enabled)
		invltlb();
	return (0);
}
示例5: enable_vec
/*
 * Give the thread td use of the AltiVec unit: mark it as this CPU's
 * vector owner and restore its saved vector context (VSCR and v0-v31)
 * from the PCB, initializing the state to zero on first use.
 */
void
enable_vec(struct thread *td)
{
	int msr;
	struct pcb *pcb;
	struct trapframe *tf;

	pcb = td->td_pcb;
	tf = trapframe(td);

	/*
	 * Save the thread's Altivec CPU number, and set the CPU's current
	 * vector thread
	 */
	td->td_pcb->pcb_veccpu = PCPU_GET(cpuid);
	PCPU_SET(vecthread, td);

	/*
	 * Enable the vector unit for when the thread returns from the
	 * exception. If this is the first time the unit has been used by
	 * the thread, initialise the vector registers and VSCR to 0, and
	 * set the flag to indicate that the vector unit is in use.
	 */
	tf->srr1 |= PSL_VEC;
	if (!(pcb->pcb_flags & PCB_VEC)) {
		memset(&pcb->pcb_vec, 0, sizeof pcb->pcb_vec);
		pcb->pcb_flags |= PCB_VEC;
	}

	/*
	 * Temporarily enable the vector unit so the registers
	 * can be restored.
	 */
	msr = mfmsr();
	mtmsr(msr | PSL_VEC);
	isync();

	/*
	 * Restore VSCR by first loading it into a vector and then into VSCR.
	 * (this needs to be done before loading the user's vector registers
	 * since we need to use a scratch vector register)
	 */
	__asm __volatile("vxor 0,0,0; lvewx 0,0,%0; mtvscr 0" \
	    :: "b"(&pcb->pcb_vec.vscr));

	/* Reload all 32 vector registers from the PCB save area. */
#define LVX(n) __asm ("lvx " #n ",0,%0" \
	    :: "b"(&pcb->pcb_vec.vr[n]));
	LVX(0); LVX(1); LVX(2); LVX(3);
	LVX(4); LVX(5); LVX(6); LVX(7);
	LVX(8); LVX(9); LVX(10); LVX(11);
	LVX(12); LVX(13); LVX(14); LVX(15);
	LVX(16); LVX(17); LVX(18); LVX(19);
	LVX(20); LVX(21); LVX(22); LVX(23);
	LVX(24); LVX(25); LVX(26); LVX(27);
	LVX(28); LVX(29); LVX(30); LVX(31);
#undef LVX
	isync();
	/* Drop the temporary PSL_VEC enable; srr1 re-enables it on return. */
	mtmsr(msr);
}
示例6: mips_pcpu0_init
/*
 * Initialize per cpu data structures, include curthread.
 *
 * Sets up the pcpu area for CPU 0 and installs thread0 as its current
 * thread; must run before anything that uses PCPU_* on the BSP.
 */
void
mips_pcpu0_init(void)
{

	/* Initialize pcpu info of cpu-zero */
	pcpu_init(PCPU_ADDR(0), 0, sizeof(struct pcpu));
	/* The BSP runs thread0 until the scheduler takes over. */
	PCPU_SET(curthread, &thread0);
}
示例7: mips_proc0_init
/*
 * Initialize mips and configure to run kernel
 * (sets up proc0/thread0, the initial kernel stack and PCB, and the
 * dynamic per-CPU area).
 */
void
mips_proc0_init(void)
{

#ifdef SMP
	if (platform_processor_id() != 0)
		panic("BSP must be processor number 0");
#endif
	/* Tie proc0 and thread0 together. */
	proc_linkup0(&proc0, &thread0);

	KASSERT((kstack0 & PAGE_MASK) == 0,
	    ("kstack0 is not aligned on a page boundary: 0x%0lx",
	    (long)kstack0));
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = KSTACK_PAGES;
	/*
	 * Do not use cpu_thread_alloc to initialize these fields
	 * thread0 is the only thread that has kstack located in KSEG0
	 * while cpu_thread_alloc handles kstack allocated in KSEG2.
	 */
	/* The PCB sits at the very top of thread0's kernel stack. */
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
	thread0.td_frame = &thread0.td_pcb->pcb_regs;

	/* Steal memory for the dynamic per-cpu area. */
	dpcpu_init((void *)pmap_steal_memory(DPCPU_SIZE), 0);

	PCPU_SET(curpcb, thread0.td_pcb);
	/*
	 * There is no need to initialize md_upte array for thread0 as it's
	 * located in .bss section and should be explicitly zeroed during
	 * kernel initialization.
	 */
}
示例8: acpi_wakeup_machdep
/*
 * MD part of ACPI suspend/resume handling.  Invoked both with
 * interrupts disabled and with interrupts enabled (distinguished by
 * intr_enabled); sleep_result == 1 indicates the machine actually
 * slept and needs its CPU state rebuilt, -1 indicates the suspend
 * failed.  Returns sleep_result unchanged.
 */
int
acpi_wakeup_machdep(struct acpi_softc *sc, int state, int sleep_result,
    int intr_enabled)
{

	/* Failed suspend: nothing to undo. */
	if (sleep_result == -1)
		return (sleep_result);

	if (!intr_enabled) {
		/* Wakeup MD procedures in interrupt disabled context */
		if (sleep_result == 1) {
			/* We really slept: rebuild low-level CPU state. */
			pmap_init_pat();
			initializecpu();
			/* Reset context-switch accounting after the sleep. */
			PCPU_SET(switchtime, 0);
			PCPU_SET(switchticks, ticks);
#ifdef DEV_APIC
			lapic_xapic_mode();
#endif
#ifdef SMP
			if (!CPU_EMPTY(&suspcpus))
				acpi_wakeup_cpus(sc);
#endif
		}

#ifdef SMP
		if (!CPU_EMPTY(&suspcpus))
			restart_cpus(suspcpus);
#endif
		mca_resume();
#ifdef __amd64__
		if (vmm_resume_p != NULL)
			vmm_resume_p();
#endif
		intr_resume(/*suspend_cancelled*/false);

		/* Disarm the firmware waking vector. */
		AcpiSetFirmwareWakingVector(0, 0);
	} else {
		/* Wakeup MD procedures in interrupt enabled context */
		if (sleep_result == 1 && mem_range_softc.mr_op != NULL &&
		    mem_range_softc.mr_op->reinit != NULL)
			mem_range_softc.mr_op->reinit(&mem_range_softc);
	}

	return (sleep_result);
}
示例9: ia64_highfp_enable
/*
 * Give td ownership of this CPU's high FP register set.  If td's high
 * FP state is live on another CPU, IPI that CPU to save it; if another
 * thread owns this CPU's unit, save its state first.  Returns 1 if td
 * already owned this CPU's unit (no restore was needed), 0 if td's
 * state was loaded here.
 */
int
ia64_highfp_enable(struct thread *td, struct trapframe *tf)
{
	struct pcb *pcb;
	struct pcpu *cpu;
	struct thread *td1;

	pcb = td->td_pcb;

	mtx_lock_spin(&ia64_highfp_mtx);
	cpu = pcb->pcb_fpcpu;
#ifdef SMP
	/* td's state lives on some other CPU: force it to be saved there. */
	if (cpu != NULL && cpu != pcpup) {
		KASSERT(cpu->pc_fpcurthread == td,
		    ("cpu->pc_fpcurthread != td"));
		ia64_highfp_ipi(cpu);
	}
#endif
	/* Evict whatever other thread currently owns this CPU's unit. */
	td1 = PCPU_GET(fpcurthread);
	if (td1 != NULL && td1 != td) {
		KASSERT(td1->td_pcb->pcb_fpcpu == pcpup,
		    ("td1->td_pcb->pcb_fpcpu != pcpup"));
		save_high_fp(&td1->td_pcb->pcb_high_fp);
		/* Re-arm the disabled-high-FP trap for the old owner. */
		td1->td_frame->tf_special.psr |= IA64_PSR_DFH;
		td1->td_pcb->pcb_fpcpu = NULL;
		PCPU_SET(fpcurthread, NULL);
		td1 = NULL;
	}
	/* Unit free (td1 == td would mean td already owns it): load td. */
	if (td1 == NULL) {
		KASSERT(pcb->pcb_fpcpu == NULL, ("pcb->pcb_fpcpu != NULL"));
		KASSERT(PCPU_GET(fpcurthread) == NULL,
		    ("PCPU_GET(fpcurthread) != NULL"));
		restore_high_fp(&pcb->pcb_high_fp);
		PCPU_SET(fpcurthread, td);
		pcb->pcb_fpcpu = pcpup;
		tf->tf_special.psr &= ~IA64_PSR_MFH;
	}
	/* In all cases td may now use the high FP registers. */
	tf->tf_special.psr &= ~IA64_PSR_DFH;
	mtx_unlock_spin(&ia64_highfp_mtx);

	return ((td1 != NULL) ? 1 : 0);
}
示例10: sched_run
/*
 * Enter the scheduler's idle loop on the current CPU.  Registers the
 * calling thread as this CPU's idle thread (with no time slice),
 * activates the scheduler, and then forever requests a context switch
 * so that any runnable thread preempts us.  Never returns.
 */
noreturn void sched_run(void) {
  thread_t *td = thread_self();

  PCPU_SET(idle_thread, td);
  td->td_slice = 0;

  sched_active = true;

  while (true) {
    /* Keep asking to be switched away from; we are the idle thread. */
    td->td_flags |= TDF_NEEDSWITCH;
  }
}
示例11: acpi_wakeup_machdep
/*
 * MD part of ACPI suspend/resume (i386 variant).  Invoked both with
 * interrupts disabled and enabled (per intr_enabled); sleep_result == 1
 * means the machine actually slept and CPU state must be rebuilt, -1
 * means the suspend failed.  Returns sleep_result unchanged.
 */
int
acpi_wakeup_machdep(struct acpi_softc *sc, int state,
    int sleep_result, int intr_enabled)
{

	/* Failed suspend: nothing to undo. */
	if (sleep_result == -1)
		return (sleep_result);

	if (intr_enabled == 0) {
		/* Wakeup MD procedures in interrupt disabled context */
		if (sleep_result == 1) {
			/* We really slept: rebuild low-level CPU state. */
			pmap_init_pat();
			/* Restore the page table saved at suspend time. */
			load_cr3(susppcbs[0]->pcb_cr3);
			initializecpu();
			/* Reset context-switch accounting after the sleep. */
			PCPU_SET(switchtime, 0);
			PCPU_SET(switchticks, ticks);
#ifdef SMP
			if (!CPU_EMPTY(&suspcpus))
				acpi_wakeup_cpus(sc, &suspcpus);
#endif
		}

#ifdef SMP
		if (!CPU_EMPTY(&suspcpus))
			restart_cpus(suspcpus);
#endif
		mca_resume();
		intr_resume();
	} else {
		/* Wakeup MD procedures in interrupt enabled context */
		/* Disarm the firmware waking vector. */
		AcpiSetFirmwareWakingVector(0);

		if (sleep_result == 1 && mem_range_softc.mr_op != NULL &&
		    mem_range_softc.mr_op->reinit != NULL)
			mem_range_softc.mr_op->reinit(&mem_range_softc);
	}

	return (sleep_result);
}
示例12: efi_arch_leave
/*
 * Undo efi_arch_enter(): switch %cr3 back to the current process's
 * page table and restore the per-CPU curpmap pointer that the enter
 * path may have NULLed to block the PCID TLB shootdown handler.
 */
void
efi_arch_leave(void)
{
	pmap_t curpmap;

	curpmap = &curproc->p_vmspace->vm_pmap;
	/* Re-publish the real pmap before reloading %cr3. */
	if (pmap_pcid_enabled && !invpcid_works)
		PCPU_SET(curpmap, curpmap);
	load_cr3(curpmap->pm_cr3 | (pmap_pcid_enabled ?
	    curpmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid : 0));
	/* No PCID: flush explicitly, presumably to drop EFI 1:1 entries. */
	if (!pmap_pcid_enabled)
		invltlb();
}
示例13: thread_bootstrap
/*
 * Bootstrap the threading subsystem and set up the main kernel thread.
 * Creates "kernel-main" with kernel_init() as its entry point, passes
 * the kernel environment through the exception frame's argument
 * registers, enables interrupts for it, and installs it as curthread.
 */
static void thread_bootstrap(void) {
  thread_init();

  /* Create main kernel thread */
  thread_t *td = thread_create("kernel-main", (void *)kernel_init, NULL);

  exc_frame_t *kframe = td->td_kframe;
  kframe->a0 = (reg_t)_kenv.argc;
  kframe->a1 = (reg_t)_kenv.argv;
  kframe->sr |= SR_IE; /* the thread will run with interrupts enabled */
  td->td_state = TDS_RUNNING;
  PCPU_SET(curthread, td);
}
示例14: fork_exit
/*
 * Handle the return of a child process from fork1(). This function
 * is called from the MD fork_trampoline() entry point.
 *
 * callout: continuation invoked with (arg, frame); for kernel threads
 *          it normally never returns.
 * arg:     opaque argument for callout.
 * frame:   the new thread's trapframe.
 */
void
fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
    struct trapframe *frame)
{
	struct proc *p;
	struct thread *td;
	struct thread *dtd;

	td = curthread;
	p = td->td_proc;
	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));

	CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
	    td, td_get_sched(td), p->p_pid, td->td_name);

	sched_fork_exit(td);

	/*
	 * Processes normally resume in mi_switch() after being
	 * cpu_switch()'ed to, but when children start up they arrive here
	 * instead, so we must do much the same things as mi_switch() would.
	 */
	/* Reap a thread that died in a previous switch, if any. */
	if ((dtd = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(dtd);
	}
	thread_unlock(td);

	/*
	 * cpu_fork_kthread_handler intercepts this function call to
	 * have this call a non-return function to stay in kernel mode.
	 * initproc has its own fork handler, but it does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	if (p->p_flag & P_KPROC) {
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    td->td_name, p->p_pid);
		kthread_exit();
	}
	mtx_assert(&Giant, MA_NOTOWNED);

	if (p->p_sysent->sv_schedtail != NULL)
		(p->p_sysent->sv_schedtail)(td);
	td->td_pflags &= ~TDP_FORKING;
}
示例15: save_vec
/*
 * Save the AltiVec context of the given thread and release ownership
 * of the vector unit: afterwards the thread is bound to no vector CPU
 * and this CPU has no current vector thread.
 */
void
save_vec(struct thread *td)
{

	save_vec_int(td);

	/*
	 * Clear the current vec thread and pcb's CPU id
	 * XXX should this be left clear to allow lazy save/restore ?
	 */
	td->td_pcb->pcb_veccpu = INT_MAX;
	PCPU_SET(vecthread, NULL);
}