本文整理汇总了C++中curcpu函数的典型用法代码示例。如果您正苦于以下问题:C++ curcpu函数的具体用法?C++ curcpu怎么用?C++ curcpu使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了curcpu函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: callout_hardclock
/*
 * callout_hardclock:
 *
 *	Called from hardclock() once every tick. We schedule a soft
 *	interrupt if there is work to be done.
 */
void
callout_hardclock(void)
{
	struct callout_cpu *cc;
	int needsoftclock, ticks;

	/* Per-CPU callout state for the CPU taking this hardclock tick. */
	cc = curcpu()->ci_data.cpu_callout;

	mutex_spin_enter(cc->cc_lock);

	/* Advance this CPU's tick counter while holding the spin lock. */
	ticks = ++cc->cc_ticks;

	/*
	 * Cascade the timing wheels: whenever a lower wheel's position
	 * wraps to zero, migrate the next-higher wheel's bucket down.
	 */
	MOVEBUCKET(cc, 0, ticks);
	if (MASKWHEEL(0, ticks) == 0) {
		MOVEBUCKET(cc, 1, ticks);
		if (MASKWHEEL(1, ticks) == 0) {
			MOVEBUCKET(cc, 2, ticks);
			if (MASKWHEEL(2, ticks) == 0)
				MOVEBUCKET(cc, 3, ticks);
		}
	}

	/* Check for expired callouts before dropping the lock. */
	needsoftclock = !CIRCQ_EMPTY(&cc->cc_todo);
	mutex_spin_exit(cc->cc_lock);

	/* Schedule the softclock handler outside the spin lock. */
	if (needsoftclock)
		softint_schedule(callout_sih);
}
示例2: xpq_increment_idx
/*
 * Advance this CPU's entry in xpq_idx_array; flush the queue via
 * xpq_flush_queue() once the index reaches XPQUEUE_SIZE.
 */
static inline void
xpq_increment_idx(void)
{
	/* Pre-increment: flush exactly when the last slot was just used. */
	if (__predict_false(++xpq_idx_array[curcpu()->ci_cpuid] == XPQUEUE_SIZE))
		xpq_flush_queue();
}
示例3: arm_dflt_setipl
void
arm_dflt_setipl(int newcpl)
{
struct cpu_info *ci = curcpu();
ci->ci_cpl = newcpl;
}
示例4: cpu_set_curpri
void
cpu_set_curpri(int pri)
{
kpreempt_disable();
curcpu()->ci_schedstate.spc_curpriority = pri;
kpreempt_enable();
}
示例5: dosoftint
/*
 * dosoftint: dispatch pending soft interrupts on the current CPU.
 *
 * Repeatedly snapshots and clears ci_softpending, then runs the
 * handler for each pending queue from highest (SI_NQUEUES - 1) down
 * to lowest priority.  Loops until no new soft interrupts have been
 * posted.  On MULTIPROCESSOR kernels, IPIs are enabled and the big
 * kernel lock is held around the dispatch loop.
 *
 * Fix: declare the function with a (void) prototype instead of a
 * K&R-style empty parameter list.
 */
void
dosoftint(void)
{
	struct cpu_info *ci = curcpu();
	int sir, q, mask;

#ifdef MULTIPROCESSOR
	register_t sr;

	/* Enable interrupts */
	sr = getsr();
	ENABLEIPI();
	__mp_lock(&kernel_lock);
#endif

	while ((sir = ci->ci_softpending) != 0) {
		/* Clear only the bits we are about to service; new ones
		 * may be posted while we run the handlers. */
		atomic_clearbits_int(&ci->ci_softpending, sir);

		/* Highest-priority queues are dispatched first. */
		for (q = SI_NQUEUES - 1; q >= 0; q--) {
			mask = SINTMASK(q);
			if (sir & mask)
				softintr_dispatch(q);
		}
	}

#ifdef MULTIPROCESSOR
	__mp_unlock(&kernel_lock);
	setsr(sr);
#endif
}
示例6: db_cpu_cmd
/*
 * ddb "cpu" command: with no address, dump a summary of all CPUs;
 * with a CPU number, switch ddb's register view to that CPU.
 */
void
db_cpu_cmd(db_expr_t addr, bool have_addr, db_expr_t count, const char *modif)
{
	struct cpu_info *ci;

	if (!have_addr) {
		/* No CPU number given: print per-CPU state and return. */
		cpu_debug_dump();
		return;
	}
	if ((addr < 0) || (addr >= sparc_ncpus)) {
		db_printf("%ld: CPU out of range\n", addr);
		return;
	}
	ci = cpus[addr];
	if (ci == NULL) {
		db_printf("CPU %ld not configured\n", addr);
		return;
	}
	if (ci != curcpu()) {
		/*
		 * A remote CPU must be paused before its saved state can
		 * be examined safely.
		 */
		if (!(ci->flags & CPUFLG_PAUSED)) {
			db_printf("CPU %ld not paused\n", addr);
			return;
		}
	}
	if (ci->ci_ddb_regs == 0) {
		db_printf("CPU %ld has no saved regs\n", addr);
		return;
	}
	db_printf("using CPU %ld", addr);
	/* Point ddb's register accessors at the selected CPU's saved regs. */
	ddb_regp = __UNVOLATILE(ci->ci_ddb_regs);
	ddb_cpuinfo = ci;
}
示例7: tegra_cpufreq_post
static void
tegra_cpufreq_post(void *arg1, void *arg2)
{
struct cpu_info *ci = curcpu();
ci->ci_data.cpu_cc_freq = cpufreq_get_rate() * 1000000;
}
示例8: userret
/*
 * userret: common work on every return to user mode.
 *
 * Delivers pending signals, honours a pending reschedule request,
 * charges profiling time to the trapped pc, and records the LWP's
 * priority in the per-CPU scheduler state.
 */
static __inline void
userret (struct lwp *l, register_t pc, u_quad_t oticks)
{
	struct proc *p = l->l_proc;
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(l)) != 0)
		postsig(sig);

	l->l_priority = l->l_usrpri;
	if (want_resched) {
		/*
		 * We're being preempted.
		 */
		preempt(0);
		/* Signals may have arrived while we were switched out. */
		while ((sig = CURSIG(l)) != 0)
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (l->l_flag & P_PROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - oticks) * psratio);
	}

	curcpu()->ci_schedstate.spc_curpriority = l->l_priority;
}
示例9: mtx_enter_try
/*
 * mtx_enter_try: attempt to take a mutex without blocking.
 *
 * Returns 1 on success (IPL raised to mtx_wantipl, mutex owned by
 * the calling CPU); 0 if another CPU already holds it.
 */
int
mtx_enter_try(struct mutex *mtx)
{
	struct cpu_info *owner, *ci = curcpu();
	int s;

	/* Raise the IPL before taking ownership; s is only valid (and
	 * only consulted below) when mtx_wantipl != IPL_NONE. */
	if (mtx->mtx_wantipl != IPL_NONE)
		s = splraise(mtx->mtx_wantipl);

	/* Atomically claim the mutex iff it is currently unowned. */
	owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
#ifdef DIAGNOSTIC
	if (__predict_false(owner == ci))
		panic("mtx %p: locking against myself", mtx);
#endif
	if (owner == NULL) {
		/* Success: save the previous IPL for mtx_leave(). */
		if (mtx->mtx_wantipl != IPL_NONE)
			mtx->mtx_oldipl = s;
#ifdef DIAGNOSTIC
		ci->ci_mutex_level++;
#endif
		/* Ensure ownership is visible before protected accesses. */
		membar_enter();
		return (1);
	}

	/* Lost the race: undo the IPL change and report failure. */
	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);
	return (0);
}
示例10: percpu_getref
/*
 * percpu_getref: return the calling CPU's private instance of the
 * per-CPU object.  Preemption remains disabled when this returns,
 * so the pointer stays bound to this CPU until the reference is
 * released.
 */
void *
percpu_getref(percpu_t *pc)
{
	void *ptr;

	kpreempt_disable();
	ptr = percpu_getptr_remote(pc, curcpu());
	return ptr;
}
示例11: secondary_main
/*
 * Further secondary CPU initialization.
 *
 * We are now running on our startup stack, with proper page tables.
 * There is nothing to do but display some details about the CPU and its CMMUs.
 */
void
secondary_main()
{
	struct cpu_info *ci = curcpu();
	int s;

	cpu_configuration_print(0);
	ncpus++;

	/* Set up this CPU's scheduler state before it can run anything. */
	sched_init_cpu(ci);
	nanouptime(&ci->ci_schedstate.spc_runtime);
	ci->ci_curproc = NULL;
	/* +1 keeps the seed nonzero even if the masked value is 0. */
	ci->ci_randseed = (arc4random() & 0x7fffffff) + 1;

	/*
	 * Release cpu_hatch_mutex to let other secondary processors
	 * have a chance to run.
	 */
	hatch_pending_count--;
	__cpu_simple_unlock(&cpu_hatch_mutex);

	/* wait for cpu_boot_secondary_processors() */
	__cpu_simple_lock(&cpu_boot_mutex);
	__cpu_simple_unlock(&cpu_boot_mutex);

	spl0();
	SCHED_LOCK(s);
	/* Enable interrupts and advertise this CPU as alive. */
	set_psr(get_psr() & ~PSR_IND);
	SET(ci->ci_flags, CIF_ALIVE);

	/* Hand off to the scheduler; with a NULL outgoing proc this is
	 * not expected to return here. */
	cpu_switchto(NULL, sched_chooseproc());
}
示例12: wb_userret
/*
 * Same as above, but also handles writeback completion on 68040.
 */
void
wb_userret(struct proc *p, struct frame *fp)
{
	int sig;
	union sigval sv;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	p->p_priority = p->p_usrpri;

	/*
	 * Deal with user mode writebacks (from trap, or from sigreturn).
	 * If any writeback fails, go back and attempt signal delivery
	 * unless we have already been here and attempted the writeback
	 * (e.g. bad address with user ignoring SIGSEGV). In that case,
	 * we just return to the user without successfully completing
	 * the writebacks. Maybe we should just drop the sucker?
	 */
	if (mmutype == MMU_68040 && fp->f_format == FMT7) {
		if ((sig = writeback(fp)) != 0) {
			/* Writeback faulted: post the fault signal with the
			 * faulting address, then deliver pending signals. */
			sv.sival_ptr = (void *)fp->f_fmt7.f_fa;
			trapsignal(p, sig, T_MMUFLT, SEGV_MAPERR, sv);

			while ((sig = CURSIG(p)) != 0)
				postsig(sig);
			p->p_priority = p->p_usrpri;
		}
	}

	curcpu()->ci_schedstate.spc_curpriority = p->p_priority;
}
示例13: cpu_intr
/*
 * cpu_intr: MIPS hardware interrupt dispatcher.
 *
 * Entered at IPL_HIGH.  Repeatedly lowers the IPL to that of the
 * highest pending interrupt above ppl, dispatches the clock and/or
 * I/O handlers for it, then raises back to IPL_HIGH and looks again.
 * Returns, still at IPL_HIGH, once nothing above ppl is pending.
 */
void
cpu_intr(int ppl, vaddr_t pc, uint32_t status)
{
	struct cpu_info * const ci = curcpu();
	uint32_t pending;
	int ipl;
#ifdef DIAGNOSTIC
	/* Snapshots used below to assert handlers balanced their locks. */
	const int mtx_count = ci->ci_mtx_count;
	const u_int biglock_count = ci->ci_biglock_count;
	const u_int blcnt = curlwp->l_blcnt;
#endif

	KASSERT(ci->ci_cpl == IPL_HIGH);
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

	ci->ci_data.cpu_nintr++;

	while (ppl < (ipl = splintr(&pending))) {
		KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
		splx(ipl);	/* lower to interrupt level */
		KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
		KASSERTMSG(ci->ci_cpl == ipl,
		    "%s: cpl (%d) != ipl (%d)", __func__, ci->ci_cpl, ipl);
		KASSERT(pending != 0);

		/*
		 * Record the trap context for the clock handler.
		 * NOTE(review): cf is not declared in this function --
		 * presumably a file-scope clockframe; confirm its use is
		 * safe against nested interrupts.
		 */
		cf.pc = pc;
		cf.sr = status;
		cf.intr = (ci->ci_idepth > 1);

#ifdef MIPS3_ENABLE_CLOCK_INTR
		if (pending & MIPS_INT_MASK_5) {
			KASSERTMSG(ipl == IPL_SCHED,
			    "%s: ipl (%d) != IPL_SCHED (%d)",
			    __func__, ipl, IPL_SCHED);
			/* call the common MIPS3 clock interrupt handler */
			mips3_clockintr(&cf);
			pending ^= MIPS_INT_MASK_5;
		}
#endif

		if (pending != 0) {
			/* Process I/O and error interrupts. */
			evbmips_iointr(ipl, pc, pending);
		}

		/* Handlers must not leak mutexes or big-lock holds. */
		KASSERT(biglock_count == ci->ci_biglock_count);
		KASSERT(blcnt == curlwp->l_blcnt);
		KASSERT(mtx_count == ci->ci_mtx_count);

		/*
		 * If even our spl is higher now (due to interrupting while
		 * spin-lock is held and higher IPL spin-lock is locked, it
		 * can no longer be locked so it's safe to lower IPL back
		 * to ppl.
		 */
		(void) splhigh();	/* disable interrupts */
	}

	KASSERT(ci->ci_cpl == IPL_HIGH);
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}
示例14: amlogic_cpufreq_cb
static void
amlogic_cpufreq_cb(void *arg1, void *arg2)
{
struct cpu_info *ci = curcpu();
ci->ci_data.cpu_cc_freq = cpufreq_get_rate() * 1000000;
}
示例15: cpu_idle_mwait_cycle
/*
 * Idle-loop body using MONITOR/MWAIT: sleep until an interrupt
 * arrives or another CPU clears our mwait flags to wake us.
 */
void
cpu_idle_mwait_cycle(void)
{
	struct cpu_info *ci = curcpu();

	if ((read_rflags() & PSL_I) == 0)
		panic("idle with interrupts blocked!");

	/* something already queued? */
	if (!cpu_is_idle(ci))
		return;

	/*
	 * About to idle; setting the MWAIT_IDLING bits tells
	 * cpu_unidle() that it can't be a no-op and tells cpu_kick()
	 * that it doesn't need to use an IPI; those routines clear
	 * the bits to stop the mwait.  (The original comment named
	 * MWAIT_IN_IDLE/MWAIT_KEEP_IDLING; the code actually sets
	 * MWAIT_IDLING | MWAIT_ONLY.)  Once they're set, we do a
	 * final check of the queue, in case another cpu called
	 * setrunqueue() and added something to the queue and called
	 * cpu_unidle() between the check in sched_idle() and here.
	 */
	atomic_setbits_int(&ci->ci_mwait, MWAIT_IDLING | MWAIT_ONLY);
	if (cpu_is_idle(ci)) {
		/* Arm the monitor on our mwait word... */
		monitor(&ci->ci_mwait, 0, 0);
		/* ...and only mwait if nobody cleared the bits meanwhile. */
		if ((ci->ci_mwait & MWAIT_IDLING) == MWAIT_IDLING)
			mwait(0, 0);
	}

	/* done idling; let cpu_kick() know that an IPI is required */
	atomic_clearbits_int(&ci->ci_mwait, MWAIT_IDLING);
}