This article collects typical usage examples of the smp_processor_id() function from C/C++ code (Linux kernel and Xen sources). If you are wondering what smp_processor_id() does, how to call it, or what real call sites look like, the curated code examples below may help.
Below, 15 code examples of smp_processor_id() are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better code examples.
Example 1: ia64_sync_itc
/*
* Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
* (normally the time-keeper CPU). We use a closed loop to eliminate the possibility of
* unaccounted-for errors (such as getting a machine check in the middle of a calibration
* step). The basic idea is for the slave to ask the master what itc value it has and to
* read its own itc before and after the master responds. Each iteration gives us three
* timestamps:
*
*          slave          master
*
*   t0 ---\
*          ---\
*              --->
*                  tm
*              /---
*          /---
*   t1 <---
*
*
* The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
* and t1. If we achieve this, the clocks are synchronized provided the interconnect
* between the slave and the master is symmetric. Even if the interconnect were
* asymmetric, we would still know that the synchronization error is smaller than the
* roundtrip latency (t1 - t0).
*
* When the interconnect is quiet and symmetric, this lets us synchronize the itc to
* within one or two cycles. However, we can only *guarantee* that the synchronization is
* accurate to within a round-trip time, which is typically in the range of several
* hundred cycles (e.g., ~500 cycles). In practice, this means that the itc's are usually
* almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
* than half a microsecond or so.
*/
void
ia64_sync_itc (unsigned int master)
{
long i, delta, adj, adjust_latency = 0, done = 0;
unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_ITC_SYNC
struct {
long rt; /* roundtrip time */
long master; /* master's timestamp */
long diff; /* difference between midpoint and master's timestamp */
long lat; /* estimate of itc adjustment latency */
} t[NUM_ROUNDS];
#endif
/*
* Make sure local timer ticks are disabled while we sync. If
* they were enabled, we'd have to worry about nasty issues
* like setting the ITC ahead of (or a long time before) the
* next scheduled tick.
*/
BUG_ON((ia64_get_itv() & (1 << 16)) == 0);
go[MASTER] = 1;
if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
return;
}
while (go[MASTER])
cpu_relax(); /* wait for master to be ready */
spin_lock_irqsave(&itc_sync_lock, flags);
{
for (i = 0; i < NUM_ROUNDS; ++i) {
delta = get_delta(&rt, &master_time_stamp);
if (delta == 0) {
done = 1; /* let's lock on to this... */
bound = rt;
}
if (!done) {
if (i > 0) {
adjust_latency += -delta;
adj = -delta + adjust_latency/4;
} else
adj = -delta;
ia64_set_itc(ia64_get_itc() + adj);
}
#if DEBUG_ITC_SYNC
t[i].rt = rt;
t[i].master = master_time_stamp;
t[i].diff = delta;
t[i].lat = adjust_latency/4;
#endif
}
}
spin_unlock_irqrestore(&itc_sync_lock, flags);
#if DEBUG_ITC_SYNC
for (i = 0; i < NUM_ROUNDS; ++i)
printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif
printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
"maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
//......... part of the code is omitted here .........
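The comment at the top of ia64_sync_itc() describes the midpoint estimate: read t0, have the master report tm, read t1, then shift the slave's clock so that tm falls halfway between t0 and t1. Below is a minimal user-space C sketch of just that arithmetic, under the simplifying assumptions that the link is symmetric and noiseless and that the clocks do not advance between rounds; the simulated counters and latency constants are made up for illustration and are not the kernel's get_delta()/ia64_set_itc() implementation.

#include <stdio.h>
#include <stdint.h>

/* Simulated counters: the master is ahead of the slave by an unknown offset. */
static int64_t slave_clock  = 1000;
static int64_t master_clock = 1750;
static const int64_t one_way_latency = 40;   /* symmetric interconnect assumed */

/*
 * One measurement round: the slave reads t0, the master replies with tm,
 * the slave reads t1.  Returns how far tm is from the midpoint of [t0, t1],
 * i.e. the offset the slave should add to its own clock.
 */
static int64_t measure_delta(int64_t *rt)
{
    int64_t t0 = slave_clock;                       /* slave timestamp before the request      */
    int64_t tm = master_clock + one_way_latency;    /* master samples when the request arrives */
    int64_t t1 = t0 + 2 * one_way_latency;          /* slave timestamp after the reply         */

    *rt = t1 - t0;                                  /* roundtrip time: the error bound         */
    return tm - (t0 + (t1 - t0) / 2);
}

int main(void)
{
    for (int round = 0; round < 3; round++) {
        int64_t rt;
        int64_t delta = measure_delta(&rt);

        /* Analogous to ia64_set_itc(ia64_get_itc() + adj) in the code above. */
        slave_clock += delta;
        printf("round %d: rt=%lld delta=%lld slave=%lld master=%lld\n",
               round, (long long)rt, (long long)delta,
               (long long)slave_clock, (long long)master_clock);
    }
    return 0;
}

With a symmetric, noiseless link the offset is recovered in a single round; the kernel loops NUM_ROUNDS times mainly to smooth out interconnect jitter and adjustment latency.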
Example 2: tick_check_new_device
/*
* Check, if the new registered device should be used.
*/
static int tick_check_new_device(struct clock_event_device *newdev)
{
struct clock_event_device *curdev;
struct tick_device *td;
int cpu, ret = NOTIFY_OK;
unsigned long flags;
raw_spin_lock_irqsave(&tick_device_lock, flags);
cpu = smp_processor_id();
if (!cpumask_test_cpu(cpu, newdev->cpumask))
goto out_bc;
td = &per_cpu(tick_cpu_device, cpu);
curdev = td->evtdev;
/* cpu local device ? */
if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {
/*
* If the cpu affinity of the device interrupt can not
* be set, ignore it.
*/
if (!irq_can_set_affinity(newdev->irq))
goto out_bc;
/*
* If we have a cpu local device already, do not replace it
* by a non cpu local device
*/
if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
goto out_bc;
}
/*
* If we have an active device, then check the rating and the oneshot
* feature.
*/
if (curdev) {
/*
* Prefer one shot capable devices !
*/
if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
!(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
goto out_bc;
/*
* Check the rating
*/
if (curdev->rating >= newdev->rating)
goto out_bc;
}
/*
* Replace the existing device, if any, by the new
* device. If the current device is the broadcast device, do
* not give it back to the clockevents layer !
*/
if (tick_is_broadcast_device(curdev)) {
clockevents_shutdown(curdev);
curdev = NULL;
}
clockevents_exchange_device(curdev, newdev);
tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
tick_oneshot_notify();
raw_spin_unlock_irqrestore(&tick_device_lock, flags);
return NOTIFY_STOP;
out_bc:
/*
* Can the new device be used as a broadcast device ?
*/
if (tick_check_broadcast_device(newdev))
ret = NOTIFY_STOP;
raw_spin_unlock_irqrestore(&tick_device_lock, flags);
return ret;
}
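tick_check_new_device() effectively applies a preference order: never replace a CPU-local device with a global one, prefer oneshot-capable devices, and otherwise pick the higher rating. Here is a stand-alone C sketch of that decision; struct fake_clock_event, the FEAT_ONESHOT flag, and the sample ratings are simplified assumptions, not the real struct clock_event_device layout.

#include <stdbool.h>
#include <stdio.h>

#define FEAT_ONESHOT 0x1

struct fake_clock_event {
    const char *name;
    unsigned int features;
    int rating;           /* illustrative ratings, not the kernel's values */
    bool cpu_local;       /* stands in for cpumask_equal(dev->cpumask, cpumask_of(cpu)) */
};

/* Return true if newdev should replace curdev as this CPU's tick device. */
static bool prefer_new(const struct fake_clock_event *curdev,
                       const struct fake_clock_event *newdev)
{
    if (!newdev->cpu_local && curdev && curdev->cpu_local)
        return false;                         /* never replace a local device with a global one */
    if (!curdev)
        return true;                          /* nothing installed yet */
    if ((curdev->features & FEAT_ONESHOT) && !(newdev->features & FEAT_ONESHOT))
        return false;                         /* prefer oneshot-capable devices */
    return newdev->rating > curdev->rating;   /* otherwise decide by rating */
}

int main(void)
{
    struct fake_clock_event hpet  = { "hpet",           0,            50,  false };
    struct fake_clock_event lapic = { "lapic",          FEAT_ONESHOT, 100, true  };
    struct fake_clock_event dline = { "lapic-deadline", FEAT_ONESHOT, 150, true  };

    printf("replace hpet with lapic?      %d\n", prefer_new(&hpet, &lapic));   /* 1 */
    printf("replace lapic with hpet?      %d\n", prefer_new(&lapic, &hpet));   /* 0 */
    printf("replace lapic with deadline?  %d\n", prefer_new(&lapic, &dline));  /* 1 */
    return 0;
}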
Example 3: cris_mmu_init
/*
* The kernel is already mapped with linear mapping at kseg_c so there's no
* need to map it with a page table. However, head.S also temporarily mapped it
* at kseg_4 thus the ksegs are set up again. Also clear the TLB and do various
* other paging stuff.
*/
void __init
cris_mmu_init(void)
{
unsigned long mmu_config;
unsigned long mmu_kbase_hi;
unsigned long mmu_kbase_lo;
unsigned short mmu_page_id;
/*
* Make sure the current pgd table points to something sane, even if it
* is most probably not used until the next switch_mm.
*/
per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;
#ifdef CONFIG_SMP
{
pgd_t **pgd;
pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
SUPP_BANK_SEL(1);
SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
SUPP_BANK_SEL(2);
SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
}
#endif
/* Initialise the TLB. Function found in tlb.c. */
tlb_init();
/* Enable exceptions and initialize the kernel segments. */
mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on) |
REG_STATE(mmu, rw_mm_cfg, acc, on) |
REG_STATE(mmu, rw_mm_cfg, ex, on) |
REG_STATE(mmu, rw_mm_cfg, inv, on) |
REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
REG_STATE(mmu, rw_mm_cfg, seg_d, page) |
REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
#ifndef CONFIG_ETRAXFS_SIM
REG_STATE(mmu, rw_mm_cfg, seg_a, page) |
#else
REG_STATE(mmu, rw_mm_cfg, seg_a, linear) |
#endif
REG_STATE(mmu, rw_mm_cfg, seg_9, page) |
REG_STATE(mmu, rw_mm_cfg, seg_8, page) |
REG_STATE(mmu, rw_mm_cfg, seg_7, page) |
REG_STATE(mmu, rw_mm_cfg, seg_6, page) |
REG_STATE(mmu, rw_mm_cfg, seg_5, page) |
REG_STATE(mmu, rw_mm_cfg, seg_4, page) |
REG_STATE(mmu, rw_mm_cfg, seg_3, page) |
REG_STATE(mmu, rw_mm_cfg, seg_2, page) |
REG_STATE(mmu, rw_mm_cfg, seg_1, page) |
REG_STATE(mmu, rw_mm_cfg, seg_0, page));
mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
#ifndef CONFIG_ETRAXFS_SIM
REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
#else
REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x0) |
#endif
REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
#ifndef CONFIG_ETRAXFS_SIM
REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
#else
REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa) |
#endif
REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));
mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));
mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);
/* Update the instruction MMU. */
SUPP_BANK_SEL(BANK_IM);
SUPP_REG_WR(RW_MM_CFG, mmu_config);
SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);
/* Update the data MMU. */
SUPP_BANK_SEL(BANK_DM);
SUPP_REG_WR(RW_MM_CFG, mmu_config);
SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
//......... part of the code is omitted here .........
Example 4: vprintk
asmlinkage int vprintk(const char *fmt, va_list args)
{
int printed_len = 0;
int current_log_level = default_message_loglevel;
unsigned long flags;
int this_cpu;
char *p;
boot_delay_msec();
preempt_disable();
/* This stops the holder of console_sem just where we want him */
raw_local_irq_save(flags);
this_cpu = smp_processor_id();
/*
* Ouch, printk recursed into itself!
*/
if (unlikely(printk_cpu == this_cpu)) {
/*
* If a crash is occurring during printk() on this CPU,
* then try to get the crash message out but make sure
* we can't deadlock. Otherwise just return to avoid the
* recursion and return - but flag the recursion so that
* it can be printed at the next appropriate moment:
*/
if (!oops_in_progress) {
recursion_bug = 1;
goto out_restore_irqs;
}
zap_locks();
}
lockdep_off();
spin_lock(&logbuf_lock);
printk_cpu = this_cpu;
if (recursion_bug) {
recursion_bug = 0;
strcpy(printk_buf, recursion_bug_msg);
printed_len = sizeof(recursion_bug_msg);
}
/* Emit the output into the temporary buffer */
printed_len += vscnprintf(printk_buf + printed_len,
sizeof(printk_buf) - printed_len, fmt, args);
/*
* Copy the output into log_buf. If the caller didn't provide
* appropriate log level tags, we insert them here
*/
for (p = printk_buf; *p; p++) {
if (new_text_line) {
/* If a token, set current_log_level and skip over */
if (p[0] == '<' && p[1] >= '0' && p[1] <= '7' &&
p[2] == '>') {
current_log_level = p[1] - '0';
p += 3;
printed_len -= 3;
}
/* Always output the token */
emit_log_char('<');
emit_log_char(current_log_level + '0');
emit_log_char('>');
#ifdef CONFIG_KERNEL_LOGGING
{
char tbuf[3];
// check kernel start
if( (b_first_call_after_booting == 0) &&
(logging_mode & LOGGING_RAM_MASK) &&
(!ioremapped && log_buf_base ) )
{
// char tempStar[] = "********************************************\n";
char tempChar[] = "============== start kernel logging !! ==============\n";
b_first_call_after_booting = 1;
// emit_log_char_RAMbuf(tempStar, sizeof(tempStar));
emit_log_char_RAMbuf(tempChar, sizeof(tempChar));
// emit_log_char_RAMbuf(tempStar, sizeof(tempStar));
}
sprintf(tbuf, "<%1d>",default_message_loglevel);
emit_log_char_RAMbuf(tbuf, 3);
}
#endif
printed_len += 3;
new_text_line = 0;
if (printk_time) {
/* Follow the token with the time */
char tbuf[50], *tp;
unsigned tlen;
unsigned long long t;
unsigned long nanosec_rem;
t = cpu_clock(printk_cpu);
nanosec_rem = do_div(t, 1000000000);
//......... part of the code is omitted here .........
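At the start of each new line, vprintk() checks for a "<N>" loglevel token and inserts one with the default level if the caller omitted it. A small user-space sketch of that prefix handling follows; the buffer size and DEFAULT_LOGLEVEL value are arbitrary assumptions.

#include <stdio.h>

#define DEFAULT_LOGLEVEL 4   /* illustrative default; the kernel value is configurable */

/* Copy msg into out, making sure it starts with a "<N>" loglevel token. */
static void normalize_loglevel(const char *msg, char *out, size_t outlen)
{
    int level = DEFAULT_LOGLEVEL;

    /* If the message already carries "<0>".."<7>", consume it. */
    if (msg[0] == '<' && msg[1] >= '0' && msg[1] <= '7' && msg[2] == '>') {
        level = msg[1] - '0';
        msg += 3;
    }
    snprintf(out, outlen, "<%d>%s", level, msg);
}

int main(void)
{
    char out[128];

    normalize_loglevel("<3>disk error on sda\n", out, sizeof(out));
    fputs(out, stdout);                       /* keeps the caller's level 3 */

    normalize_loglevel("plain message without a token\n", out, sizeof(out));
    fputs(out, stdout);                       /* gets the default level prepended */
    return 0;
}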
Example 5: mcheck_cmn_handler
/* Shared #MC handler. */
void mcheck_cmn_handler(const struct cpu_user_regs *regs)
{
struct mca_banks *bankmask = mca_allbanks;
struct mca_banks *clear_bank = __get_cpu_var(mce_clear_banks);
uint64_t gstatus;
mctelem_cookie_t mctc = NULL;
struct mca_summary bs;
mce_spin_lock(&mce_logout_lock);
if (clear_bank != NULL) {
memset( clear_bank->bank_map, 0x0,
sizeof(long) * BITS_TO_LONGS(clear_bank->num));
}
mctc = mcheck_mca_logout(MCA_MCE_SCAN, bankmask, &bs, clear_bank);
if (bs.errcnt) {
/*
* Uncorrected errors must be dealt with in softirq context.
*/
if (bs.uc || bs.pcc) {
add_taint(TAINT_MACHINE_CHECK);
if (mctc != NULL)
mctelem_defer(mctc);
/*
* For PCC=1 and can't be recovered, context is lost, so
* reboot now without clearing the banks, and deal with
* the telemetry after reboot (the MSRs are sticky)
*/
if (bs.pcc || !bs.recoverable)
cpumask_set_cpu(smp_processor_id(), &mce_fatal_cpus);
} else {
if (mctc != NULL)
mctelem_commit(mctc);
}
atomic_set(&found_error, 1);
/* The last CPU will take care of check/clean-up etc. */
atomic_set(&severity_cpu, smp_processor_id());
mce_printk(MCE_CRITICAL, "MCE: clear_bank map %lx on CPU%d\n",
*((unsigned long*)clear_bank), smp_processor_id());
if (clear_bank != NULL)
mcheck_mca_clearbanks(clear_bank);
} else {
if (mctc != NULL)
mctelem_dismiss(mctc);
}
mce_spin_unlock(&mce_logout_lock);
mce_barrier_enter(&mce_trap_bar);
if ( mctc != NULL && mce_urgent_action(regs, mctc))
cpumask_set_cpu(smp_processor_id(), &mce_fatal_cpus);
mce_barrier_exit(&mce_trap_bar);
/*
* Wait until everybody has processed the trap.
*/
mce_barrier_enter(&mce_trap_bar);
if (atomic_read(&severity_cpu) == smp_processor_id())
{
/* According to the SDM, if no error bank is found on any CPU,
 * something unexpected is happening; we can't do any
 * recovery work, so reset the system.
*/
if (atomic_read(&found_error) == 0)
mc_panic("MCE: No CPU found valid MCE, need reset");
if (!cpumask_empty(&mce_fatal_cpus))
{
char *ebufp, ebuf[96] = "MCE: Fatal error happened on CPUs ";
ebufp = ebuf + strlen(ebuf);
cpumask_scnprintf(ebufp, 95 - strlen(ebuf), &mce_fatal_cpus);
mc_panic(ebuf);
}
atomic_set(&found_error, 0);
}
mce_barrier_exit(&mce_trap_bar);
/* Clear flags after above fatal check */
mce_barrier_enter(&mce_trap_bar);
gstatus = mca_rdmsr(MSR_IA32_MCG_STATUS);
if ((gstatus & MCG_STATUS_MCIP) != 0) {
mce_printk(MCE_CRITICAL, "MCE: Clear [email protected] last step");
mca_wrmsr(MSR_IA32_MCG_STATUS, 0);
}
mce_barrier_exit(&mce_trap_bar);
raise_softirq(MACHINE_CHECK_SOFTIRQ);
}
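In mcheck_cmn_handler(), every CPU that saw an error stores its id in severity_cpu, and after the rendezvous barriers only the CPU whose id is still recorded performs the global check and clean-up. The pthread sketch below illustrates that "last writer does the clean-up" pattern in user space; thread ids stand in for CPU ids and pthread_barrier_wait() stands in for mce_barrier_enter()/mce_barrier_exit(), so this is only an analogy, not the Xen implementation.

#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

static atomic_int severity_cpu = -1;   /* last "CPU" that reported an error  */
static pthread_barrier_t barrier;      /* analogy for mce_barrier_enter/exit */

static void *mce_handler(void *arg)
{
    int cpu = (int)(long)arg;

    /* Every participant records itself; the last store wins the election. */
    atomic_store(&severity_cpu, cpu);

    /* Wait until everybody has processed the trap. */
    pthread_barrier_wait(&barrier);

    /* Only the elected "CPU" performs the global check/clean-up work. */
    if (atomic_load(&severity_cpu) == cpu)
        printf("cpu %d: doing the global clean-up\n", cpu);

    pthread_barrier_wait(&barrier);
    return NULL;
}

int main(void)   /* build with: cc -pthread file.c */
{
    pthread_t tid[NTHREADS];

    pthread_barrier_init(&barrier, NULL, NTHREADS);
    for (long i = 0; i < NTHREADS; i++)
        pthread_create(&tid[i], NULL, mce_handler, (void *)i);
    for (int i = 0; i < NTHREADS; i++)
        pthread_join(tid[i], NULL);
    pthread_barrier_destroy(&barrier);
    return 0;
}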
Example 6: xics_migrate_irqs_away
/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
unsigned int irq, virq;
struct irq_desc *desc;
/* If we used to be the default server, move to the new "boot_cpuid" */
if (hw_cpu == xics_default_server)
xics_update_irq_servers();
/* Reject any interrupt that was queued to us... */
icp_ops->set_priority(0);
/* Remove ourselves from the global interrupt queue */
xics_set_cpu_giq(xics_default_distrib_server, 0);
/* Allow IPIs again... */
icp_ops->set_priority(DEFAULT_PRIORITY);
for_each_irq_desc(virq, desc) {
struct irq_chip *chip;
long server;
unsigned long flags;
struct ics *ics;
/* We can't set affinity on ISA interrupts */
if (virq < NUM_ISA_INTERRUPTS)
continue;
/* We only need to migrate enabled IRQS */
if (!desc->action)
continue;
if (desc->irq_data.domain != xics_host)
continue;
irq = desc->irq_data.hwirq;
/* We need to get IPIs still. */
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
continue;
chip = irq_desc_get_chip(desc);
if (!chip || !chip->irq_set_affinity)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
/* Locate interrupt server */
server = -1;
ics = irq_get_chip_data(virq);
if (ics)
server = ics->get_server(ics, irq);
if (server < 0) {
printk(KERN_ERR "%s: Can't find server for irq %d\n",
__func__, irq);
goto unlock;
}
/* We only support delivery to all cpus or to one cpu.
* The irq has to be migrated only in the single cpu
* case.
*/
if (server != hw_cpu)
goto unlock;
/* This is expected during cpu offline. */
if (cpu_online(cpu))
pr_warning("IRQ %u affinity broken off cpu %u\n",
virq, cpu);
/* Reset affinity to all cpus */
raw_spin_unlock_irqrestore(&desc->lock, flags);
irq_set_affinity(virq, cpu_all_mask);
continue;
unlock:
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
Example 7: timer_interrupt
/*
* timer_interrupt - gets called when the decrementer overflows,
* with interrupts disabled.
* We set it up to overflow again in 1/HZ seconds.
*/
void timer_interrupt(struct pt_regs * regs)
{
int next_dec;
unsigned long cpu = smp_processor_id();
unsigned jiffy_stamp = last_jiffy_stamp(cpu);
extern void do_IRQ(struct pt_regs *);
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
irq_enter();
while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) <= 0) {
jiffy_stamp += tb_ticks_per_jiffy;
profile_tick(CPU_PROFILING, regs);
update_process_times(user_mode(regs));
if (smp_processor_id())
continue;
/* We are in an interrupt, no need to save/restore flags */
write_seqlock(&xtime_lock);
tb_last_stamp = jiffy_stamp;
do_timer(regs);
/*
* update the rtc when needed, this should be performed on the
* right fraction of a second. Half or full second ?
* Full second works on mk48t59 clocks, others need testing.
* Note that this update is basically only used through
* the adjtimex system calls. Setting the HW clock in
* any other way is a /dev/rtc and userland business.
* This is still wrong by -0.5/+1.5 jiffies because of the
* timer interrupt resolution and possible delay, but here we
* hit a quantization limit which can only be solved by higher
* resolution timers and decoupling time management from timer
* interrupts. This is also wrong on the clocks
* which require being written at the half second boundary.
* We should have an rtc call that only sets the minutes and
* seconds like on Intel to avoid problems with non UTC clocks.
*/
if ( ppc_md.set_rtc_time && ntp_synced() &&
xtime.tv_sec - last_rtc_update >= 659 &&
abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
jiffies - wall_jiffies == 1) {
if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
last_rtc_update = xtime.tv_sec+1;
else
/* Try again one minute later */
last_rtc_update += 60;
}
write_sequnlock(&xtime_lock);
}
if ( !disarm_decr[smp_processor_id()] )
set_dec(next_dec);
last_jiffy_stamp(cpu) = jiffy_stamp;
if (ppc_md.heartbeat && !ppc_md.heartbeat_count--)
ppc_md.heartbeat();
irq_exit();
}
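The RTC-update condition above fires at most every ~11 minutes (>= 659 s) and only during the tick just before a second boundary, so that set_rtc_time(xtime.tv_sec + 1) lands close to that boundary. The sketch below evaluates the same window check in user space, assuming HZ = 100; the sample timestamps are made up.

#include <stdio.h>
#include <stdlib.h>

#define HZ 100   /* assumed tick rate; the kernel value is configuration dependent */

/* Return 1 if a timestamp this many microseconds into the second falls inside
 * the "one tick before the second boundary" window used in the code above. */
static int in_rtc_update_window(long usec_of_second)
{
    long target    = 1000000 - 1000000 / HZ;   /* 990000 us for HZ=100 */
    long half_tick = 500000 / HZ;              /*   5000 us for HZ=100 */
    return labs(usec_of_second - target) < half_tick;
}

int main(void)
{
    long samples[] = { 0, 500000, 985001, 990000, 994999, 995000 };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%6ld us -> %s\n", samples[i],
               in_rtc_update_window(samples[i]) ? "update RTC" : "skip");
    return 0;
}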
Example 8: vprintk
asmlinkage int vprintk(const char *fmt, va_list args)
{
int printed_len = 0;
int current_log_level = default_message_loglevel;
unsigned long flags;
int this_cpu;
char *p;
boot_delay_msec();
printk_delay();
preempt_disable();
/* This stops the holder of console_sem just where we want him */
raw_local_irq_save(flags);
this_cpu = smp_processor_id();
/*
* Ouch, printk recursed into itself!
*/
if (unlikely(printk_cpu == this_cpu)) {
/*
* If a crash is occurring during printk() on this CPU,
* then try to get the crash message out but make sure
* we can't deadlock. Otherwise just return to avoid the
* recursion and return - but flag the recursion so that
* it can be printed at the next appropriate moment:
*/
if (!oops_in_progress) {
recursion_bug = 1;
goto out_restore_irqs;
}
zap_locks();
}
lockdep_off();
spin_lock(&logbuf_lock);
printk_cpu = this_cpu;
if (recursion_bug) {
recursion_bug = 0;
strcpy(printk_buf, recursion_bug_msg);
printed_len = strlen(recursion_bug_msg);
}
/* Emit the output into the temporary buffer */
printed_len += vscnprintf(printk_buf + printed_len,
sizeof(printk_buf) - printed_len, fmt, args);
#ifdef CONFIG_DEBUG_LL
printascii(printk_buf);
#endif
p = printk_buf;
/* Do we have a loglevel in the string? */
if (p[0] == '<') {
unsigned char c = p[1];
if (c && p[2] == '>') {
switch (c) {
case '0' ... '7': /* loglevel */
current_log_level = c - '0';
/* Fallthrough - make sure we're on a new line */
case 'd': /* KERN_DEFAULT */
if (!new_text_line) {
emit_log_char('\n');
new_text_line = 1;
}
/* Fallthrough - skip the loglevel */
case 'c': /* KERN_CONT */
p += 3;
break;
}
}
}
Example 9: check_irq_vectors_for_cpu_disable
/*
* This cpu is going to be removed and its vectors migrated to the remaining
* online cpus. Check to see if there are enough vectors in the remaining cpus.
* This function is protected by stop_machine().
*/
int check_irq_vectors_for_cpu_disable(void)
{
unsigned int this_cpu, vector, this_count, count;
struct irq_desc *desc;
struct irq_data *data;
int cpu;
this_cpu = smp_processor_id();
cpumask_copy(&online_new, cpu_online_mask);
cpumask_clear_cpu(this_cpu, &online_new);
this_count = 0;
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
desc = __this_cpu_read(vector_irq[vector]);
if (IS_ERR_OR_NULL(desc))
continue;
/*
* Protect against concurrent action removal, affinity
* changes etc.
*/
raw_spin_lock(&desc->lock);
data = irq_desc_get_irq_data(desc);
cpumask_copy(&affinity_new,
irq_data_get_affinity_mask(data));
cpumask_clear_cpu(this_cpu, &affinity_new);
/* Do not count inactive or per-cpu irqs. */
if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
raw_spin_unlock(&desc->lock);
continue;
}
raw_spin_unlock(&desc->lock);
/*
* A single irq may be mapped to multiple cpu's
* vector_irq[] (for example IOAPIC cluster mode). In
* this case we have two possibilities:
*
* 1) the resulting affinity mask is empty; that is
* the down'd cpu is the last cpu in the irq's
* affinity mask, or
*
* 2) the resulting affinity mask is no longer a
* subset of the online cpus but the affinity mask is
* not zero; that is the down'd cpu is the last online
* cpu in a user set affinity mask.
*/
if (cpumask_empty(&affinity_new) ||
!cpumask_subset(&affinity_new, &online_new))
this_count++;
}
count = 0;
for_each_online_cpu(cpu) {
if (cpu == this_cpu)
continue;
/*
* We scan from FIRST_EXTERNAL_VECTOR to first system
* vector. If the vector is marked in the used vectors
* bitmap or an irq is assigned to it, we don't count
* it as available.
*
* As this is an inaccurate snapshot anyway, we can do
* this w/o holding vector_lock.
*/
for (vector = FIRST_EXTERNAL_VECTOR;
vector < first_system_vector; vector++) {
if (!test_bit(vector, used_vectors) &&
IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
count++;
}
}
if (count < this_count) {
pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
this_cpu, this_count, count);
return -ERANGE;
}
return 0;
}
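check_irq_vectors_for_cpu_disable() boils down to comparing two counts: how many vectors on the outgoing CPU lose their last usable target (empty remaining affinity, or an affinity that is no longer a subset of the surviving online CPUs) versus how many free vectors the surviving CPUs still offer. The toy C program below mirrors that comparison with plain bitmasks standing in for cpumasks; the vector-space size, per-CPU usage counts, and sample IRQs are invented for illustration.

#include <stdio.h>

#define NCPUS     4
#define NVECTORS  8             /* tiny made-up vector space            */
#define MASK(cpu) (1u << (cpu))

struct toy_irq {
    unsigned int affinity;      /* bitmask of CPUs allowed to handle it */
    int active;                 /* has an action attached               */
};

int main(void)
{
    unsigned int online = MASK(0) | MASK(1) | MASK(2) | MASK(3);
    int dying = 3;                               /* CPU being offlined */
    unsigned int online_new = online & ~MASK(dying);

    /* IRQs currently routed through the dying CPU. */
    struct toy_irq irqs[] = {
        { MASK(3),           1 },   /* affinity only on the dying CPU -> must move */
        { MASK(1) | MASK(3), 1 },   /* still has an online CPU -> nothing needed   */
        { MASK(3),           0 },   /* inactive -> ignored                         */
    };

    int this_count = 0;
    for (unsigned i = 0; i < sizeof(irqs) / sizeof(irqs[0]); i++) {
        unsigned int affinity_new = irqs[i].affinity & ~MASK(dying);
        if (!irqs[i].active)
            continue;
        /* Empty, or no longer a subset of the surviving online CPUs. */
        if (affinity_new == 0 || (affinity_new & online_new) != affinity_new)
            this_count++;       /* this vector needs a new home */
    }

    /* Free vectors on the surviving CPUs (made-up per-CPU usage counts). */
    int used_per_cpu[NCPUS] = { 6, 5, 7, 8 };
    int count = 0;
    for (int cpu = 0; cpu < NCPUS; cpu++) {
        if (cpu == dying)
            continue;
        count += NVECTORS - used_per_cpu[cpu];
    }

    printf("vectors to migrate: %d, free vectors elsewhere: %d -> %s\n",
           this_count, count,
           count < this_count ? "refuse to offline" : "ok to offline");
    return 0;
}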
Example 10: __profile_flip_buffers
/*
* Each cpu has a pair of open-addressed hashtables for pending
* profile hits. read_profile() IPI's all cpus to request them
* to flip buffers and flushes their contents to prof_buffer itself.
* Flip requests are serialized by the profile_flip_mutex. The sole
* use of having a second hashtable is for avoiding cacheline
* contention that would otherwise happen during flushes of pending
* profile hits required for the accuracy of reported profile hits
* and so resurrect the interrupt livelock issue.
*
* The open-addressed hashtables are indexed by profile buffer slot
* and hold the number of pending hits to that profile buffer slot on
* a cpu in an entry. When the hashtable overflows, all pending hits
* are accounted to their corresponding profile buffer slots with
* atomic_add() and the hashtable emptied. As numerous pending hits
* may be accounted to a profile buffer slot in a hashtable entry,
* this amortizes a number of atomic profile buffer increments likely
* to be far larger than the number of entries in the hashtable,
* particularly given that the number of distinct profile buffer
* positions to which hits are accounted during short intervals (e.g.
* several seconds) is usually very small. Exclusion from buffer
* flipping is provided by interrupt disablement (note that for
* SCHED_PROFILING profile_hit() may be called from process context).
* The hash function is meant to be lightweight as opposed to strong,
* and was vaguely inspired by ppc64 firmware-supported inverted
* pagetable hash functions, but uses a full hashtable full of finite
* collision chains, not just pairs of them.
*
* -- wli
*/
static void __profile_flip_buffers(void *unused)
{
int cpu = smp_processor_id();
per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}
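The long comment above describes batching profile hits in a small per-CPU open-addressed hashtable and folding them into the shared buffer only on overflow or when the buffers are flipped and flushed. The single-threaded user-space sketch below shows that accumulate-then-flush idea; the table size, the trivial hash, and the prof_buffer stand-in are illustrative assumptions rather than the kernel's profile_hit() implementation.

#include <stdio.h>
#include <string.h>

#define TABLE_SIZE    8         /* tiny pending-hit table    */
#define PROFILE_SLOTS 64        /* stands in for prof_buffer */

struct pending_hit {
    unsigned int slot;          /* profile buffer position   */
    unsigned int hits;          /* 0 means the entry is free */
};

static struct pending_hit table[TABLE_SIZE];
static unsigned long prof_buffer[PROFILE_SLOTS];

/* Fold every pending entry into the shared buffer and empty the table. */
static void flush_pending(void)
{
    for (int i = 0; i < TABLE_SIZE; i++)
        if (table[i].hits)
            prof_buffer[table[i].slot] += table[i].hits;  /* atomic_add() in the kernel */
    memset(table, 0, sizeof(table));
}

/* Record one hit, amortizing updates to the shared buffer. */
static void record_hit(unsigned int slot)
{
    unsigned int start = slot % TABLE_SIZE;               /* lightweight "hash"         */

    for (int probe = 0; probe < TABLE_SIZE; probe++) {
        struct pending_hit *e = &table[(start + probe) % TABLE_SIZE];
        if (e->hits && e->slot == slot) {                  /* existing entry: accumulate */
            e->hits++;
            return;
        }
        if (!e->hits) {                                    /* free entry: claim it       */
            e->slot = slot;
            e->hits = 1;
            return;
        }
    }
    flush_pending();                                       /* table overflowed           */
    table[start].slot = slot;
    table[start].hits = 1;
}

int main(void)
{
    unsigned int trace[] = { 5, 5, 5, 13, 21, 5, 29, 37, 45, 53, 61, 5, 7 };

    for (unsigned i = 0; i < sizeof(trace) / sizeof(trace[0]); i++)
        record_hit(trace[i]);
    flush_pending();                                       /* like the flip + flush path */

    for (int s = 0; s < PROFILE_SLOTS; s++)
        if (prof_buffer[s])
            printf("slot %2d: %lu hits\n", s, prof_buffer[s]);
    return 0;
}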
Example 11: xen_play_dead
static void __cpuinit xen_play_dead(void) /* used only with CPU_HOTPLUG */
{
play_dead_common();
HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
cpu_bringup();
}
Example 12: sched_clock_idle_sleep_event
/*
* We are going deep-idle (irqs are disabled):
*/
void sched_clock_idle_sleep_event(void)
{
sched_clock_cpu(smp_processor_id());
}
Example 13: smp_callin
static void __cpuinit
smp_callin (void)
{
int cpuid, phys_id, itc_master;
struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
extern void ia64_init_itm(void);
extern volatile int time_keeper_id;
#ifdef CONFIG_PERFMON
extern void pfm_init_percpu(void);
#endif
cpuid = smp_processor_id();
phys_id = hard_smp_processor_id();
itc_master = time_keeper_id;
if (cpu_online(cpuid)) {
printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
phys_id, cpuid);
BUG();
}
fix_b0_for_bsp();
lock_ipi_calllock();
spin_lock(&vector_lock);
/* Setup the per cpu irq handling data structures */
__setup_vector_irq(cpuid);
cpu_set(cpuid, cpu_online_map);
unlock_ipi_calllock();
per_cpu(cpu_state, cpuid) = CPU_ONLINE;
spin_unlock(&vector_lock);
smp_setup_percpu_timer();
ia64_mca_cmc_vector_setup(); /* Setup vector on AP */
#ifdef CONFIG_PERFMON
pfm_init_percpu();
#endif
local_irq_enable();
if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
/*
* Synchronize the ITC with the BP. Need to do this after irqs are
* enabled because ia64_sync_itc() calls smp_call_function_single(), which
* calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls
* local_bh_enable(), which bugs out if irqs are not enabled...
*/
Dprintk("Going to syncup ITC with ITC Master.\n");
ia64_sync_itc(itc_master);
}
/*
* Get our bogomips.
*/
ia64_init_itm();
/*
* Delay calibration can be skipped if new processor is identical to the
* previous processor.
*/
last_cpuinfo = cpu_data(cpuid - 1);
this_cpuinfo = local_cpu_data;
if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
last_cpuinfo->features != this_cpuinfo->features ||
last_cpuinfo->revision != this_cpuinfo->revision ||
last_cpuinfo->family != this_cpuinfo->family ||
last_cpuinfo->archrev != this_cpuinfo->archrev ||
last_cpuinfo->model != this_cpuinfo->model)
calibrate_delay();
local_cpu_data->loops_per_jiffy = loops_per_jiffy;
#ifdef CONFIG_IA32_SUPPORT
ia32_gdt_init();
#endif
/*
* Allow the master to continue.
*/
cpu_set(cpuid, cpu_callin_map);
Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
}
Example 14: nmi_cpu_setup
static void nmi_cpu_setup(void * dummy)
{
int cpu = smp_processor_id();
struct op_msrs * msrs = &cpu_msrs[cpu];
model->setup_ctrs(msrs);
}
Example 15: nmi_save_registers
static void nmi_save_registers(void *dummy)
{
int cpu = smp_processor_id();
struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
nmi_cpu_save_registers(msrs);
}