This page collects typical usage examples of the C++ function send_IPI_mask. If you are wondering what exactly send_IPI_mask does, how to use it, or where to find examples of its use, the hand-picked code samples below may help.
The sections that follow show 15 code examples of send_IPI_mask, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
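Note that send_IPI_mask() is not a stable API: it delivers the given interrupt vector as an inter-processor interrupt (IPI) to every CPU set in the mask, but its exact prototype differs between kernel generations. The two shapes that dominate the examples below are, roughly (reconstructed from the call sites shown here, not from a single authoritative header; the M32R port in Example 7 additionally takes a third argument):

/* Older arch/i386 and arch/x86_64 style: the mask is passed by value. */
void send_IPI_mask(cpumask_t mask, int vector);

/* Later kernels and Xen: the mask is passed by pointer. */
void send_IPI_mask(const cpumask_t *mask, int vector);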
Example 1: smp_send_nmi_allbutself
static void smp_send_nmi_allbutself(void)
{
    cpumask_t mask = cpu_online_map;

    cpu_clear(safe_smp_processor_id(), mask);
    if (!cpus_empty(mask))
        send_IPI_mask(mask, NMI_VECTOR);
}
Example 2: __smp_call_function_many
static void __smp_call_function_many(cpumask_t *mask, void (*func)(void *info),
                                     void *info, int nonatomic, int wait)
{
    struct call_data_struct data;
    int cpus;
    int cpu = smp_processor_id();

    if (cpu_isset(cpu, *mask))
        cpu_clear(cpu, *mask);
    cpus = cpus_weight(*mask);
    if (!cpus)
        return;

    data.func = func;
    data.info = info;
    atomic_set(&data.started, 0);
    data.wait = wait;
    if (wait)
        atomic_set(&data.finished, 0);

    call_data = &data;
    wmb();

    /* Send a message to all other CPUs and wait for them to respond */
    send_IPI_mask(*mask, CALL_FUNCTION_VECTOR);

    /* Wait for response */
    while (atomic_read(&data.started) != cpus)
        cpu_relax();

    if (!wait)
        return;

    while (atomic_read(&data.finished) != cpus)
        cpu_relax();
}
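The sender above only makes progress because every CPU in the mask runs a matching handler when CALL_FUNCTION_VECTOR arrives, and that handler is what advances data.started and data.finished. A minimal sketch of that counterpart, following the classic smp_call_function_interrupt() pattern (the exact identifiers are assumptions, not taken from this example's source file):

void smp_call_function_interrupt(void)
{
    void (*func)(void *info) = call_data->func;
    void *info = call_data->info;
    int wait = call_data->wait;

    ack_APIC_irq();
    /*
     * Notify the initiating CPU that we have picked up the call_data
     * fields; after this it may reuse the stack-allocated structure
     * unless it asked to wait for completion.
     */
    mb();
    atomic_inc(&call_data->started);

    (*func)(info);

    if (wait) {
        mb();
        atomic_inc(&call_data->finished);
    }
}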
Example 3: flush_tlb_others
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                             unsigned long va)
{
    int sender;
    union smp_flush_state *f;

    /* Caller has disabled preemption */
    sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
    f = &per_cpu(flush_state, sender);

    /*
     * Could avoid this lock when
     * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
     * probably not worth checking this for a cache-hot lock.
     */
    spin_lock(&f->tlbstate_lock);

    f->flush_mm = mm;
    f->flush_va = va;
    cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

    /*
     * We have to send the IPI only to
     * CPUs affected.
     */
    send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

    while (!cpus_empty(f->flush_cpumask))
        cpu_relax();

    f->flush_mm = NULL;
    f->flush_va = 0;
    spin_unlock(&f->tlbstate_lock);
}
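The busy-wait on f->flush_cpumask above terminates when each target CPU clears its own bit from the per-sender flush_state slot. A rough sketch of that side, modelled on the x86_64 smp_invalidate_interrupt() logic (identifiers beyond those the sender already uses, such as FLUSH_ALL and the helper name, are assumptions for illustration):

/* Invoked for vector INVALIDATE_TLB_VECTOR_START + sender. */
static void handle_invalidate_ipi(int sender)
{
    int cpu = smp_processor_id();
    union smp_flush_state *f = &per_cpu(flush_state, sender);

    if (!cpu_isset(cpu, f->flush_cpumask))
        return;

    if (f->flush_va == FLUSH_ALL)
        local_flush_tlb();            /* flush the whole TLB */
    else
        __flush_tlb_one(f->flush_va); /* flush a single page */

    /* Clearing our bit is what releases the sender's wait loop. */
    cpu_clear(cpu, f->flush_cpumask);
}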
Example 4: __smp_call_function_single
/*
 * this function sends a 'generic call function' IPI to one other CPU
 * in the system.
 *
 * cpu is a standard Linux logical CPU number.
 */
static void
__smp_call_function_single(int cpu, void (*func)(void *info), void *info,
                           int nonatomic, int wait)
{
    struct call_data_struct data;
    int cpus = 1;

    data.func = func;
    data.info = info;
    atomic_set(&data.started, 0);
    data.wait = wait;
    if (wait)
        atomic_set(&data.finished, 0);

    call_data = &data;
    wmb();

    /* Send a message to the target CPU and wait for it to respond */
    send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);

    /* Wait for response */
    while (atomic_read(&data.started) != cpus)
        cpu_relax();

    if (!wait)
        return;

    while (atomic_read(&data.finished) != cpus)
        cpu_relax();
}
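As a quick illustration of the calling convention the comment describes, a hypothetical caller might look like the sketch below. do_remote_count() and poke_cpu1() are invented for this sketch; real callers also serialize access to the shared call_data slot (typically under call_lock) and must not target the current CPU.

static atomic_t remote_hits = ATOMIC_INIT(0);

static void do_remote_count(void *info)
{
    /* Runs on the target CPU in interrupt context; keep it short. */
    atomic_inc((atomic_t *)info);
}

static void poke_cpu1(void)
{
    /* Ask logical CPU 1 to run do_remote_count() and wait for it to finish. */
    __smp_call_function_single(1, do_remote_count, &remote_hits, 0, 1);
}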
Example 5: send_IPI_allbutself
static inline void send_IPI_allbutself(int vector)
{
    /*
     * if there are no other CPUs in the system then
     * we get an APIC send error if we try to broadcast.
     * thus we have to avoid sending IPIs in this case.
     */
    if (!(smp_num_cpus > 1))
        return;

    if (clustered_apic_mode) {
        // Pointless. Use send_IPI_mask to do this instead
        int cpu;

        if (smp_num_cpus > 1) {
            for (cpu = 0; cpu < smp_num_cpus; ++cpu) {
                if (cpu != smp_processor_id())
                    send_IPI_mask(1 << cpu, vector);
            }
        }
    } else {
        __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
        return;
    }
}
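The in-code comment calls the per-CPU loop pointless and suggests a single masked send instead. Under the same pass-by-value integer-mask convention this file uses, that alternative would look roughly like this (a sketch only, assuming smp_num_cpus is smaller than BITS_PER_LONG):

if (clustered_apic_mode) {
    unsigned long mask = (1UL << smp_num_cpus) - 1; /* every CPU...          */

    mask &= ~(1UL << smp_processor_id());           /* ...except ourselves   */
    if (mask)
        send_IPI_mask(mask, vector);
}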
Example 6: flush_tlb_others
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                             unsigned long va)
{
    /*
     * A couple of (to be removed) sanity checks:
     *
     * - current CPU must not be in mask
     * - mask must exist :)
     */
    BUG_ON(cpus_empty(cpumask));
    BUG_ON(cpu_isset(smp_processor_id(), cpumask));
    BUG_ON(!mm);

    /* If a CPU which we ran on has gone down, OK. */
    cpus_and(cpumask, cpumask, cpu_online_map);
    if (cpus_empty(cpumask))
        return;

    /*
     * i'm not happy about this global shared spinlock in the
     * MM hot path, but we'll see how contended it is.
     * Temporarily this turns IRQs off, so that lockups are
     * detected by the NMI watchdog.
     */
    spin_lock(&tlbstate_lock);

    flush_mm = mm;
    flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
    atomic_set_mask(cpumask, &flush_cpumask);
#else
    {
        int k;
        unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
        unsigned long *cpu_mask = (unsigned long *)&cpumask;

        for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
            atomic_set_mask(cpu_mask[k], &flush_mask[k]);
    }
#endif
    /*
     * Make the above memory operations globally visible before
     * sending the IPI.
     */
    smp_mb();

    /*
     * We have to send the IPI only to
     * CPUs affected.
     */
    send_IPI_mask(flush_cpumask, INVALIDATE_TLB_VECTOR);

    while (!cpus_empty(flush_cpumask))
        /* nothing. lockup detection does not belong here */
        mb();

    flush_mm = NULL;
    flush_va = 0;
    spin_unlock(&tlbstate_lock);
}
Example 7: flush_tlb_others
/*==========================================================================*
 * Name:         flush_tlb_others
 *
 * Description:  This routine requests the other CPUs to execute a TLB flush.
 *               1. Set up the parameters.
 *               2. Send 'INVALIDATE_TLB_IPI' to the other CPUs, requesting
 *                  them to execute 'smp_invalidate_interrupt()'.
 *               3. Wait until the other CPUs have finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpumask - bitmap of target CPUs
 *               *mm     - a pointer to the mm struct to flush the TLB for
 *               *vma    - a pointer to the vma struct that includes va
 *               va      - virtual address to flush the TLB for
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                             struct vm_area_struct *vma, unsigned long va)
{
    unsigned long *mask;
#ifdef DEBUG_SMP
    unsigned long flags;

    __save_flags(flags);
    if (!(flags & 0x0040))    /* Interrupt Disable NONONO */
        BUG();
#endif /* DEBUG_SMP */

    /*
     * A couple of (to be removed) sanity checks:
     *
     * - we do not send IPIs to not-yet booted CPUs.
     * - current CPU must not be in mask
     * - mask must exist :)
     */
    BUG_ON(cpumask_empty(&cpumask));
    BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
    BUG_ON(!mm);

    /* If a CPU which we ran on has gone down, OK. */
    cpumask_and(&cpumask, &cpumask, cpu_online_mask);
    if (cpumask_empty(&cpumask))
        return;

    /*
     * i'm not happy about this global shared spinlock in the
     * MM hot path, but we'll see how contended it is.
     * Temporarily this turns IRQs off, so that lockups are
     * detected by the NMI watchdog.
     */
    spin_lock(&tlbstate_lock);

    flush_mm = mm;
    flush_vma = vma;
    flush_va = va;
    mask = cpumask_bits(&cpumask);
    atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);

    /*
     * We have to send the IPI only to
     * CPUs affected.
     */
    send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);

    while (!cpumask_empty((cpumask_t *)&flush_cpumask)) {
        /* nothing. lockup detection does not belong here */
        mb();
    }

    flush_mm = NULL;
    flush_vma = NULL;
    flush_va = 0;
    spin_unlock(&tlbstate_lock);
}
Example 8: smp_send_timer_broadcast_ipi
void smp_send_timer_broadcast_ipi(void)
{
    cpumask_t mask;

    cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);
    if (!cpus_empty(mask)) {
        send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
    }
}
Example 9: send_ipi_interrupt
static void
send_ipi_interrupt(cpumask_t *mask, int vector)
{
# if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
    /***********************************************/
    /*   Theres 'flat' and theres 'cluster'. The   */
    /*   cluster functions handle more than 8      */
    /*   cpus. The flat does not - since the APIC  */
    /*   only has room for an 8-bit cpu mask.      */
    /***********************************************/
    static void (*send_IPI_mask)(cpumask_t, int);

    if (send_IPI_mask == NULL)
        send_IPI_mask = get_proc_addr("cluster_send_IPI_mask");
    if (send_IPI_mask == NULL)
        dtrace_printf("HELP ON send_ipi_interrupt!\n");
    else
        send_IPI_mask(*mask, vector);
# elif LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 28)
    /***********************************************/
    /*   Issue with GPL/inlined function.          */
    /***********************************************/
    {
        void send_IPI_mask_sequence(cpumask_t mask, int vector);
        static void (*send_IPI_mask_sequence_ptr)(cpumask_t, int);

        if (send_IPI_mask_sequence_ptr == NULL)
            send_IPI_mask_sequence_ptr = get_proc_addr("send_IPI_mask_sequence");
        send_IPI_mask_sequence_ptr(*mask, vector);
    }
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
    send_IPI_mask(*mask, vector);
# else
    if (x_apic == NULL) {
        static void (*flat_send_IPI_mask)(cpumask_t *, int);

        if (flat_send_IPI_mask == NULL)
            flat_send_IPI_mask = get_proc_addr("flat_send_IPI_mask");
        if (flat_send_IPI_mask) {
            flat_send_IPI_mask(mask, vector);
            return;
        }
        dtrace_linux_panic("x_apic is null - giving up\n");
        return;
    }
    x_apic->send_IPI_mask(mask, vector);
# endif
}
Example 10: smp_send_call_function_mask
void smp_send_call_function_mask(const cpumask_t *mask)
{
    send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

    if ( cpumask_test_cpu(smp_processor_id(), mask) )
    {
        local_irq_disable();
        smp_call_function_interrupt();
        local_irq_enable();
    }
}
Example 11: native_smp_call_function_mask
/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int
native_smp_call_function_mask(cpumask_t mask,
                              void (*func)(void *), void *info,
                              int wait)
{
    struct call_data_struct data;
    cpumask_t allbutself;
    int cpus;

    /* Can deadlock when called with interrupts disabled */
    WARN_ON(irqs_disabled());

    /* Holding any lock stops cpus from going down. */
    spin_lock(&call_lock);

    allbutself = cpu_online_map;
    cpu_clear(smp_processor_id(), allbutself);

    cpus_and(mask, mask, allbutself);
    cpus = cpus_weight(mask);

    if (!cpus) {
        spin_unlock(&call_lock);
        return 0;
    }

    data.func = func;
    data.info = info;
    atomic_set(&data.started, 0);
    data.wait = wait;
    if (wait)
        atomic_set(&data.finished, 0);

    call_data = &data;
    mb();

    /* Send a message to other CPUs */
    if (cpus_equal(mask, allbutself))
        send_IPI_allbutself(CALL_FUNCTION_VECTOR);
    else
        send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

    /* Wait for response */
    while (atomic_read(&data.started) != cpus)
        cpu_relax();

    if (wait)
        while (atomic_read(&data.finished) != cpus)
            cpu_relax();

    spin_unlock(&call_lock);
    return 0;
}
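For completeness, here is a hypothetical caller that matches the constraints spelled out in the kerneldoc above (interrupts enabled, current CPU excluded from the mask). drain_counters() and drain_all_but_self() are invented for this sketch:

static void drain_counters(void *unused)
{
    /* Fast, non-blocking work, as the kerneldoc above requires. */
}

static void drain_all_but_self(void)
{
    cpumask_t others = cpu_online_map;

    cpu_clear(smp_processor_id(), others);
    /* wait=1: return only after every targeted CPU has run drain_counters(). */
    native_smp_call_function_mask(others, drain_counters, NULL, 1);
}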
Example 12: native_flush_tlb_others
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
                             unsigned long va)
{
    cpumask_t cpumask = *cpumaskp;

    /*
     * A couple of (to be removed) sanity checks:
     *
     * - current CPU must not be in mask
     * - mask must exist :)
     */
    BUG_ON(cpus_empty(cpumask));
    BUG_ON(cpu_isset(smp_processor_id(), cpumask));
    BUG_ON(!mm);

#ifdef CONFIG_HOTPLUG_CPU
    /* If a CPU which we ran on has gone down, OK. */
    cpus_and(cpumask, cpumask, cpu_online_map);
    if (unlikely(cpus_empty(cpumask)))
        return;
#endif

    /*
     * i'm not happy about this global shared spinlock in the
     * MM hot path, but we'll see how contended it is.
     * AK: x86-64 has a faster method that could be ported.
     */
    spin_lock(&tlbstate_lock);

    flush_mm = mm;
    flush_va = va;
    cpus_or(flush_cpumask, cpumask, flush_cpumask);

    /*
     * Make the above memory operations globally visible before
     * sending the IPI.
     */
    smp_mb();

    /*
     * We have to send the IPI only to
     * CPUs affected.
     */
    send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);

    while (!cpus_empty(flush_cpumask))
        /* nothing. lockup detection does not belong here */
        cpu_relax();

    flush_mm = NULL;
    flush_va = 0;
    spin_unlock(&tlbstate_lock);
}
Example 13: send_IPI_all
static inline void send_IPI_all(int vector)
{
    if (clustered_apic_mode) {
        // Pointless. Use send_IPI_mask to do this instead
        int cpu;

        for (cpu = 0; cpu < smp_num_cpus; ++cpu) {
            send_IPI_mask(1 << cpu, vector);
        }
    } else {
        __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
    }
}
Example 14: __smp_ipi_test_interrupt
static void __smp_ipi_test_interrupt(void)
{
    cpumask_t mask;

    if (smp_processor_id() == test_cpu_x) {
        if (nr_trips == INITIAL_DISCARD) {
            start_time = NOW();
            send_ipi_time = 0;
        }
        if (nr_trips == NR_TRIPS + INITIAL_DISCARD) {
            finish_time = NOW();
            tasklet_schedule(&ipi_test_tasklet);
            return;
        }
        nr_trips++;
        mask = cpumask_of_cpu(test_cpu_y);
        send_ipi_time -= NOW();
        send_IPI_mask(&mask, IPI_TEST_VECTOR);
        send_ipi_time += NOW();
    } else {
        mask = cpumask_of_cpu(test_cpu_x);
        send_IPI_mask(&mask, IPI_TEST_VECTOR);
    }
}
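The tasklet scheduled at the end of the run is presumably what reports the measurement; its body is not shown in this example, but the arithmetic it would need is straightforward. A sketch, assuming NOW() returns nanoseconds (the helper name ipi_test_report is made up here):

static void ipi_test_report(void)
{
    /* NR_TRIPS complete x->y->x round trips were timed after the warm-up. */
    unsigned long avg_trip = (unsigned long)((finish_time - start_time) / NR_TRIPS);
    /* send_ipi_time accumulated only the send_IPI_mask() cost on test_cpu_x. */
    unsigned long avg_send = (unsigned long)(send_ipi_time / NR_TRIPS);

    printk("IPI test: avg round trip %lu ns, avg send cost %lu ns\n",
           avg_trip, avg_send);
}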
Example 15: send_ipi_interrupt
static void
send_ipi_interrupt(cpumask_t *mask, int vector)
{
# if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
    /***********************************************/
    /*   Theres 'flat' and theres 'cluster'. The   */
    /*   cluster functions handle more than 8      */
    /*   cpus. The flat does not - since the APIC  */
    /*   only has room for an 8-bit cpu mask.      */
    /***********************************************/
    static void (*send_IPI_mask)(cpumask_t, int);

    if (send_IPI_mask == NULL)
        send_IPI_mask = get_proc_addr("cluster_send_IPI_mask");
    if (send_IPI_mask == NULL)
        dtrace_printf("HELP ON send_ipi_interrupt!\n");
    else
        send_IPI_mask(*mask, vector);
# elif LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 28)
    send_IPI_mask_sequence(*mask, vector);
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
    send_IPI_mask(*mask, vector);
# else
    x_apic->send_IPI_mask(mask, vector);
# endif
}