This article collects typical usage examples of the C function cpumask_empty. Wondering what cpumask_empty does and how to use it? The hand-picked code examples below may help.
The following presents 15 code examples of cpumask_empty, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better code examples.
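Before diving into the examples, here is a minimal sketch of the common idiom the samples below share: build a working mask, prune it, and use cpumask_empty() to bail out early when no CPUs remain. This is an illustrative kernel-context fragment (the helper name is hypothetical), not standalone code:
#include <linux/cpumask.h>
#include <linux/smp.h>
/* Hypothetical helper (sketch): true if any *other* online CPU remains in @mask. */
static bool others_remain(struct cpumask *mask)
{
	cpumask_and(mask, mask, cpu_online_mask);    /* keep online CPUs only */
	cpumask_clear_cpu(smp_processor_id(), mask); /* drop the calling CPU */
	return !cpumask_empty(mask);                 /* anything left to act on? */
}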
Example 1: irq_move_masked_irq
void irq_move_masked_irq(struct irq_data *idata)
{
struct irq_desc *desc = irq_data_to_desc(idata);
struct irq_chip *chip = desc->irq_data.chip;
if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
return;
irqd_clr_move_pending(&desc->irq_data);
/*
* Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
*/
if (irqd_is_per_cpu(&desc->irq_data)) {
WARN_ON(1);
return;
}
if (unlikely(cpumask_empty(desc->pending_mask)))
return;
if (!chip->irq_set_affinity)
return;
assert_raw_spin_locked(&desc->lock);
/*
* If there was a valid mask to work with, do the
* disable, re-program, enable sequence.
* This is *not* particularly important for level-triggered
* interrupts, but in an edge-triggered case we might be
* programming the RTE while an active trigger is coming in,
* which could cause some IOAPICs to malfunction.
* Being paranoid, I guess!
*
* For correct operation this depends on the caller
* masking the irqs.
*/
if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
cpumask_clear(desc->pending_mask);
}
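A note on the final guard: cpumask_any_and() returns nr_cpu_ids when the two masks share no bit, so the comparison is simply an intersection test. The kernel also offers cpumask_intersects(), so an equivalent (hedged) spelling of the same check would be:
if (cpumask_intersects(desc->pending_mask, cpu_online_mask))
	irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);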
Example 2: tick_shutdown_broadcast
/*
* Remove a CPU from broadcasting
*/
void tick_shutdown_broadcast(unsigned int *cpup)
{
struct clock_event_device *bc;
unsigned long flags;
unsigned int cpu = *cpup;
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
cpumask_clear_cpu(cpu, tick_broadcast_mask);
cpumask_clear_cpu(cpu, tick_broadcast_on);
if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
if (bc && cpumask_empty(tick_broadcast_mask))
clockevents_shutdown(bc);
}
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
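The shape of this example, clearing a CPU's bit and shutting the shared device down once the mask drains, is the generic last-user-out teardown. A runnable userspace analogue on a plain bitmask (all names hypothetical):
#include <stdio.h>

static unsigned long users;        /* one bit per user, like a cpumask */

static void user_leave(unsigned int id)
{
	users &= ~(1UL << id);         /* cpumask_clear_cpu() analogue */
	if (users == 0)                /* cpumask_empty() analogue */
		printf("last user gone: shutting shared device down\n");
}

int main(void)
{
	users = (1UL << 0) | (1UL << 1);
	user_leave(0);
	user_leave(1);                 /* drains the mask, triggers shutdown */
	return 0;
}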
Example 3: _percpu_write_lock
void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
percpu_rwlock_t *percpu_rwlock)
{
unsigned int cpu;
cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers);
/* Validate the correct per_cpudata variable has been provided. */
_percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
/*
* First take the write lock to protect against other writers or slow
* path readers.
*/
write_lock(&percpu_rwlock->rwlock);
/* Now set the global variable so that readers start using read_lock. */
percpu_rwlock->writer_activating = 1;
smp_mb();
/* Using a per cpu cpumask is only safe if there is no nesting. */
ASSERT(!in_irq());
cpumask_copy(rwlock_readers, &cpu_online_map);
/* Check if there are any percpu readers in progress on this rwlock. */
for ( ; ; )
{
for_each_cpu(cpu, rwlock_readers)
{
/*
* Remove any percpu readers not contending on this rwlock
* from our check mask.
*/
if ( per_cpu_ptr(per_cpudata, cpu) != percpu_rwlock )
__cpumask_clear_cpu(cpu, rwlock_readers);
}
/* Check if we've cleared all percpu readers from check mask. */
if ( cpumask_empty(rwlock_readers) )
break;
/* Give the coherency fabric a break. */
cpu_relax();
}
}
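The writer's wait is a drain loop: snapshot the candidate readers, prune the ones that provably are not contending on this lock, and spin until the snapshot is empty. A hedged userspace analogue of the drain (hypothetical names; real readers would clear themselves concurrently):
#include <stdio.h>

/* Prune non-contenders from @candidates; exit once the set is empty. */
static void drain(unsigned long candidates, const int *contending, int n)
{
	while (candidates) {                   /* !cpumask_empty() analogue */
		for (int i = 0; i < n; i++)
			if (!contending[i])
				candidates &= ~(1UL << i);  /* __cpumask_clear_cpu() */
		/* cpu_relax() would sit here in kernel code */
	}
	puts("all readers drained");
}

int main(void)
{
	int contending[4] = { 0 };             /* nobody contends */
	drain(0xfUL, contending, 4);
	return 0;
}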
Example 4: flush_area_mask
void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags)
{
ASSERT(local_irq_is_enabled());
if ( cpumask_test_cpu(smp_processor_id(), mask) )
flush_area_local(va, flags);
if ( !cpumask_subset(mask, cpumask_of(smp_processor_id())) )
{
spin_lock(&flush_lock);
cpumask_and(&flush_cpumask, mask, &cpu_online_map);
cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
flush_va = va;
flush_flags = flags;
send_IPI_mask(&flush_cpumask, INVALIDATE_TLB_VECTOR);
while ( !cpumask_empty(&flush_cpumask) )
cpu_relax();
spin_unlock(&flush_lock);
}
}
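The sender spins on cpumask_empty() while each target CPU acknowledges by clearing its own bit, so the loop terminates exactly when the last responder finishes. A hedged sketch of what the receiving side of such an IPI typically does (illustrative, not verbatim Xen):
/* IPI handler sketch: perform the flush, then acknowledge the sender. */
static void invalidate_interrupt_sketch(void)
{
	flush_area_local(flush_va, flush_flags);               /* do the work */
	cpumask_clear_cpu(smp_processor_id(), &flush_cpumask); /* ack: may empty the mask */
}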
Example 5: tick_do_broadcast
/*
* Broadcast the event to the CPUs which are set in the mask
* (the mask is mangled in the process).
*/
static bool tick_do_broadcast(struct cpumask *mask)
{
int cpu = smp_processor_id();
struct tick_device *td;
bool local = false;
/*
* Check if the current CPU is in the mask
*/
if (cpumask_test_cpu(cpu, mask)) {
struct clock_event_device *bc = tick_broadcast_device.evtdev;
cpumask_clear_cpu(cpu, mask);
/*
* We only run the local handler, if the broadcast
* device is not hrtimer based. Otherwise we run into
* a hrtimer recursion.
*
* local timer_interrupt()
* local_handler()
* expire_hrtimers()
* bc_handler()
* local_handler()
* expire_hrtimers()
*/
local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
}
if (!cpumask_empty(mask)) {
/*
* It might be necessary to actually check whether the devices
* have different broadcast functions. For now, just use the
* one of the first device. This works as long as we have this
* misfeature only on x86 (lapic)
*/
td = &per_cpu(tick_cpu_device, cpumask_first(mask));
td->evtdev->broadcast(mask);
}
return local;
}
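The return value is a contract with the caller: remote CPUs are reached via the broadcast hook, while the local handler must be run by the caller itself, outside this function (compare the tail of Example 13 below). A sketch of the caller side:
/* Caller-side sketch: run the local handler only if tick_do_broadcast() asked. */
bool local = tick_do_broadcast(tmpmask);
if (local) {
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	td->evtdev->event_handler(td->evtdev);
}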
Example 6: rtas_cpu_state_change_mask
/* On return the cpumask is altered to indicate which CPUs changed:
 * CPUs whose state changed are set in the mask,
 * CPUs whose state is unchanged are cleared in the mask. */
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
cpumask_var_t cpus)
{
int cpu;
int cpuret = 0;
int ret = 0;
if (cpumask_empty(cpus))
return 0;
for_each_cpu(cpu, cpus) {
switch (state) {
case DOWN:
cpuret = cpu_down(cpu);
break;
case UP:
cpuret = cpu_up(cpu);
break;
}
if (cpuret) {
pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
__func__,
((state == UP) ? "up" : "down"),
cpu, cpuret);
if (!ret)
ret = cpuret;
if (state == UP) {
/* clear bits for unchanged cpus, return */
cpumask_shift_right(cpus, cpus, cpu);
cpumask_shift_left(cpus, cpus, cpu);
break;
} else {
/* clear bit for unchanged cpu, continue */
cpumask_clear_cpu(cpu, cpus);
}
}
}
return ret;
}
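The shift-right-then-left pair is a compact way to clear every bit below cpu: shifting right by cpu discards bits 0..cpu-1, and shifting left by the same amount moves the survivors back into place. A runnable userspace analogue on a plain word:
#include <stdio.h>

int main(void)
{
	unsigned long mask = 0xffUL;   /* bits 0-7 set, like CPUs 0-7 */
	unsigned int cpu = 3;

	/* cpumask_shift_right + cpumask_shift_left analogue:
	 * clears bits 0..cpu-1, keeps bit cpu and above. */
	mask = (mask >> cpu) << cpu;
	printf("0x%lx\n", mask);       /* prints 0xf8: bits 3-7 survive */
	return 0;
}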
Example 7: get_cur_val
static u32 get_cur_val(const cpumask_t *mask)
{
struct cpufreq_policy *policy;
struct processor_performance *perf;
struct drv_cmd cmd;
unsigned int cpu = smp_processor_id();
if (unlikely(cpumask_empty(mask)))
return 0;
if (!cpumask_test_cpu(cpu, mask))
cpu = cpumask_first(mask);
if (cpu >= nr_cpu_ids || !cpu_online(cpu))
return 0;
policy = per_cpu(cpufreq_cpu_policy, cpu);
if (!policy || !cpufreq_drv_data[policy->cpu])
return 0;
switch (cpufreq_drv_data[policy->cpu]->arch_cpu_flags) {
case SYSTEM_INTEL_MSR_CAPABLE:
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
break;
case SYSTEM_IO_CAPABLE:
cmd.type = SYSTEM_IO_CAPABLE;
perf = cpufreq_drv_data[policy->cpu]->acpi_data;
cmd.addr.io.port = perf->control_register.address;
cmd.addr.io.bit_width = perf->control_register.bit_width;
break;
default:
return 0;
}
cmd.mask = cpumask_of(cpu);
drv_read(&cmd);
return cmd.val;
}
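The emptiness check up front is not redundant busywork: cpumask_first() on an empty mask returns nr_cpu_ids, so without the early return the function would only fail at the later bounds check; testing emptiness first makes the no-CPUs case explicit and cheap. The distilled selection idiom (sketch):
/* Pick a CPU from @mask, preferring the current one (sketch). */
unsigned int cpu = smp_processor_id();
if (unlikely(cpumask_empty(mask)))
	return 0;                      /* nothing to read from */
if (!cpumask_test_cpu(cpu, mask))
	cpu = cpumask_first(mask);     /* fall back to the first member */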
Example 8: enable_nonboot_cpus
void __ref enable_nonboot_cpus(void)
{
int cpu, error;
#if defined (CONFIG_MACH_APQ8064_OMEGA) || defined (CONFIG_MACH_APQ8064_OMEGAR)
static int first = 0;
if (!first) {
init_timer(&boost_freq_timer);
first = 1;
}
if (timer_pending(&boost_freq_timer))
del_timer(&boost_freq_timer);
boost_freq_timer.function = boost_freq_timer_cb;
boost_freq_timer.expires =
jiffies + msecs_to_jiffies(BOOST_FREQ_TIME_MS);
add_timer(&boost_freq_timer);
boost_freq = 1;
#endif
/* Allow everyone to use the CPU hotplug again */
cpu_maps_update_begin();
cpu_hotplug_disabled = 0;
if (cpumask_empty(frozen_cpus))
goto out;
printk(KERN_INFO "Enabling non-boot CPUs ...\n");
arch_enable_nonboot_cpus_begin();
for_each_cpu(cpu, frozen_cpus) {
error = _cpu_up(cpu, 1);
if (!error) {
printk(KERN_INFO "CPU%d is up\n", cpu);
continue;
}
printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
}
arch_enable_nonboot_cpus_end();
cpumask_clear(frozen_cpus);
out:
cpu_maps_update_done();
}
Example 9: opal_flash_term_callback
/* This gets called just before system reboots */
void opal_flash_term_callback(void)
{
struct cpumask mask;
if (update_flash_data.status != FLASH_IMG_READY)
return;
pr_alert("FLASH: Flashing new firmware\n");
pr_alert("FLASH: Image is %u bytes\n", image_data.size);
pr_alert("FLASH: Performing flash and reboot/shutdown\n");
pr_alert("FLASH: This will take several minutes. Do not power off!\n");
/* Small delay to help getting the above message out */
msleep(500);
/* Return secondary CPUs to firmware */
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
if (!cpumask_empty(&mask))
smp_call_function_many(&mask,
flash_return_cpu, NULL, false);
/* Hard disable interrupts */
hard_irq_disable();
}
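The last argument of smp_call_function_many() is the wait flag; passing false means the caller does not block until flash_return_cpu has run on every target, which is acceptable here since hard interrupt disabling follows immediately. A hedged variant that would block until every target is done:
/* Blocking variant (sketch): wait == true */
if (!cpumask_empty(&mask))
	smp_call_function_many(&mask, flash_return_cpu, NULL, true);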
Example 10: __bind_irq_vector
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
cpumask_t mask;
int cpu;
struct irq_cfg *cfg = &irq_cfg[irq];
BUG_ON((unsigned)irq >= NR_IRQS);
BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);
cpumask_and(&mask, &domain, cpu_online_mask);
if (cpumask_empty(&mask))
return -EINVAL;
if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
return 0;
if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
return -EBUSY;
for_each_cpu(cpu, &mask)
per_cpu(vector_irq, cpu)[vector] = irq;
cfg->vector = vector;
cfg->domain = domain;
irq_status[irq] = IRQ_USED;
cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
return 0;
}
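Note that the intersection is computed into a local mask, leaving the caller's domain untouched; an empty intersection means no online CPU can service the vector, hence -EINVAL. The validation guard in isolation (sketch):
/* Reject a domain that covers no online CPU (sketch). */
cpumask_and(&mask, &domain, cpu_online_mask);
if (cpumask_empty(&mask))
	return -EINVAL;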
Example 11: arch_get_hmp_domains
void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
{
struct hmp_domain *domain;
arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
/*
* Initialize hmp_domains
* Must be ordered with respect to compute capacity.
* Fastest domain at head of list.
*/
if (!cpumask_empty(&hmp_slow_cpu_mask)) {
domain = (struct hmp_domain *)
kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
list_add(&domain->hmp_domains, hmp_domains_list);
}
domain = (struct hmp_domain *)
kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
list_add(&domain->hmp_domains, hmp_domains_list);
}
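One defensive note: this init-time code assumes kmalloc() succeeds and dereferences the result immediately. A hedged variant with the usual NULL check (a sketch, not the original source):
domain = kmalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
	return;    /* out of memory during init: skip HMP domain setup (sketch) */
cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);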
Example 12: enable_nonboot_cpus
void __ref enable_nonboot_cpus(void)
{
int cpu, error;
/* Allow everyone to use the CPU hotplug again */
cpu_maps_update_begin();
cpu_hotplug_disabled = 0;
if (cpumask_empty(frozen_cpus))
goto out;
pr_info("Enabling non-boot CPUs ...\n");
arch_enable_nonboot_cpus_begin();
for_each_cpu(cpu, frozen_cpus) {
trace_suspend_resume(TPS("CPU_ON"), cpu, true);
error = _cpu_up(cpu, 1);
trace_suspend_resume(TPS("CPU_ON"), cpu, false);
if (!error) {
pr_info("CPU%d is up\n", cpu);
continue;
}
pr_warn("Error taking CPU%d up: %d\n", cpu, error);
}
arch_enable_nonboot_cpus_end();
cpumask_clear(frozen_cpus);
out:
cpu_maps_update_done();
}
Example 13: tick_handle_oneshot_broadcast
/*
* Handle oneshot mode broadcasting
*/
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
struct tick_device *td;
ktime_t now, next_event;
int cpu, next_cpu = 0;
bool bc_local;
raw_spin_lock(&tick_broadcast_lock);
dev->next_event = KTIME_MAX;
next_event = KTIME_MAX;
cpumask_clear(tmpmask);
now = ktime_get();
/* Find all expired events */
for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
/*
* Required for !SMP because for_each_cpu() unconditionally
* reports CPU0 as set on UP kernels.
*/
if (!IS_ENABLED(CONFIG_SMP) &&
cpumask_empty(tick_broadcast_oneshot_mask))
break;
td = &per_cpu(tick_cpu_device, cpu);
if (td->evtdev->next_event <= now) {
cpumask_set_cpu(cpu, tmpmask);
/*
* Mark the remote cpu in the pending mask, so
* it can avoid reprogramming the cpu local
* timer in tick_broadcast_oneshot_control().
*/
cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
} else if (td->evtdev->next_event < next_event) {
next_event = td->evtdev->next_event;
next_cpu = cpu;
}
}
/*
* Remove the current cpu from the pending mask. The event is
* delivered immediately in tick_do_broadcast()!
*/
cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);
/* Take care of enforced broadcast requests */
cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
cpumask_clear(tick_broadcast_force_mask);
/*
* Sanity check. Catch the case where we try to broadcast to
* offline cpus.
*/
if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
cpumask_and(tmpmask, tmpmask, cpu_online_mask);
/*
* Wakeup the cpus which have an expired event.
*/
bc_local = tick_do_broadcast(tmpmask);
/*
* Two reasons for reprogram:
*
* - The global event did not expire any CPU local
* events. This happens in dyntick mode, as the maximum PIT
* delta is quite small.
*
* - There are pending events on sleeping CPUs which were not
* in the event mask
*/
if (next_event != KTIME_MAX)
tick_broadcast_set_event(dev, next_cpu, next_event);
raw_spin_unlock(&tick_broadcast_lock);
if (bc_local) {
td = this_cpu_ptr(&tick_cpu_device);
td->evtdev->event_handler(td->evtdev);
}
}
Example 14: tick_broadcast_control
/**
* tick_broadcast_control - Enable/disable or force broadcast mode
* @mode: The selected broadcast mode
*
* Called when the system enters a state where affected tick devices
* might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
*/
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
struct clock_event_device *bc, *dev;
struct tick_device *td;
int cpu, bc_stopped;
unsigned long flags;
/* Protects also the local clockevent device. */
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
td = this_cpu_ptr(&tick_cpu_device);
dev = td->evtdev;
/*
* Is the device not affected by the powerstate?
*/
if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
goto out;
if (!tick_device_is_functional(dev))
goto out;
cpu = smp_processor_id();
bc = tick_broadcast_device.evtdev;
bc_stopped = cpumask_empty(tick_broadcast_mask);
switch (mode) {
case TICK_BROADCAST_FORCE:
tick_broadcast_forced = 1;
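/* fall through: TICK_BROADCAST_FORCE also performs the ON actions */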
case TICK_BROADCAST_ON:
cpumask_set_cpu(cpu, tick_broadcast_on);
if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
/*
* Only shutdown the cpu local device, if:
*
* - the broadcast device exists
* - the broadcast device is not a hrtimer based one
* - the broadcast device is in periodic mode to
* avoid a hiccup during the switch to oneshot mode
*/
if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
clockevents_shutdown(dev);
}
break;
case TICK_BROADCAST_OFF:
if (tick_broadcast_forced)
break;
cpumask_clear_cpu(cpu, tick_broadcast_on);
if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
if (tick_broadcast_device.mode ==
TICKDEV_MODE_PERIODIC)
tick_setup_periodic(dev, 0);
}
break;
}
if (bc) {
if (cpumask_empty(tick_broadcast_mask)) {
if (!bc_stopped)
clockevents_shutdown(bc);
} else if (bc_stopped) {
if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
tick_broadcast_start_periodic(bc);
else
tick_broadcast_setup_oneshot(bc);
}
}
out:
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
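The bc_stopped snapshot taken before the mode switch lets the tail detect edge transitions of the mask: non-empty to empty means the last CPU left broadcast mode (shut the broadcast device down), empty to non-empty means the first one entered (start it). A runnable userspace analogue of that edge detection:
#include <stdbool.h>
#include <stdio.h>

static unsigned long mask;

static void set_member(unsigned int id, bool on)
{
	bool was_empty = (mask == 0);          /* bc_stopped analogue */

	if (on)
		mask |= 1UL << id;
	else
		mask &= ~(1UL << id);

	if (mask == 0 && !was_empty)
		puts("last member left: shut broadcast device down");
	else if (mask != 0 && was_empty)
		puts("first member joined: start broadcast device");
}

int main(void)
{
	set_member(0, true);                   /* first join starts the device */
	set_member(1, true);
	set_member(0, false);
	set_member(1, false);                  /* last leave shuts it down */
	return 0;
}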
Example 15: tick_device_uses_broadcast
/*
* Check if the device is dysfunctional and a placeholder which
* needs to be handled by the broadcast device.
*/
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
struct clock_event_device *bc = tick_broadcast_device.evtdev;
unsigned long flags;
int ret = 0;
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
/*
* Devices might be registered with both periodic and oneshot
* mode disabled. This signals that the device needs to be
* operated from the broadcast device and is a placeholder for
* the cpu local device.
*/
if (!tick_device_is_functional(dev)) {
dev->event_handler = tick_handle_periodic;
tick_device_setup_broadcast_func(dev);
cpumask_set_cpu(cpu, tick_broadcast_mask);
if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
tick_broadcast_start_periodic(bc);
else
tick_broadcast_setup_oneshot(bc);
ret = 1;
} else {
/*
* Clear the broadcast bit for this cpu if the
* device is not power state affected.
*/
if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
cpumask_clear_cpu(cpu, tick_broadcast_mask);
else
tick_device_setup_broadcast_func(dev);
/*
* Clear the broadcast bit if the CPU is not in
* periodic broadcast on state.
*/
if (!cpumask_test_cpu(cpu, tick_broadcast_on))
cpumask_clear_cpu(cpu, tick_broadcast_mask);
switch (tick_broadcast_device.mode) {
case TICKDEV_MODE_ONESHOT:
/*
* If the system is in oneshot mode we can
* unconditionally clear the oneshot mask bit,
* because the CPU is running and therefore
* not in an idle state which causes the power
* state affected device to stop. Let the
* caller initialize the device.
*/
tick_broadcast_clear_oneshot(cpu);
ret = 0;
break;
case TICKDEV_MODE_PERIODIC:
/*
* If the system is in periodic mode, check
* whether the broadcast device can be
* switched off now.
*/
if (cpumask_empty(tick_broadcast_mask) && bc)
clockevents_shutdown(bc);
/*
* If we kept the cpu in the broadcast mask,
* tell the caller to leave the per cpu device
* in shutdown state. The periodic interrupt
* is delivered by the broadcast device, if
* the broadcast device exists and is not
* hrtimer based.
*/
if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
break;
default:
break;
}
}
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
return ret;
}