This article collects typical usage examples of the cpumask_set_cpu function in C, as found in Linux kernel code. If you have been wondering what exactly cpumask_set_cpu does and how to use it, the hand-picked code examples below should help.
The 15 code examples shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better code examples.
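Before the examples, a minimal self-contained sketch of the API itself: cpumask_set_cpu() atomically sets one CPU's bit in a struct cpumask. The declarations live in <linux/cpumask.h>; the mask and function names in this sketch are illustrative, not taken from any of the examples below.

#include <linux/cpumask.h>
#include <linux/printk.h>

static void cpumask_demo(void)
{
        struct cpumask demo_mask;               /* illustrative on-stack mask */

        cpumask_clear(&demo_mask);              /* always start from a known-empty mask */
        cpumask_set_cpu(2, &demo_mask);         /* atomically set the bit for CPU 2 */

        if (cpumask_test_cpu(2, &demo_mask))    /* the bit is now visible */
                pr_info("CPU 2 is in the mask\n");

        cpumask_clear_cpu(2, &demo_mask);       /* and clear it again */
}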
Example 1: db8500_cpufreq_cooling_probe
static int db8500_cpufreq_cooling_probe(struct platform_device *pdev)
{
        struct thermal_cooling_device *cdev;
        struct cpumask mask_val;

        /* make sure cpufreq driver has been initialized */
        if (!cpufreq_frequency_get_table(0))
                return -EPROBE_DEFER;

        /* clear the on-stack mask before setting bits in it */
        cpumask_clear(&mask_val);
        cpumask_set_cpu(0, &mask_val);
        cdev = cpufreq_cooling_register(&mask_val);

        if (IS_ERR(cdev)) {
                dev_err(&pdev->dev, "Failed to register cooling device\n");
                return PTR_ERR(cdev);
        }

        platform_set_drvdata(pdev, cdev);
        dev_info(&pdev->dev, "Cooling device registered: %s\n", cdev->type);

        return 0;
}
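Note the cpumask_clear() added above: an uninitialized on-stack cpumask contains stack garbage, so setting a single bit is not enough (compare Example 3, which memsets its mask first). When the mask only ever holds one CPU, a common alternative is the constant single-CPU mask returned by cpumask_of(), which needs no local variable at all; a hedged sketch:

        /* cpumask_of(0) yields a const mask with only CPU 0's bit set */
        cdev = cpufreq_cooling_register(cpumask_of(0));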
Example 2: cpudl_find
/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: int - best CPU (heap maximum if suitable)
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
               struct cpumask *later_mask)
{
        int best_cpu = -1;
        const struct sched_dl_entity *dl_se = &p->dl;

        if (later_mask && cpumask_and(later_mask, cp->free_cpus,
                        &p->cpus_allowed) && cpumask_and(later_mask,
                        later_mask, cpu_active_mask)) {
                best_cpu = cpumask_any(later_mask);
                goto out;
        } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
                        dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
                best_cpu = cpudl_maximum(cp);
                if (later_mask)
                        cpumask_set_cpu(best_cpu, later_mask);
        }

out:
        WARN_ON(!cpu_present(best_cpu) && best_cpu != -1);

        return best_cpu;
}
Example 3: amlogic_register_thermal
/* Register with the in-kernel thermal management */
static int amlogic_register_thermal(struct amlogic_thermal_platform_data *pdata)
{
        int ret = 0;
        struct cpumask mask_val;

        memset(&mask_val, 0, sizeof(struct cpumask));
        cpumask_set_cpu(0, &mask_val);
        pdata->cpu_cool_dev = cpufreq_cooling_register(&mask_val);
        if (IS_ERR(pdata->cpu_cool_dev)) {
                pr_err("Failed to register cpufreq cooling device\n");
                ret = -EINVAL;
                goto err_unregister;
        }

        pdata->cpucore_cool_dev = cpucore_cooling_register();
        if (IS_ERR(pdata->cpucore_cool_dev)) {
                pr_err("Failed to register cpucore cooling device\n");
                ret = -EINVAL;
                goto err_unregister;
        }

        pdata->therm_dev = thermal_zone_device_register(pdata->name,
                        pdata->temp_trip_count, 7, pdata, &amlogic_dev_ops,
                        NULL, 0, pdata->idle_interval);
        if (IS_ERR(pdata->therm_dev)) {
                pr_err("Failed to register thermal zone device\n");
                ret = -EINVAL;
                goto err_unregister;
        }

        pr_info("amlogic: Kernel Thermal management registered\n");

        return 0;

err_unregister:
        amlogic_unregister_thermal(pdata);
        return ret;
}
Example 4: disable_nonboot_cpus
int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);

        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                pr_err("Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}
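The bits collected in frozen_cpus here are consumed again on resume. A simplified counterpart, modeled on the kernel's enable_nonboot_cpus() (details vary between kernel versions, so treat this as a sketch rather than the exact source):

void enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");
        /* walk exactly the CPUs that disable_nonboot_cpus() took down */
        for_each_cpu(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_err("Error taking CPU%d up: %d\n", cpu, error);
        }
        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}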
Example 5: prom_init
void __init prom_init(void)
{
        int *argv, *envp;               /* passed as 32 bit ptrs */
        struct psb_info *prom_infop;
        void *reset_vec;
#ifdef CONFIG_SMP
        int i;
#endif

        /* truncate to 32 bit and sign extend all args */
        argv = (int *)(long)(int)fw_arg1;
        envp = (int *)(long)(int)fw_arg2;
        prom_infop = (struct psb_info *)(long)(int)fw_arg3;

        nlm_prom_info = *prom_infop;
        nlm_init_node();

        /* Update reset entry point with CPU init code */
        reset_vec = (void *)CKSEG1ADDR(RESET_VEC_PHYS);
        memset(reset_vec, 0, RESET_VEC_SIZE);
        memcpy(reset_vec, (void *)nlm_reset_entry,
               (nlm_reset_entry_end - nlm_reset_entry));

        nlm_early_serial_setup();
        build_arcs_cmdline(argv);
        prom_add_memory();

#ifdef CONFIG_SMP
        for (i = 0; i < 32; i++)
                if (nlm_prom_info.online_cpu_map & (1 << i))
                        cpumask_set_cpu(i, &nlm_cpumask);
        nlm_wakeup_secondary_cpus();
        register_smp_ops(&nlm_smp_ops);
#endif
        xlr_board_info_setup();
        xlr_percpu_fmn_init();
}
Example 6: percpu_ida_free
/**
 * percpu_ida_free - free a tag
 * @pool: pool @tag was allocated from
 * @tag: a tag previously allocated with percpu_ida_alloc()
 *
 * Safe to be called from interrupt context.
 */
void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
{
        struct percpu_ida_cpu *tags;
        unsigned long flags;
        unsigned nr_free;

        BUG_ON(tag >= pool->nr_tags);

        tags = raw_cpu_ptr(pool->tag_cpu);

        spin_lock_irqsave(&tags->lock, flags);
        tags->freelist[tags->nr_free++] = tag;

        nr_free = tags->nr_free;

        if (nr_free == 1) {
                cpumask_set_cpu(smp_processor_id(),
                                &pool->cpus_have_tags);
                wake_up(&pool->wait);
        }
        spin_unlock_irqrestore(&tags->lock, flags);

        if (nr_free == pool->percpu_max_size) {
                spin_lock_irqsave(&pool->lock, flags);
                spin_lock(&tags->lock);

                if (tags->nr_free == pool->percpu_max_size) {
                        move_tags(pool->freelist, &pool->nr_free,
                                  tags->freelist, &tags->nr_free,
                                  pool->percpu_batch_size);

                        wake_up(&pool->wait);
                }
                spin_unlock(&tags->lock);
                spin_unlock_irqrestore(&pool->lock, flags);
        }
}
Example 7: crash_ipi_callback
void crash_ipi_callback(struct pt_regs *regs)
{
        static cpumask_t cpus_state_saved = CPU_MASK_NONE;

        int cpu = smp_processor_id();

        if (!cpu_online(cpu))
                return;

        hard_irq_disable();
        if (!cpumask_test_cpu(cpu, &cpus_state_saved)) {
                crash_save_cpu(regs, cpu);
                cpumask_set_cpu(cpu, &cpus_state_saved);
        }

        atomic_inc(&cpus_in_crash);
        smp_mb__after_atomic_inc();

        /*
         * Starting the kdump boot.
         * This barrier is needed to make sure that all CPUs are stopped.
         */
        while (!time_to_dump)
                cpu_relax();

        if (ppc_md.kexec_cpu_down)
                ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
        kexec_smp_wait();
#else
        for (;;);       /* FIXME */
#endif

        /* NOTREACHED */
}
Example 8: rq_attach_root
void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
        struct root_domain *old_rd = NULL;
        unsigned long flags;

        raw_spin_lock_irqsave(&rq->lock, flags);

        if (rq->rd) {
                old_rd = rq->rd;

                if (cpumask_test_cpu(rq->cpu, old_rd->online))
                        set_rq_offline(rq);

                cpumask_clear_cpu(rq->cpu, old_rd->span);

                /*
                 * If we don't want to free the old_rd yet then
                 * set old_rd to NULL to skip the freeing later
                 * in this function:
                 */
                if (!atomic_dec_and_test(&old_rd->refcount))
                        old_rd = NULL;
        }

        atomic_inc(&rd->refcount);
        rq->rd = rd;

        cpumask_set_cpu(rq->cpu, rd->span);
        if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
                set_rq_online(rq);

        raw_spin_unlock_irqrestore(&rq->lock, flags);

        if (old_rd)
                call_rcu_sched(&old_rd->rcu, free_rootdomain);
}
Example 9: irq_create_affinity_mask
/*
 * Take a map of online CPUs and the number of available interrupt vectors
 * and generate an output cpumask suitable for spreading MSI/MSI-X vectors
 * so that they are distributed as evenly as possible around the CPUs.  If
 * more vectors than CPUs are available we'll map one to each CPU,
 * otherwise we map one to the first sibling of each socket.
 *
 * If there are more vectors than CPUs we will still only have one bit
 * set per CPU, but interrupt code will keep on assigning the vectors from
 * the start of the bitmap until we run out of vectors.
 */
struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
{
        struct cpumask *affinity_mask;
        unsigned int max_vecs = *nr_vecs;

        if (max_vecs == 1)
                return NULL;

        affinity_mask = kzalloc(cpumask_size(), GFP_KERNEL);
        if (!affinity_mask) {
                *nr_vecs = 1;
                return NULL;
        }

        get_online_cpus();
        if (max_vecs >= num_online_cpus()) {
                cpumask_copy(affinity_mask, cpu_online_mask);
                *nr_vecs = num_online_cpus();
        } else {
                unsigned int vecs = 0, cpu;

                for_each_online_cpu(cpu) {
                        if (cpu == get_first_sibling(cpu)) {
                                cpumask_set_cpu(cpu, affinity_mask);
                                vecs++;
                        }

                        if (--max_vecs == 0)
                                break;
                }
                *nr_vecs = vecs;
        }
        put_online_cpus();

        return affinity_mask;
}
Example 10: bench_outstanding_parallel_cpus
void noinline bench_outstanding_parallel_cpus(uint32_t loops, int nr_cpus,
                                              int outstanding_pages)
{
        const char *desc = "parallel_cpus";
        struct time_bench_sync sync;
        struct time_bench_cpu *cpu_tasks;
        struct cpumask my_cpumask;
        int i;

        /* Allocate records for CPUs; bail out if the allocation fails */
        cpu_tasks = kzalloc(sizeof(*cpu_tasks) * nr_cpus, GFP_KERNEL);
        if (!cpu_tasks)
                return;

        /* Reduce number of CPUs to run on */
        cpumask_clear(&my_cpumask);
        for (i = 0; i < nr_cpus; i++) {
                cpumask_set_cpu(i, &my_cpumask);
        }
        pr_info("Limit to %d parallel CPUs\n", nr_cpus);

        time_bench_run_concurrent(loops, outstanding_pages, NULL,
                                  &my_cpumask, &sync, cpu_tasks,
                                  time_alloc_pages_outstanding);
        time_bench_print_stats_cpumask(desc, cpu_tasks, &my_cpumask);

        kfree(cpu_tasks);
}
Example 11: hyperv_prepare_irq_remapping
static int __init hyperv_prepare_irq_remapping(void)
{
        struct fwnode_handle *fn;
        int i;

        if (!hypervisor_is_type(X86_HYPER_MS_HYPERV) ||
            !x2apic_supported())
                return -ENODEV;

        fn = irq_domain_alloc_named_id_fwnode("HYPERV-IR", 0);
        if (!fn)
                return -ENOMEM;

        ioapic_ir_domain =
                irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
                                            0, IOAPIC_REMAPPING_ENTRY, fn,
                                            &hyperv_ir_domain_ops, NULL);

        irq_domain_free_fwnode(fn);

        /*
         * Hyper-V doesn't provide irq remapping for the IO-APIC, so the
         * IO-APIC only accepts 8-bit APIC IDs.  Each CPU's APIC ID is read
         * from the ACPI MADT table, and the APIC IDs in the MADT on Hyper-V
         * are sorted in monotonically increasing order.  The APIC ID
         * reflects the CPU topology, so there may be gaps in the APIC ID
         * space when the number of CPUs in a socket is not a power of two.
         * Prepare the maximum CPU affinity for IOAPIC irqs: scan CPUs 0-255
         * and set each CPU in ioapic_max_cpumask if its APIC ID is less
         * than 256.
         */
        for (i = min_t(unsigned int, num_possible_cpus() - 1, 255); i >= 0; i--)
                if (cpu_physical_id(i) < 256)
                        cpumask_set_cpu(i, &ioapic_max_cpumask);

        return 0;
}
Example 12: bl_idle_driver_init
static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int cpu_id)
{
        struct cpuinfo_arm *cpu_info;
        struct cpumask *cpumask;
        unsigned long cpuid;
        int cpu;

        cpumask = kzalloc(cpumask_size(), GFP_KERNEL);
        if (!cpumask)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                cpu_info = &per_cpu(cpu_data, cpu);
                cpuid = is_smp() ? cpu_info->cpuid : read_cpuid_id();

                /* read cpu id part number */
                if ((cpuid & 0xFFF0) == cpu_id)
                        cpumask_set_cpu(cpu, cpumask);
        }

        drv->cpumask = cpumask;

        return 0;
}
Example 13: homecache_mask
/* Return a mask of the cpus whose caches currently own these pages. */
static void homecache_mask(struct page *page, int pages,
                           struct cpumask *home_mask)
{
        int i;

        cpumask_clear(home_mask);
        for (i = 0; i < pages; ++i) {
                int home = page_home(&page[i]);

                if (home == PAGE_HOME_IMMUTABLE ||
                    home == PAGE_HOME_INCOHERENT) {
                        cpumask_copy(home_mask, cpu_possible_mask);
                        return;
                }
#if CHIP_HAS_CBOX_HOME_MAP()
                if (home == PAGE_HOME_HASH) {
                        cpumask_or(home_mask, home_mask, &hash_for_home_map);
                        continue;
                }
#endif
                if (home == PAGE_HOME_UNCACHED)
                        continue;
                BUG_ON(home < 0 || home >= NR_CPUS);
                cpumask_set_cpu(home, home_mask);
        }
}
Example 14: update_siblings_masks
static void update_siblings_masks(unsigned int cpuid)
{
        struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        if (cpuid_topo->cluster_id == -1) {
                /*
                 * DT does not contain topology information for this cpu;
                 * reset it to the default behaviour
                 */
                pr_debug("CPU%u: No topology information configured\n", cpuid);
                cpuid_topo->core_id = 0;
                cpumask_set_cpu(cpuid, &cpuid_topo->core_sibling);
                cpumask_set_cpu(cpuid, &cpuid_topo->thread_sibling);
                return;
        }

        /* update core and thread sibling masks */
        for_each_possible_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpuid_topo->thread_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
}
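The sibling masks built above are read back with the for_each_cpu() iterator, which visits exactly the bits set by cpumask_set_cpu(). A minimal hedged sketch (the helper below is illustrative, not part of the original driver):

/* Illustrative helper: list the core siblings recorded for one CPU. */
static void print_core_siblings(unsigned int cpuid)
{
        const struct cpumask *siblings = &cpu_topology[cpuid].core_sibling;
        unsigned int cpu;

        for_each_cpu(cpu, siblings)
                pr_info("CPU%u: core sibling CPU%u\n", cpuid, cpu);
}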
Example 15: tegra_cpuidle_register
static int tegra_cpuidle_register(unsigned int cpu)
{
        struct cpuidle_driver *drv;
        struct cpuidle_state *state;

        drv = &per_cpu(cpuidle_drv, cpu);
        drv->name = driver_name;
        drv->owner = owner;
        drv->cpumask = &per_cpu(idle_mask, cpu);
        cpumask_set_cpu(cpu, drv->cpumask);
        drv->state_count = 0;

        state = &drv->states[CPUIDLE_STATE_CLKGATING];
        snprintf(state->name, CPUIDLE_NAME_LEN, "clock-gated");
        snprintf(state->desc, CPUIDLE_DESC_LEN, "CPU clock gated");
        state->exit_latency = 10;
        state->target_residency = 10;
        state->power_usage = 600;
        state->flags = CPUIDLE_FLAG_TIME_VALID;
        state->enter = tegra_idle_enter_clock_gating;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
        drv->safe_state_index = 0;
#endif
        drv->state_count++;

#ifdef CONFIG_PM_SLEEP
        state = &drv->states[CPUIDLE_STATE_POWERGATING];
        snprintf(state->name, CPUIDLE_NAME_LEN, "powered-down");
        snprintf(state->desc, CPUIDLE_DESC_LEN, "CPU power gated");
        state->exit_latency = tegra_cpu_power_good_time();
        state->target_residency = tegra_cpu_power_off_time() +
                tegra_cpu_power_good_time();
        if (state->target_residency < tegra_pd_min_residency)
                state->target_residency = tegra_pd_min_residency;
        state->power_usage = 100;
        state->flags = CPUIDLE_FLAG_TIME_VALID;
        state->enter = tegra_idle_enter_pd;
        drv->state_count++;

        if (cpu == 0) {
                state = &drv->states[CPUIDLE_STATE_MC_CLK_STOP];
                snprintf(state->name, CPUIDLE_NAME_LEN, "mc-clock");
                snprintf(state->desc, CPUIDLE_DESC_LEN, "MC clock stop");
                state->exit_latency = tegra_cpu_power_good_time() +
                        DRAM_SELF_REFRESH_EXIT_LATENCY;
                state->target_residency = tegra_cpu_power_off_time() +
                        tegra_cpu_power_good_time() +
                        DRAM_SELF_REFRESH_EXIT_LATENCY;
                if (state->target_residency <
                    tegra_mc_clk_stop_min_residency())
                        state->target_residency =
                                tegra_mc_clk_stop_min_residency();
                state->power_usage = 0;
                state->flags = CPUIDLE_FLAG_TIME_VALID;
                state->enter = tegra_idle_enter_pd;
                state->disabled = true;
                drv->state_count++;
        }
#endif

        if (cpuidle_register(drv, NULL)) {
                pr_err("CPU%u: failed to register driver\n", cpu);
                return -EIO;
        }

        on_each_cpu_mask(drv->cpumask, tegra_cpuidle_setup_bctimer,
                         (void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1);

        return 0;
}