This page collects typical usage examples of the cpu_down function from the Linux kernel (C code). If you are wondering what cpu_down does, how to call it, or what real call sites look like, the hand-picked examples below should help.
The following shows 15 code examples of the cpu_down function, sorted by popularity by default.
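Before the examples, here is a minimal sketch of the common calling pattern. This is my own illustration rather than code taken from any project below; it assumes an older kernel where cpu_down() and cpu_up() are callable from built-in code (newer kernels expose remove_cpu()/add_cpu() wrappers instead), and the helper name toggle_cpu is hypothetical. It takes one secondary CPU offline, checks the return value, and brings the CPU back online.
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/printk.h>
/* Hypothetical helper: take one secondary CPU offline and bring it back.
 * Both cpu_down() and cpu_up() return 0 on success or a negative errno. */
static int __ref toggle_cpu(unsigned int cpu)
{
	int ret;
	if (cpu == 0)
		return -EINVAL;		/* leave the boot CPU alone */
	ret = cpu_down(cpu);
	if (ret) {
		pr_err("cpu_down(%u) failed: %d\n", cpu, ret);
		return ret;
	}
	ret = cpu_up(cpu);
	if (ret)
		pr_err("cpu_up(%u) failed: %d\n", cpu, ret);
	return ret;
}
As in the examples below, callers are expected to check the return code: cpu_down() can legitimately fail (for instance when the CPU is the last one online or a hotplug notifier vetoes the transition).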
Example 1: rtas_cpu_state_change_mask
/* On return cpumask will be altered to indicate CPUs changed.
* CPUs with states changed will be set in the mask,
* CPUs with status unchanged will be unset in the mask. */
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
cpumask_var_t cpus)
{
int cpu;
int cpuret = 0;
int ret = 0;
if (cpumask_empty(cpus))
return 0;
for_each_cpu(cpu, cpus) {
switch (state) {
case DOWN:
cpuret = cpu_down(cpu);
break;
case UP:
cpuret = cpu_up(cpu);
break;
}
if (cpuret) {
pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
__func__,
((state == UP) ? "up" : "down"),
cpu, cpuret);
if (!ret)
ret = cpuret;
if (state == UP) {
/* clear bits for unchanged cpus, return */
cpumask_shift_right(cpus, cpus, cpu);
cpumask_shift_left(cpus, cpus, cpu);
break;
} else {
/* clear bit for unchanged cpu, continue */
cpumask_clear_cpu(cpu, cpus);
}
}
}
return ret;
}
Example 2: store_online
static ssize_t __ref store_online(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
int cpuid = cpu->dev.id;
int from_nid, to_nid;
ssize_t ret;
cpu_hotplug_driver_lock();
switch (buf[0]) {
case '0':
ret = cpu_down(cpuid);
if (!ret)
kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
break;
case '1':
from_nid = cpu_to_node(cpuid);
ret = cpu_up(cpuid);
/*
* When hot adding memory to memoryless node and enabling a cpu
* on the node, node number of the cpu may internally change.
*/
to_nid = cpu_to_node(cpuid);
if (from_nid != to_nid)
change_cpu_under_node(cpu, from_nid, to_nid);
if (!ret)
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
break;
default:
ret = -EINVAL;
}
cpu_hotplug_driver_unlock();
if (ret >= 0)
ret = count;
return ret;
}
Example 3: disable_nonboot_cpus
void disable_nonboot_cpus(void)
{
int cpu, error;
error = 0;
cpus_clear(frozen_cpus);
printk("Freezing cpus ...\n");
for_each_online_cpu(cpu) {
if (cpu == 0)
continue;
error = cpu_down(cpu);
if (!error) {
cpu_set(cpu, frozen_cpus);
printk("CPU%d is down\n", cpu);
continue;
}
printk("Error taking cpu %d down: %d\n", cpu, error);
}
BUG_ON(raw_smp_processor_id() != 0);
if (error)
panic("cpus not sleeping");
}
Example 4: min_max_constraints_workfunc
static void min_max_constraints_workfunc(struct work_struct *work)
{
int count = -1;
bool up = false;
unsigned int cpu;
int nr_cpus = num_online_cpus();
int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
if (cpq_state == TEGRA_CPQ_DISABLED)
return;
if (is_lp_cluster())
return;
if (nr_cpus < min_cpus) {
up = true;
count = min_cpus - nr_cpus;
} else if (nr_cpus > max_cpus && max_cpus >= min_cpus) {
count = nr_cpus - max_cpus;
}
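/* Online or offline one CPU per iteration until the min/max constraint is met. */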
for (;count > 0; count--) {
if (up) {
cpu = cpumask_next_zero(0, cpu_online_mask);
if (cpu < nr_cpu_ids)
cpu_up(cpu);
else
break;
} else {
cpu = cpumask_next(0, cpu_online_mask);
if (cpu < nr_cpu_ids)
cpu_down(cpu);
else
break;
}
}
}
Example 5: store_cpucore_max_num_limit
static ssize_t __ref store_cpucore_max_num_limit(struct kobject *kobj,
struct attribute *attr, const char *buf, size_t count)
{
int input, delta, cpu;
if (!sscanf(buf, "%u", &input))
return -EINVAL;
if (input < 1 || input > 4) {
pr_err("Must keep input range 1 ~ 4\n");
return -EINVAL;
}
delta = input - num_online_cpus();
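/* delta > 0: CPUs must be brought online; delta < 0: CPUs must be taken offline. */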
if (delta > 0) {
cpu = 1;
while (delta) {
if (!cpu_online(cpu)) {
cpu_up(cpu);
delta--;
}
cpu++;
}
} else if (delta < 0) {
cpu = 3;
while (delta) {
if (cpu_online(cpu)) {
cpu_down(cpu);
delta++;
}
cpu--;
}
}
max_num_cpu = input;
return count;
}
Example 6: setup_cpu_watcher
static int setup_cpu_watcher(struct notifier_block *notifier,
unsigned long event, void *data)
{
int cpu;
static struct xenbus_watch cpu_watch = {
.node = "cpu",
.callback = handle_vcpu_hotplug_event};
(void)register_xenbus_watch(&cpu_watch);
for_each_possible_cpu(cpu) {
if (vcpu_online(cpu) == 0) {
(void)cpu_down(cpu);
set_cpu_present(cpu, false);
}
}
return NOTIFY_DONE;
}
static int __init setup_vcpu_hotplug_event(void)
{
static struct notifier_block xsn_cpu = {
.notifier_call = setup_cpu_watcher };
#ifdef CONFIG_X86
if (!xen_pv_domain() && !xen_pvh_domain())
#else
if (!xen_domain())
#endif
return -ENODEV;
register_xenstore_notifier(&xsn_cpu);
return 0;
}
arch_initcall(setup_vcpu_hotplug_event);
Example 7: store_online
static ssize_t store_online(struct sys_device *dev, const char *buf,
size_t count)
{
struct cpu *cpu = container_of(dev, struct cpu, sysdev);
ssize_t ret;
switch (buf[0]) {
case '0':
ret = cpu_down(cpu->sysdev.id);
if (!ret)
kobject_hotplug(&dev->kobj, KOBJ_OFFLINE);
break;
case '1':
ret = cpu_up(cpu->sysdev.id);
break;
default:
ret = -EINVAL;
}
if (ret >= 0)
ret = count;
return ret;
}
Example 8: hps_early_suspend
static void hps_early_suspend(struct early_suspend *h)
{
hps_warn("hps_early_suspend\n");
mutex_lock(&hps_ctxt.lock);
hps_ctxt.state = STATE_EARLY_SUSPEND;
hps_ctxt.rush_boost_enabled_backup = hps_ctxt.rush_boost_enabled;
hps_ctxt.rush_boost_enabled = 0;
// Reset the load-statistics bookkeeping when entering early-suspend mode.
hps_ctxt.up_loads_sum = 0;
hps_ctxt.up_loads_count = 0;
hps_ctxt.up_loads_history_index = 0;
hps_ctxt.up_loads_history[hps_ctxt.es_up_times - 1] = 0;
hps_ctxt.down_loads_sum = 0;
hps_ctxt.down_loads_count = 0;
hps_ctxt.down_loads_history_index = 0;
hps_ctxt.down_loads_history[hps_ctxt.es_down_times - 1] = 0;
if (hps_ctxt.is_hmp && hps_ctxt.early_suspend_enabled)
{
unsigned int cpu;
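/* Take all online big-cluster cores offline, highest CPU id first. */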
for (cpu = hps_ctxt.big_cpu_id_max; cpu >= hps_ctxt.big_cpu_id_min; --cpu)
{
if (cpu_online(cpu))
cpu_down(cpu);
}
}
mutex_unlock(&hps_ctxt.lock);
atomic_set(&hps_ctxt.is_ondemand, 1);
hps_warn("state: %u, enabled: %u, early_suspend_enabled: %u, suspend_enabled: %u, rush_boost_enabled: %u\n",
hps_ctxt.state, hps_ctxt.enabled, hps_ctxt.early_suspend_enabled, hps_ctxt.suspend_enabled, hps_ctxt.rush_boost_enabled);
return;
}
Example 9: tplug_work_fn
static void __cpuinit tplug_work_fn(struct work_struct *work)
{
int i;
unsigned int load[8], avg_load[8];
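/* endurance_level caps how many cores the hotplug logic may manage
 * (all, half, or a quarter of NR_CPUS). */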
switch(endurance_level)
{
case 0:
core_limit = NR_CPUS;
break;
case 1:
core_limit = NR_CPUS / 2;
break;
case 2:
core_limit = NR_CPUS / 4;
break;
default:
core_limit = NR_CPUS;
break;
}
for(i = 0 ; i < core_limit; i++)
{
if(cpu_online(i))
load[i] = get_curr_load(i);
else
load[i] = 0;
avg_load[i] = ((int) load[i] + (int) last_load[i]) / 2;
last_load[i] = load[i];
}
for(i = 0 ; i < core_limit; i++)
{
if(cpu_online(i) && avg_load[i] > load_threshold && cpu_is_offline(i+1))
{
if(DEBUG)
pr_info("%s : bringing back cpu%d\n", THUNDERPLUG,i);
if(!((i+1) > 7)) {
last_time[i+1] = ktime_to_ms(ktime_get());
cpu_up(i+1);
}
}
else if(cpu_online(i) && avg_load[i] < load_threshold && cpu_online(i+1))
{
if(DEBUG)
pr_info("%s : offlining cpu%d\n", THUNDERPLUG,i);
if(!(i+1)==0) {
now[i+1] = ktime_to_ms(ktime_get());
if((now[i+1] - last_time[i+1]) > MIN_CPU_UP_TIME)
cpu_down(i+1);
}
}
}
#ifdef CONFIG_USES_MALI_MP2_GPU
if(gpu_hotplug_enabled) {
if(DEBUG)
pr_info("%s: current gpu load %d\n", THUNDERPLUG, get_gpu_load());
if(get_gpu_load() > gpu_min_load_threshold) {
if(get_gpu_cores_enabled() < 2) {
enable_gpu_cores(2);
if(DEBUG)
pr_info("%s: gpu1 onlined\n", THUNDERPLUG);
}
}
else {
if(get_gpu_cores_enabled() > 1) {
enable_gpu_cores(1);
if(DEBUG)
pr_info("%s: gpu1 offlined\n", THUNDERPLUG);
}
}
}
#endif
#ifdef CONFIG_SCHED_HMP
if(tplug_hp_style == 1 && !isSuspended)
#else
if(tplug_hp_enabled != 0 && !isSuspended)
#endif
queue_delayed_work_on(0, tplug_wq, &tplug_work,
msecs_to_jiffies(sampling_time));
else {
if(!isSuspended)
cpus_online_all();
else
thunderplug_suspend();
}
}
Example 10: tegra_auto_hotplug_work_func
static void tegra_auto_hotplug_work_func(struct work_struct *work)
{
bool up = false;
unsigned int cpu = nr_cpu_ids;
mutex_lock(tegra3_cpu_lock);
if (mp_policy && !is_lp_cluster()) {
mutex_unlock(tegra3_cpu_lock);
return;
}
switch (hp_state) {
case TEGRA_HP_DISABLED:
case TEGRA_HP_IDLE:
break;
case TEGRA_HP_DOWN:
cpu = tegra_get_slowest_cpu_n();
if (cpu < nr_cpu_ids) {
up = false;
queue_delayed_work(
hotplug_wq, &hotplug_work, down_delay);
hp_stats_update(cpu, false);
} else if (!is_lp_cluster() && !no_lp) {
if(!clk_set_parent(cpu_clk, cpu_lp_clk)) {
CPU_DEBUG_PRINTK(CPU_DEBUG_HOTPLUG, " enter LPCPU");
hp_stats_update(CONFIG_NR_CPUS, true);
hp_stats_update(0, false);
/* catch-up with governor target speed */
tegra_cpu_set_speed_cap(NULL);
} else
queue_delayed_work(
hotplug_wq, &hotplug_work, down_delay);
}
break;
case TEGRA_HP_UP:
if (is_lp_cluster() && !no_lp) {
if(!clk_set_parent(cpu_clk, cpu_g_clk)) {
CPU_DEBUG_PRINTK(CPU_DEBUG_HOTPLUG,
" leave LPCPU (%s)", __func__);
hp_stats_update(CONFIG_NR_CPUS, false);
hp_stats_update(0, true);
/* catch-up with governor target speed */
tegra_cpu_set_speed_cap(NULL);
}
} else {
switch (tegra_cpu_speed_balance()) {
/* cpu speed is up and balanced - one more on-line */
case TEGRA_CPU_SPEED_BALANCED:
cpu = cpumask_next_zero(0, cpu_online_mask);
if (cpu < nr_cpu_ids) {
up = true;
hp_stats_update(cpu, true);
}
break;
/* cpu speed is up, but skewed - remove one core */
case TEGRA_CPU_SPEED_SKEWED:
cpu = tegra_get_slowest_cpu_n();
if (cpu < nr_cpu_ids) {
up = false;
hp_stats_update(cpu, false);
}
break;
/* cpu speed is up, but under-utilized - do nothing */
case TEGRA_CPU_SPEED_BIASED:
default:
break;
}
}
queue_delayed_work(
hotplug_wq, &hotplug_work, up2gn_delay);
break;
default:
pr_err(CPU_HOTPLUG_TAG"%s: invalid tegra hotplug state %d\n",
__func__, hp_state);
}
mutex_unlock(tegra3_cpu_lock);
if (system_state > SYSTEM_RUNNING) {
pr_info(CPU_HOTPLUG_TAG" system is not running\n");
} else if (cpu < nr_cpu_ids) {
if (up) {
updateCurrentCPUTotalActiveTime();
cpu_up(cpu);
pr_info(CPU_HOTPLUG_TAG" turn on CPU %d, online CPU 0-3=[%d%d%d%d]\n",
cpu, cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3));
} else {
updateCurrentCPUTotalActiveTime();
cpu_down(cpu);
pr_info(CPU_HOTPLUG_TAG" turn off CPU %d, online CPU 0-3=[%d%d%d%d]\n",
cpu, cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3));
}
}
}
Example 11: tplug_work_fn
static void __cpuinit tplug_work_fn(struct work_struct *work)
{
int i;
unsigned int load[8], avg_load[8];
switch(endurance_level)
{
case 0:
core_limit = 8;
break;
case 1:
core_limit = 4;
break;
case 2:
core_limit = 2;
break;
default:
core_limit = 8;
break;
}
for(i = 0 ; i < core_limit; i++)
{
if(cpu_online(i))
load[i] = get_curr_load(i);
else
load[i] = 0;
avg_load[i] = ((int) load[i] + (int) last_load[i]) / 2;
last_load[i] = load[i];
}
for(i = 0 ; i < core_limit; i++)
{
if(cpu_online(i) && avg_load[i] > load_threshold && cpu_is_offline(i+1))
{
if(DEBUG)
pr_info("%s : bringing back cpu%d\n", THUNDERPLUG,i);
if(!((i+1) > 7))
cpu_up(i+1);
}
else if(cpu_online(i) && avg_load[i] < load_threshold && cpu_online(i+1))
{
if(DEBUG)
pr_info("%s : offlining cpu%d\n", THUNDERPLUG,i);
if(!(i+1)==0)
cpu_down(i+1);
}
}
if(tplug_hp_enabled != 0 && !isSuspended)
queue_delayed_work_on(0, tplug_wq, &tplug_work,
msecs_to_jiffies(sampling_time));
else {
if(!isSuspended)
cpus_online_all();
else
thunderplug_suspend();
}
}
Example 12: hotplug_timer
static void hotplug_timer(struct work_struct *work)
{
struct cpu_hotplug_info tmp_hotplug_info[4];
int i;
unsigned int load = 0;
unsigned int cpu_rq_min=0;
unsigned long nr_rq_min = -1UL;
unsigned int select_off_cpu = 0;
enum flag flag_hotplug;
mutex_lock(&hotplug_lock);
if (user_lock == 1)
goto no_hotplug;
for_each_online_cpu(i) {
struct cpu_time_info *tmp_info;
cputime64_t cur_wall_time, cur_idle_time;
unsigned int idle_time, wall_time;
tmp_info = &per_cpu(hotplug_cpu_time, i);
cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);
idle_time = (unsigned int)cputime64_sub(cur_idle_time,
tmp_info->prev_cpu_idle);
tmp_info->prev_cpu_idle = cur_idle_time;
wall_time = (unsigned int)cputime64_sub(cur_wall_time,
tmp_info->prev_cpu_wall);
tmp_info->prev_cpu_wall = cur_wall_time;
if (wall_time < idle_time)
goto no_hotplug;
tmp_info->load = 100 * (wall_time - idle_time) / wall_time;
load += tmp_info->load;
/*find minimum runqueue length*/
tmp_hotplug_info[i].nr_running = get_cpu_nr_running(i);
if (i && nr_rq_min > tmp_hotplug_info[i].nr_running) {
nr_rq_min = tmp_hotplug_info[i].nr_running;
cpu_rq_min = i;
}
}
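/* Pick the highest-numbered offline CPU as the candidate for hotplug-in. */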
for (i = NUM_CPUS - 1; i > 0; --i) {
if (cpu_online(i) == 0) {
select_off_cpu = i;
break;
}
}
/* standalone hotplug decision */
flag_hotplug = standalone_hotplug(load, nr_rq_min, cpu_rq_min);
/*cpu hotplug*/
if (flag_hotplug == HOTPLUG_IN && cpu_online(select_off_cpu) == CPU_OFF) {
#ifndef PRODUCT_SHIP
DBG_PRINT("cpu%d turning on!\n", select_off_cpu);
#endif
cpu_up(select_off_cpu);
#ifndef PRODUCT_SHIP
DBG_PRINT("cpu%d on\n", select_off_cpu);
#endif
hotpluging_rate = CHECK_DELAY * 4;
} else if (flag_hotplug == HOTPLUG_OUT && cpu_online(cpu_rq_min) == CPU_ON) {
#ifndef PRODUCT_SHIP
DBG_PRINT("cpu%d turnning off!\n", cpu_rq_min);
#endif
cpu_down(cpu_rq_min);
#ifndef PRODUCT_SHIP
DBG_PRINT("cpu%d off!\n", cpu_rq_min);
#endif
hotpluging_rate = CHECK_DELAY;
}
no_hotplug:
queue_delayed_work_on(0, hotplug_wq, &hotplug_work, hotpluging_rate);
mutex_unlock(&hotplug_lock);
}
Example 13: set_cpu_config
static void set_cpu_config(enum ux500_uc new_uc)
{
bool update = false;
int cpu;
int min_freq, max_freq;
if (new_uc != current_uc)
update = true;
else if ((user_config_updated) && (new_uc == UX500_UC_USER))
update = true;
pr_debug("%s: new_usecase=%d, current_usecase=%d, update=%d\n",
__func__, new_uc, current_uc, update);
if (!update)
goto exit;
/* Cpu hotplug */
if (!(usecase_conf[new_uc].second_cpu_online) &&
(num_online_cpus() > 1))
cpu_down(1);
else if ((usecase_conf[new_uc].second_cpu_online) &&
(num_online_cpus() < 2))
cpu_up(1);
if (usecase_conf[new_uc].max_arm)
max_freq = usecase_conf[new_uc].max_arm;
else
max_freq = system_max_freq;
if (usecase_conf[new_uc].min_arm)
min_freq = usecase_conf[new_uc].min_arm;
else
min_freq = system_min_freq;
for_each_online_cpu(cpu)
set_cpufreq(cpu,
min_freq,
max_freq);
/* Kinda doing the job twice, but this is needed for reference keeping */
if (usecase_conf[new_uc].min_arm)
prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
"usecase",
usecase_conf[new_uc].min_arm);
else
prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
"usecase",
PRCMU_QOS_DEFAULT_VALUE);
/* Cpu idle */
cpuidle_set_multiplier(usecase_conf[new_uc].cpuidle_multiplier);
/* L2 prefetch */
if (usecase_conf[new_uc].l2_prefetch_en)
outer_prefetch_enable();
else
outer_prefetch_disable();
/* Force cpuidle state */
cpuidle_force_state(usecase_conf[new_uc].forced_state);
/* QOS override */
prcmu_qos_voice_call_override(usecase_conf[new_uc].vc_override);
current_uc = new_uc;
exit:
/* Its ok to clear even if new_uc != UX500_UC_USER */
user_config_updated = false;
}
Example 14: hotplug_timer
static void hotplug_timer(struct work_struct *work)
{
extern unsigned int sysctl_sched_olord_period;
unsigned int i, load = 0;
int offline_target = -1, online_target = -1;
struct cpu_time_info *tmp_info;
cputime64_t cur_wall_time, cur_idle_time;
unsigned int idle_time, wall_time;
printk(KERN_INFO "%u\n", sysctl_sched_olord_period);
mutex_lock(&hotplug_lock);
/* Find the target CPUs for online and offline */
for (i = 0; i < (sizeof cpus / sizeof (int)); i++){
//printk(KERN_INFO "cpus[%u]: %u\n", i, cpus[i]);
if(cpu_online(cpus[i])){
offline_target = cpus[i];
break;
}
else
online_target = cpus[i];
}
//printk(KERN_INFO "offline: %d, online %d\n", offline_target, online_target);
/* Calculate load */
tmp_info = &per_cpu(hotplug_cpu_time, offline_target);
cur_idle_time = get_cpu_idle_time_us(offline_target, &cur_wall_time);
/* Use cputime64_sub for older kernels */
//idle_time = (unsigned int)cputime64_sub(cur_idle_time,
// tmp_info->prev_cpu_idle);
idle_time = (unsigned int)(cur_idle_time - tmp_info->prev_cpu_idle);
tmp_info->prev_cpu_idle = cur_idle_time;
/* Use cputime64_sub for older kernels */
//wall_time = (unsigned int)cputime64_sub(cur_wall_time,
// tmp_info->prev_cpu_wall);
wall_time = (cur_wall_time - tmp_info->prev_cpu_wall);
tmp_info->prev_cpu_wall = cur_wall_time;
if (wall_time < idle_time)
goto no_hotplug;
load = 100 * (wall_time - idle_time) / wall_time;
//printk(KERN_INFO "Load %u\n", load);
/* Offline */
if (((load < trans_load_l_inuse)) &&
(num_online_cpus() > 1) && (offline_target > 0)) {
//printk(KERN_INFO "load: %u cpu %u turning off\n", load, offline_target);
cpu_down(offline_target);
hotpluging_rate = CHECK_DELAY;
/* Online */
} else if (((load > trans_load_h_inuse)) &&
(num_present_cpus() > num_online_cpus()) &&
(online_target != -1)) {
//printk(KERN_INFO "load: %u cpu %u turning on\n", load, online_target);
cpu_up(online_target);
hotpluging_rate = CHECK_DELAY * 10;
}
no_hotplug:
mutex_unlock(&hotplug_lock);
/* If we're being removed, don't queue more work */
if (likely(die == 0))
queue_delayed_work_on(0, hotplug_wq, &hotplug_work, hotpluging_rate);
}
Example 15: __cpu_hotplug
static int __ref __cpu_hotplug(bool out_flag, enum hotplug_cmd cmd)
{
int i = 0;
int ret = 0;
if (exynos_dm_hotplug_disabled())
return 0;
#if defined(CONFIG_SCHED_HMP)
if (out_flag) {
if (do_disable_hotplug)
goto blk_out;
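/* Offline path: CMD_BIG_OUT outside low-power mode only takes the big
 * cluster (CPUs >= NR_CA7) down; otherwise all non-boot CPUs go down. */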
if (cmd == CMD_BIG_OUT && !in_low_power_mode) {
for (i = setup_max_cpus - 1; i >= NR_CA7; i--) {
if (cpu_online(i)) {
ret = cpu_down(i);
if (ret)
goto blk_out;
}
}
} else {
for (i = setup_max_cpus - 1; i > 0; i--) {
if (cpu_online(i)) {
ret = cpu_down(i);
if (ret)
goto blk_out;
}
}
}
} else {
if (in_suspend_prepared)
goto blk_out;
if (cmd == CMD_BIG_IN) {
if (in_low_power_mode)
goto blk_out;
for (i = NR_CA7; i < setup_max_cpus; i++) {
if (!cpu_online(i)) {
ret = cpu_up(i);
if (ret)
goto blk_out;
}
}
} else {
if (big_hotpluged && !do_disable_hotplug) {
for (i = 1; i < NR_CA7; i++) {
if (!cpu_online(i)) {
ret = cpu_up(i);
if (ret)
goto blk_out;
}
}
} else {
for (i = 1; i < setup_max_cpus; i++) {
if (do_hotplug_out && i >= NR_CA7)
goto blk_out;
if (!cpu_online(i)) {
ret = cpu_up(i);
if (ret)
goto blk_out;
}
}
}
}
}
#else
if (out_flag) {
if (do_disable_hotplug)
goto blk_out;
for (i = setup_max_cpus - 1; i > 0; i--) {
if (cpu_online(i)) {
ret = cpu_down(i);
if (ret)
goto blk_out;
}
}
} else {
if (in_suspend_prepared)
goto blk_out;
for (i = 1; i < setup_max_cpus; i++) {
if (!cpu_online(i)) {
ret = cpu_up(i);
if (ret)
goto blk_out;
}
}
}
#endif
blk_out:
return ret;
}