This article collects typical usage examples of the idle_cpu function in C++. If you have been wondering what exactly idle_cpu does, how to use it, or where to find examples of it in use, the curated code samples here may be just the help you need.
Below, 15 code examples of the idle_cpu function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
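For reference, idle_cpu() is the Linux scheduler predicate declared in <linux/sched.h>: it takes a CPU number and returns nonzero when that CPU's runqueue holds nothing but the idle task. A minimal sketch of the canonical call pattern, assuming kernel context (the result is a snapshot and may change the instant the call returns):

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Sketch: return any currently idle online CPU, or -1 if none was found. */
static int find_idle_cpu(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (idle_cpu(cpu))	/* nonzero while cpu runs only its idle task */
			return cpu;
	}
	return -1;
}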
Example 1: rtcc_idle_handler
/*
* RTCC idle handler, called when CPU is idle
*/
static int rtcc_idle_handler(struct notifier_block *nb, unsigned long val, void *data)
{
if (likely(!atomic_read(&krtccd_enabled)))
return 0;
if (likely(atomic_read(&need_to_reclaim) == 0))
return 0;
// To prevent RTCC from running too frequently
if (likely(time_before(jiffies, prev_jiffy + rtcc_reclaim_jiffies)))
return 0;
if (unlikely(atomic_read(&kswapd_running) == 1))
return 0;
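/* Wake krtccd only if its CPU is idle with no recent load, or when boost mode forces a run */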
if (unlikely(idle_cpu(task_cpu(krtccd)) && this_cpu_loadx(3) == 0) || rtcc_boost_mode) {
if (likely(atomic_read(&krtccd_running) == 0)) {
atomic_set(&krtccd_running, 1);
wake_up_process(krtccd);
prev_jiffy = jiffies;
}
}
return 0;
}
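Handlers like this one are attached to a CPU-idle notifier chain. A minimal registration sketch, assuming the MSM/Android-style idle_notifier_register() API that trees carrying this driver provide (it is not in mainline); the init function name is hypothetical:

static struct notifier_block rtcc_idle_nb = {
	.notifier_call = rtcc_idle_handler,
};

static int __init rtcc_setup(void)	/* hypothetical init hook */
{
	idle_notifier_register(&rtcc_idle_nb);	/* assumed API, not in mainline */
	return 0;
}
early_initcall(rtcc_setup);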
Example 2: main
int main(void) {
int i;
/* TODO: run test_printk() */
/*
test_printk();
*/
disable_interrupt();
init_pcbs();
setup_irq(8, irq0_handle);
setup_irq(0x80, irq1_handle);
setup_irq(9, keyboard_int);
/*
kthread_create(test1_a, stack1+KSTACK_SIZE);
kthread_create(test1_b, stack2+KSTACK_SIZE);
kthread_create(test1_c, stack3+KSTACK_SIZE);
kthread_create(test_thread_a, stack1+KSTACK_SIZE);
kthread_create(test_thread_b, stack2+KSTACK_SIZE);
kthread_create(test_msg_c, stack1+KSTACK_SIZE);
kthread_create(test_msg_d, stack2+KSTACK_SIZE);
kthread_create(test_thread_in_thread, stack1+KSTACK_SIZE);
*/
UserThread_create(0x2000);
kthread_create(tty_driver, stack2+KSTACK_SIZE);
enable_interrupt();
while (1) {
idle_cpu();
}
}
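Note that this example (and examples 4, 8, and 12 below) appears to come from a teaching kernel where idle_cpu() is a different symbol entirely: a void routine that halts or yields the processor inside a loop, not the Linux scheduler predicate int idle_cpu(int cpu) used by the other examples.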
Example 3: _cpu_down
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
int err, nr_calls = 0;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct take_cpu_down_param tcd_param = {
.mod = mod,
.hcpu = hcpu,
};
if (num_online_cpus() == 1)
return -EBUSY;
if (!cpu_online(cpu))
return -EINVAL;
cpu_hotplug_begin();
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
if (err) {
nr_calls--;
__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
printk("%s: attempt to take down CPU %u failed\n",
__func__, cpu);
goto out_release;
}
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
if (err) {
cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
goto out_release;
}
BUG_ON(cpu_online(cpu));
while (!idle_cpu(cpu))
cpu_relax();
__cpu_die(cpu);
cpu_notify_nofail(CPU_DEAD | mod, hcpu);
check_for_tasks(cpu);
#ifdef CONFIG_HTC_ACPU_DEBUG
{
unsigned int status = 0;
msm_proc_comm(PCOM_BACKUP_CPU_STATUS, (unsigned*)&cpu, (unsigned*) &status);
}
#endif
out_release:
cpu_hotplug_done();
if (!err)
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
return err;
}
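The idle_cpu() call here acts as a synchronization point rather than a hint: after __stop_machine() has run take_cpu_down(), the hotplug path spins with cpu_relax() until the dying CPU has settled into its idle thread, and only then invokes __cpu_die() to power it off, since tearing down a CPU that is still executing a task would corrupt state.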
Example 4: panic
void panic(char *str) {
/* TODO: modify this as you wish */
disable_interrupt();
printk("\n\n** Kernel panic: %s **\n", str);
while (1) {
idle_cpu();
}
}
Example 5: irq_enter
/*
* Enter an interrupt context.
*/
void irq_enter(void)
{
__irq_enter();
#ifdef CONFIG_NO_HZ
if (idle_cpu(smp_processor_id()))
tick_nohz_update_jiffies();
#endif
}
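The idle_cpu() check catches an interrupt landing on a CPU whose periodic tick was stopped by NOHZ while it slept. In that case jiffies may be stale, so tick_nohz_update_jiffies() brings the time base up to date before any interrupt handler reads it.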
Example 6: hrtimer_get_target
/*
* Get the preferred target CPU for NOHZ
*/
static int hrtimer_get_target(int this_cpu, int pinned)
{
#ifdef CONFIG_NO_HZ
if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
return get_nohz_timer_target();
#endif
return this_cpu;
}
Example 7: hrtimer_get_target
/*
* Get the preferred target CPU for NOHZ
*/
static int hrtimer_get_target(int this_cpu, int pinned)
{
#ifdef CONFIG_NO_HZ
#ifdef CONFIG_SCHED_BFS
if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) {
int preferred_cpu = get_nohz_load_balancer();
if (preferred_cpu >= 0)
return preferred_cpu;
}
#else
if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
return get_nohz_timer_target();
#endif
#endif
return this_cpu;
}
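Examples 6 and 7 implement the same policy: with timer migration enabled, a new hrtimer should not be armed on an idle CPU, since that would wake it from a power-saving state just to service the timer. They differ only in the fallback: the mainline variant asks get_nohz_timer_target() for a busy CPU, while the BFS variant consults get_nohz_load_balancer() and keeps this_cpu when no load balancer CPU is nominated.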
Example 8: thread_b
void thread_b(void){
int tmp;
while(1){
tmp = cnt+1;
printk("\ndata%d \n", tmp);
cnt = tmp;
for (tmp = 0; tmp < 20; tmp++);
idle_cpu();
}
}
Example 9: rcu_debugfs_show
static int rcu_debugfs_show(struct seq_file *m, void *unused)
{
int cpu, q, s[2], msecs;
raw_local_irq_disable();
msecs = div_s64(sched_clock() - rcu_timestamp, NSEC_PER_MSEC);
raw_local_irq_enable();
seq_printf(m, "%14u: #batches seen\n",
rcu_stats.nbatches);
seq_printf(m, "%14u: #barriers seen\n",
atomic_read(&rcu_stats.nbarriers));
seq_printf(m, "%14llu: #callbacks invoked\n",
rcu_stats.ninvoked);
seq_printf(m, "%14u: #callbacks left to invoke\n",
atomic_read(&rcu_stats.nleft));
seq_printf(m, "%14u: #msecs since last end-of-batch\n",
msecs);
seq_printf(m, "%14u: #passes forced (0 is best)\n",
rcu_stats.nforced);
seq_printf(m, "\n");
for_each_online_cpu(cpu)
seq_printf(m, "%4d ", cpu);
seq_printf(m, " CPU\n");
s[1] = s[0] = 0;
for_each_online_cpu(cpu) {
struct rcu_data *rd = &rcu_data[cpu];
int w = ACCESS_ONCE(rd->which) & 1;
seq_printf(m, "%c%c%c%d ",
'-',
idle_cpu(cpu) ? 'I' : '-',
rd->wait ? 'W' : '-',
w);
s[w]++;
}
seq_printf(m, " FLAGS\n");
for (q = 0; q < 2; q++) {
for_each_online_cpu(cpu) {
struct rcu_data *rd = &rcu_data[cpu];
struct rcu_list *l = &rd->cblist[q];
seq_printf(m, "%4d ", l->count);
}
seq_printf(m, " Q%d%c\n", q, " *"[s[q] > s[q^1]]);
}
seq_printf(m, "\nFLAGS:\n");
seq_printf(m, " I - cpu idle, 0|1 - Q0 or Q1 is current Q, other is previous Q,\n");
seq_printf(m, " W - cpu does not permit current batch to end (waiting),\n");
seq_printf(m, " * - marks the Q that is current for most CPUs.\n");
return 0;
}
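In this debugfs dump idle_cpu() feeds only the per-CPU 'I' flag, a point-in-time snapshot of which CPUs were idle at the moment the file was read; the batch and queue state comes from the per-CPU rcu_data structures.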
Example 10: rcu_check_callbacks
/*
* Check to see if the scheduling-clock interrupt came from an extended
* quiescent state, and, if so, tell RCU about it.
*/
void rcu_check_callbacks(int cpu, int user)
{
if (user ||
(idle_cpu(cpu) &&
!in_softirq() &&
hardirq_count() <= (1 << HARDIRQ_SHIFT)))
rcu_sched_qs(cpu);
else if (!in_softirq())
rcu_bh_qs(cpu);
rcu_preempt_check_callbacks();
}
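The condition reads: the tick arrived from user mode, or it interrupted the idle loop (idle_cpu()) with no softirq active and at most one hardirq level on the preempt count, which is what hardirq_count() <= (1 << HARDIRQ_SHIFT) tests. Either way the CPU cannot be inside an RCU read-side critical section, so a quiescent state may be reported. Examples 11 and 13 below are older variants of the same test that differ only in which quiescent-state counters they advance.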
Example 11: rcu_check_callbacks
void rcu_check_callbacks(int cpu, int user)
{
if (user ||
(idle_cpu(cpu) && !in_softirq() &&
hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
rcu_qsctr_inc(cpu);
rcu_bh_qsctr_inc(cpu);
} else if (!in_softirq())
rcu_bh_qsctr_inc(cpu);
tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
}
Example 12: test_server
void test_server(void){
struct message ms;
while (1){
receive(&ms);
printk("SERVER receive message (%d,%d,%d) from PID:%d",
ms.p1, ms.p2, ms.p3, ms.source);
send(2, &ms);
idle_cpu();
}
}
Example 13: rcu_check_callbacks
/*
* Check to see if the scheduling-clock interrupt came from an extended
* quiescent state, and, if so, tell RCU about it.
*/
void rcu_check_callbacks(int cpu, int user)
{
if (!rcu_needs_cpu(0))
return; /* RCU doesn't need anything to be done. */
if (user ||
(idle_cpu(cpu) &&
!in_softirq() &&
hardirq_count() <= (1 << HARDIRQ_SHIFT)))
rcu_qsctr_inc(cpu);
else if (!in_softirq())
rcu_bh_qsctr_inc(cpu);
}
Example 14: tick_irq_exit
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
int cpu = smp_processor_id();
/* Make sure that timer wheel updates are propagated */
if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
if (!in_interrupt())
tick_nohz_irq_exit();
}
#endif
}
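tick_irq_exit() applies the complementary check on the way out of an interrupt: if the CPU is idle with nothing waiting to run (idle_cpu() && !need_resched()), or it is a nohz_full CPU, the tick may need stopping or reprogramming; the !in_interrupt() test defers the actual tick_nohz_irq_exit() call to the outermost interrupt level.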
Example 15: __rcu_delimit_batches
/*
* Check if the conditions for ending the current batch are true. If
* so then end it.
*
* Must be invoked periodically, and the periodic invocations must be
* far enough apart in time for the previous batch to become quiescent.
* This is a few tens of microseconds unless NMIs are involved; an NMI
* stretches out the requirement by the duration of the NMI.
*
* "Quiescent" means the owning cpu is no longer appending callbacks
* and has completed execution of a trailing write-memory-barrier insn.
*/
static void __rcu_delimit_batches(struct rcu_list *pending)
{
struct rcu_data *rd;
struct rcu_list *plist;
int cpu, eob, prev;
if (!rcu_scheduler_active)
return;
rcu_stats.nlast++;
/* If an NMI occurred then the previous batch may not yet be
* quiescent. Let's wait till it is.
*/
if (rcu_nmi_seen) {
rcu_nmi_seen = 0;
rcu_stats.nmis++;
return;
}
/*
* Find out if the current batch has ended
* (end-of-batch).
*/
eob = 1;
for_each_online_cpu(cpu) {
rd = &rcu_data[cpu];
if (rd->wait) {
rd->wait = preempt_count_cpu(cpu) > idle_cpu(cpu);
if (rd->wait) {
eob = 0;
break;
}
}
}
/*
* Exit if batch has not ended. But first, tickle all non-cooperating
* CPUs if enough time has passed.
*/
if (eob == 0) {
if (rcu_wdog_ctr >= rcu_wdog_lim) {
rcu_wdog_ctr = 0;
rcu_stats.nforced++;
for_each_online_cpu(cpu) {
if (rcu_data[cpu].wait)
force_cpu_resched(cpu);
}
}
rcu_wdog_ctr += rcu_hz_period_us;
return;
}