This article collects typical usage examples of the sched_clock_idle_sleep_event function from the Linux kernel (C/C++). If you have been wondering what sched_clock_idle_sleep_event does, how it is called, or what real call sites look like, the curated examples here should help.
13 code examples of sched_clock_idle_sleep_event are shown below, sorted by popularity by default.
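All of the examples follow the same pattern: the scheduler clock is told that the CPU is about to enter a deep idle state via sched_clock_idle_sleep_event(), the platform-specific idle entry runs, and on wake-up sched_clock_idle_wakeup_event() is called with the idle residency in nanoseconds (or 0 if the caller does not track it). A minimal sketch of that pattern is shown below; my_platform_do_idle() is a hypothetical placeholder for the platform's actual idle entry.
#include <linux/ktime.h>
#include <linux/sched.h>	/* sched_clock_idle_*_event() (newer kernels: <linux/sched/clock.h>) */
extern void my_platform_do_idle(void);	/* hypothetical platform idle entry */
/* Minimal sketch of the call pattern shared by the examples below. */
static int my_idle_enter(void)
{
	ktime_t before, after;
	before = ktime_get();
	/* Tell the scheduler clock we are about to go deep-idle. */
	sched_clock_idle_sleep_event();
	my_platform_do_idle();
	after = ktime_get();
	/* Report how long the CPU was idle; 0 is also accepted if unknown. */
	sched_clock_idle_wakeup_event(ktime_to_ns(ktime_sub(after, before)));
	return 0;
}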
Example 1: acpi_idle_enter_simple
/**
* acpi_idle_enter_simple - enters an ACPI state without BM handling
* @dev: the target CPU
* @drv: cpuidle driver with cpuidle state information
* @index: the index of suggested state
*/
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
pr = __this_cpu_read(processors);
if (unlikely(!pr))
return -EINVAL;
if (cx->entry_method == ACPI_CSTATE_FFH) {
if (current_set_polling_and_test())
return -EINVAL;
}
/*
* Must be done before busmaster disable as we might need to
* access HPET !
*/
lapic_timer_state_broadcast(pr, cx, 1);
if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();
/* Tell the scheduler that we are going deep-idle: */
sched_clock_idle_sleep_event();
acpi_idle_do_entry(cx);
sched_clock_idle_wakeup_event(0);
lapic_timer_state_broadcast(pr, cx, 0);
return index;
}
Example 2: acpi_idle_enter_simple
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
ktime_t kt1, kt2;
s64 idle_time_ns;
s64 idle_time;
pr = __this_cpu_read(processors);
dev->last_residency = 0;
if (unlikely(!pr))
return -EINVAL;
local_irq_disable();
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
smp_mb();
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
local_irq_enable();
return -EINVAL;
}
}
lapic_timer_state_broadcast(pr, cx, 1);
if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();
kt1 = ktime_get_real();
sched_clock_idle_sleep_event();
acpi_idle_do_entry(cx);
kt2 = ktime_get_real();
idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
idle_time = idle_time_ns;
do_div(idle_time, NSEC_PER_USEC);
dev->last_residency = (int)idle_time;
sched_clock_idle_wakeup_event(idle_time_ns);
local_irq_enable();
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
cx->usage++;
lapic_timer_state_broadcast(pr, cx, 0);
cx->time += idle_time;
return index;
}
Example 3: tick_nohz_start_idle
static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
{
ktime_t now = ktime_get();
ts->idle_entrytime = now;
ts->idle_active = 1;
sched_clock_idle_sleep_event();
return now;
}
Example 4: tick_nohz_start_idle
static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
{
ktime_t now = ktime_get();
ts->idle_entrytime = now;
ts->idle_active = 1;
#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
mt_lbprof_update_state(cpu, MT_LBPROF_NO_TASK_STATE);
#endif
sched_clock_idle_sleep_event();
return now;
}
Example 5: acpi_idle_enter_simple
/**
* acpi_idle_enter_simple - enters an ACPI state without BM handling
* @dev: the target CPU
* @drv: cpuidle driver with cpuidle state information
* @index: the index of suggested state
*/
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
pr = __this_cpu_read(processors);
if (unlikely(!pr))
return -EINVAL;
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we test
* NEED_RESCHED:
*/
smp_mb();
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
return -EINVAL;
}
}
/*
* Must be done before busmaster disable as we might need to
* access HPET !
*/
lapic_timer_state_broadcast(pr, cx, 1);
if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();
/* Tell the scheduler that we are going deep-idle: */
sched_clock_idle_sleep_event();
acpi_idle_do_entry(cx);
sched_clock_idle_wakeup_event(0);
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
cx->usage++;
lapic_timer_state_broadcast(pr, cx, 0);
return index;
}
Example 6: tick_nohz_start_idle
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
ktime_t now, delta;
now = ktime_get();
if (ts->idle_active) {
delta = ktime_sub(now, ts->idle_entrytime);
ts->idle_lastupdate = now;
ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
}
ts->idle_entrytime = now;
ts->idle_active = 1;
sched_clock_idle_sleep_event();
return now;
}
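The tick_nohz_start_idle() variants in Examples 3, 4 and 6 only show the sleep half of the pairing. For completeness, a hedged sketch of the matching stop path is given below, reusing the tick_sched fields from Example 6; the function name my_tick_nohz_stop_idle and the exact bookkeeping are assumptions, as kernel versions differ in how they account the sleep time.
/* Sketch of the counterpart to tick_nohz_start_idle(): account the time
 * spent idle and notify the scheduler clock that the CPU woke up.
 */
static void my_tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	ktime_t delta = ktime_sub(now, ts->idle_entrytime);
	ts->idle_lastupdate = now;
	ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
	ts->idle_active = 0;
	/* Wake-up side of the sched_clock_idle_sleep_event() call above. */
	sched_clock_idle_wakeup_event(ktime_to_ns(delta));
}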
Example 7: omap3_enter_idle
static int omap3_enter_idle(struct cpuidle_device *dev,
struct cpuidle_state *state)
{
struct omap3_processor_cx *cx;
u8 cur_per_state, cur_neon_state, pre_neon_state, pre_per_state;
struct timespec ts_preidle, ts_postidle, ts_idle;
u32 fclken_core, iclken_core, fclken_per, iclken_per;
u32 sdrcpwr_val, sdrc_power_register = 0x0;
int wakeup_latency;
int core_sleep_flg = 0;
u32 per_ctx_saved = 0;
int ret = -1;
#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
int idle_status = 0;
#endif
local_irq_disable();
local_fiq_disable();
if (need_resched()) {
local_irq_enable();
local_fiq_enable();
return 0;
}
#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
sw_latency_arr[swlat_arr_wrptr].sleep_start =
omap_32k_sync_timer_read();
#endif
PM_PREPWSTST_MPU = 0xFF;
PM_PREPWSTST_CORE = 0xFF;
PM_PREPWSTST_NEON = 0xFF;
PM_PREPWSTST_PER = 0xFF;
cx = cpuidle_get_statedata(state);
target_state.mpu_state = cx->mpu_state;
target_state.core_state = cx->core_state;
/* take a time marker for residency */
getnstimeofday(&ts_preidle);
if (cx->type == OMAP3_STATE_C0) {
omap_sram_idle();
goto return_sleep_time;
}
if (cx->type > OMAP3_STATE_C1)
sched_clock_idle_sleep_event(); /* about to enter deep idle */
correct_target_state();
wakeup_latency = cx->wakeup_latency;
if (target_state.core_state != cx->core_state) {
/* Currently, this can happen only for core_off */
/* Adjust wakeup latency to that of core_cswr state */
/* Hard coded now and needs to be made more generic */
/* omap3_power_states[4] is CSWR for core */
wakeup_latency = omap3_power_states[4].wakeup_latency;
}
/* Reprogram next wake up tick to adjust for wake latency */
if (wakeup_latency > 1000) {
struct tick_device *d = tick_get_device(smp_processor_id());
ktime_t adjust, next, now = ktime_get();
if (ktime_to_ns(ktime_sub(d->evtdev->next_event, now)) >
(wakeup_latency * 1000 + NSEC_PER_MSEC)) {
adjust = ktime_set(0, (wakeup_latency * 1000));
next = ktime_sub(d->evtdev->next_event, adjust);
clockevents_program_event(d->evtdev, next, now);
}
}
/* Check for pending interrupts. If there is an interrupt, return */
if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
goto return_sleep_time;
prcm_get_power_domain_state(DOM_PER, &cur_per_state);
prcm_get_power_domain_state(DOM_NEON, &cur_neon_state);
fclken_core = CM_FCLKEN1_CORE;
iclken_core = CM_ICLKEN1_CORE;
fclken_per = CM_FCLKEN_PER;
iclken_per = CM_ICLKEN_PER;
/* If target state is core_off, save registers
* before changing anything
*/
if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
prcm_save_registers(&target_state);
omap_uart_save_ctx(0);
omap_uart_save_ctx(1);
}
/* Check for pending interrupts. If there is an interrupt, return */
if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
goto return_sleep_time;
/* Program MPU and NEON to target state */
if (target_state.mpu_state > PRCM_MPU_ACTIVE) {
if ((cur_neon_state == PRCM_ON) &&
//......... rest of the code omitted here .........
Example 8: acpi_idle_enter_bm
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
ktime_t kt1, kt2;
s64 idle_time_ns;
s64 idle_time;
pr = __this_cpu_read(processors);
dev->last_residency = 0;
if (unlikely(!pr))
return -EINVAL;
if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
if (drv->safe_state_index >= 0) {
return drv->states[drv->safe_state_index].enter(dev,
drv, drv->safe_state_index);
} else {
local_irq_disable();
acpi_safe_halt();
local_irq_enable();
return -EINVAL;
}
}
local_irq_disable();
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
smp_mb();
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
local_irq_enable();
return -EINVAL;
}
}
acpi_unlazy_tlb(smp_processor_id());
sched_clock_idle_sleep_event();
lapic_timer_state_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
if (pr->flags.bm_check && pr->flags.bm_control) {
raw_spin_lock(&c3_lock);
c3_cpu_count++;
if (c3_cpu_count == num_online_cpus())
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
raw_spin_unlock(&c3_lock);
} else if (!pr->flags.bm_check) {
ACPI_FLUSH_CPU_CACHE();
}
acpi_idle_do_entry(cx);
if (pr->flags.bm_check && pr->flags.bm_control) {
raw_spin_lock(&c3_lock);
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
c3_cpu_count--;
raw_spin_unlock(&c3_lock);
}
kt2 = ktime_get_real();
idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
idle_time = idle_time_ns;
do_div(idle_time, NSEC_PER_USEC);
dev->last_residency = (int)idle_time;
sched_clock_idle_wakeup_event(idle_time_ns);
local_irq_enable();
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
cx->usage++;
lapic_timer_state_broadcast(pr, cx, 0);
cx->time += idle_time;
return index;
}
Example 9: acpi_idle_enter_bm
/**
* acpi_idle_enter_bm - enters C3 with proper BM handling
* @dev: the target CPU
* @drv: cpuidle driver containing state data
* @index: the index of suggested state
*
* If BM is detected, the deepest non-C3 idle state is entered instead.
*/
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
pr = __this_cpu_read(processors);
if (unlikely(!pr))
return -EINVAL;
if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
if (drv->safe_state_index >= 0) {
return drv->states[drv->safe_state_index].enter(dev,
drv, drv->safe_state_index);
} else {
acpi_safe_halt();
return -EBUSY;
}
}
if (cx->entry_method == ACPI_CSTATE_FFH) {
if (current_set_polling_and_test())
return -EINVAL;
}
acpi_unlazy_tlb(smp_processor_id());
/* Tell the scheduler that we are going deep-idle: */
sched_clock_idle_sleep_event();
/*
* Must be done before busmaster disable as we might need to
* access HPET !
*/
lapic_timer_state_broadcast(pr, cx, 1);
/*
* disable bus master
* bm_check implies we need ARB_DIS
* !bm_check implies we need cache flush
* bm_control implies whether we can do ARB_DIS
*
* That leaves a case where bm_check is set and bm_control is
* not set. In that case we cannot do much, we enter C3
* without doing anything.
*/
if (pr->flags.bm_check && pr->flags.bm_control) {
raw_spin_lock(&c3_lock);
c3_cpu_count++;
/* Disable bus master arbitration when all CPUs are in C3 */
if (c3_cpu_count == num_online_cpus())
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
raw_spin_unlock(&c3_lock);
} else if (!pr->flags.bm_check) {
ACPI_FLUSH_CPU_CACHE();
}
acpi_idle_do_entry(cx);
/* Re-enable bus master arbitration */
if (pr->flags.bm_check && pr->flags.bm_control) {
raw_spin_lock(&c3_lock);
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
c3_cpu_count--;
raw_spin_unlock(&c3_lock);
}
sched_clock_idle_wakeup_event(0);
lapic_timer_state_broadcast(pr, cx, 0);
return index;
}
Example 10: acpi_idle_enter_bm
/**
* acpi_idle_enter_bm - enters C3 with proper BM handling
* @dev: the target CPU
* @state: the state data
*
* If BM is detected, the deepest non-C3 idle state is entered instead.
*/
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct cpuidle_state *state)
{
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
u32 t1, t2;
int sleep_ticks = 0;
pr = __get_cpu_var(processors);
if (unlikely(!pr))
return 0;
if (acpi_idle_suspend)
return(acpi_idle_enter_c1(dev, state));
if (acpi_idle_bm_check()) {
if (dev->safe_state) {
dev->last_state = dev->safe_state;
return dev->safe_state->enter(dev, dev->safe_state);
} else {
local_irq_disable();
acpi_safe_halt();
local_irq_enable();
return 0;
}
}
local_irq_disable();
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we test
* NEED_RESCHED:
*/
smp_mb();
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
local_irq_enable();
return 0;
}
acpi_unlazy_tlb(smp_processor_id());
/* Tell the scheduler that we are going deep-idle: */
sched_clock_idle_sleep_event();
/*
* Must be done before busmaster disable as we might need to
* access HPET !
*/
acpi_state_timer_broadcast(pr, cx, 1);
/*
* disable bus master
* bm_check implies we need ARB_DIS
* !bm_check implies we need cache flush
* bm_control implies whether we can do ARB_DIS
*
* That leaves a case where bm_check is set and bm_control is
* not set. In that case we cannot do much, we enter C3
* without doing anything.
*/
if (pr->flags.bm_check && pr->flags.bm_control) {
spin_lock(&c3_lock);
c3_cpu_count++;
/* Disable bus master arbitration when all CPUs are in C3 */
if (c3_cpu_count == num_online_cpus())
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
spin_unlock(&c3_lock);
} else if (!pr->flags.bm_check) {
ACPI_FLUSH_CPU_CACHE();
}
t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
acpi_idle_do_entry(cx);
t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
/* Re-enable bus master arbitration */
if (pr->flags.bm_check && pr->flags.bm_control) {
spin_lock(&c3_lock);
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
c3_cpu_count--;
spin_unlock(&c3_lock);
}
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
/* TSC could halt in idle, so notify users */
if (tsc_halts_in_c(ACPI_STATE_C3))
mark_tsc_unstable("TSC halts in idle");
#endif
sleep_ticks = ticks_elapsed(t1, t2);
/* Tell the scheduler how much we idled: */
sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
//......... rest of the code omitted here .........
Example 11: acpi_idle_enter_simple
/**
* acpi_idle_enter_simple - enters an ACPI state without BM handling
* @dev: the target CPU
* @state: the state data
*/
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct cpuidle_state *state)
{
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
u32 t1, t2;
int sleep_ticks = 0;
pr = __get_cpu_var(processors);
if (unlikely(!pr))
return 0;
if (acpi_idle_suspend)
return(acpi_idle_enter_c1(dev, state));
local_irq_disable();
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we test
* NEED_RESCHED:
*/
smp_mb();
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
local_irq_enable();
return 0;
}
/*
* Must be done before busmaster disable as we might need to
* access HPET !
*/
acpi_state_timer_broadcast(pr, cx, 1);
if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();
t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
/* Tell the scheduler that we are going deep-idle: */
sched_clock_idle_sleep_event();
acpi_idle_do_entry(cx);
t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
/* TSC could halt in idle, so notify users */
if (tsc_halts_in_c(cx->type))
mark_tsc_unstable("TSC halts in idle");
#endif
sleep_ticks = ticks_elapsed(t1, t2);
/* Tell the scheduler how much we idled: */
sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
local_irq_enable();
current_thread_info()->status |= TS_POLLING;
cx->usage++;
acpi_state_timer_broadcast(pr, cx, 0);
cx->time += sleep_ticks;
return ticks_elapsed_in_us(t1, t2);
}
Example 12: acpi_idle_enter_simple
/**
* acpi_idle_enter_simple - enters an ACPI state without BM handling
* @dev: the target CPU
* @index: the index of suggested state
*/
static int acpi_idle_enter_simple(struct cpuidle_device *dev, int index)
{
struct acpi_processor *pr;
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
ktime_t kt1, kt2;
s64 idle_time;
s64 sleep_ticks = 0;
pr = __get_cpu_var(processors);
dev->last_residency = 0;
if (unlikely(!pr))
return -EINVAL;
local_irq_disable();
if (acpi_idle_suspend) {
local_irq_enable();
cpu_relax();
return -EBUSY;
}
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we test
* NEED_RESCHED:
*/
smp_mb();
}
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
local_irq_enable();
return -EINVAL;
}
/*
* Must be done before busmaster disable as we might need to
* access HPET !
*/
lapic_timer_state_broadcast(pr, cx, 1);
if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();
kt1 = ktime_get_real();
/* Tell the scheduler that we are going deep-idle: */
sched_clock_idle_sleep_event();
acpi_idle_do_entry(cx);
kt2 = ktime_get_real();
idle_time = ktime_to_us(ktime_sub(kt2, kt1));
sleep_ticks = us_to_pm_timer_ticks(idle_time);
/* Update device last_residency*/
dev->last_residency = (int)idle_time;
/* Tell the scheduler how much we idled: */
sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
local_irq_enable();
current_thread_info()->status |= TS_POLLING;
cx->usage++;
lapic_timer_state_broadcast(pr, cx, 0);
cx->time += idle_time;
return index;
}
Example 13: omap3_enter_idle
static int omap3_enter_idle(struct cpuidle_device *dev,
struct cpuidle_state *state)
{
struct omap3_processor_cx *cx;
struct timespec ts_preidle;
struct timespec ts_postidle;
struct timespec ts_idle;
/* Used for LPR mode DSS context save/restore. */
u32 pm_wken_dss = 0;
u32 pm_pwstctrl_dss = 0;
u32 cm_clkstctrl_dss = 0;
u32 cm_fclken_dss = 0;
u32 cm_iclken_dss = 0;
u32 cm_autoidle_dss = 0;
u32 fclken_core;
u32 iclken_core;
u32 fclken_per;
u32 iclken_per;
int wakeup_latency;
struct system_power_state target_state;
struct system_power_state cur_state;
#ifdef CONFIG_HW_SUP_TRANS
u32 sleepdep_per;
u32 wakedep_per;
#endif /* #ifdef CONFIG_HW_SUP_TRANS */
u32 sdrc_power_register = 0;
int core_sleep_flg = 0;
int got_console_lock = 0;
/* Disable interrupts.
*/
local_irq_disable();
local_fiq_disable();
/* If need resched - return immediately
*/
if( need_resched()) {
local_fiq_enable();
local_irq_enable();
return 0;
}
/* Reset previous power state registers.
*/
clear_prepwstst();
omap3_idle_setup_wkup_sources ();
/* Set up target state from state context provided by cpuidle.
*/
cx = cpuidle_get_statedata(state);
target_state.mpu_state = cx->mpu_state;
target_state.core_state = cx->core_state;
target_state.neon_state = 0; /* Avoid gcc warning. Will be set in
adjust_target_states(). */
/* take a time marker for residency.
*/
getnstimeofday(&ts_preidle);
/* If the requested state is C0, we bail here...
*/
if (cx->type == OMAP3_STATE_C1) {
omap_sram_idle(target_state.mpu_state);
goto return_sleep_time;
}
if (cx->type > OMAP3_STATE_C2)
sched_clock_idle_sleep_event(); /* about to enter deep idle */
/* Adjust PER and NEON domain target states as well as CORE domain
* target state depending on MPU/CORE setting, enable_off sysfs entry
* and PER timer status.
*/
adjust_target_states(&target_state);
wakeup_latency = cx->wakeup_latency;
/* NOTE:
* We will never get the condition below as we are not supporting
* CORE OFF right now. Keeping this code around for future reference.
*/
if (target_state.core_state != cx->core_state) {
/* Currently, this can happen only for core_off. Adjust wakeup
* latency to that of core_cswr state. Hard coded now and needs
* to be made more generic omap3_power_states[4] is CSWR for
* core */
wakeup_latency = omap3_power_states[4].wakeup_latency;
}
/* Reprogram next wake up tick to adjust for wake latency */
if (wakeup_latency > 1000) {
struct tick_device *d = tick_get_device(smp_processor_id());
ktime_t now = ktime_get();
//......... rest of the code omitted here .........