This article collects typical usage examples of the sched_clock_idle_wakeup_event function in C. If you are wondering what sched_clock_idle_wakeup_event does, how it is called, or where it is used, the selected examples below may help.
A total of 13 code examples of sched_clock_idle_wakeup_event are shown, all taken from Linux kernel idle/cpuidle and tick code; by default they are ordered by popularity.
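Before the examples, here is a minimal usage sketch of the pattern that every listing below follows: sched_clock_idle_sleep_event() is called just before the CPU enters a low-power state, and sched_clock_idle_wakeup_event() is called on wakeup with either 0 or the measured idle time in nanoseconds, so the scheduler clock can compensate for the time the clock source may have been stopped. This sketch is not taken from any of the examples; my_enter_deep_idle() and my_platform_do_idle() are hypothetical placeholders, and the header choice assumes an older kernel where these events take a nanosecond argument, as in the examples below.

#include <linux/ktime.h>
#include <linux/sched.h>    /* declares the sched_clock idle events on older kernels */

static void my_enter_deep_idle(void)    /* hypothetical driver helper */
{
    ktime_t t1, t2;

    t1 = ktime_get();
    sched_clock_idle_sleep_event();     /* tell the scheduler clock: going deep-idle */

    my_platform_do_idle();              /* hypothetical platform low-power entry */

    t2 = ktime_get();
    /* report the idle residency in ns; callers may also pass 0 when unknown */
    sched_clock_idle_wakeup_event(ktime_to_ns(ktime_sub(t2, t1)));
}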
Example 1: acpi_idle_enter_simple
/**
* acpi_idle_enter_simple - enters an ACPI state without BM handling
* @dev: the target CPU
* @drv: cpuidle driver with cpuidle state information
* @index: the index of suggested state
*/
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
        struct cpuidle_driver *drv, int index)
{
    struct acpi_processor *pr;
    struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

    pr = __this_cpu_read(processors);
    if (unlikely(!pr))
        return -EINVAL;

    if (cx->entry_method == ACPI_CSTATE_FFH) {
        if (current_set_polling_and_test())
            return -EINVAL;
    }

    /*
     * Must be done before busmaster disable as we might need to
     * access HPET !
     */
    lapic_timer_state_broadcast(pr, cx, 1);

    if (cx->type == ACPI_STATE_C3)
        ACPI_FLUSH_CPU_CACHE();

    /* Tell the scheduler that we are going deep-idle: */
    sched_clock_idle_sleep_event();
    acpi_idle_do_entry(cx);
    sched_clock_idle_wakeup_event(0);

    lapic_timer_state_broadcast(pr, cx, 0);
    return index;
}
Example 2: acpi_idle_enter_simple
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
        struct cpuidle_driver *drv, int index)
{
    struct acpi_processor *pr;
    struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
    struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
    ktime_t kt1, kt2;
    s64 idle_time_ns;
    s64 idle_time;

    pr = __this_cpu_read(processors);
    dev->last_residency = 0;

    if (unlikely(!pr))
        return -EINVAL;

    local_irq_disable();

    if (cx->entry_method != ACPI_CSTATE_FFH) {
        current_thread_info()->status &= ~TS_POLLING;
        smp_mb();
        if (unlikely(need_resched())) {
            current_thread_info()->status |= TS_POLLING;
            local_irq_enable();
            return -EINVAL;
        }
    }

    lapic_timer_state_broadcast(pr, cx, 1);

    if (cx->type == ACPI_STATE_C3)
        ACPI_FLUSH_CPU_CACHE();

    kt1 = ktime_get_real();
    sched_clock_idle_sleep_event();
    acpi_idle_do_entry(cx);
    kt2 = ktime_get_real();
    idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
    idle_time = idle_time_ns;
    do_div(idle_time, NSEC_PER_USEC);

    dev->last_residency = (int)idle_time;

    sched_clock_idle_wakeup_event(idle_time_ns);

    local_irq_enable();
    if (cx->entry_method != ACPI_CSTATE_FFH)
        current_thread_info()->status |= TS_POLLING;

    cx->usage++;

    lapic_timer_state_broadcast(pr, cx, 0);
    cx->time += idle_time;
    return index;
}
Example 3: tick_nohz_stop_idle
static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
    struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

    update_ts_time_stats(cpu, ts, now, NULL);
    ts->idle_active = 0;

    sched_clock_idle_wakeup_event(0);
}
Example 4: tick_nohz_stop_idle
static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
    struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
    ktime_t delta;

    delta = ktime_sub(now, ts->idle_entrytime);
    ts->idle_lastupdate = now;
    ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
    ts->idle_active = 0;

    sched_clock_idle_wakeup_event(0);
}
Example 5: tick_nohz_stop_idle
static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
    struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

    update_ts_time_stats(cpu, ts, now, NULL);
    ts->idle_active = 0;
#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
    mt_lbprof_update_state(cpu, MT_LBPROF_NO_TASK_STATE);
#endif
    sched_clock_idle_wakeup_event(0);
}
Example 6: acpi_idle_enter_simple
/**
* acpi_idle_enter_simple - enters an ACPI state without BM handling
* @dev: the target CPU
* @drv: cpuidle driver with cpuidle state information
* @index: the index of suggested state
*/
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
        struct cpuidle_driver *drv, int index)
{
    struct acpi_processor *pr;
    struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
    struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

    pr = __this_cpu_read(processors);
    if (unlikely(!pr))
        return -EINVAL;

    if (cx->entry_method != ACPI_CSTATE_FFH) {
        current_thread_info()->status &= ~TS_POLLING;
        /*
         * TS_POLLING-cleared state must be visible before we test
         * NEED_RESCHED:
         */
        smp_mb();
        if (unlikely(need_resched())) {
            current_thread_info()->status |= TS_POLLING;
            return -EINVAL;
        }
    }

    /*
     * Must be done before busmaster disable as we might need to
     * access HPET !
     */
    lapic_timer_state_broadcast(pr, cx, 1);

    if (cx->type == ACPI_STATE_C3)
        ACPI_FLUSH_CPU_CACHE();

    /* Tell the scheduler that we are going deep-idle: */
    sched_clock_idle_sleep_event();
    acpi_idle_do_entry(cx);
    sched_clock_idle_wakeup_event(0);

    if (cx->entry_method != ACPI_CSTATE_FFH)
        current_thread_info()->status |= TS_POLLING;

    cx->usage++;

    lapic_timer_state_broadcast(pr, cx, 0);
    return index;
}
Example 7: omap3_enter_idle
//... the beginning of this example's code has been omitted ...
    /* Continue core restoration part, only if Core-Sleep is attempted */
    if ((target_state.core_state > PRCM_CORE_ACTIVE) && core_sleep_flg) {
        prcm_set_core_domain_state(PRCM_CORE_ACTIVE);
#ifdef CONFIG_OMAP_SMARTREFLEX
        enable_smartreflex(SR1_ID);
        enable_smartreflex(SR2_ID);
#endif
        if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
#ifdef CONFIG_OMAP34XX_OFFMODE
            context_restore_update(DOM_CORE1);
#endif
            prcm_restore_registers(&target_state);
            prcm_restore_core_context(target_state.core_state);
            omap3_restore_core_settings();
        }
        /* Errata 1.4
         * if the timer device gets idled which is when we
         * are cutting the timer ICLK which is when we try
         * to put Core to RET.
         * Wait Period = 2 timer interface clock cycles +
         *               1 timer functional clock cycle
         * Interface clock = L4 clock. For the computation L4
         * clock is assumed at 50MHz (worst case).
         * Functional clock = 32KHz
         * Wait Period = 2*10^-6/50 + 1/32768 = 0.000030557 = 30.557uSec
         * Rounding off the delay value to a safer 50uSec
         */
        omap_udelay(GPTIMER_WAIT_DELAY);
        CM_AUTOIDLE_WKUP &= ~(0x1);
        if (core_off_notification != NULL)
            core_off_notification(PRCM_FALSE);
    }

    if (cur_per_state == PRCM_ON) {
        CM_FCLKEN_PER = fclken_per;
        CM_ICLKEN_PER = iclken_per;
        prcm_get_pre_power_domain_state(DOM_PER, &pre_per_state);
        if (pre_per_state == PRCM_OFF && per_ctx_saved) {
            if (enable_debug)
                per_off++;
            omap3_restore_per_context();
            post_uart_inactivity();
#ifdef CONFIG_OMAP34XX_OFFMODE
            context_restore_update(DOM_PER);
#endif
        }
    }

    pr_debug("MPU state:%x,CORE state:%x\n", PM_PREPWSTST_MPU,
        PM_PREPWSTST_CORE);
    store_prepwst();

return_sleep_time:
    getnstimeofday(&ts_postidle);
    ts_idle = timespec_sub(ts_postidle, ts_preidle);

    if (cx->type > OMAP3_STATE_C1)
        sched_clock_idle_wakeup_event(timespec_to_ns(&ts_idle));

#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
    if (idle_status) {
        sw_latency_arr[swlat_arr_wrptr].wkup_end =
            omap_32k_sync_timer_read();
        sw_latency_arr[swlat_arr_wrptr].wkup_start =
            wakeup_start_32ksync;
        sw_latency_arr[swlat_arr_wrptr].cstate =
            ((PM_PREPWSTST_MPU & 0x3) << 2) |
            (PM_PREPWSTST_CORE & 0x3) |
            (omap_readl(0x48306CB0) << 16);
        swlat_arr_wrptr++;
        if (swlat_arr_wrptr == SW_LATENCY_ARR_SIZE)
            swlat_arr_wrptr = 0;
    }
#endif

    local_irq_enable();
    local_fiq_enable();

#ifdef OMAP3_START_RNG
    if (!is_device_type_gp()) {
        /* Start RNG after interrupts are enabled
         * and only when CORE OFF was successful
         */
        if (!(prepwst_core_rng & 0x3)) {
            ret = omap3_start_rng();
            if (ret)
                printk(KERN_INFO "Failed to generate new"
                    " RN in idle %x\n", ret);
            prepwst_core_rng = 0xFF;
        }
    }
#endif
    return (u32)timespec_to_ns(&ts_idle)/1000;
}
Example 8: acpi_idle_enter_bm
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        struct cpuidle_driver *drv, int index)
{
    struct acpi_processor *pr;
    struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
    struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
    ktime_t kt1, kt2;
    s64 idle_time_ns;
    s64 idle_time;

    pr = __this_cpu_read(processors);
    dev->last_residency = 0;

    if (unlikely(!pr))
        return -EINVAL;

    if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
        if (drv->safe_state_index >= 0) {
            return drv->states[drv->safe_state_index].enter(dev,
                drv, drv->safe_state_index);
        } else {
            local_irq_disable();
            acpi_safe_halt();
            local_irq_enable();
            return -EINVAL;
        }
    }

    local_irq_disable();

    if (cx->entry_method != ACPI_CSTATE_FFH) {
        current_thread_info()->status &= ~TS_POLLING;
        smp_mb();
        if (unlikely(need_resched())) {
            current_thread_info()->status |= TS_POLLING;
            local_irq_enable();
            return -EINVAL;
        }
    }

    acpi_unlazy_tlb(smp_processor_id());

    sched_clock_idle_sleep_event();
    lapic_timer_state_broadcast(pr, cx, 1);

    kt1 = ktime_get_real();

    if (pr->flags.bm_check && pr->flags.bm_control) {
        raw_spin_lock(&c3_lock);
        c3_cpu_count++;
        if (c3_cpu_count == num_online_cpus())
            acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
        raw_spin_unlock(&c3_lock);
    } else if (!pr->flags.bm_check) {
        ACPI_FLUSH_CPU_CACHE();
    }

    acpi_idle_do_entry(cx);

    if (pr->flags.bm_check && pr->flags.bm_control) {
        raw_spin_lock(&c3_lock);
        acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
        c3_cpu_count--;
        raw_spin_unlock(&c3_lock);
    }

    kt2 = ktime_get_real();
    idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
    idle_time = idle_time_ns;
    do_div(idle_time, NSEC_PER_USEC);

    dev->last_residency = (int)idle_time;

    sched_clock_idle_wakeup_event(idle_time_ns);

    local_irq_enable();
    if (cx->entry_method != ACPI_CSTATE_FFH)
        current_thread_info()->status |= TS_POLLING;

    cx->usage++;

    lapic_timer_state_broadcast(pr, cx, 0);
    cx->time += idle_time;
    return index;
}
Example 9: acpi_idle_enter_bm
/**
* acpi_idle_enter_bm - enters C3 with proper BM handling
* @dev: the target CPU
* @drv: cpuidle driver containing state data
* @index: the index of suggested state
*
* If BM is detected, the deepest non-C3 idle state is entered instead.
*/
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        struct cpuidle_driver *drv, int index)
{
    struct acpi_processor *pr;
    struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

    pr = __this_cpu_read(processors);
    if (unlikely(!pr))
        return -EINVAL;

    if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
        if (drv->safe_state_index >= 0) {
            return drv->states[drv->safe_state_index].enter(dev,
                drv, drv->safe_state_index);
        } else {
            acpi_safe_halt();
            return -EBUSY;
        }
    }

    if (cx->entry_method == ACPI_CSTATE_FFH) {
        if (current_set_polling_and_test())
            return -EINVAL;
    }

    acpi_unlazy_tlb(smp_processor_id());

    /* Tell the scheduler that we are going deep-idle: */
    sched_clock_idle_sleep_event();
    /*
     * Must be done before busmaster disable as we might need to
     * access HPET !
     */
    lapic_timer_state_broadcast(pr, cx, 1);

    /*
     * disable bus master
     * bm_check implies we need ARB_DIS
     * !bm_check implies we need cache flush
     * bm_control implies whether we can do ARB_DIS
     *
     * That leaves a case where bm_check is set and bm_control is
     * not set. In that case we cannot do much, we enter C3
     * without doing anything.
     */
    if (pr->flags.bm_check && pr->flags.bm_control) {
        raw_spin_lock(&c3_lock);
        c3_cpu_count++;
        /* Disable bus master arbitration when all CPUs are in C3 */
        if (c3_cpu_count == num_online_cpus())
            acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
        raw_spin_unlock(&c3_lock);
    } else if (!pr->flags.bm_check) {
        ACPI_FLUSH_CPU_CACHE();
    }

    acpi_idle_do_entry(cx);

    /* Re-enable bus master arbitration */
    if (pr->flags.bm_check && pr->flags.bm_control) {
        raw_spin_lock(&c3_lock);
        acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
        c3_cpu_count--;
        raw_spin_unlock(&c3_lock);
    }

    sched_clock_idle_wakeup_event(0);

    lapic_timer_state_broadcast(pr, cx, 0);
    return index;
}
Example 10: acpi_idle_enter_bm
/**
* acpi_idle_enter_bm - enters C3 with proper BM handling
* @dev: the target CPU
* @state: the state data
*
* If BM is detected, the deepest non-C3 idle state is entered instead.
*/
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        struct cpuidle_state *state)
{
    struct acpi_processor *pr;
    struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
    u32 t1, t2;
    int sleep_ticks = 0;

    pr = __get_cpu_var(processors);
    if (unlikely(!pr))
        return 0;

    if (acpi_idle_suspend)
        return(acpi_idle_enter_c1(dev, state));

    if (acpi_idle_bm_check()) {
        if (dev->safe_state) {
            dev->last_state = dev->safe_state;
            return dev->safe_state->enter(dev, dev->safe_state);
        } else {
            local_irq_disable();
            acpi_safe_halt();
            local_irq_enable();
            return 0;
        }
    }

    local_irq_disable();
    current_thread_info()->status &= ~TS_POLLING;
    /*
     * TS_POLLING-cleared state must be visible before we test
     * NEED_RESCHED:
     */
    smp_mb();
    if (unlikely(need_resched())) {
        current_thread_info()->status |= TS_POLLING;
        local_irq_enable();
        return 0;
    }

    acpi_unlazy_tlb(smp_processor_id());

    /* Tell the scheduler that we are going deep-idle: */
    sched_clock_idle_sleep_event();
    /*
     * Must be done before busmaster disable as we might need to
     * access HPET !
     */
    acpi_state_timer_broadcast(pr, cx, 1);

    /*
     * disable bus master
     * bm_check implies we need ARB_DIS
     * !bm_check implies we need cache flush
     * bm_control implies whether we can do ARB_DIS
     *
     * That leaves a case where bm_check is set and bm_control is
     * not set. In that case we cannot do much, we enter C3
     * without doing anything.
     */
    if (pr->flags.bm_check && pr->flags.bm_control) {
        spin_lock(&c3_lock);
        c3_cpu_count++;
        /* Disable bus master arbitration when all CPUs are in C3 */
        if (c3_cpu_count == num_online_cpus())
            acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
        spin_unlock(&c3_lock);
    } else if (!pr->flags.bm_check) {
        ACPI_FLUSH_CPU_CACHE();
    }

    t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
    acpi_idle_do_entry(cx);
    t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

    /* Re-enable bus master arbitration */
    if (pr->flags.bm_check && pr->flags.bm_control) {
        spin_lock(&c3_lock);
        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
        c3_cpu_count--;
        spin_unlock(&c3_lock);
    }

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
    /* TSC could halt in idle, so notify users */
    if (tsc_halts_in_c(ACPI_STATE_C3))
        mark_tsc_unstable("TSC halts in idle");
#endif

    sleep_ticks = ticks_elapsed(t1, t2);
    /* Tell the scheduler how much we idled: */
    sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
//... the remainder of this example's code has been omitted ...
Example 11: acpi_idle_enter_simple
/**
* acpi_idle_enter_simple - enters an ACPI state without BM handling
* @dev: the target CPU
* @state: the state data
*/
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
        struct cpuidle_state *state)
{
    struct acpi_processor *pr;
    struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
    u32 t1, t2;
    int sleep_ticks = 0;

    pr = __get_cpu_var(processors);
    if (unlikely(!pr))
        return 0;

    if (acpi_idle_suspend)
        return(acpi_idle_enter_c1(dev, state));

    local_irq_disable();
    current_thread_info()->status &= ~TS_POLLING;
    /*
     * TS_POLLING-cleared state must be visible before we test
     * NEED_RESCHED:
     */
    smp_mb();
    if (unlikely(need_resched())) {
        current_thread_info()->status |= TS_POLLING;
        local_irq_enable();
        return 0;
    }

    /*
     * Must be done before busmaster disable as we might need to
     * access HPET !
     */
    acpi_state_timer_broadcast(pr, cx, 1);

    if (cx->type == ACPI_STATE_C3)
        ACPI_FLUSH_CPU_CACHE();

    t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
    /* Tell the scheduler that we are going deep-idle: */
    sched_clock_idle_sleep_event();
    acpi_idle_do_entry(cx);
    t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
    /* TSC could halt in idle, so notify users */
    if (tsc_halts_in_c(cx->type))
        mark_tsc_unstable("TSC halts in idle");
#endif

    sleep_ticks = ticks_elapsed(t1, t2);
    /* Tell the scheduler how much we idled: */
    sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

    local_irq_enable();
    current_thread_info()->status |= TS_POLLING;

    cx->usage++;

    acpi_state_timer_broadcast(pr, cx, 0);
    cx->time += sleep_ticks;
    return ticks_elapsed_in_us(t1, t2);
}
Example 12: acpi_idle_enter_simple
/**
* acpi_idle_enter_simple - enters an ACPI state without BM handling
* @dev: the target CPU
* @index: the index of suggested state
*/
static int acpi_idle_enter_simple(struct cpuidle_device *dev, int index)
{
    struct acpi_processor *pr;
    struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
    struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
    ktime_t kt1, kt2;
    s64 idle_time;
    s64 sleep_ticks = 0;

    pr = __get_cpu_var(processors);
    dev->last_residency = 0;

    if (unlikely(!pr))
        return -EINVAL;

    local_irq_disable();

    if (acpi_idle_suspend) {
        local_irq_enable();
        cpu_relax();
        return -EBUSY;
    }

    if (cx->entry_method != ACPI_CSTATE_FFH) {
        current_thread_info()->status &= ~TS_POLLING;
        /*
         * TS_POLLING-cleared state must be visible before we test
         * NEED_RESCHED:
         */
        smp_mb();
    }

    if (unlikely(need_resched())) {
        current_thread_info()->status |= TS_POLLING;
        local_irq_enable();
        return -EINVAL;
    }

    /*
     * Must be done before busmaster disable as we might need to
     * access HPET !
     */
    lapic_timer_state_broadcast(pr, cx, 1);

    if (cx->type == ACPI_STATE_C3)
        ACPI_FLUSH_CPU_CACHE();

    kt1 = ktime_get_real();
    /* Tell the scheduler that we are going deep-idle: */
    sched_clock_idle_sleep_event();
    acpi_idle_do_entry(cx);
    kt2 = ktime_get_real();
    idle_time = ktime_to_us(ktime_sub(kt2, kt1));
    sleep_ticks = us_to_pm_timer_ticks(idle_time);

    /* Update device last_residency */
    dev->last_residency = (int)idle_time;

    /* Tell the scheduler how much we idled: */
    sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

    local_irq_enable();
    current_thread_info()->status |= TS_POLLING;

    cx->usage++;

    lapic_timer_state_broadcast(pr, cx, 0);
    cx->time += idle_time;
    return index;
}
Example 13: omap3_enter_idle
//... the beginning of this example's code has been omitted ...
#endif
        }

        /* Restore CAM and SGX. */
        if (PRCM_ON == cur_state.cam_state)
            prcm_transition_domain_to(DOM_CAM, PRCM_ON);
        if (PRCM_ON == cur_state.sgx_state)
            prcm_transition_domain_to(DOM_SGX, PRCM_ON);

        /* If we lost CORE context, restore it.
         */
        if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
#ifdef CONFIG_OMAP34XX_OFFMODE
            context_restore_update(DOM_CORE1);
#endif
            prcm_restore_registers();
            prcm_restore_core_context(target_state.core_state);
#ifdef CONFIG_CORE_OFF
            omap3_restore_core_settings();
#endif
        }

        /* Restore DSS settings */
        if (omap2_disp_lpr_is_enabled()) {
            PM_WKEN_DSS = pm_wken_dss;
            PM_PWSTCTRL_DSS = pm_pwstctrl_dss;
            CM_CLKSTCTRL_DSS = cm_clkstctrl_dss;
            CM_FCLKEN_DSS = cm_fclken_dss;
            CM_ICLKEN_DSS = cm_iclken_dss;
            CM_AUTOIDLE_DSS = cm_autoidle_dss;
        }

        /* At this point CORE and PER domain are back. We can release
         * the console if we have it.
         */
        if (got_console_lock) {
            release_console_sem();
        }

#ifdef CONFIG_OMAP_32K_TIMER
        /* Errata 1.4
         * If a General Purpose Timer (GPTimer) is in posted mode
         * (TSIRC.POSTED=1), due to internal resynchronizations, values
         * read in TCRR, TCAR1 and TCAR2 registers right after the
         * timer interface clock (L4) goes from stopped to active may
         * not return the expected values. The most common event
         * leading to this situation occurs upon wake up from idle.
         *
         * Software has to wait at least (2 timer interface clock
         * cycles + 1 timer functional clock cycle) after L4 clock
         * wakeup before reading TCRR, TCAR1 or TCAR2 registers for
         * GPTimers in POSTED internal synchronization mode, and
         * before reading WCRR register of the Watchdog timers. The
         * same workaround must be applied before reading CR and
         * 32KSYNCNT_REV registers of the synctimer module.
         *
         * Wait Period = 2 timer interface clock cycles +
         *               1 timer functional clock cycle
         * Interface clock = L4 clock (50MHz worst case).
         * Functional clock = 32KHz
         * Wait Period = 2*10^-6/50 + 1/32768 = 0.000030557 = 30.557us
         * Rounding off the delay value to a safer 50us.
         */
        udelay(GPTIMER_WAIT_DELAY);
#endif
        /* Disable autoidling of GPT1.
         */
        CM_AUTOIDLE_WKUP &= ~(0x1);
    }

    DPRINTK("MPU state:%x, CORE state:%x\n",
        PM_PREPWSTST_MPU, PM_PREPWSTST_CORE);

    /* Do wakeup event checks */
    post_uart_activity();

    /* Update stats for sysfs entries.
     */
    store_prepwst();

return_sleep_time:
    getnstimeofday(&ts_postidle);
#if defined(CONFIG_SYSFS) && defined(DEBUG_BAIL_STATS)
    ts_last_wake_up = ts_postidle;
#endif
    ts_idle = timespec_sub(ts_postidle, ts_preidle);

    if (cx->type > OMAP3_STATE_C2)
        sched_clock_idle_wakeup_event(timespec_to_ns(&ts_idle));

    DEBUG_STATE_PRINT(core_sleep_flg);

    local_irq_enable();
    local_fiq_enable();

    return (u32)timespec_to_ns(&ts_idle)/1000;
}