This page collects and summarizes typical usage examples of the ACPI_FLUSH_CPU_CACHE macro in C/C++. If you have been wondering what ACPI_FLUSH_CPU_CACHE does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.
The following presents 15 code examples of ACPI_FLUSH_CPU_CACHE, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
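Before diving into the examples, it helps to know what ACPI_FLUSH_CPU_CACHE actually is: a platform-supplied macro, not a true function. The ACPI specification requires CPU caches to be written back before entering C3 or a system sleep state, because hardware may not keep them coherent while the processor sleeps. A minimal sketch of a typical definition, assuming an x86 target (an assumption; check your platform headers, e.g. arch/x86/include/asm/acpi.h on Linux):

/*
 * Sketch only: the real definition is selected per architecture by the
 * ACPICA environment headers.
 */
#ifdef CONFIG_X86
#define ACPI_FLUSH_CPU_CACHE()	wbinvd()	/* write back and invalidate all caches */
#else
#define ACPI_FLUSH_CPU_CACHE()			/* no-op where the platform needs none */
#endif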
Example 1: acpi_idle_play_dead
/**
* acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
* @dev: the target CPU
* @index: the index of suggested state
*/
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
ACPI_FLUSH_CPU_CACHE();
while (1) {
if (cx->entry_method == ACPI_CSTATE_HALT)
safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
inb(cx->address);
/* See comment in acpi_idle_do_entry() */
inl(acpi_gbl_FADT.xpm_timer_block.address);
} else
return -ENODEV;
}
/* Never reached */
return 0;
}
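The inb()/inl() pair in the SYSTEMIO branch mirrors acpi_idle_do_entry(), which the comment above references: reading the C-state's I/O port requests entry, and the dummy PM-timer read that follows gives the chipset time to assert STPCLK#. A simplified sketch of that helper, modeled on mainline Linux of the same era (illustrative, not verbatim):

static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into the architecture's MWAIT-based (FFH) C-state entry */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* I/O port based C-state (P_LVLx register read) */
		inb(cx->address);
		/*
		 * Dummy wait op: chipsets cannot guarantee that STPCLK# is
		 * asserted in time to freeze execution properly, so issue a
		 * harmless PM-timer read after the port read.
		 */
		inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}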
Example 2: acpi_sleep_prepare
static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
/* do we have a wakeup address for S3? */
if (acpi_state == ACPI_STATE_S3) {
if (!acpi_wakeup_address) {
return -EFAULT;
}
acpi_set_firmware_waking_vector((acpi_physical_address)
virt_to_phys((void *)
acpi_wakeup_address));
}
ACPI_FLUSH_CPU_CACHE();
acpi_enable_wakeup_device_prep(acpi_state);
#endif
printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n",
acpi_state);
acpi_enter_sleep_state_prep(acpi_state);
return 0;
}
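The interesting step in Example 2 is acpi_set_firmware_waking_vector(): it stores the physical address of the real-mode wakeup trampoline in the FACS table, where the BIOS looks for it when resuming from S3. A rough sketch of what the ACPICA call boils down to (an assumption based on ACPICA's hwsleep code; the real function also validates the FACS and handles locking):

acpi_status acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
{
	/* Program the 32-bit vector; firmware jumps here in real mode on wake */
	acpi_gbl_FACS->firmware_waking_vector = (u32) physical_address;

	/* Clear the 64-bit vector so firmware uses the 32-bit entry point */
	if (acpi_gbl_FACS->length > 32 && acpi_gbl_FACS->version >= 1) {
		acpi_gbl_FACS->xfirmware_waking_vector = 0;
	}
	return (AE_OK);
}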
Example 3: acpi_idle_play_dead
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
ACPI_FLUSH_CPU_CACHE();
while (1) {
if (cx->entry_method == ACPI_CSTATE_HALT)
safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
inb(cx->address);
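/* See comment in acpi_idle_do_entry() */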
inl(acpi_gbl_FADT.xpm_timer_block.address);
} else
return -ENODEV;
}
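/* Never reached */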
return 0;
}
Example 4: acpi_idle_enter
static int acpi_idle_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
struct acpi_processor *pr;
pr = __this_cpu_read(processors);
if (unlikely(!pr))
return -EINVAL;
if (cx->type != ACPI_STATE_C1) {
if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
index = ACPI_IDLE_STATE_START;
cx = per_cpu(acpi_cstate[index], dev->cpu);
} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
acpi_idle_enter_bm(pr, cx, true);
return index;
} else if (drv->safe_state_index >= 0) {
index = drv->safe_state_index;
cx = per_cpu(acpi_cstate[index], dev->cpu);
} else {
acpi_safe_halt();
return -EBUSY;
}
}
}
lapic_timer_state_broadcast(pr, cx, 1);
if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();
acpi_idle_do_entry(cx);
lapic_timer_state_broadcast(pr, cx, 0);
return index;
}
Example 5: acpi_enter_sleep_state
//......... portions of the code omitted here .........
/* Insert SLP_TYP bits */
PM1Acontrol |=
(acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position);
PM1Bcontrol |=
(acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position);
/*
* We split the writes of SLP_TYP and SLP_EN to work around
* poorly implemented hardware.
*/
/* Write #1: fill in SLP_TYP data */
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1A_CONTROL,
PM1Acontrol);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1B_CONTROL,
PM1Bcontrol);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Insert SLP_ENABLE bit */
PM1Acontrol |= sleep_enable_reg_info->access_bit_mask;
PM1Bcontrol |= sleep_enable_reg_info->access_bit_mask;
/* Write #2: SLP_TYP + SLP_EN */
ACPI_FLUSH_CPU_CACHE();
#if !(defined(CONFIG_XEN) && defined(CONFIG_X86))
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1A_CONTROL,
PM1Acontrol);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1B_CONTROL,
PM1Bcontrol);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
if (sleep_state > ACPI_STATE_S3) {
/*
* We wanted to sleep > S3, but it didn't happen (by virtue of the
* fact that we are still executing!)
*
* Wait ten seconds, then try again. This is to get S4/S5 to work on
* all machines.
*
* We wait so long to allow chipsets that poll this reg very slowly to
* still read the right value. Ideally, this block would go
* away entirely.
*/
acpi_os_stall(10000000);
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_CONTROL,
sleep_enable_reg_info->
access_bit_mask);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/* Wait until we enter sleep state */
do {
status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value,
ACPI_MTX_DO_NOT_LOCK);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Spin until we wake */
} while (!in_value);
#else
/* PV ACPI just needs to check the hypercall return value */
err = acpi_notify_hypervisor_state(sleep_state,
PM1Acontrol, PM1Bcontrol);
if (err) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Hypervisor failure [%d]\n", err));
return_ACPI_STATUS(AE_ERROR);
}
#endif
return_ACPI_STATUS(AE_OK);
}
Example 6: acpi_hw_legacy_sleep
//......... portions of the code omitted here .........
}
acpi_gbl_system_awake_and_running = FALSE;
status = acpi_hw_enable_all_wakeup_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Get current value of PM1A control */
status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
&pm1a_control);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"Entering sleep state [S%u]\n", sleep_state));
/* Clear the SLP_EN and SLP_TYP fields */
pm1a_control &= ~(sleep_type_reg_info->access_bit_mask |
sleep_enable_reg_info->access_bit_mask);
pm1b_control = pm1a_control;
/* Insert the SLP_TYP bits */
pm1a_control |=
(acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position);
pm1b_control |=
(acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position);
/*
* We split the writes of SLP_TYP and SLP_EN to work around
* poorly implemented hardware.
*/
/* Write #1: write the SLP_TYP data to the PM1 Control registers */
status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Insert the sleep enable (SLP_EN) bit */
pm1a_control |= sleep_enable_reg_info->access_bit_mask;
pm1b_control |= sleep_enable_reg_info->access_bit_mask;
/* Flush caches, as per ACPI specification */
ACPI_FLUSH_CPU_CACHE();
status = acpi_os_prepare_sleep(sleep_state, pm1a_control,
pm1b_control);
if (ACPI_SKIP(status))
return_ACPI_STATUS(AE_OK);
if (ACPI_FAILURE(status))
return_ACPI_STATUS(status);
/* Write #2: Write both SLP_TYP + SLP_EN */
status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
if (sleep_state > ACPI_STATE_S3) {
/*
* We wanted to sleep > S3, but it didn't happen (by virtue of the
* fact that we are still executing!)
*
* Wait ten seconds, then try again. This is to get S4/S5 to work on
* all machines.
*
* We wait so long to allow chipsets that poll this reg very slowly
* to still read the right value. Ideally, this block would go
* away entirely.
*/
acpi_os_stall(10000000);
status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
sleep_enable_reg_info->
access_bit_mask);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/* Wait for transition back to Working State */
do {
status =
acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
} while (!in_value);
return_ACPI_STATUS(AE_OK);
}
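Both PM1 control writes in Example 6 go through acpi_hw_write_pm1_control(), which hides the fact that some chipsets implement two PM1 control blocks. A sketch of that helper, modeled on ACPICA's hwregs.c (illustrative; consult your ACPICA version for the exact code):

acpi_status acpi_hw_write_pm1_control(u32 pm1a_control, u32 pm1b_control)
{
	acpi_status status;

	/* The PM1A control block always exists */
	status = acpi_hw_write(pm1a_control, &acpi_gbl_FADT.xpm1a_control_block);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* The PM1B control block is optional */
	if (acpi_gbl_FADT.xpm1b_control_block.address) {
		status = acpi_hw_write(pm1b_control,
				       &acpi_gbl_FADT.xpm1b_control_block);
	}
	return (status);
}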
Example 7: acpi_enter_sleep_state
acpi_status asmlinkage
acpi_enter_sleep_state (
u8 sleep_state)
{
u32 PM1Acontrol;
u32 PM1Bcontrol;
struct acpi_bit_register_info *sleep_type_reg_info;
struct acpi_bit_register_info *sleep_enable_reg_info;
u32 in_value;
acpi_status status;
ACPI_FUNCTION_TRACE ("acpi_enter_sleep_state");
if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
(acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
ACPI_REPORT_ERROR (("Sleep values out of range: A=%X B=%X\n",
acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
return_ACPI_STATUS (AE_AML_OPERAND_VALUE);
}
sleep_type_reg_info = acpi_hw_get_bit_register_info (ACPI_BITREG_SLEEP_TYPE_A);
sleep_enable_reg_info = acpi_hw_get_bit_register_info (ACPI_BITREG_SLEEP_ENABLE);
if (sleep_state != ACPI_STATE_S5) {
/* Clear wake status */
status = acpi_set_register (ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
status = acpi_hw_clear_acpi_status (ACPI_MTX_DO_NOT_LOCK);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
/* Disable BM arbitration */
status = acpi_set_register (ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
}
/*
* 1) Disable all runtime GPEs
* 2) Enable all wakeup GPEs
*/
status = acpi_hw_prepare_gpes_for_sleep ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
/* Get current value of PM1A control */
status = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1_CONTROL, &PM1Acontrol);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "Entering sleep state [S%d]\n", sleep_state));
/* Clear SLP_EN and SLP_TYP fields */
PM1Acontrol &= ~(sleep_type_reg_info->access_bit_mask | sleep_enable_reg_info->access_bit_mask);
PM1Bcontrol = PM1Acontrol;
/* Insert SLP_TYP bits */
PM1Acontrol |= (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position);
PM1Bcontrol |= (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position);
/*
* We split the writes of SLP_TYP and SLP_EN to work around
* poorly implemented hardware.
*/
/* Write #1: fill in SLP_TYP data */
status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1A_CONTROL, PM1Acontrol);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1B_CONTROL, PM1Bcontrol);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
/* Insert SLP_ENABLE bit */
PM1Acontrol |= sleep_enable_reg_info->access_bit_mask;
PM1Bcontrol |= sleep_enable_reg_info->access_bit_mask;
/* Write #2: SLP_TYP + SLP_EN */
ACPI_FLUSH_CPU_CACHE ();
status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1A_CONTROL, PM1Acontrol);
//......... portions of the code omitted here .........
Example 8: AcpiHwExtendedSleep
ACPI_STATUS
AcpiHwExtendedSleep (
UINT8 SleepState)
{
ACPI_STATUS Status;
UINT8 SleepTypeValue;
UINT64 SleepStatus;
ACPI_FUNCTION_TRACE (HwExtendedSleep);
/* Extended sleep registers must be valid */
if (!AcpiGbl_FADT.SleepControl.Address ||
!AcpiGbl_FADT.SleepStatus.Address)
{
return_ACPI_STATUS (AE_NOT_EXIST);
}
/* Clear wake status (WAK_STS) */
Status = AcpiWrite ((UINT64) ACPI_X_WAKE_STATUS, &AcpiGbl_FADT.SleepStatus);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
AcpiGbl_SystemAwakeAndRunning = FALSE;
/* Flush caches, as per ACPI specification */
ACPI_FLUSH_CPU_CACHE ();
/*
* Set the SLP_TYP and SLP_EN bits.
*
* Note: We only use the first value returned by the \_Sx method
* (AcpiGbl_SleepTypeA) - As per ACPI specification.
*/
ACPI_DEBUG_PRINT ((ACPI_DB_INIT,
"Entering sleep state [S%u]\n", SleepState));
SleepTypeValue = ((AcpiGbl_SleepTypeA << ACPI_X_SLEEP_TYPE_POSITION) &
ACPI_X_SLEEP_TYPE_MASK);
Status = AcpiWrite ((UINT64) (SleepTypeValue | ACPI_X_SLEEP_ENABLE),
&AcpiGbl_FADT.SleepControl);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
/* Wait for transition back to Working State */
do
{
Status = AcpiRead (&SleepStatus, &AcpiGbl_FADT.SleepStatus);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
} while (!(((UINT8) SleepStatus) & ACPI_X_WAKE_STATUS));
return_ACPI_STATUS (AE_OK);
}
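Unlike the legacy path, AcpiHwExtendedSleep programs the 64-bit sleep control/status registers that ACPI 5.0 added to the FADT, so a single AcpiWrite() carries both SLP_TYP and SLP_EN. For reference, the bit layout behind the ACPI_X_* constants is roughly the following (values as defined in ACPICA's headers; verify against your tree):

/* Assumed layout of the extended Sleep Control / Sleep Status registers: */
#define ACPI_X_WAKE_STATUS          0x80    /* WAK_STS, bit 7 of Sleep Status */
#define ACPI_X_SLEEP_TYPE_MASK      0x1C    /* SLP_TYP, bits 4:2 of Sleep Control */
#define ACPI_X_SLEEP_TYPE_POSITION  0x02
#define ACPI_X_SLEEP_ENABLE         0x20    /* SLP_EN, bit 5 of Sleep Control */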
Example 9: AcpiEnterSleepState
//......... portions of the code omitted here .........
Arg.Type = ACPI_TYPE_INTEGER;
Arg.Integer.Value = SleepState;
Status = AcpiEvaluateObject (NULL, METHOD_NAME__GTS, &ArgList, NULL);
if (ACPI_FAILURE (Status) && Status != AE_NOT_FOUND)
{
return_ACPI_STATUS (Status);
}
/* Get current value of PM1A control */
Status = AcpiHwRegisterRead (ACPI_REGISTER_PM1_CONTROL,
&Pm1aControl);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
ACPI_DEBUG_PRINT ((ACPI_DB_INIT,
"Entering sleep state [S%d]\n", SleepState));
/* Clear the SLP_EN and SLP_TYP fields */
Pm1aControl &= ~(SleepTypeRegInfo->AccessBitMask |
SleepEnableRegInfo->AccessBitMask);
Pm1bControl = Pm1aControl;
/* Insert the SLP_TYP bits */
Pm1aControl |= (AcpiGbl_SleepTypeA << SleepTypeRegInfo->BitPosition);
Pm1bControl |= (AcpiGbl_SleepTypeB << SleepTypeRegInfo->BitPosition);
/*
* We split the writes of SLP_TYP and SLP_EN to work around
* poorly implemented hardware.
*/
/* Write #1: write the SLP_TYP data to the PM1 Control registers */
Status = AcpiHwWritePm1Control (Pm1aControl, Pm1bControl);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
/* Insert the sleep enable (SLP_EN) bit */
Pm1aControl |= SleepEnableRegInfo->AccessBitMask;
Pm1bControl |= SleepEnableRegInfo->AccessBitMask;
/* Flush caches, as per ACPI specification */
ACPI_FLUSH_CPU_CACHE ();
/* Write #2: Write both SLP_TYP + SLP_EN */
Status = AcpiHwWritePm1Control (Pm1aControl, Pm1bControl);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
if (SleepState > ACPI_STATE_S3)
{
/*
* We wanted to sleep > S3, but it didn't happen (by virtue of the
* fact that we are still executing!)
*
* Wait ten seconds, then try again. This is to get S4/S5 to work on
* all machines.
*
* We wait so long to allow chipsets that poll this reg very slowly
* to still read the right value. Ideally, this block would go
* away entirely.
*/
AcpiOsStall (10000000);
Status = AcpiHwRegisterWrite (ACPI_REGISTER_PM1_CONTROL,
SleepEnableRegInfo->AccessBitMask);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
}
/* Wait until we enter sleep state */
do
{
Status = AcpiReadBitRegister (ACPI_BITREG_WAKE_STATUS, &InValue);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
/* Spin until we wake */
} while (!InValue);
return_ACPI_STATUS (AE_OK);
}
Example 10: acpi_cst_idle
/*
* Idle the CPU in the lowest state possible. This function is called with
* interrupts disabled. Note that once it re-enables interrupts, a task
* switch can occur so do not access shared data (i.e. the softc) after
* interrupts are re-enabled.
*/
static void
acpi_cst_idle(void)
{
struct acpi_cst_softc *sc;
struct acpi_cst_cx *cx_next;
union microtime_pcpu start, end;
int cx_next_idx, i, tdiff, bm_arb_disabled = 0;
/* If disabled, return immediately. */
if (acpi_cst_disable_idle) {
ACPI_ENABLE_IRQS();
return;
}
/*
* Look up our CPU id to get our softc. If it's NULL, we'll use C1
* since there is no Cx state for this processor.
*/
sc = acpi_cst_softc[mdcpu->mi.gd_cpuid];
if (sc == NULL) {
acpi_cst_c1_halt();
return;
}
/* Still probing; use C1 */
if (sc->cst_flags & ACPI_CST_FLAG_PROBING) {
acpi_cst_c1_halt();
return;
}
/* Find the lowest state that has small enough latency. */
cx_next_idx = 0;
for (i = sc->cst_cx_lowest; i >= 0; i--) {
if (sc->cst_cx_states[i].trans_lat * 3 <= sc->cst_prev_sleep) {
cx_next_idx = i;
break;
}
}
/*
* Check for bus master activity if needed for the selected state.
* If there was activity, clear the bit and use the lowest non-C3 state.
*/
cx_next = &sc->cst_cx_states[cx_next_idx];
if (cx_next->flags & ACPI_CST_CX_FLAG_BM_STS) {
int bm_active;
AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
if (bm_active != 0) {
AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
cx_next_idx = sc->cst_non_c3;
}
}
/* Select the next state and update statistics. */
cx_next = &sc->cst_cx_states[cx_next_idx];
sc->cst_cx_stats[cx_next_idx]++;
KASSERT(cx_next->type != ACPI_STATE_C0, ("C0 sleep"));
/*
* Execute HLT (or equivalent) and wait for an interrupt. We can't
* calculate the time spent in C1 since the place we wake up is an
* ISR. Assume we slept half of quantum and return.
*/
if (cx_next->type == ACPI_STATE_C1) {
sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + 500000 / hz) / 4;
cx_next->enter(cx_next);
return;
}
/* Execute the proper preamble before enter the selected state. */
if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_BM_ARB) {
AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
bm_arb_disabled = 1;
} else if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_WBINVD) {
ACPI_FLUSH_CPU_CACHE();
}
/*
* Enter the selected state and check time spent asleep.
*/
microtime_pcpu_get(&start);
cpu_mfence();
cx_next->enter(cx_next);
cpu_mfence();
microtime_pcpu_get(&end);
/* Enable bus master arbitration, if it was disabled. */
if (bm_arb_disabled)
AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
ACPI_ENABLE_IRQS();
//......... portions of the code omitted here .........
Example 11: acpi_idle_enter_bm
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
ktime_t kt1, kt2;
s64 idle_time_ns;
s64 idle_time;
pr = __this_cpu_read(processors);
dev->last_residency = 0;
if (unlikely(!pr))
return -EINVAL;
if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
if (drv->safe_state_index >= 0) {
return drv->states[drv->safe_state_index].enter(dev,
drv, drv->safe_state_index);
} else {
local_irq_disable();
acpi_safe_halt();
local_irq_enable();
return -EINVAL;
}
}
local_irq_disable();
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
smp_mb();
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
local_irq_enable();
return -EINVAL;
}
}
acpi_unlazy_tlb(smp_processor_id());
sched_clock_idle_sleep_event();
lapic_timer_state_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
if (pr->flags.bm_check && pr->flags.bm_control) {
raw_spin_lock(&c3_lock);
c3_cpu_count++;
if (c3_cpu_count == num_online_cpus())
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
raw_spin_unlock(&c3_lock);
} else if (!pr->flags.bm_check) {
ACPI_FLUSH_CPU_CACHE();
}
acpi_idle_do_entry(cx);
if (pr->flags.bm_check && pr->flags.bm_control) {
raw_spin_lock(&c3_lock);
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
c3_cpu_count--;
raw_spin_unlock(&c3_lock);
}
kt2 = ktime_get_real();
idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
idle_time = idle_time_ns;
do_div(idle_time, NSEC_PER_USEC);
dev->last_residency = (int)idle_time;
sched_clock_idle_wakeup_event(idle_time_ns);
local_irq_enable();
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
cx->usage++;
lapic_timer_state_broadcast(pr, cx, 0);
cx->time += idle_time;
return index;
}
Example 12: acpicpu_cstate_idle
/*
* The main idle loop.
*/
void
acpicpu_cstate_idle(void)
{
struct cpu_info *ci = curcpu();
struct acpicpu_softc *sc;
int state;
KASSERT(acpicpu_sc != NULL);
KASSERT(ci->ci_acpiid < maxcpus);
sc = acpicpu_sc[ci->ci_acpiid];
if (__predict_false(sc == NULL))
return;
KASSERT(ci->ci_ilevel == IPL_NONE);
KASSERT((sc->sc_flags & ACPICPU_FLAG_C) != 0);
if (__predict_false(sc->sc_cold != false))
return;
if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0))
return;
state = acpicpu_cstate_latency(sc);
mutex_exit(&sc->sc_mtx);
/*
* Apply AMD C1E quirk.
*/
if ((sc->sc_flags & ACPICPU_FLAG_C_C1E) != 0)
acpicpu_md_quirk_c1e();
/*
* Check for bus master activity. Note that usb(4) in particular
* causes high activity, which may prevent the use of C3 states.
*/
if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) {
if (acpicpu_cstate_bm_check() != false)
state--;
if (__predict_false(sc->sc_cstate[state].cs_method == 0))
state = ACPI_STATE_C1;
}
KASSERT(state != ACPI_STATE_C0);
if (state != ACPI_STATE_C3) {
acpicpu_cstate_idle_enter(sc, state);
return;
}
/*
* On all recent (Intel) CPUs caches are shared
* by CPUs and bus master control is required to
* keep these coherent while in C3. Flushing the
* CPU caches is only the last resort.
*/
if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0)
ACPI_FLUSH_CPU_CACHE();
/*
* Allow the bus master to request that any given
* CPU should return immediately to C0 from C3.
*/
if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
/*
* It may be necessary to disable bus master arbitration
* to ensure that bus master cycles do not occur while
* sleeping in C3 (see ACPI 4.0, section 8.1.4).
*/
if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
acpicpu_cstate_idle_enter(sc, state);
/*
* Disable bus master wake and re-enable the arbiter.
*/
if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
}
Example 13: acpi_idle_enter_bm
/**
* acpi_idle_enter_bm - enters C3 with proper BM handling
* @dev: the target CPU
* @state: the state data
*
* If BM is detected, the deepest non-C3 idle state is entered instead.
*/
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct cpuidle_state *state)
{
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
u32 t1, t2;
int sleep_ticks = 0;
pr = __get_cpu_var(processors);
if (unlikely(!pr))
return 0;
if (acpi_idle_suspend)
return(acpi_idle_enter_c1(dev, state));
if (acpi_idle_bm_check()) {
if (dev->safe_state) {
dev->last_state = dev->safe_state;
return dev->safe_state->enter(dev, dev->safe_state);
} else {
local_irq_disable();
acpi_safe_halt();
local_irq_enable();
return 0;
}
}
local_irq_disable();
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we test
* NEED_RESCHED:
*/
smp_mb();
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
local_irq_enable();
return 0;
}
acpi_unlazy_tlb(smp_processor_id());
/* Tell the scheduler that we are going deep-idle: */
sched_clock_idle_sleep_event();
/*
* Must be done before busmaster disable as we might need to
* access HPET!
*/
acpi_state_timer_broadcast(pr, cx, 1);
/*
* disable bus master
* bm_check implies we need ARB_DIS
* !bm_check implies we need cache flush
* bm_control implies whether we can do ARB_DIS
*
* That leaves a case where bm_check is set and bm_control is
* not set. In that case we cannot do much, we enter C3
* without doing anything.
*/
if (pr->flags.bm_check && pr->flags.bm_control) {
spin_lock(&c3_lock);
c3_cpu_count++;
/* Disable bus master arbitration when all CPUs are in C3 */
if (c3_cpu_count == num_online_cpus())
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
spin_unlock(&c3_lock);
} else if (!pr->flags.bm_check) {
ACPI_FLUSH_CPU_CACHE();
}
t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
acpi_idle_do_entry(cx);
t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
/* Re-enable bus master arbitration */
if (pr->flags.bm_check && pr->flags.bm_control) {
spin_lock(&c3_lock);
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
c3_cpu_count--;
spin_unlock(&c3_lock);
}
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
/* TSC could halt in idle, so notify users */
if (tsc_halts_in_c(ACPI_STATE_C3))
mark_tsc_unstable("TSC halts in idle");
#endif
sleep_ticks = ticks_elapsed(t1, t2);
/* Tell the scheduler how much we idled: */
sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
//......... portions of the code omitted here .........
Example 14: acpi_idle_enter_simple
/**
* acpi_idle_enter_simple - enters an ACPI state without BM handling
* @dev: the target CPU
* @state: the state data
*/
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct cpuidle_state *state)
{
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
u32 t1, t2;
int sleep_ticks = 0;
pr = __get_cpu_var(processors);
if (unlikely(!pr))
return 0;
if (acpi_idle_suspend)
return(acpi_idle_enter_c1(dev, state));
local_irq_disable();
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we test
* NEED_RESCHED:
*/
smp_mb();
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
local_irq_enable();
return 0;
}
/*
* Must be done before busmaster disable as we might need to
* access HPET!
*/
acpi_state_timer_broadcast(pr, cx, 1);
if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();
t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
/* Tell the scheduler that we are going deep-idle: */
sched_clock_idle_sleep_event();
acpi_idle_do_entry(cx);
t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
/* TSC could halt in idle, so notify users */
if (tsc_halts_in_c(cx->type))
mark_tsc_unstable("TSC halts in idle");
#endif
sleep_ticks = ticks_elapsed(t1, t2);
/* Tell the scheduler how much we idled: */
sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
local_irq_enable();
current_thread_info()->status |= TS_POLLING;
cx->usage++;
acpi_state_timer_broadcast(pr, cx, 0);
cx->time += sleep_ticks;
return ticks_elapsed_in_us(t1, t2);
}
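Examples 13 and 14 measure idle residency with the ACPI PM timer, which is only 24 or 32 bits wide and wraps frequently, so the raw difference must account for overflow. A sketch of the ticks_elapsed() helper they call, modeled on the processor_idle.c of kernels from that period (illustrative, not verbatim):

static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		/* 24-bit PM timer wrapped around */
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		/* 32-bit PM timer wrapped around */
		return ((0xFFFFFFFF - t1) + t2);
}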
Example 15: acpi_hw_legacy_sleep
acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
{
struct acpi_bit_register_info *sleep_type_reg_info;
struct acpi_bit_register_info *sleep_enable_reg_info;
u32 pm1a_control;
u32 pm1b_control;
u32 in_value;
acpi_status status;
ACPI_FUNCTION_TRACE(hw_legacy_sleep);
sleep_type_reg_info =
acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE);
sleep_enable_reg_info =
acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_ENABLE);
status =
acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_hw_clear_acpi_status();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
if (sleep_state != ACPI_STATE_S5) {
status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
return_ACPI_STATUS(status);
}
}
status = acpi_hw_disable_all_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
acpi_gbl_system_awake_and_running = FALSE;
status = acpi_hw_enable_all_wakeup_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
if (flags & ACPI_EXECUTE_GTS) {
acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
}
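/* Get current value of PM1A control */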
status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
&pm1a_control);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"Entering sleep state [S%u]\n", sleep_state));
pm1a_control &= ~(sleep_type_reg_info->access_bit_mask |
sleep_enable_reg_info->access_bit_mask);
pm1b_control = pm1a_control;
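/* Insert the SLP_TYP bits */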
pm1a_control |=
(acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position);
pm1b_control |=
(acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position);
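/*
 * Write #1: SLP_TYP only. SLP_TYP and SLP_EN are written separately
 * to work around poorly implemented hardware.
 */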
status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
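/* Insert the sleep enable (SLP_EN) bit */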
pm1a_control |= sleep_enable_reg_info->access_bit_mask;
pm1b_control |= sleep_enable_reg_info->access_bit_mask;
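/* Flush caches, as per ACPI specification */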
ACPI_FLUSH_CPU_CACHE();
status = acpi_os_prepare_sleep(sleep_state, pm1a_control,
pm1b_control);
if (ACPI_SKIP(status))
return_ACPI_STATUS(AE_OK);
if (ACPI_FAILURE(status))
//......... portions of the code omitted here .........