Merge branch 'acpi-processor'
* acpi-processor:
  ACPI / cpuidle: Common callback routine for entering states
  ACPI / cpuidle: Merge acpi_idle_enter_c1() and acpi_idle_enter_simple()
  ACPI / cpuidle: Drop flags.bm_check tests from acpi_idle_enter_bm()
  ACPI / cpuidle: Clean up white space in a switch statement
  ACPI / cpuidle: Drop irrelevant comment from acpi_idle_enter_simple()
  ACPI / cpuidle: Clean up fallback to C1 checks
  ACPI / cpuidle: Drop unnecessary calls from ->enter callback routines
  ACPI / cpuidle: Drop unnecessary calls from acpi_idle_do_entry()
Commit d232096802
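All changes below are to drivers/acpi/processor_idle.c. The series collapses the driver's three cpuidle ->enter callbacks (acpi_idle_enter_c1(), acpi_idle_enter_simple() and acpi_idle_enter_bm()) into a single common callback, acpi_idle_enter(), turns the bus-master-sensitive path into an internal helper, and moves the CPU-hotplug fallback test into a new helper, acpi_idle_fallback_to_c1(). A condensed view of the net effect on state registration, taken from the last hunk below:

/* Before: the callback depended on the C-state type. */
state->enter = acpi_idle_enter_c1;                           /* C1 */
state->enter = acpi_idle_enter_simple;                       /* C2 */
state->enter = pr->flags.bm_check ? acpi_idle_enter_bm
                                  : acpi_idle_enter_simple;  /* C3 */

/* After: one callback for every state. */
state->enter = acpi_idle_enter;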
@@ -681,15 +681,13 @@ static int acpi_idle_bm_check(void)
 }
 
 /**
- * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
+ * acpi_idle_do_entry - enter idle state using the appropriate method
  * @cx: cstate data
  *
  * Caller disables interrupt before call and enables interrupt after return.
  */
-static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
+static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
-        /* Don't trace irqs off for idle */
-        stop_critical_timings();
         if (cx->entry_method == ACPI_CSTATE_FFH) {
                 /* Call into architectural FFH based C-state */
                 acpi_processor_ffh_cstate_enter(cx);
@@ -703,37 +701,8 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
            gets asserted in time to freeze execution properly. */
                 inl(acpi_gbl_FADT.xpm_timer_block.address);
         }
-        start_critical_timings();
 }
 
-/**
- * acpi_idle_enter_c1 - enters an ACPI C1 state-type
- * @dev: the target CPU
- * @drv: cpuidle driver containing cpuidle state info
- * @index: index of target state
- *
- * This is equivalent to the HALT instruction.
- */
-static int acpi_idle_enter_c1(struct cpuidle_device *dev,
-                              struct cpuidle_driver *drv, int index)
-{
-        struct acpi_processor *pr;
-        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
-        pr = __this_cpu_read(processors);
-
-        if (unlikely(!pr))
-                return -EINVAL;
-
-        lapic_timer_state_broadcast(pr, cx, 1);
-        acpi_idle_do_entry(cx);
-
-        lapic_timer_state_broadcast(pr, cx, 0);
-
-        return index;
-}
-
-
 /**
  * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
  * @dev: the target CPU
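Taken together, the two hunks above leave acpi_idle_do_entry() looking as follows. This listing is a reconstruction for reference: the middle of the function (the HALT and I/O-port entry paths) is unchanged context that the diff does not show, recalled here from the surrounding kernel source, so treat it as illustrative rather than quoted.

static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
        if (cx->entry_method == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cx);
        } else if (cx->entry_method == ACPI_CSTATE_HALT) {
                acpi_safe_halt();
        } else {
                /* IO port based C-state */
                inb(cx->address);
                /* Dummy wait op - must do something useless after P_LVL2 read
                   because chipsets cannot guarantee that STPCLK# signal
                   gets asserted in time to freeze execution properly. */
                inl(acpi_gbl_FADT.xpm_timer_block.address);
        }
}

The stop_critical_timings()/start_critical_timings() pair and the irq-tracing comment are dropped from the function body (per the series' "Drop unnecessary calls" patches), and the inline keyword goes away, leaving the inlining decision to the compiler.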
@@ -761,47 +730,11 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
         return 0;
 }
 
-/**
- * acpi_idle_enter_simple - enters an ACPI state without BM handling
- * @dev: the target CPU
- * @drv: cpuidle driver with cpuidle state information
- * @index: the index of suggested state
- */
-static int acpi_idle_enter_simple(struct cpuidle_device *dev,
-                                  struct cpuidle_driver *drv, int index)
+static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
 {
-        struct acpi_processor *pr;
-        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
-        pr = __this_cpu_read(processors);
-
-        if (unlikely(!pr))
-                return -EINVAL;
-
-#ifdef CONFIG_HOTPLUG_CPU
-        if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
-            !pr->flags.has_cst &&
-            !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
-                return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
-#endif
-
-        /*
-         * Must be done before busmaster disable as we might need to
-         * access HPET !
-         */
-        lapic_timer_state_broadcast(pr, cx, 1);
-
-        if (cx->type == ACPI_STATE_C3)
-                ACPI_FLUSH_CPU_CACHE();
-
-        /* Tell the scheduler that we are going deep-idle: */
-        sched_clock_idle_sleep_event();
-        acpi_idle_do_entry(cx);
-
-        sched_clock_idle_wakeup_event(0);
-
-        lapic_timer_state_broadcast(pr, cx, 0);
-        return index;
+        return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 &&
+                !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) &&
+                !pr->flags.has_cst;
 }
 
 static int c3_cpu_count;
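The new helper centralizes the fallback test that both deleted functions open-coded under #ifdef CONFIG_HOTPLUG_CPU; the cx->type != ACPI_STATE_C1 half of the old condition moves to the caller, as the acpi_idle_enter() hunk further below shows. IS_ENABLED() expands to a compile-time constant 1 or 0 (see include/linux/kconfig.h), so when CONFIG_HOTPLUG_CPU is off the && chain is constant-false and the compiler discards the dead code, while the expression is still parsed and type-checked in every configuration. The helper again, annotated with that reading:

static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
        /*
         * Fall back to C1 only when CPUs are hotpluggable, more than one
         * CPU is online, the FADT does not advertise C2 as MP-safe, and
         * the processor provides no _CST method.
         */
        return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 &&
                !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) &&
                !pr->flags.has_cst;
}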
@@ -809,44 +742,14 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
 
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
- * @dev: the target CPU
- * @drv: cpuidle driver containing state data
- * @index: the index of suggested state
- *
- * If BM is detected, the deepest non-C3 idle state is entered instead.
+ * @pr: Target processor
+ * @cx: Target state context
  */
-static int acpi_idle_enter_bm(struct cpuidle_device *dev,
-                              struct cpuidle_driver *drv, int index)
+static void acpi_idle_enter_bm(struct acpi_processor *pr,
+                               struct acpi_processor_cx *cx)
 {
-        struct acpi_processor *pr;
-        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
-        pr = __this_cpu_read(processors);
-
-        if (unlikely(!pr))
-                return -EINVAL;
-
-#ifdef CONFIG_HOTPLUG_CPU
-        if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
-            !pr->flags.has_cst &&
-            !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
-                return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
-#endif
-
-        if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
-                if (drv->safe_state_index >= 0) {
-                        return drv->states[drv->safe_state_index].enter(dev,
-                                                drv, drv->safe_state_index);
-                } else {
-                        acpi_safe_halt();
-                        return -EBUSY;
-                }
-        }
-
         acpi_unlazy_tlb(smp_processor_id());
 
-        /* Tell the scheduler that we are going deep-idle: */
-        sched_clock_idle_sleep_event();
         /*
          * Must be done before busmaster disable as we might need to
          * access HPET !
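Why the next hunk can drop the flags.bm_check tests: acpi_idle_enter_bm() stops being a cpuidle callback, and its only caller, the common acpi_idle_enter() added below, checks bm_check and runs the BM_STS check before calling it. The guard, quoted from that hunk:

        } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
                if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
                        acpi_idle_enter_bm(pr, cx);
                        return index;
                }

With bm_check guaranteed true on entry, the old "else if (!pr->flags.bm_check)" cache-flush branch can never run; C3 entry without bm_check now takes the common path instead, where acpi_idle_enter() performs the ACPI_FLUSH_CPU_CACHE().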
@@ -856,37 +759,71 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
         /*
          * disable bus master
          * bm_check implies we need ARB_DIS
-         * !bm_check implies we need cache flush
          * bm_control implies whether we can do ARB_DIS
          *
          * That leaves a case where bm_check is set and bm_control is
          * not set. In that case we cannot do much, we enter C3
          * without doing anything.
          */
-        if (pr->flags.bm_check && pr->flags.bm_control) {
+        if (pr->flags.bm_control) {
                 raw_spin_lock(&c3_lock);
                 c3_cpu_count++;
                 /* Disable bus master arbitration when all CPUs are in C3 */
                 if (c3_cpu_count == num_online_cpus())
                         acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
                 raw_spin_unlock(&c3_lock);
-        } else if (!pr->flags.bm_check) {
-                ACPI_FLUSH_CPU_CACHE();
         }
 
         acpi_idle_do_entry(cx);
 
         /* Re-enable bus master arbitration */
-        if (pr->flags.bm_check && pr->flags.bm_control) {
+        if (pr->flags.bm_control) {
                 raw_spin_lock(&c3_lock);
                 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
                 c3_cpu_count--;
                 raw_spin_unlock(&c3_lock);
         }
 
-        sched_clock_idle_wakeup_event(0);
-
+        lapic_timer_state_broadcast(pr, cx, 0);
+}
+
+static int acpi_idle_enter(struct cpuidle_device *dev,
+                           struct cpuidle_driver *drv, int index)
+{
+        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
+        struct acpi_processor *pr;
+
+        pr = __this_cpu_read(processors);
+        if (unlikely(!pr))
+                return -EINVAL;
+
+        if (cx->type != ACPI_STATE_C1) {
+                if (acpi_idle_fallback_to_c1(pr)) {
+                        index = CPUIDLE_DRIVER_STATE_START;
+                        cx = per_cpu(acpi_cstate[index], dev->cpu);
+                } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
+                        if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
+                                acpi_idle_enter_bm(pr, cx);
+                                return index;
+                        } else if (drv->safe_state_index >= 0) {
+                                index = drv->safe_state_index;
+                                cx = per_cpu(acpi_cstate[index], dev->cpu);
+                        } else {
+                                acpi_safe_halt();
+                                return -EBUSY;
+                        }
+                }
+        }
+
+        lapic_timer_state_broadcast(pr, cx, 1);
+
+        if (cx->type == ACPI_STATE_C3)
+                ACPI_FLUSH_CPU_CACHE();
+
+        acpi_idle_do_entry(cx);
 
         lapic_timer_state_broadcast(pr, cx, 0);
 
         return index;
 }
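Assembled from the added lines above, the new common callback reads as a whole (reconstructed here for readability; every line is taken from the plus side of this hunk):

static int acpi_idle_enter(struct cpuidle_device *dev,
                           struct cpuidle_driver *drv, int index)
{
        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
        struct acpi_processor *pr;

        pr = __this_cpu_read(processors);
        if (unlikely(!pr))
                return -EINVAL;

        if (cx->type != ACPI_STATE_C1) {
                if (acpi_idle_fallback_to_c1(pr)) {
                        index = CPUIDLE_DRIVER_STATE_START;
                        cx = per_cpu(acpi_cstate[index], dev->cpu);
                } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
                        if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
                                acpi_idle_enter_bm(pr, cx);
                                return index;
                        } else if (drv->safe_state_index >= 0) {
                                index = drv->safe_state_index;
                                cx = per_cpu(acpi_cstate[index], dev->cpu);
                        } else {
                                acpi_safe_halt();
                                return -EBUSY;
                        }
                }
        }

        lapic_timer_state_broadcast(pr, cx, 1);

        if (cx->type == ACPI_STATE_C3)
                ACPI_FLUSH_CPU_CACHE();

        acpi_idle_do_entry(cx);

        lapic_timer_state_broadcast(pr, cx, 0);

        return index;
}

C1 and other BM-insensitive requests take the common tail (LAPIC timer broadcast, a cache flush for C3, then acpi_idle_do_entry()), while the BM-sensitive C3 path hands off to acpi_idle_enter_bm() and returns early; that helper does its own broadcast setup in unchanged context just above this hunk and its own teardown at its end. Note also that ARB_DIS is reference-counted under c3_lock: bus-master arbitration is disabled only when the last online CPU enters C3 and re-enabled as soon as any CPU leaves.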
@@ -981,27 +918,12 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
                 strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
                 state->exit_latency = cx->latency;
                 state->target_residency = cx->latency * latency_factor;
+                state->enter = acpi_idle_enter;
 
                 state->flags = 0;
-                switch (cx->type) {
-                        case ACPI_STATE_C1:
-
-                        state->enter = acpi_idle_enter_c1;
+                if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
                         state->enter_dead = acpi_idle_play_dead;
                         drv->safe_state_index = count;
-                        break;
-
-                        case ACPI_STATE_C2:
-                        state->enter = acpi_idle_enter_simple;
-                        state->enter_dead = acpi_idle_play_dead;
-                        drv->safe_state_index = count;
-                        break;
-
-                        case ACPI_STATE_C3:
-                        state->enter = pr->flags.bm_check ?
-                                        acpi_idle_enter_bm :
-                                        acpi_idle_enter_simple;
-                        break;
                 }
 
                 count++;
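For context on the calling convention all of this standardizes: the cpuidle core picks a state index via its governor and calls the driver's ->enter callback, treating the return value as the index of the state actually entered, or a negative error code. A minimal illustrative sketch, not quoted from the core source (the enter_state_sketch() wrapper is hypothetical; the callback signature is the one registered above):

/* Illustrative only: roughly how the cpuidle core uses ->enter. */
static int enter_state_sketch(struct cpuidle_driver *drv,
                              struct cpuidle_device *dev, int index)
{
        struct cpuidle_state *target = &drv->states[index];

        /*
         * acpi_idle_enter() may enter a shallower state than requested
         * (fallback to C1 or to the driver's safe state) and reports
         * which one via its return value, or fails with e.g. -EBUSY.
         */
        return target->enter(dev, drv, index);
}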