intel-idle: convert to x86_cpu_id auto probing

With this, the driver should be automatically loaded on suitable
systems by udev.

The old switch () is replaced with a table-based approach, which
also cleans up the code.

Cc: Len Brown <lenb@kernel.org>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Thomas Renninger <trenn@suse.de>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Andi Kleen 2012-01-26 00:09:07 +01:00, committed by Greg Kroah-Hartman
Parent: 3bd391f056
Commit: b66b8b9a4a
1 changed file with 66 additions and 50 deletions
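The conversion relies on the generic x86 CPU auto-probing support added by the parent commit (3bd391f056). The sketch below is a minimal, hypothetical module illustrating that pattern, not code from this patch; the names my_cpu_data, my_driver_ids and my_driver_init are invented placeholders. A driver publishes an x86_cpu_id table with MODULE_DEVICE_TABLE(x86cpu, ...), which lets udev autoload the module from the CPU's modalias, and then calls x86_match_cpu() at init time to pick up per-model driver_data, replacing a hand-written switch on boot_cpu_data.x86_model.

/* Minimal sketch of the x86_cpu_id auto-probe pattern (hypothetical driver). */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/cpu_device_id.h>  /* struct x86_cpu_id, x86_match_cpu() */

struct my_cpu_data {            /* per-model parameters, like struct idle_cpu below */
        unsigned long flags;
};

static const struct my_cpu_data data_snb = { .flags = 0x1 };

/* vendor, family, model, required feature; driver_data points at per-model data */
static const struct x86_cpu_id my_driver_ids[] = {
        { X86_VENDOR_INTEL, 6, 0x2a, X86_FEATURE_MWAIT, (unsigned long)&data_snb },
        {}                      /* terminating empty entry */
};
/* Exported as modalias information so depmod/udev can autoload the module. */
MODULE_DEVICE_TABLE(x86cpu, my_driver_ids);

static int __init my_driver_init(void)
{
        const struct x86_cpu_id *id = x86_match_cpu(my_driver_ids);

        if (!id)                /* running CPU is not in the table */
                return -ENODEV;

        /* per-model data replaces the old switch (boot_cpu_data.x86_model) */
        pr_info("matched, flags=%#lx\n",
                ((const struct my_cpu_data *)id->driver_data)->flags);
        return 0;
}
module_init(my_driver_init);

MODULE_LICENSE("GPL");

The table enables autoloading where the old switch could not because MODULE_DEVICE_TABLE() emits the entries as module alias information that depmod records in modules.alias, whereas a switch statement is invisible outside the compiled function.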


@@ -62,6 +62,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
+#include <asm/cpu_device_id.h>
 #include <asm/mwait.h>
 #include <asm/msr.h>
@@ -81,18 +82,23 @@ static unsigned int mwait_substates;
 /* Reliable LAPIC Timer States, bit 1 for C1 etc. */
 static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
 
+struct idle_cpu {
+        struct cpuidle_state *state_table;
+
+        /*
+         * Hardware C-state auto-demotion may not always be optimal.
+         * Indicate which enable bits to clear here.
+         */
+        unsigned long auto_demotion_disable_flags;
+};
+
+static const struct idle_cpu *icpu;
 static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
 static int intel_idle(struct cpuidle_device *dev,
                 struct cpuidle_driver *drv, int index);
 
 static struct cpuidle_state *cpuidle_state_table;
 
-/*
- * Hardware C-state auto-demotion may not always be optimal.
- * Indicate which enable bits to clear here.
- */
-static unsigned long long auto_demotion_disable_flags;
-
 /*
  * Set this flag for states where the HW flushes the TLB for us
  * and so we don't need cross-calls to keep it consistent.
@@ -319,27 +325,72 @@ static void auto_demotion_disable(void *dummy)
         unsigned long long msr_bits;
 
         rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
-        msr_bits &= ~auto_demotion_disable_flags;
+        msr_bits &= ~(icpu->auto_demotion_disable_flags);
         wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
 }
 
+static const struct idle_cpu idle_cpu_nehalem = {
+        .state_table = nehalem_cstates,
+};
+
+static const struct idle_cpu idle_cpu_westmere = {
+        .state_table = nehalem_cstates,
+        .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
+};
+
+static const struct idle_cpu idle_cpu_atom = {
+        .state_table = atom_cstates,
+};
+
+static const struct idle_cpu idle_cpu_lincroft = {
+        .state_table = atom_cstates,
+        .auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
+};
+
+static const struct idle_cpu idle_cpu_snb = {
+        .state_table = snb_cstates,
+};
+
+#define ICPU(model, cpu) \
+        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
+
+static const struct x86_cpu_id intel_idle_ids[] = {
+        ICPU(0x1a, idle_cpu_nehalem),
+        ICPU(0x1e, idle_cpu_nehalem),
+        ICPU(0x1f, idle_cpu_nehalem),
+        ICPU(0x25, idle_cpu_westmere),
+        ICPU(0x2c, idle_cpu_westmere),
+        ICPU(0x2f, idle_cpu_westmere),
+        ICPU(0x1c, idle_cpu_atom),
+        ICPU(0x26, idle_cpu_lincroft),
+        ICPU(0x2f, idle_cpu_westmere),
+        ICPU(0x2a, idle_cpu_snb),
+        ICPU(0x2d, idle_cpu_snb),
+        {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
+
 /*
  * intel_idle_probe()
  */
 static int intel_idle_probe(void)
 {
         unsigned int eax, ebx, ecx;
+        const struct x86_cpu_id *id;
 
         if (max_cstate == 0) {
                 pr_debug(PREFIX "disabled\n");
                 return -EPERM;
         }
 
-        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
-                return -ENODEV;
-
-        if (!boot_cpu_has(X86_FEATURE_MWAIT))
-                return -ENODEV;
+        id = x86_match_cpu(intel_idle_ids);
+        if (!id) {
+                if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+                    boot_cpu_data.x86 == 6)
+                        pr_debug(PREFIX "does not run on family %d model %d\n",
+                                boot_cpu_data.x86, boot_cpu_data.x86_model);
+                return -ENODEV;
+        }
 
         if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
                 return -ENODEV;
@@ -353,43 +404,8 @@ static int intel_idle_probe(void)
         pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
 
-        if (boot_cpu_data.x86 != 6) /* family 6 */
-                return -ENODEV;
-
-        switch (boot_cpu_data.x86_model) {
-
-        case 0x1A: /* Core i7, Xeon 5500 series */
-        case 0x1E: /* Core i7 and i5 Processor - Lynnfield Jasper Forest */
-        case 0x1F: /* Core i7 and i5 Processor - Nehalem */
-        case 0x2E: /* Nehalem-EX Xeon */
-        case 0x2F: /* Westmere-EX Xeon */
-        case 0x25: /* Westmere */
-        case 0x2C: /* Westmere */
-                cpuidle_state_table = nehalem_cstates;
-                auto_demotion_disable_flags =
-                        (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
-                break;
-
-        case 0x1C: /* 28 - Atom Processor */
-                cpuidle_state_table = atom_cstates;
-                break;
-
-        case 0x26: /* 38 - Lincroft Atom Processor */
-                cpuidle_state_table = atom_cstates;
-                auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
-                break;
-
-        case 0x2A: /* SNB */
-        case 0x2D: /* SNB Xeon */
-                cpuidle_state_table = snb_cstates;
-                break;
-
-        default:
-                pr_debug(PREFIX "does not run on family %d model %d\n",
-                        boot_cpu_data.x86, boot_cpu_data.x86_model);
-                return -ENODEV;
-        }
+        icpu = (const struct idle_cpu *)id->driver_data;
+        cpuidle_state_table = icpu->state_table;
 
         if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
                 lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
@@ -470,7 +486,7 @@ static int intel_idle_cpuidle_driver_init(void)
                 drv->state_count += 1;
         }
 
-        if (auto_demotion_disable_flags)
+        if (icpu->auto_demotion_disable_flags)
                 on_each_cpu(auto_demotion_disable, NULL, 1);
 
         return 0;
@@ -522,7 +538,7 @@ int intel_idle_cpu_init(int cpu)
                 return -EIO;
         }
 
-        if (auto_demotion_disable_flags)
+        if (icpu->auto_demotion_disable_flags)
                 smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
 
         return 0;