Merge back new cpuidle material for v4.7.
Commit: fd7c3c29f9
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -36,7 +36,7 @@ struct cpuidle_ops {
 
 struct of_cpuidle_method {
        const char *method;
-       struct cpuidle_ops *ops;
+       const struct cpuidle_ops *ops;
 };
 
 #define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops)	\
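The point of the constification above is that a platform's cpuidle_ops table can now live in read-only init memory (__initconst) rather than writable __initdata. A minimal sketch of how a platform plugs into this interface; the vendor_foo names and the "vendor,foo" enable-method string are hypothetical, invented for illustration:

    #include <linux/init.h>
    #include <linux/of.h>
    #include <asm/cpuidle.h>

    /* Hypothetical callbacks -- not part of this commit. */
    static int vendor_foo_suspend(unsigned long arg)
    {
            /* Enter the platform low-power state selected by @arg. */
            return 0;
    }

    static int __init vendor_foo_init(struct device_node *dn, int cpu)
    {
            /* Probe the DT/firmware resources needed to idle @cpu. */
            return 0;
    }

    /* With this commit, the ops table is const and tagged __initconst. */
    static const struct cpuidle_ops vendor_foo_cpuidle_ops __initconst = {
            .suspend = vendor_foo_suspend,
            .init    = vendor_foo_init,
    };

    /* Matched against the DT property: enable-method = "vendor,foo"; */
    CPUIDLE_METHOD_OF_DECLARE(vendor_foo, "vendor,foo", &vendor_foo_cpuidle_ops);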
--- a/arch/arm/kernel/cpuidle.c
+++ b/arch/arm/kernel/cpuidle.c
@@ -70,7 +70,7 @@ int arm_cpuidle_suspend(int index)
  *
  * Returns a struct cpuidle_ops pointer, NULL if not found.
  */
-static struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
+static const struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
 {
        struct of_cpuidle_method *m = __cpuidle_method_of_table;
 
@@ -88,7 +88,7 @@ static struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
  *
  * Get the method name defined in the 'enable-method' property, retrieve the
  * associated cpuidle_ops and do a struct copy. This copy is needed because all
- * cpuidle_ops are tagged __initdata and will be unloaded after the init
+ * cpuidle_ops are tagged __initconst and will be unloaded after the init
  * process.
  *
  * Return 0 on success, -ENOENT if no 'enable-method' is defined, -EOPNOTSUPP if
@@ -97,7 +97,7 @@ static struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
 static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
 {
        const char *enable_method;
-       struct cpuidle_ops *ops;
+       const struct cpuidle_ops *ops;
 
        enable_method = of_get_property(dn, "enable-method", NULL);
        if (!enable_method)
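For context, arm_cpuidle_get_ops() (whose return type is constified above) is a linear scan over the linker-collected table that CPUIDLE_METHOD_OF_DECLARE() populates. A sketch of that lookup pattern, assuming a NULL-method sentinel terminates the table; the function is renamed to mark it as an illustration rather than a quote of the kernel source:

    static const struct cpuidle_ops * __init
    example_get_ops(const char *method)
    {
            struct of_cpuidle_method *m = __cpuidle_method_of_table;

            /* Entries are emitted into a dedicated section by
             * CPUIDLE_METHOD_OF_DECLARE(); scan until the sentinel. */
            for (; m->method; m++)
                    if (!strcmp(m->method, method))
                            return m->ops;

            return NULL;
    }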
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -173,7 +173,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 
        struct cpuidle_state *target_state = &drv->states[index];
        bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
-       ktime_t time_start, time_end;
+       u64 time_start, time_end;
        s64 diff;
 
        /*
@@ -195,13 +195,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        sched_idle_set_state(target_state);
 
        trace_cpu_idle_rcuidle(index, dev->cpu);
-       time_start = ktime_get();
+       time_start = local_clock();
 
        stop_critical_timings();
        entered_state = target_state->enter(dev, drv, index);
        start_critical_timings();
 
-       time_end = ktime_get();
+       time_end = local_clock();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
        /* The cpu is no longer idle or about to enter idle. */
@@ -217,7 +217,11 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        if (!cpuidle_state_is_coupled(drv, entered_state))
                local_irq_enable();
 
-       diff = ktime_to_us(ktime_sub(time_end, time_start));
+       /*
+        * local_clock() returns the time in nanoseconds, so shift
+        * by 10 (divide by 1024) to get microsecond-based time.
+        */
+       diff = (time_end - time_start) >> 10;
        if (diff > INT_MAX)
                diff = INT_MAX;
 
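Replacing ktime_to_us() with a right shift trades a 64-bit division in the idle hot path for a constant under-estimate of about 2.3%, since it divides by 1024 rather than 1000. A self-contained userspace illustration of that error (plain C, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t ns = 1500000;               /* 1.5 ms measured in nanoseconds */
            uint64_t exact_us = ns / 1000;       /* 1500 us, needs a full division */
            uint64_t approx_us = ns >> 10;       /* 1464 us, a single shift */

            /* The shift under-reports by a constant factor of 1000/1024. */
            printf("exact=%llu approx=%llu error=%.1f%%\n",
                   (unsigned long long)exact_us,
                   (unsigned long long)approx_us,
                   100.0 * (exact_us - approx_us) / exact_us);
            return 0;
    }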
@@ -433,6 +437,8 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev)
        list_del(&dev->device_list);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;
        module_put(drv->owner);
+
+       dev->registered = 0;
 }
 
 static void __cpuidle_device_init(struct cpuidle_device *dev)
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -355,7 +355,7 @@ int psci_cpu_suspend_enter(unsigned long index)
 
 /* ARM specific CPU idle operations */
 #ifdef CONFIG_ARM
-static struct cpuidle_ops psci_cpuidle_ops __initdata = {
+static const struct cpuidle_ops psci_cpuidle_ops __initconst = {
        .suspend = psci_cpu_suspend_enter,
        .init = psci_dt_cpu_init_idle,
 };
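For reference, the constified table above is tied to the "psci" enable-method elsewhere in psci.c (unchanged by this diff) with a declaration of the form:

    CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);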
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -766,6 +766,67 @@ static struct cpuidle_state knl_cstates[] = {
                .enter = NULL }
 };
 
+static struct cpuidle_state bxt_cstates[] = {
+       {
+               .name = "C1-BXT",
+               .desc = "MWAIT 0x00",
+               .flags = MWAIT2flg(0x00),
+               .exit_latency = 2,
+               .target_residency = 2,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C1E-BXT",
+               .desc = "MWAIT 0x01",
+               .flags = MWAIT2flg(0x01),
+               .exit_latency = 10,
+               .target_residency = 20,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C6-BXT",
+               .desc = "MWAIT 0x20",
+               .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 133,
+               .target_residency = 133,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C7s-BXT",
+               .desc = "MWAIT 0x31",
+               .flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 155,
+               .target_residency = 155,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C8-BXT",
+               .desc = "MWAIT 0x40",
+               .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 1000,
+               .target_residency = 1000,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C9-BXT",
+               .desc = "MWAIT 0x50",
+               .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 2000,
+               .target_residency = 2000,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C10-BXT",
+               .desc = "MWAIT 0x60",
+               .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 10000,
+               .target_residency = 10000,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
+       {
+               .enter = NULL }
+};
+
 /**
  * intel_idle
  * @dev: cpuidle_device
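In the table above, each .flags value packs the MWAIT hint into the top byte of the cpuidle flags word, CPUIDLE_FLAG_TLB_FLUSHED marks states deep enough that the TLB is flushed, and .exit_latency/.target_residency are in microseconds. Both helper macros predate this diff in intel_idle.c; quoted here for context:

    #define MWAIT2flg(eax)   ((eax & 0xFF) << 24)      /* pack EAX hint into flags */
    #define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)  /* recover it at state entry */

    /* e.g. for C6-BXT: flg2MWAIT(MWAIT2flg(0x20)) == 0x20, the EAX hint
     * handed to MWAIT when intel_idle() enters the state. */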
@@ -950,6 +1011,11 @@ static const struct idle_cpu idle_cpu_knl = {
        .state_table = knl_cstates,
 };
 
+static const struct idle_cpu idle_cpu_bxt = {
+       .state_table = bxt_cstates,
+       .disable_promotion_to_c1e = true,
+};
+
 #define ICPU(model, cpu) \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
 
@@ -985,6 +1051,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
        ICPU(0x9e, idle_cpu_skl),
        ICPU(0x55, idle_cpu_skx),
        ICPU(0x57, idle_cpu_knl),
+       ICPU(0x5c, idle_cpu_bxt),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
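The new ICPU(0x5c, idle_cpu_bxt) row is what makes the driver bind on Broxton (family 6, model 0x5c). At init time the table is consumed with x86_match_cpu(); a condensed sketch of that existing probe pattern, with names simplified rather than quoted from intel_idle_probe():

    static const struct idle_cpu *icpu;

    static int __init example_probe(void)
    {
            const struct x86_cpu_id *id = x86_match_cpu(intel_idle_ids);

            if (!id)
                    return -ENODEV;  /* CPU not listed: driver stays out */

            /* driver_data carries the matching struct idle_cpu, which
             * supplies the C-state table (bxt_cstates on Broxton). */
            icpu = (const struct idle_cpu *)id->driver_data;
            return 0;
    }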
@@ -1075,6 +1142,73 @@ static void ivt_idle_state_table_update(void)
 
        /* else, 1 and 2 socket systems use default ivt_cstates */
 }
 
+/*
+ * Translate IRTL (Interrupt Response Time Limit) MSR to usec
+ */
+
+static unsigned int irtl_ns_units[] = {
+       1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
+
+static unsigned long long irtl_2_usec(unsigned long long irtl)
+{
+       unsigned long long ns;
+
+       ns = irtl_ns_units[(irtl >> 10) & 0x3];
+
+       return div64_u64((irtl & 0x3FF) * ns, 1000);
+}
+/*
+ * bxt_idle_state_table_update(void)
+ *
+ * On BXT, we trust the IRTL to show the definitive maximum latency
+ * We use the same value for target_residency.
+ */
+static void bxt_idle_state_table_update(void)
+{
+       unsigned long long msr;
+
+       rdmsrl(MSR_PKGC6_IRTL, msr);
+       if (msr) {
+               unsigned int usec = irtl_2_usec(msr);
+
+               bxt_cstates[2].exit_latency = usec;
+               bxt_cstates[2].target_residency = usec;
+       }
+
+       rdmsrl(MSR_PKGC7_IRTL, msr);
+       if (msr) {
+               unsigned int usec = irtl_2_usec(msr);
+
+               bxt_cstates[3].exit_latency = usec;
+               bxt_cstates[3].target_residency = usec;
+       }
+
+       rdmsrl(MSR_PKGC8_IRTL, msr);
+       if (msr) {
+               unsigned int usec = irtl_2_usec(msr);
+
+               bxt_cstates[4].exit_latency = usec;
+               bxt_cstates[4].target_residency = usec;
+       }
+
+       rdmsrl(MSR_PKGC9_IRTL, msr);
+       if (msr) {
+               unsigned int usec = irtl_2_usec(msr);
+
+               bxt_cstates[5].exit_latency = usec;
+               bxt_cstates[5].target_residency = usec;
+       }
+
+       rdmsrl(MSR_PKGC10_IRTL, msr);
+       if (msr) {
+               unsigned int usec = irtl_2_usec(msr);
+
+               bxt_cstates[6].exit_latency = usec;
+               bxt_cstates[6].target_residency = usec;
+       }
+
+}
 /*
  * sklh_idle_state_table_update(void)
  *
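irtl_2_usec() treats the IRTL MSR as a 10-bit time value (bits 9:0) scaled by a unit field starting at bit 10, which indexes irtl_ns_units[]. A worked example with a hypothetical register value, following the code above:

    /* Hypothetical MSR value, not taken from real hardware:
     *   irtl = 0x832
     *   time value: irtl & 0x3FF        = 0x32 = 50
     *   unit index: (irtl >> 10) & 0x3  = 2   -> irtl_ns_units[2] = 1024 ns
     *   result:     50 * 1024 / 1000    = 51 usec
     * So MSR_PKGC6_IRTL == 0x832 would set bxt_cstates[2] (C6-BXT)
     * exit_latency and target_residency to 51 us.
     */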
@@ -1130,6 +1264,9 @@ static void intel_idle_state_table_update(void)
        case 0x3e: /* IVT */
                ivt_idle_state_table_update();
                break;
+       case 0x5c: /* BXT */
+               bxt_idle_state_table_update();
+               break;
        case 0x5e: /* SKL-H */
                sklh_idle_state_table_update();
                break;
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/soc/qcom/spm.c
@@ -274,7 +274,7 @@ check_spm:
        return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO;
 }
 
-static struct cpuidle_ops qcom_cpuidle_ops __initdata = {
+static const struct cpuidle_ops qcom_cpuidle_ops __initconst = {
        .suspend = qcom_idle_enter,
        .init = qcom_cpuidle_init,
 };