Power management and ACPI fixes for v4.2-rc5
Merge tag 'pm+acpi-4.2-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management and ACPI fixes from Rafael Wysocki:
 "These fix three regressions, two recent ones (cpufreq core and ACPI
  device power management) and one introduced during the 4.1 cycle
  (intel_pstate).

  Specifics:

   - Fix a recently introduced issue in the cpufreq core causing it to
     attempt to create duplicate symbolic links to the policy directory
     in sysfs for CPUs that are offline when the cpufreq driver is being
     registered (Rafael J Wysocki)

   - Fix a recently introduced problem in the ACPI device power
     management core code causing it to store an incorrect value in the
     device object's power.state field in some cases, which in turn
     leads to attempts to turn power resources off while they should
     still be on going forward (Mika Westerberg)

   - Fix an intel_pstate driver issue introduced during the 4.1 cycle
     which leads to kernel panics on boot on Knights Landing chips due
     to incomplete support for them in that driver (Lukasz Anaczkowski)"

* tag 'pm+acpi-4.2-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq: Avoid attempts to create duplicate symbolic links
  ACPI / PM: Use target_state to set the device power state
  intel_pstate: Add get_scaling cpu_defaults param to Knights Landing
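The first item above boils down to tracking, per policy, which CPUs already have a sysfs symlink and creating each link at most once. The following is a minimal userspace C sketch of that idiom, not kernel code: the bitmask type, mask_test_and_set() and create_link() are hypothetical stand-ins for cpumask_test_and_set_cpu() and the sysfs helpers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a cpumask: one bit per CPU, up to 64 CPUs. */
typedef uint64_t cpu_mask_t;

/* Return the previous value of the bit and set it (test-and-set). */
static bool mask_test_and_set(cpu_mask_t *mask, unsigned int cpu)
{
        bool was_set = (*mask >> cpu) & 1u;

        *mask |= (cpu_mask_t)1 << cpu;
        return was_set;
}

/* Stub for "create the per-CPU symlink". */
static void create_link(unsigned int cpu)
{
        printf("creating link for CPU%u\n", cpu);
}

/* Create the link only if this CPU has not been linked already. */
static void add_cpu_link_once(cpu_mask_t *linked_cpus, unsigned int cpu)
{
        if (!mask_test_and_set(linked_cpus, cpu))
                create_link(cpu);
}

int main(void)
{
        cpu_mask_t linked = 0;

        add_cpu_link_once(&linked, 2);  /* creates the link */
        add_cpu_link_once(&linked, 2);  /* duplicate request: silently skipped */
        return 0;
}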
Commit: d41a83ba4b
drivers/acpi/device_pm.c

@@ -231,7 +231,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
                 dev_warn(&device->dev, "Failed to change power state to %s\n",
                          acpi_power_state_string(state));
         } else {
-                device->power.state = state;
+                device->power.state = target_state;
                 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                   "Device [%s] transitioned to %s\n",
                                   device->pnp.bus_id,
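The one-line ACPI change records the state the device actually reached (target_state) rather than the state that was requested; per the pull message, the two can differ in some cases, and storing the wrong one misleads later power-resource decisions. A minimal sketch of why the reached state is the one to remember — userspace C with a hypothetical platform mapping, not the ACPI code paths:

#include <stdio.h>

/* Hypothetical device power states, loosely modeled on ACPI D-states. */
enum dev_state { D0, D1, D2, D3_HOT, D3_COLD };

/* Hypothetical platform behavior: a D3hot request ends up in D3cold. */
static enum dev_state enter_state(enum dev_state requested)
{
        return requested == D3_HOT ? D3_COLD : requested;
}

struct device_pm {
        enum dev_state state;   /* must reflect the state actually entered */
};

int main(void)
{
        struct device_pm dev = { .state = D0 };
        enum dev_state requested = D3_HOT;

        /* Record the target (reached) state, not the requested one. */
        dev.state = enter_state(requested);

        printf("requested %d, recorded %d\n", requested, dev.state);
        return 0;
}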
drivers/cpufreq/cpufreq.c

@@ -1002,7 +1002,7 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
         int ret = 0;
 
         /* Some related CPUs might not be present (physically hotplugged) */
-        for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+        for_each_cpu(j, policy->real_cpus) {
                 if (j == policy->kobj_cpu)
                         continue;
 
@@ -1019,7 +1019,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
         unsigned int j;
 
         /* Some related CPUs might not be present (physically hotplugged) */
-        for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+        for_each_cpu(j, policy->real_cpus) {
                 if (j == policy->kobj_cpu)
                         continue;
 
@@ -1163,11 +1163,14 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
         if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                 goto err_free_cpumask;
 
+        if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
+                goto err_free_rcpumask;
+
         ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
                                    "cpufreq");
         if (ret) {
                 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
-                goto err_free_rcpumask;
+                goto err_free_real_cpus;
         }
 
         INIT_LIST_HEAD(&policy->policy_list);
@@ -1184,6 +1187,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
 
         return policy;
 
+err_free_real_cpus:
+        free_cpumask_var(policy->real_cpus);
 err_free_rcpumask:
         free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
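The two allocation hunks above follow the usual goto-unwind convention: each new resource gets its own error label, and the labels run in reverse order of acquisition so a failure frees exactly what was already allocated. A self-contained sketch of that pattern in plain C (names and the malloc-based "masks" are illustrative, not the kernel code):

#include <stdlib.h>

struct policy {
        unsigned long *cpus;
        unsigned long *related_cpus;
        unsigned long *real_cpus;
};

/* Allocate three masks; on failure, unwind only what was allocated. */
static struct policy *policy_alloc(void)
{
        struct policy *p = calloc(1, sizeof(*p));

        if (!p)
                return NULL;

        p->cpus = calloc(1, sizeof(unsigned long));
        if (!p->cpus)
                goto err_free_policy;

        p->related_cpus = calloc(1, sizeof(unsigned long));
        if (!p->related_cpus)
                goto err_free_cpus;

        /* The newest allocation gets the newest label below. */
        p->real_cpus = calloc(1, sizeof(unsigned long));
        if (!p->real_cpus)
                goto err_free_related;

        return p;

err_free_related:
        free(p->related_cpus);
err_free_cpus:
        free(p->cpus);
err_free_policy:
        free(p);
        return NULL;
}

int main(void)
{
        struct policy *p = policy_alloc();

        if (p) {
                free(p->real_cpus);
                free(p->related_cpus);
                free(p->cpus);
                free(p);
        }
        return 0;
}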
@@ -1234,6 +1239,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
         cpufreq_policy_put_kobj(policy, notify);
+        free_cpumask_var(policy->real_cpus);
         free_cpumask_var(policy->related_cpus);
         free_cpumask_var(policy->cpus);
         kfree(policy);
@@ -1258,14 +1264,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 
         pr_debug("adding CPU %u\n", cpu);
 
-        /*
-         * Only possible if 'cpu' wasn't physically present earlier and we are
-         * here from subsys_interface add callback. A hotplug notifier will
-         * follow and we will handle it like logical CPU hotplug then. For now,
-         * just create the sysfs link.
-         */
-        if (cpu_is_offline(cpu))
-                return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
+        if (cpu_is_offline(cpu)) {
+                /*
+                 * Only possible if we are here from the subsys_interface add
+                 * callback. A hotplug notifier will follow and we will handle
+                 * it as CPU online then. For now, just create the sysfs link,
+                 * unless there is no policy or the link is already present.
+                 */
+                policy = per_cpu(cpufreq_cpu_data, cpu);
+                return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
+                        ? add_cpu_dev_symlink(policy, cpu) : 0;
+        }
 
         if (!down_read_trylock(&cpufreq_rwsem))
                 return 0;
@@ -1307,6 +1316,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
         /* related cpus should atleast have policy->cpus */
         cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
 
+        /* Remember which CPUs have been present at the policy creation time. */
+        if (!recover_policy)
+                cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+
         /*
          * affected cpus must always be the one, which are online. We aren't
          * managing offline cpus here.
@@ -1420,8 +1433,7 @@ nomem_out:
         return ret;
 }
 
-static int __cpufreq_remove_dev_prepare(struct device *dev,
-                                        struct subsys_interface *sif)
+static int __cpufreq_remove_dev_prepare(struct device *dev)
 {
         unsigned int cpu = dev->id;
         int ret = 0;
@@ -1437,10 +1449,8 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 
         if (has_target()) {
                 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-                if (ret) {
+                if (ret)
                         pr_err("%s: Failed to stop governor\n", __func__);
-                        return ret;
-                }
         }
 
         down_write(&policy->rwsem);
@@ -1473,8 +1483,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
         return ret;
 }
 
-static int __cpufreq_remove_dev_finish(struct device *dev,
-                                       struct subsys_interface *sif)
+static int __cpufreq_remove_dev_finish(struct device *dev)
 {
         unsigned int cpu = dev->id;
         int ret;
@@ -1492,10 +1501,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
         /* If cpu is last user of policy, free policy */
         if (has_target()) {
                 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
-                if (ret) {
+                if (ret)
                         pr_err("%s: Failed to exit governor\n", __func__);
-                        return ret;
-                }
         }
 
         /*
@@ -1506,10 +1513,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
         if (cpufreq_driver->exit)
                 cpufreq_driver->exit(policy);
 
-        /* Free the policy only if the driver is getting removed. */
-        if (sif)
-                cpufreq_policy_free(policy, true);
-
         return 0;
 }
 
@@ -1521,42 +1524,41 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
         unsigned int cpu = dev->id;
-        int ret;
+        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
-        /*
-         * Only possible if 'cpu' is getting physically removed now. A hotplug
-         * notifier should have already been called and we just need to remove
-         * link or free policy here.
-         */
-        if (cpu_is_offline(cpu)) {
-                struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
-                struct cpumask mask;
+        if (!policy)
+                return 0;
 
-                if (!policy)
-                        return 0;
+        if (cpu_online(cpu)) {
+                __cpufreq_remove_dev_prepare(dev);
+                __cpufreq_remove_dev_finish(dev);
+        }
 
-                cpumask_copy(&mask, policy->related_cpus);
-                cpumask_clear_cpu(cpu, &mask);
+        cpumask_clear_cpu(cpu, policy->real_cpus);
 
-                /*
-                 * Free policy only if all policy->related_cpus are removed
-                 * physically.
-                 */
-                if (cpumask_intersects(&mask, cpu_present_mask)) {
-                        remove_cpu_dev_symlink(policy, cpu);
-                        return 0;
-                }
-
+        if (cpumask_empty(policy->real_cpus)) {
                 cpufreq_policy_free(policy, true);
                 return 0;
         }
 
-        ret = __cpufreq_remove_dev_prepare(dev, sif);
+        if (cpu != policy->kobj_cpu) {
+                remove_cpu_dev_symlink(policy, cpu);
+        } else {
+                /*
+                 * The CPU owning the policy object is going away. Move it to
+                 * another suitable CPU.
+                 */
+                unsigned int new_cpu = cpumask_first(policy->real_cpus);
+                struct device *new_dev = get_cpu_device(new_cpu);
 
-        if (!ret)
-                ret = __cpufreq_remove_dev_finish(dev, sif);
+                dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
 
-        return ret;
+                sysfs_remove_link(&new_dev->kobj, "cpufreq");
+                policy->kobj_cpu = new_cpu;
+                WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
+        }
+
+        return 0;
 }
 
 static void handle_update(struct work_struct *work)
@@ -2395,11 +2397,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
                 break;
 
         case CPU_DOWN_PREPARE:
-                __cpufreq_remove_dev_prepare(dev, NULL);
+                __cpufreq_remove_dev_prepare(dev);
                 break;
 
         case CPU_POST_DEAD:
-                __cpufreq_remove_dev_finish(dev, NULL);
+                __cpufreq_remove_dev_finish(dev);
                 break;
 
         case CPU_DOWN_FAILED:
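The reworked cpufreq_remove_dev() above clears the departing CPU from policy->real_cpus, frees the policy when the mask becomes empty, and otherwise either drops that CPU's symlink or, if the departing CPU owned the policy kobject, moves ownership to the first remaining CPU. A compact userspace sketch of that decision flow — bitmask-based, with stubbed actions standing in for the sysfs/kobject calls, not the kernel implementation:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cpu_mask_t;            /* one bit per CPU, up to 64 CPUs */

struct policy {
        cpu_mask_t real_cpus;           /* CPUs that still have a symlink */
        unsigned int kobj_cpu;          /* CPU currently owning the object */
};

static unsigned int mask_first(cpu_mask_t m)
{
        unsigned int cpu = 0;

        while (!(m & ((cpu_mask_t)1 << cpu)))
                cpu++;
        return cpu;
}

/* Stubbed actions standing in for sysfs/kobject operations. */
static void remove_link(unsigned int cpu) { printf("remove link for CPU%u\n", cpu); }
static void move_owner(unsigned int cpu)  { printf("move object to CPU%u\n", cpu); }
static void free_policy(void)             { printf("free policy\n"); }

static void remove_cpu(struct policy *p, unsigned int cpu)
{
        p->real_cpus &= ~((cpu_mask_t)1 << cpu);

        if (p->real_cpus == 0) {        /* last CPU gone: drop the policy */
                free_policy();
                return;
        }

        if (cpu != p->kobj_cpu) {       /* plain member: just drop its link */
                remove_link(cpu);
        } else {                        /* owner leaving: pick a new owner */
                unsigned int new_cpu = mask_first(p->real_cpus);

                move_owner(new_cpu);
                p->kobj_cpu = new_cpu;
        }
}

int main(void)
{
        struct policy p = { .real_cpus = 0xF, .kobj_cpu = 0 };  /* CPUs 0-3 */

        remove_cpu(&p, 2);   /* plain member */
        remove_cpu(&p, 0);   /* owner: moves to CPU1 */
        remove_cpu(&p, 1);
        remove_cpu(&p, 3);   /* last one: policy freed */
        return 0;
}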
drivers/cpufreq/intel_pstate.c

@@ -681,6 +681,7 @@ static struct cpu_defaults knl_params = {
                 .get_max = core_get_max_pstate,
                 .get_min = core_get_min_pstate,
                 .get_turbo = knl_get_turbo_pstate,
+                .get_scaling = core_get_scaling,
                 .set = core_set_pstate,
         },
 };
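The intel_pstate hunk adds the .get_scaling callback that the Knights Landing cpu_defaults entry was missing; with one member of the callback table left NULL, the driver dereferences a null function pointer at boot, which matches the panic described in the pull message. A small sketch of that failure mode and the fix — userspace C with an illustrative ops struct, not the driver's actual types:

#include <stdio.h>

/* Illustrative table of per-CPU-model callbacks. */
struct pstate_funcs {
        int (*get_max)(void);
        int (*get_turbo)(void);
        int (*get_scaling)(void);   /* forgetting this member leaves it NULL */
};

static int core_get_max(void)     { return 24; }
static int core_get_turbo(void)   { return 31; }
static int core_get_scaling(void) { return 100000; }

/* Designated initializers zero unnamed members, so a missing entry is NULL. */
static const struct pstate_funcs knl_funcs = {
        .get_max     = core_get_max,
        .get_turbo   = core_get_turbo,
        .get_scaling = core_get_scaling,   /* the analogue of the added line */
};

int main(void)
{
        /* Without .get_scaling above, this call would crash (NULL deref). */
        printf("scaling = %d\n", knl_funcs.get_scaling());
        return 0;
}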
include/linux/cpufreq.h

@@ -62,6 +62,7 @@ struct cpufreq_policy {
         /* CPUs sharing clock, require sw coordination */
         cpumask_var_t           cpus;   /* Online CPUs only */
         cpumask_var_t           related_cpus; /* Online + Offline CPUs */
+        cpumask_var_t           real_cpus; /* Related and present */
 
         unsigned int            shared_type; /* ACPI: ANY or ALL affected CPUs
                                                 should set cpufreq */
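Per the comments in the header above, cpus holds the online members of the policy, related_cpus the full online + offline set, and the new real_cpus the related CPUs that are physically present. A tiny sketch of those mask relationships using plain bit operations (the mask values are illustrative only):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* One bit per CPU; example values only. */
        uint64_t related_cpus = 0x0F;   /* CPUs 0-3 share the policy  */
        uint64_t online_mask  = 0x05;   /* CPUs 0 and 2 are online    */
        uint64_t present_mask = 0x0D;   /* CPUs 0, 2 and 3 are present */

        uint64_t cpus      = related_cpus & online_mask;    /* online only       */
        uint64_t real_cpus = related_cpus & present_mask;   /* related + present */

        /* Online CPUs are always a subset of the present ones. */
        assert((cpus & ~real_cpus) == 0);

        printf("cpus=0x%llx real_cpus=0x%llx\n",
               (unsigned long long)cpus, (unsigned long long)real_cpus);
        return 0;
}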