Additional power management updates for 5.5-rc1
- Avoid a race condition in the ACPI EC driver that may cause systems to be unable to leave suspend-to-idle (Rafael Wysocki). - Drop the "disabled" field, which is redundant, from struct cpuidle_state (Rafael Wysocki). - Reintroduce device PM QoS frequency constraints (temporarily introduced and then dropped during the 5.4 cycle) in preparation for adding QoS support to devfreq (Leonard Crestez). - Clean up indentation (in multiple places) and the cpuidle drivers help text in Kconfig (Krzysztof Kozlowski, Randy Dunlap). -----BEGIN PGP SIGNATURE----- iQJGBAABCAAwFiEE4fcc61cGeeHD/fCwgsRv/nhiVHEFAl3nhpQSHHJqd0Byand5 c29ja2kubmV0AAoJEILEb/54YlRxQj4P/2HbVROWMON7q9iWhgO59qABEbqU8M7L DaJ2gu+bDe3FQ9Ek6Y2EObfGw3nl9riyGbZH/jVmcOkbuXE+aQXv/j7eEnM9G35+ 8+JSfhucVsohaHVxT2ROMv+7YD+pLyWK1ivuVK/dNcvmxQaC9CKrmn3GF2ujkqNR ahdRRzZobGeC6mc8tms3GYpWkd1R5zd74ALGVsw9i/eB3P/YgrlS8HaQynpbaflZ qhRKZgsTf8QD6+OG+6HQhWpOfAlG36dsJnvuk0Oa0Cpnw+Zfj6WoR1jpL9ufNWBM Re1faTfppy6Hnyxr62Ytkbq2pYozTVAnQM+TKNIGoqxA4OIXvhgQpBqApmuJXpRx ZFBfr943f7I2jmAAznHeiW9l3n+4h725rpoxKapnlO3OMRDwCTqxbMahiS+CDULd gSu4prnoBdd9WrwiR7M1PA4X2Eb2M0kYFQUr7BltlTgjLHjQy47Mnazh9WxYBAv8 p1tip39QHeZcdO3rdW1O21ljNekEIOFAi5bVVECsR6RyA+KR+vHgFP9pMUWyCpgU +rde+MdGKIL3sw/szNhTTDfQ49vz/ObcipJg3/rakq6jXeFL4n5NwMy5jYrquPlx xxHx3Yp1PCBEZ1TXS6+JjznvQBU/G/7YvoWobpqwN/IL1wa55rWOX8Ah1+YnfLzF fGzh0EvPJKyM =KAyd -----END PGP SIGNATURE----- Merge tag 'pm-5.5-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm Pull additional power management updates from Rafael Wysocki: "These fix an ACPI EC driver bug exposed by the recent rework of the suspend-to-idle code flow, reintroduce frequency constraints into device PM QoS (in preparation for adding QoS support to devfreq), drop a redundant field from struct cpuidle_state and clean up Kconfig in some places. 
Specifics: - Avoid a race condition in the ACPI EC driver that may cause systems to be unable to leave suspend-to-idle (Rafael Wysocki) - Drop the "disabled" field, which is redundant, from struct cpuidle_state (Rafael Wysocki) - Reintroduce device PM QoS frequency constraints (temporarily introduced and then dropped during the 5.4 cycle) in preparation for adding QoS support to devfreq (Leonard Crestez) - Clean up indentation (in multiple places) and the cpuidle drivers help text in Kconfig (Krzysztof Kozlowski, Randy Dunlap)" * tag 'pm-5.5-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: ACPI: PM: s2idle: Rework ACPI events synchronization ACPI: EC: Rework flushing of pending work PM / devfreq: Add missing locking while setting suspend_freq PM / QoS: Restore DEV_PM_QOS_MIN/MAX_FREQUENCY PM / QoS: Reorder pm_qos/freq_qos/dev_pm_qos structs PM / QoS: Initial kunit test PM / QoS: Redefine FREQ_QOS_MAX_DEFAULT_VALUE to S32_MAX power: avs: Fix Kconfig indentation cpufreq: Fix Kconfig indentation cpuidle: minor Kconfig help text fixes cpuidle: Drop disabled field from struct cpuidle_state cpuidle: Fix Kconfig indentation
This commit is contained in:
Коммит
ef867c12f3
|
@ -67,7 +67,7 @@ static struct cpuidle_driver cpuidle_driver = {
|
|||
.enter = cpuidle_sleep_enter,
|
||||
.name = "C2",
|
||||
.desc = "SuperH Sleep Mode [SF]",
|
||||
.disabled = true,
|
||||
.flags = CPUIDLE_FLAG_UNUSABLE,
|
||||
},
|
||||
{
|
||||
.exit_latency = 2300,
|
||||
|
@ -76,7 +76,7 @@ static struct cpuidle_driver cpuidle_driver = {
|
|||
.enter = cpuidle_sleep_enter,
|
||||
.name = "C3",
|
||||
.desc = "SuperH Mobile Standby Mode [SF]",
|
||||
.disabled = true,
|
||||
.flags = CPUIDLE_FLAG_UNUSABLE,
|
||||
},
|
||||
},
|
||||
.safe_state_index = 0,
|
||||
|
@ -86,10 +86,10 @@ static struct cpuidle_driver cpuidle_driver = {
|
|||
int __init sh_mobile_setup_cpuidle(void)
|
||||
{
|
||||
if (sh_mobile_sleep_supported & SUSP_SH_SF)
|
||||
cpuidle_driver.states[1].disabled = false;
|
||||
cpuidle_driver.states[1].flags = CPUIDLE_FLAG_NONE;
|
||||
|
||||
if (sh_mobile_sleep_supported & SUSP_SH_STANDBY)
|
||||
cpuidle_driver.states[2].disabled = false;
|
||||
cpuidle_driver.states[2].flags = CPUIDLE_FLAG_NONE;
|
||||
|
||||
return cpuidle_register(&cpuidle_driver, NULL);
|
||||
}
|
||||
|
|
|
@ -533,26 +533,10 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static bool acpi_ec_query_flushed(struct acpi_ec *ec)
|
||||
static void __acpi_ec_flush_work(void)
|
||||
{
|
||||
bool flushed;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&ec->lock, flags);
|
||||
flushed = !ec->nr_pending_queries;
|
||||
spin_unlock_irqrestore(&ec->lock, flags);
|
||||
return flushed;
|
||||
}
|
||||
|
||||
static void __acpi_ec_flush_event(struct acpi_ec *ec)
|
||||
{
|
||||
/*
|
||||
* When ec_freeze_events is true, we need to flush events in
|
||||
* the proper position before entering the noirq stage.
|
||||
*/
|
||||
wait_event(ec->wait, acpi_ec_query_flushed(ec));
|
||||
if (ec_query_wq)
|
||||
flush_workqueue(ec_query_wq);
|
||||
flush_scheduled_work(); /* flush ec->work */
|
||||
flush_workqueue(ec_query_wq); /* flush queries */
|
||||
}
|
||||
|
||||
static void acpi_ec_disable_event(struct acpi_ec *ec)
|
||||
|
@ -562,15 +546,21 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
|
|||
spin_lock_irqsave(&ec->lock, flags);
|
||||
__acpi_ec_disable_event(ec);
|
||||
spin_unlock_irqrestore(&ec->lock, flags);
|
||||
__acpi_ec_flush_event(ec);
|
||||
|
||||
/*
|
||||
* When ec_freeze_events is true, we need to flush events in
|
||||
* the proper position before entering the noirq stage.
|
||||
*/
|
||||
__acpi_ec_flush_work();
|
||||
}
|
||||
|
||||
void acpi_ec_flush_work(void)
|
||||
{
|
||||
if (first_ec)
|
||||
__acpi_ec_flush_event(first_ec);
|
||||
/* Without ec_query_wq there is nothing to flush. */
|
||||
if (!ec_query_wq)
|
||||
return;
|
||||
|
||||
flush_scheduled_work();
|
||||
__acpi_ec_flush_work();
|
||||
}
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
|
|
|
@ -977,6 +977,16 @@ static int acpi_s2idle_prepare_late(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void acpi_s2idle_sync(void)
|
||||
{
|
||||
/*
|
||||
* The EC driver uses the system workqueue and an additional special
|
||||
* one, so those need to be flushed too.
|
||||
*/
|
||||
acpi_ec_flush_work();
|
||||
acpi_os_wait_events_complete(); /* synchronize Notify handling */
|
||||
}
|
||||
|
||||
static void acpi_s2idle_wake(void)
|
||||
{
|
||||
/*
|
||||
|
@ -1001,13 +1011,8 @@ static void acpi_s2idle_wake(void)
|
|||
* should be missed by canceling the wakeup here.
|
||||
*/
|
||||
pm_system_cancel_wakeup();
|
||||
/*
|
||||
* The EC driver uses the system workqueue and an additional
|
||||
* special one, so those need to be flushed too.
|
||||
*/
|
||||
acpi_os_wait_events_complete(); /* synchronize EC GPE processing */
|
||||
acpi_ec_flush_work();
|
||||
acpi_os_wait_events_complete(); /* synchronize Notify handling */
|
||||
|
||||
acpi_s2idle_sync();
|
||||
|
||||
rearm_wake_irq(acpi_sci_irq);
|
||||
}
|
||||
|
@ -1024,6 +1029,13 @@ static void acpi_s2idle_restore_early(void)
|
|||
|
||||
static void acpi_s2idle_restore(void)
|
||||
{
|
||||
/*
|
||||
* Drain pending events before restoring the working-state configuration
|
||||
* of GPEs.
|
||||
*/
|
||||
acpi_os_wait_events_complete(); /* synchronize GPE processing */
|
||||
acpi_s2idle_sync();
|
||||
|
||||
s2idle_wakeup = false;
|
||||
|
||||
acpi_enable_all_runtime_gpes();
|
||||
|
|
|
@ -148,6 +148,10 @@ config DEBUG_TEST_DRIVER_REMOVE
|
|||
unusable. You should say N here unless you are explicitly looking to
|
||||
test this functionality.
|
||||
|
||||
config PM_QOS_KUNIT_TEST
|
||||
bool "KUnit Test for PM QoS features"
|
||||
depends on KUNIT
|
||||
|
||||
config HMEM_REPORTING
|
||||
bool
|
||||
default n
|
||||
|
|
|
@ -4,5 +4,6 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o
|
|||
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
|
||||
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
|
||||
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
|
||||
obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o
|
||||
|
||||
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
|
||||
|
|
|
@ -0,0 +1,117 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright 2019 NXP
|
||||
*/
|
||||
#include <kunit/test.h>
|
||||
#include <linux/pm_qos.h>
|
||||
|
||||
/* Basic test for aggregating two "min" requests */
|
||||
static void freq_qos_test_min(struct kunit *test)
|
||||
{
|
||||
struct freq_constraints qos;
|
||||
struct freq_qos_request req1, req2;
|
||||
int ret;
|
||||
|
||||
freq_constraints_init(&qos);
|
||||
memset(&req1, 0, sizeof(req1));
|
||||
memset(&req2, 0, sizeof(req2));
|
||||
|
||||
ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MIN, 1000);
|
||||
KUNIT_EXPECT_EQ(test, ret, 1);
|
||||
ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MIN, 2000);
|
||||
KUNIT_EXPECT_EQ(test, ret, 1);
|
||||
|
||||
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
|
||||
|
||||
ret = freq_qos_remove_request(&req2);
|
||||
KUNIT_EXPECT_EQ(test, ret, 1);
|
||||
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
|
||||
|
||||
ret = freq_qos_remove_request(&req1);
|
||||
KUNIT_EXPECT_EQ(test, ret, 1);
|
||||
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
|
||||
FREQ_QOS_MIN_DEFAULT_VALUE);
|
||||
}
|
||||
|
||||
/* Test that requests for MAX_DEFAULT_VALUE have no effect */
|
||||
static void freq_qos_test_maxdef(struct kunit *test)
|
||||
{
|
||||
struct freq_constraints qos;
|
||||
struct freq_qos_request req1, req2;
|
||||
int ret;
|
||||
|
||||
freq_constraints_init(&qos);
|
||||
memset(&req1, 0, sizeof(req1));
|
||||
memset(&req2, 0, sizeof(req2));
|
||||
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX),
|
||||
FREQ_QOS_MAX_DEFAULT_VALUE);
|
||||
|
||||
ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MAX,
|
||||
FREQ_QOS_MAX_DEFAULT_VALUE);
|
||||
KUNIT_EXPECT_EQ(test, ret, 0);
|
||||
ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MAX,
|
||||
FREQ_QOS_MAX_DEFAULT_VALUE);
|
||||
KUNIT_EXPECT_EQ(test, ret, 0);
|
||||
|
||||
/* Add max 1000 */
|
||||
ret = freq_qos_update_request(&req1, 1000);
|
||||
KUNIT_EXPECT_EQ(test, ret, 1);
|
||||
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
|
||||
|
||||
/* Add max 2000, no impact */
|
||||
ret = freq_qos_update_request(&req2, 2000);
|
||||
KUNIT_EXPECT_EQ(test, ret, 0);
|
||||
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
|
||||
|
||||
/* Remove max 1000, new max 2000 */
|
||||
ret = freq_qos_remove_request(&req1);
|
||||
KUNIT_EXPECT_EQ(test, ret, 1);
|
||||
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 2000);
|
||||
}
|
||||
|
||||
/*
|
||||
* Test that a freq_qos_request can be added again after removal
|
||||
*
|
||||
* This issue was solved by commit 05ff1ba412fd ("PM: QoS: Invalidate frequency
|
||||
* QoS requests after removal")
|
||||
*/
|
||||
static void freq_qos_test_readd(struct kunit *test)
|
||||
{
|
||||
struct freq_constraints qos;
|
||||
struct freq_qos_request req;
|
||||
int ret;
|
||||
|
||||
freq_constraints_init(&qos);
|
||||
memset(&req, 0, sizeof(req));
|
||||
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
|
||||
FREQ_QOS_MIN_DEFAULT_VALUE);
|
||||
|
||||
/* Add */
|
||||
ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 1000);
|
||||
KUNIT_EXPECT_EQ(test, ret, 1);
|
||||
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
|
||||
|
||||
/* Remove */
|
||||
ret = freq_qos_remove_request(&req);
|
||||
KUNIT_EXPECT_EQ(test, ret, 1);
|
||||
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
|
||||
FREQ_QOS_MIN_DEFAULT_VALUE);
|
||||
|
||||
/* Add again */
|
||||
ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 2000);
|
||||
KUNIT_EXPECT_EQ(test, ret, 1);
|
||||
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
|
||||
}
|
||||
|
||||
static struct kunit_case pm_qos_test_cases[] = {
|
||||
KUNIT_CASE(freq_qos_test_min),
|
||||
KUNIT_CASE(freq_qos_test_maxdef),
|
||||
KUNIT_CASE(freq_qos_test_readd),
|
||||
{},
|
||||
};
|
||||
|
||||
static struct kunit_suite pm_qos_test_module = {
|
||||
.name = "qos-kunit-test",
|
||||
.test_cases = pm_qos_test_cases,
|
||||
};
|
||||
kunit_test_suite(pm_qos_test_module);
|
|
@ -115,10 +115,20 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
|
|||
|
||||
spin_lock_irqsave(&dev->power.lock, flags);
|
||||
|
||||
if (type == DEV_PM_QOS_RESUME_LATENCY) {
|
||||
switch (type) {
|
||||
case DEV_PM_QOS_RESUME_LATENCY:
|
||||
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
|
||||
: pm_qos_read_value(&qos->resume_latency);
|
||||
} else {
|
||||
break;
|
||||
case DEV_PM_QOS_MIN_FREQUENCY:
|
||||
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
|
||||
: freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
|
||||
break;
|
||||
case DEV_PM_QOS_MAX_FREQUENCY:
|
||||
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
|
||||
: freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
ret = 0;
|
||||
}
|
||||
|
@ -159,6 +169,10 @@ static int apply_constraint(struct dev_pm_qos_request *req,
|
|||
req->dev->power.set_latency_tolerance(req->dev, value);
|
||||
}
|
||||
break;
|
||||
case DEV_PM_QOS_MIN_FREQUENCY:
|
||||
case DEV_PM_QOS_MAX_FREQUENCY:
|
||||
ret = freq_qos_apply(&req->data.freq, action, value);
|
||||
break;
|
||||
case DEV_PM_QOS_FLAGS:
|
||||
ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
|
||||
action, value);
|
||||
|
@ -209,6 +223,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
|
|||
c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
|
||||
c->type = PM_QOS_MIN;
|
||||
|
||||
freq_constraints_init(&qos->freq);
|
||||
|
||||
INIT_LIST_HEAD(&qos->flags.list);
|
||||
|
||||
spin_lock_irq(&dev->power.lock);
|
||||
|
@ -269,6 +285,20 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
|
|||
memset(req, 0, sizeof(*req));
|
||||
}
|
||||
|
||||
c = &qos->freq.min_freq;
|
||||
plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
|
||||
apply_constraint(req, PM_QOS_REMOVE_REQ,
|
||||
PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
|
||||
memset(req, 0, sizeof(*req));
|
||||
}
|
||||
|
||||
c = &qos->freq.max_freq;
|
||||
plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
|
||||
apply_constraint(req, PM_QOS_REMOVE_REQ,
|
||||
PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
|
||||
memset(req, 0, sizeof(*req));
|
||||
}
|
||||
|
||||
f = &qos->flags;
|
||||
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
|
||||
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
|
||||
|
@ -314,11 +344,22 @@ static int __dev_pm_qos_add_request(struct device *dev,
|
|||
ret = dev_pm_qos_constraints_allocate(dev);
|
||||
|
||||
trace_dev_pm_qos_add_request(dev_name(dev), type, value);
|
||||
if (!ret) {
|
||||
req->dev = dev;
|
||||
req->type = type;
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
req->dev = dev;
|
||||
req->type = type;
|
||||
if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
|
||||
ret = freq_qos_add_request(&dev->power.qos->freq,
|
||||
&req->data.freq,
|
||||
FREQ_QOS_MIN, value);
|
||||
else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
|
||||
ret = freq_qos_add_request(&dev->power.qos->freq,
|
||||
&req->data.freq,
|
||||
FREQ_QOS_MAX, value);
|
||||
else
|
||||
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -382,6 +423,10 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
|
|||
case DEV_PM_QOS_LATENCY_TOLERANCE:
|
||||
curr_value = req->data.pnode.prio;
|
||||
break;
|
||||
case DEV_PM_QOS_MIN_FREQUENCY:
|
||||
case DEV_PM_QOS_MAX_FREQUENCY:
|
||||
curr_value = req->data.freq.pnode.prio;
|
||||
break;
|
||||
case DEV_PM_QOS_FLAGS:
|
||||
curr_value = req->data.flr.flags;
|
||||
break;
|
||||
|
@ -507,6 +552,14 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
|
|||
ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
|
||||
notifier);
|
||||
break;
|
||||
case DEV_PM_QOS_MIN_FREQUENCY:
|
||||
ret = freq_qos_add_notifier(&dev->power.qos->freq,
|
||||
FREQ_QOS_MIN, notifier);
|
||||
break;
|
||||
case DEV_PM_QOS_MAX_FREQUENCY:
|
||||
ret = freq_qos_add_notifier(&dev->power.qos->freq,
|
||||
FREQ_QOS_MAX, notifier);
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
ret = -EINVAL;
|
||||
|
@ -546,6 +599,14 @@ int dev_pm_qos_remove_notifier(struct device *dev,
|
|||
ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
|
||||
notifier);
|
||||
break;
|
||||
case DEV_PM_QOS_MIN_FREQUENCY:
|
||||
ret = freq_qos_remove_notifier(&dev->power.qos->freq,
|
||||
FREQ_QOS_MIN, notifier);
|
||||
break;
|
||||
case DEV_PM_QOS_MAX_FREQUENCY:
|
||||
ret = freq_qos_remove_notifier(&dev->power.qos->freq,
|
||||
FREQ_QOS_MAX, notifier);
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
ret = -EINVAL;
|
||||
|
|
|
@ -48,9 +48,9 @@ config PPC_PASEMI_CPUFREQ
|
|||
PWRficient processors.
|
||||
|
||||
config POWERNV_CPUFREQ
|
||||
tristate "CPU frequency scaling for IBM POWERNV platform"
|
||||
depends on PPC_POWERNV
|
||||
default y
|
||||
help
|
||||
tristate "CPU frequency scaling for IBM POWERNV platform"
|
||||
depends on PPC_POWERNV
|
||||
default y
|
||||
help
|
||||
This adds support for CPU frequency switching on IBM POWERNV
|
||||
platform
|
||||
|
|
|
@ -4,17 +4,17 @@
|
|||
#
|
||||
|
||||
config X86_INTEL_PSTATE
|
||||
bool "Intel P state control"
|
||||
depends on X86
|
||||
select ACPI_PROCESSOR if ACPI
|
||||
select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
|
||||
help
|
||||
This driver provides a P state for Intel core processors.
|
||||
bool "Intel P state control"
|
||||
depends on X86
|
||||
select ACPI_PROCESSOR if ACPI
|
||||
select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
|
||||
help
|
||||
This driver provides a P state for Intel core processors.
|
||||
The driver implements an internal governor and will become
|
||||
the scaling driver and governor for Sandy bridge processors.
|
||||
the scaling driver and governor for Sandy bridge processors.
|
||||
|
||||
When this driver is enabled it will become the preferred
|
||||
scaling driver for Sandy bridge processors.
|
||||
scaling driver for Sandy bridge processors.
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
|
|
|
@ -16,7 +16,7 @@ config CPU_IDLE
|
|||
if CPU_IDLE
|
||||
|
||||
config CPU_IDLE_MULTIPLE_DRIVERS
|
||||
bool
|
||||
bool
|
||||
|
||||
config CPU_IDLE_GOV_LADDER
|
||||
bool "Ladder governor (for periodic timer tick)"
|
||||
|
@ -63,13 +63,13 @@ source "drivers/cpuidle/Kconfig.powerpc"
|
|||
endmenu
|
||||
|
||||
config HALTPOLL_CPUIDLE
|
||||
tristate "Halt poll cpuidle driver"
|
||||
depends on X86 && KVM_GUEST
|
||||
default y
|
||||
help
|
||||
This option enables halt poll cpuidle driver, which allows to poll
|
||||
before halting in the guest (more efficient than polling in the
|
||||
host via halt_poll_ns for some scenarios).
|
||||
tristate "Halt poll cpuidle driver"
|
||||
depends on X86 && KVM_GUEST
|
||||
default y
|
||||
help
|
||||
This option enables halt poll cpuidle driver, which allows to poll
|
||||
before halting in the guest (more efficient than polling in the
|
||||
host via halt_poll_ns for some scenarios).
|
||||
|
||||
endif
|
||||
|
||||
|
|
|
@ -3,15 +3,15 @@
|
|||
# ARM CPU Idle drivers
|
||||
#
|
||||
config ARM_CPUIDLE
|
||||
bool "Generic ARM/ARM64 CPU idle Driver"
|
||||
select DT_IDLE_STATES
|
||||
bool "Generic ARM/ARM64 CPU idle Driver"
|
||||
select DT_IDLE_STATES
|
||||
select CPU_IDLE_MULTIPLE_DRIVERS
|
||||
help
|
||||
Select this to enable generic cpuidle driver for ARM.
|
||||
It provides a generic idle driver whose idle states are configured
|
||||
at run-time through DT nodes. The CPUidle suspend backend is
|
||||
initialized by calling the CPU operations init idle hook
|
||||
provided by architecture code.
|
||||
help
|
||||
Select this to enable generic cpuidle driver for ARM.
|
||||
It provides a generic idle driver whose idle states are configured
|
||||
at run-time through DT nodes. The CPUidle suspend backend is
|
||||
initialized by calling the CPU operations init idle hook
|
||||
provided by architecture code.
|
||||
|
||||
config ARM_PSCI_CPUIDLE
|
||||
bool "PSCI CPU idle Driver"
|
||||
|
@ -65,21 +65,21 @@ config ARM_U8500_CPUIDLE
|
|||
bool "Cpu Idle Driver for the ST-E u8500 processors"
|
||||
depends on ARCH_U8500 && !ARM64
|
||||
help
|
||||
Select this to enable cpuidle for ST-E u8500 processors
|
||||
Select this to enable cpuidle for ST-E u8500 processors.
|
||||
|
||||
config ARM_AT91_CPUIDLE
|
||||
bool "Cpu Idle Driver for the AT91 processors"
|
||||
default y
|
||||
depends on ARCH_AT91 && !ARM64
|
||||
help
|
||||
Select this to enable cpuidle for AT91 processors
|
||||
Select this to enable cpuidle for AT91 processors.
|
||||
|
||||
config ARM_EXYNOS_CPUIDLE
|
||||
bool "Cpu Idle Driver for the Exynos processors"
|
||||
depends on ARCH_EXYNOS && !ARM64
|
||||
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
|
||||
help
|
||||
Select this to enable cpuidle for Exynos processors
|
||||
Select this to enable cpuidle for Exynos processors.
|
||||
|
||||
config ARM_MVEBU_V7_CPUIDLE
|
||||
bool "CPU Idle Driver for mvebu v7 family processors"
|
||||
|
|
|
@ -572,7 +572,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
|
|||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < drv->state_count; i++)
|
||||
if (drv->states[i].disabled)
|
||||
if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE)
|
||||
dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
|
||||
|
||||
per_cpu(cpuidle_devices, dev->cpu) = dev;
|
||||
|
|
|
@ -53,7 +53,6 @@ void cpuidle_poll_state_init(struct cpuidle_driver *drv)
|
|||
state->target_residency_ns = 0;
|
||||
state->power_usage = -1;
|
||||
state->enter = poll_idle;
|
||||
state->disabled = false;
|
||||
state->flags = CPUIDLE_FLAG_POLLING;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cpuidle_poll_state_init);
|
||||
|
|
|
@ -921,7 +921,9 @@ int devfreq_suspend_device(struct devfreq *devfreq)
|
|||
}
|
||||
|
||||
if (devfreq->suspend_freq) {
|
||||
mutex_lock(&devfreq->lock);
|
||||
ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
|
||||
mutex_unlock(&devfreq->lock);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
@ -949,7 +951,9 @@ int devfreq_resume_device(struct devfreq *devfreq)
|
|||
return 0;
|
||||
|
||||
if (devfreq->resume_freq) {
|
||||
mutex_lock(&devfreq->lock);
|
||||
ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
|
||||
mutex_unlock(&devfreq->lock);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -1291,8 +1291,8 @@ static void sklh_idle_state_table_update(void)
|
|||
return;
|
||||
}
|
||||
|
||||
skl_cstates[5].disabled = 1; /* C8-SKL */
|
||||
skl_cstates[6].disabled = 1; /* C9-SKL */
|
||||
skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE; /* C8-SKL */
|
||||
skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */
|
||||
}
|
||||
/*
|
||||
* intel_idle_state_table_update()
|
||||
|
@ -1355,7 +1355,7 @@ static void __init intel_idle_cpuidle_driver_init(void)
|
|||
continue;
|
||||
|
||||
/* if state marked as disabled, skip it */
|
||||
if (cpuidle_state_table[cstate].disabled != 0) {
|
||||
if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
|
||||
pr_debug("state %s is disabled\n",
|
||||
cpuidle_state_table[cstate].name);
|
||||
continue;
|
||||
|
|
|
@ -13,9 +13,9 @@ menuconfig POWER_AVS
|
|||
Say Y here to enable Adaptive Voltage Scaling class support.
|
||||
|
||||
config ROCKCHIP_IODOMAIN
|
||||
tristate "Rockchip IO domain support"
|
||||
depends on POWER_AVS && ARCH_ROCKCHIP && OF
|
||||
help
|
||||
Say y here to enable support io domains on Rockchip SoCs. It is
|
||||
necessary for the io domain setting of the SoC to match the
|
||||
voltage supplied by the regulators.
|
||||
tristate "Rockchip IO domain support"
|
||||
depends on POWER_AVS && ARCH_ROCKCHIP && OF
|
||||
help
|
||||
Say y here to enable support io domains on Rockchip SoCs. It is
|
||||
necessary for the io domain setting of the SoC to match the
|
||||
voltage supplied by the regulators.
|
||||
|
|
|
@ -54,7 +54,6 @@ struct cpuidle_state {
|
|||
unsigned int exit_latency; /* in US */
|
||||
int power_usage; /* in mW */
|
||||
unsigned int target_residency; /* in US */
|
||||
bool disabled; /* disabled on all CPUs */
|
||||
|
||||
int (*enter) (struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv,
|
||||
|
@ -77,6 +76,7 @@ struct cpuidle_state {
|
|||
#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */
|
||||
#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */
|
||||
#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
|
||||
#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */
|
||||
|
||||
struct cpuidle_device_kobj;
|
||||
struct cpuidle_state_kobj;
|
||||
|
|
|
@ -34,6 +34,8 @@ enum pm_qos_flags_status {
|
|||
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
|
||||
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
|
||||
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
|
||||
#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE 0
|
||||
#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE FREQ_QOS_MAX_DEFAULT_VALUE
|
||||
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
|
||||
|
||||
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
|
||||
|
@ -49,21 +51,6 @@ struct pm_qos_flags_request {
|
|||
s32 flags; /* Do not change to 64 bit */
|
||||
};
|
||||
|
||||
enum dev_pm_qos_req_type {
|
||||
DEV_PM_QOS_RESUME_LATENCY = 1,
|
||||
DEV_PM_QOS_LATENCY_TOLERANCE,
|
||||
DEV_PM_QOS_FLAGS,
|
||||
};
|
||||
|
||||
struct dev_pm_qos_request {
|
||||
enum dev_pm_qos_req_type type;
|
||||
union {
|
||||
struct plist_node pnode;
|
||||
struct pm_qos_flags_request flr;
|
||||
} data;
|
||||
struct device *dev;
|
||||
};
|
||||
|
||||
enum pm_qos_type {
|
||||
PM_QOS_UNITIALIZED,
|
||||
PM_QOS_MAX, /* return the largest value */
|
||||
|
@ -90,9 +77,51 @@ struct pm_qos_flags {
|
|||
s32 effective_flags; /* Do not change to 64 bit */
|
||||
};
|
||||
|
||||
|
||||
#define FREQ_QOS_MIN_DEFAULT_VALUE 0
|
||||
#define FREQ_QOS_MAX_DEFAULT_VALUE S32_MAX
|
||||
|
||||
enum freq_qos_req_type {
|
||||
FREQ_QOS_MIN = 1,
|
||||
FREQ_QOS_MAX,
|
||||
};
|
||||
|
||||
struct freq_constraints {
|
||||
struct pm_qos_constraints min_freq;
|
||||
struct blocking_notifier_head min_freq_notifiers;
|
||||
struct pm_qos_constraints max_freq;
|
||||
struct blocking_notifier_head max_freq_notifiers;
|
||||
};
|
||||
|
||||
struct freq_qos_request {
|
||||
enum freq_qos_req_type type;
|
||||
struct plist_node pnode;
|
||||
struct freq_constraints *qos;
|
||||
};
|
||||
|
||||
|
||||
enum dev_pm_qos_req_type {
|
||||
DEV_PM_QOS_RESUME_LATENCY = 1,
|
||||
DEV_PM_QOS_LATENCY_TOLERANCE,
|
||||
DEV_PM_QOS_MIN_FREQUENCY,
|
||||
DEV_PM_QOS_MAX_FREQUENCY,
|
||||
DEV_PM_QOS_FLAGS,
|
||||
};
|
||||
|
||||
struct dev_pm_qos_request {
|
||||
enum dev_pm_qos_req_type type;
|
||||
union {
|
||||
struct plist_node pnode;
|
||||
struct pm_qos_flags_request flr;
|
||||
struct freq_qos_request freq;
|
||||
} data;
|
||||
struct device *dev;
|
||||
};
|
||||
|
||||
struct dev_pm_qos {
|
||||
struct pm_qos_constraints resume_latency;
|
||||
struct pm_qos_constraints latency_tolerance;
|
||||
struct freq_constraints freq;
|
||||
struct pm_qos_flags flags;
|
||||
struct dev_pm_qos_request *resume_latency_req;
|
||||
struct dev_pm_qos_request *latency_tolerance_req;
|
||||
|
@ -191,6 +220,10 @@ static inline s32 dev_pm_qos_read_value(struct device *dev,
|
|||
switch (type) {
|
||||
case DEV_PM_QOS_RESUME_LATENCY:
|
||||
return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
|
||||
case DEV_PM_QOS_MIN_FREQUENCY:
|
||||
return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
|
||||
case DEV_PM_QOS_MAX_FREQUENCY:
|
||||
return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
return 0;
|
||||
|
@ -255,27 +288,6 @@ static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
|
|||
}
|
||||
#endif
|
||||
|
||||
#define FREQ_QOS_MIN_DEFAULT_VALUE 0
|
||||
#define FREQ_QOS_MAX_DEFAULT_VALUE (-1)
|
||||
|
||||
enum freq_qos_req_type {
|
||||
FREQ_QOS_MIN = 1,
|
||||
FREQ_QOS_MAX,
|
||||
};
|
||||
|
||||
struct freq_constraints {
|
||||
struct pm_qos_constraints min_freq;
|
||||
struct blocking_notifier_head min_freq_notifiers;
|
||||
struct pm_qos_constraints max_freq;
|
||||
struct blocking_notifier_head max_freq_notifiers;
|
||||
};
|
||||
|
||||
struct freq_qos_request {
|
||||
enum freq_qos_req_type type;
|
||||
struct plist_node pnode;
|
||||
struct freq_constraints *qos;
|
||||
};
|
||||
|
||||
static inline int freq_qos_request_active(struct freq_qos_request *req)
|
||||
{
|
||||
return !IS_ERR_OR_NULL(req->qos);
|
||||
|
@ -291,6 +303,8 @@ int freq_qos_add_request(struct freq_constraints *qos,
|
|||
enum freq_qos_req_type type, s32 value);
|
||||
int freq_qos_update_request(struct freq_qos_request *req, s32 new_value);
|
||||
int freq_qos_remove_request(struct freq_qos_request *req);
|
||||
int freq_qos_apply(struct freq_qos_request *req,
|
||||
enum pm_qos_req_action action, s32 value);
|
||||
|
||||
int freq_qos_add_notifier(struct freq_constraints *qos,
|
||||
enum freq_qos_req_type type,
|
||||
|
|
|
@ -714,8 +714,10 @@ s32 freq_qos_read_value(struct freq_constraints *qos,
|
|||
* @req: Constraint request to apply.
|
||||
* @action: Action to perform (add/update/remove).
|
||||
* @value: Value to assign to the QoS request.
|
||||
*
|
||||
* This is only meant to be called from inside pm_qos, not drivers.
|
||||
*/
|
||||
static int freq_qos_apply(struct freq_qos_request *req,
|
||||
int freq_qos_apply(struct freq_qos_request *req,
|
||||
enum pm_qos_req_action action, s32 value)
|
||||
{
|
||||
int ret;
|
||||
|
|
Загрузка…
Ссылка в новой задаче