Merge branches 'pm-cpuidle' and 'pm-sleep'
* pm-cpuidle:
  PM / Domains: Add genpd governor for CPUs
  cpuidle: Export the next timer expiration for CPUs
  PM / Domains: Add support for CPU devices to genpd
  PM / Domains: Add generic data pointer to struct genpd_power_state
  cpuidle: exynos: Unify target residency for AFTR and coupled AFTR states

* pm-sleep:
  PM / core: Propagate dev->power.wakeup_path when no callbacks
  PM / core: Introduce dpm_async_fn() helper
  PM / core: fix kerneldoc comment for device_pm_wait_for_dev()
  PM / core: fix kerneldoc comment for dpm_watchdog_handler()
  PM / sleep: Measure the time of filesystems syncing
  PM / sleep: Refactor filesystems sync to reduce duplication
  PM / wakeup: Use pm_pr_dbg() instead of pr_debug()
This commit is contained in: commit 78baa1ea58
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -22,6 +22,7 @@
 #include <linux/sched.h>
 #include <linux/suspend.h>
 #include <linux/export.h>
+#include <linux/cpu.h>
 
 #include "power.h"
 
@@ -128,6 +129,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
 #define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
 #define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
+#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
 
 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
 		const struct generic_pm_domain *genpd)
@@ -1454,6 +1456,56 @@ static void genpd_free_dev_data(struct device *dev,
 	dev_pm_put_subsys_data(dev);
 }
 
+static void __genpd_update_cpumask(struct generic_pm_domain *genpd,
+				   int cpu, bool set, unsigned int depth)
+{
+	struct gpd_link *link;
+
+	if (!genpd_is_cpu_domain(genpd))
+		return;
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		struct generic_pm_domain *master = link->master;
+
+		genpd_lock_nested(master, depth + 1);
+		__genpd_update_cpumask(master, cpu, set, depth + 1);
+		genpd_unlock(master);
+	}
+
+	if (set)
+		cpumask_set_cpu(cpu, genpd->cpus);
+	else
+		cpumask_clear_cpu(cpu, genpd->cpus);
+}
+
+static void genpd_update_cpumask(struct generic_pm_domain *genpd,
+				 struct device *dev, bool set)
+{
+	int cpu;
+
+	if (!genpd_is_cpu_domain(genpd))
+		return;
+
+	for_each_possible_cpu(cpu) {
+		if (get_cpu_device(cpu) == dev) {
+			__genpd_update_cpumask(genpd, cpu, set, 0);
+			return;
+		}
+	}
+}
+
+static void genpd_set_cpumask(struct generic_pm_domain *genpd,
+			      struct device *dev)
+{
+	genpd_update_cpumask(genpd, dev, true);
+}
+
+static void genpd_clear_cpumask(struct generic_pm_domain *genpd,
+				struct device *dev)
+{
+	genpd_update_cpumask(genpd, dev, false);
+}
+
 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 			    struct gpd_timing_data *td)
 {
@@ -1475,6 +1527,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 
 	genpd_lock(genpd);
 
+	genpd_set_cpumask(genpd, dev);
 	dev_pm_domain_set(dev, &genpd->domain);
 
 	genpd->device_count++;
@@ -1532,6 +1585,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
 	genpd->device_count--;
 	genpd->max_off_time_changed = true;
 
+	genpd_clear_cpumask(genpd, dev);
 	dev_pm_domain_set(dev, NULL);
 
 	list_del_init(&pdd->list_node);
@@ -1686,6 +1740,12 @@ out:
 }
 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
 
+static void genpd_free_default_power_state(struct genpd_power_state *states,
+					   unsigned int state_count)
+{
+	kfree(states);
+}
+
 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
 {
 	struct genpd_power_state *state;
@@ -1696,7 +1756,7 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
 
 	genpd->states = state;
 	genpd->state_count = 1;
-	genpd->free = state;
+	genpd->free_states = genpd_free_default_power_state;
 
 	return 0;
 }
@@ -1762,11 +1822,18 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
 	if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
 		return -EINVAL;
 
+	if (genpd_is_cpu_domain(genpd) &&
+	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
+		return -ENOMEM;
+
 	/* Use only one "off" state if there were no states declared */
 	if (genpd->state_count == 0) {
 		ret = genpd_set_default_power_state(genpd);
-		if (ret)
+		if (ret) {
+			if (genpd_is_cpu_domain(genpd))
+				free_cpumask_var(genpd->cpus);
 			return ret;
+		}
 	} else if (!gov && genpd->state_count > 1) {
 		pr_warn("%s: no governor for states\n", genpd->name);
 	}
@@ -1812,7 +1879,11 @@ static int genpd_remove(struct generic_pm_domain *genpd)
 	list_del(&genpd->gpd_list_node);
 	genpd_unlock(genpd);
 	cancel_work_sync(&genpd->power_off_work);
-	kfree(genpd->free);
+	if (genpd_is_cpu_domain(genpd))
+		free_cpumask_var(genpd->cpus);
+	if (genpd->free_states)
+		genpd->free_states(genpd->states, genpd->state_count);
+
 	pr_debug("%s: removed %s\n", __func__, genpd->name);
 
 	return 0;
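For orientation, a platform backend would opt into this machinery by combining the new flag, the CPU governor and pm_genpd_init(). The following is a minimal sketch, not taken from this merge: the domain name and the my_cluster_power_off() callback are hypothetical, and the last-man-standing behaviour is described in the pm_domain.h hunk below.

    #include <linux/pm_domain.h>

    /* Hypothetical power_off callback: with GENPD_FLAG_CPU_DOMAIN set, genpd
     * only powers the domain off once every CPU in genpd->cpus is idle.
     */
    static int my_cluster_power_off(struct generic_pm_domain *pd)
    {
            return 0;       /* platform-specific power rail handling here */
    }

    static struct generic_pm_domain my_cluster_pd = {
            .name      = "my-cpu-cluster",          /* illustrative name */
            .flags     = GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN,
            .power_off = my_cluster_power_off,
    };

    static int my_cluster_pd_setup(void)
    {
            /* pm_genpd_init() now allocates genpd->cpus for CPU domains, and
             * genpd_add_device() keeps the mask current via genpd_set_cpumask().
             */
            return pm_genpd_init(&my_cluster_pd, &pm_domain_cpu_gov, false);
    }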
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -10,6 +10,9 @@
 #include <linux/pm_domain.h>
 #include <linux/pm_qos.h>
 #include <linux/hrtimer.h>
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/ktime.h>
 
 static int dev_update_qos_constraint(struct device *dev, void *data)
 {
@@ -210,8 +213,10 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 	struct generic_pm_domain *genpd = pd_to_genpd(pd);
 	struct gpd_link *link;
 
-	if (!genpd->max_off_time_changed)
+	if (!genpd->max_off_time_changed) {
+		genpd->state_idx = genpd->cached_power_down_state_idx;
 		return genpd->cached_power_down_ok;
+	}
 
 	/*
 	 * We have to invalidate the cached results for the masters, so
@@ -236,6 +241,7 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 		genpd->state_idx--;
 	}
 
+	genpd->cached_power_down_state_idx = genpd->state_idx;
 	return genpd->cached_power_down_ok;
 }
 
@@ -244,6 +250,65 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
 	return false;
 }
 
+#ifdef CONFIG_CPU_IDLE
+static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+{
+	struct generic_pm_domain *genpd = pd_to_genpd(pd);
+	struct cpuidle_device *dev;
+	ktime_t domain_wakeup, next_hrtimer;
+	s64 idle_duration_ns;
+	int cpu, i;
+
+	/* Validate dev PM QoS constraints. */
+	if (!default_power_down_ok(pd))
+		return false;
+
+	if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
+		return true;
+
+	/*
+	 * Find the next wakeup for any of the online CPUs within the PM domain
+	 * and its subdomains. Note, we only need the genpd->cpus, as it already
+	 * contains a mask of all CPUs from subdomains.
+	 */
+	domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
+	for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
+		dev = per_cpu(cpuidle_devices, cpu);
+		if (dev) {
+			next_hrtimer = READ_ONCE(dev->next_hrtimer);
+			if (ktime_before(next_hrtimer, domain_wakeup))
+				domain_wakeup = next_hrtimer;
+		}
+	}
+
+	/* The minimum idle duration is from now - until the next wakeup. */
+	idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get()));
+	if (idle_duration_ns <= 0)
+		return false;
+
+	/*
+	 * Find the deepest idle state that has its residency value satisfied
+	 * and by also taking into account the power off latency for the state.
+	 * Start at the state picked by the dev PM QoS constraint validation.
+	 */
+	i = genpd->state_idx;
+	do {
+		if (idle_duration_ns >= (genpd->states[i].residency_ns +
+		    genpd->states[i].power_off_latency_ns)) {
+			genpd->state_idx = i;
+			return true;
+		}
+	} while (--i >= 0);
+
+	return false;
+}
+
+struct dev_power_governor pm_domain_cpu_gov = {
+	.suspend_ok = default_suspend_ok,
+	.power_down_ok = cpu_power_down_ok,
+};
+#endif
+
 struct dev_power_governor simple_qos_governor = {
 	.suspend_ok = default_suspend_ok,
 	.power_down_ok = default_power_down_ok,
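The selection rule in cpu_power_down_ok() is simple arithmetic: state i qualifies when the projected idle time covers residency_ns[i] plus power_off_latency_ns[i], and the loop walks from the deepest QoS-allowed state toward shallower ones. A standalone sketch of that arithmetic with illustrative numbers (not from the patch):

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative numbers: next CPU wakeup is 2 ms away. */
            long long idle_duration_ns = 2000000;
            long long residency_ns[]         = { 0, 1500000, 2000000 };
            long long power_off_latency_ns[] = { 0,  300000,  300000 };
            int i = 2;      /* deepest state allowed by the QoS check */

            do {
                    /* Same condition as in cpu_power_down_ok() above. */
                    if (idle_duration_ns >= residency_ns[i] +
                                            power_off_latency_ns[i]) {
                            /* i = 2 fails (2.3 ms > 2 ms); i = 1 passes. */
                            printf("power down ok, state %d\n", i);
                            return 0;
                    }
            } while (--i >= 0);

            printf("power down not ok\n");
            return 0;
    }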
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -478,7 +478,7 @@ struct dpm_watchdog {
 
 /**
  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
- * @data: Watchdog object address.
+ * @t: The timer that PM watchdog depends on.
  *
  * Called when a driver has timed out suspending or resuming.
  * There's not much we can do here to recover so panic() to
@@ -706,6 +706,19 @@ static bool is_async(struct device *dev)
 		&& !pm_trace_is_enabled();
 }
 
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+	reinit_completion(&dev->power.completion);
+
+	if (is_async(dev)) {
+		get_device(dev);
+		async_schedule(func, dev);
+		return true;
+	}
+
+	return false;
+}
+
 static void async_resume_noirq(void *data, async_cookie_t cookie)
 {
 	struct device *dev = (struct device *)data;
@@ -732,13 +745,8 @@ void dpm_noirq_resume_devices(pm_message_t state)
 	 * in case the starting of async threads is
 	 * delayed by non-async resuming devices.
 	 */
-	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
-		reinit_completion(&dev->power.completion);
-		if (is_async(dev)) {
-			get_device(dev);
-			async_schedule_dev(async_resume_noirq, dev);
-		}
-	}
+	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+		dpm_async_fn(dev, async_resume_noirq);
 
 	while (!list_empty(&dpm_noirq_list)) {
 		dev = to_device(dpm_noirq_list.next);
@@ -889,13 +897,8 @@ void dpm_resume_early(pm_message_t state)
 	 * in case the starting of async threads is
 	 * delayed by non-async resuming devices.
 	 */
-	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
-		reinit_completion(&dev->power.completion);
-		if (is_async(dev)) {
-			get_device(dev);
-			async_schedule_dev(async_resume_early, dev);
-		}
-	}
+	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+		dpm_async_fn(dev, async_resume_early);
 
 	while (!list_empty(&dpm_late_early_list)) {
 		dev = to_device(dpm_late_early_list.next);
@@ -1053,13 +1056,8 @@ void dpm_resume(pm_message_t state)
 	pm_transition = state;
 	async_error = 0;
 
-	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
-		reinit_completion(&dev->power.completion);
-		if (is_async(dev)) {
-			get_device(dev);
-			async_schedule_dev(async_resume, dev);
-		}
-	}
+	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+		dpm_async_fn(dev, async_resume);
 
 	while (!list_empty(&dpm_suspended_list)) {
 		dev = to_device(dpm_suspended_list.next);
@@ -1373,13 +1371,9 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)
 
 static int device_suspend_noirq(struct device *dev)
 {
-	reinit_completion(&dev->power.completion);
-
-	if (is_async(dev)) {
-		get_device(dev);
-		async_schedule_dev(async_suspend_noirq, dev);
+	if (dpm_async_fn(dev, async_suspend_noirq))
 		return 0;
-	}
+
 	return __device_suspend_noirq(dev, pm_transition, false);
 }
 
@@ -1576,13 +1570,8 @@ static void async_suspend_late(void *data, async_cookie_t cookie)
 
 static int device_suspend_late(struct device *dev)
 {
-	reinit_completion(&dev->power.completion);
-
-	if (is_async(dev)) {
-		get_device(dev);
-		async_schedule_dev(async_suspend_late, dev);
+	if (dpm_async_fn(dev, async_suspend_late))
 		return 0;
-	}
 
 	return __device_suspend_late(dev, pm_transition, false);
 }
@@ -1747,6 +1736,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	if (dev->power.syscore)
 		goto Complete;
 
+	/* Avoid direct_complete to let wakeup_path propagate. */
+	if (device_may_wakeup(dev) || dev->power.wakeup_path)
+		dev->power.direct_complete = false;
+
 	if (dev->power.direct_complete) {
 		if (pm_runtime_status_suspended(dev)) {
 			pm_runtime_disable(dev);
@@ -1842,13 +1835,8 @@ static void async_suspend(void *data, async_cookie_t cookie)
 
 static int device_suspend(struct device *dev)
 {
-	reinit_completion(&dev->power.completion);
-
-	if (is_async(dev)) {
-		get_device(dev);
-		async_schedule_dev(async_suspend, dev);
+	if (dpm_async_fn(dev, async_suspend))
 		return 0;
-	}
 
 	return __device_suspend(dev, pm_transition, false);
 }
@@ -2069,8 +2057,8 @@ EXPORT_SYMBOL_GPL(__suspend_report_result);
 
 /**
  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
- * @dev: Device to wait for.
  * @subordinate: Device that needs to wait for @dev.
+ * @dev: Device to wait for.
 */
 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
 {
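The dpm_async_fn() helper folds the repeated four-step pattern (reinit the completion, check is_async(), take a device reference, schedule the async callback) into a single call that reports whether the async path was taken. New call sites follow the shape below; this is a sketch, and async_do_foo()/__device_do_foo() are hypothetical stand-ins for a real suspend step:

    /* Sketch of the call-site pattern established by this change. */
    static int device_do_foo(struct device *dev)
    {
            if (dpm_async_fn(dev, async_do_foo))
                    return 0;   /* queued; the async callback owns the ref */

            return __device_do_foo(dev, pm_transition, false);
    }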
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -804,7 +804,7 @@ void pm_print_active_wakeup_sources(void)
 	srcuidx = srcu_read_lock(&wakeup_srcu);
 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
 		if (ws->active) {
-			pr_debug("active wakeup source: %s\n", ws->name);
+			pm_pr_dbg("active wakeup source: %s\n", ws->name);
 			active = 1;
 		} else if (!active &&
 			   (!last_activity_ws ||
@@ -815,7 +815,7 @@ void pm_print_active_wakeup_sources(void)
 	}
 
 	if (!active && last_activity_ws)
-		pr_debug("last active wakeup source: %s\n",
+		pm_pr_dbg("last active wakeup source: %s\n",
 			last_activity_ws->name);
 	srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
@@ -845,7 +845,7 @@ bool pm_wakeup_pending(void)
 	raw_spin_unlock_irqrestore(&events_lock, flags);
 
 	if (ret) {
-		pr_debug("Wakeup pending, aborting suspend\n");
+		pm_pr_dbg("Wakeup pending, aborting suspend\n");
 		pm_print_active_wakeup_sources();
 	}
 
--- a/drivers/cpuidle/cpuidle-exynos.c
+++ b/drivers/cpuidle/cpuidle-exynos.c
@@ -84,7 +84,7 @@ static struct cpuidle_driver exynos_idle_driver = {
 	[1] = {
 		.enter			= exynos_enter_lowpower,
 		.exit_latency		= 300,
-		.target_residency	= 100000,
+		.target_residency	= 10000,
 		.name			= "C1",
 		.desc			= "ARM power down",
 	},
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -328,9 +328,23 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		  int index)
 {
+	int ret = 0;
+
+	/*
+	 * Store the next hrtimer, which becomes either next tick or the next
+	 * timer event, whatever expires first. Additionally, to make this data
+	 * useful for consumers outside cpuidle, we rely on that the governor's
+	 * ->select() callback have decided, whether to stop the tick or not.
+	 */
+	WRITE_ONCE(dev->next_hrtimer, tick_nohz_get_next_hrtimer());
+
 	if (cpuidle_state_is_coupled(drv, index))
-		return cpuidle_enter_state_coupled(dev, drv, index);
-	return cpuidle_enter_state(dev, drv, index);
+		ret = cpuidle_enter_state_coupled(dev, drv, index);
+	else
+		ret = cpuidle_enter_state(dev, drv, index);
+
+	WRITE_ONCE(dev->next_hrtimer, 0);
+	return ret;
 }
 
 /**
@@ -511,6 +525,7 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
 {
 	memset(dev->states_usage, 0, sizeof(dev->states_usage));
 	dev->last_residency = 0;
+	dev->next_hrtimer = 0;
 }
 
 /**
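Note the write/clear bracketing above: dev->next_hrtimer is non-zero only while the CPU is inside cpuidle_enter(), so a consumer running on another CPU must read it with READ_ONCE() and treat zero as "CPU is not idle". A minimal reader sketch (cpu_power_down_ok() in domain_governor.c is the in-tree consumer; next_wakeup_of() is a hypothetical helper):

    /* Sketch of a cross-CPU reader of dev->next_hrtimer. */
    static ktime_t next_wakeup_of(int cpu)
    {
            struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

            /* Zero means the CPU is not currently in cpuidle_enter(). */
            return dev ? READ_ONCE(dev->next_hrtimer) : 0;
    }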
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -83,6 +83,7 @@ struct cpuidle_device {
 	unsigned int		use_deepest_state:1;
 	unsigned int		poll_time_limit:1;
 	unsigned int		cpu;
+	ktime_t			next_hrtimer;
 
 	int			last_residency;
 	struct cpuidle_state_usage	states_usage[CPUIDLE_STATE_MAX];
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -16,6 +16,7 @@
 #include <linux/of.h>
 #include <linux/notifier.h>
 #include <linux/spinlock.h>
+#include <linux/cpumask.h>
 
 /*
  * Flags to control the behaviour of a genpd.
@@ -42,11 +43,22 @@
  * GENPD_FLAG_ACTIVE_WAKEUP:	Instructs genpd to keep the PM domain powered
  *				on, in case any of its attached devices is used
  *				in the wakeup path to serve system wakeups.
+ *
+ * GENPD_FLAG_CPU_DOMAIN:	Instructs genpd that it should expect to get
+ *				devices attached, which may belong to CPUs or
+ *				possibly have subdomains with CPUs attached.
+ *				This flag enables the genpd backend driver to
+ *				deploy idle power management support for CPUs
+ *				and groups of CPUs. Note that, the backend
+ *				driver must then comply with the so called,
+ *				last-man-standing algorithm, for the CPUs in the
+ *				PM domain.
  */
 #define GENPD_FLAG_PM_CLK	 (1U << 0)
 #define GENPD_FLAG_IRQ_SAFE	 (1U << 1)
 #define GENPD_FLAG_ALWAYS_ON	 (1U << 2)
 #define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
+#define GENPD_FLAG_CPU_DOMAIN	 (1U << 4)
 
 enum gpd_status {
 	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
@@ -69,6 +81,7 @@ struct genpd_power_state {
 	s64 residency_ns;
 	struct fwnode_handle *fwnode;
 	ktime_t idle_time;
+	void *data;
 };
 
 struct genpd_lock_ops;
@@ -93,6 +106,7 @@ struct generic_pm_domain {
 	unsigned int suspended_count;	/* System suspend device counter */
 	unsigned int prepared_count;	/* Suspend counter of prepared devices */
 	unsigned int performance_state;	/* Aggregated max performance state */
+	cpumask_var_t cpus;		/* A cpumask of the attached CPUs */
 	int (*power_off)(struct generic_pm_domain *domain);
 	int (*power_on)(struct generic_pm_domain *domain);
 	struct opp_table *opp_table;	/* OPP table of the genpd */
@@ -104,15 +118,17 @@ struct generic_pm_domain {
 	s64 max_off_time_ns;	/* Maximum allowed "suspended" time. */
 	bool max_off_time_changed;
 	bool cached_power_down_ok;
+	bool cached_power_down_state_idx;
 	int (*attach_dev)(struct generic_pm_domain *domain,
 			  struct device *dev);
 	void (*detach_dev)(struct generic_pm_domain *domain,
 			   struct device *dev);
 	unsigned int flags;		/* Bit field of configs for genpd */
 	struct genpd_power_state *states;
+	void (*free_states)(struct genpd_power_state *states,
+			    unsigned int state_count);
 	unsigned int state_count; /* number of states */
 	unsigned int state_idx; /* state that genpd will go to when off */
-	void *free; /* Free the state that was allocated for default */
 	ktime_t on_time;
 	ktime_t accounting_time;
 	const struct genpd_lock_ops *lock_ops;
@@ -187,6 +203,9 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
 
 extern struct dev_power_governor simple_qos_governor;
 extern struct dev_power_governor pm_domain_always_on_gov;
+#ifdef CONFIG_CPU_IDLE
+extern struct dev_power_governor pm_domain_cpu_gov;
+#endif
 #else
 
 static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -425,6 +425,7 @@ void restore_processor_state(void);
 /* kernel/power/main.c */
 extern int register_pm_notifier(struct notifier_block *nb);
 extern int unregister_pm_notifier(struct notifier_block *nb);
+extern void ksys_sync_helper(void);
 
 #define pm_notifier(fn, pri) {				\
 	static struct notifier_block fn##_nb =			\
@@ -462,6 +463,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
 	return 0;
 }
 
+static inline void ksys_sync_helper(void) {}
+
 #define pm_notifier(fn, pri) do { (void)(fn); } while (0)
 
 static inline bool pm_wakeup_pending(void) { return false; }
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -122,6 +122,7 @@ extern void tick_nohz_idle_enter(void);
 extern void tick_nohz_idle_exit(void);
 extern void tick_nohz_irq_exit(void);
 extern bool tick_nohz_idle_got_tick(void);
+extern ktime_t tick_nohz_get_next_hrtimer(void);
 extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
 extern unsigned long tick_nohz_get_idle_calls(void);
 extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
@@ -145,7 +146,11 @@ static inline void tick_nohz_idle_restart_tick(void) { }
 static inline void tick_nohz_idle_enter(void) { }
 static inline void tick_nohz_idle_exit(void) { }
 static inline bool tick_nohz_idle_got_tick(void) { return false; }
+static inline ktime_t tick_nohz_get_next_hrtimer(void)
+{
+	/* Next wake up is the tick period, assume it starts now */
+	return ktime_add(ktime_get(), TICK_NSEC);
+}
 static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
 {
 	*delta_next = TICK_NSEC;
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -14,7 +14,6 @@
 
 #include <linux/export.h>
 #include <linux/suspend.h>
-#include <linux/syscalls.h>
 #include <linux/reboot.h>
 #include <linux/string.h>
 #include <linux/device.h>
@@ -709,9 +708,7 @@ int hibernate(void)
 		goto Exit;
 	}
 
-	pr_info("Syncing filesystems ... \n");
-	ksys_sync();
-	pr_info("done.\n");
+	ksys_sync_helper();
 
 	error = freeze_processes();
 	if (error)
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -16,6 +16,7 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/suspend.h>
+#include <linux/syscalls.h>
 
 #include "power.h"
 
@@ -51,6 +52,19 @@ void unlock_system_sleep(void)
 }
 EXPORT_SYMBOL_GPL(unlock_system_sleep);
 
+void ksys_sync_helper(void)
+{
+	ktime_t start;
+	long elapsed_msecs;
+
+	start = ktime_get();
+	ksys_sync();
+	elapsed_msecs = ktime_to_ms(ktime_sub(ktime_get(), start));
+	pr_info("Filesystems sync: %ld.%03ld seconds\n",
+		elapsed_msecs / MSEC_PER_SEC, elapsed_msecs % MSEC_PER_SEC);
+}
+EXPORT_SYMBOL_GPL(ksys_sync_helper);
+
 /* Routines for PM-transition notifications */
 
 static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
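With this helper in place, every sleep-entry path syncs filesystems through one timed call instead of open-coding ksys_sync() with its own "Syncing filesystems" messages, and the elapsed time is logged as seconds plus milliseconds (elapsed_msecs = 1028 prints as "1.028"). A sketch of a new call site, where my_enter_sleep() is hypothetical:

    #include <linux/suspend.h>

    static int my_enter_sleep(void)     /* hypothetical caller */
    {
            /* Syncs filesystems and logs a line like
             * "Filesystems sync: 0.028 seconds" (timing illustrative).
             */
            ksys_sync_helper();
            return 0;
    }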
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -17,7 +17,6 @@
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/cpuidle.h>
-#include <linux/syscalls.h>
 #include <linux/gfp.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
@@ -568,13 +567,11 @@ static int enter_state(suspend_state_t state)
 	if (state == PM_SUSPEND_TO_IDLE)
 		s2idle_begin();
 
-#ifndef CONFIG_SUSPEND_SKIP_SYNC
-	trace_suspend_resume(TPS("sync_filesystems"), 0, true);
-	pr_info("Syncing filesystems ... ");
-	ksys_sync();
-	pr_cont("done.\n");
-	trace_suspend_resume(TPS("sync_filesystems"), 0, false);
-#endif
+	if (!IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC)) {
+		trace_suspend_resume(TPS("sync_filesystems"), 0, true);
+		ksys_sync_helper();
+		trace_suspend_resume(TPS("sync_filesystems"), 0, false);
+	}
 
 	pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]);
 	pm_suspend_clear_flags();
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -10,7 +10,6 @@
  */
 
 #include <linux/suspend.h>
-#include <linux/syscalls.h>
 #include <linux/reboot.h>
 #include <linux/string.h>
 #include <linux/device.h>
@@ -228,9 +227,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		if (data->frozen)
 			break;
 
-		printk("Syncing filesystems ... ");
-		ksys_sync();
-		printk("done.\n");
+		ksys_sync_helper();
 
 		error = freeze_processes();
 		if (error)
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1022,6 +1022,18 @@ bool tick_nohz_idle_got_tick(void)
 	return false;
 }
 
+/**
+ * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
+ * or the tick, whatever that expires first. Note that, if the tick has been
+ * stopped, it returns the next hrtimer.
+ *
+ * Called from power state control code with interrupts disabled
+ */
+ktime_t tick_nohz_get_next_hrtimer(void)
+{
+	return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
+}
+
 /**
  * tick_nohz_get_sleep_length - return the expected length of the current sleep
  * @delta_next: duration until the next event if the tick cannot be stopped