From b10abd0a8859493a93c6b8020f2be2587557749d Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Wed, 20 Mar 2019 20:34:23 -0400 Subject: [PATCH 01/25] sched/cpufreq: Annotate cpufreq_update_util_data pointer with __rcu Recently I added an RCU annotation check to rcu_assign_pointer(). All pointers assigned to RCU protected data are to be annotated with __rcu in order to be able to use rcu_assign_pointer() similar to checks in other RCU APIs. This resulted in a sparse error: kernel//sched/cpufreq.c:41:9: sparse: error: incompatible types in comparison expression (different address spaces) Fix this by annotating cpufreq_update_util_data pointer with __rcu. This will also help sparse catch any future RCU misuse bugs. Signed-off-by: Joel Fernandes (Google) [ From an RCU perspective. ] Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Paul E. McKenney Cc: Josh Triplett Cc: Lai Jiangshan Cc: Linus Torvalds Cc: Luc Van Oostenryck Cc: Mathieu Desnoyers Cc: Mike Galbraith Cc: Morten Rasmussen Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Thomas Gleixner Cc: keescook@chromium.org Cc: kernel-hardening@lists.openwall.com Cc: kernel-team@android.com Link: https://lkml.kernel.org/r/20190321003426.160260-2-joel@joelfernandes.org Signed-off-by: Ingo Molnar --- kernel/sched/cpufreq.c | 2 +- kernel/sched/sched.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c index 835671f0f917..b5dcd1d83c7f 100644 --- a/kernel/sched/cpufreq.c +++ b/kernel/sched/cpufreq.c @@ -7,7 +7,7 @@ */ #include "sched.h" -DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data); +DEFINE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); /** * cpufreq_add_update_util_hook - Populate the CPU's update_util_data pointer. diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index efa686eeff26..713715dd00cf 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2185,7 +2185,7 @@ static inline u64 irq_time_read(int cpu) #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ #ifdef CONFIG_CPU_FREQ -DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data); +DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); /** * cpufreq_update_util - Take a note about CPU utilization changes. From 994aeb7a93e43d28f6074195ccb03a384342e1bf Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Wed, 20 Mar 2019 20:34:24 -0400 Subject: [PATCH 02/25] sched_domain: Annotate RCU pointers properly The scheduler uses RCU API in various places to access sched_domain pointers. These cause sparse errors as below. Many new errors show up because of an annotation check I added to rcu_assign_pointer(). Let us annotate the pointers correctly which also will help sparse catch any potential future bugs.
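[ For illustration only, a minimal sketch of the pattern sparse now enforces; the structure and function names below are made up and not taken from the kernel:

	#include <linux/rcupdate.h>

	struct foo { int val; };
	static struct foo __rcu *foo_ptr;	/* __rcu: pointer is RCU-protected */

	static void foo_publish(struct foo *newf)
	{
		/* sparse checks that foo_ptr carries the __rcu address space */
		rcu_assign_pointer(foo_ptr, newf);
	}

	static int foo_read_val(void)
	{
		struct foo *f;
		int ret = 0;

		rcu_read_lock();
		f = rcu_dereference(foo_ptr);	/* strips __rcu for the read side */
		if (f)
			ret = f->val;
		rcu_read_unlock();
		return ret;
	}

  Without the __rcu annotation on foo_ptr, the rcu_assign_pointer() call above is the kind of statement that produces the "incompatible types in comparison expression (different address spaces)" errors listed below. ]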
This fixes the following sparse errors: rt.c:1681:9: error: incompatible types in comparison expression deadline.c:1904:9: error: incompatible types in comparison expression core.c:519:9: error: incompatible types in comparison expression core.c:1634:17: error: incompatible types in comparison expression fair.c:6193:14: error: incompatible types in comparison expression fair.c:9883:22: error: incompatible types in comparison expression fair.c:9897:9: error: incompatible types in comparison expression sched.h:1287:9: error: incompatible types in comparison expression topology.c:612:9: error: incompatible types in comparison expression topology.c:615:9: error: incompatible types in comparison expression sched.h:1300:9: error: incompatible types in comparison expression topology.c:618:9: error: incompatible types in comparison expression sched.h:1287:9: error: incompatible types in comparison expression topology.c:621:9: error: incompatible types in comparison expression sched.h:1300:9: error: incompatible types in comparison expression topology.c:624:9: error: incompatible types in comparison expression topology.c:671:9: error: incompatible types in comparison expression stats.c:45:17: error: incompatible types in comparison expression fair.c:5998:15: error: incompatible types in comparison expression fair.c:5989:15: error: incompatible types in comparison expression fair.c:5998:15: error: incompatible types in comparison expression fair.c:5989:15: error: incompatible types in comparison expression fair.c:6120:19: error: incompatible types in comparison expression fair.c:6506:14: error: incompatible types in comparison expression fair.c:6515:14: error: incompatible types in comparison expression fair.c:6623:9: error: incompatible types in comparison expression fair.c:5970:17: error: incompatible types in comparison expression fair.c:8642:21: error: incompatible types in comparison expression fair.c:9253:9: error: incompatible types in comparison expression fair.c:9331:9: error: incompatible types in comparison expression fair.c:9519:15: error: incompatible types in comparison expression fair.c:9533:14: error: incompatible types in comparison expression fair.c:9542:14: error: incompatible types in comparison expression fair.c:9567:14: error: incompatible types in comparison expression fair.c:9597:14: error: incompatible types in comparison expression fair.c:9421:16: error: incompatible types in comparison expression fair.c:9421:16: error: incompatible types in comparison expression Signed-off-by: Joel Fernandes (Google) Signed-off-by: Peter Zijlstra (Intel) [ From an RCU perspective. ] Reviewed-by: Paul E. 
McKenney Cc: Josh Triplett Cc: Lai Jiangshan Cc: Linus Torvalds Cc: Luc Van Oostenryck Cc: Mathieu Desnoyers Cc: Mike Galbraith Cc: Morten Rasmussen Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Thomas Gleixner Cc: keescook@chromium.org Cc: kernel-hardening@lists.openwall.com Cc: kernel-team@android.com Link: https://lkml.kernel.org/r/20190321003426.160260-3-joel@joelfernandes.org Signed-off-by: Ingo Molnar --- include/linux/sched/topology.h | 4 ++-- kernel/sched/sched.h | 14 +++++++------- kernel/sched/topology.c | 10 +++++----- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 57c7ed3fe465..cfc0a89a7159 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -76,8 +76,8 @@ struct sched_domain_shared { struct sched_domain { /* These fields must be setup */ - struct sched_domain *parent; /* top domain must be null terminated */ - struct sched_domain *child; /* bottom domain must be null terminated */ + struct sched_domain __rcu *parent; /* top domain must be null terminated */ + struct sched_domain __rcu *child; /* bottom domain must be null terminated */ struct sched_group *groups; /* the balancing groups of the domain */ unsigned long min_interval; /* Minimum balance interval ms */ unsigned long max_interval; /* Maximum balance interval ms */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 713715dd00cf..2b452d68ab2e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -869,8 +869,8 @@ struct rq { atomic_t nr_iowait; #ifdef CONFIG_SMP - struct root_domain *rd; - struct sched_domain *sd; + struct root_domain *rd; + struct sched_domain __rcu *sd; unsigned long cpu_capacity; unsigned long cpu_capacity_orig; @@ -1324,13 +1324,13 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) return sd; } -DECLARE_PER_CPU(struct sched_domain *, sd_llc); +DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); DECLARE_PER_CPU(int, sd_llc_size); DECLARE_PER_CPU(int, sd_llc_id); -DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared); -DECLARE_PER_CPU(struct sched_domain *, sd_numa); -DECLARE_PER_CPU(struct sched_domain *, sd_asym_packing); -DECLARE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity); +DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); +DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); +DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); +DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); extern struct static_key_false sched_asym_cpucapacity; struct sched_group_capacity { diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index ab7f371a3a17..64bec54ded3e 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -615,13 +615,13 @@ static void destroy_sched_domains(struct sched_domain *sd) * the cpumask of the domain), this allows us to quickly tell if * two CPUs are in the same cache domain, see cpus_share_cache(). 
*/ -DEFINE_PER_CPU(struct sched_domain *, sd_llc); +DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc); DEFINE_PER_CPU(int, sd_llc_size); DEFINE_PER_CPU(int, sd_llc_id); -DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared); -DEFINE_PER_CPU(struct sched_domain *, sd_numa); -DEFINE_PER_CPU(struct sched_domain *, sd_asym_packing); -DEFINE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity); +DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); +DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa); +DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); +DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity); static void update_top_cache_domain(int cpu) From 03f4b48edae7d936c5bf23488c1ce3fbe7fc2733 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Wed, 20 Mar 2019 20:34:25 -0400 Subject: [PATCH 03/25] rcuwait: Annotate task_struct with __rcu This suppresses sparse error generated due to the recently added rcu_assign_pointer sparse check. percpu-rwsem.c:162:9: sparse: error: incompatible types in comparison expression exit.c:316:16: sparse: error: incompatible types in comparison expression Signed-off-by: Joel Fernandes (Google) Signed-off-by: Peter Zijlstra (Intel) [ From an RCU perspective. ] Reviewed-by: Paul E. McKenney Cc: Josh Triplett Cc: Lai Jiangshan Cc: Linus Torvalds Cc: Luc Van Oostenryck Cc: Mathieu Desnoyers Cc: Mike Galbraith Cc: Morten Rasmussen Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Thomas Gleixner Cc: keescook@chromium.org Cc: kernel-hardening@lists.openwall.com Cc: kernel-team@android.com Link: https://lkml.kernel.org/r/20190321003426.160260-4-joel@joelfernandes.org Signed-off-by: Ingo Molnar --- include/linux/rcuwait.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h index 90bfa3279a01..563290fc194f 100644 --- a/include/linux/rcuwait.h +++ b/include/linux/rcuwait.h @@ -18,7 +18,7 @@ * awoken. */ struct rcuwait { - struct task_struct *task; + struct task_struct __rcu *task; }; #define __RCUWAIT_INITIALIZER(name) \ From 7ba7319f9e3898101bff5d63cbae5a6cc174c8c9 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Wed, 20 Mar 2019 20:34:26 -0400 Subject: [PATCH 04/25] sched/core: Annotate perf_domain pointer with __rcu This fixes the following sparse errors in sched/fair.c: fair.c:6506:14: error: incompatible types in comparison expression fair.c:8642:21: error: incompatible types in comparison expression Using __rcu will also help sparse catch any future bugs. Signed-off-by: Joel Fernandes (Google) Signed-off-by: Peter Zijlstra (Intel) [ From an RCU perspective. ] Reviewed-by: Paul E. McKenney Cc: Josh Triplett Cc: Lai Jiangshan Cc: Linus Torvalds Cc: Luc Van Oostenryck Cc: Mathieu Desnoyers Cc: Mike Galbraith Cc: Morten Rasmussen Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Thomas Gleixner Cc: keescook@chromium.org Cc: kernel-hardening@lists.openwall.com Cc: kernel-team@android.com Link: https://lkml.kernel.org/r/20190321003426.160260-5-joel@joelfernandes.org Signed-off-by: Ingo Molnar --- kernel/sched/sched.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 2b452d68ab2e..b52ed1ada0be 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -780,7 +780,7 @@ struct root_domain { * NULL-terminated list of performance domains intersecting with the * CPUs of the rd. Protected by RCU. 
*/ - struct perf_domain *pd; + struct perf_domain __rcu *pd; }; extern struct root_domain def_root_domain; From 71b47eaf6fb29b7f9722dc1646c26eb8a96e0a6d Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 20 Mar 2019 21:38:39 +0800 Subject: [PATCH 05/25] sched/fair: Make sync_entity_load_avg() and remove_entity_load_avg() static Fix these sparse warnings: kernel/sched/fair.c:3570:6: warning: symbol 'sync_entity_load_avg' was not declared. Should it be static? kernel/sched/fair.c:3583:6: warning: symbol 'remove_entity_load_avg' was not declared. Should it be static? Signed-off-by: YueHaibing Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Link: https://lkml.kernel.org/r/20190320133839.21392-1-yuehaibing@huawei.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 40bd1e27b1b7..ed7f5f8107b7 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3567,7 +3567,7 @@ static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) * Synchronize entity load avg of dequeued entity without locking * the previous rq. */ -void sync_entity_load_avg(struct sched_entity *se) +static void sync_entity_load_avg(struct sched_entity *se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); u64 last_update_time; @@ -3580,7 +3580,7 @@ void sync_entity_load_avg(struct sched_entity *se) * Task first catches up with cfs_rq, and then subtract * itself from the cfs_rq (task must be off the queue now). */ -void remove_entity_load_avg(struct sched_entity *se) +static void remove_entity_load_avg(struct sched_entity *se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); unsigned long flags; From d8743230c9f4e92f370ecd2a90c680ddcede6ae5 Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Tue, 9 Apr 2019 18:35:46 +0100 Subject: [PATCH 06/25] sched/topology: Fix build_sched_groups() comment The comment was introduced (pre 2.6.12) by: 8a7a2318dc07 ("[PATCH] sched: consolidate sched domains") and referred to sched_group->cpu_power. This was folded into sched_group->sched_group_power in commit 9c3f75cbd144 ("sched: Break out cpu_power from the sched_group structure") The comment was then updated in: ced549fa5fc1 ("sched: Remove remaining dubious usage of "power"") but should have replaced "sg->cpu_capacity" with "sg->sched_group_capacity". Do that now. Signed-off-by: Valentin Schneider Cc: Dietmar.Eggemann@arm.com Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: morten.rasmussen@arm.com Cc: qais.yousef@arm.com Link: http://lkml.kernel.org/r/20190409173546.4747-3-valentin.schneider@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/topology.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 64bec54ded3e..90e1a870fb0d 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1087,8 +1087,8 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd) /* * build_sched_groups will build a circular linked list of the groups - * covered by the given span, and will set each group's ->cpumask correctly, - * and ->cpu_capacity to 0. + * covered by the given span, will set each group's ->cpumask correctly, + * and will initialize their ->sgc.
* * Assumes the sched_domain tree is fully constructed */ From 67d4f6ff2fb69e02bd6365a91ca3939b7a14deac Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Tue, 9 Apr 2019 18:35:45 +0100 Subject: [PATCH 07/25] sched/topology: Skip duplicate group rewrites in build_sched_groups() While staring at build_sched_domains(), I realized that get_group() does several duplicate (thus useless) writes. If you take the Arm Juno r0 (LITTLEs = [0, 3, 4, 5], bigs = [1, 2]), the sched_group build flow would look like this: ('MC[cpu]->sg' means 'per_cpu_ptr(&tl->data->sg, cpu)' with 'tl == MC') build_sched_groups(MC[CPU0]->sd, CPU0) get_group(0) -> MC[CPU0]->sg get_group(3) -> MC[CPU3]->sg get_group(4) -> MC[CPU4]->sg get_group(5) -> MC[CPU5]->sg build_sched_groups(DIE[CPU0]->sd, CPU0) get_group(0) -> DIE[CPU0]->sg get_group(1) -> DIE[CPU1]->sg <=================+ | build_sched_groups(MC[CPU1]->sd, CPU1) | get_group(1) -> MC[CPU1]->sg | get_group(2) -> MC[CPU2]->sg | | build_sched_groups(DIE[CPU1]->sd, CPU1) ^ get_group(1) -> DIE[CPU1]->sg } We've set up these two up here! get_group(3) -> DIE[CPU0]->sg } From this point on, we will only use sched_groups that have been previously visited & initialized. The only new operation will be which group pointer we affect to sd->groups. On the Juno r0 we get 32 get_group() calls, every single one of them writing to a sched_group->cpumask. However, all of the data structures we need are set up after 8 visits (see above). Return early from get_group() if we've already visited (and thus initialized) the sched_group we're looking at. Overlapping domains are not affected as they do not use build_sched_groups(). Tested on a Juno and a 2 * (Xeon E5-2690) system. ( FWIW I initially checked the refs for both sg && sg->sgc, but figured if they weren't both 0 or > 1 then something must have gone wrong, so I threw in a WARN_ON(). ) No change in functionality intended. Signed-off-by: Valentin Schneider Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/topology.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 90e1a870fb0d..c65b31e9458b 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1059,6 +1059,7 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd) struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); struct sched_domain *child = sd->child; struct sched_group *sg; + bool already_visited; if (child) cpu = cpumask_first(sched_domain_span(child)); @@ -1066,9 +1067,14 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd) sg = *per_cpu_ptr(sdd->sg, cpu); sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); - /* For claim_allocations: */ - atomic_inc(&sg->ref); - atomic_inc(&sg->sgc->ref); + /* Increase refcounts for claim_allocations: */ + already_visited = atomic_inc_return(&sg->ref) > 1; + /* sgc visits should follow a similar trend as sg */ + WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1)); + + /* If we have already visited that group, it's already initialized. 
*/ + if (already_visited) + return sg; if (child) { cpumask_copy(sched_group_span(sg), sched_domain_span(child)); From e2abb398115e9c33f3d1e25bf6d1d08badc58b13 Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Tue, 16 Apr 2019 15:06:21 +0100 Subject: [PATCH 08/25] sched/fair: Remove unneeded prototype of capacity_of() The prototype of that function was already hoisted up in: commit 3b1baa6496e6 ("sched/fair: Add 'group_misfit_task' load-balance type") but that seems to have been missed. Get rid of the extra prototype. Signed-off-by: Valentin Schneider Acked-by: Quentin Perret Cc: Dietmar.Eggemann@arm.com Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: morten.rasmussen@arm.com Fixes: 2802bf3cd936 ("sched/fair: Add over-utilization/tipping point indicator") Link: http://lkml.kernel.org/r/20190416140621.19884-1-valentin.schneider@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ed7f5f8107b7..b6cc0703b850 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5116,7 +5116,6 @@ static inline void hrtick_update(struct rq *rq) #ifdef CONFIG_SMP static inline unsigned long cpu_util(int cpu); -static unsigned long capacity_of(int cpu); static inline bool cpu_overutilized(int cpu) { From 6d25be5782e482eb93e3de0c94d0a517879377d0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Mar 2019 17:55:48 +0100 Subject: [PATCH 09/25] sched/core, workqueues: Disentangle worker accounting from rq lock The worker accounting for CPU bound workers is plugged into the core scheduler code and the wakeup code. This is not a hard requirement and can be avoided by keeping track of the state in the workqueue code itself. Keep track of the sleeping state in the worker itself and call the notifier before entering the core scheduler. There might be false positives when the task is woken between that call and actually scheduling, but that's not really different from scheduling and being woken immediately after switching away. When nr_running is updated when the task is returning from schedule() then it is later compared when it is done from ttwu(). [ bigeasy: preempt_disable() around wq_worker_sleeping() by Daniel Bristot de Oliveira ] Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra (Intel) Acked-by: Tejun Heo Cc: Daniel Bristot de Oliveira Cc: Lai Jiangshan Cc: Linus Torvalds Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/ad2b29b5715f970bffc1a7026cabd6ff0b24076a.1532952814.git.bristot@redhat.com Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 88 +++++++++---------------------------- kernel/workqueue.c | 54 ++++++++++------------- kernel/workqueue_internal.h | 5 ++- 3 files changed, 48 insertions(+), 99 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4778c48a7fda..6184a0856aab 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1685,10 +1685,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl { activate_task(rq, p, en_flags); p->on_rq = TASK_ON_RQ_QUEUED; - - /* If a worker is waking up, notify the workqueue: */ - if (p->flags & PF_WQ_WORKER) - wq_worker_waking_up(p, cpu_of(rq)); } /* @@ -2106,56 +2102,6 @@ out: return success; } -/** - * try_to_wake_up_local - try to wake up a local task with rq lock held - * @p: the thread to be awakened - * @rf: request-queue flags for pinning - * - * Put @p on the run-queue if it's not already there.
The caller must - * ensure that this_rq() is locked, @p is bound to this_rq() and not - * the current task. - */ -static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf) -{ - struct rq *rq = task_rq(p); - - if (WARN_ON_ONCE(rq != this_rq()) || - WARN_ON_ONCE(p == current)) - return; - - lockdep_assert_held(&rq->lock); - - if (!raw_spin_trylock(&p->pi_lock)) { - /* - * This is OK, because current is on_cpu, which avoids it being - * picked for load-balance and preemption/IRQs are still - * disabled avoiding further scheduler activity on it and we've - * not yet picked a replacement task. - */ - rq_unlock(rq, rf); - raw_spin_lock(&p->pi_lock); - rq_relock(rq, rf); - } - - if (!(p->state & TASK_NORMAL)) - goto out; - - trace_sched_waking(p); - - if (!task_on_rq_queued(p)) { - if (p->in_iowait) { - delayacct_blkio_end(p); - atomic_dec(&rq->nr_iowait); - } - ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK); - } - - ttwu_do_wakeup(rq, p, 0, rf); - ttwu_stat(p, smp_processor_id(), 0); -out: - raw_spin_unlock(&p->pi_lock); -} - /** * wake_up_process - Wake up a specific process * @p: The process to be woken up. @@ -3472,19 +3418,6 @@ static void __sched notrace __schedule(bool preempt) atomic_inc(&rq->nr_iowait); delayacct_blkio_start(); } - - /* - * If a worker went to sleep, notify and ask workqueue - * whether it wants to wake up a task to maintain - * concurrency. - */ - if (prev->flags & PF_WQ_WORKER) { - struct task_struct *to_wakeup; - - to_wakeup = wq_worker_sleeping(prev); - if (to_wakeup) - try_to_wake_up_local(to_wakeup, &rf); - } } switch_count = &prev->nvcsw; } @@ -3544,6 +3477,20 @@ static inline void sched_submit_work(struct task_struct *tsk) { if (!tsk->state || tsk_is_pi_blocked(tsk)) return; + + /* + * If a worker went to sleep, notify and ask workqueue whether + * it wants to wake up a task to maintain concurrency. + * As this function is called inside the schedule() context, + * we disable preemption to avoid it calling schedule() again + * in the possible wakeup of a kworker. + */ + if (tsk->flags & PF_WQ_WORKER) { + preempt_disable(); + wq_worker_sleeping(tsk); + preempt_enable_no_resched(); + } + /* * If we are going to sleep and we have plugged IO queued, * make sure to submit it to avoid deadlocks. @@ -3552,6 +3499,12 @@ static inline void sched_submit_work(struct task_struct *tsk) blk_schedule_flush_plug(tsk); } +static void sched_update_worker(struct task_struct *tsk) +{ + if (tsk->flags & PF_WQ_WORKER) + wq_worker_running(tsk); +} + asmlinkage __visible void __sched schedule(void) { struct task_struct *tsk = current; @@ -3562,6 +3515,7 @@ asmlinkage __visible void __sched schedule(void) __schedule(false); sched_preempt_enable_no_resched(); } while (need_resched()); + sched_update_worker(tsk); } EXPORT_SYMBOL(schedule); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ddee541ea97a..56180c9286f5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -841,43 +841,32 @@ static void wake_up_worker(struct worker_pool *pool) } /** - * wq_worker_waking_up - a worker is waking up + * wq_worker_running - a worker is running again * @task: task waking up - * @cpu: CPU @task is waking up to * - * This function is called during try_to_wake_up() when a worker is - * being awoken. 
- * - * CONTEXT: - * spin_lock_irq(rq->lock) + * This function is called when a worker returns from schedule() */ -void wq_worker_waking_up(struct task_struct *task, int cpu) +void wq_worker_running(struct task_struct *task) { struct worker *worker = kthread_data(task); - if (!(worker->flags & WORKER_NOT_RUNNING)) { - WARN_ON_ONCE(worker->pool->cpu != cpu); + if (!worker->sleeping) + return; + if (!(worker->flags & WORKER_NOT_RUNNING)) atomic_inc(&worker->pool->nr_running); - } + worker->sleeping = 0; } /** * wq_worker_sleeping - a worker is going to sleep * @task: task going to sleep * - * This function is called during schedule() when a busy worker is - * going to sleep. Worker on the same cpu can be woken up by - * returning pointer to its task. - * - * CONTEXT: - * spin_lock_irq(rq->lock) - * - * Return: - * Worker task on @cpu to wake up, %NULL if none. + * This function is called from schedule() when a busy worker is + * going to sleep. */ -struct task_struct *wq_worker_sleeping(struct task_struct *task) +void wq_worker_sleeping(struct task_struct *task) { - struct worker *worker = kthread_data(task), *to_wakeup = NULL; + struct worker *next, *worker = kthread_data(task); struct worker_pool *pool; /* @@ -886,13 +875,15 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task) * checking NOT_RUNNING. */ if (worker->flags & WORKER_NOT_RUNNING) - return NULL; + return; pool = worker->pool; - /* this can only happen on the local cpu */ - if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id())) - return NULL; + if (WARN_ON_ONCE(worker->sleeping)) + return; + + worker->sleeping = 1; + spin_lock_irq(&pool->lock); /* * The counterpart of the following dec_and_test, implied mb, @@ -906,9 +897,12 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task) * lock is safe. */ if (atomic_dec_and_test(&pool->nr_running) && - !list_empty(&pool->worklist)) - to_wakeup = first_idle_worker(pool); - return to_wakeup ? to_wakeup->task : NULL; + !list_empty(&pool->worklist)) { + next = first_idle_worker(pool); + if (next) + wake_up_process(next->task); + } + spin_unlock_irq(&pool->lock); } /** @@ -4929,7 +4923,7 @@ static void rebind_workers(struct worker_pool *pool) * * WRITE_ONCE() is necessary because @worker->flags may be * tested without holding any lock in - * wq_worker_waking_up(). Without it, NOT_RUNNING test may + * wq_worker_running(). Without it, NOT_RUNNING test may * fail incorrectly leading to premature concurrency * management operations. */ diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h index cb68b03ca89a..498de0e909a4 100644 --- a/kernel/workqueue_internal.h +++ b/kernel/workqueue_internal.h @@ -44,6 +44,7 @@ struct worker { unsigned long last_active; /* L: last active timestamp */ unsigned int flags; /* X: flags */ int id; /* I: worker id */ + int sleeping; /* None */ /* * Opaque string set with work_set_desc(). Printed out with task @@ -72,8 +73,8 @@ static inline struct worker *current_wq_worker(void) * Scheduler hooks for concurrency managed workqueue. Only to be used from * sched/ and workqueue.c. 
*/ -void wq_worker_waking_up(struct task_struct *task, int cpu); -struct task_struct *wq_worker_sleeping(struct task_struct *task); +void wq_worker_running(struct task_struct *task); +void wq_worker_sleeping(struct task_struct *task); work_func_t wq_worker_last_func(struct task_struct *task); #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ From 1b174a2cb67a3a156d5a28426ae14241e6dfa655 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 9 Apr 2019 09:53:13 +0200 Subject: [PATCH 10/25] sched/core: Remove ttwu_activate() After the removal of try_to_wake_up_local(), there is only one user of ttwu_activate() left, and since it is a trivial function, remove it. Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 6184a0856aab..3feb83df322e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1681,12 +1681,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags) __schedstat_inc(p->se.statistics.nr_wakeups_sync); } -static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) -{ - activate_task(rq, p, en_flags); - p->on_rq = TASK_ON_RQ_QUEUED; -} - /* * Mark the task runnable and perform wakeup-preemption. */ @@ -1738,7 +1732,8 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, en_flags |= ENQUEUE_MIGRATED; #endif - ttwu_activate(rq, p, en_flags); + activate_task(rq, p, en_flags); + p->on_rq = TASK_ON_RQ_QUEUED; ttwu_do_wakeup(rq, p, wake_flags, rf); } From 7dd7788411646c9619aa6495f832bc0a9b0146b5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 9 Apr 2019 09:59:05 +0200 Subject: [PATCH 11/25] sched/core: Unify p->on_rq updates Almost all {,de}activate_task() invocations pair with p->on_rq updates, the exception being the usage in rt/deadline which hold both rq locks and therefore don't strictly need to set TASK_ON_RQ_MIGRATING, but it is harmless if we do anyway. Put the updates in {,de}activate_task() and cut down on repetition. Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 9 ++++----- kernel/sched/fair.c | 2 -- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3feb83df322e..f4838b78b9f9 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -792,10 +792,14 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags) rq->nr_uninterruptible--; enqueue_task(rq, p, flags); + + p->on_rq = TASK_ON_RQ_QUEUED; } void deactivate_task(struct rq *rq, struct task_struct *p, int flags) { + p->on_rq = (flags & DEQUEUE_SLEEP) ? 
0 : TASK_ON_RQ_MIGRATING; + if (task_contributes_to_load(p)) rq->nr_uninterruptible++; @@ -1237,11 +1241,9 @@ static void __migrate_swap_task(struct task_struct *p, int cpu) rq_pin_lock(src_rq, &srf); rq_pin_lock(dst_rq, &drf); - p->on_rq = TASK_ON_RQ_MIGRATING; deactivate_task(src_rq, p, 0); set_task_cpu(p, cpu); activate_task(dst_rq, p, 0); - p->on_rq = TASK_ON_RQ_QUEUED; check_preempt_curr(dst_rq, p, 0); rq_unpin_lock(dst_rq, &drf); @@ -1733,7 +1735,6 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, #endif activate_task(rq, p, en_flags); - p->on_rq = TASK_ON_RQ_QUEUED; ttwu_do_wakeup(rq, p, wake_flags, rf); } @@ -2408,7 +2409,6 @@ void wake_up_new_task(struct task_struct *p) post_init_entity_util_avg(p); activate_task(rq, p, ENQUEUE_NOCLOCK); - p->on_rq = TASK_ON_RQ_QUEUED; trace_sched_wakeup_new(p); check_preempt_curr(rq, p, WF_FORK); #ifdef CONFIG_SMP @@ -3407,7 +3407,6 @@ static void __sched notrace __schedule(bool preempt) prev->state = TASK_RUNNING; } else { deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); - prev->on_rq = 0; if (prev->in_iowait) { atomic_inc(&rq->nr_iowait); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index b6cc0703b850..e5b100b6ba4e 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7491,7 +7491,6 @@ static void detach_task(struct task_struct *p, struct lb_env *env) { lockdep_assert_held(&env->src_rq->lock); - p->on_rq = TASK_ON_RQ_MIGRATING; deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); set_task_cpu(p, env->dst_cpu); } @@ -7627,7 +7626,6 @@ static void attach_task(struct rq *rq, struct task_struct *p) BUG_ON(task_rq(p) != rq); activate_task(rq, p, ENQUEUE_NOCLOCK); - p->on_rq = TASK_ON_RQ_QUEUED; check_preempt_curr(rq, p, 0); } From b1546edcf2aab710a5afc98d65c948a4bfac0353 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 18 Apr 2019 22:47:13 +0800 Subject: [PATCH 12/25] sched/core: Make some functions static Fix these sparse warnings: kernel/sched/core.c:6577:11: warning: symbol 'min_cfs_quota_period' was not declared. Should it be static? kernel/sched/core.c:6657:5: warning: symbol 'tg_set_cfs_quota' was not declared. Should it be static? kernel/sched/core.c:6670:6: warning: symbol 'tg_get_cfs_quota' was not declared. Should it be static? kernel/sched/core.c:6683:5: warning: symbol 'tg_set_cfs_period' was not declared. Should it be static? kernel/sched/core.c:6693:6: warning: symbol 'tg_get_cfs_period' was not declared. Should it be static? kernel/sched/fair.c:2596:6: warning: symbol 'task_tick_numa' was not declared. Should it be static? 
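[ For illustration, the general shape of this class of fix, using a hypothetical file and symbol rather than the ones above: sparse warns about a symbol with external linkage that has no prototype in any header; if the symbol is only used inside its own translation unit, giving it internal linkage silences the warning:

	/* before: sparse says "symbol 'update_stats' was not declared. Should it be static?" */
	void update_stats(void) { }

	/* after: only used in this file, so give it internal linkage */
	static void update_stats(void) { }
]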
Signed-off-by: YueHaibing Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20190418144713.34332-1-yuehaibing@huawei.com Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 10 +++++----- kernel/sched/fair.c | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f4838b78b9f9..8b64ef0d5589 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6522,7 +6522,7 @@ static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, static DEFINE_MUTEX(cfs_constraints_mutex); const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ -const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ +static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); @@ -6602,7 +6602,7 @@ out_unlock: return ret; } -int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) +static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) { u64 quota, period; @@ -6615,7 +6615,7 @@ int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) return tg_set_cfs_bandwidth(tg, period, quota); } -long tg_get_cfs_quota(struct task_group *tg) +static long tg_get_cfs_quota(struct task_group *tg) { u64 quota_us; @@ -6628,7 +6628,7 @@ long tg_get_cfs_quota(struct task_group *tg) return quota_us; } -int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) +static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) { u64 quota, period; @@ -6638,7 +6638,7 @@ int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) return tg_set_cfs_bandwidth(tg, period, quota); } -long tg_get_cfs_period(struct task_group *tg) +static long tg_get_cfs_period(struct task_group *tg) { u64 cfs_period_us; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index e5b100b6ba4e..13bafe350abf 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2593,7 +2593,7 @@ out: /* * Drive the periodic memory faults.. */ -void task_tick_numa(struct rq *rq, struct task_struct *curr) +static void task_tick_numa(struct rq *rq, struct task_struct *curr) { struct callback_head *work = &curr->numa_work; u64 period, now; From bee9853932e90ce94bce4276ec6b7b06bc48070b Mon Sep 17 00:00:00 2001 From: Joel Savitz Date: Wed, 6 Mar 2019 20:13:33 -0500 Subject: [PATCH 13/25] sched/core: Fix typo in comment Signed-off-by: Joel Savitz Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: trivial@kernel.org Link: http://lkml.kernel.org/r/1551921213-813-1-git-send-email-jsavitz@redhat.com Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8b64ef0d5589..fb09eaad1d3a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -924,7 +924,7 @@ static inline bool is_per_cpu_kthread(struct task_struct *p) } /* - * Per-CPU kthreads are allowed to run on !actie && online CPUs, see + * Per-CPU kthreads are allowed to run on !active && online CPUs, see * __set_cpus_allowed_ptr() and select_fallback_rq(). 
*/ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) From 1a010e29cfa00fee2888fd2fd4983f848cbafb58 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Wed, 27 Feb 2019 11:10:17 +0300 Subject: [PATCH 14/25] sched/rt: Check integer overflow at usec to nsec conversion Example of unhandled overflows: # echo 18446744073709651 > cpu.rt_runtime_us # cat cpu.rt_runtime_us 99 # echo 18446744073709900 > cpu.rt_period_us # cat cpu.rt_period_us 348 After this patch they will fail with -EINVAL. Signed-off-by: Konstantin Khlebnikov Acked-by: Peter Zijlstra Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/155125501739.293431.5252197504404771496.stgit@buzz Signed-off-by: Ingo Molnar --- kernel/sched/rt.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 90fa23d36565..1e6b909dca36 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2555,6 +2555,8 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; if (rt_runtime_us < 0) rt_runtime = RUNTIME_INF; + else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC) + return -EINVAL; return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); } @@ -2575,6 +2577,9 @@ int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us) { u64 rt_runtime, rt_period; + if (rt_period_us > U64_MAX / NSEC_PER_USEC) + return -EINVAL; + rt_period = rt_period_us * NSEC_PER_USEC; rt_runtime = tg->rt_bandwidth.rt_runtime; From 5b61d50ab4ef590f5e1d4df15cd2cea5f5715308 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Wed, 27 Feb 2019 11:10:18 +0300 Subject: [PATCH 15/25] sched/core: Handle overflow in cpu_shares_write_u64 Bit shift in scale_load() could overflow shares. This patch saturates it to MAX_SHARES like the following sched_group_set_shares(). Example: # echo 9223372036854776832 > cpu.shares # cat cpu.shares Before patch: 1024 After patch: 262144 Signed-off-by: Konstantin Khlebnikov Acked-by: Peter Zijlstra Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/155125501891.293431.3345233332801109696.stgit@buzz Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index fb09eaad1d3a..685b1541ce51 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6507,6 +6507,8 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset) static int cpu_shares_write_u64(struct cgroup_subsys_state *css, struct cftype *cftype, u64 shareval) { + if (shareval > scale_load_down(ULONG_MAX)) + shareval = MAX_SHARES; return sched_group_set_shares(css_tg(css), scale_load(shareval)); } From 1a8b4540db732ca16c9e43ac7c08b1b8f0b252d8 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Wed, 27 Feb 2019 11:10:20 +0300 Subject: [PATCH 16/25] sched/core: Check quota and period overflow at usec to nsec conversion Large values could overflow u64 and pass the following sanity checks. # echo 18446744073750000 > cpu.cfs_period_us # cat cpu.cfs_period_us 40448 # echo 18446744073750000 > cpu.cfs_quota_us # cat cpu.cfs_quota_us 40448 After this patch they will fail with -EINVAL.
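[ For illustration, the guard pattern used by this and the two preceding patches, written as a stand-alone helper; the helper name is made up, while U64_MAX and NSEC_PER_USEC come from the usual kernel headers:

	/* Convert microseconds to nanoseconds, rejecting values that would overflow u64. */
	static int usecs_to_nsecs_checked(u64 us, u64 *ns)
	{
		if (us > U64_MAX / NSEC_PER_USEC)
			return -EINVAL;

		*ns = us * NSEC_PER_USEC;
		return 0;
	}

  Comparing against U64_MAX / NSEC_PER_USEC before multiplying is what keeps the conversion from silently wrapping, which is exactly what the shell examples above demonstrate. ]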
Signed-off-by: Konstantin Khlebnikov Acked-by: Peter Zijlstra Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/155125502079.293431.3947497929372138600.stgit@buzz Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 685b1541ce51..de8ab411826c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6611,8 +6611,10 @@ static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) period = ktime_to_ns(tg->cfs_bandwidth.period); if (cfs_quota_us < 0) quota = RUNTIME_INF; - else + else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) quota = (u64)cfs_quota_us * NSEC_PER_USEC; + else + return -EINVAL; return tg_set_cfs_bandwidth(tg, period, quota); } @@ -6634,6 +6636,9 @@ static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) { u64 quota, period; + if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) + return -EINVAL; + period = (u64)cfs_period_us * NSEC_PER_USEC; quota = tg->cfs_bandwidth.quota; From b6fbbf31d15b5072250ec6ed79e415a1160e5621 Mon Sep 17 00:00:00 2001 From: Juri Lelli Date: Wed, 19 Dec 2018 14:34:44 +0100 Subject: [PATCH 17/25] cgroup/cpuset: Update stale generate_sched_domains() comments Commit: fc560a26acce ("cpuset: replace cpuset->stack_list with cpuset_for_each_descendant_pre()") removed the local list (q) that was used to perform a top-down scan of all cpusets; however, comments mentioning it were not updated. Update comments to reflect current implementation. Signed-off-by: Juri Lelli Acked-by: Tejun Heo Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: cgroups@vger.kernel.org Cc: lizefan@huawei.com Link: http://lkml.kernel.org/r/20181219133445.31982-1-juri.lelli@redhat.com Signed-off-by: Ingo Molnar --- kernel/cgroup/cpuset.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 4834c4214e9c..6a1942ed781c 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -740,11 +740,10 @@ static inline int nr_cpusets(void) * Must be called with cpuset_mutex held. * * The three key local variables below are: - * q - a linked-list queue of cpuset pointers, used to implement a - * top-down scan of all cpusets. This scan loads a pointer - * to each cpuset marked is_sched_load_balance into the - * array 'csa'. For our purposes, rebuilding the schedulers - * sched domains, we can ignore !is_sched_load_balance cpusets. + * cp - cpuset pointer, used (together with pos_css) to perform a + * top-down scan of all cpusets. For our purposes, rebuilding + * the schedulers sched domains, we can ignore !is_sched_load_ + * balance cpusets. 
* csa - (for CpuSet Array) Array of pointers to all the cpusets * that need to be load balanced, for convenient iterative * access by the subsequent code that finds the best partition, @@ -775,7 +774,7 @@ static inline int nr_cpusets(void) static int generate_sched_domains(cpumask_var_t **domains, struct sched_domain_attr **attributes) { - struct cpuset *cp; /* scans q */ + struct cpuset *cp; /* top-down scan of cpusets */ struct cpuset **csa; /* array of all cpuset ptrs */ int csn; /* how many cpuset ptrs in csa so far */ int i, j, k; /* indices for partition finding loops */ From cb0c04143b6196f4a479ba113706329fc667ee15 Mon Sep 17 00:00:00 2001 From: Juri Lelli Date: Wed, 19 Dec 2018 14:34:45 +0100 Subject: [PATCH 18/25] sched/topology: Update init_sched_domains() comment Holding hotplug lock is not a requirement anymore for callers of sched_ init_domains after commit: 6acce3ef8452 ("sched: Remove get_online_cpus() usage") Update the relative comment preceding init_sched_domains(). Signed-off-by: Juri Lelli Acked-by: Tejun Heo Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: cgroups@vger.kernel.org Cc: lizefan@huawei.com Link: http://lkml.kernel.org/r/20181219133445.31982-2-juri.lelli@redhat.com Signed-off-by: Ingo Molnar --- kernel/sched/topology.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index c65b31e9458b..f53f89df837d 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -2081,9 +2081,8 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) } /* - * Set up scheduler domains and groups. Callers must hold the hotplug lock. - * For now this just excludes isolated CPUs, but could be used to - * exclude other special cases in the future. + * Set up scheduler domains and groups. For now this just excludes isolated + * CPUs, but could be used to exclude other special cases in the future. */ int sched_init_domains(const struct cpumask *cpu_map) { From ad2e379def135ebc079f89a0e0b1d987d243f949 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 28 Nov 2018 15:23:50 +0000 Subject: [PATCH 19/25] sched/debug: Fix spelling mistake "logaritmic" -> "logarithmic" Signed-off-by: Colin Ian King Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: kernel-janitors@vger.kernel.org Link: http://lkml.kernel.org/r/20181128152350.13622-1-colin.king@canonical.com Signed-off-by: Ingo Molnar --- kernel/sched/debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 8039d62ae36e..678bfb9bd87f 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -702,7 +702,7 @@ do { \ static const char *sched_tunable_scaling_names[] = { "none", - "logaritmic", + "logarithmic", "linear" }; From 9b019acb72e4b5741d88e8936d6f200ed44b66b2 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 12 Apr 2019 14:26:13 +1000 Subject: [PATCH 20/25] sched/nohz: Run NOHZ idle load balancer on HK_FLAG_MISC CPUs The NOHZ idle balancer runs on the lowest idle CPU. This can interfere with isolated CPUs, so confine it to HK_FLAG_MISC housekeeping CPUs. HK_FLAG_SCHED is not used for this because it is not set anywhere at the moment. This could be folded into HK_FLAG_SCHED once that option is fixed. The problem was observed with increased jitter on an application running on CPU0, caused by NOHZ idle load balancing being run on CPU1 (an SMT sibling). 
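[ For illustration, the selection pattern this patch applies, shown as a stand-alone helper; the function name and the idle-mask parameter are made up, assuming a kernel context:

	#include <linux/sched/isolation.h>

	/* Return the first idle CPU that is also a HK_FLAG_MISC housekeeping CPU. */
	static int find_idle_housekeeping_cpu(const struct cpumask *idle_mask)
	{
		int cpu;

		for_each_cpu_and(cpu, idle_mask, housekeeping_cpumask(HK_FLAG_MISC)) {
			if (idle_cpu(cpu))
				return cpu;
		}

		return nr_cpu_ids;	/* no eligible CPU */
	}

  Intersecting with housekeeping_cpumask(HK_FLAG_MISC) keeps the NOHZ idle balancer off isolated (e.g. nohz_full) CPUs, which is the jitter source described above. ]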
Signed-off-by: Nicholas Piggin Signed-off-by: Peter Zijlstra (Intel) Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: https://lkml.kernel.org/r/20190412042613.28930-1-npiggin@gmail.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 13bafe350abf..7b0da7007da3 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9519,22 +9519,26 @@ static inline int on_null_domain(struct rq *rq) * - When one of the busy CPUs notice that there may be an idle rebalancing * needed, they will kick the idle load balancer, which then does idle * load balancing for all the idle CPUs. + * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED not set + * anywhere yet. */ static inline int find_new_ilb(void) { - int ilb = cpumask_first(nohz.idle_cpus_mask); + int ilb; - if (ilb < nr_cpu_ids && idle_cpu(ilb)) - return ilb; + for_each_cpu_and(ilb, nohz.idle_cpus_mask, + housekeeping_cpumask(HK_FLAG_MISC)) { + if (idle_cpu(ilb)) + return ilb; + } return nr_cpu_ids; } /* - * Kick a CPU to do the nohz balancing, if it is time for it. We pick the - * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle - * CPU (if there is one). + * Kick a CPU to do the nohz balancing, if it is time for it. We pick any + * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one). */ static void kick_ilb(unsigned int flags) { From 77a5352ba977d2554643e3797e10823d0d03dcf7 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 11 Apr 2019 13:34:44 +1000 Subject: [PATCH 21/25] sched/core: Allow the remote scheduler tick to be started on CPU0 This has no effect yet because CPU0 will always be a housekeeping CPU until a later change. Signed-off-by: Nicholas Piggin Signed-off-by: Peter Zijlstra (Intel) Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rafael J . Wysocki Cc: Thomas Gleixner Cc: linuxppc-dev@lists.ozlabs.org Link: https://lkml.kernel.org/r/20190411033448.20842-2-npiggin@gmail.com Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index de8ab411826c..cef22c5499a8 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5866,7 +5866,7 @@ void __init sched_init_smp(void) static int __init migration_init(void) { - sched_rq_cpu_starting(smp_processor_id()); + sched_cpu_starting(smp_processor_id()); return 0; } early_initcall(migration_init); From 2f1a6fbbef7781382850c3104ecb658f21b5d460 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 11 Apr 2019 13:34:45 +1000 Subject: [PATCH 22/25] power/suspend: Add function to disable secondaries for suspend This adds a function to disable secondary CPUs for suspend that are not necessarily non-zero / non-boot CPUs. Platforms will be able to use this to suspend using non-zero CPUs. Signed-off-by: Nicholas Piggin Signed-off-by: Peter Zijlstra (Intel) Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rafael J . 
Wysocki Cc: Thomas Gleixner Cc: linuxppc-dev@lists.ozlabs.org Link: https://lkml.kernel.org/r/20190411033448.20842-3-npiggin@gmail.com Signed-off-by: Ingo Molnar --- include/linux/cpu.h | 12 ++++++++++++ kernel/kexec_core.c | 4 ++-- kernel/power/hibernate.c | 12 ++++++------ kernel/power/suspend.c | 4 ++-- 4 files changed, 22 insertions(+), 10 deletions(-) diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 5041357d0297..7ab2f09c0a14 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -137,9 +137,21 @@ static inline int disable_nonboot_cpus(void) return freeze_secondary_cpus(0); } extern void enable_nonboot_cpus(void); + +static inline int suspend_disable_secondary_cpus(void) +{ + return freeze_secondary_cpus(0); +} +static inline void suspend_enable_secondary_cpus(void) +{ + return enable_nonboot_cpus(); +} + #else /* !CONFIG_PM_SLEEP_SMP */ static inline int disable_nonboot_cpus(void) { return 0; } static inline void enable_nonboot_cpus(void) {} +static inline int suspend_disable_secondary_cpus(void) { return 0; } +static inline void suspend_enable_secondary_cpus(void) { } #endif /* !CONFIG_PM_SLEEP_SMP */ void cpu_startup_entry(enum cpuhp_state state); diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index d7140447be75..fd5c95ff9251 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -1150,7 +1150,7 @@ int kernel_kexec(void) error = dpm_suspend_end(PMSG_FREEZE); if (error) goto Resume_devices; - error = disable_nonboot_cpus(); + error = suspend_disable_secondary_cpus(); if (error) goto Enable_cpus; local_irq_disable(); @@ -1183,7 +1183,7 @@ int kernel_kexec(void) Enable_irqs: local_irq_enable(); Enable_cpus: - enable_nonboot_cpus(); + suspend_enable_secondary_cpus(); dpm_resume_start(PMSG_RESTORE); Resume_devices: dpm_resume_end(PMSG_RESTORE); diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index abef759de7c8..cfc7a57049e4 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -281,7 +281,7 @@ static int create_image(int platform_mode) if (error || hibernation_test(TEST_PLATFORM)) goto Platform_finish; - error = disable_nonboot_cpus(); + error = suspend_disable_secondary_cpus(); if (error || hibernation_test(TEST_CPUS)) goto Enable_cpus; @@ -323,7 +323,7 @@ static int create_image(int platform_mode) local_irq_enable(); Enable_cpus: - enable_nonboot_cpus(); + suspend_enable_secondary_cpus(); Platform_finish: platform_finish(platform_mode); @@ -417,7 +417,7 @@ int hibernation_snapshot(int platform_mode) int __weak hibernate_resume_nonboot_cpu_disable(void) { - return disable_nonboot_cpus(); + return suspend_disable_secondary_cpus(); } /** @@ -486,7 +486,7 @@ static int resume_target_kernel(bool platform_mode) local_irq_enable(); Enable_cpus: - enable_nonboot_cpus(); + suspend_enable_secondary_cpus(); Cleanup: platform_restore_cleanup(platform_mode); @@ -564,7 +564,7 @@ int hibernation_platform_enter(void) if (error) goto Platform_finish; - error = disable_nonboot_cpus(); + error = suspend_disable_secondary_cpus(); if (error) goto Enable_cpus; @@ -586,7 +586,7 @@ int hibernation_platform_enter(void) local_irq_enable(); Enable_cpus: - enable_nonboot_cpus(); + suspend_enable_secondary_cpus(); Platform_finish: hibernation_ops->finish(); diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 0bd595a0b610..59b6def23046 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -428,7 +428,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) if (suspend_test(TEST_PLATFORM)) goto Platform_wake; 
- error = disable_nonboot_cpus(); + error = suspend_disable_secondary_cpus(); if (error || suspend_test(TEST_CPUS)) goto Enable_cpus; @@ -458,7 +458,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) BUG_ON(irqs_disabled()); Enable_cpus: - enable_nonboot_cpus(); + suspend_enable_secondary_cpus(); Platform_wake: platform_resume_noirq(state); From 9ca12ac04bb7d7cfb28aa549dcd3d15761f15543 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 11 Apr 2019 13:34:46 +1000 Subject: [PATCH 23/25] kernel/cpu: Allow non-zero CPU to be primary for suspend / kexec freeze This patch provides an arch option, ARCH_SUSPEND_NONZERO_CPU, to opt-in to allowing suspend to occur on one of the housekeeping CPUs rather than hardcoded CPU0. This will allow CPU0 to be a nohz_full CPU with a later change. It may be possible for platforms with hardware/firmware restrictions on suspend/wake to effectively support this by handing off the final stage to CPU0 when kernel housekeeping is no longer required. Another option is to make housekeeping / nohz_full mask dynamic at runtime, but the complexity could not be justified at this time. Signed-off-by: Nicholas Piggin Signed-off-by: Peter Zijlstra (Intel) Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rafael J. Wysocki Cc: Thomas Gleixner Cc: linuxppc-dev@lists.ozlabs.org Link: https://lkml.kernel.org/r/20190411033448.20842-4-npiggin@gmail.com Signed-off-by: Ingo Molnar --- arch/powerpc/Kconfig | 4 ++++ include/linux/cpu.h | 7 ++++++- kernel/cpu.c | 10 +++++++++- kernel/power/Kconfig | 9 +++++++++ 4 files changed, 28 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 2d0be82c3061..bc98b0e37a10 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -318,6 +318,10 @@ config ARCH_SUSPEND_POSSIBLE (PPC_85xx && !PPC_E500MC) || PPC_86xx || PPC_PSERIES \ || 44x || 40x +config ARCH_SUSPEND_NONZERO_CPU + def_bool y + depends on PPC_POWERNV || PPC_PSERIES + config PPC_DCR_NATIVE bool diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 7ab2f09c0a14..73baab8535c1 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -140,7 +140,12 @@ extern void enable_nonboot_cpus(void); static inline int suspend_disable_secondary_cpus(void) { - return freeze_secondary_cpus(0); + int cpu = 0; + + if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) + cpu = -1; + + return freeze_secondary_cpus(cpu); } static inline void suspend_enable_secondary_cpus(void) { diff --git a/kernel/cpu.c b/kernel/cpu.c index 6754f3ecfd94..d1bf6e2b4752 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -1199,8 +1200,15 @@ int freeze_secondary_cpus(int primary) int cpu, error = 0; cpu_maps_update_begin(); - if (!cpu_online(primary)) + if (primary == -1) { primary = cpumask_first(cpu_online_mask); + if (!housekeeping_cpu(primary, HK_FLAG_TIMER)) + primary = housekeeping_any_cpu(HK_FLAG_TIMER); + } else { + if (!cpu_online(primary)) + primary = cpumask_first(cpu_online_mask); + } + /* * We take down all of the non-boot CPUs in one shot to avoid races * with the userspace trying to use the CPU hotplug at the same time diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index f8fe57d1022e..9bbaaab14b36 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -114,6 +114,15 @@ config PM_SLEEP_SMP depends on PM_SLEEP select HOTPLUG_CPU +config PM_SLEEP_SMP_NONZERO_CPU + def_bool y + depends on PM_SLEEP_SMP + depends on ARCH_SUSPEND_NONZERO_CPU
+ ---help--- + If an arch can suspend (for suspend, hibernate, kexec, etc) on a + non-zero numbered CPU, it may define ARCH_SUSPEND_NONZERO_CPU. This + will allow nohz_full mask to include CPU0. + config PM_AUTOSLEEP bool "Opportunistic sleep" depends on PM_SLEEP From 9219565aa89033a9cfdae788c1940473a1253d6c Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 11 Apr 2019 13:34:47 +1000 Subject: [PATCH 24/25] sched/isolation: Require a present CPU in housekeeping mask During housekeeping mask setup, currently a possible CPU is required. That does not guarantee the CPU would be available at boot time, so check to ensure that at least one present CPU is in the mask. Signed-off-by: Nicholas Piggin Signed-off-by: Peter Zijlstra (Intel) Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rafael J . Wysocki Cc: Thomas Gleixner Cc: linuxppc-dev@lists.ozlabs.org Link: https://lkml.kernel.org/r/20190411033448.20842-5-npiggin@gmail.com Signed-off-by: Ingo Molnar --- kernel/sched/isolation.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index b02d148e7672..687302051a27 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -65,6 +65,7 @@ void __init housekeeping_init(void) static int __init housekeeping_setup(char *str, enum hk_flags flags) { cpumask_var_t non_housekeeping_mask; + cpumask_var_t tmp; int err; alloc_bootmem_cpumask_var(&non_housekeeping_mask); @@ -75,16 +76,23 @@ static int __init housekeeping_setup(char *str, enum hk_flags flags) return 0; } + alloc_bootmem_cpumask_var(&tmp); if (!housekeeping_flags) { alloc_bootmem_cpumask_var(&housekeeping_mask); cpumask_andnot(housekeeping_mask, cpu_possible_mask, non_housekeeping_mask); - if (cpumask_empty(housekeeping_mask)) - __cpumask_set_cpu(smp_processor_id(), housekeeping_mask); - } else { - cpumask_var_t tmp; - alloc_bootmem_cpumask_var(&tmp); + cpumask_andnot(tmp, cpu_present_mask, non_housekeeping_mask); + if (cpumask_empty(tmp)) { + pr_warn("Housekeeping: must include one present CPU, " + "using boot CPU:%d\n", smp_processor_id()); + __cpumask_set_cpu(smp_processor_id(), housekeeping_mask); + __cpumask_clear_cpu(smp_processor_id(), non_housekeeping_mask); + } + } else { + cpumask_andnot(tmp, cpu_present_mask, non_housekeeping_mask); + if (cpumask_empty(tmp)) + __cpumask_clear_cpu(smp_processor_id(), non_housekeeping_mask); cpumask_andnot(tmp, cpu_possible_mask, non_housekeeping_mask); if (!cpumask_equal(tmp, housekeeping_mask)) { pr_warn("Housekeeping: nohz_full= must match isolcpus=\n"); @@ -92,8 +100,8 @@ static int __init housekeeping_setup(char *str, enum hk_flags flags) free_bootmem_cpumask_var(non_housekeeping_mask); return 0; } - free_bootmem_cpumask_var(tmp); } + free_bootmem_cpumask_var(tmp); if ((flags & HK_FLAG_TICK) && !(housekeeping_flags & HK_FLAG_TICK)) { if (IS_ENABLED(CONFIG_NO_HZ_FULL)) { From 08ae95f4fd3b38b257f5dc7e6507e071c27ba0d5 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 11 Apr 2019 13:34:48 +1000 Subject: [PATCH 25/25] nohz_full: Allow the boot CPU to be nohz_full Allow the boot CPU/CPU0 to be nohz_full. Have the boot CPU take the do_timer duty during boot until a housekeeping CPU can take over. This is supported when CONFIG_PM_SLEEP_SMP is not configured, or when it is configured and the arch allows suspend on non-zero CPUs. nohz_full has been trialed at a large supercomputer site and found to significantly reduce jitter. 
In order to deploy it in production, they need CPU0 to be nohz_full because their job control system requires the application CPUs to start from 0, and the housekeeping CPUs are placed higher. An equivalent job scheduling that uses CPU0 for housekeeping could be achieved by modifying their system, but it is preferable if nohz_full can support their environment without modification. Signed-off-by: Nicholas Piggin Signed-off-by: Peter Zijlstra (Intel) Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rafael J . Wysocki Cc: Thomas Gleixner Cc: linuxppc-dev@lists.ozlabs.org Link: https://lkml.kernel.org/r/20190411033448.20842-6-npiggin@gmail.com Signed-off-by: Ingo Molnar --- kernel/time/tick-common.c | 50 +++++++++++++++++++++++++++++++++++---- kernel/time/tick-sched.c | 34 ++++++++++++++++++-------- 2 files changed, 70 insertions(+), 14 deletions(-) diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index df401463a191..e49e8091f9ac 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -46,6 +46,14 @@ ktime_t tick_period; * procedure also covers cpu hotplug. */ int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; +#ifdef CONFIG_NO_HZ_FULL +/* + * tick_do_timer_boot_cpu indicates the boot CPU temporarily owns + * tick_do_timer_cpu and it should be taken over by an eligible secondary + * when one comes online. + */ +static int tick_do_timer_boot_cpu __read_mostly = -1; +#endif /* * Debugging: see timer_list.c @@ -167,6 +175,26 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) } } +#ifdef CONFIG_NO_HZ_FULL +static void giveup_do_timer(void *info) +{ + int cpu = *(unsigned int *)info; + + WARN_ON(tick_do_timer_cpu != smp_processor_id()); + + tick_do_timer_cpu = cpu; +} + +static void tick_take_do_timer_from_boot(void) +{ + int cpu = smp_processor_id(); + int from = tick_do_timer_boot_cpu; + + if (from >= 0 && from != cpu) + smp_call_function_single(from, giveup_do_timer, &cpu, 1); +} +#endif + /* * Setup the tick device */ @@ -186,12 +214,26 @@ static void tick_setup_device(struct tick_device *td, * this cpu: */ if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) { - if (!tick_nohz_full_cpu(cpu)) - tick_do_timer_cpu = cpu; - else - tick_do_timer_cpu = TICK_DO_TIMER_NONE; + tick_do_timer_cpu = cpu; + tick_next_period = ktime_get(); tick_period = NSEC_PER_SEC / HZ; +#ifdef CONFIG_NO_HZ_FULL + /* + * The boot CPU may be nohz_full, in which case set + * tick_do_timer_boot_cpu so the first housekeeping + * secondary that comes up will take do_timer from + * us. + */ + if (tick_nohz_full_cpu(cpu)) + tick_do_timer_boot_cpu = cpu; + + } else if (tick_do_timer_boot_cpu != -1 && + !tick_nohz_full_cpu(cpu)) { + tick_take_do_timer_from_boot(); + tick_do_timer_boot_cpu = -1; + WARN_ON(tick_do_timer_cpu != cpu); +#endif } /* diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 6fa52cd6df0b..4aa917acbe1c 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -121,10 +121,16 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) * into a long sleep. If two CPUs happen to assign themselves to * this duty, then the jiffies update is still serialized by * jiffies_lock. + * + * If nohz_full is enabled, this should not happen because the + * tick_do_timer_cpu never relinquishes. 
*/ - if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE) - && !tick_nohz_full_cpu(cpu)) + if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) { +#ifdef CONFIG_NO_HZ_FULL + WARN_ON(tick_nohz_full_running); +#endif tick_do_timer_cpu = cpu; + } #endif /* Check, if the jiffies need an update */ @@ -395,8 +401,8 @@ void __init tick_nohz_full_setup(cpumask_var_t cpumask) static int tick_nohz_cpu_down(unsigned int cpu) { /* - * The boot CPU handles housekeeping duty (unbound timers, - * workqueues, timekeeping, ...) on behalf of full dynticks + * The tick_do_timer_cpu CPU handles housekeeping duty (unbound + * timers, workqueues, timekeeping, ...) on behalf of full dynticks * CPUs. It must remain online when nohz full is enabled. */ if (tick_nohz_full_running && tick_do_timer_cpu == cpu) @@ -423,12 +429,15 @@ void __init tick_nohz_init(void) return; } - cpu = smp_processor_id(); + if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) && + !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) { + cpu = smp_processor_id(); - if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) { - pr_warn("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", - cpu); - cpumask_clear_cpu(cpu, tick_nohz_full_mask); + if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) { + pr_warn("NO_HZ: Clearing %d from nohz_full range " + "for timekeeping\n", cpu); + cpumask_clear_cpu(cpu, tick_nohz_full_mask); + } } for_each_cpu(cpu, tick_nohz_full_mask) @@ -904,8 +913,13 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) /* * Boot safety: make sure the timekeeping duty has been * assigned before entering dyntick-idle mode, + * tick_do_timer_cpu is TICK_DO_TIMER_BOOT */ - if (tick_do_timer_cpu == TICK_DO_TIMER_NONE) + if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT)) + return false; + + /* Should not happen for nohz-full */ + if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) return false; }
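Taken together, PATCH 23 and PATCH 25 mean suspend no longer has to keep CPU0 online, which is what allows CPU0 to sit in the nohz_full mask. The behavioural change is easiest to see in the primary-CPU selection that freeze_secondary_cpus() performs after PATCH 23. The stand-alone C sketch below is a simplified user-space model of that selection only, not kernel code: pick_primary(), the NR_CPUS value, the bitmask cpumasks and the example masks in main() are assumptions made for this illustration, whereas the real implementation uses cpumask_first(), housekeeping_cpu() and housekeeping_any_cpu(HK_FLAG_TIMER) as shown in the diff above.

/*
 * Model of the suspend "primary" CPU selection after PATCH 23.
 * Illustrative only: toy bitmask cpumasks instead of struct cpumask,
 * and "first online housekeeping CPU" instead of housekeeping_any_cpu().
 */
#include <stdio.h>

#define NR_CPUS 8

static int pick_primary(int primary, unsigned int online, unsigned int housekeeping)
{
        int cpu;

        if (primary == -1) {
                /*
                 * PM_SLEEP_SMP_NONZERO_CPU path: suspend_disable_secondary_cpus()
                 * passes -1, asking for any online housekeeping CPU to stay up.
                 */
                for (cpu = 0; cpu < NR_CPUS; cpu++)
                        if ((online & (1u << cpu)) && (housekeeping & (1u << cpu)))
                                return cpu;
        } else if (online & (1u << primary)) {
                /* Legacy path: the requested CPU (normally CPU0) stays online. */
                return primary;
        }

        /* Simplified fallback: the first online CPU. */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (online & (1u << cpu))
                        return cpu;
        return -1;
}

int main(void)
{
        unsigned int online = 0xffu;       /* CPUs 0-7 online */
        unsigned int housekeeping = 0xf0u; /* CPUs 4-7 housekeeping, 0-3 nohz_full */

        /* With PM_SLEEP_SMP_NONZERO_CPU=y: a housekeeping CPU stays online. */
        printf("nonzero-cpu primary: %d\n", pick_primary(-1, online, housekeeping));

        /* Without it: suspend is still pinned to CPU0. */
        printf("legacy primary:      %d\n", pick_primary(0, online, housekeeping));

        return 0;
}

With these example masks the model prints 4 and then 0, which is the observable difference the option makes on a machine whose housekeeping CPUs are numbered above the application CPUs: suspend keeps a high-numbered housekeeping CPU online, so CPU0 can be frozen and may therefore be nohz_full.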