diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 7230bb59a06f..2e9b9ebbeb78 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -177,6 +177,7 @@ extern void put_online_cpus(void);
 #define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
 #define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
 #define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
+void clear_tasks_mm_cpumask(int cpu);
 int cpu_down(unsigned int cpu);
 
 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 0e6353cf147a..0575197deb4a 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -10,6 +10,8 @@
 #include <linux/sched.h>
 #include <linux/unistd.h>
 #include <linux/cpu.h>
+#include <linux/oom.h>
+#include <linux/rcupdate.h>
 #include <linux/export.h>
 #include <linux/kthread.h>
 #include <linux/stop_machine.h>
@@ -173,6 +175,30 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
+void clear_tasks_mm_cpumask(int cpu)
+{
+	struct task_struct *p;
+
+	/*
+	 * This function is called after the cpu is taken down and marked
+	 * offline, so it's not like new tasks will ever get this cpu set in
+	 * their mm mask. -- Peter Zijlstra
+	 * Thus, we may use rcu_read_lock() here, instead of grabbing
+	 * full-fledged tasklist_lock.
+	 */
+	rcu_read_lock();
+	for_each_process(p) {
+		struct task_struct *t;
+
+		t = find_lock_task_mm(p);
+		if (!t)
+			continue;
+		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
+		task_unlock(t);
+	}
+	rcu_read_unlock();
+}
+
 static inline void check_for_tasks(int cpu)
 {
 	struct task_struct *p;
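
For context, here is a minimal sketch of how an architecture's CPU-hotplug teardown hook might use the new helper. It is illustrative only and not part of the patch: the function name my_arch_cpu_disable and the surrounding teardown steps are assumptions. The patch itself only adds clear_tasks_mm_cpumask(int cpu), which, per the comment above, must be called after the dying CPU has been marked offline so that no task can set its bit in an mm mask again.

/*
 * Illustrative sketch, not part of this patch: a hypothetical
 * architecture CPU-teardown hook showing where clear_tasks_mm_cpumask()
 * is intended to be called.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

int my_arch_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/* Mark this CPU offline first; no new task may be scheduled on it. */
	set_cpu_online(cpu, false);

	/*
	 * Now that the CPU is offline, drop it from every task's
	 * mm_cpumask() so that later TLB/cache shootdowns never target
	 * a CPU that is gone.
	 */
	clear_tasks_mm_cpumask(cpu);

	return 0;
}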