mm: handle lru_add_drain_all for UP properly
Since for_each_cpu(cpu, mask) added by commit 2d3854a37e
("cpumask: introduce new API, without changing anything") did not evaluate the mask argument if NR_CPUS == 1 due to CONFIG_SMP=n, lru_add_drain_all() is hitting WARN_ON() at __flush_work() added by commit 4d43d395fe
("workqueue: Try to catch flush_work() without INIT_WORK().") by unconditionally calling flush_work() [1]. Workaround this issue by using CONFIG_SMP=n specific lru_add_drain_all implementation. There is no real need to defer the implementation to the workqueue as the draining is going to happen on the local cpu. So alias lru_add_drain_all to lru_add_drain which does all the necessary work. [akpm@linux-foundation.org: fix various build warnings] [1] https://lkml.kernel.org/r/18a30387-6aa5-6123-e67c-57579ecc3f38@roeck-us.net Link: http://lkml.kernel.org/r/20190213124334.GH4525@dhcp22.suse.cz Signed-off-by: Michal Hocko <mhocko@suse.com> Reported-by: Guenter Roeck <linux@roeck-us.net> Debugged-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp> Cc: Tejun Heo <tj@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Родитель
94b3334cbe
Коммит
6ea183d60c
17
mm/swap.c
17
mm/swap.c
|
@ -320,11 +320,6 @@ static inline void activate_page_drain(int cpu)
|
|||
{
|
||||
}
|
||||
|
||||
static bool need_activate_page_drain(int cpu)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
void activate_page(struct page *page)
|
||||
{
|
||||
struct zone *zone = page_zone(page);
|
||||
|
@ -653,13 +648,15 @@ void lru_add_drain(void)
|
|||
put_cpu();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
|
||||
|
||||
static void lru_add_drain_per_cpu(struct work_struct *dummy)
|
||||
{
|
||||
lru_add_drain();
|
||||
}
|
||||
|
||||
static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
|
||||
|
||||
/*
|
||||
* Doesn't need any cpu hotplug locking because we do rely on per-cpu
|
||||
* kworkers being shut down before our page_alloc_cpu_dead callback is
|
||||
|
@ -702,6 +699,12 @@ void lru_add_drain_all(void)
|
|||
|
||||
mutex_unlock(&lock);
|
||||
}
|
||||
#else
|
||||
void lru_add_drain_all(void)
|
||||
{
|
||||
lru_add_drain();
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* release_pages - batched put_page()
|
||||
|
|
Загрузка…
Ссылка в новой задаче