mm, oom: rename zonelist locking functions
try_set_zonelist_oom() and clear_zonelist_oom() are not named properly to imply that they require locking semantics to avoid out_of_memory() being reordered. zone_scan_lock is required for both functions to ensure that there is proper locking synchronization. Rename try_set_zonelist_oom() to oom_zonelist_trylock() and rename clear_zonelist_oom() to oom_zonelist_unlock() to imply there is proper locking semantics. At the same time, convert oom_zonelist_trylock() to return bool instead of int since only success and failure are tested. Signed-off-by: David Rientjes <rientjes@google.com> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Rik van Riel <riel@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Parent
8d060bf490
Commit
e972a070e2
|
@@ -55,8 +55,8 @@ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
|
||||||
struct mem_cgroup *memcg, nodemask_t *nodemask,
|
struct mem_cgroup *memcg, nodemask_t *nodemask,
|
||||||
const char *message);
|
const char *message);
|
||||||
|
|
||||||
extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
|
extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
|
||||||
extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
|
extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
|
||||||
|
|
||||||
extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
|
extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
|
||||||
int order, const nodemask_t *nodemask);
|
int order, const nodemask_t *nodemask);
|
||||||
|
|
|
@@ -559,28 +559,25 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
|
||||||
* if a parallel OOM killing is already taking place that includes a zone in
|
* if a parallel OOM killing is already taking place that includes a zone in
|
||||||
* the zonelist. Otherwise, locks all zones in the zonelist and returns 1.
|
* the zonelist. Otherwise, locks all zones in the zonelist and returns 1.
|
||||||
*/
|
*/
|
||||||
int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
|
bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask)
|
||||||
{
|
{
|
||||||
struct zoneref *z;
|
struct zoneref *z;
|
||||||
struct zone *zone;
|
struct zone *zone;
|
||||||
int ret = 1;
|
bool ret = true;
|
||||||
|
|
||||||
spin_lock(&zone_scan_lock);
|
spin_lock(&zone_scan_lock);
|
||||||
for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
|
for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
|
||||||
if (zone_is_oom_locked(zone)) {
|
if (zone_is_oom_locked(zone)) {
|
||||||
ret = 0;
|
ret = false;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
|
/*
|
||||||
/*
|
* Lock each zone in the zonelist under zone_scan_lock so a parallel
|
||||||
* Lock each zone in the zonelist under zone_scan_lock so a
|
* call to oom_zonelist_trylock() doesn't succeed when it shouldn't.
|
||||||
* parallel invocation of try_set_zonelist_oom() doesn't succeed
|
*/
|
||||||
* when it shouldn't.
|
for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
|
||||||
*/
|
|
||||||
zone_set_flag(zone, ZONE_OOM_LOCKED);
|
zone_set_flag(zone, ZONE_OOM_LOCKED);
|
||||||
}
|
|
||||||
|
|
||||||
out:
|
out:
|
||||||
spin_unlock(&zone_scan_lock);
|
spin_unlock(&zone_scan_lock);
|
||||||
|
@@ -592,15 +589,14 @@ out:
|
||||||
* allocation attempts with zonelists containing them may now recall the OOM
|
* allocation attempts with zonelists containing them may now recall the OOM
|
||||||
* killer, if necessary.
|
* killer, if necessary.
|
||||||
*/
|
*/
|
||||||
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
|
void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask)
|
||||||
{
|
{
|
||||||
struct zoneref *z;
|
struct zoneref *z;
|
||||||
struct zone *zone;
|
struct zone *zone;
|
||||||
|
|
||||||
spin_lock(&zone_scan_lock);
|
spin_lock(&zone_scan_lock);
|
||||||
for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
|
for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
|
||||||
zone_clear_flag(zone, ZONE_OOM_LOCKED);
|
zone_clear_flag(zone, ZONE_OOM_LOCKED);
|
||||||
}
|
|
||||||
spin_unlock(&zone_scan_lock);
|
spin_unlock(&zone_scan_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -695,8 +691,8 @@ void pagefault_out_of_memory(void)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
zonelist = node_zonelist(first_memory_node, GFP_KERNEL);
|
zonelist = node_zonelist(first_memory_node, GFP_KERNEL);
|
||||||
if (try_set_zonelist_oom(zonelist, GFP_KERNEL)) {
|
if (oom_zonelist_trylock(zonelist, GFP_KERNEL)) {
|
||||||
out_of_memory(NULL, 0, 0, NULL, false);
|
out_of_memory(NULL, 0, 0, NULL, false);
|
||||||
clear_zonelist_oom(zonelist, GFP_KERNEL);
|
oom_zonelist_unlock(zonelist, GFP_KERNEL);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@@ -2246,8 +2246,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
|
||||||
{
|
{
|
||||||
struct page *page;
|
struct page *page;
|
||||||
|
|
||||||
/* Acquire the OOM killer lock for the zones in zonelist */
|
/* Acquire the per-zone oom lock for each zone */
|
||||||
if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
|
if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
|
||||||
schedule_timeout_uninterruptible(1);
|
schedule_timeout_uninterruptible(1);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
@@ -2285,7 +2285,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
|
||||||
out_of_memory(zonelist, gfp_mask, order, nodemask, false);
|
out_of_memory(zonelist, gfp_mask, order, nodemask, false);
|
||||||
|
|
||||||
out:
|
out:
|
||||||
clear_zonelist_oom(zonelist, gfp_mask);
|
oom_zonelist_unlock(zonelist, gfp_mask);
|
||||||
return page;
|
return page;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue