mm, page_alloc: shorten the page allocator fast path
The page allocator fast path checks page multiple times unnecessarily. This
patch avoids all the slowpath checks if the first allocation attempt succeeds.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 3777999dd4
Commit: 4fcb097117
@@ -3398,31 +3398,26 @@ retry_cpuset:
 				ac.nodemask, &ac.preferred_zone);
 	if (!ac.preferred_zone) {
 		page = NULL;
-		goto out;
+		goto no_zone;
 	}
 
 	ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
-	if (unlikely(!page)) {
-		/*
-		 * Runtime PM, block IO and its error handling path
-		 * can deadlock because I/O on the device might not
-		 * complete.
-		 */
-		alloc_mask = memalloc_noio_flags(gfp_mask);
-		ac.spread_dirty_pages = false;
-
-		page = __alloc_pages_slowpath(alloc_mask, order, &ac);
-	}
+	if (likely(page))
+		goto out;
 
-	if (kmemcheck_enabled && page)
-		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
-
-	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
+	/*
+	 * Runtime PM, block IO and its error handling path can deadlock
+	 * because I/O on the device might not complete.
+	 */
+	alloc_mask = memalloc_noio_flags(gfp_mask);
+	ac.spread_dirty_pages = false;
+
+	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
-out:
+no_zone:
 	/*
 	 * When updating a task's mems_allowed, it is possible to race with
 	 * parallel threads in such a way that an allocation can fail while
@@ -3434,6 +3429,12 @@ out:
 		goto retry_cpuset;
 	}
 
+out:
+	if (kmemcheck_enabled && page)
+		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
+
+	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
+
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
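For illustration only, not part of the commit: a minimal, self-contained C sketch of the
control-flow pattern the patch applies. Every name in it (alloc_buffer, try_fast_alloc,
slow_alloc, no_size) is a hypothetical stand-in rather than kernel code; the kernel
functions are only mentioned in comments. A successful first attempt branches straight
to out:, skipping the retry and failure handling that the patch moves behind a
no_zone-style label, while work needed on every exit (the kernel keeps kmemcheck and
trace_mm_page_alloc there) stays after out:.

/*
 * Sketch of the "shortened fast path" layout, assuming hypothetical helpers.
 * try_fast_alloc() stands in for get_page_from_freelist(), slow_alloc() for
 * __alloc_pages_slowpath().
 */
#include <stdio.h>
#include <stdlib.h>

static void *try_fast_alloc(size_t size)
{
	return malloc(size);		/* cheap common-case attempt */
}

static void *slow_alloc(size_t size)
{
	return calloc(1, size);		/* expensive fallback path */
}

static void *alloc_buffer(size_t size)
{
	void *p;

	if (size == 0) {
		p = NULL;
		goto no_size;		/* analogue of the new no_zone: label */
	}

	/* First allocation attempt */
	p = try_fast_alloc(size);
	if (p)				/* the likely() case: skip the slow path entirely */
		goto out;

	p = slow_alloc(size);

no_size:
	/* Failure/retry handling that a successful fast path never reaches. */
	if (!p)
		fprintf(stderr, "alloc_buffer: %zu bytes unavailable\n", size);

out:
	/* Work shared by every exit (tracing/debug hooks in the kernel patch). */
	return p;
}

int main(void)
{
	void *p = alloc_buffer(64);

	free(p);
	return 0;
}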