mm: page_alloc: only check the alloc flags and gfp_mask for dirty once
Currently the "should this allocation be throttled on dirty zones" condition, (alloc_flags & ALLOC_WMARK_LOW) && (gfp_mask & __GFP_WRITE), is re-evaluated for every zone in the zonelist even though it is invariant for the whole walk. Compute it once per call instead.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Jan Kara <jack@suse.cz>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: d34c5fa06f
Commit: a6e21b14f2
@@ -1917,6 +1917,8 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
 	int zlc_active = 0;		/* set if using zonelist_cache */
 	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
+	bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
+				(gfp_mask & __GFP_WRITE);
 
 	classzone_idx = zone_idx(preferred_zone);
 zonelist_scan:
@@ -1976,8 +1978,7 @@ zonelist_scan:
 		 * will require awareness of zones in the
 		 * dirty-throttling and the flusher threads.
 		 */
-		if ((alloc_flags & ALLOC_WMARK_LOW) &&
-		    (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
+		if (consider_zone_dirty && !zone_dirty_ok(zone))
 			continue;
 
 		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
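The pattern here is hoisting a loop-invariant flag test out of the per-zone loop. The standalone sketch below is not kernel code: struct zone, zone_dirty_ok(), pick_zone() and the flag values are simplified stand-ins invented for illustration. It only shows the same shape as the patch, with the combined flag check computed once before the walk and reused on every iteration.

/*
 * Standalone sketch of the pattern in this patch: hoist a loop-invariant
 * flag check out of a per-zone loop.  struct zone, zone_dirty_ok() and
 * pick_zone() are simplified stand-ins, not the kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define ALLOC_WMARK_LOW	0x01	/* stand-in for the kernel's ALLOC_WMARK_LOW */
#define GFP_WRITE	0x02	/* stand-in for __GFP_WRITE */

struct zone {
	const char *name;
	bool dirty_ok;			/* stand-in for zone_dirty_ok(zone) */
};

static bool zone_dirty_ok(const struct zone *zone)
{
	return zone->dirty_ok;
}

static const struct zone *pick_zone(const struct zone *zones, int nr,
				    unsigned int alloc_flags,
				    unsigned int gfp_mask)
{
	/*
	 * Both masks are constant for the whole walk, so evaluate the
	 * combined condition once instead of once per zone.
	 */
	bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
				   (gfp_mask & GFP_WRITE);
	int i;

	for (i = 0; i < nr; i++) {
		const struct zone *zone = &zones[i];

		/* Skip zones over their dirty limit, as the patch does. */
		if (consider_zone_dirty && !zone_dirty_ok(zone))
			continue;

		return zone;
	}
	return NULL;
}

int main(void)
{
	const struct zone zones[] = {
		{ "Normal", false },	/* pretend this zone is over its dirty limit */
		{ "DMA32",  true  },
	};
	const struct zone *z = pick_zone(zones, 2, ALLOC_WMARK_LOW, GFP_WRITE);

	printf("picked zone: %s\n", z ? z->name : "none");
	return 0;
}

A compiler can often hoist such an expression itself, but making the invariant explicit in a local bool keeps the per-zone path short and documents that the decision does not depend on the zone, which is the point of the patch.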