diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 284153d3e0fc..678b2882faaa 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3855,6 +3855,60 @@ got_pg:
 	return page;
 }
 
+static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
+		struct zonelist *zonelist, nodemask_t *nodemask,
+		struct alloc_context *ac, gfp_t *alloc_mask,
+		unsigned int *alloc_flags)
+{
+	ac->high_zoneidx = gfp_zone(gfp_mask);
+	ac->zonelist = zonelist;
+	ac->nodemask = nodemask;
+	ac->migratetype = gfpflags_to_migratetype(gfp_mask);
+
+	if (cpusets_enabled()) {
+		*alloc_mask |= __GFP_HARDWALL;
+		*alloc_flags |= ALLOC_CPUSET;
+		if (!ac->nodemask)
+			ac->nodemask = &cpuset_current_mems_allowed;
+	}
+
+	lockdep_trace_alloc(gfp_mask);
+
+	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
+
+	if (should_fail_alloc_page(gfp_mask, order))
+		return false;
+
+	/*
+	 * Check that the zones suitable for the gfp_mask contain at least
+	 * one valid zone. It's possible to have an empty zonelist as a
+	 * result of __GFP_THISNODE and a memoryless node.
+	 */
+	if (unlikely(!ac->zonelist->_zonerefs->zone))
+		return false;
+
+	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
+		*alloc_flags |= ALLOC_CMA;
+
+	return true;
+}
+
+/* Determine whether to spread dirty pages and what the first usable zone is */
+static inline void finalise_ac(gfp_t gfp_mask,
+		unsigned int order, struct alloc_context *ac)
+{
+	/* Dirty zone balancing is only done in the fast path */
+	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
+
+	/*
+	 * The preferred zone is used for statistics but crucially it is
+	 * also used as the starting point for the zonelist iterator. It
+	 * may get reset for allocations that ignore memory policies.
+	 */
+	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+					ac->high_zoneidx, ac->nodemask);
+}
+
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
@@ -3865,50 +3919,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	struct page *page;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
 	gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
-	struct alloc_context ac = {
-		.high_zoneidx = gfp_zone(gfp_mask),
-		.zonelist = zonelist,
-		.nodemask = nodemask,
-		.migratetype = gfpflags_to_migratetype(gfp_mask),
-	};
-
-	if (cpusets_enabled()) {
-		alloc_mask |= __GFP_HARDWALL;
-		alloc_flags |= ALLOC_CPUSET;
-		if (!ac.nodemask)
-			ac.nodemask = &cpuset_current_mems_allowed;
-	}
+	struct alloc_context ac = { };
 
 	gfp_mask &= gfp_allowed_mask;
-
-	lockdep_trace_alloc(gfp_mask);
-
-	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
-
-	if (should_fail_alloc_page(gfp_mask, order))
+	if (!prepare_alloc_pages(gfp_mask, order, zonelist, nodemask, &ac, &alloc_mask, &alloc_flags))
 		return NULL;
 
-	/*
-	 * Check the zones suitable for the gfp_mask contain at least one
-	 * valid zone. It's possible to have an empty zonelist as a result
-	 * of __GFP_THISNODE and a memoryless node
-	 */
-	if (unlikely(!zonelist->_zonerefs->zone))
-		return NULL;
-
-	if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
-		alloc_flags |= ALLOC_CMA;
-
-	/* Dirty zone balancing only done in the fast path */
-	ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
-
-	/*
-	 * The preferred zone is used for statistics but crucially it is
-	 * also used as the starting point for the zonelist iterator. It
-	 * may get reset for allocations that ignore memory policies.
-	 */
-	ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
-					ac.high_zoneidx, ac.nodemask);
+	finalise_ac(gfp_mask, order, &ac);
 
 	if (!ac.preferred_zoneref->zone) {
 		page = NULL;
 		/*
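
For reference, the entry of __alloc_pages_nodemask() after this patch reduces to the shape sketched below. This is a condensed sketch assembled from the hunks above, not the verbatim resulting file: the ac.preferred_zoneref check and everything after it (first allocation attempt, slow path) are outside the hunks shown and are elided here.

	struct page *
	__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
				struct zonelist *zonelist, nodemask_t *nodemask)
	{
		struct page *page;
		unsigned int alloc_flags = ALLOC_WMARK_LOW;
		gfp_t alloc_mask = gfp_mask;
		struct alloc_context ac = { };

		gfp_mask &= gfp_allowed_mask;

		/* One-off setup and checks; failure fails the allocation */
		if (!prepare_alloc_pages(gfp_mask, order, zonelist, nodemask,
						&ac, &alloc_mask, &alloc_flags))
			return NULL;

		/* Dirty spreading and the starting zoneref for the iterator */
		finalise_ac(gfp_mask, order, &ac);

		/* ... ac.preferred_zoneref check, fast path and slow path follow ... */
	}

The split puts the checks that can fail outright (fault injection, an empty zonelist from __GFP_THISNODE on a memoryless node) in prepare_alloc_pages(), while finalise_ac() computes the per-call state the zonelist iterator starts from, which per the comment may be reset for allocations that ignore memory policies.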