mm: vmscan: remove shrink_control arg from do_try_to_free_pages()
There is no need to pass a shrink_control struct from try_to_free_pages() and friends down to do_try_to_free_pages() and then to shrink_zones(), because it is only used in shrink_zones(), and the only field initialized at the top level is gfp_mask, which is always equal to scan_control.gfp_mask. So let's move the shrink_control initialization into shrink_zones().

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Glauber Costa <glommer@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 65ec02cb9a
Commit: 3115cd9145

 mm/vmscan.c | 32 ++++++++++++--------------------
 1 file changed, 12 insertions(+), 20 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2291,8 +2291,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
  * the caller that it should consider retrying the allocation instead of
  * further reclaim.
  */
-static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc,
-			 struct shrink_control *shrink)
+static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 {
 	struct zoneref *z;
 	struct zone *zone;
@@ -2301,6 +2300,9 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc,
 	unsigned long lru_pages = 0;
 	bool aborted_reclaim = false;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
+	struct shrink_control shrink = {
+		.gfp_mask = sc->gfp_mask,
+	};
 
 	/*
 	 * If the number of buffer_heads in the machine exceeds the maximum
@@ -2310,7 +2312,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc,
 	if (buffer_heads_over_limit)
 		sc->gfp_mask |= __GFP_HIGHMEM;
 
-	nodes_clear(shrink->nodes_to_scan);
+	nodes_clear(shrink.nodes_to_scan);
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 					gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2325,7 +2327,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc,
 			continue;
 
 		lru_pages += zone_reclaimable_pages(zone);
-		node_set(zone_to_nid(zone), shrink->nodes_to_scan);
+		node_set(zone_to_nid(zone), shrink.nodes_to_scan);
 
 		if (sc->priority != DEF_PRIORITY &&
 		    !zone_reclaimable(zone))
@@ -2370,7 +2372,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc,
 		 * pages.
 		 */
		if (global_reclaim(sc)) {
-			shrink_slab(shrink, sc->nr_scanned, lru_pages);
+			shrink_slab(&shrink, sc->nr_scanned, lru_pages);
 			if (reclaim_state) {
 				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
 				reclaim_state->reclaimed_slab = 0;
@@ -2417,8 +2419,7 @@ static bool all_unreclaimable(struct zonelist *zonelist,
  * else, the number of pages reclaimed
  */
 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
-					  struct scan_control *sc,
-					  struct shrink_control *shrink)
+					  struct scan_control *sc)
 {
 	unsigned long total_scanned = 0;
 	unsigned long writeback_threshold;
@@ -2433,7 +2434,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
 				sc->priority);
 		sc->nr_scanned = 0;
-		aborted_reclaim = shrink_zones(zonelist, sc, shrink);
+		aborted_reclaim = shrink_zones(zonelist, sc);
 
 		total_scanned += sc->nr_scanned;
 		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
@@ -2596,9 +2597,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.target_mem_cgroup = NULL,
 		.nodemask = nodemask,
 	};
-	struct shrink_control shrink = {
-		.gfp_mask = sc.gfp_mask,
-	};
 
 	/*
 	 * Do not enter reclaim if fatal signal was delivered while throttled.
@@ -2612,7 +2610,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 				sc.may_writepage,
 				gfp_mask);
 
-	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
+	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 
 	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
 
@@ -2679,9 +2677,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
 	};
-	struct shrink_control shrink = {
-		.gfp_mask = sc.gfp_mask,
-	};
 
 	/*
 	 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
@@ -2696,7 +2691,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 				sc.may_writepage,
 				sc.gfp_mask);
 
-	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
+	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 
 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
@@ -3331,9 +3326,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 		.order = 0,
 		.priority = DEF_PRIORITY,
 	};
-	struct shrink_control shrink = {
-		.gfp_mask = sc.gfp_mask,
-	};
 	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
 	struct task_struct *p = current;
 	unsigned long nr_reclaimed;
@@ -3343,7 +3335,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
-	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
+	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 
 	p->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
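For illustration only, below is a minimal, self-contained C sketch of the shape this patch settles on: the one function that actually needs a shrink_control builds it locally from the scan_control it already receives, so callers no longer have to construct and thread one through. The struct layouts and functions here are simplified stand-ins, not the real vmscan.c definitions.

/* sketch.c - simplified stand-ins for the kernel types, illustrative only */
#include <stdio.h>

typedef unsigned int gfp_t;

struct scan_control {
	gfp_t gfp_mask;
	unsigned long nr_scanned;
};

struct shrink_control {
	gfp_t gfp_mask;
};

/* Stand-in for shrink_slab(): consumes the locally built shrink_control. */
static void shrink_slab_sketch(struct shrink_control *shrink,
			       unsigned long nr_scanned)
{
	printf("shrink_slab: gfp_mask=%#x nr_scanned=%lu\n",
	       shrink->gfp_mask, nr_scanned);
}

/* After the patch: shrink_zones() is the only user, so it initializes the
 * shrink_control itself from sc->gfp_mask instead of taking it as an argument. */
static void shrink_zones_sketch(struct scan_control *sc)
{
	struct shrink_control shrink = {
		.gfp_mask = sc->gfp_mask,	/* the only field callers ever set */
	};

	shrink_slab_sketch(&shrink, sc->nr_scanned);
}

int main(void)
{
	struct scan_control sc = { .gfp_mask = 0x14u, .nr_scanned = 128 };

	/* Callers now pass only the scan_control; no shrink_control to thread through. */
	shrink_zones_sketch(&sc);
	return 0;
}

The design point is simply that scan_control already carries gfp_mask, so there is nothing the callers could supply that shrink_zones() cannot derive on its own.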