mm/vmscan: __isolate_lru_page_prepare() cleanup
The function returns only two results, so using a 'switch' to handle its
result is unnecessary. Simplify it to a bool function, as Vlastimil
suggested. Also remove the 'goto' by reusing list_move(), and take
Matthew Wilcox's suggestion to update the comments in the function.

Link: https://lkml.kernel.org/r/728874d7-2d93-4049-68c1-dcc3b2d52ccd@linux.alibaba.com
Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 7ecc956551
Commit: c2135f7c57
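The core of the change is a calling-convention cleanup: the predicate used to return 0 on success and -EBUSY on failure, so callers dispatched on the result with a switch and bailed out through goto labels. Returning bool lets every call site collapse into a plain if/continue. Below is a minimal userspace C sketch of that pattern (hypothetical names such as can_take(); not code from the patch):

	/* Hypothetical userspace sketch, not kernel code: the same
	 * cleanup pattern applied to a made-up predicate can_take(). */
	#include <stdbool.h>
	#include <stdio.h>

	struct item { int id; bool busy; };

	/* Before the cleanup this would have returned 0 or -EBUSY;
	 * as a bool, the caller simply writes if (!can_take(...)). */
	static bool can_take(const struct item *it)
	{
		return !it->busy;
	}

	int main(void)
	{
		struct item items[] = { { 1, false }, { 2, true }, { 3, false } };

		for (int i = 0; i < 3; i++) {
			if (!can_take(&items[i])) {
				/* busy elsewhere -- skip it, no goto label needed */
				continue;
			}
			printf("took item %d\n", items[i].id);
		}
		return 0;
	}

The same reasoning drives the isolate_lru_pages() rewrite in the diff below: each failure path does its own list_move() back to the source list and continues, so the busy: label disappears.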
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -356,7 +356,7 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page,
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 					gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
+extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 						  unsigned long nr_pages,
 						  gfp_t gfp_mask,
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -988,7 +988,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		if (unlikely(!get_page_unless_zero(page)))
 			goto isolate_fail;
 
-		if (__isolate_lru_page_prepare(page, isolate_mode) != 0)
+		if (!__isolate_lru_page_prepare(page, isolate_mode))
 			goto isolate_fail_put;
 
 		/* Try isolate the page */
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1539,19 +1539,17 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
  * page:	page to consider
  * mode:	one of the LRU isolation modes defined above
  *
- * returns 0 on success, -ve errno on failure.
+ * returns true on success, false on failure.
  */
-int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
+bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
 {
-	int ret = -EBUSY;
-
 	/* Only take pages on the LRU. */
 	if (!PageLRU(page))
-		return ret;
+		return false;
 
 	/* Compaction should not handle unevictable pages but CMA can do so */
 	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
-		return ret;
+		return false;
 
 	/*
 	 * To minimise LRU disruption, the caller can indicate that it only
@@ -1564,7 +1562,7 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
 	if (mode & ISOLATE_ASYNC_MIGRATE) {
 		/* All the caller can do on PageWriteback is block */
 		if (PageWriteback(page))
-			return ret;
+			return false;
 
 		if (PageDirty(page)) {
 			struct address_space *mapping;
@@ -1580,20 +1578,20 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
 			 * from the page cache.
 			 */
 			if (!trylock_page(page))
-				return ret;
+				return false;
 
 			mapping = page_mapping(page);
 			migrate_dirty = !mapping || mapping->a_ops->migratepage;
 			unlock_page(page);
 			if (!migrate_dirty)
-				return ret;
+				return false;
 		}
 	}
 
 	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
-		return ret;
+		return false;
 
-	return 0;
+	return true;
 }
 
 /*
@@ -1677,35 +1675,31 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		 * only when the page is being freed somewhere else.
 		 */
 		scan += nr_pages;
-		switch (__isolate_lru_page_prepare(page, mode)) {
-		case 0:
-			/*
-			 * Be careful not to clear PageLRU until after we're
-			 * sure the page is not being freed elsewhere -- the
-			 * page release code relies on it.
-			 */
-			if (unlikely(!get_page_unless_zero(page)))
-				goto busy;
-
-			if (!TestClearPageLRU(page)) {
-				/*
-				 * This page may in other isolation path,
-				 * but we still hold lru_lock.
-				 */
-				put_page(page);
-				goto busy;
-			}
-
-			nr_taken += nr_pages;
-			nr_zone_taken[page_zonenum(page)] += nr_pages;
-			list_move(&page->lru, dst);
-			break;
-
-		default:
-busy:
-			/* else it is being freed elsewhere */
+		if (!__isolate_lru_page_prepare(page, mode)) {
+			/* It is being freed elsewhere */
 			list_move(&page->lru, src);
+			continue;
+		}
+		/*
+		 * Be careful not to clear PageLRU until after we're
+		 * sure the page is not being freed elsewhere -- the
+		 * page release code relies on it.
+		 */
+		if (unlikely(!get_page_unless_zero(page))) {
+			list_move(&page->lru, src);
+			continue;
 		}
+
+		if (!TestClearPageLRU(page)) {
+			/* Another thread is already isolating this page */
+			put_page(page);
+			list_move(&page->lru, src);
+			continue;
+		}
+
+		nr_taken += nr_pages;
+		nr_zone_taken[page_zonenum(page)] += nr_pages;
+		list_move(&page->lru, dst);
 	}
 
 	/*
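One subtlety the rewrite preserves: TestClearPageLRU() atomically clears PG_lru and returns whether it was set, so exactly one of several racing isolators wins the page while the losers put_page() and back off. A rough userspace analogy using C11 atomics (hypothetical helper names, not the kernel's page-flag API):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for the PG_lru page flag. */
	static atomic_bool page_on_lru = true;

	/* Analogous to TestClearPageLRU(): atomically clear the flag and
	 * return its previous value; only one caller can ever see true. */
	static bool test_clear_lru(void)
	{
		return atomic_exchange(&page_on_lru, false);
	}

	int main(void)
	{
		printf("first caller:  %s\n", test_clear_lru() ? "isolates" : "backs off");
		printf("second caller: %s\n", test_clear_lru() ? "isolates" : "backs off");
		return 0;
	}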