mm, memcg: use consistent gfp flags during readahead
Vladimir has noticed that we might declare a memcg OOM even during readahead, because read_pages only uses GFP_KERNEL (with the mapping_gfp restriction) while __do_page_cache_readahead uses page_cache_alloc_readahead, which adds __GFP_NORETRY to prevent OOMs. This gfp mask discrepancy is really unfortunate and easily fixable. Drop page_cache_alloc_readahead(), which has only one user, move the gfp_mask logic into readahead_gfp_mask, and propagate this mask from __do_page_cache_readahead down to read_pages.

This alone would have only very limited impact, as most filesystems implement ->readpages and the common implementation, mpage_readpages, uses GFP_KERNEL (with the mapping_gfp restriction) again. We can tell it to use readahead_gfp_mask instead, since this function is called only during readahead as well. The same applies to read_cache_pages.

ext4 has its own ext4_mpage_readpages, but the path where pages != NULL can use the same gfp mask. Btrfs, cifs, f2fs and orangefs follow a pattern very similar to mpage_readpages, so the same change can be applied to them as well.

[akpm@linux-foundation.org: coding-style fixes]
[mhocko@suse.com: restrict gfp mask in mpage_alloc]
Link: http://lkml.kernel.org/r/20160610074223.GC32285@dhcp22.suse.cz
Link: http://lkml.kernel.org/r/1465301556-26431-1-git-send-email-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Cc: Chris Mason <clm@fb.com>
Cc: Steve French <sfrench@samba.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Jan Kara <jack@suse.cz>
Cc: Mike Marshall <hubcap@omnibond.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: Changman Lee <cm224.lee@samsung.com>
Cc: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: e5e3f4c4f0
Commit: 8a5c743e30
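The change boils down to one shared helper plus a single allocate-and-insert pattern used by every converted path. The helper below is the one this patch introduces in include/linux/pagemap.h; the surrounding loop is only a simplified sketch of the ->readpages pattern described above (modelled on read_pages(), not verbatim code from any one filesystem):

/* include/linux/pagemap.h: one gfp mask shared by all readahead allocations */
static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) |
		__GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

/*
 * Simplified ->readpages-style loop (illustrative only).  The page-cache
 * insertion, and thus the memcg charge, now sees the same forgiving mask
 * as the page allocation done in __do_page_cache_readahead(), so a failed
 * speculative readahead no longer escalates into a memcg OOM.
 */
static void example_readpages(struct address_space *mapping, struct file *filp,
			      struct list_head *pages)
{
	gfp_t gfp = readahead_gfp_mask(mapping);

	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping, page->index, gfp))
			mapping->a_ops->readpage(filp, page);
		put_page(page);
	}
}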
fs/btrfs/extent_io.c
@@ -4180,7 +4180,8 @@ int extent_readpages(struct extent_io_tree *tree,
 		prefetchw(&page->flags);
 		list_del(&page->lru);
 		if (add_to_page_cache_lru(page, mapping,
-					page->index, GFP_NOFS)) {
+					page->index,
+					readahead_gfp_mask(mapping))) {
 			put_page(page);
 			continue;
 		}
fs/cifs/file.c
@@ -3366,7 +3366,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 	struct page *page, *tpage;
 	unsigned int expected_index;
 	int rc;
-	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
+	gfp_t gfp = readahead_gfp_mask(mapping);
 
 	INIT_LIST_HEAD(tmplist);
 
fs/ext4/readpage.c
@@ -166,7 +166,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 			page = list_entry(pages->prev, struct page, lru);
 			list_del(&page->lru);
 			if (add_to_page_cache_lru(page, mapping, page->index,
-				  mapping_gfp_constraint(mapping, GFP_KERNEL)))
+				  readahead_gfp_mask(mapping)))
 				goto next_page;
 		}
 
fs/f2fs/data.c
@@ -996,7 +996,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
 			page = list_entry(pages->prev, struct page, lru);
 			list_del(&page->lru);
 			if (add_to_page_cache_lru(page, mapping,
-						  page->index, GFP_KERNEL))
+						  page->index,
+						  readahead_gfp_mask(mapping)))
 				goto next_page;
 		}
 
fs/mpage.c
@@ -71,6 +71,8 @@ mpage_alloc(struct block_device *bdev,
 {
 	struct bio *bio;
 
+	/* Restrict the given (page cache) mask for slab allocations */
+	gfp_flags &= GFP_KERNEL;
 	bio = bio_alloc(gfp_flags, nr_vecs);
 
 	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
@@ -362,7 +364,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 	sector_t last_block_in_bio = 0;
 	struct buffer_head map_bh;
 	unsigned long first_logical_block = 0;
-	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
+	gfp_t gfp = readahead_gfp_mask(mapping);
 
 	map_bh.b_state = 0;
 	map_bh.b_size = 0;
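The masking added to mpage_alloc() above is needed because the readahead mask is a page-cache allocation mask: it can carry page-allocator-only bits such as __GFP_HIGHMEM or __GFP_MOVABLE from the mapping, which must not reach slab-backed allocators like bio_alloc(). A two-line sketch of the idea (illustrative variable names, not code from the patch):

	gfp_t page_gfp = readahead_gfp_mask(mapping);	/* may include __GFP_HIGHMEM, __GFP_MOVABLE, __GFP_NORETRY, ... */
	gfp_t slab_gfp = page_gfp & GFP_KERNEL;		/* GFP_KERNEL subset only, safe for bio_alloc()/kmalloc() */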
fs/orangefs/inode.c
@@ -80,7 +80,7 @@ static int orangefs_readpages(struct file *file,
 		if (!add_to_page_cache(page,
 				       mapping,
 				       page->index,
-				       GFP_KERNEL)) {
+				       readahead_gfp_mask(mapping))) {
 			ret = read_one_page(page);
 			gossip_debug(GOSSIP_INODE_DEBUG,
 				"failure adding page to cache, read_one_page returned: %d\n",
include/linux/pagemap.h
@@ -209,10 +209,10 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x)
 	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
 }
 
-static inline struct page *page_cache_alloc_readahead(struct address_space *x)
+static inline gfp_t readahead_gfp_mask(struct address_space *x)
 {
-	return __page_cache_alloc(mapping_gfp_mask(x) |
-				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
+	return mapping_gfp_mask(x) |
+				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
 }
 
 typedef int filler_t(void *, struct page *);
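To make the behavioural point concrete: for a mapping with the common default mask GFP_HIGHUSER_MOVABLE, the new helper expands as shown below. It is the __GFP_NORETRY bit that lets both the page allocator and the memcg charge done under add_to_page_cache_lru() give up quietly on a speculative readahead page instead of declaring OOM (a worked example under that assumed default mask, not taken from the patch):

	/* Assuming mapping_gfp_mask(mapping) == GFP_HIGHUSER_MOVABLE: */
	gfp_t gfp = readahead_gfp_mask(mapping);
	/*
	 * gfp == GFP_HIGHUSER_MOVABLE | __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN
	 *
	 * __GFP_NORETRY - fail early instead of retrying or invoking the OOM killer
	 * __GFP_NOWARN  - no allocation-failure warnings for best-effort readahead
	 * __GFP_COLD    - prefer a cache-cold page, it is about to be filled by I/O
	 */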
mm/readahead.c
@@ -89,7 +89,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 		page = lru_to_page(pages);
 		list_del(&page->lru);
 		if (add_to_page_cache_lru(page, mapping, page->index,
-				mapping_gfp_constraint(mapping, GFP_KERNEL))) {
+				readahead_gfp_mask(mapping))) {
 			read_cache_pages_invalidate_page(mapping, page);
 			continue;
 		}
@@ -108,7 +108,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 EXPORT_SYMBOL(read_cache_pages);
 
 static int read_pages(struct address_space *mapping, struct file *filp,
-		struct list_head *pages, unsigned nr_pages)
+		struct list_head *pages, unsigned int nr_pages, gfp_t gfp)
 {
 	struct blk_plug plug;
 	unsigned page_idx;
@@ -126,10 +126,8 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page = lru_to_page(pages);
 		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, mapping, page->index,
-				mapping_gfp_constraint(mapping, GFP_KERNEL))) {
+		if (!add_to_page_cache_lru(page, mapping, page->index, gfp))
 			mapping->a_ops->readpage(filp, page);
-		}
 		put_page(page);
 	}
 	ret = 0;
@@ -159,6 +157,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	int page_idx;
 	int ret = 0;
 	loff_t isize = i_size_read(inode);
+	gfp_t gfp_mask = readahead_gfp_mask(mapping);
 
 	if (isize == 0)
 		goto out;
@@ -180,7 +179,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		if (page && !radix_tree_exceptional_entry(page))
 			continue;
 
-		page = page_cache_alloc_readahead(mapping);
+		page = __page_cache_alloc(gfp_mask);
 		if (!page)
 			break;
 		page->index = page_offset;
@@ -196,7 +195,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	 * will then handle the error.
 	 */
 	if (ret)
-		read_pages(mapping, filp, &page_pool, ret);
+		read_pages(mapping, filp, &page_pool, ret, gfp_mask);
 	BUG_ON(!list_empty(&page_pool));
 out:
 	return ret;