mm/filemap: Add filemap_add_folio()
Convert __add_to_page_cache_locked() into __filemap_add_folio().  Add an
assertion to it that (for !hugetlbfs), the folio is naturally aligned
within the file.  Move the prototype from mm.h to pagemap.h.  Convert
add_to_page_cache_lru() into filemap_add_folio().  Add a compatibility
wrapper for unconverted callers.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Parent: bb3c579e25
Commit: 9dd3d06940
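For context before the per-file diff: filemap_add_folio() keeps the semantics
of add_to_page_cache_lru() — it locks the folio, charges it to the memcg,
inserts it into the page cache and the LRU, and returns it still locked on
success.  The new VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio)
assertion enforces natural alignment: a non-hugetlb folio of 2^n pages may
only be added at an index that is a multiple of 2^n.  Below is a minimal
usage sketch, not part of this commit — the function name and error handling
are illustrative assumptions; filemap_alloc_folio() is visible in the diff
context further down.

/*
 * Illustrative sketch only (not from this series): allocate a folio
 * and add it to the page cache with the new API.
 */
static int example_add_folio(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio;
	int err;

	folio = filemap_alloc_folio(GFP_KERNEL, 0);	/* order-0 folio */
	if (!folio)
		return -ENOMEM;

	/*
	 * Inserts into the page cache and the LRU; on success the
	 * folio is returned locked.  index must be naturally aligned
	 * to the folio size.
	 */
	err = filemap_add_folio(mapping, folio, index, GFP_KERNEL);
	if (err) {
		folio_put(folio);	/* drop our allocation reference */
		return err;
	}

	/* ... read data into the folio ... */
	folio_unlock(folio);
	folio_put(folio);	/* the page cache keeps its own reference */
	return 0;
}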
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -213,13 +213,6 @@ int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
 		loff_t *);
 int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
 		loff_t *);
-/*
- * Any attempt to mark this function as static leads to build failure
- * when CONFIG_DEBUG_INFO_BTF is enabled because __add_to_page_cache_locked()
- * is referred to by BPF code. This must be visible for error injection.
- */
-int __add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-		pgoff_t index, gfp_t gfp, void **shadowp);
 
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -876,9 +876,11 @@ static inline int fault_in_pages_readable(const char __user *uaddr, size_t size)
 }
 
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-		pgoff_t index, gfp_t gfp_mask);
+		pgoff_t index, gfp_t gfp);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-		pgoff_t index, gfp_t gfp_mask);
+		pgoff_t index, gfp_t gfp);
+int filemap_add_folio(struct address_space *mapping, struct folio *folio,
+		pgoff_t index, gfp_t gfp);
 extern void delete_from_page_cache(struct page *page);
 extern void __delete_from_page_cache(struct page *page, void *shadow);
 void replace_page_cache_page(struct page *old, struct page *new);
@@ -903,6 +905,10 @@ static inline int add_to_page_cache(struct page *page,
 	return error;
 }
 
+/* Must be non-static for BPF error injection */
+int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
+		pgoff_t index, gfp_t gfp, void **shadowp);
+
 /**
  * struct readahead_control - Describes a readahead request.
  *
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -13319,7 +13319,7 @@ BTF_SET_START(btf_non_sleepable_error_inject)
 /* Three functions below can be called from sleepable and non-sleepable context.
  * Assume non-sleepable from bpf safety point of view.
  */
-BTF_ID(func, __add_to_page_cache_locked)
+BTF_ID(func, __filemap_add_folio)
 BTF_ID(func, should_fail_alloc_page)
 BTF_ID(func, should_failslab)
 BTF_SET_END(btf_non_sleepable_error_inject)
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -872,26 +872,25 @@ void replace_page_cache_page(struct page *old, struct page *new)
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
-noinline int __add_to_page_cache_locked(struct page *page,
-					struct address_space *mapping,
-					pgoff_t offset, gfp_t gfp,
-					void **shadowp)
+noinline int __filemap_add_folio(struct address_space *mapping,
+		struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
 {
-	XA_STATE(xas, &mapping->i_pages, offset);
-	int huge = PageHuge(page);
+	XA_STATE(xas, &mapping->i_pages, index);
+	int huge = folio_test_hugetlb(folio);
 	int error;
 	bool charged = false;
 
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+	VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
 	mapping_set_update(&xas, mapping);
 
-	get_page(page);
-	page->mapping = mapping;
-	page->index = offset;
+	folio_get(folio);
+	folio->mapping = mapping;
+	folio->index = index;
 
 	if (!huge) {
-		error = mem_cgroup_charge(page_folio(page), NULL, gfp);
+		error = mem_cgroup_charge(folio, NULL, gfp);
+		VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
 		if (error)
 			goto error;
 		charged = true;
@@ -903,7 +902,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
 		unsigned int order = xa_get_order(xas.xa, xas.xa_index);
 		void *entry, *old = NULL;
 
-		if (order > thp_order(page))
+		if (order > folio_order(folio))
 			xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
 					order, gfp);
 		xas_lock_irq(&xas);
@@ -920,13 +919,13 @@ noinline int __add_to_page_cache_locked(struct page *page,
 				*shadowp = old;
 			/* entry may have been split before we acquired lock */
 			order = xa_get_order(xas.xa, xas.xa_index);
-			if (order > thp_order(page)) {
+			if (order > folio_order(folio)) {
 				xas_split(&xas, old, order);
 				xas_reset(&xas);
 			}
 		}
 
-		xas_store(&xas, page);
+		xas_store(&xas, folio);
 		if (xas_error(&xas))
 			goto unlock;
 
@@ -934,7 +933,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
 
 		/* hugetlb pages do not participate in page cache accounting */
 		if (!huge)
-			__inc_lruvec_page_state(page, NR_FILE_PAGES);
+			__lruvec_stat_add_folio(folio, NR_FILE_PAGES);
 unlock:
 		xas_unlock_irq(&xas);
 	} while (xas_nomem(&xas, gfp));
@@ -942,19 +941,19 @@ unlock:
 	if (xas_error(&xas)) {
 		error = xas_error(&xas);
 		if (charged)
-			mem_cgroup_uncharge(page_folio(page));
+			mem_cgroup_uncharge(folio);
 		goto error;
 	}
 
-	trace_mm_filemap_add_to_page_cache(page);
+	trace_mm_filemap_add_to_page_cache(&folio->page);
 	return 0;
 error:
-	page->mapping = NULL;
+	folio->mapping = NULL;
 	/* Leave page->index set: truncation relies upon it */
-	put_page(page);
+	folio_put(folio);
 	return error;
 }
-ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);
+ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
 
 /**
  * add_to_page_cache_locked - add a locked page to the pagecache
@@ -971,39 +970,38 @@ ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 		pgoff_t offset, gfp_t gfp_mask)
 {
-	return __add_to_page_cache_locked(page, mapping, offset,
+	return __filemap_add_folio(mapping, page_folio(page), offset,
 					  gfp_mask, NULL);
 }
 EXPORT_SYMBOL(add_to_page_cache_locked);
 
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				pgoff_t offset, gfp_t gfp_mask)
+int filemap_add_folio(struct address_space *mapping, struct folio *folio,
+				pgoff_t index, gfp_t gfp)
 {
 	void *shadow = NULL;
 	int ret;
 
-	__SetPageLocked(page);
-	ret = __add_to_page_cache_locked(page, mapping, offset,
-					 gfp_mask, &shadow);
+	__folio_set_locked(folio);
+	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
 	if (unlikely(ret))
-		__ClearPageLocked(page);
+		__folio_clear_locked(folio);
 	else {
 		/*
-		 * The page might have been evicted from cache only
+		 * The folio might have been evicted from cache only
 		 * recently, in which case it should be activated like
-		 * any other repeatedly accessed page.
-		 * The exception is pages getting rewritten; evicting other
+		 * any other repeatedly accessed folio.
+		 * The exception is folios getting rewritten; evicting other
 		 * data from the working set, only to cache data that will
 		 * get overwritten with something else, is a waste of memory.
 		 */
-		WARN_ON_ONCE(PageActive(page));
-		if (!(gfp_mask & __GFP_WRITE) && shadow)
-			workingset_refault(page_folio(page), shadow);
-		lru_cache_add(page);
+		WARN_ON_ONCE(folio_test_active(folio));
+		if (!(gfp & __GFP_WRITE) && shadow)
+			workingset_refault(folio, shadow);
+		folio_add_lru(folio);
 	}
 	return ret;
 }
-EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
+EXPORT_SYMBOL_GPL(filemap_add_folio);
 
 #ifdef CONFIG_NUMA
 struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -108,3 +108,10 @@ void lru_cache_add(struct page *page)
 	folio_add_lru(page_folio(page));
 }
 EXPORT_SYMBOL(lru_cache_add);
+
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+		pgoff_t index, gfp_t gfp)
+{
+	return filemap_add_folio(mapping, page_folio(page), index, gfp);
+}
+EXPORT_SYMBOL(add_to_page_cache_lru);