mm/filemap: Allow large folios to be added to the page cache
We return -EEXIST if there are any non-shadow entries in the page cache
in the range covered by the folio.  If there are multiple shadow entries
in the range, we set *shadowp to one of them (currently the one at the
highest index).  If that turns out to be the wrong answer, we can
implement something more complex.  This is mostly modelled after the
equivalent function in the shmem code.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Parent: d4b4084ac3
Commit: d68eccad37
 mm/filemap.c | 39 ++++++++++++++++++++++-----------------
 1 file changed, 22 insertions(+), 17 deletions(-)
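The insertion loop below enforces the rule stated in the commit message.
As a companion, here is a minimal userspace sketch of that conflict
check; the slots array, SHADOW marker, and check_range() helper are
invented stand-ins for the XArray, its xa_is_value() entries, and the
conflict walk in the real code.

/*
 * Minimal userspace sketch (not kernel code) of the conflict check
 * described in the commit message: fail with -EEXIST if any slot in the
 * folio's range holds a real page, and report the highest-index shadow
 * entry through *shadowp.  All names here are invented for illustration.
 */
#include <errno.h>
#include <stddef.h>

#define SHADOW ((void *)1)      /* stand-in for an xa_is_value() entry */

static int check_range(void **slots, size_t index, size_t nr, void **shadowp)
{
        for (size_t i = index; i < index + nr; i++) {
                void *entry = slots[i];

                if (!entry)
                        continue;
                if (entry != SHADOW)
                        return -EEXIST; /* a real page occupies the slot */
                if (shadowp)
                        *shadowp = entry;       /* highest index seen wins */
        }
        return 0;
}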
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -842,26 +842,27 @@ noinline int __filemap_add_folio(struct address_space *mapping,
 {
        XA_STATE(xas, &mapping->i_pages, index);
        int huge = folio_test_hugetlb(folio);
-       int error;
        bool charged = false;
+       long nr = 1;

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
        mapping_set_update(&xas, mapping);

-       folio_get(folio);
-       folio->mapping = mapping;
-       folio->index = index;
-
        if (!huge) {
-               error = mem_cgroup_charge(folio, NULL, gfp);
+               int error = mem_cgroup_charge(folio, NULL, gfp);
                VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
                if (error)
-                       goto error;
+                       return error;
                charged = true;
+               xas_set_order(&xas, index, folio_order(folio));
+               nr = folio_nr_pages(folio);
        }

        gfp &= GFP_RECLAIM_MASK;
+       folio_ref_add(folio, nr);
+       folio->mapping = mapping;
+       folio->index = xas.xa_index;

        do {
                unsigned int order = xa_get_order(xas.xa, xas.xa_index);
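One subtlety in the hunk above: xas_set_order() aligns the cursor's
xa_index down to the folio's order, which is why the new code sets
folio->index from xas.xa_index rather than the raw index (the
VM_BUG_ON_FOLIO() has already checked the caller passed an aligned
index, so the two agree here).  A rough sketch of that rounding,
assuming the usual power-of-two alignment in lib/xarray.c;
xa_align_index() is an invented name:

/*
 * Rough sketch of the index rounding xas_set_order() applies
 * (simplified: the real helper also sets xa_shift and xa_sibs so that
 * one store covers all 2^order slots).
 */
static unsigned long xa_align_index(unsigned long index, unsigned int order)
{
        return index & ~((1UL << order) - 1);   /* align down to 2^order */
}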
@@ -885,6 +886,8 @@ noinline int __filemap_add_folio(struct address_space *mapping,
                /* entry may have been split before we acquired lock */
                order = xa_get_order(xas.xa, xas.xa_index);
                if (order > folio_order(folio)) {
+                       /* How to handle large swap entries? */
+                       BUG_ON(shmem_mapping(mapping));
                        xas_split(&xas, old, order);
                        xas_reset(&xas);
                }
@@ -894,29 +897,31 @@ noinline int __filemap_add_folio(struct address_space *mapping,
                if (xas_error(&xas))
                        goto unlock;

-               mapping->nrpages++;
+               mapping->nrpages += nr;

                /* hugetlb pages do not participate in page cache accounting */
-               if (!huge)
-                       __lruvec_stat_add_folio(folio, NR_FILE_PAGES);
+               if (!huge) {
+                       __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+                       if (folio_test_pmd_mappable(folio))
+                               __lruvec_stat_mod_folio(folio,
+                                               NR_FILE_THPS, nr);
+               }
 unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

-       if (xas_error(&xas)) {
-               error = xas_error(&xas);
-               if (charged)
-                       mem_cgroup_uncharge(folio);
+       if (xas_error(&xas))
                goto error;
-       }

        trace_mm_filemap_add_to_page_cache(folio);
        return 0;
 error:
+       if (charged)
+               mem_cgroup_uncharge(folio);
        folio->mapping = NULL;
        /* Leave page->index set: truncation relies upon it */
-       folio_put(folio);
-       return error;
+       folio_put_refs(folio, nr);
+       return xas_error(&xas);
 }
 ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);

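To round this off, a hedged caller-side sketch of the path this commit
opens up: inserting a multi-page folio through the filemap_add_folio()
wrapper, as the readahead code does for single pages.  The order,
alignment, and error handling are illustrative only; add_large_folio()
is an invented name.

/*
 * Illustrative caller sketch (not part of this commit): add an order-2
 * (4-page) folio to the page cache.  The index must be aligned to the
 * folio size, matching the VM_BUG_ON_FOLIO() in __filemap_add_folio().
 */
#include <linux/pagemap.h>

static int add_large_folio(struct address_space *mapping, pgoff_t index)
{
        struct folio *folio = filemap_alloc_folio(GFP_KERNEL, 2);
        int err;

        if (!folio)
                return -ENOMEM;
        index &= ~3UL;  /* align down to a 4-page boundary */
        err = filemap_add_folio(mapping, folio, index, GFP_KERNEL);
        if (err)
                folio_put(folio);       /* drop the allocation reference */
        return err;
}

On success, filemap_add_folio() leaves the folio locked and on the LRU;
the caller would populate it and then folio_unlock().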