mm/writeback: Add __folio_mark_dirty()
Turn __set_page_dirty() into a wrapper around __folio_mark_dirty().
Convert account_page_dirtied() into folio_account_dirtied() and account
the number of pages in the folio to support multi-page folios.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Parent: b5e84594ca
Commit: 203a315166
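For orientation, here is a minimal sketch of how a caller might use the new entry point. It is an illustration, not code from this commit: example_mark_folio_dirty() and its arguments are hypothetical, while __folio_mark_dirty(), page_folio(), lock_page_memcg() and unlock_page_memcg() are the real kernel APIs it leans on. The lock_page_memcg() requirement is documented on __folio_mark_dirty() in the last hunk below.

	/* Hypothetical caller sketch; not part of this patch. */
	static void example_mark_folio_dirty(struct folio *folio,
					     struct address_space *mapping)
	{
		lock_page_memcg(&folio->page);	/* required by __folio_mark_dirty() */
		__folio_mark_dirty(folio, mapping, 1);	/* warn == 1: warn if !uptodate */
		unlock_page_memcg(&folio->page);
	}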
@@ -1615,10 +1615,9 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
 					     struct bdi_writeback *wb);
 
-static inline void mem_cgroup_track_foreign_dirty(struct page *page,
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
 						  struct bdi_writeback *wb)
 {
-	struct folio *folio = page_folio(page);
 	if (mem_cgroup_disabled())
 		return;
 
@@ -1643,7 +1642,7 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
 {
 }
 
-static inline void mem_cgroup_track_foreign_dirty(struct page *page,
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
 						  struct bdi_writeback *wb)
 {
 }
@@ -772,8 +772,13 @@ void end_page_writeback(struct page *page);
 void folio_end_writeback(struct folio *folio);
 void wait_for_stable_page(struct page *page);
 void folio_wait_stable(struct folio *folio);
+void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
+static inline void __set_page_dirty(struct page *page,
+		struct address_space *mapping, int warn)
+{
+	__folio_mark_dirty(page_folio(page), mapping, warn);
+}
 
-void __set_page_dirty(struct page *, struct address_space *, int warn);
 int __set_page_dirty_nobuffers(struct page *page);
 int __set_page_dirty_no_writeback(struct page *page);
 
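Existing page-based call sites need no change after this hunk: the out-of-line __set_page_dirty() declaration is gone, but a static inline with the same signature forwards to __folio_mark_dirty(). A sketch of an unchanged legacy call site (the surrounding code is hypothetical; page_mapping() is the real helper):

	/* Legacy caller, still valid: the new static inline converts the
	 * page with page_folio() and calls __folio_mark_dirty(). */
	__set_page_dirty(page, page_mapping(page), 1);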
@@ -2438,29 +2438,30 @@ EXPORT_SYMBOL(__set_page_dirty_no_writeback);
  *
  * NOTE: This relies on being atomic wrt interrupts.
  */
-static void account_page_dirtied(struct page *page,
+static void folio_account_dirtied(struct folio *folio,
 		struct address_space *mapping)
 {
 	struct inode *inode = mapping->host;
 
-	trace_writeback_dirty_page(page, mapping);
+	trace_writeback_dirty_page(&folio->page, mapping);
 
 	if (mapping_can_writeback(mapping)) {
 		struct bdi_writeback *wb;
+		long nr = folio_nr_pages(folio);
 
-		inode_attach_wb(inode, page);
+		inode_attach_wb(inode, &folio->page);
 		wb = inode_to_wb(inode);
 
-		__inc_lruvec_page_state(page, NR_FILE_DIRTY);
-		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
-		__inc_node_page_state(page, NR_DIRTIED);
-		inc_wb_stat(wb, WB_RECLAIMABLE);
-		inc_wb_stat(wb, WB_DIRTIED);
-		task_io_account_write(PAGE_SIZE);
-		current->nr_dirtied++;
-		__this_cpu_inc(bdp_ratelimits);
+		__lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
+		__zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
+		__node_stat_mod_folio(folio, NR_DIRTIED, nr);
+		wb_stat_mod(wb, WB_RECLAIMABLE, nr);
+		wb_stat_mod(wb, WB_DIRTIED, nr);
+		task_io_account_write(nr * PAGE_SIZE);
+		current->nr_dirtied += nr;
+		__this_cpu_add(bdp_ratelimits, nr);
 
-		mem_cgroup_track_foreign_dirty(page, wb);
+		mem_cgroup_track_foreign_dirty(folio, wb);
 	}
 }
 
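The switch from the inc_* helpers to the *_mod variants is what makes this accounting multi-page aware: folio_nr_pages() returns the number of base pages the folio spans, so every counter moves by that amount rather than by one. A hedged illustration of the arithmetic (not part of the patch; values assume 4 KiB base pages):

	/* Illustration only: for an order-2 folio, nr == 4, so the task is
	 * charged 4 * PAGE_SIZE == 16384 bytes and each statistic rises by 4.
	 * For an order-0 (single-page) folio the behaviour is unchanged. */
	long nr = folio_nr_pages(folio);	/* 1 << folio_order(folio) */
	task_io_account_write(nr * PAGE_SIZE);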
@@ -2481,24 +2482,24 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 }
 
 /*
- * Mark the page dirty, and set it dirty in the page cache, and mark the inode
- * dirty.
+ * Mark the folio dirty, and set it dirty in the page cache, and mark
+ * the inode dirty.
  *
- * If warn is true, then emit a warning if the page is not uptodate and has
+ * If warn is true, then emit a warning if the folio is not uptodate and has
  * not been truncated.
  *
  * The caller must hold lock_page_memcg().
  */
-void __set_page_dirty(struct page *page, struct address_space *mapping,
+void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
 		int warn)
 {
 	unsigned long flags;
 
 	xa_lock_irqsave(&mapping->i_pages, flags);
-	if (page->mapping) { /* Race with truncate? */
-		WARN_ON_ONCE(warn && !PageUptodate(page));
-		account_page_dirtied(page, mapping);
-		__xa_set_mark(&mapping->i_pages, page_index(page),
+	if (folio->mapping) { /* Race with truncate? */
+		WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
+		folio_account_dirtied(folio, mapping);
+		__xa_set_mark(&mapping->i_pages, folio_index(folio),
 				PAGECACHE_TAG_DIRTY);
 	}
 	xa_unlock_irqrestore(&mapping->i_pages, flags);