Convert xfs/iomap to use folios

Merge tag 'iomap-5.17' of git://git.infradead.org/users/willy/linux

Pull iomap updates from Matthew Wilcox:
 "Convert xfs/iomap to use folios.

  This should be all that is needed for XFS to use large folios. There
  is no code in this pull request to create large folios, but no
  additional changes should be needed to XFS or iomap once they are
  created.

  Usually this would have come from Darrick, and we had intended that
  it would come that route. Between the holidays and various things
  which Darrick needed to work on, he asked if I could send things
  directly. There weren't any other iomap patches pending for this
  release, which probably also played a role"

* tag 'iomap-5.17' of git://git.infradead.org/users/willy/linux: (26 commits)
  iomap: Inline __iomap_zero_iter into its caller
  xfs: Support large folios
  iomap: Support large folios in invalidatepage
  iomap: Convert iomap_migrate_page() to use folios
  iomap: Convert iomap_add_to_ioend() to take a folio
  iomap: Simplify iomap_do_writepage()
  iomap: Simplify iomap_writepage_map()
  iomap,xfs: Convert ->discard_page to ->discard_folio
  iomap: Convert iomap_write_end_inline to take a folio
  iomap: Convert iomap_write_begin() and iomap_write_end() to folios
  iomap: Convert __iomap_zero_iter to use a folio
  iomap: Allow iomap_write_begin() to be called with the full length
  iomap: Convert iomap_page_mkwrite to use a folio
  iomap: Convert readahead and readpage to use a folio
  iomap: Convert iomap_read_inline_data to take a folio
  iomap: Use folio offsets instead of page offsets
  iomap: Convert bio completions to use folios
  iomap: Pass the iomap_page into iomap_set_range_uptodate
  iomap: Add iomap_invalidate_folio
  iomap: Convert iomap_releasepage to use a folio
  ...
Commit: f079ab01b5
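
The diffs below all follow one idiom: page-based calls are replaced with their folio equivalents (PageLocked() becomes folio_test_locked(), PageUptodate() becomes folio_test_uptodate(), zero_user_segments() becomes folio_zero_segments(), and so on), so the same code works whether a folio contains one page or many. A minimal before/after sketch of that shape, using only calls that appear in the hunks below; page_ready()/folio_ready() are hypothetical names for illustration:

/* Before: a page-based helper. */
static bool page_ready(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return PageUptodate(page);
}

/* After: the folio equivalent, correct for folios of any order. */
static bool folio_ready(struct folio *folio)
{
	BUG_ON(!folio_test_locked(folio));
	return folio_test_uptodate(folio);
}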
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
@@ -279,6 +279,7 @@ Accounting Framework
 Block Devices
 =============
 
+.. kernel-doc:: include/linux/bio.h
 .. kernel-doc:: block/blk-core.c
    :export:
 
diff --git a/block/bio.c b/block/bio.c
@@ -1035,6 +1035,28 @@ int bio_add_page(struct bio *bio, struct page *page,
 }
 EXPORT_SYMBOL(bio_add_page);
 
+/**
+ * bio_add_folio - Attempt to add part of a folio to a bio.
+ * @bio: BIO to add to.
+ * @folio: Folio to add.
+ * @len: How many bytes from the folio to add.
+ * @off: First byte in this folio to add.
+ *
+ * Filesystems that use folios can call this function instead of calling
+ * bio_add_page() for each page in the folio.  If @off is bigger than
+ * PAGE_SIZE, this function can create a bio_vec that starts in a page
+ * after the bv_page.  BIOs do not support folios that are 4GiB or larger.
+ *
+ * Return: Whether the addition was successful.
+ */
+bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
+		   size_t off)
+{
+	if (len > UINT_MAX || off > UINT_MAX)
+		return 0;
+	return bio_add_page(bio, &folio->page, len, off) > 0;
+}
+
 void __bio_release_pages(struct bio *bio, bool mark_dirty)
 {
 	struct bvec_iter_all iter_all;
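A sketch of how a filesystem might use the new helper to read a whole folio with one call instead of a bio_add_page() loop. example_read_folio() and its bdev/sector/end_io plumbing are hypothetical, not from this series; bio_alloc() is the 5.17-era two-argument form:

static void example_read_folio(struct folio *folio, struct block_device *bdev,
			       sector_t sector, bio_end_io_t *end_io)
{
	struct bio *bio = bio_alloc(GFP_NOFS, 1);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_READ;
	bio->bi_end_io = end_io;

	/* One call covers the whole folio, however many pages it spans. */
	if (bio_add_folio(bio, folio, folio_size(folio), 0))
		submit_bio(bio);
	else
		bio_put(bio);	/* folio did not fit in this bio */
}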
diff --git a/fs/buffer.c b/fs/buffer.c
@@ -1969,34 +1969,34 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
 	}
 }
 
-int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
 		get_block_t *get_block, const struct iomap *iomap)
 {
 	unsigned from = pos & (PAGE_SIZE - 1);
 	unsigned to = from + len;
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	unsigned block_start, block_end;
 	sector_t block;
 	int err = 0;
 	unsigned blocksize, bbits;
 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
 
-	BUG_ON(!PageLocked(page));
+	BUG_ON(!folio_test_locked(folio));
 	BUG_ON(from > PAGE_SIZE);
 	BUG_ON(to > PAGE_SIZE);
 	BUG_ON(from > to);
 
-	head = create_page_buffers(page, inode, 0);
+	head = create_page_buffers(&folio->page, inode, 0);
 	blocksize = head->b_size;
 	bbits = block_size_bits(blocksize);
 
-	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
+	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
 
 	for(bh = head, block_start = 0; bh != head || !block_start;
 	    block++, block_start=block_end, bh = bh->b_this_page) {
 		block_end = block_start + blocksize;
 		if (block_end <= from || block_start >= to) {
-			if (PageUptodate(page)) {
+			if (folio_test_uptodate(folio)) {
 				if (!buffer_uptodate(bh))
 					set_buffer_uptodate(bh);
 			}
@@ -2016,20 +2016,20 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
 
 		if (buffer_new(bh)) {
 			clean_bdev_bh_alias(bh);
-			if (PageUptodate(page)) {
+			if (folio_test_uptodate(folio)) {
 				clear_buffer_new(bh);
 				set_buffer_uptodate(bh);
 				mark_buffer_dirty(bh);
 				continue;
 			}
 			if (block_end > to || block_start < from)
-				zero_user_segments(page,
+				folio_zero_segments(folio,
 					to, block_end,
 					block_start, from);
 			continue;
 		}
 	}
-	if (PageUptodate(page)) {
+	if (folio_test_uptodate(folio)) {
 		if (!buffer_uptodate(bh))
 			set_buffer_uptodate(bh);
 		continue;
@@ -2050,14 +2050,15 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
 			err = -EIO;
 	}
 	if (unlikely(err))
-		page_zero_new_buffers(page, from, to);
+		page_zero_new_buffers(&folio->page, from, to);
 	return err;
 }
 
 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 		get_block_t *get_block)
 {
-	return __block_write_begin_int(page, pos, len, get_block, NULL);
+	return __block_write_begin_int(page_folio(page), pos, len, get_block,
+				       NULL);
 }
 EXPORT_SYMBOL(__block_write_begin);
 
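Note the pattern in __block_write_begin() above: the exported page-based entry point keeps its signature, and the page is bridged to its folio with page_folio(), which cannot fail because every page belongs to some folio (single-page folios included). The same bridge works for any legacy caller; a sketch with hypothetical names (folio_write_begin() stands in for a folio-based implementation):

int folio_write_begin(struct folio *folio, loff_t pos, unsigned len);

/* Legacy page-based wrapper around a folio-based implementation. */
int legacy_write_begin(struct page *page, loff_t pos, unsigned len)
{
	return folio_write_begin(page_folio(page), pos, len);
}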
diff --git a/fs/internal.h b/fs/internal.h
@@ -37,7 +37,7 @@ static inline int emergency_thaw_bdev(struct super_block *sb)
 /*
  * buffer.c
  */
-int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
 		get_block_t *get_block, const struct iomap *iomap);
 
 /*
(The diff for one file is not shown here because of its large size.)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
@@ -437,37 +437,37 @@ xfs_prepare_ioend(
  * see a ENOSPC in writeback).
  */
 static void
-xfs_discard_page(
-	struct page		*page,
-	loff_t			fileoff)
+xfs_discard_folio(
+	struct folio		*folio,
+	loff_t			pos)
 {
-	struct inode		*inode = page->mapping->host;
+	struct inode		*inode = folio->mapping->host;
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_mount	*mp = ip->i_mount;
-	unsigned int		pageoff = offset_in_page(fileoff);
-	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, fileoff);
-	xfs_fileoff_t		pageoff_fsb = XFS_B_TO_FSBT(mp, pageoff);
+	size_t			offset = offset_in_folio(folio, pos);
+	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, pos);
+	xfs_fileoff_t		pageoff_fsb = XFS_B_TO_FSBT(mp, offset);
 	int			error;
 
 	if (xfs_is_shutdown(mp))
 		goto out_invalidate;
 
 	xfs_alert_ratelimited(mp,
-		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
-			page, ip->i_ino, fileoff);
+		"page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
+			folio, ip->i_ino, pos);
 
 	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
-			i_blocks_per_page(inode, page) - pageoff_fsb);
+			i_blocks_per_folio(inode, folio) - pageoff_fsb);
 	if (error && !xfs_is_shutdown(mp))
 		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
 out_invalidate:
-	iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff);
+	iomap_invalidate_folio(folio, offset, folio_size(folio) - offset);
 }
 
 static const struct iomap_writeback_ops xfs_writeback_ops = {
 	.map_blocks		= xfs_map_blocks,
 	.prepare_ioend		= xfs_prepare_ioend,
-	.discard_page		= xfs_discard_page,
+	.discard_folio		= xfs_discard_folio,
 };
 
 STATIC int
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
@@ -87,6 +87,7 @@ xfs_inode_alloc(
 	/* VFS doesn't initialise i_mode or i_state! */
 	VFS_I(ip)->i_mode = 0;
 	VFS_I(ip)->i_state = 0;
+	mapping_set_large_folios(VFS_I(ip)->i_mapping);
 
 	XFS_STATS_INC(mp, vn_active);
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
@@ -320,6 +321,7 @@ xfs_reinit_inode(
 	inode->i_rdev = dev;
 	inode->i_uid = uid;
 	inode->i_gid = gid;
+	mapping_set_large_folios(inode->i_mapping);
 	return error;
 }
 
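These two hunks are the whole of XFS's opt-in: every place an inode's address_space is (re)initialised now calls mapping_set_large_folios(). A sketch of the same opt-in for a hypothetical filesystem, assuming all of its buffered-I/O paths already handle multi-page folios; examplefs_init_mapping() is an illustrative name:

static void examplefs_init_mapping(struct inode *inode)
{
	/* Allow the page cache to allocate large folios for this file. */
	mapping_set_large_folios(inode->i_mapping);
}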
diff --git a/include/linux/bio.h b/include/linux/bio.h
@@ -166,7 +166,7 @@ static inline void bio_advance(struct bio *bio, unsigned int nbytes)
  */
 #define bio_for_each_bvec_all(bvl, bio, i)		\
 	for (i = 0, bvl = bio_first_bvec_all(bio);	\
-	     i < (bio)->bi_vcnt; i++, bvl++)		\
+	     i < (bio)->bi_vcnt; i++, bvl++)
 
 #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
 
@@ -260,6 +260,57 @@ static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
 	return &bio->bi_io_vec[bio->bi_vcnt - 1];
 }
 
+/**
+ * struct folio_iter - State for iterating all folios in a bio.
+ * @folio: The current folio we're iterating.  NULL after the last folio.
+ * @offset: The byte offset within the current folio.
+ * @length: The number of bytes in this iteration (will not cross folio
+ *	boundary).
+ */
+struct folio_iter {
+	struct folio *folio;
+	size_t offset;
+	size_t length;
+	/* private: for use by the iterator */
+	size_t _seg_count;
+	int _i;
+};
+
+static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
+				   int i)
+{
+	struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
+
+	fi->folio = page_folio(bvec->bv_page);
+	fi->offset = bvec->bv_offset +
+			PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
+	fi->_seg_count = bvec->bv_len;
+	fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
+	fi->_i = i;
+}
+
+static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
+{
+	fi->_seg_count -= fi->length;
+	if (fi->_seg_count) {
+		fi->folio = folio_next(fi->folio);
+		fi->offset = 0;
+		fi->length = min(folio_size(fi->folio), fi->_seg_count);
+	} else if (fi->_i + 1 < bio->bi_vcnt) {
+		bio_first_folio(fi, bio, fi->_i + 1);
+	} else {
+		fi->folio = NULL;
+	}
+}
+
+/**
+ * bio_for_each_folio_all - Iterate over each folio in a bio.
+ * @fi: struct folio_iter which is updated for each folio.
+ * @bio: struct bio to iterate over.
+ */
+#define bio_for_each_folio_all(fi, bio)				\
+	for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
+
 enum bip_flags {
 	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
 	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
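A sketch of a read-completion handler walking a bio with the new iterator. example_read_end_io() is hypothetical, and unlike iomap's real completion path it assumes uptodate state is tracked per folio rather than per block:

static void example_read_end_io(struct bio *bio)
{
	struct folio_iter fi;

	/* fi.offset/fi.length describe the bytes covered in each folio. */
	bio_for_each_folio_all(fi, bio) {
		if (!bio->bi_status)
			folio_mark_uptodate(fi.folio);
		folio_unlock(fi.folio);
	}
	bio_put(bio);
}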
@@ -409,7 +460,8 @@ extern void bio_uninit(struct bio *);
 extern void bio_reset(struct bio *);
 void bio_chain(struct bio *, struct bio *);
 
-extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
+int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
+bool bio_add_folio(struct bio *, struct folio *, size_t len, size_t off);
 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
 			   unsigned int, unsigned int);
 int bio_add_zone_append_page(struct bio *bio, struct page *page,
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
@@ -225,6 +225,7 @@ void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
 int iomap_is_partially_uptodate(struct page *page, unsigned long from,
 		unsigned long count);
 int iomap_releasepage(struct page *page, gfp_t gfp_mask);
+void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
 void iomap_invalidatepage(struct page *page, unsigned int offset,
 		unsigned int len);
 #ifdef CONFIG_MIGRATION
@@ -284,7 +285,7 @@ struct iomap_writeback_ops {
 	 * Optional, allows the file system to discard state on a page where
 	 * we failed to submit any I/O.
 	 */
-	void (*discard_page)(struct page *page, loff_t fileoff);
+	void (*discard_folio)(struct folio *folio, loff_t pos);
 };
 
 struct iomap_writepage_ctx {
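For a filesystem with no delalloc state to unwind, the minimal ->discard_folio can simply invalidate the rest of the folio. example_discard_folio() is a hypothetical sketch; compare xfs_discard_folio() above, which also punches out delalloc blocks first:

static void example_discard_folio(struct folio *folio, loff_t pos)
{
	size_t offset = offset_in_folio(folio, pos);

	/* Throw away everything from the failed offset to folio end. */
	iomap_invalidate_folio(folio, offset, folio_size(folio) - offset);
}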