Merge tag 'iomap-5.15-merge-4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull iomap updates from Darrick Wong:
 "The most notable externally visible change for this cycle is the
  addition of support for reads to inline tail fragments of files,
  which was requested by the erofs developers; and a correction for a
  kernel memory corruption bug if the sysadmin tries to activate a
  swapfile with more pages than the swapfile header suggests.

  We also now report writeback completion errors to the file mapping
  correctly, instead of munging all errors into EIO.

  Internally, the bulk of the changes are Christoph's patchset to
  reduce the indirect function call count by a third to a half by
  converting iomap iteration from a loop pattern to a
  generator/consumer pattern.  As an added bonus, fsdax no longer
  open-codes iomap apply loops.

  Summary:
   - Simplify the bio_end_page usage in the buffered IO code.
   - Support reading inline data at nonzero offsets for erofs.
   - Fix some typos and bad grammar.
   - Convert kmap_atomic usage in the inline data read path.
   - Add some extra inline data input checking.
   - Fix a memory corruption bug stemming from iomap_swapfile_activate
     trying to activate more pages than mm was expecting.
   - Pass errnos through the page writeback code so that writeback
     errors are reported correctly instead of being munged to EIO.
   - Replace iomap_apply with open-coded iterator loops to reduce the
     number of indirect calls by a third to a half.
   - Refactor the fsdax code to use iomap iterators instead of the
     open-coded iomap_apply code that it had before.
   - Format file range iomap tracepoint data in hexadecimal and
     standardize the names used in the pretty-print string"

* tag 'iomap-5.15-merge-4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux: (41 commits)
  iomap: standardize tracepoint formatting and storage
  mm/swap: consider max pages in iomap_swapfile_add_extent
  iomap: move loop control code to iter.c
  iomap: constify iomap_iter_srcmap
  fsdax: switch the fault handlers to use iomap_iter
  fsdax: factor out a dax_fault_actor() helper
  fsdax: factor out helpers to simplify the dax fault code
  iomap: rework unshare flag
  iomap: pass an iomap_iter to various buffered I/O helpers
  iomap: remove iomap_apply
  fsdax: switch dax_iomap_rw to use iomap_iter
  iomap: switch iomap_swapfile_activate to use iomap_iter
  iomap: switch iomap_seek_data to use iomap_iter
  iomap: switch iomap_seek_hole to use iomap_iter
  iomap: switch iomap_bmap to use iomap_iter
  iomap: switch iomap_fiemap to use iomap_iter
  iomap: switch __iomap_dio_rw to use iomap_iter
  iomap: switch iomap_page_mkwrite to use iomap_iter
  iomap: switch iomap_zero_range to use iomap_iter
  iomap: switch iomap_file_unshare to use iomap_iter
  ...
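The centerpiece of the series is replacing iomap_apply(), which made an
indirect call into a per-operation "actor" for every extent, with the
iomap_iter() loop in which the caller drives the iteration directly.
Below is a minimal sketch of the new consumer-side pattern as it appears
throughout the diff that follows; it is an illustration only, and
example_op()/example_walk() are placeholder names, not helpers from this
merge.

#include <linux/iomap.h>

/*
 * Placeholder operation: act on the extent currently described by
 * iter->iomap, starting at iter->pos, and return how many bytes were
 * handled (or a negative errno).  Real callers do their read, write,
 * zeroing or fault work here.
 */
static loff_t example_op(struct iomap_iter *iter)
{
	return iomap_length(iter);
}

static int example_walk(struct inode *inode, loff_t pos, loff_t len,
			const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode = inode,
		.pos = pos,
		.len = len,
	};
	int ret;

	/*
	 * iomap_iter() calls ->iomap_begin()/->iomap_end() internally and
	 * advances by iter.processed on each pass; the loop ends when the
	 * whole range has been consumed (0) or an error occurs (negative).
	 */
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = example_op(&iter);
	return ret;
}

Because iter.processed is carried in the iterator, a short or failed
step simply stops the loop and its value is returned to the caller, so
no per-extent callback is needed.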
fs/btrfs/inode.c

@@ -8247,9 +8247,10 @@ static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
 return dip;
 }

-static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
+static blk_qc_t btrfs_submit_direct(const struct iomap_iter *iter,
 struct bio *dio_bio, loff_t file_offset)
 {
+struct inode *inode = iter->inode;
 const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 const bool raid56 = (btrfs_data_alloc_profile(fs_info) &
@@ -8265,7 +8266,7 @@ static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
 int ret;
 blk_status_t status;
 struct btrfs_io_geometry geom;
-struct btrfs_dio_data *dio_data = iomap->private;
+struct btrfs_dio_data *dio_data = iter->iomap.private;
 struct extent_map *em = NULL;

 dip = btrfs_create_dio_private(dio_bio, inode, file_offset);
fs/buffer.c

@@ -1912,7 +1912,7 @@ EXPORT_SYMBOL(page_zero_new_buffers);

 static void
 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
-struct iomap *iomap)
+const struct iomap *iomap)
 {
 loff_t offset = block << inode->i_blkbits;

@@ -1966,7 +1966,7 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
 }

 int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
-get_block_t *get_block, struct iomap *iomap)
+get_block_t *get_block, const struct iomap *iomap)
 {
 unsigned from = pos & (PAGE_SIZE - 1);
 unsigned to = from + len;
fs/dax.c (600 lines changed)
@@ -1005,12 +1005,12 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

-static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
+static sector_t dax_iomap_sector(const struct iomap *iomap, loff_t pos)
 {
 return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
 }

-static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
+static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
 pfn_t *pfnp)
 {
 const sector_t sector = dax_iomap_sector(iomap, pos);
@ -1066,6 +1066,66 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
|
|||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FS_DAX_PMD
|
||||
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
|
||||
const struct iomap *iomap, void **entry)
|
||||
{
|
||||
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
|
||||
unsigned long pmd_addr = vmf->address & PMD_MASK;
|
||||
struct vm_area_struct *vma = vmf->vma;
|
||||
struct inode *inode = mapping->host;
|
||||
pgtable_t pgtable = NULL;
|
||||
struct page *zero_page;
|
||||
spinlock_t *ptl;
|
||||
pmd_t pmd_entry;
|
||||
pfn_t pfn;
|
||||
|
||||
zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
|
||||
|
||||
if (unlikely(!zero_page))
|
||||
goto fallback;
|
||||
|
||||
pfn = page_to_pfn_t(zero_page);
|
||||
*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
|
||||
DAX_PMD | DAX_ZERO_PAGE, false);
|
||||
|
||||
if (arch_needs_pgtable_deposit()) {
|
||||
pgtable = pte_alloc_one(vma->vm_mm);
|
||||
if (!pgtable)
|
||||
return VM_FAULT_OOM;
|
||||
}
|
||||
|
||||
ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
|
||||
if (!pmd_none(*(vmf->pmd))) {
|
||||
spin_unlock(ptl);
|
||||
goto fallback;
|
||||
}
|
||||
|
||||
if (pgtable) {
|
||||
pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
|
||||
mm_inc_nr_ptes(vma->vm_mm);
|
||||
}
|
||||
pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
|
||||
pmd_entry = pmd_mkhuge(pmd_entry);
|
||||
set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
|
||||
spin_unlock(ptl);
|
||||
trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
|
||||
return VM_FAULT_NOPAGE;
|
||||
|
||||
fallback:
|
||||
if (pgtable)
|
||||
pte_free(vma->vm_mm, pgtable);
|
||||
trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
|
||||
return VM_FAULT_FALLBACK;
|
||||
}
|
||||
#else
|
||||
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
|
||||
const struct iomap *iomap, void **entry)
|
||||
{
|
||||
return VM_FAULT_FALLBACK;
|
||||
}
|
||||
#endif /* CONFIG_FS_DAX_PMD */
|
||||
|
||||
s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
|
||||
{
|
||||
sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
|
||||
|
@ -1103,20 +1163,21 @@ s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
|
|||
return size;
|
||||
}
|
||||
|
||||
static loff_t
|
||||
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
||||
struct iomap *iomap, struct iomap *srcmap)
|
||||
static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
|
||||
struct iov_iter *iter)
|
||||
{
|
||||
const struct iomap *iomap = &iomi->iomap;
|
||||
loff_t length = iomap_length(iomi);
|
||||
loff_t pos = iomi->pos;
|
||||
struct block_device *bdev = iomap->bdev;
|
||||
struct dax_device *dax_dev = iomap->dax_dev;
|
||||
struct iov_iter *iter = data;
|
||||
loff_t end = pos + length, done = 0;
|
||||
ssize_t ret = 0;
|
||||
size_t xfer;
|
||||
int id;
|
||||
|
||||
if (iov_iter_rw(iter) == READ) {
|
||||
end = min(end, i_size_read(inode));
|
||||
end = min(end, i_size_read(iomi->inode));
|
||||
if (pos >= end)
|
||||
return 0;
|
||||
|
||||
|
@ -1133,7 +1194,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
|||
* written by write(2) is visible in mmap.
|
||||
*/
|
||||
if (iomap->flags & IOMAP_F_NEW) {
|
||||
invalidate_inode_pages2_range(inode->i_mapping,
|
||||
invalidate_inode_pages2_range(iomi->inode->i_mapping,
|
||||
pos >> PAGE_SHIFT,
|
||||
(end - 1) >> PAGE_SHIFT);
|
||||
}
|
||||
|
@ -1209,31 +1270,29 @@ ssize_t
|
|||
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
|
||||
const struct iomap_ops *ops)
|
||||
{
|
||||
struct address_space *mapping = iocb->ki_filp->f_mapping;
|
||||
struct inode *inode = mapping->host;
|
||||
loff_t pos = iocb->ki_pos, ret = 0, done = 0;
|
||||
unsigned flags = 0;
|
||||
struct iomap_iter iomi = {
|
||||
.inode = iocb->ki_filp->f_mapping->host,
|
||||
.pos = iocb->ki_pos,
|
||||
.len = iov_iter_count(iter),
|
||||
};
|
||||
loff_t done = 0;
|
||||
int ret;
|
||||
|
||||
if (iov_iter_rw(iter) == WRITE) {
|
||||
lockdep_assert_held_write(&inode->i_rwsem);
|
||||
flags |= IOMAP_WRITE;
|
||||
lockdep_assert_held_write(&iomi.inode->i_rwsem);
|
||||
iomi.flags |= IOMAP_WRITE;
|
||||
} else {
|
||||
lockdep_assert_held(&inode->i_rwsem);
|
||||
lockdep_assert_held(&iomi.inode->i_rwsem);
|
||||
}
|
||||
|
||||
if (iocb->ki_flags & IOCB_NOWAIT)
|
||||
flags |= IOMAP_NOWAIT;
|
||||
iomi.flags |= IOMAP_NOWAIT;
|
||||
|
||||
while (iov_iter_count(iter)) {
|
||||
ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
|
||||
iter, dax_iomap_actor);
|
||||
if (ret <= 0)
|
||||
break;
|
||||
pos += ret;
|
||||
done += ret;
|
||||
}
|
||||
while ((ret = iomap_iter(&iomi, ops)) > 0)
|
||||
iomi.processed = dax_iomap_iter(&iomi, iter);
|
||||
|
||||
iocb->ki_pos += done;
|
||||
done = iomi.pos - iocb->ki_pos;
|
||||
iocb->ki_pos = iomi.pos;
|
||||
return done ? done : ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dax_iomap_rw);
|
||||
|
@ -1250,44 +1309,146 @@ static vm_fault_t dax_fault_return(int error)
|
|||
* flushed on write-faults (non-cow), but not read-faults.
|
||||
*/
|
||||
static bool dax_fault_is_synchronous(unsigned long flags,
|
||||
struct vm_area_struct *vma, struct iomap *iomap)
|
||||
struct vm_area_struct *vma, const struct iomap *iomap)
|
||||
{
|
||||
return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
|
||||
&& (iomap->flags & IOMAP_F_DIRTY);
|
||||
}
|
||||
|
||||
/*
|
||||
* When handling a synchronous page fault and the inode need a fsync, we can
|
||||
* insert the PTE/PMD into page tables only after that fsync happened. Skip
|
||||
* insertion for now and return the pfn so that caller can insert it after the
|
||||
* fsync is done.
|
||||
*/
|
||||
static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
|
||||
{
|
||||
if (WARN_ON_ONCE(!pfnp))
|
||||
return VM_FAULT_SIGBUS;
|
||||
*pfnp = pfn;
|
||||
return VM_FAULT_NEEDDSYNC;
|
||||
}
|
||||
|
||||
static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
|
||||
const struct iomap_iter *iter)
|
||||
{
|
||||
sector_t sector = dax_iomap_sector(&iter->iomap, iter->pos);
|
||||
unsigned long vaddr = vmf->address;
|
||||
vm_fault_t ret;
|
||||
int error = 0;
|
||||
|
||||
switch (iter->iomap.type) {
|
||||
case IOMAP_HOLE:
|
||||
case IOMAP_UNWRITTEN:
|
||||
clear_user_highpage(vmf->cow_page, vaddr);
|
||||
break;
|
||||
case IOMAP_MAPPED:
|
||||
error = copy_cow_page_dax(iter->iomap.bdev, iter->iomap.dax_dev,
|
||||
sector, vmf->cow_page, vaddr);
|
||||
break;
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
error = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
if (error)
|
||||
return dax_fault_return(error);
|
||||
|
||||
__SetPageUptodate(vmf->cow_page);
|
||||
ret = finish_fault(vmf);
|
||||
if (!ret)
|
||||
return VM_FAULT_DONE_COW;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
|
||||
* @vmf: vm fault instance
|
||||
* @iter: iomap iter
|
||||
* @pfnp: pfn to be returned
|
||||
* @xas: the dax mapping tree of a file
|
||||
* @entry: an unlocked dax entry to be inserted
|
||||
* @pmd: distinguish whether it is a pmd fault
|
||||
*/
|
||||
static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
|
||||
const struct iomap_iter *iter, pfn_t *pfnp,
|
||||
struct xa_state *xas, void **entry, bool pmd)
|
||||
{
|
||||
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
|
||||
const struct iomap *iomap = &iter->iomap;
|
||||
size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
|
||||
loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
|
||||
bool write = vmf->flags & FAULT_FLAG_WRITE;
|
||||
bool sync = dax_fault_is_synchronous(iter->flags, vmf->vma, iomap);
|
||||
unsigned long entry_flags = pmd ? DAX_PMD : 0;
|
||||
int err = 0;
|
||||
pfn_t pfn;
|
||||
|
||||
if (!pmd && vmf->cow_page)
|
||||
return dax_fault_cow_page(vmf, iter);
|
||||
|
||||
/* if we are reading UNWRITTEN and HOLE, return a hole. */
|
||||
if (!write &&
|
||||
(iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
|
||||
if (!pmd)
|
||||
return dax_load_hole(xas, mapping, entry, vmf);
|
||||
return dax_pmd_load_hole(xas, vmf, iomap, entry);
|
||||
}
|
||||
|
||||
if (iomap->type != IOMAP_MAPPED) {
|
||||
WARN_ON_ONCE(1);
|
||||
return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
err = dax_iomap_pfn(&iter->iomap, pos, size, &pfn);
|
||||
if (err)
|
||||
return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
|
||||
|
||||
*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags,
|
||||
write && !sync);
|
||||
|
||||
if (sync)
|
||||
return dax_fault_synchronous_pfnp(pfnp, pfn);
|
||||
|
||||
/* insert PMD pfn */
|
||||
if (pmd)
|
||||
return vmf_insert_pfn_pmd(vmf, pfn, write);
|
||||
|
||||
/* insert PTE pfn */
|
||||
if (write)
|
||||
return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
|
||||
return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
|
||||
}
|
||||
|
||||
static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
|
||||
int *iomap_errp, const struct iomap_ops *ops)
|
||||
{
|
||||
struct vm_area_struct *vma = vmf->vma;
|
||||
struct address_space *mapping = vma->vm_file->f_mapping;
|
||||
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
|
||||
XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
|
||||
struct inode *inode = mapping->host;
|
||||
unsigned long vaddr = vmf->address;
|
||||
loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
|
||||
struct iomap iomap = { .type = IOMAP_HOLE };
|
||||
struct iomap srcmap = { .type = IOMAP_HOLE };
|
||||
unsigned flags = IOMAP_FAULT;
|
||||
int error, major = 0;
|
||||
bool write = vmf->flags & FAULT_FLAG_WRITE;
|
||||
bool sync;
|
||||
struct iomap_iter iter = {
|
||||
.inode = mapping->host,
|
||||
.pos = (loff_t)vmf->pgoff << PAGE_SHIFT,
|
||||
.len = PAGE_SIZE,
|
||||
.flags = IOMAP_FAULT,
|
||||
};
|
||||
vm_fault_t ret = 0;
|
||||
void *entry;
|
||||
pfn_t pfn;
|
||||
int error;
|
||||
|
||||
trace_dax_pte_fault(inode, vmf, ret);
|
||||
trace_dax_pte_fault(iter.inode, vmf, ret);
|
||||
/*
|
||||
* Check whether offset isn't beyond end of file now. Caller is supposed
|
||||
* to hold locks serializing us with truncate / punch hole so this is
|
||||
* a reliable test.
|
||||
*/
|
||||
if (pos >= i_size_read(inode)) {
|
||||
if (iter.pos >= i_size_read(iter.inode)) {
|
||||
ret = VM_FAULT_SIGBUS;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (write && !vmf->cow_page)
|
||||
flags |= IOMAP_WRITE;
|
||||
if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
|
||||
iter.flags |= IOMAP_WRITE;
|
||||
|
||||
entry = grab_mapping_entry(&xas, mapping, 0);
|
||||
if (xa_is_internal(entry)) {
|
||||
|
@ -1306,206 +1467,42 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
|
|||
goto unlock_entry;
|
||||
}
|
||||
|
||||
/*
|
||||
* Note that we don't bother to use iomap_apply here: DAX required
|
||||
* the file system block size to be equal the page size, which means
|
||||
* that we never have to deal with more than a single extent here.
|
||||
*/
|
||||
error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap, &srcmap);
|
||||
while ((error = iomap_iter(&iter, ops)) > 0) {
|
||||
if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
|
||||
iter.processed = -EIO; /* fs corruption? */
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
|
||||
if (ret != VM_FAULT_SIGBUS &&
|
||||
(iter.iomap.flags & IOMAP_F_NEW)) {
|
||||
count_vm_event(PGMAJFAULT);
|
||||
count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
|
||||
ret |= VM_FAULT_MAJOR;
|
||||
}
|
||||
|
||||
if (!(ret & VM_FAULT_ERROR))
|
||||
iter.processed = PAGE_SIZE;
|
||||
}
|
||||
|
||||
if (iomap_errp)
|
||||
*iomap_errp = error;
|
||||
if (error) {
|
||||
if (!ret && error)
|
||||
ret = dax_fault_return(error);
|
||||
goto unlock_entry;
|
||||
}
|
||||
if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
|
||||
error = -EIO; /* fs corruption? */
|
||||
goto error_finish_iomap;
|
||||
}
|
||||
|
||||
if (vmf->cow_page) {
|
||||
sector_t sector = dax_iomap_sector(&iomap, pos);
|
||||
|
||||
switch (iomap.type) {
|
||||
case IOMAP_HOLE:
|
||||
case IOMAP_UNWRITTEN:
|
||||
clear_user_highpage(vmf->cow_page, vaddr);
|
||||
break;
|
||||
case IOMAP_MAPPED:
|
||||
error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev,
|
||||
sector, vmf->cow_page, vaddr);
|
||||
break;
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
error = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
if (error)
|
||||
goto error_finish_iomap;
|
||||
|
||||
__SetPageUptodate(vmf->cow_page);
|
||||
ret = finish_fault(vmf);
|
||||
if (!ret)
|
||||
ret = VM_FAULT_DONE_COW;
|
||||
goto finish_iomap;
|
||||
}
|
||||
|
||||
sync = dax_fault_is_synchronous(flags, vma, &iomap);
|
||||
|
||||
switch (iomap.type) {
|
||||
case IOMAP_MAPPED:
|
||||
if (iomap.flags & IOMAP_F_NEW) {
|
||||
count_vm_event(PGMAJFAULT);
|
||||
count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
|
||||
major = VM_FAULT_MAJOR;
|
||||
}
|
||||
error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
|
||||
if (error < 0)
|
||||
goto error_finish_iomap;
|
||||
|
||||
entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
|
||||
0, write && !sync);
|
||||
|
||||
/*
|
||||
* If we are doing synchronous page fault and inode needs fsync,
|
||||
* we can insert PTE into page tables only after that happens.
|
||||
* Skip insertion for now and return the pfn so that caller can
|
||||
* insert it after fsync is done.
|
||||
*/
|
||||
if (sync) {
|
||||
if (WARN_ON_ONCE(!pfnp)) {
|
||||
error = -EIO;
|
||||
goto error_finish_iomap;
|
||||
}
|
||||
*pfnp = pfn;
|
||||
ret = VM_FAULT_NEEDDSYNC | major;
|
||||
goto finish_iomap;
|
||||
}
|
||||
trace_dax_insert_mapping(inode, vmf, entry);
|
||||
if (write)
|
||||
ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
|
||||
else
|
||||
ret = vmf_insert_mixed(vma, vaddr, pfn);
|
||||
|
||||
goto finish_iomap;
|
||||
case IOMAP_UNWRITTEN:
|
||||
case IOMAP_HOLE:
|
||||
if (!write) {
|
||||
ret = dax_load_hole(&xas, mapping, &entry, vmf);
|
||||
goto finish_iomap;
|
||||
}
|
||||
fallthrough;
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
error = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
error_finish_iomap:
|
||||
ret = dax_fault_return(error);
|
||||
finish_iomap:
|
||||
if (ops->iomap_end) {
|
||||
int copied = PAGE_SIZE;
|
||||
|
||||
if (ret & VM_FAULT_ERROR)
|
||||
copied = 0;
|
||||
/*
|
||||
* The fault is done by now and there's no way back (other
|
||||
* thread may be already happily using PTE we have installed).
|
||||
* Just ignore error from ->iomap_end since we cannot do much
|
||||
* with it.
|
||||
*/
|
||||
ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
|
||||
}
|
||||
unlock_entry:
|
||||
unlock_entry:
|
||||
dax_unlock_entry(&xas, entry);
|
||||
out:
|
||||
trace_dax_pte_fault_done(inode, vmf, ret);
|
||||
return ret | major;
|
||||
out:
|
||||
trace_dax_pte_fault_done(iter.inode, vmf, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FS_DAX_PMD
|
||||
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
|
||||
struct iomap *iomap, void **entry)
|
||||
static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
|
||||
pgoff_t max_pgoff)
|
||||
{
|
||||
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
|
||||
unsigned long pmd_addr = vmf->address & PMD_MASK;
|
||||
struct vm_area_struct *vma = vmf->vma;
|
||||
struct inode *inode = mapping->host;
|
||||
pgtable_t pgtable = NULL;
|
||||
struct page *zero_page;
|
||||
spinlock_t *ptl;
|
||||
pmd_t pmd_entry;
|
||||
pfn_t pfn;
|
||||
|
||||
zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
|
||||
|
||||
if (unlikely(!zero_page))
|
||||
goto fallback;
|
||||
|
||||
pfn = page_to_pfn_t(zero_page);
|
||||
*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
|
||||
DAX_PMD | DAX_ZERO_PAGE, false);
|
||||
|
||||
if (arch_needs_pgtable_deposit()) {
|
||||
pgtable = pte_alloc_one(vma->vm_mm);
|
||||
if (!pgtable)
|
||||
return VM_FAULT_OOM;
|
||||
}
|
||||
|
||||
ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
|
||||
if (!pmd_none(*(vmf->pmd))) {
|
||||
spin_unlock(ptl);
|
||||
goto fallback;
|
||||
}
|
||||
|
||||
if (pgtable) {
|
||||
pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
|
||||
mm_inc_nr_ptes(vma->vm_mm);
|
||||
}
|
||||
pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
|
||||
pmd_entry = pmd_mkhuge(pmd_entry);
|
||||
set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
|
||||
spin_unlock(ptl);
|
||||
trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
|
||||
return VM_FAULT_NOPAGE;
|
||||
|
||||
fallback:
|
||||
if (pgtable)
|
||||
pte_free(vma->vm_mm, pgtable);
|
||||
trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
|
||||
return VM_FAULT_FALLBACK;
|
||||
}
|
||||
|
||||
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
|
||||
const struct iomap_ops *ops)
|
||||
{
|
||||
struct vm_area_struct *vma = vmf->vma;
|
||||
struct address_space *mapping = vma->vm_file->f_mapping;
|
||||
XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
|
||||
unsigned long pmd_addr = vmf->address & PMD_MASK;
|
||||
bool write = vmf->flags & FAULT_FLAG_WRITE;
|
||||
bool sync;
|
||||
unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
|
||||
struct inode *inode = mapping->host;
|
||||
vm_fault_t result = VM_FAULT_FALLBACK;
|
||||
struct iomap iomap = { .type = IOMAP_HOLE };
|
||||
struct iomap srcmap = { .type = IOMAP_HOLE };
|
||||
pgoff_t max_pgoff;
|
||||
void *entry;
|
||||
loff_t pos;
|
||||
int error;
|
||||
pfn_t pfn;
|
||||
|
||||
/*
|
||||
* Check whether offset isn't beyond end of file now. Caller is
|
||||
* supposed to hold locks serializing us with truncate / punch hole so
|
||||
* this is a reliable test.
|
||||
*/
|
||||
max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
|
||||
|
||||
trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
|
||||
|
||||
/*
|
||||
* Make sure that the faulting address's PMD offset (color) matches
|
||||
|
@ -1515,25 +1512,58 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
|
|||
*/
|
||||
if ((vmf->pgoff & PG_PMD_COLOUR) !=
|
||||
((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
|
||||
goto fallback;
|
||||
return true;
|
||||
|
||||
/* Fall back to PTEs if we're going to COW */
|
||||
if (write && !(vma->vm_flags & VM_SHARED))
|
||||
goto fallback;
|
||||
if (write && !(vmf->vma->vm_flags & VM_SHARED))
|
||||
return true;
|
||||
|
||||
/* If the PMD would extend outside the VMA */
|
||||
if (pmd_addr < vma->vm_start)
|
||||
goto fallback;
|
||||
if ((pmd_addr + PMD_SIZE) > vma->vm_end)
|
||||
goto fallback;
|
||||
if (pmd_addr < vmf->vma->vm_start)
|
||||
return true;
|
||||
if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
|
||||
return true;
|
||||
|
||||
/* If the PMD would extend beyond the file size */
|
||||
if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
|
||||
const struct iomap_ops *ops)
|
||||
{
|
||||
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
|
||||
XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
|
||||
struct iomap_iter iter = {
|
||||
.inode = mapping->host,
|
||||
.len = PMD_SIZE,
|
||||
.flags = IOMAP_FAULT,
|
||||
};
|
||||
vm_fault_t ret = VM_FAULT_FALLBACK;
|
||||
pgoff_t max_pgoff;
|
||||
void *entry;
|
||||
int error;
|
||||
|
||||
if (vmf->flags & FAULT_FLAG_WRITE)
|
||||
iter.flags |= IOMAP_WRITE;
|
||||
|
||||
/*
|
||||
* Check whether offset isn't beyond end of file now. Caller is
|
||||
* supposed to hold locks serializing us with truncate / punch hole so
|
||||
* this is a reliable test.
|
||||
*/
|
||||
max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);
|
||||
|
||||
trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);
|
||||
|
||||
if (xas.xa_index >= max_pgoff) {
|
||||
result = VM_FAULT_SIGBUS;
|
||||
ret = VM_FAULT_SIGBUS;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* If the PMD would extend beyond the file size */
|
||||
if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
|
||||
if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
|
||||
goto fallback;
|
||||
|
||||
/*
|
||||
|
@ -1544,7 +1574,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
|
|||
*/
|
||||
entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
|
||||
if (xa_is_internal(entry)) {
|
||||
result = xa_to_internal(entry);
|
||||
ret = xa_to_internal(entry);
|
||||
goto fallback;
|
||||
}
|
||||
|
||||
|
@ -1556,88 +1586,30 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
|
|||
*/
|
||||
if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
|
||||
!pmd_devmap(*vmf->pmd)) {
|
||||
result = 0;
|
||||
ret = 0;
|
||||
goto unlock_entry;
|
||||
}
|
||||
|
||||
/*
|
||||
* Note that we don't use iomap_apply here. We aren't doing I/O, only
|
||||
* setting up a mapping, so really we're using iomap_begin() as a way
|
||||
* to look up our filesystem block.
|
||||
*/
|
||||
pos = (loff_t)xas.xa_index << PAGE_SHIFT;
|
||||
error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap,
|
||||
&srcmap);
|
||||
if (error)
|
||||
goto unlock_entry;
|
||||
iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
|
||||
while ((error = iomap_iter(&iter, ops)) > 0) {
|
||||
if (iomap_length(&iter) < PMD_SIZE)
|
||||
continue; /* actually breaks out of the loop */
|
||||
|
||||
if (iomap.offset + iomap.length < pos + PMD_SIZE)
|
||||
goto finish_iomap;
|
||||
|
||||
sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
|
||||
|
||||
switch (iomap.type) {
|
||||
case IOMAP_MAPPED:
|
||||
error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
|
||||
if (error < 0)
|
||||
goto finish_iomap;
|
||||
|
||||
entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
|
||||
DAX_PMD, write && !sync);
|
||||
|
||||
/*
|
||||
* If we are doing synchronous page fault and inode needs fsync,
|
||||
* we can insert PMD into page tables only after that happens.
|
||||
* Skip insertion for now and return the pfn so that caller can
|
||||
* insert it after fsync is done.
|
||||
*/
|
||||
if (sync) {
|
||||
if (WARN_ON_ONCE(!pfnp))
|
||||
goto finish_iomap;
|
||||
*pfnp = pfn;
|
||||
result = VM_FAULT_NEEDDSYNC;
|
||||
goto finish_iomap;
|
||||
}
|
||||
|
||||
trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
|
||||
result = vmf_insert_pfn_pmd(vmf, pfn, write);
|
||||
break;
|
||||
case IOMAP_UNWRITTEN:
|
||||
case IOMAP_HOLE:
|
||||
if (WARN_ON_ONCE(write))
|
||||
break;
|
||||
result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
|
||||
break;
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
break;
|
||||
ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
|
||||
if (ret != VM_FAULT_FALLBACK)
|
||||
iter.processed = PMD_SIZE;
|
||||
}
|
||||
|
||||
finish_iomap:
|
||||
if (ops->iomap_end) {
|
||||
int copied = PMD_SIZE;
|
||||
|
||||
if (result == VM_FAULT_FALLBACK)
|
||||
copied = 0;
|
||||
/*
|
||||
* The fault is done by now and there's no way back (other
|
||||
* thread may be already happily using PMD we have installed).
|
||||
* Just ignore error from ->iomap_end since we cannot do much
|
||||
* with it.
|
||||
*/
|
||||
ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
|
||||
&iomap);
|
||||
}
|
||||
unlock_entry:
|
||||
unlock_entry:
|
||||
dax_unlock_entry(&xas, entry);
|
||||
fallback:
|
||||
if (result == VM_FAULT_FALLBACK) {
|
||||
split_huge_pmd(vma, vmf->pmd, vmf->address);
|
||||
fallback:
|
||||
if (ret == VM_FAULT_FALLBACK) {
|
||||
split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
|
||||
count_vm_event(THP_FAULT_FALLBACK);
|
||||
}
|
||||
out:
|
||||
trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
|
||||
return result;
|
||||
trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
|
||||
return ret;
|
||||
}
|
||||
#else
|
||||
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
|
||||
|
|
|
fs/gfs2/bmap.c

@@ -1002,7 +1002,7 @@ static void gfs2_write_unlock(struct inode *inode)
 }

 static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
-unsigned len, struct iomap *iomap)
+unsigned len)
 {
 unsigned int blockmask = i_blocksize(inode) - 1;
 struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -1013,8 +1013,7 @@ static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
 }

 static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
-unsigned copied, struct page *page,
-struct iomap *iomap)
+unsigned copied, struct page *page)
 {
 struct gfs2_trans *tr = current->journal_info;
 struct gfs2_inode *ip = GFS2_I(inode);
fs/internal.h

@@ -48,8 +48,8 @@ static inline int emergency_thaw_bdev(struct super_block *sb)
 /*
  * buffer.c
  */
-extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
-get_block_t *get_block, struct iomap *iomap);
+int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+get_block_t *get_block, const struct iomap *iomap);

 /*
  * char_dev.c
fs/iomap/Makefile

@@ -9,9 +9,9 @@ ccflags-y += -I $(srctree)/$(src) # needed for trace events
 obj-$(CONFIG_FS_IOMAP) += iomap.o

 iomap-y += trace.o \
-apply.o \
 buffered-io.o \
 direct-io.o \
 fiemap.o \
+iter.o \
 seek.o
 iomap-$(CONFIG_SWAP) += swapfile.o
fs/iomap/apply.c (deleted)

@@ -1,99 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2010 Red Hat, Inc.
- * Copyright (c) 2016-2018 Christoph Hellwig.
- */
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/fs.h>
-#include <linux/iomap.h>
-#include "trace.h"
-
-/*
- * Execute a iomap write on a segment of the mapping that spans a
- * contiguous range of pages that have identical block mapping state.
- *
- * This avoids the need to map pages individually, do individual allocations
- * for each page and most importantly avoid the need for filesystem specific
- * locking per page. Instead, all the operations are amortised over the entire
- * range of pages. It is assumed that the filesystems will lock whatever
- * resources they require in the iomap_begin call, and release them in the
- * iomap_end call.
- */
-loff_t
-iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
-const struct iomap_ops *ops, void *data, iomap_actor_t actor)
-{
-struct iomap iomap = { .type = IOMAP_HOLE };
-struct iomap srcmap = { .type = IOMAP_HOLE };
-loff_t written = 0, ret;
-u64 end;
-
-trace_iomap_apply(inode, pos, length, flags, ops, actor, _RET_IP_);
-
-/*
- * Need to map a range from start position for length bytes. This can
- * span multiple pages - it is only guaranteed to return a range of a
- * single type of pages (e.g. all into a hole, all mapped or all
- * unwritten). Failure at this point has nothing to undo.
- *
- * If allocation is required for this range, reserve the space now so
- * that the allocation is guaranteed to succeed later on. Once we copy
- * the data into the page cache pages, then we cannot fail otherwise we
- * expose transient stale data. If the reserve fails, we can safely
- * back out at this point as there is nothing to undo.
- */
-ret = ops->iomap_begin(inode, pos, length, flags, &iomap, &srcmap);
-if (ret)
-return ret;
-if (WARN_ON(iomap.offset > pos)) {
-written = -EIO;
-goto out;
-}
-if (WARN_ON(iomap.length == 0)) {
-written = -EIO;
-goto out;
-}
-
-trace_iomap_apply_dstmap(inode, &iomap);
-if (srcmap.type != IOMAP_HOLE)
-trace_iomap_apply_srcmap(inode, &srcmap);
-
-/*
- * Cut down the length to the one actually provided by the filesystem,
- * as it might not be able to give us the whole size that we requested.
- */
-end = iomap.offset + iomap.length;
-if (srcmap.type != IOMAP_HOLE)
-end = min(end, srcmap.offset + srcmap.length);
-if (pos + length > end)
-length = end - pos;
-
-/*
- * Now that we have guaranteed that the space allocation will succeed,
- * we can do the copy-in page by page without having to worry about
- * failures exposing transient data.
- *
- * To support COW operations, we read in data for partially blocks from
- * the srcmap if the file system filled it in. In that case we the
- * length needs to be limited to the earlier of the ends of the iomaps.
- * If the file system did not provide a srcmap we pass in the normal
- * iomap into the actors so that they don't need to have special
- * handling for the two cases.
- */
-written = actor(inode, pos, length, data, &iomap,
-srcmap.type != IOMAP_HOLE ? &srcmap : &iomap);
-
-out:
-/*
- * Now the data has been copied, commit the range we've copied. This
- * should not fail unless the filesystem has had a fatal error.
- */
-if (ops->iomap_end) {
-ret = ops->iomap_end(inode, pos, length,
-written > 0 ? written : 0,
-flags, &iomap);
-}
-
-return written ? written : ret;
-}
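For contrast with the iterator loop sketched after the commit message,
this is roughly the calling convention that deleting apply.c retires:
the caller packaged an actor callback and looped over iomap_apply()
itself, paying indirect calls per extent for both the actor and the
filesystem's iomap_ops. A simplified sketch modeled on the removed
dax_iomap_rw()/iomap_readahead() loops elsewhere in this diff;
example_actor() and example_old_walk() are placeholder names, not code
from this merge.

/*
 * Old-style actor: same job as example_op() above, but invoked
 * indirectly by iomap_apply() with the mapping passed as arguments.
 */
static loff_t example_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap, struct iomap *srcmap)
{
	return length;	/* pretend the whole extent was processed */
}

static loff_t example_old_walk(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, const struct iomap_ops *ops, void *data)
{
	loff_t written = 0, ret = 0;

	/*
	 * One iomap_apply() call per extent; each call does one
	 * ->iomap_begin()/->iomap_end() round trip plus the actor call.
	 */
	while (length > 0) {
		ret = iomap_apply(inode, pos, length, flags, ops, data,
				example_actor);
		if (ret <= 0)
			break;
		pos += ret;
		length -= ret;
		written += ret;
	}
	return written ? written : ret;
}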
fs/iomap/buffered-io.c

@@ -36,7 +36,7 @@ static inline struct iomap_page *to_iomap_page(struct page *page)
 {
 /*
  * per-block data is stored in the head page. Callers should
- * not be dealing with tail pages (and if they are, they can
+ * not be dealing with tail pages, and if they are, they can
  * call thp_head() first.
  */
 VM_BUG_ON_PGFLAGS(PageTail(page), page);
@@ -98,7 +98,7 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
 unsigned last = (poff + plen - 1) >> block_bits;

 /*
- * If the block size is smaller than the page size we need to check the
+ * If the block size is smaller than the page size, we need to check the
  * per-block uptodate status and adjust the offset and length if needed
  * to avoid reading in already uptodate ranges.
  */
@@ -126,7 +126,7 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
 }

 /*
- * If the extent spans the block that contains the i_size we need to
+ * If the extent spans the block that contains the i_size, we need to
  * handle both halves separately so that we properly zero data in the
  * page cache for blocks that are entirely outside of i_size.
  */
@ -205,60 +205,67 @@ struct iomap_readpage_ctx {
|
|||
struct readahead_control *rac;
|
||||
};
|
||||
|
||||
static void
|
||||
iomap_read_inline_data(struct inode *inode, struct page *page,
|
||||
struct iomap *iomap)
|
||||
static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
|
||||
struct page *page)
|
||||
{
|
||||
size_t size = i_size_read(inode);
|
||||
const struct iomap *iomap = iomap_iter_srcmap(iter);
|
||||
size_t size = i_size_read(iter->inode) - iomap->offset;
|
||||
size_t poff = offset_in_page(iomap->offset);
|
||||
void *addr;
|
||||
|
||||
if (PageUptodate(page))
|
||||
return;
|
||||
return PAGE_SIZE - poff;
|
||||
|
||||
BUG_ON(page_has_private(page));
|
||||
BUG_ON(page->index);
|
||||
BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
|
||||
if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
|
||||
return -EIO;
|
||||
if (WARN_ON_ONCE(size > PAGE_SIZE -
|
||||
offset_in_page(iomap->inline_data)))
|
||||
return -EIO;
|
||||
if (WARN_ON_ONCE(size > iomap->length))
|
||||
return -EIO;
|
||||
if (poff > 0)
|
||||
iomap_page_create(iter->inode, page);
|
||||
|
||||
addr = kmap_atomic(page);
|
||||
addr = kmap_local_page(page) + poff;
|
||||
memcpy(addr, iomap->inline_data, size);
|
||||
memset(addr + size, 0, PAGE_SIZE - size);
|
||||
kunmap_atomic(addr);
|
||||
SetPageUptodate(page);
|
||||
memset(addr + size, 0, PAGE_SIZE - poff - size);
|
||||
kunmap_local(addr);
|
||||
iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff);
|
||||
return PAGE_SIZE - poff;
|
||||
}
|
||||
|
||||
static inline bool iomap_block_needs_zeroing(struct inode *inode,
|
||||
struct iomap *iomap, loff_t pos)
|
||||
static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
|
||||
loff_t pos)
|
||||
{
|
||||
return iomap->type != IOMAP_MAPPED ||
|
||||
(iomap->flags & IOMAP_F_NEW) ||
|
||||
pos >= i_size_read(inode);
|
||||
const struct iomap *srcmap = iomap_iter_srcmap(iter);
|
||||
|
||||
return srcmap->type != IOMAP_MAPPED ||
|
||||
(srcmap->flags & IOMAP_F_NEW) ||
|
||||
pos >= i_size_read(iter->inode);
|
||||
}
|
||||
|
||||
static loff_t
|
||||
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
||||
struct iomap *iomap, struct iomap *srcmap)
|
||||
static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
|
||||
struct iomap_readpage_ctx *ctx, loff_t offset)
|
||||
{
|
||||
struct iomap_readpage_ctx *ctx = data;
|
||||
const struct iomap *iomap = &iter->iomap;
|
||||
loff_t pos = iter->pos + offset;
|
||||
loff_t length = iomap_length(iter) - offset;
|
||||
struct page *page = ctx->cur_page;
|
||||
struct iomap_page *iop;
|
||||
bool same_page = false, is_contig = false;
|
||||
loff_t orig_pos = pos;
|
||||
unsigned poff, plen;
|
||||
sector_t sector;
|
||||
|
||||
if (iomap->type == IOMAP_INLINE) {
|
||||
WARN_ON_ONCE(pos);
|
||||
iomap_read_inline_data(inode, page, iomap);
|
||||
return PAGE_SIZE;
|
||||
}
|
||||
if (iomap->type == IOMAP_INLINE)
|
||||
return min(iomap_read_inline_data(iter, page), length);
|
||||
|
||||
/* zero post-eof blocks as the page may be mapped */
|
||||
iop = iomap_page_create(inode, page);
|
||||
iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
|
||||
iop = iomap_page_create(iter->inode, page);
|
||||
iomap_adjust_read_range(iter->inode, iop, &pos, length, &poff, &plen);
|
||||
if (plen == 0)
|
||||
goto done;
|
||||
|
||||
if (iomap_block_needs_zeroing(inode, iomap, pos)) {
|
||||
if (iomap_block_needs_zeroing(iter, pos)) {
|
||||
zero_user(page, poff, plen);
|
||||
iomap_set_range_uptodate(page, poff, plen);
|
||||
goto done;
|
||||
|
@ -268,16 +275,10 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
|||
if (iop)
|
||||
atomic_add(plen, &iop->read_bytes_pending);
|
||||
|
||||
/* Try to merge into a previous segment if we can */
|
||||
sector = iomap_sector(iomap, pos);
|
||||
if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
|
||||
if (__bio_try_merge_page(ctx->bio, page, plen, poff,
|
||||
&same_page))
|
||||
goto done;
|
||||
is_contig = true;
|
||||
}
|
||||
|
||||
if (!is_contig || bio_full(ctx->bio, plen)) {
|
||||
if (!ctx->bio ||
|
||||
bio_end_sector(ctx->bio) != sector ||
|
||||
bio_add_page(ctx->bio, page, plen, poff) != plen) {
|
||||
gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
|
||||
gfp_t orig_gfp = gfp;
|
||||
unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
|
||||
|
@ -301,13 +302,12 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
|||
ctx->bio->bi_iter.bi_sector = sector;
|
||||
bio_set_dev(ctx->bio, iomap->bdev);
|
||||
ctx->bio->bi_end_io = iomap_read_end_io;
|
||||
__bio_add_page(ctx->bio, page, plen, poff);
|
||||
}
|
||||
|
||||
bio_add_page(ctx->bio, page, plen, poff);
|
||||
done:
|
||||
/*
|
||||
* Move the caller beyond our range so that it keeps making progress.
|
||||
* For that we have to include any leading non-uptodate ranges, but
|
||||
* For that, we have to include any leading non-uptodate ranges, but
|
||||
* we can skip trailing ones as they will be handled in the next
|
||||
* iteration.
|
||||
*/
|
||||
|
@ -317,23 +317,23 @@ done:
|
|||
int
|
||||
iomap_readpage(struct page *page, const struct iomap_ops *ops)
|
||||
{
|
||||
struct iomap_readpage_ctx ctx = { .cur_page = page };
|
||||
struct inode *inode = page->mapping->host;
|
||||
unsigned poff;
|
||||
loff_t ret;
|
||||
struct iomap_iter iter = {
|
||||
.inode = page->mapping->host,
|
||||
.pos = page_offset(page),
|
||||
.len = PAGE_SIZE,
|
||||
};
|
||||
struct iomap_readpage_ctx ctx = {
|
||||
.cur_page = page,
|
||||
};
|
||||
int ret;
|
||||
|
||||
trace_iomap_readpage(page->mapping->host, 1);
|
||||
|
||||
for (poff = 0; poff < PAGE_SIZE; poff += ret) {
|
||||
ret = iomap_apply(inode, page_offset(page) + poff,
|
||||
PAGE_SIZE - poff, 0, ops, &ctx,
|
||||
iomap_readpage_actor);
|
||||
if (ret <= 0) {
|
||||
WARN_ON_ONCE(ret == 0);
|
||||
SetPageError(page);
|
||||
break;
|
||||
}
|
||||
}
|
||||
while ((ret = iomap_iter(&iter, ops)) > 0)
|
||||
iter.processed = iomap_readpage_iter(&iter, &ctx, 0);
|
||||
|
||||
if (ret < 0)
|
||||
SetPageError(page);
|
||||
|
||||
if (ctx.bio) {
|
||||
submit_bio(ctx.bio);
|
||||
|
@ -344,23 +344,22 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
|
|||
}
|
||||
|
||||
/*
|
||||
* Just like mpage_readahead and block_read_full_page we always
|
||||
* Just like mpage_readahead and block_read_full_page, we always
|
||||
* return 0 and just mark the page as PageError on errors. This
|
||||
* should be cleaned up all through the stack eventually.
|
||||
* should be cleaned up throughout the stack eventually.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iomap_readpage);
|
||||
|
||||
static loff_t
|
||||
iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
|
||||
void *data, struct iomap *iomap, struct iomap *srcmap)
|
||||
static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
|
||||
struct iomap_readpage_ctx *ctx)
|
||||
{
|
||||
struct iomap_readpage_ctx *ctx = data;
|
||||
loff_t length = iomap_length(iter);
|
||||
loff_t done, ret;
|
||||
|
||||
for (done = 0; done < length; done += ret) {
|
||||
if (ctx->cur_page && offset_in_page(pos + done) == 0) {
|
||||
if (ctx->cur_page && offset_in_page(iter->pos + done) == 0) {
|
||||
if (!ctx->cur_page_in_bio)
|
||||
unlock_page(ctx->cur_page);
|
||||
put_page(ctx->cur_page);
|
||||
|
@ -370,8 +369,7 @@ iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
|
|||
ctx->cur_page = readahead_page(ctx->rac);
|
||||
ctx->cur_page_in_bio = false;
|
||||
}
|
||||
ret = iomap_readpage_actor(inode, pos + done, length - done,
|
||||
ctx, iomap, srcmap);
|
||||
ret = iomap_readpage_iter(iter, ctx, done);
|
||||
}
|
||||
|
||||
return done;
|
||||
|
@ -394,25 +392,19 @@ iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
|
|||
*/
|
||||
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
|
||||
{
|
||||
struct inode *inode = rac->mapping->host;
|
||||
loff_t pos = readahead_pos(rac);
|
||||
size_t length = readahead_length(rac);
|
||||
struct iomap_iter iter = {
|
||||
.inode = rac->mapping->host,
|
||||
.pos = readahead_pos(rac),
|
||||
.len = readahead_length(rac),
|
||||
};
|
||||
struct iomap_readpage_ctx ctx = {
|
||||
.rac = rac,
|
||||
};
|
||||
|
||||
trace_iomap_readahead(inode, readahead_count(rac));
|
||||
trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
|
||||
|
||||
while (length > 0) {
|
||||
ssize_t ret = iomap_apply(inode, pos, length, 0, ops,
|
||||
&ctx, iomap_readahead_actor);
|
||||
if (ret <= 0) {
|
||||
WARN_ON_ONCE(ret == 0);
|
||||
break;
|
||||
}
|
||||
pos += ret;
|
||||
length -= ret;
|
||||
}
|
||||
while (iomap_iter(&iter, ops) > 0)
|
||||
iter.processed = iomap_readahead_iter(&iter, &ctx);
|
||||
|
||||
if (ctx.bio)
|
||||
submit_bio(ctx.bio);
|
||||
|
@@ -467,7 +459,7 @@ iomap_releasepage(struct page *page, gfp_t gfp_mask)
 /*
  * mm accommodates an old ext3 case where clean pages might not have had
  * the dirty bit cleared. Thus, it can send actual dirty pages to
- * ->releasepage() via shrink_active_list(), skip those here.
+ * ->releasepage() via shrink_active_list(); skip those here.
  */
 if (PageDirty(page) || PageWriteback(page))
 return 0;
@@ -482,7 +474,7 @@ iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
 trace_iomap_invalidatepage(page->mapping->host, offset, len);

 /*
- * If we are invalidating the entire page, clear the dirty state from it
+ * If we're invalidating the entire page, clear the dirty state from it
  * and release it to avoid unnecessary buildup of the LRU.
  */
 if (offset == 0 && len == PAGE_SIZE) {
@ -516,10 +508,6 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
|
|||
EXPORT_SYMBOL_GPL(iomap_migrate_page);
|
||||
#endif /* CONFIG_MIGRATION */
|
||||
|
||||
enum {
|
||||
IOMAP_WRITE_F_UNSHARE = (1 << 0),
|
||||
};
|
||||
|
||||
static void
|
||||
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
|
||||
{
|
||||
|
@ -535,7 +523,7 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
|
|||
|
||||
static int
|
||||
iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
|
||||
unsigned plen, struct iomap *iomap)
|
||||
unsigned plen, const struct iomap *iomap)
|
||||
{
|
||||
struct bio_vec bvec;
|
||||
struct bio bio;
|
||||
|
@ -548,12 +536,12 @@ iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
|
|||
return submit_bio_wait(&bio);
|
||||
}
|
||||
|
||||
static int
|
||||
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
|
||||
struct page *page, struct iomap *srcmap)
|
||||
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
|
||||
unsigned len, struct page *page)
|
||||
{
|
||||
struct iomap_page *iop = iomap_page_create(inode, page);
|
||||
loff_t block_size = i_blocksize(inode);
|
||||
const struct iomap *srcmap = iomap_iter_srcmap(iter);
|
||||
struct iomap_page *iop = iomap_page_create(iter->inode, page);
|
||||
loff_t block_size = i_blocksize(iter->inode);
|
||||
loff_t block_start = round_down(pos, block_size);
|
||||
loff_t block_end = round_up(pos + len, block_size);
|
||||
unsigned from = offset_in_page(pos), to = from + len, poff, plen;
|
||||
|
@ -563,18 +551,18 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
|
|||
ClearPageError(page);
|
||||
|
||||
do {
|
||||
iomap_adjust_read_range(inode, iop, &block_start,
|
||||
iomap_adjust_read_range(iter->inode, iop, &block_start,
|
||||
block_end - block_start, &poff, &plen);
|
||||
if (plen == 0)
|
||||
break;
|
||||
|
||||
if (!(flags & IOMAP_WRITE_F_UNSHARE) &&
|
||||
if (!(iter->flags & IOMAP_UNSHARE) &&
|
||||
(from <= poff || from >= poff + plen) &&
|
||||
(to <= poff || to >= poff + plen))
|
||||
continue;
|
||||
|
||||
if (iomap_block_needs_zeroing(inode, srcmap, block_start)) {
|
||||
if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
|
||||
if (iomap_block_needs_zeroing(iter, block_start)) {
|
||||
if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
|
||||
return -EIO;
|
||||
zero_user_segments(page, poff, from, to, poff + plen);
|
||||
} else {
|
||||
|
@ -589,41 +577,54 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
|
||||
struct page **pagep, struct iomap *iomap, struct iomap *srcmap)
|
||||
static int iomap_write_begin_inline(const struct iomap_iter *iter,
|
||||
struct page *page)
|
||||
{
|
||||
const struct iomap_page_ops *page_ops = iomap->page_ops;
|
||||
int ret;
|
||||
|
||||
/* needs more work for the tailpacking case; disable for now */
|
||||
if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
|
||||
return -EIO;
|
||||
ret = iomap_read_inline_data(iter, page);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
|
||||
unsigned len, struct page **pagep)
|
||||
{
|
||||
const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
|
||||
const struct iomap *srcmap = iomap_iter_srcmap(iter);
|
||||
struct page *page;
|
||||
int status = 0;
|
||||
|
||||
BUG_ON(pos + len > iomap->offset + iomap->length);
|
||||
if (srcmap != iomap)
|
||||
BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
|
||||
if (srcmap != &iter->iomap)
|
||||
BUG_ON(pos + len > srcmap->offset + srcmap->length);
|
||||
|
||||
if (fatal_signal_pending(current))
|
||||
return -EINTR;
|
||||
|
||||
if (page_ops && page_ops->page_prepare) {
|
||||
status = page_ops->page_prepare(inode, pos, len, iomap);
|
||||
status = page_ops->page_prepare(iter->inode, pos, len);
|
||||
if (status)
|
||||
return status;
|
||||
}
|
||||
|
||||
page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT,
|
||||
AOP_FLAG_NOFS);
|
||||
page = grab_cache_page_write_begin(iter->inode->i_mapping,
|
||||
pos >> PAGE_SHIFT, AOP_FLAG_NOFS);
|
||||
if (!page) {
|
||||
status = -ENOMEM;
|
||||
goto out_no_page;
|
||||
}
|
||||
|
||||
if (srcmap->type == IOMAP_INLINE)
|
||||
iomap_read_inline_data(inode, page, srcmap);
|
||||
else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
|
||||
status = iomap_write_begin_inline(iter, page);
|
||||
else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
|
||||
status = __block_write_begin_int(page, pos, len, NULL, srcmap);
|
||||
else
|
||||
status = __iomap_write_begin(inode, pos, len, flags, page,
|
||||
srcmap);
|
||||
status = __iomap_write_begin(iter, pos, len, page);
|
||||
|
||||
if (unlikely(status))
|
||||
goto out_unlock;
|
||||
|
@ -634,11 +635,11 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
|
|||
out_unlock:
|
||||
unlock_page(page);
|
||||
put_page(page);
|
||||
iomap_write_failed(inode, pos, len);
|
||||
iomap_write_failed(iter->inode, pos, len);
|
||||
|
||||
out_no_page:
|
||||
if (page_ops && page_ops->page_done)
|
||||
page_ops->page_done(inode, pos, 0, NULL, iomap);
|
||||
page_ops->page_done(iter->inode, pos, 0, NULL);
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -650,13 +651,13 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
|
|||
/*
|
||||
* The blocks that were entirely written will now be uptodate, so we
|
||||
* don't have to worry about a readpage reading them and overwriting a
|
||||
* partial write. However if we have encountered a short write and only
|
||||
* partial write. However, if we've encountered a short write and only
|
||||
* partially written into a block, it will not be marked uptodate, so a
|
||||
* readpage might come in and destroy our partial write.
|
||||
*
|
||||
* Do the simplest thing, and just treat any short write to a non
|
||||
* uptodate page as a zero-length write, and force the caller to redo
|
||||
* the whole thing.
|
||||
* Do the simplest thing and just treat any short write to a
|
||||
* non-uptodate page as a zero-length write, and force the caller to
|
||||
* redo the whole thing.
|
||||
*/
|
||||
if (unlikely(copied < len && !PageUptodate(page)))
|
||||
return 0;
|
||||
|
@ -665,39 +666,40 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
|
|||
return copied;
|
||||
}
|
||||
|
||||
static size_t iomap_write_end_inline(struct inode *inode, struct page *page,
|
||||
struct iomap *iomap, loff_t pos, size_t copied)
|
||||
static size_t iomap_write_end_inline(const struct iomap_iter *iter,
|
||||
struct page *page, loff_t pos, size_t copied)
|
||||
{
|
||||
const struct iomap *iomap = &iter->iomap;
|
||||
void *addr;
|
||||
|
||||
WARN_ON_ONCE(!PageUptodate(page));
|
||||
BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
|
||||
BUG_ON(!iomap_inline_data_valid(iomap));
|
||||
|
||||
flush_dcache_page(page);
|
||||
addr = kmap_atomic(page);
|
||||
memcpy(iomap->inline_data + pos, addr + pos, copied);
|
||||
kunmap_atomic(addr);
|
||||
addr = kmap_local_page(page) + pos;
|
||||
memcpy(iomap_inline_data(iomap, pos), addr, copied);
|
||||
kunmap_local(addr);
|
||||
|
||||
mark_inode_dirty(inode);
|
||||
mark_inode_dirty(iter->inode);
|
||||
return copied;
|
||||
}
|
||||
|
||||
/* Returns the number of bytes copied. May be 0. Cannot be an errno. */
|
||||
static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
|
||||
size_t copied, struct page *page, struct iomap *iomap,
|
||||
struct iomap *srcmap)
|
||||
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
|
||||
size_t copied, struct page *page)
|
||||
{
|
||||
const struct iomap_page_ops *page_ops = iomap->page_ops;
|
||||
loff_t old_size = inode->i_size;
|
||||
const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
|
||||
const struct iomap *srcmap = iomap_iter_srcmap(iter);
|
||||
loff_t old_size = iter->inode->i_size;
|
||||
size_t ret;
|
||||
|
||||
if (srcmap->type == IOMAP_INLINE) {
|
||||
ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
|
||||
ret = iomap_write_end_inline(iter, page, pos, copied);
|
||||
} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
|
||||
ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
|
||||
page, NULL);
|
||||
ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
|
||||
copied, page, NULL);
|
||||
} else {
|
||||
ret = __iomap_write_end(inode, pos, len, copied, page);
|
||||
ret = __iomap_write_end(iter->inode, pos, len, copied, page);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -706,29 +708,28 @@ static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
|
|||
* preferably after I/O completion so that no stale data is exposed.
|
||||
*/
|
||||
if (pos + ret > old_size) {
|
||||
i_size_write(inode, pos + ret);
|
||||
iomap->flags |= IOMAP_F_SIZE_CHANGED;
|
||||
i_size_write(iter->inode, pos + ret);
|
||||
iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
|
||||
}
|
||||
unlock_page(page);
|
||||
|
||||
if (old_size < pos)
|
||||
pagecache_isize_extended(inode, old_size, pos);
|
||||
pagecache_isize_extended(iter->inode, old_size, pos);
|
||||
if (page_ops && page_ops->page_done)
|
||||
page_ops->page_done(inode, pos, ret, page, iomap);
|
||||
page_ops->page_done(iter->inode, pos, ret, page);
|
||||
put_page(page);
|
||||
|
||||
if (ret < len)
|
||||
iomap_write_failed(inode, pos, len);
|
||||
iomap_write_failed(iter->inode, pos, len);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static loff_t
|
||||
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
||||
struct iomap *iomap, struct iomap *srcmap)
|
||||
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
|
||||
{
|
||||
struct iov_iter *i = data;
|
||||
long status = 0;
|
||||
loff_t length = iomap_length(iter);
|
||||
loff_t pos = iter->pos;
|
||||
ssize_t written = 0;
|
||||
long status = 0;
|
||||
|
||||
do {
|
||||
struct page *page;
|
||||
|
@ -744,7 +745,7 @@ again:
|
|||
bytes = length;
|
||||
|
||||
/*
|
||||
* Bring in the user page that we will copy from _first_.
|
||||
* Bring in the user page that we'll copy from _first_.
|
||||
* Otherwise there's a nasty deadlock on copying from the
|
||||
* same page as we're writing to, without it being marked
|
||||
* up-to-date.
|
||||
|
@ -754,18 +755,16 @@ again:
|
|||
break;
|
||||
}
|
||||
|
||||
status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap,
|
||||
srcmap);
|
||||
status = iomap_write_begin(iter, pos, bytes, &page);
|
||||
if (unlikely(status))
|
||||
break;
|
||||
|
||||
if (mapping_writably_mapped(inode->i_mapping))
|
||||
if (mapping_writably_mapped(iter->inode->i_mapping))
|
||||
flush_dcache_page(page);
|
||||
|
||||
copied = copy_page_from_iter_atomic(page, offset, bytes, i);
|
||||
|
||||
status = iomap_write_end(inode, pos, bytes, copied, page, iomap,
|
||||
srcmap);
|
||||
status = iomap_write_end(iter, pos, bytes, copied, page);
|
||||
|
||||
if (unlikely(copied != status))
|
||||
iov_iter_revert(i, copied - status);
|
||||
|
@ -786,36 +785,38 @@ again:
|
|||
written += status;
|
||||
length -= status;
|
||||
|
||||
balance_dirty_pages_ratelimited(inode->i_mapping);
|
||||
balance_dirty_pages_ratelimited(iter->inode->i_mapping);
|
||||
} while (iov_iter_count(i) && length);
|
||||
|
||||
return written ? written : status;
|
||||
}
|
||||
|
||||
ssize_t
|
||||
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
|
||||
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
|
||||
const struct iomap_ops *ops)
|
||||
{
|
||||
struct inode *inode = iocb->ki_filp->f_mapping->host;
|
||||
loff_t pos = iocb->ki_pos, ret = 0, written = 0;
|
||||
struct iomap_iter iter = {
|
||||
.inode = iocb->ki_filp->f_mapping->host,
|
||||
.pos = iocb->ki_pos,
|
||||
.len = iov_iter_count(i),
|
||||
.flags = IOMAP_WRITE,
|
||||
};
|
||||
int ret;
|
||||
|
||||
while (iov_iter_count(iter)) {
|
||||
ret = iomap_apply(inode, pos, iov_iter_count(iter),
|
||||
IOMAP_WRITE, ops, iter, iomap_write_actor);
|
||||
if (ret <= 0)
|
||||
break;
|
||||
pos += ret;
|
||||
written += ret;
|
||||
}
|
||||
|
||||
return written ? written : ret;
|
||||
while ((ret = iomap_iter(&iter, ops)) > 0)
|
||||
iter.processed = iomap_write_iter(&iter, i);
|
||||
if (iter.pos == iocb->ki_pos)
|
||||
return ret;
|
||||
return iter.pos - iocb->ki_pos;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
|
||||
|
||||
static loff_t
|
||||
iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
||||
struct iomap *iomap, struct iomap *srcmap)
|
||||
static loff_t iomap_unshare_iter(struct iomap_iter *iter)
|
||||
{
|
||||
struct iomap *iomap = &iter->iomap;
|
||||
const struct iomap *srcmap = iomap_iter_srcmap(iter);
|
||||
loff_t pos = iter->pos;
|
||||
loff_t length = iomap_length(iter);
|
||||
long status = 0;
|
||||
loff_t written = 0;
|
||||
|
||||
|
@ -831,13 +832,11 @@ iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
|||
unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
|
||||
struct page *page;
|
||||
|
||||
status = iomap_write_begin(inode, pos, bytes,
|
||||
IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap);
|
||||
status = iomap_write_begin(iter, pos, bytes, &page);
|
||||
if (unlikely(status))
|
||||
return status;
|
||||
|
||||
status = iomap_write_end(inode, pos, bytes, bytes, page, iomap,
|
||||
srcmap);
|
||||
status = iomap_write_end(iter, pos, bytes, bytes, page);
|
||||
if (WARN_ON_ONCE(status == 0))
|
||||
return -EIO;
|
||||
|
||||
|
@ -847,7 +846,7 @@ iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
|||
written += status;
|
||||
length -= status;
|
||||
|
||||
balance_dirty_pages_ratelimited(inode->i_mapping);
|
||||
balance_dirty_pages_ratelimited(iter->inode->i_mapping);
|
||||
} while (length);
|
||||
|
||||
return written;
|
||||
|
@ -857,44 +856,43 @@ int
|
|||
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
|
||||
const struct iomap_ops *ops)
|
||||
{
|
||||
loff_t ret;
|
||||
struct iomap_iter iter = {
|
||||
.inode = inode,
|
||||
.pos = pos,
|
||||
.len = len,
|
||||
.flags = IOMAP_WRITE | IOMAP_UNSHARE,
|
||||
};
|
||||
int ret;
|
||||
|
||||
while (len) {
|
||||
ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
|
||||
iomap_unshare_actor);
|
||||
if (ret <= 0)
|
||||
return ret;
|
||||
pos += ret;
|
||||
len -= ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
while ((ret = iomap_iter(&iter, ops)) > 0)
|
||||
iter.processed = iomap_unshare_iter(&iter);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iomap_file_unshare);
|
||||
|
||||
static s64 iomap_zero(struct inode *inode, loff_t pos, u64 length,
|
||||
struct iomap *iomap, struct iomap *srcmap)
|
||||
static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
|
||||
{
|
||||
struct page *page;
|
||||
int status;
|
||||
unsigned offset = offset_in_page(pos);
|
||||
unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
|
||||
|
||||
status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
|
||||
status = iomap_write_begin(iter, pos, bytes, &page);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
zero_user(page, offset, bytes);
|
||||
mark_page_accessed(page);
|
||||
|
||||
return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
|
||||
return iomap_write_end(iter, pos, bytes, bytes, page);
|
||||
}
|
||||
|
||||
static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
|
||||
loff_t length, void *data, struct iomap *iomap,
|
||||
struct iomap *srcmap)
|
||||
static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
|
||||
{
|
||||
bool *did_zero = data;
|
||||
struct iomap *iomap = &iter->iomap;
|
||||
const struct iomap *srcmap = iomap_iter_srcmap(iter);
|
||||
loff_t pos = iter->pos;
|
||||
loff_t length = iomap_length(iter);
|
||||
loff_t written = 0;
|
||||
|
||||
/* already zeroed? we're done. */
|
||||
|
@ -904,10 +902,10 @@ static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
|
|||
do {
|
||||
s64 bytes;
|
||||
|
||||
if (IS_DAX(inode))
|
||||
if (IS_DAX(iter->inode))
|
||||
bytes = dax_iomap_zero(pos, length, iomap);
|
||||
else
|
||||
bytes = iomap_zero(inode, pos, length, iomap, srcmap);
|
||||
bytes = __iomap_zero_iter(iter, pos, length);
|
||||
if (bytes < 0)
|
||||
return bytes;
|
||||
|
||||
|
@ -925,19 +923,17 @@ int
|
|||
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
|
||||
const struct iomap_ops *ops)
|
||||
{
|
||||
loff_t ret;
|
||||
struct iomap_iter iter = {
|
||||
.inode = inode,
|
||||
.pos = pos,
|
||||
.len = len,
|
||||
.flags = IOMAP_ZERO,
|
||||
};
|
||||
int ret;
|
||||
|
||||
while (len > 0) {
|
||||
ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
|
||||
ops, did_zero, iomap_zero_range_actor);
|
||||
if (ret <= 0)
|
||||
return ret;
|
||||
|
||||
pos += ret;
|
||||
len -= ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
while ((ret = iomap_iter(&iter, ops)) > 0)
|
||||
iter.processed = iomap_zero_iter(&iter, did_zero);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iomap_zero_range);
|
||||
|
||||
|
@ -955,15 +951,15 @@ iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(iomap_truncate_page);
|
||||
|
||||
static loff_t
|
||||
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
|
||||
void *data, struct iomap *iomap, struct iomap *srcmap)
|
||||
static loff_t iomap_page_mkwrite_iter(struct iomap_iter *iter,
|
||||
struct page *page)
|
||||
{
|
||||
struct page *page = data;
|
||||
loff_t length = iomap_length(iter);
|
||||
int ret;
|
||||
|
||||
if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
|
||||
ret = __block_write_begin_int(page, pos, length, NULL, iomap);
|
||||
if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
|
||||
ret = __block_write_begin_int(page, iter->pos, length, NULL,
|
||||
&iter->iomap);
|
||||
if (ret)
|
||||
return ret;
|
||||
block_commit_write(page, 0, length);
|
||||
|
@ -977,29 +973,24 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
|
|||
|
||||
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
|
||||
{
|
||||
struct iomap_iter iter = {
|
||||
.inode = file_inode(vmf->vma->vm_file),
|
||||
.flags = IOMAP_WRITE | IOMAP_FAULT,
|
||||
};
|
||||
struct page *page = vmf->page;
|
||||
struct inode *inode = file_inode(vmf->vma->vm_file);
|
||||
unsigned long length;
|
||||
loff_t offset;
|
||||
ssize_t ret;
|
||||
|
||||
lock_page(page);
|
||||
ret = page_mkwrite_check_truncate(page, inode);
|
||||
ret = page_mkwrite_check_truncate(page, iter.inode);
|
||||
if (ret < 0)
|
||||
goto out_unlock;
|
||||
length = ret;
|
||||
|
||||
offset = page_offset(page);
|
||||
while (length > 0) {
|
||||
ret = iomap_apply(inode, offset, length,
|
||||
IOMAP_WRITE | IOMAP_FAULT, ops, page,
|
||||
iomap_page_mkwrite_actor);
|
||||
if (unlikely(ret <= 0))
|
||||
goto out_unlock;
|
||||
offset += ret;
|
||||
length -= ret;
|
||||
}
|
||||
iter.pos = page_offset(page);
|
||||
iter.len = ret;
|
||||
while ((ret = iomap_iter(&iter, ops)) > 0)
|
||||
iter.processed = iomap_page_mkwrite_iter(&iter, page);
|
||||
|
||||
if (ret < 0)
|
||||
goto out_unlock;
|
||||
wait_for_stable_page(page);
|
||||
return VM_FAULT_LOCKED;
|
||||
out_unlock:
|
||||
|
@ -1016,7 +1007,7 @@ iomap_finish_page_writeback(struct inode *inode, struct page *page,
|
|||
|
||||
if (error) {
|
||||
SetPageError(page);
|
||||
mapping_set_error(inode->i_mapping, -EIO);
|
||||
mapping_set_error(inode->i_mapping, error);
|
||||
}
|
||||
|
||||
WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
|
||||
|
@ -1153,7 +1144,7 @@ static void iomap_writepage_end_bio(struct bio *bio)
|
|||
* Submit the final bio for an ioend.
|
||||
*
|
||||
* If @error is non-zero, it means that we have a situation where some part of
|
||||
* the submission process has failed after we have marked paged for writeback
|
||||
* the submission process has failed after we've marked pages for writeback
|
||||
* and unlocked them. In this situation, we need to fail the bio instead of
|
||||
* submitting it. This typically only happens on a filesystem shutdown.
|
||||
*/
|
||||
|
@ -1168,7 +1159,7 @@ iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
|
|||
error = wpc->ops->prepare_ioend(ioend, error);
|
||||
if (error) {
|
||||
/*
|
||||
* If we are failing the IO now, just mark the ioend with an
|
||||
* If we're failing the IO now, just mark the ioend with an
|
||||
* error and finish it. This will run IO completion immediately
|
||||
* as there is only one reference to the ioend at this point in
|
||||
* time.
|
||||
|
@ -1210,7 +1201,7 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
|
|||
/*
|
||||
* Allocate a new bio, and chain the old bio to the new one.
|
||||
*
|
||||
* Note that we have to do perform the chaining in this unintuitive order
|
||||
* Note that we have to perform the chaining in this unintuitive order
|
||||
* so that the bi_private linkage is set up in the right direction for the
|
||||
* traversal in iomap_finish_ioend().
|
||||
*/
|
||||
|
@ -1249,7 +1240,7 @@ iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
|
|||
|
||||
/*
|
||||
* Test to see if we have an existing ioend structure that we could append to
|
||||
* first, otherwise finish off the current ioend and start another.
|
||||
* first; otherwise finish off the current ioend and start another.
|
||||
*/
|
||||
static void
|
||||
iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
|
||||
|
@ -1259,7 +1250,6 @@ iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
|
|||
sector_t sector = iomap_sector(&wpc->iomap, offset);
|
||||
unsigned len = i_blocksize(inode);
|
||||
unsigned poff = offset & (PAGE_SIZE - 1);
|
||||
bool merged, same_page = false;
|
||||
|
||||
if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
|
||||
if (wpc->ioend)
|
||||
|
@ -1267,19 +1257,13 @@ iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
|
|||
wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
|
||||
}
|
||||
|
||||
merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
|
||||
&same_page);
|
||||
if (iop)
|
||||
atomic_add(len, &iop->write_bytes_pending);
|
||||
|
||||
if (!merged) {
|
||||
if (bio_full(wpc->ioend->io_bio, len)) {
|
||||
wpc->ioend->io_bio =
|
||||
iomap_chain_bio(wpc->ioend->io_bio);
|
||||
}
|
||||
bio_add_page(wpc->ioend->io_bio, page, len, poff);
|
||||
if (bio_add_page(wpc->ioend->io_bio, page, len, poff) != len) {
|
||||
wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
|
||||
__bio_add_page(wpc->ioend->io_bio, page, len, poff);
|
||||
}
|
||||
|
||||
if (iop)
|
||||
atomic_add(len, &iop->write_bytes_pending);
|
||||
wpc->ioend->io_size += len;
|
||||
wbc_account_cgroup_owner(wbc, page, len);
|
||||
}
|
||||
|
@ -1287,9 +1271,9 @@ iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
|
|||
/*
|
||||
* We implement an immediate ioend submission policy here to avoid needing to
|
||||
* chain multiple ioends and hence nest mempool allocations which can violate
|
||||
* forward progress guarantees we need to provide. The current ioend we are
|
||||
* adding blocks to is cached on the writepage context, and if the new block
|
||||
* does not append to the cached ioend it will create a new ioend and cache that
|
||||
* the forward progress guarantees we need to provide. The current ioend we're
|
||||
* adding blocks to is cached in the writepage context, and if the new block
|
||||
* doesn't append to the cached ioend, it will create a new ioend and cache that
|
||||
* instead.
|
||||
*
|
||||
* If a new ioend is created and cached, the old ioend is returned and queued
|
||||
|
@ -1351,7 +1335,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
|
|||
if (unlikely(error)) {
|
||||
/*
|
||||
* Let the filesystem know what portion of the current page
|
||||
* failed to map. If the page wasn't been added to ioend, it
|
||||
* failed to map. If the page hasn't been added to ioend, it
|
||||
* won't be affected by I/O completion and we must unlock it
|
||||
* now.
|
||||
*/
|
||||
|
@ -1368,7 +1352,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
|
|||
unlock_page(page);
|
||||
|
||||
/*
|
||||
* Preserve the original error if there was one, otherwise catch
|
||||
* Preserve the original error if there was one; catch
|
||||
* submission errors here and propagate into subsequent ioend
|
||||
* submissions.
|
||||
*/
|
||||
|
@ -1395,8 +1379,8 @@ done:
|
|||
/*
|
||||
* Write out a dirty page.
|
||||
*
|
||||
* For delalloc space on the page we need to allocate space and flush it.
|
||||
* For unwritten space on the page we need to start the conversion to
|
||||
* For delalloc space on the page, we need to allocate space and flush it.
|
||||
* For unwritten space on the page, we need to start the conversion to
|
||||
* regular allocated space.
|
||||
*/
|
||||
static int
|
||||
|
@ -1411,7 +1395,7 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
|
|||
trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE);
|
||||
|
||||
/*
|
||||
* Refuse to write the page out if we are called from reclaim context.
|
||||
* Refuse to write the page out if we're called from reclaim context.
|
||||
*
|
||||
* This avoids stack overflows when called from deeply used stacks in
|
||||
* random callers for direct reclaim or memcg reclaim. We explicitly
|
||||
|
@ -1456,20 +1440,20 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
|
|||
unsigned offset_into_page = offset & (PAGE_SIZE - 1);
|
||||
|
||||
/*
|
||||
* Skip the page if it is fully outside i_size, e.g. due to a
|
||||
* truncate operation that is in progress. We must redirty the
|
||||
* Skip the page if it's fully outside i_size, e.g. due to a
|
||||
* truncate operation that's in progress. We must redirty the
|
||||
* page so that reclaim stops reclaiming it. Otherwise
|
||||
* iomap_vm_releasepage() is called on it and gets confused.
|
||||
*
|
||||
* Note that the end_index is unsigned long, it would overflow
|
||||
* if the given offset is greater than 16TB on 32-bit system
|
||||
* and if we do check the page is fully outside i_size or not
|
||||
* via "if (page->index >= end_index + 1)" as "end_index + 1"
|
||||
* will be evaluated to 0. Hence this page will be redirtied
|
||||
* and be written out repeatedly which would result in an
|
||||
* infinite loop, the user program that perform this operation
|
||||
* will hang. Instead, we can verify this situation by checking
|
||||
* if the page to write is totally beyond the i_size or if it's
|
||||
* Note that the end_index is unsigned long. If the given
|
||||
* offset is greater than 16TB on a 32-bit system then if we
|
||||
* checked if the page is fully outside i_size with
|
||||
* "if (page->index >= end_index + 1)", "end_index + 1" would
|
||||
* overflow and evaluate to 0. Hence this page would be
|
||||
* redirtied and written out repeatedly, which would result in
|
||||
* an infinite loop; the user program performing this operation
|
||||
* would hang. Instead, we can detect this situation by
|
||||
* checking if the page is totally beyond i_size or if its
|
||||
* offset is just equal to the EOF.
|
||||
*/
|
||||
if (page->index > end_index ||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (C) 2010 Red Hat, Inc.
|
||||
* Copyright (c) 2016-2018 Christoph Hellwig.
|
||||
* Copyright (c) 2016-2021 Christoph Hellwig.
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/compiler.h>
|
||||
|
@ -59,19 +59,17 @@ int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
|
||||
|
||||
static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
|
||||
struct bio *bio, loff_t pos)
|
||||
static void iomap_dio_submit_bio(const struct iomap_iter *iter,
|
||||
struct iomap_dio *dio, struct bio *bio, loff_t pos)
|
||||
{
|
||||
atomic_inc(&dio->ref);
|
||||
|
||||
if (dio->iocb->ki_flags & IOCB_HIPRI)
|
||||
bio_set_polled(bio, dio->iocb);
|
||||
|
||||
dio->submit.last_queue = bdev_get_queue(iomap->bdev);
|
||||
dio->submit.last_queue = bdev_get_queue(iter->iomap.bdev);
|
||||
if (dio->dops && dio->dops->submit_io)
|
||||
dio->submit.cookie = dio->dops->submit_io(
|
||||
file_inode(dio->iocb->ki_filp),
|
||||
iomap, bio, pos);
|
||||
dio->submit.cookie = dio->dops->submit_io(iter, bio, pos);
|
||||
else
|
||||
dio->submit.cookie = submit_bio(bio);
|
||||
}
|
||||
|
@ -181,24 +179,23 @@ static void iomap_dio_bio_end_io(struct bio *bio)
|
|||
}
|
||||
}
|
||||
|
||||
static void
|
||||
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
|
||||
unsigned len)
|
||||
static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
|
||||
loff_t pos, unsigned len)
|
||||
{
|
||||
struct page *page = ZERO_PAGE(0);
|
||||
int flags = REQ_SYNC | REQ_IDLE;
|
||||
struct bio *bio;
|
||||
|
||||
bio = bio_alloc(GFP_KERNEL, 1);
|
||||
bio_set_dev(bio, iomap->bdev);
|
||||
bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
|
||||
bio_set_dev(bio, iter->iomap.bdev);
|
||||
bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
|
||||
bio->bi_private = dio;
|
||||
bio->bi_end_io = iomap_dio_bio_end_io;
|
||||
|
||||
get_page(page);
|
||||
__bio_add_page(bio, page, len, 0);
|
||||
bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
|
||||
iomap_dio_submit_bio(dio, iomap, bio, pos);
|
||||
iomap_dio_submit_bio(iter, dio, bio, pos);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -206,8 +203,8 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
|
|||
* mapping, and whether or not we want FUA. Note that we can end up
|
||||
* clearing the WRITE_FUA flag in the dio request.
|
||||
*/
|
||||
static inline unsigned int
|
||||
iomap_dio_bio_opflags(struct iomap_dio *dio, struct iomap *iomap, bool use_fua)
|
||||
static inline unsigned int iomap_dio_bio_opflags(struct iomap_dio *dio,
|
||||
const struct iomap *iomap, bool use_fua)
|
||||
{
|
||||
unsigned int opflags = REQ_SYNC | REQ_IDLE;
|
||||
|
||||
|
@ -229,13 +226,16 @@ iomap_dio_bio_opflags(struct iomap_dio *dio, struct iomap *iomap, bool use_fua)
|
|||
return opflags;
|
||||
}
|
||||
|
||||
static loff_t
|
||||
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
|
||||
struct iomap_dio *dio, struct iomap *iomap)
|
||||
static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
|
||||
struct iomap_dio *dio)
|
||||
{
|
||||
const struct iomap *iomap = &iter->iomap;
|
||||
struct inode *inode = iter->inode;
|
||||
unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
|
||||
unsigned int fs_block_size = i_blocksize(inode), pad;
|
||||
unsigned int align = iov_iter_alignment(dio->submit.iter);
|
||||
loff_t length = iomap_length(iter);
|
||||
loff_t pos = iter->pos;
|
||||
unsigned int bio_opf;
|
||||
struct bio *bio;
|
||||
bool need_zeroout = false;
|
||||
|
@ -286,7 +286,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
|
|||
/* zero out from the start of the block to the write offset */
|
||||
pad = pos & (fs_block_size - 1);
|
||||
if (pad)
|
||||
iomap_dio_zero(dio, iomap, pos - pad, pad);
|
||||
iomap_dio_zero(iter, dio, pos - pad, pad);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -339,7 +339,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
|
|||
|
||||
nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
|
||||
BIO_MAX_VECS);
|
||||
iomap_dio_submit_bio(dio, iomap, bio, pos);
|
||||
iomap_dio_submit_bio(iter, dio, bio, pos);
|
||||
pos += n;
|
||||
} while (nr_pages);
|
||||
|
||||
|
@ -355,7 +355,7 @@ zero_tail:
|
|||
/* zero out from the end of the write to the end of the block */
|
||||
pad = pos & (fs_block_size - 1);
|
||||
if (pad)
|
||||
iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
|
||||
iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
|
||||
}
|
||||
out:
|
||||
/* Undo iter limitation to current extent */
|
||||
|
@ -365,65 +365,67 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static loff_t
|
||||
iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
|
||||
static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
|
||||
struct iomap_dio *dio)
|
||||
{
|
||||
length = iov_iter_zero(length, dio->submit.iter);
|
||||
loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);
|
||||
|
||||
dio->size += length;
|
||||
return length;
|
||||
}
|
||||
|
||||
static loff_t
|
||||
iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
|
||||
struct iomap_dio *dio, struct iomap *iomap)
|
||||
static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
|
||||
struct iomap_dio *dio)
|
||||
{
|
||||
const struct iomap *iomap = &iomi->iomap;
|
||||
struct iov_iter *iter = dio->submit.iter;
|
||||
void *inline_data = iomap_inline_data(iomap, iomi->pos);
|
||||
loff_t length = iomap_length(iomi);
|
||||
loff_t pos = iomi->pos;
|
||||
size_t copied;
|
||||
|
||||
BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));
|
||||
if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
|
||||
return -EIO;
|
||||
|
||||
if (dio->flags & IOMAP_DIO_WRITE) {
|
||||
loff_t size = inode->i_size;
|
||||
loff_t size = iomi->inode->i_size;
|
||||
|
||||
if (pos > size)
|
||||
memset(iomap->inline_data + size, 0, pos - size);
|
||||
copied = copy_from_iter(iomap->inline_data + pos, length, iter);
|
||||
memset(iomap_inline_data(iomap, size), 0, pos - size);
|
||||
copied = copy_from_iter(inline_data, length, iter);
|
||||
if (copied) {
|
||||
if (pos + copied > size)
|
||||
i_size_write(inode, pos + copied);
|
||||
mark_inode_dirty(inode);
|
||||
i_size_write(iomi->inode, pos + copied);
|
||||
mark_inode_dirty(iomi->inode);
|
||||
}
|
||||
} else {
|
||||
copied = copy_to_iter(iomap->inline_data + pos, length, iter);
|
||||
copied = copy_to_iter(inline_data, length, iter);
|
||||
}
|
||||
dio->size += copied;
|
||||
return copied;
|
||||
}
|
||||
|
||||
static loff_t
|
||||
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
|
||||
void *data, struct iomap *iomap, struct iomap *srcmap)
|
||||
static loff_t iomap_dio_iter(const struct iomap_iter *iter,
|
||||
struct iomap_dio *dio)
|
||||
{
|
||||
struct iomap_dio *dio = data;
|
||||
|
||||
switch (iomap->type) {
|
||||
switch (iter->iomap.type) {
|
||||
case IOMAP_HOLE:
|
||||
if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
|
||||
return -EIO;
|
||||
return iomap_dio_hole_actor(length, dio);
|
||||
return iomap_dio_hole_iter(iter, dio);
|
||||
case IOMAP_UNWRITTEN:
|
||||
if (!(dio->flags & IOMAP_DIO_WRITE))
|
||||
return iomap_dio_hole_actor(length, dio);
|
||||
return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
|
||||
return iomap_dio_hole_iter(iter, dio);
|
||||
return iomap_dio_bio_iter(iter, dio);
|
||||
case IOMAP_MAPPED:
|
||||
return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
|
||||
return iomap_dio_bio_iter(iter, dio);
|
||||
case IOMAP_INLINE:
|
||||
return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
|
||||
return iomap_dio_inline_iter(iter, dio);
|
||||
case IOMAP_DELALLOC:
|
||||
/*
|
||||
* DIO is not serialised against mmap() access at all, and so
|
||||
* if the page_mkwrite occurs between the writeback and the
|
||||
* iomap_apply() call in the DIO path, then it will see the
|
||||
* iomap_iter() call in the DIO path, then it will see the
|
||||
* DELALLOC block that the page-mkwrite allocated.
|
||||
*/
|
||||
pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
|
||||
|
@ -454,16 +456,19 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
|
|||
{
|
||||
struct address_space *mapping = iocb->ki_filp->f_mapping;
|
||||
struct inode *inode = file_inode(iocb->ki_filp);
|
||||
size_t count = iov_iter_count(iter);
|
||||
loff_t pos = iocb->ki_pos;
|
||||
loff_t end = iocb->ki_pos + count - 1, ret = 0;
|
||||
struct iomap_iter iomi = {
|
||||
.inode = inode,
|
||||
.pos = iocb->ki_pos,
|
||||
.len = iov_iter_count(iter),
|
||||
.flags = IOMAP_DIRECT,
|
||||
};
|
||||
loff_t end = iomi.pos + iomi.len - 1, ret = 0;
|
||||
bool wait_for_completion =
|
||||
is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
|
||||
unsigned int iomap_flags = IOMAP_DIRECT;
|
||||
struct blk_plug plug;
|
||||
struct iomap_dio *dio;
|
||||
|
||||
if (!count)
|
||||
if (!iomi.len)
|
||||
return NULL;
|
||||
|
||||
dio = kmalloc(sizeof(*dio), GFP_KERNEL);
|
||||
|
@ -484,29 +489,30 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
|
|||
dio->submit.last_queue = NULL;
|
||||
|
||||
if (iov_iter_rw(iter) == READ) {
|
||||
if (pos >= dio->i_size)
|
||||
if (iomi.pos >= dio->i_size)
|
||||
goto out_free_dio;
|
||||
|
||||
if (iocb->ki_flags & IOCB_NOWAIT) {
|
||||
if (filemap_range_needs_writeback(mapping, pos, end)) {
|
||||
if (filemap_range_needs_writeback(mapping, iomi.pos,
|
||||
end)) {
|
||||
ret = -EAGAIN;
|
||||
goto out_free_dio;
|
||||
}
|
||||
iomap_flags |= IOMAP_NOWAIT;
|
||||
iomi.flags |= IOMAP_NOWAIT;
|
||||
}
|
||||
|
||||
if (iter_is_iovec(iter))
|
||||
dio->flags |= IOMAP_DIO_DIRTY;
|
||||
} else {
|
||||
iomap_flags |= IOMAP_WRITE;
|
||||
iomi.flags |= IOMAP_WRITE;
|
||||
dio->flags |= IOMAP_DIO_WRITE;
|
||||
|
||||
if (iocb->ki_flags & IOCB_NOWAIT) {
|
||||
if (filemap_range_has_page(mapping, pos, end)) {
|
||||
if (filemap_range_has_page(mapping, iomi.pos, end)) {
|
||||
ret = -EAGAIN;
|
||||
goto out_free_dio;
|
||||
}
|
||||
iomap_flags |= IOMAP_NOWAIT;
|
||||
iomi.flags |= IOMAP_NOWAIT;
|
||||
}
|
||||
|
||||
/* for data sync or sync, we need sync completion processing */
|
||||
|
@ -525,12 +531,13 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
|
|||
|
||||
if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
|
||||
ret = -EAGAIN;
|
||||
if (pos >= dio->i_size || pos + count > dio->i_size)
|
||||
if (iomi.pos >= dio->i_size ||
|
||||
iomi.pos + iomi.len > dio->i_size)
|
||||
goto out_free_dio;
|
||||
iomap_flags |= IOMAP_OVERWRITE_ONLY;
|
||||
iomi.flags |= IOMAP_OVERWRITE_ONLY;
|
||||
}
|
||||
|
||||
ret = filemap_write_and_wait_range(mapping, pos, end);
|
||||
ret = filemap_write_and_wait_range(mapping, iomi.pos, end);
|
||||
if (ret)
|
||||
goto out_free_dio;
|
||||
|
||||
|
@ -540,9 +547,10 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
|
|||
* If this invalidation fails, let the caller fall back to
|
||||
* buffered I/O.
|
||||
*/
|
||||
if (invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
|
||||
end >> PAGE_SHIFT)) {
|
||||
trace_iomap_dio_invalidate_fail(inode, pos, count);
|
||||
if (invalidate_inode_pages2_range(mapping,
|
||||
iomi.pos >> PAGE_SHIFT, end >> PAGE_SHIFT)) {
|
||||
trace_iomap_dio_invalidate_fail(inode, iomi.pos,
|
||||
iomi.len);
|
||||
ret = -ENOTBLK;
|
||||
goto out_free_dio;
|
||||
}
|
||||
|
@ -557,31 +565,23 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
|
|||
inode_dio_begin(inode);
|
||||
|
||||
blk_start_plug(&plug);
|
||||
do {
|
||||
ret = iomap_apply(inode, pos, count, iomap_flags, ops, dio,
|
||||
iomap_dio_actor);
|
||||
if (ret <= 0) {
|
||||
/* magic error code to fall back to buffered I/O */
|
||||
if (ret == -ENOTBLK) {
|
||||
wait_for_completion = true;
|
||||
ret = 0;
|
||||
}
|
||||
break;
|
||||
}
|
||||
pos += ret;
|
||||
|
||||
if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
|
||||
/*
|
||||
* We only report that we've read data up to i_size.
|
||||
* Revert iter to a state corresponding to that as
|
||||
* some callers (such as splice code) rely on it.
|
||||
*/
|
||||
iov_iter_revert(iter, pos - dio->i_size);
|
||||
break;
|
||||
}
|
||||
} while ((count = iov_iter_count(iter)) > 0);
|
||||
while ((ret = iomap_iter(&iomi, ops)) > 0)
|
||||
iomi.processed = iomap_dio_iter(&iomi, dio);
|
||||
blk_finish_plug(&plug);
|
||||
|
||||
/*
|
||||
* We only report that we've read data up to i_size.
|
||||
* Revert iter to a state corresponding to that as some callers (such
|
||||
* as the splice code) rely on it.
|
||||
*/
|
||||
if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
|
||||
iov_iter_revert(iter, iomi.pos - dio->i_size);
|
||||
|
||||
/* magic error code to fall back to buffered I/O */
|
||||
if (ret == -ENOTBLK) {
|
||||
wait_for_completion = true;
|
||||
ret = 0;
|
||||
}
|
||||
if (ret < 0)
|
||||
iomap_dio_set_error(dio, ret);
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2016-2018 Christoph Hellwig.
|
||||
* Copyright (c) 2016-2021 Christoph Hellwig.
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/compiler.h>
|
||||
|
@ -8,13 +8,8 @@
|
|||
#include <linux/iomap.h>
|
||||
#include <linux/fiemap.h>
|
||||
|
||||
struct fiemap_ctx {
|
||||
struct fiemap_extent_info *fi;
|
||||
struct iomap prev;
|
||||
};
|
||||
|
||||
static int iomap_to_fiemap(struct fiemap_extent_info *fi,
|
||||
struct iomap *iomap, u32 flags)
|
||||
const struct iomap *iomap, u32 flags)
|
||||
{
|
||||
switch (iomap->type) {
|
||||
case IOMAP_HOLE:
|
||||
|
@ -43,24 +38,22 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi,
|
|||
iomap->length, flags);
|
||||
}
|
||||
|
||||
static loff_t
|
||||
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
||||
struct iomap *iomap, struct iomap *srcmap)
|
||||
static loff_t iomap_fiemap_iter(const struct iomap_iter *iter,
|
||||
struct fiemap_extent_info *fi, struct iomap *prev)
|
||||
{
|
||||
struct fiemap_ctx *ctx = data;
|
||||
loff_t ret = length;
|
||||
int ret;
|
||||
|
||||
if (iomap->type == IOMAP_HOLE)
|
||||
return length;
|
||||
if (iter->iomap.type == IOMAP_HOLE)
|
||||
return iomap_length(iter);
|
||||
|
||||
ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
|
||||
ctx->prev = *iomap;
|
||||
ret = iomap_to_fiemap(fi, prev, 0);
|
||||
*prev = iter->iomap;
|
||||
switch (ret) {
|
||||
case 0: /* success */
|
||||
return length;
|
||||
return iomap_length(iter);
|
||||
case 1: /* extent array full */
|
||||
return 0;
|
||||
default:
|
||||
default: /* error */
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
@ -68,73 +61,63 @@ iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
|||
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
|
||||
u64 start, u64 len, const struct iomap_ops *ops)
|
||||
{
|
||||
struct fiemap_ctx ctx;
|
||||
loff_t ret;
|
||||
struct iomap_iter iter = {
|
||||
.inode = inode,
|
||||
.pos = start,
|
||||
.len = len,
|
||||
.flags = IOMAP_REPORT,
|
||||
};
|
||||
struct iomap prev = {
|
||||
.type = IOMAP_HOLE,
|
||||
};
|
||||
int ret;
|
||||
|
||||
memset(&ctx, 0, sizeof(ctx));
|
||||
ctx.fi = fi;
|
||||
ctx.prev.type = IOMAP_HOLE;
|
||||
|
||||
ret = fiemap_prep(inode, fi, start, &len, 0);
|
||||
ret = fiemap_prep(inode, fi, start, &iter.len, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
while (len > 0) {
|
||||
ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
|
||||
iomap_fiemap_actor);
|
||||
/* inode with no (attribute) mapping will give ENOENT */
|
||||
if (ret == -ENOENT)
|
||||
break;
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (ret == 0)
|
||||
break;
|
||||
while ((ret = iomap_iter(&iter, ops)) > 0)
|
||||
iter.processed = iomap_fiemap_iter(&iter, fi, &prev);
|
||||
|
||||
start += ret;
|
||||
len -= ret;
|
||||
}
|
||||
|
||||
if (ctx.prev.type != IOMAP_HOLE) {
|
||||
ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
|
||||
if (prev.type != IOMAP_HOLE) {
|
||||
ret = iomap_to_fiemap(fi, &prev, FIEMAP_EXTENT_LAST);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* inode with no (attribute) mapping will give ENOENT */
|
||||
if (ret < 0 && ret != -ENOENT)
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iomap_fiemap);
|
||||
|
||||
static loff_t
|
||||
iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
|
||||
void *data, struct iomap *iomap, struct iomap *srcmap)
|
||||
{
|
||||
sector_t *bno = data, addr;
|
||||
|
||||
if (iomap->type == IOMAP_MAPPED) {
|
||||
addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
|
||||
*bno = addr;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* legacy ->bmap interface. 0 is the error return (!) */
|
||||
sector_t
|
||||
iomap_bmap(struct address_space *mapping, sector_t bno,
|
||||
const struct iomap_ops *ops)
|
||||
{
|
||||
struct inode *inode = mapping->host;
|
||||
loff_t pos = bno << inode->i_blkbits;
|
||||
unsigned blocksize = i_blocksize(inode);
|
||||
struct iomap_iter iter = {
|
||||
.inode = mapping->host,
|
||||
.pos = (loff_t)bno << mapping->host->i_blkbits,
|
||||
.len = i_blocksize(mapping->host),
|
||||
.flags = IOMAP_REPORT,
|
||||
};
|
||||
const unsigned int blkshift = mapping->host->i_blkbits - SECTOR_SHIFT;
|
||||
int ret;
|
||||
|
||||
if (filemap_write_and_wait(mapping))
|
||||
return 0;
|
||||
|
||||
bno = 0;
|
||||
ret = iomap_apply(inode, pos, blocksize, 0, ops, &bno,
|
||||
iomap_bmap_actor);
|
||||
while ((ret = iomap_iter(&iter, ops)) > 0) {
|
||||
if (iter.iomap.type == IOMAP_MAPPED)
|
||||
bno = iomap_sector(&iter.iomap, iter.pos) >> blkshift;
|
||||
/* leave iter.processed unset to abort loop */
|
||||
}
|
||||
if (ret)
|
||||
return 0;
|
||||
|
||||
return bno;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iomap_bmap);
|
||||
|
|
|
@ -0,0 +1,80 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2021 Christoph Hellwig.
 */
#include <linux/fs.h>
#include <linux/iomap.h>
#include "trace.h"

static inline int iomap_iter_advance(struct iomap_iter *iter)
{
	/* handle the previous iteration (if any) */
	if (iter->iomap.length) {
		if (iter->processed <= 0)
			return iter->processed;
		if (WARN_ON_ONCE(iter->processed > iomap_length(iter)))
			return -EIO;
		iter->pos += iter->processed;
		iter->len -= iter->processed;
		if (!iter->len)
			return 0;
	}

	/* clear the state for the next iteration */
	iter->processed = 0;
	memset(&iter->iomap, 0, sizeof(iter->iomap));
	memset(&iter->srcmap, 0, sizeof(iter->srcmap));
	return 1;
}

static inline void iomap_iter_done(struct iomap_iter *iter)
{
	WARN_ON_ONCE(iter->iomap.offset > iter->pos);
	WARN_ON_ONCE(iter->iomap.length == 0);
	WARN_ON_ONCE(iter->iomap.offset + iter->iomap.length <= iter->pos);

	trace_iomap_iter_dstmap(iter->inode, &iter->iomap);
	if (iter->srcmap.type != IOMAP_HOLE)
		trace_iomap_iter_srcmap(iter->inode, &iter->srcmap);
}

/**
 * iomap_iter - iterate over a range of a file
 * @iter: iteration structure
 * @ops: iomap ops provided by the file system
 *
 * Iterate over filesystem-provided space mappings for the provided file range.
 *
 * This function handles cleanup of resources acquired for iteration when the
 * filesystem indicates there are no more space mappings, which means that this
 * function must be called in a loop that continues as long as it returns a
 * positive value. If 0 or a negative value is returned, the caller must not
 * return to the loop body. Within a loop body, there are two ways to break out
 * of the loop body: leave @iter.processed unchanged, or set it to a negative
 * errno.
 */
int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops)
{
	int ret;

	if (iter->iomap.length && ops->iomap_end) {
		ret = ops->iomap_end(iter->inode, iter->pos, iomap_length(iter),
				iter->processed > 0 ? iter->processed : 0,
				iter->flags, &iter->iomap);
		if (ret < 0 && !iter->processed)
			return ret;
	}

	trace_iomap_iter(iter, ops, _RET_IP_);
	ret = iomap_iter_advance(iter);
	if (ret <= 0)
		return ret;

	ret = ops->iomap_begin(iter->inode, iter->pos, iter->len, iter->flags,
			       &iter->iomap, &iter->srcmap);
	if (ret < 0)
		return ret;
	iomap_iter_done(iter);
	return 1;
}
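
The comment above spells out the calling convention, but the new file itself contains no caller. As a quick orientation aid, here is a minimal sketch of the consumer loop that the converted callers later in this series (iomap_fiemap, iomap_seek_hole, iomap_swapfile_activate, ...) all reduce to; the example_* names are hypothetical, only iomap_iter(), iomap_length() and IOMAP_REPORT come from these patches:

/*
 * Hypothetical per-extent body: consume up to iomap_length(iter) bytes
 * starting at iter->pos and report how many were handled.
 */
static loff_t example_extent_iter(const struct iomap_iter *iter, void *priv)
{
	return iomap_length(iter);	/* bytes handled, 0 to stop, or -errno */
}

static int example_walk_extents(struct inode *inode, loff_t pos, u64 len,
		const struct iomap_ops *ops, void *priv)
{
	struct iomap_iter iter = {
		.inode	= inode,
		.pos	= pos,
		.len	= len,
		.flags	= IOMAP_REPORT,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = example_extent_iter(&iter, priv);
	return ret;
}

The body stores its byte count (or a negative errno) in iter.processed; iomap_iter() then advances pos/len and decides whether the loop runs again.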
|
|
@ -1,7 +1,7 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (C) 2017 Red Hat, Inc.
|
||||
* Copyright (c) 2018 Christoph Hellwig.
|
||||
* Copyright (c) 2018-2021 Christoph Hellwig.
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/compiler.h>
|
||||
|
@ -10,21 +10,20 @@
|
|||
#include <linux/pagemap.h>
|
||||
#include <linux/pagevec.h>
|
||||
|
||||
static loff_t
|
||||
iomap_seek_hole_actor(struct inode *inode, loff_t start, loff_t length,
|
||||
void *data, struct iomap *iomap, struct iomap *srcmap)
|
||||
static loff_t iomap_seek_hole_iter(const struct iomap_iter *iter,
|
||||
loff_t *hole_pos)
|
||||
{
|
||||
loff_t offset = start;
|
||||
loff_t length = iomap_length(iter);
|
||||
|
||||
switch (iomap->type) {
|
||||
switch (iter->iomap.type) {
|
||||
case IOMAP_UNWRITTEN:
|
||||
offset = mapping_seek_hole_data(inode->i_mapping, start,
|
||||
start + length, SEEK_HOLE);
|
||||
if (offset == start + length)
|
||||
*hole_pos = mapping_seek_hole_data(iter->inode->i_mapping,
|
||||
iter->pos, iter->pos + length, SEEK_HOLE);
|
||||
if (*hole_pos == iter->pos + length)
|
||||
return length;
|
||||
fallthrough;
|
||||
return 0;
|
||||
case IOMAP_HOLE:
|
||||
*(loff_t *)data = offset;
|
||||
*hole_pos = iter->pos;
|
||||
return 0;
|
||||
default:
|
||||
return length;
|
||||
|
@ -32,70 +31,73 @@ iomap_seek_hole_actor(struct inode *inode, loff_t start, loff_t length,
|
|||
}
|
||||
|
||||
loff_t
|
||||
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
|
||||
iomap_seek_hole(struct inode *inode, loff_t pos, const struct iomap_ops *ops)
|
||||
{
|
||||
loff_t size = i_size_read(inode);
|
||||
loff_t ret;
|
||||
struct iomap_iter iter = {
|
||||
.inode = inode,
|
||||
.pos = pos,
|
||||
.flags = IOMAP_REPORT,
|
||||
};
|
||||
int ret;
|
||||
|
||||
/* Nothing to be found before or beyond the end of the file. */
|
||||
if (offset < 0 || offset >= size)
|
||||
if (pos < 0 || pos >= size)
|
||||
return -ENXIO;
|
||||
|
||||
while (offset < size) {
|
||||
ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
|
||||
ops, &offset, iomap_seek_hole_actor);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (ret == 0)
|
||||
break;
|
||||
offset += ret;
|
||||
}
|
||||
|
||||
return offset;
|
||||
iter.len = size - pos;
|
||||
while ((ret = iomap_iter(&iter, ops)) > 0)
|
||||
iter.processed = iomap_seek_hole_iter(&iter, &pos);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (iter.len) /* found hole before EOF */
|
||||
return pos;
|
||||
return size;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iomap_seek_hole);
|
||||
|
||||
static loff_t
|
||||
iomap_seek_data_actor(struct inode *inode, loff_t start, loff_t length,
|
||||
void *data, struct iomap *iomap, struct iomap *srcmap)
|
||||
static loff_t iomap_seek_data_iter(const struct iomap_iter *iter,
|
||||
loff_t *hole_pos)
|
||||
{
|
||||
loff_t offset = start;
|
||||
loff_t length = iomap_length(iter);
|
||||
|
||||
switch (iomap->type) {
|
||||
switch (iter->iomap.type) {
|
||||
case IOMAP_HOLE:
|
||||
return length;
|
||||
case IOMAP_UNWRITTEN:
|
||||
offset = mapping_seek_hole_data(inode->i_mapping, start,
|
||||
start + length, SEEK_DATA);
|
||||
if (offset < 0)
|
||||
*hole_pos = mapping_seek_hole_data(iter->inode->i_mapping,
|
||||
iter->pos, iter->pos + length, SEEK_DATA);
|
||||
if (*hole_pos < 0)
|
||||
return length;
|
||||
fallthrough;
|
||||
return 0;
|
||||
default:
|
||||
*(loff_t *)data = offset;
|
||||
*hole_pos = iter->pos;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
loff_t
|
||||
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
|
||||
iomap_seek_data(struct inode *inode, loff_t pos, const struct iomap_ops *ops)
|
||||
{
|
||||
loff_t size = i_size_read(inode);
|
||||
loff_t ret;
|
||||
struct iomap_iter iter = {
|
||||
.inode = inode,
|
||||
.pos = pos,
|
||||
.flags = IOMAP_REPORT,
|
||||
};
|
||||
int ret;
|
||||
|
||||
/* Nothing to be found before or beyond the end of the file. */
|
||||
if (offset < 0 || offset >= size)
|
||||
if (pos < 0 || pos >= size)
|
||||
return -ENXIO;
|
||||
|
||||
while (offset < size) {
|
||||
ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
|
||||
ops, &offset, iomap_seek_data_actor);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (ret == 0)
|
||||
return offset;
|
||||
offset += ret;
|
||||
}
|
||||
|
||||
iter.len = size - pos;
|
||||
while ((ret = iomap_iter(&iter, ops)) > 0)
|
||||
iter.processed = iomap_seek_data_iter(&iter, &pos);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (iter.len) /* found data before EOF */
|
||||
return pos;
|
||||
/* We've reached the end of the file without finding data */
|
||||
return -ENXIO;
|
||||
}
|
||||
|
|
|
@ -31,11 +31,16 @@ static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
|
|||
{
|
||||
struct iomap *iomap = &isi->iomap;
|
||||
unsigned long nr_pages;
|
||||
unsigned long max_pages;
|
||||
uint64_t first_ppage;
|
||||
uint64_t first_ppage_reported;
|
||||
uint64_t next_ppage;
|
||||
int error;
|
||||
|
||||
if (unlikely(isi->nr_pages >= isi->sis->max))
|
||||
return 0;
|
||||
max_pages = isi->sis->max - isi->nr_pages;
|
||||
|
||||
/*
|
||||
* Round the start up and the end down so that the physical
|
||||
* extent aligns to a page boundary.
|
||||
|
@ -48,6 +53,7 @@ static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
|
|||
if (first_ppage >= next_ppage)
|
||||
return 0;
|
||||
nr_pages = next_ppage - first_ppage;
|
||||
nr_pages = min(nr_pages, max_pages);
|
||||
|
||||
/*
|
||||
* Calculate how much swap space we're adding; the first page contains
|
||||
|
@ -88,13 +94,9 @@ static int iomap_swapfile_fail(struct iomap_swapfile_info *isi, const char *str)
|
|||
* swap only cares about contiguous page-aligned physical extents and makes no
|
||||
* distinction between written and unwritten extents.
|
||||
*/
|
||||
static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
|
||||
loff_t count, void *data, struct iomap *iomap,
|
||||
struct iomap *srcmap)
|
||||
static loff_t iomap_swapfile_iter(const struct iomap_iter *iter,
|
||||
struct iomap *iomap, struct iomap_swapfile_info *isi)
|
||||
{
|
||||
struct iomap_swapfile_info *isi = data;
|
||||
int error;
|
||||
|
||||
switch (iomap->type) {
|
||||
case IOMAP_MAPPED:
|
||||
case IOMAP_UNWRITTEN:
|
||||
|
@ -125,12 +127,12 @@ static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
|
|||
isi->iomap.length += iomap->length;
|
||||
} else {
|
||||
/* Otherwise, add the retained iomap and store this one. */
|
||||
error = iomap_swapfile_add_extent(isi);
|
||||
int error = iomap_swapfile_add_extent(isi);
|
||||
if (error)
|
||||
return error;
|
||||
memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
|
||||
}
|
||||
return count;
|
||||
return iomap_length(iter);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -141,16 +143,19 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
|
|||
struct file *swap_file, sector_t *pagespan,
|
||||
const struct iomap_ops *ops)
|
||||
{
|
||||
struct inode *inode = swap_file->f_mapping->host;
|
||||
struct iomap_iter iter = {
|
||||
.inode = inode,
|
||||
.pos = 0,
|
||||
.len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE),
|
||||
.flags = IOMAP_REPORT,
|
||||
};
|
||||
struct iomap_swapfile_info isi = {
|
||||
.sis = sis,
|
||||
.lowest_ppage = (sector_t)-1ULL,
|
||||
.file = swap_file,
|
||||
};
|
||||
struct address_space *mapping = swap_file->f_mapping;
|
||||
struct inode *inode = mapping->host;
|
||||
loff_t pos = 0;
|
||||
loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
|
||||
loff_t ret;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Persist all file mapping metadata so that we won't have any
|
||||
|
@ -160,15 +165,10 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
while (len > 0) {
|
||||
ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
|
||||
ops, &isi, iomap_swapfile_activate_actor);
|
||||
if (ret <= 0)
|
||||
return ret;
|
||||
|
||||
pos += ret;
|
||||
len -= ret;
|
||||
}
|
||||
while ((ret = iomap_iter(&iter, ops)) > 0)
|
||||
iter.processed = iomap_swapfile_iter(&iter, &iter.iomap, &isi);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (isi.iomap.length) {
|
||||
ret = iomap_swapfile_add_extent(&isi);
|
||||
|
|
|
@ -1,9 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2009-2019 Christoph Hellwig
 * Copyright (c) 2009-2021 Christoph Hellwig
 *
 * NOTE: none of these tracepoints shall be consider a stable kernel ABI
 * NOTE: none of these tracepoints shall be considered a stable kernel ABI
 * as they can change at any time.
 *
 * Current conventions for printing numbers measuring specific units:
 *
 * offset: byte offset into a subcomponent of a file operation
 * pos: file offset, in bytes
 * length: length of a file operation, in bytes
 * ino: inode number
 *
 * Numbers describing space allocations should be formatted in hexadecimal.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iomap
|
||||
|
@ -42,14 +51,14 @@ DEFINE_READPAGE_EVENT(iomap_readpage);
|
|||
DEFINE_READPAGE_EVENT(iomap_readahead);
|
||||
|
||||
DECLARE_EVENT_CLASS(iomap_range_class,
|
||||
TP_PROTO(struct inode *inode, unsigned long off, unsigned int len),
|
||||
TP_PROTO(struct inode *inode, loff_t off, u64 len),
|
||||
TP_ARGS(inode, off, len),
|
||||
TP_STRUCT__entry(
|
||||
__field(dev_t, dev)
|
||||
__field(u64, ino)
|
||||
__field(loff_t, size)
|
||||
__field(unsigned long, offset)
|
||||
__field(unsigned int, length)
|
||||
__field(loff_t, offset)
|
||||
__field(u64, length)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->dev = inode->i_sb->s_dev;
|
||||
|
@ -58,8 +67,7 @@ DECLARE_EVENT_CLASS(iomap_range_class,
|
|||
__entry->offset = off;
|
||||
__entry->length = len;
|
||||
),
|
||||
TP_printk("dev %d:%d ino 0x%llx size 0x%llx offset %lx "
|
||||
"length %x",
|
||||
TP_printk("dev %d:%d ino 0x%llx size 0x%llx offset 0x%llx length 0x%llx",
|
||||
MAJOR(__entry->dev), MINOR(__entry->dev),
|
||||
__entry->ino,
|
||||
__entry->size,
|
||||
|
@ -69,7 +77,7 @@ DECLARE_EVENT_CLASS(iomap_range_class,
|
|||
|
||||
#define DEFINE_RANGE_EVENT(name) \
|
||||
DEFINE_EVENT(iomap_range_class, name, \
|
||||
TP_PROTO(struct inode *inode, unsigned long off, unsigned int len),\
|
||||
TP_PROTO(struct inode *inode, loff_t off, u64 len),\
|
||||
TP_ARGS(inode, off, len))
|
||||
DEFINE_RANGE_EVENT(iomap_writepage);
|
||||
DEFINE_RANGE_EVENT(iomap_releasepage);
|
||||
|
@ -122,8 +130,8 @@ DECLARE_EVENT_CLASS(iomap_class,
|
|||
__entry->flags = iomap->flags;
|
||||
__entry->bdev = iomap->bdev ? iomap->bdev->bd_dev : 0;
|
||||
),
|
||||
TP_printk("dev %d:%d ino 0x%llx bdev %d:%d addr %lld offset %lld "
|
||||
"length %llu type %s flags %s",
|
||||
TP_printk("dev %d:%d ino 0x%llx bdev %d:%d addr 0x%llx offset 0x%llx "
|
||||
"length 0x%llx type %s flags %s",
|
||||
MAJOR(__entry->dev), MINOR(__entry->dev),
|
||||
__entry->ino,
|
||||
MAJOR(__entry->bdev), MINOR(__entry->bdev),
|
||||
|
@ -138,36 +146,32 @@ DECLARE_EVENT_CLASS(iomap_class,
|
|||
DEFINE_EVENT(iomap_class, name, \
|
||||
TP_PROTO(struct inode *inode, struct iomap *iomap), \
|
||||
TP_ARGS(inode, iomap))
|
||||
DEFINE_IOMAP_EVENT(iomap_apply_dstmap);
|
||||
DEFINE_IOMAP_EVENT(iomap_apply_srcmap);
|
||||
DEFINE_IOMAP_EVENT(iomap_iter_dstmap);
|
||||
DEFINE_IOMAP_EVENT(iomap_iter_srcmap);
|
||||
|
||||
TRACE_EVENT(iomap_apply,
|
||||
TP_PROTO(struct inode *inode, loff_t pos, loff_t length,
|
||||
unsigned int flags, const void *ops, void *actor,
|
||||
unsigned long caller),
|
||||
TP_ARGS(inode, pos, length, flags, ops, actor, caller),
|
||||
TRACE_EVENT(iomap_iter,
|
||||
TP_PROTO(struct iomap_iter *iter, const void *ops,
|
||||
unsigned long caller),
|
||||
TP_ARGS(iter, ops, caller),
|
||||
TP_STRUCT__entry(
|
||||
__field(dev_t, dev)
|
||||
__field(u64, ino)
|
||||
__field(loff_t, pos)
|
||||
__field(loff_t, length)
|
||||
__field(u64, length)
|
||||
__field(unsigned int, flags)
|
||||
__field(const void *, ops)
|
||||
__field(void *, actor)
|
||||
__field(unsigned long, caller)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->dev = inode->i_sb->s_dev;
|
||||
__entry->ino = inode->i_ino;
|
||||
__entry->pos = pos;
|
||||
__entry->length = length;
|
||||
__entry->flags = flags;
|
||||
__entry->dev = iter->inode->i_sb->s_dev;
|
||||
__entry->ino = iter->inode->i_ino;
|
||||
__entry->pos = iter->pos;
|
||||
__entry->length = iomap_length(iter);
|
||||
__entry->flags = iter->flags;
|
||||
__entry->ops = ops;
|
||||
__entry->actor = actor;
|
||||
__entry->caller = caller;
|
||||
),
|
||||
TP_printk("dev %d:%d ino 0x%llx pos %lld length %lld flags %s (0x%x) "
|
||||
"ops %ps caller %pS actor %ps",
|
||||
TP_printk("dev %d:%d ino 0x%llx pos 0x%llx length 0x%llx flags %s (0x%x) ops %ps caller %pS",
|
||||
MAJOR(__entry->dev), MINOR(__entry->dev),
|
||||
__entry->ino,
|
||||
__entry->pos,
|
||||
|
@ -175,8 +179,7 @@ TRACE_EVENT(iomap_apply,
|
|||
__print_flags(__entry->flags, "|", IOMAP_FLAGS_STRINGS),
|
||||
__entry->flags,
|
||||
__entry->ops,
|
||||
(void *)__entry->caller,
|
||||
__entry->actor)
|
||||
(void *)__entry->caller)
|
||||
);
|
||||
|
||||
#endif /* _IOMAP_TRACE_H */
|
||||
|
|
|
@ -91,12 +91,29 @@ struct iomap {
	const struct iomap_page_ops *page_ops;
};

static inline sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}

/*
 * Returns the inline data pointer for logical offset @pos.
 */
static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos)
{
	return iomap->inline_data + pos - iomap->offset;
}

/*
 * Check if the mapping's length is within the valid range for inline data.
 * This is used to guard against accessing data beyond the page inline_data
 * points at.
 */
static inline bool iomap_inline_data_valid(const struct iomap *iomap)
{
	return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data);
}
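
These two helpers carry the bounds checking that the buffered and direct inline-data paths now share, so a hedged usage sketch may help; example_read_inline is hypothetical, only the helpers and the -EIO convention come from the patches above:

/*
 * Sketch only: copy @count bytes of an inline extent out to @dst, the way
 * iomap_write_end_inline() and iomap_dio_inline_iter() use these helpers.
 * The caller is assumed to have clamped @count to the extent length.
 */
static int example_read_inline(const struct iomap *iomap, loff_t pos,
		void *dst, size_t count)
{
	if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
		return -EIO;		/* inline extent overruns its page */

	/* iomap_inline_data() already accounts for iomap->offset */
	memcpy(dst, iomap_inline_data(iomap, pos), count);
	return 0;
}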
|
||||
|
||||
/*
|
||||
* When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
|
||||
* and page_done will be called for each page written to. This only applies to
|
||||
|
@ -108,10 +125,9 @@ iomap_sector(struct iomap *iomap, loff_t pos)
|
|||
* associated page could not be obtained.
|
||||
*/
|
||||
struct iomap_page_ops {
|
||||
int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len,
|
||||
struct iomap *iomap);
|
||||
int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len);
|
||||
void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
|
||||
struct page *page, struct iomap *iomap);
|
||||
struct page *page);
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -124,6 +140,7 @@ struct iomap_page_ops {
|
|||
#define IOMAP_DIRECT (1 << 4) /* direct I/O */
|
||||
#define IOMAP_NOWAIT (1 << 5) /* do not block */
|
||||
#define IOMAP_OVERWRITE_ONLY (1 << 6) /* only pure overwrites allowed */
|
||||
#define IOMAP_UNSHARE (1 << 7) /* unshare_file_range */
|
||||
|
||||
struct iomap_ops {
|
||||
/*
|
||||
|
@ -145,15 +162,61 @@ struct iomap_ops {
			ssize_t written, unsigned flags, struct iomap *iomap);
};

/*
 * Main iomap iterator function.
/**
 * struct iomap_iter - Iterate through a range of a file
 * @inode: Set at the start of the iteration and should not change.
 * @pos: The current file position we are operating on.  It is updated by
 *	calls to iomap_iter().  Treat as read-only in the body.
 * @len: The remaining length of the file segment we're operating on.
 *	It is updated at the same time as @pos.
 * @processed: The number of bytes processed by the body in the most recent
 *	iteration, or a negative errno. 0 causes the iteration to stop.
 * @flags: Zero or more of the iomap_begin flags above.
 * @iomap: Map describing the I/O iteration
 * @srcmap: Source map for COW operations
 */
typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
		void *data, struct iomap *iomap, struct iomap *srcmap);
struct iomap_iter {
	struct inode *inode;
	loff_t pos;
	u64 len;
	s64 processed;
	unsigned flags;
	struct iomap iomap;
	struct iomap srcmap;
};

loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, const struct iomap_ops *ops, void *data,
		iomap_actor_t actor);
int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);

/**
 * iomap_length - length of the current iomap iteration
 * @iter: iteration structure
 *
 * Returns the length that the operation applies to for the current iteration.
 */
static inline u64 iomap_length(const struct iomap_iter *iter)
{
	u64 end = iter->iomap.offset + iter->iomap.length;

	if (iter->srcmap.type != IOMAP_HOLE)
		end = min(end, iter->srcmap.offset + iter->srcmap.length);
	return min(iter->len, end - iter->pos);
}

/**
 * iomap_iter_srcmap - return the source map for the current iomap iteration
 * @i: iteration structure
 *
 * Write operations on file systems with reflink support might require a
 * source and a destination map.  This function returns the source map
 * for a given operation, which may or may not be identical to the destination
 * map in &i->iomap.
 */
static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
{
	if (i->srcmap.type != IOMAP_HOLE)
		return &i->srcmap;
	return &i->iomap;
}
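
To complement the consumer-loop sketch after iter.c above, this is a hedged sketch of a typical loop body built on iomap_length() and iomap_iter_srcmap(); the function name and the per-page work are placeholders, the control flow mirrors iomap_unshare_iter() and iomap_zero_iter() from this series:

/*
 * Hypothetical loop body: walk the current mapping one page at a time and
 * report how many bytes were handled (or a negative errno).
 */
static loff_t example_process_iter(struct iomap_iter *iter)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	/* nothing to do for holes in the source mapping */
	if (srcmap->type == IOMAP_HOLE)
		return length;

	do {
		unsigned long bytes = min_t(loff_t,
				PAGE_SIZE - offset_in_page(pos), length);

		/* ... operate on the page cache for [pos, pos + bytes) ... */

		pos += bytes;
		written += bytes;
		length -= bytes;
	} while (length);

	return written;
}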
|
||||
|
||||
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
|
||||
const struct iomap_ops *ops);
|
||||
|
@ -250,8 +313,8 @@ int iomap_writepages(struct address_space *mapping,
|
|||
struct iomap_dio_ops {
|
||||
int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
|
||||
unsigned flags);
|
||||
blk_qc_t (*submit_io)(struct inode *inode, struct iomap *iomap,
|
||||
struct bio *bio, loff_t file_offset);
|
||||
blk_qc_t (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
|
||||
loff_t file_offset);
|
||||
};
|
||||
|
||||
/*
|
||||
|
|