dax: export a low-level __dax_zero_page_range helper

This allows XFS to perform zeroing using the iomap infrastructure and
avoid buffer heads.

Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christoph Hellwig <hch@lst.de>
[vishal: fix conflicts with dax-error-handling]
Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
Author: Christoph Hellwig, 2016-05-09 10:47:04 +02:00
Committed by: Vishal Verma
Parent: 3dc2916107
Commit: 679c8bd3b2
2 changed files with 27 additions and 15 deletions

fs/dax.c

@@ -947,6 +947,23 @@ int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
 
+int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
+		unsigned int offset, unsigned int length)
+{
+	struct blk_dax_ctl dax = {
+		.sector = sector,
+		.size = PAGE_SIZE,
+	};
+
+	if (dax_map_atomic(bdev, &dax) < 0)
+		return PTR_ERR(dax.addr);
+	clear_pmem(dax.addr + offset, length);
+	wmb_pmem();
+	dax_unmap_atomic(bdev, &dax);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__dax_zero_page_range);
+
 /**
  * dax_zero_page_range - zero a range within a page of a DAX file
  * @inode: The file being truncated
@@ -982,23 +999,11 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
 	bh.b_bdev = inode->i_sb->s_bdev;
 	bh.b_size = PAGE_SIZE;
 	err = get_block(inode, index, &bh, 0);
-	if (err < 0)
+	if (err < 0 || !buffer_written(&bh))
 		return err;
-	if (buffer_written(&bh)) {
-		struct block_device *bdev = bh.b_bdev;
-		struct blk_dax_ctl dax = {
-			.sector = to_sector(&bh, inode),
-			.size = PAGE_SIZE,
-		};
-
-		if (dax_map_atomic(bdev, &dax) < 0)
-			return PTR_ERR(dax.addr);
-		clear_pmem(dax.addr + offset, length);
-		wmb_pmem();
-		dax_unmap_atomic(bdev, &dax);
-	}
-
-	return 0;
+
+	return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode),
+			offset, length);
 }
 EXPORT_SYMBOL_GPL(dax_zero_page_range);
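For context, a minimal sketch (not part of this commit) of how an iomap-based zeroing path of the kind the commit message anticipates might call the newly exported helper. The function name example_iomap_dax_zero and the sector arithmetic from the iomap fields (blkno, offset, bdev) are illustrative assumptions, not code from this series:

#include <linux/dax.h>
#include <linux/iomap.h>
#include <linux/mm.h>

/*
 * Hypothetical caller sketch: zero the sub-page range starting at
 * 'offset' within the page that backs file position 'pos', using the
 * block device and block mapping described by 'iomap'.
 */
static int example_iomap_dax_zero(loff_t pos, unsigned int offset,
		unsigned int bytes, struct iomap *iomap)
{
	/* Translate the page-aligned file offset into a 512-byte sector. */
	sector_t sector = iomap->blkno +
		(((pos & PAGE_MASK) - iomap->offset) >> 9);

	/* Hand bdev, sector and in-page range to the exported helper. */
	return __dax_zero_page_range(iomap->bdev, sector, offset, bytes);
}

Such a caller never touches a buffer_head: the block mapping comes from the iomap description, and the helper does the map/clear/flush/unmap sequence shown in the first hunk.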

include/linux/dax.h

@@ -14,12 +14,19 @@ int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
 #ifdef CONFIG_FS_DAX
 struct page *read_dax_sector(struct block_device *bdev, sector_t n);
+int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
+		unsigned int offset, unsigned int length);
 #else
 static inline struct page *read_dax_sector(struct block_device *bdev,
 		sector_t n)
 {
 	return ERR_PTR(-ENXIO);
 }
+static inline int __dax_zero_page_range(struct block_device *bdev,
+		sector_t sector, unsigned int offset, unsigned int length)
+{
+	return -ENXIO;
+}
 #endif
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
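The stub added for the !CONFIG_FS_DAX case returns -ENXIO, so the helper is only meaningful for inodes that actually use DAX. A minimal caller sketch (hypothetical; example_zero_partial_page is not a real kernel function, only the generic IS_DAX() check and the exported helper are taken from the kernel):

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/errno.h>

/*
 * Hypothetical caller sketch: route only DAX inodes through the DAX
 * zeroing helper, so the -ENXIO stub compiled in when CONFIG_FS_DAX=n
 * is never reached on the non-DAX path.
 */
static int example_zero_partial_page(struct inode *inode,
		struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	if (IS_DAX(inode))
		return __dax_zero_page_range(bdev, sector, offset, length);

	/* Non-DAX files would be zeroed through the page cache instead. */
	return -EOPNOTSUPP;	/* placeholder for a buffered zeroing path */
}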