Revert "block: use DAX for partition table reads"
commit d1a5f2b4d8
("block: use DAX for partition table reads") was part of a stalled effort to allow dax mappings of block devices. Since then the device-dax mechanism has filled the role of dax-mapping static device ranges. Now that we are moving ->direct_access() from a block_device operation to a dax_inode operation we would need block devices to map and carry their own dax_inode reference. Unless / until we decide to revive dax mapping of raw block devices through the dax_inode scheme, there is no need to carry read_dax_sector(). Its removal in turn allows for the removal of bdev_direct_access() and should have been included in commit 2237570168
("block_dev: remove DAX leftovers"). Cc: Jeff Moyer <jmoyer@redhat.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
This commit is contained in:
Родитель
fa5d932c32
Коммит
a41fe02b6b
|
@ -16,7 +16,6 @@
|
|||
#include <linux/kmod.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/genhd.h>
|
||||
#include <linux/dax.h>
|
||||
#include <linux/blktrace_api.h>
|
||||
|
||||
#include "partitions/check.h"
|
||||
|
@ -631,24 +630,12 @@ int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
 * read_pagecache_sector - read the page holding 512-byte sector @n of @bdev
 * through the page cache.
 *
 * The sector number is converted to a page-cache index: each page covers
 * PAGE_SIZE / 512 sectors, hence the shift by (PAGE_SHIFT - 9).
 *
 * Returns the page on success, or an ERR_PTR() from read_mapping_page()
 * on failure.
 */
static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;
	pgoff_t index = (pgoff_t)(n >> (PAGE_SHIFT - 9));

	return read_mapping_page(mapping, index, NULL);
}
|
||||
|
||||
unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
|
||||
{
|
||||
struct address_space *mapping = bdev->bd_inode->i_mapping;
|
||||
struct page *page;
|
||||
|
||||
/* don't populate page cache for dax capable devices */
|
||||
if (IS_DAX(bdev->bd_inode))
|
||||
page = read_dax_sector(bdev, n);
|
||||
else
|
||||
page = read_pagecache_sector(bdev, n);
|
||||
|
||||
page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)), NULL);
|
||||
if (!IS_ERR(page)) {
|
||||
if (PageError(page))
|
||||
goto fail;
|
||||
|
|
20
fs/dax.c
20
fs/dax.c
|
@ -101,26 +101,6 @@ static int dax_is_empty_entry(void *entry)
|
|||
return (unsigned long)entry & RADIX_DAX_EMPTY;
|
||||
}
|
||||
|
||||
/*
 * read_dax_sector - read the page containing 512-byte sector @n directly
 * from a DAX-capable block device, bypassing the page cache.
 *
 * The sector is rounded down to the first sector of its page
 * (PAGE_SIZE / 512 sectors per page), the device range is mapped with
 * dax_map_atomic(), and one page of data is copied into a freshly
 * allocated page.
 *
 * Returns the newly allocated page on success, or an ERR_PTR() on
 * allocation or mapping failure.  The caller owns the returned page.
 */
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		/* align down to the first 512-byte sector of the page */
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0) {
		/* fix: release the page on map failure instead of leaking it */
		__free_page(page);
		return ERR_PTR(rc);
	}
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}
|
||||
|
||||
/*
|
||||
* DAX radix tree locking
|
||||
*/
|
||||
|
|
|
@ -70,15 +70,9 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
|
|||
pgoff_t index, void *entry, bool wake_all);
|
||||
|
||||
#ifdef CONFIG_FS_DAX
|
||||
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
|
||||
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
|
||||
unsigned int offset, unsigned int length);
|
||||
#else
|
||||
static inline struct page *read_dax_sector(struct block_device *bdev,
|
||||
sector_t n)
|
||||
{
|
||||
return ERR_PTR(-ENXIO);
|
||||
}
|
||||
static inline int __dax_zero_page_range(struct block_device *bdev,
|
||||
sector_t sector, unsigned int offset, unsigned int length)
|
||||
{
|
||||
|
|
Загрузка…
Ссылка в новой задаче