Merge tag 'for-4.18/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Fix DM core to use the more efficient bio_split() instead of
   bio_clone_bioset(). This also fixes splitting a bio that has an
   integrity payload.

 - Three fixes related to properly validating the DAX capabilities of a
   stacked DM device that will advertise DAX support.

 - Update the DM writecache target to use 2-factor allocator arguments.
   Kees says this is the last related change for 4.18.

 - Fix the DM zoned target to use GFP_NOIO to avoid triggering reclaim
   during IO submission (caught by lockdep).

 - Fix DM thinp to gracefully recover from running out of data space
   while a previous async discard completes (thereby freeing space).

 - Fix DM thinp's metadata transaction commit to avoid needless work.

* tag 'for-4.18/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: prevent DAX mounts if not supported
  dax: check for QUEUE_FLAG_DAX in bdev_dax_supported()
  pmem: only set QUEUE_FLAG_DAX for fsdax mode
  dm thin: handle running out of data space vs concurrent discard
  dm raid: don't use 'const' in function return
  dm zoned: avoid triggering reclaim from inside dmz_map()
  dm writecache: use 2-factor allocator arguments
  dm thin metadata: remove needless work from __commit_transaction
  dm: use bio_split() when splitting out the already processed bio
commit ff23908bb7
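One of the changes merged here, the writecache "2-factor allocator arguments" conversion, replaces open-coded n * size allocations with kmalloc_array()/kvmalloc_array()/array_size() so the multiplication is overflow-checked before any memory is handed out. Below is a minimal userspace sketch of that idea, not kernel code: it uses the GCC/Clang __builtin_mul_overflow builtin as a stand-in for the kernel's check_mul_overflow(), and alloc_array() is a hypothetical helper named here only for illustration.

#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative sketch only: a 2-factor allocator takes the element count
 * and element size separately so the byte count can be checked for
 * multiplication overflow before allocating, instead of silently wrapping
 * the way malloc(n * size) would.
 */
static void *alloc_array(size_t n, size_t size)
{
	size_t bytes;

	/* __builtin_mul_overflow is the GCC/Clang builtin; the kernel uses
	 * its own check_mul_overflow()/array_size() helpers instead. */
	if (__builtin_mul_overflow(n, size, &bytes))
		return NULL;	/* n * size would wrap -- refuse to allocate */

	return malloc(bytes);
}

int main(void)
{
	void *ok  = alloc_array(16, sizeof(void *));          /* normal case */
	void *bad = alloc_array((size_t)-1, sizeof(void *));  /* would overflow */

	printf("ok=%p bad=%p\n", ok, bad);   /* bad is NULL, not a tiny buffer */
	free(ok);
	return 0;
}

The diff below shows the actual kernel-side conversions (kvmalloc_array(), array_size(), kmalloc_array()) along with the other fixes listed above.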
@@ -86,6 +86,7 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 {
 	struct dax_device *dax_dev;
 	bool dax_enabled = false;
+	struct request_queue *q;
 	pgoff_t pgoff;
 	int err, id;
 	void *kaddr;
@@ -99,6 +100,13 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 		return false;
 	}
 
+	q = bdev_get_queue(bdev);
+	if (!q || !blk_queue_dax(q)) {
+		pr_debug("%s: error: request queue doesn't support dax\n",
+				bdevname(bdev, buf));
+		return false;
+	}
+
 	err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
 	if (err) {
 		pr_debug("%s: error: unaligned partition for dax\n",
@@ -588,7 +588,7 @@ static const char *raid10_md_layout_to_format(int layout)
 }
 
 /* Return md raid10 algorithm for @name */
-static const int raid10_name_to_format(const char *name)
+static int raid10_name_to_format(const char *name)
 {
 	if (!strcasecmp(name, "near"))
 		return ALGORITHM_RAID10_NEAR;
@@ -885,9 +885,7 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
 static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
 			       sector_t start, sector_t len, void *data)
 {
-	struct request_queue *q = bdev_get_queue(dev->bdev);
-
-	return q && blk_queue_dax(q);
+	return bdev_dax_supported(dev->bdev, PAGE_SIZE);
 }
 
 static bool dm_table_supports_dax(struct dm_table *t)
@@ -1907,6 +1905,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 
 	if (dm_table_supports_dax(t))
 		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+	else
+		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
+
 	if (dm_table_supports_dax_write_cache(t))
 		dax_write_cache(t->md->dax_dev, true);
 
@@ -776,7 +776,6 @@ static int __write_changed_details(struct dm_pool_metadata *pmd)
 static int __commit_transaction(struct dm_pool_metadata *pmd)
 {
 	int r;
-	size_t metadata_len, data_len;
 	struct thin_disk_superblock *disk_super;
 	struct dm_block *sblock;
 
@@ -797,14 +796,6 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
 	if (r < 0)
 		return r;
 
-	r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
-	if (r < 0)
-		return r;
-
-	r = dm_sm_root_size(pmd->data_sm, &data_len);
-	if (r < 0)
-		return r;
-
 	r = save_sm_roots(pmd);
 	if (r < 0)
 		return r;
 
@@ -1386,6 +1386,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
 
 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
 
+static void requeue_bios(struct pool *pool);
+
 static void check_for_space(struct pool *pool)
 {
 	int r;
@@ -1398,8 +1400,10 @@ static void check_for_space(struct pool *pool)
 	if (r)
 		return;
 
-	if (nr_free)
+	if (nr_free) {
 		set_pool_mode(pool, PM_WRITE);
+		requeue_bios(pool);
+	}
 }
 
 /*
@@ -1476,7 +1480,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
 
 	r = dm_pool_alloc_data_block(pool->pmd, result);
 	if (r) {
-		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+		if (r == -ENOSPC)
+			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
+		else
+			metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
 		return r;
 	}
 
@@ -259,7 +259,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 	if (da != p) {
 		long i;
 		wc->memory_map = NULL;
-		pages = kvmalloc(p * sizeof(struct page *), GFP_KERNEL);
+		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
 		if (!pages) {
 			r = -ENOMEM;
 			goto err2;
@@ -859,7 +859,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
 
 	if (wc->entries)
 		return 0;
-	wc->entries = vmalloc(sizeof(struct wc_entry) * wc->n_blocks);
+	wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
 	if (!wc->entries)
 		return -ENOMEM;
 	for (b = 0; b < wc->n_blocks; b++) {
@@ -1481,9 +1481,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
 		wb->bio.bi_iter.bi_sector = read_original_sector(wc, e);
 		wb->page_offset = PAGE_SIZE;
 		if (max_pages <= WB_LIST_INLINE ||
-		    unlikely(!(wb->wc_list = kmalloc(max_pages * sizeof(struct wc_entry *),
+		    unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
 							   GFP_NOIO | __GFP_NORETRY |
 							   __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
 			wb->wc_list = wb->wc_list_inline;
 			max_pages = WB_LIST_INLINE;
 		}
@@ -787,7 +787,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	/* Chunk BIO work */
 	mutex_init(&dmz->chunk_lock);
-	INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
+	INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
 	dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
 					0, dev->name);
 	if (!dmz->chunk_wq) {
@@ -1056,8 +1056,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 	if (len < 1)
 		goto out;
 	nr_pages = min(len, nr_pages);
-	if (ti->type->direct_access)
-		ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
+	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
 
  out:
 	dm_put_live_table(md, srcu_idx);
@@ -1606,10 +1605,9 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 			 * the usage of io->orig_bio in dm_remap_zone_report()
 			 * won't be affected by this reassignment.
 			 */
-			struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
-							 &md->queue->bio_split);
+			struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
+						  GFP_NOIO, &md->queue->bio_split);
 			ci.io->orig_bio = b;
-			bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
 			bio_chain(b, bio);
 			ret = generic_make_request(bio);
 			break;
@@ -414,7 +414,8 @@ static int pmem_attach_disk(struct device *dev,
 	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
 	blk_queue_max_hw_sectors(q, UINT_MAX);
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-	blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+	if (pmem->pfn_flags & PFN_MAP)
+		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
 	q->queuedata = pmem;
 
 	disk = alloc_disk_node(0, nid);