dm: delay registering the gendisk

device mapper is currently the only outlier that tries to call
register_disk after add_disk, leading to fairly inconsistent state
of these block layer data structures.  Instead change device-mapper
to just register the gendisk later now that the holder mechanism
can cope with that.

Note that this introduces a user visible change: the dm kobject is
now only visible after the initial table has been loaded.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Link: https://lore.kernel.org/r/20210804094147.459763-8-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: ba30585936
Commit: 89f871af1b
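As a reading aid (not part of the patch): a minimal sketch of the ordering the change moves to, using only the helpers visible in the diff below. Before, alloc_dev() registered the gendisk at allocation time; after, registration happens on the first table load in dm_setup_md_queue(), with the dm sysfs kobject following the gendisk. The function name below is hypothetical and the limit/type handling is elided.

/*
 * Sketch only, assuming the dm helpers shown in the diff below; the real
 * dm_setup_md_queue() also picks the queue type and applies table limits
 * before registering anything.
 */
static int dm_setup_md_queue_sketch(struct mapped_device *md,
				    struct dm_table *t)
{
	enum dm_queue_mode type = dm_table_get_type(t);
	int r;

	/* ... dm_calculate_queue_limits() / dm_table_set_restrictions() ... */

	add_disk(md->disk);		/* was add_disk_no_queue_reg() in alloc_dev() */

	r = dm_sysfs_init(md);		/* dm kobject becomes visible only now */
	if (r) {
		del_gendisk(md->disk);	/* unwind the registration on failure */
		return r;
	}
	md->type = type;
	return 0;
}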
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -559,7 +559,6 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
 	if (err)
 		goto out_tag_set;
-	elevator_init_mq(md->queue);
 	return 0;
 
 out_tag_set:
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1693,7 +1693,10 @@ static void cleanup_mapped_device(struct mapped_device *md)
 		spin_lock(&_minor_lock);
 		md->disk->private_data = NULL;
 		spin_unlock(&_minor_lock);
-		del_gendisk(md->disk);
+		if (dm_get_md_type(md) != DM_TYPE_NONE) {
+			dm_sysfs_exit(md);
+			del_gendisk(md->disk);
+		}
 		dm_queue_destroy_keyslot_manager(md->queue);
 		blk_cleanup_disk(md->disk);
 	}
@@ -1788,7 +1791,6 @@ static struct mapped_device *alloc_dev(int minor)
 			goto bad;
 	}
 
-	add_disk_no_queue_reg(md->disk);
 	format_dev_t(md->name, MKDEV(_major, minor));
 
 	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
@@ -1989,19 +1991,12 @@ static struct dm_table *__unbind(struct mapped_device *md)
  */
 int dm_create(int minor, struct mapped_device **result)
 {
-	int r;
 	struct mapped_device *md;
 
 	md = alloc_dev(minor);
 	if (!md)
 		return -ENXIO;
 
-	r = dm_sysfs_init(md);
-	if (r) {
-		free_dev(md);
-		return r;
-	}
-
 	*result = md;
 	return 0;
 }
@@ -2081,10 +2076,15 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 	r = dm_table_set_restrictions(t, md->queue, &limits);
 	if (r)
 		return r;
 
+	add_disk(md->disk);
+
+	r = dm_sysfs_init(md);
+	if (r) {
+		del_gendisk(md->disk);
+		return r;
+	}
 	md->type = type;
 
-	blk_register_queue(md->disk);
-
 	return 0;
 }
@@ -2190,7 +2190,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
 		       dm_device_name(md), atomic_read(&md->holders));
 
-	dm_sysfs_exit(md);
 	dm_table_destroy(__unbind(md));
 	free_dev(md);
 }
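Taken together, a hedged summary of the resulting lifecycle (sketch only; dm_create(), dm_setup_md_queue() and dm_destroy() are the dm core entry points touched above, and the dm_ioctl.c calling conventions are simplified away): dm_create() now only allocates the mapped_device, the gendisk and the dm kobject appear on the first table load, and teardown unregisters them only if a table type was ever set.

/*
 * Lifecycle sketch after this patch; not real driver code.  The function
 * name is hypothetical, the callees are the ones changed in the hunks above.
 */
static void dm_lifecycle_sketch(int minor, struct dm_table *t)
{
	struct mapped_device *md;

	if (dm_create(minor, &md))
		return;			/* md allocated, gendisk not registered yet */

	if (dm_setup_md_queue(md, t)) {	/* first table load: add_disk() + dm_sysfs_init() */
		dm_destroy(md);		/* type still DM_TYPE_NONE, so teardown skips
					 * dm_sysfs_exit()/del_gendisk() */
		return;
	}

	dm_destroy(md);			/* type set: cleanup_mapped_device() now calls
					 * dm_sysfs_exit() and del_gendisk() */
}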