Merge tag 'for-4.15/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - fix a particularly nasty DM core bug in a 4.15 refcount_t conversion.

 - fix various targets to dm_register_target after module __init
   resources created; otherwise racing lvm2 commands could result in a
   NULL pointer during initialization of associated DM kernel module.

 - fix regression in bio-based DM multipath queue_if_no_path handling.

 - fix DM bufio's shrinker to reclaim more than one buffer per scan.

* tag 'for-4.15/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm bufio: fix shrinker scans when (nr_to_scan < retain_target)
  dm mpath: fix bio-based multipath queue_if_no_path handling
  dm: fix various targets to dm_register_target after module __init resources created
  dm table: fix regression from improper dm_dev_internal.count refcount_t conversion
commit ee1b43ece1
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1611,7 +1611,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
 	int l;
 	struct dm_buffer *b, *tmp;
 	unsigned long freed = 0;
-	unsigned long count = nr_to_scan;
+	unsigned long count = c->n_buffers[LIST_CLEAN] +
+			      c->n_buffers[LIST_DIRTY];
 	unsigned long retain_target = get_retain_buffers(c);
 
 	for (l = 0; l < LIST_SIZE; l++) {
@@ -1647,8 +1648,11 @@ static unsigned long
 dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
+	unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
+			      READ_ONCE(c->n_buffers[LIST_DIRTY]);
+	unsigned long retain_target = get_retain_buffers(c);
 
-	return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
+	return (count < retain_target) ? 0 : (count - retain_target);
 }
 
 /*
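To make the accounting change concrete, here is a small stand-alone C model (not the kernel shrinker API; the numbers are invented) of dm_bufio_shrink_count before and after the fix. The old version advertised every buffer as reclaimable even though the scan path never drops below retain_target, so scans issued with nr_to_scan below retain_target made almost no progress; the new version advertises only the excess over retain_target.

#include <stdio.h>

/* Toy model of dm_bufio_shrink_count(); 'clean', 'dirty' and the
 * retain target are illustrative values, not real dm-bufio state. */
static unsigned long shrink_count_old(unsigned long clean, unsigned long dirty)
{
        return clean + dirty;            /* pre-fix: raw buffer total */
}

static unsigned long shrink_count_new(unsigned long clean, unsigned long dirty,
                                      unsigned long retain_target)
{
        unsigned long count = clean + dirty;

        return (count < retain_target) ? 0 : (count - retain_target);
}

int main(void)
{
        /* 100 buffers total, but the scan path always keeps 80. */
        printf("old: %lu reclaimable\n", shrink_count_old(60, 40));
        printf("new: %lu reclaimable\n", shrink_count_new(60, 40, 80));
        return 0;
}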
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3472,18 +3472,18 @@ static int __init dm_cache_init(void)
 {
 	int r;
 
-	r = dm_register_target(&cache_target);
-	if (r) {
-		DMERR("cache target registration failed: %d", r);
-		return r;
-	}
-
 	migration_cache = KMEM_CACHE(dm_cache_migration, 0);
 	if (!migration_cache) {
 		dm_unregister_target(&cache_target);
 		return -ENOMEM;
 	}
 
+	r = dm_register_target(&cache_target);
+	if (r) {
+		DMERR("cache target registration failed: %d", r);
+		return r;
+	}
+
 	return 0;
 }
 
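The dm-cache hunk above, and the dm-mpath, dm-snap and dm-thin hunks below, all enforce the same ordering rule: every resource the target's ctr/map paths depend on must exist before dm_register_target() makes the target visible, because a racing lvm2 command may instantiate it the moment registration succeeds. A stand-alone sketch of that rule follows (stub create/register functions and the my_target_init name are hypothetical, not real kernel APIs):

#include <stdio.h>
#include <stdlib.h>

/* Stubs standing in for KMEM_CACHE()/dm_register_target(); only the
 * ordering matters here, none of these are real kernel calls. */
static void *create_cache(void)       { return malloc(1); }
static void destroy_cache(void *c)    { free(c); }
static int register_target(void)      { return 0; /* 0 == success */ }

static void *migration_cache;

static int my_target_init(void)
{
        int r;

        /* 1. Create everything the target needs once it is visible. */
        migration_cache = create_cache();
        if (!migration_cache)
                return -1;

        /* 2. Register last: the moment this succeeds, a racing lvm2
         *    command may instantiate the target and use the cache. */
        r = register_target();
        if (r) {
                destroy_cache(migration_cache);
                return r;
        }

        return 0;
}

int main(void)
{
        printf("init: %d\n", my_target_init());
        destroy_cache(migration_cache);  /* module-exit-style cleanup */
        return 0;
}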
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -457,6 +457,38 @@ do { \
 		dm_noflush_suspending((m)->ti)); \
 } while (0)
 
+/*
+ * Check whether bios must be queued in the device-mapper core rather
+ * than here in the target.
+ *
+ * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
+ * the same value then we are not between multipath_presuspend()
+ * and multipath_resume() calls and we have no need to check
+ * for the DMF_NOFLUSH_SUSPENDING flag.
+ */
+static bool __must_push_back(struct multipath *m, unsigned long flags)
+{
+	return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
+		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
+		dm_noflush_suspending(m->ti));
+}
+
+/*
+ * Following functions use READ_ONCE to get atomic access to
+ * all m->flags to avoid taking spinlock
+ */
+static bool must_push_back_rq(struct multipath *m)
+{
+	unsigned long flags = READ_ONCE(m->flags);
+	return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
+}
+
+static bool must_push_back_bio(struct multipath *m)
+{
+	unsigned long flags = READ_ONCE(m->flags);
+	return __must_push_back(m, flags);
+}
+
 /*
  * Map cloned requests (request-based multipath)
  */
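One detail worth noting in the helpers just added: the caller snapshots m->flags once with READ_ONCE() and both test_bit() calls run against that local copy, so the QUEUE/SAVED comparison can never observe a half-updated pair even while queue_if_no_path() is rewriting the flags. A user-space sketch of the same snapshot pattern (test_bit reimplemented here for a single word; the flag values are illustrative):

#include <stdio.h>

enum { MPATHF_QUEUE_IF_NO_PATH, MPATHF_SAVED_QUEUE_IF_NO_PATH };

/* Local stand-in for the kernel's test_bit(), reading one word. */
static int test_bit(int nr, const unsigned long *word)
{
        return (*word >> nr) & 1UL;
}

static unsigned long m_flags;   /* plays the role of m->flags */

static int flags_disagree(void)
{
        /* One snapshot; the kernel code uses READ_ONCE(m->flags) here.
         * Both tests below see the same, untorn value even if another
         * thread rewrites m_flags concurrently. */
        unsigned long flags = m_flags;

        return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
               test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags);
}

int main(void)
{
        m_flags = 1UL << MPATHF_QUEUE_IF_NO_PATH;  /* queue on, saved off */
        printf("between presuspend and resume: %d\n", flags_disagree());
        return 0;
}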
@@ -478,7 +510,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 	pgpath = choose_pgpath(m, nr_bytes);
 
 	if (!pgpath) {
-		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+		if (must_push_back_rq(m))
 			return DM_MAPIO_DELAY_REQUEUE;
 		dm_report_EIO(m);	/* Failed */
 		return DM_MAPIO_KILL;
@@ -553,7 +585,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 	}
 
 	if (!pgpath) {
-		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+		if (must_push_back_bio(m))
 			return DM_MAPIO_REQUEUE;
 		dm_report_EIO(m);
 		return DM_MAPIO_KILL;
@@ -651,8 +683,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
 	assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
 		   (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
 		   (!save_old_value && queue_if_no_path));
-	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
-		   queue_if_no_path || dm_noflush_suspending(m->ti));
+	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
 	spin_unlock_irqrestore(&m->lock, flags);
 
 	if (!queue_if_no_path) {
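To illustrate, a small model of the two assign_bit() updates after this hunk (plain bools instead of atomic bitops; the inputs mimic a suspend-style call that saves the old value while disabling queueing, and per the comment added earlier, the two bits differing is exactly the "between presuspend and resume" state):

#include <stdio.h>
#include <stdbool.h>

/* Bool model of the SAVED/QUEUE bit updates in queue_if_no_path() after
 * this hunk; the noflush-suspend special case now lives entirely in
 * __must_push_back() instead of being folded into the QUEUE bit. */
static void set_queue_if_no_path(bool *queue, bool *saved,
                                 bool queue_if_no_path, bool save_old_value)
{
        *saved = (save_old_value && *queue) ||
                 (!save_old_value && queue_if_no_path);
        *queue = queue_if_no_path;
}

int main(void)
{
        bool queue = true, saved = false;

        /* Save the old value while turning queueing off: afterwards the
         * bits differ, which the helpers above read as "push bios back
         * rather than failing them" during a noflush suspend. */
        set_queue_if_no_path(&queue, &saved, false, true);
        printf("queue=%d saved=%d\n", queue, saved);
        return 0;
}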
@@ -1486,7 +1517,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 			fail_path(pgpath);
 
 		if (atomic_read(&m->nr_valid_paths) == 0 &&
-		    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+		    !must_push_back_rq(m)) {
 			if (error == BLK_STS_IOERR)
 				dm_report_EIO(m);
 			/* complete with the original error */
@@ -1521,8 +1552,12 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
 
 	if (atomic_read(&m->nr_valid_paths) == 0 &&
 	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-		dm_report_EIO(m);
-		*error = BLK_STS_IOERR;
+		if (must_push_back_bio(m)) {
+			r = DM_ENDIO_REQUEUE;
+		} else {
+			dm_report_EIO(m);
+			*error = BLK_STS_IOERR;
+		}
 		goto done;
 	}
 
@@ -1957,13 +1992,6 @@ static int __init dm_multipath_init(void)
 {
 	int r;
 
-	r = dm_register_target(&multipath_target);
-	if (r < 0) {
-		DMERR("request-based register failed %d", r);
-		r = -EINVAL;
-		goto bad_register_target;
-	}
-
 	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
 	if (!kmultipathd) {
 		DMERR("failed to create workqueue kmpathd");
@@ -1985,13 +2013,20 @@ static int __init dm_multipath_init(void)
 		goto bad_alloc_kmpath_handlerd;
 	}
 
+	r = dm_register_target(&multipath_target);
+	if (r < 0) {
+		DMERR("request-based register failed %d", r);
+		r = -EINVAL;
+		goto bad_register_target;
+	}
+
 	return 0;
 
+bad_register_target:
+	destroy_workqueue(kmpath_handlerd);
 bad_alloc_kmpath_handlerd:
 	destroy_workqueue(kmultipathd);
 bad_alloc_kmultipathd:
-	dm_unregister_target(&multipath_target);
-bad_register_target:
 	return r;
 }
 
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2411,24 +2411,6 @@ static int __init dm_snapshot_init(void)
 		return r;
 	}
 
-	r = dm_register_target(&snapshot_target);
-	if (r < 0) {
-		DMERR("snapshot target register failed %d", r);
-		goto bad_register_snapshot_target;
-	}
-
-	r = dm_register_target(&origin_target);
-	if (r < 0) {
-		DMERR("Origin target register failed %d", r);
-		goto bad_register_origin_target;
-	}
-
-	r = dm_register_target(&merge_target);
-	if (r < 0) {
-		DMERR("Merge target register failed %d", r);
-		goto bad_register_merge_target;
-	}
-
 	r = init_origin_hash();
 	if (r) {
 		DMERR("init_origin_hash failed.");
@@ -2449,19 +2431,37 @@ static int __init dm_snapshot_init(void)
 		goto bad_pending_cache;
 	}
 
+	r = dm_register_target(&snapshot_target);
+	if (r < 0) {
+		DMERR("snapshot target register failed %d", r);
+		goto bad_register_snapshot_target;
+	}
+
+	r = dm_register_target(&origin_target);
+	if (r < 0) {
+		DMERR("Origin target register failed %d", r);
+		goto bad_register_origin_target;
+	}
+
+	r = dm_register_target(&merge_target);
+	if (r < 0) {
+		DMERR("Merge target register failed %d", r);
+		goto bad_register_merge_target;
+	}
+
 	return 0;
 
-bad_pending_cache:
-	kmem_cache_destroy(exception_cache);
-bad_exception_cache:
-	exit_origin_hash();
-bad_origin_hash:
-	dm_unregister_target(&merge_target);
 bad_register_merge_target:
 	dm_unregister_target(&origin_target);
 bad_register_origin_target:
 	dm_unregister_target(&snapshot_target);
 bad_register_snapshot_target:
+	kmem_cache_destroy(pending_cache);
+bad_pending_cache:
+	kmem_cache_destroy(exception_cache);
+bad_exception_cache:
+	exit_origin_hash();
+bad_origin_hash:
 	dm_exception_store_exit();
 
 	return r;
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -453,14 +453,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
 
 		refcount_set(&dd->count, 1);
 		list_add(&dd->list, &t->devices);
+		goto out;
 
 	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
 		r = upgrade_mode(dd, mode, t->md);
 		if (r)
 			return r;
-		refcount_inc(&dd->count);
 	}
-
+	refcount_inc(&dd->count);
+out:
 	*result = dd->dm_dev;
 	return 0;
 }
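The rule this hunk restores: a freshly added dm_dev_internal starts with refcount 1 for its first user and jumps straight to out, while every later dm_get_device() on the same table takes exactly one refcount_inc(), whether or not upgrade_mode() ran; the broken conversion incremented only on the upgrade path. A toy model of the corrected flow (plain int instead of refcount_t, a single-device table, invented names):

#include <stdio.h>

/* Toy dm_get_device() flow: 'count' stands in for refcount_t and the
 * table holds at most one device. */
static struct { int count; int present; } dd;

static void get_device(int needs_upgrade)
{
        if (!dd.present) {
                dd.present = 1;
                dd.count = 1;        /* refcount_set(&dd->count, 1) */
                return;              /* "goto out": skip the shared inc */
        }
        if (needs_upgrade) {
                /* upgrade_mode() would run (and may fail) here */
        }
        dd.count++;                  /* one refcount_inc() per re-get */
}

int main(void)
{
        get_device(0);   /* first user    -> count 1 */
        get_device(0);   /* second user   -> count 2 */
        get_device(1);   /* upgraded user -> count 3 (no longer skipped) */
        printf("count = %d\n", dd.count);
        return 0;
}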
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -4355,30 +4355,28 @@ static struct target_type thin_target = {
 
 static int __init dm_thin_init(void)
 {
-	int r;
+	int r = -ENOMEM;
 
 	pool_table_init();
 
+	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+	if (!_new_mapping_cache)
+		return r;
+
 	r = dm_register_target(&thin_target);
 	if (r)
-		return r;
+		goto bad_new_mapping_cache;
 
 	r = dm_register_target(&pool_target);
 	if (r)
-		goto bad_pool_target;
-
-	r = -ENOMEM;
-
-	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
-	if (!_new_mapping_cache)
-		goto bad_new_mapping_cache;
+		goto bad_thin_target;
 
 	return 0;
 
-bad_new_mapping_cache:
-	dm_unregister_target(&pool_target);
-bad_pool_target:
+bad_thin_target:
 	dm_unregister_target(&thin_target);
+bad_new_mapping_cache:
+	kmem_cache_destroy(_new_mapping_cache);
 
 	return r;
 }