blk-mq: fix deadlock when reading cpu_list
CPU hotplug handling for blk-mq (blk_mq_queue_reinit) acquires all_q_mutex in blk_mq_queue_reinit_notify() and then removes the sysfs entries via blk_mq_sysfs_unregister(). Removing a sysfs entry blocks until the active reference of its kernfs_node drops to zero.

On the other hand, reading the blk_mq_hw_sysfs_cpu sysfs entry (e.g. /sys/block/nullb0/mq/0/cpu_list) acquires all_q_mutex in blk_mq_hw_sysfs_cpus_show(). If these two paths run at the same time, they can deadlock: one waits for the active reference to reach zero while holding all_q_mutex, and the other tries to acquire all_q_mutex while holding the active reference.

all_q_mutex is taken in blk_mq_hw_sysfs_cpus_show() only to avoid reading an incomplete hctx->cpumask. Since reading a blk-mq sysfs entry already requires q->sysfs_lock, we can avoid both the deadlock and the incomplete read by holding q->sysfs_lock while hctx->cpumask is being updated.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Reviewed-by: Ming Lei <tom.leiming@gmail.com>
Cc: Ming Lei <tom.leiming@gmail.com>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent: 5778322e67
Commit: 60de074ba1
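As an illustration of the lock cycle described in the log above, here is a minimal userspace sketch (an assumption-laden model, not kernel code): all_q_mutex is modeled as a pthread mutex and the kernfs_node active reference as a plain C11 atomic counter, with the two thread bodies standing in for the blk_mq_queue_reinit_notify()/blk_mq_sysfs_unregister() path and the old blk_mq_hw_sysfs_cpus_show() path. Built with "cc -pthread", the program hangs: one thread holds the mutex and waits for the counter to drop to zero, the other holds the counter and waits for the mutex.

/* deadlock_model.c: hypothetical userspace model, not part of the kernel patch */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t all_q_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_int active_ref;	/* models the kernfs_node active reference */

/* CPU hotplug path: blk_mq_queue_reinit_notify() -> blk_mq_sysfs_unregister() */
static void *hotplug_path(void *arg)
{
	pthread_mutex_lock(&all_q_mutex);
	usleep(200 * 1000);		/* give the reader time to take its reference */
	/* sysfs removal: wait for the active reference to drop, mutex still held */
	while (atomic_load(&active_ref) != 0)
		sched_yield();
	pthread_mutex_unlock(&all_q_mutex);
	return NULL;
}

/* sysfs read path: "cat /sys/block/<dev>/mq/0/cpu_list" before this fix */
static void *sysfs_read_path(void *arg)
{
	usleep(100 * 1000);
	atomic_fetch_add(&active_ref, 1);	/* kernfs takes the active reference */
	pthread_mutex_lock(&all_q_mutex);	/* old blk_mq_hw_sysfs_cpus_show() */
	/* ... format hctx->cpumask into the sysfs page ... */
	pthread_mutex_unlock(&all_q_mutex);
	atomic_fetch_sub(&active_ref, 1);	/* reference released on return */
	return NULL;
}

int main(void)
{
	pthread_t hotplug, reader;

	pthread_create(&hotplug, NULL, hotplug_path, NULL);
	pthread_create(&reader, NULL, sysfs_read_path, NULL);
	usleep(500 * 1000);
	fprintf(stderr, "hotplug holds all_q_mutex and waits for active_ref;\n"
			"reader holds active_ref and waits for all_q_mutex\n");
	pthread_join(hotplug, NULL);	/* never returns: the threads block on each other */
	pthread_join(reader, NULL);
	return 0;
}

The patch below breaks the cycle by dropping the hotplug lock from the read side and instead serializing hctx->cpumask updates under q->sysfs_lock, which the sysfs read path already holds.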
block/blk-mq-sysfs.c
@@ -229,8 +229,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
 	unsigned int i, first = 1;
 	ssize_t ret = 0;
 
-	blk_mq_disable_hotplug();
-
 	for_each_cpu(i, hctx->cpumask) {
 		if (first)
 			ret += sprintf(ret + page, "%u", i);
@@ -240,8 +238,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
 		first = 0;
 	}
 
-	blk_mq_enable_hotplug();
-
 	ret += sprintf(ret + page, "\n");
 	return ret;
 }
block/blk-mq.c
@@ -1797,6 +1797,11 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_tag_set *set = q->tag_set;
 
+	/*
+	 * Avoid others reading imcomplete hctx->cpumask through sysfs
+	 */
+	mutex_lock(&q->sysfs_lock);
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		cpumask_clear(hctx->cpumask);
 		hctx->nr_ctx = 0;
@@ -1816,6 +1821,8 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 		hctx->ctxs[hctx->nr_ctx++] = ctx;
 	}
 
+	mutex_unlock(&q->sysfs_lock);
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		struct blk_mq_ctxmap *map = &hctx->ctx_map;
 