From d94ecfc399715f06da347922e7979c088b1d8834 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Fri, 5 Jun 2020 19:44:09 +0800
Subject: [PATCH] blk-mq: split out a __blk_mq_get_driver_tag helper

Allocation of the driver tag in the case of using a scheduler shares
very little code with the "normal" tag allocation. Split out a new
helper to streamline this path, and untangle it from the complex normal
tag allocation.

This also avoids failing driver tag allocation due to an inactive hctx
during CPU hotplug, and fixes a potential hang risk.

Fixes: bf0beec0607d ("blk-mq: drain I/O when all CPUs in a hctx are offline")
Signed-off-by: Ming Lei
Signed-off-by: Christoph Hellwig
Tested-by: John Garry
Cc: Dongli Zhang
Cc: Hannes Reinecke
Cc: Daniel Wagner
Signed-off-by: Jens Axboe
---
 block/blk-mq-tag.c | 27 +++++++++++++++++++++++++++
 block/blk-mq-tag.h |  8 ++++++++
 block/blk-mq.c     | 29 -----------------------------
 block/blk-mq.h     |  1 -
 4 files changed, 35 insertions(+), 30 deletions(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 96a39d0724a2..cded7fdcad8e 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -191,6 +191,33 @@ found_tag:
 	return tag + tag_offset;
 }
 
+bool __blk_mq_get_driver_tag(struct request *rq)
+{
+	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
+	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
+	bool shared = blk_mq_tag_busy(rq->mq_hctx);
+	int tag;
+
+	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
+		bt = &rq->mq_hctx->tags->breserved_tags;
+		tag_offset = 0;
+	}
+
+	if (!hctx_may_queue(rq->mq_hctx, bt))
+		return false;
+	tag = __sbitmap_queue_get(bt);
+	if (tag == BLK_MQ_NO_TAG)
+		return false;
+
+	rq->tag = tag + tag_offset;
+	if (shared) {
+		rq->rq_flags |= RQF_MQ_INFLIGHT;
+		atomic_inc(&rq->mq_hctx->nr_active);
+	}
+	rq->mq_hctx->tags->rqs[rq->tag] = rq;
+	return true;
+}
+
 void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
 		    unsigned int tag)
 {
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index d38e48f2a0a4..2e4ef51cdb32 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -51,6 +51,14 @@ enum {
 	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
 };
 
+bool __blk_mq_get_driver_tag(struct request *rq);
+static inline bool blk_mq_get_driver_tag(struct request *rq)
+{
+	if (rq->tag != BLK_MQ_NO_TAG)
+		return true;
+	return __blk_mq_get_driver_tag(rq);
+}
+
 extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
 extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9a36ac1c1fa1..4f57d27bfa73 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1052,35 +1052,6 @@ static inline unsigned int queued_to_index(unsigned int queued)
 	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
 }
 
-bool blk_mq_get_driver_tag(struct request *rq)
-{
-	struct blk_mq_alloc_data data = {
-		.q = rq->q,
-		.hctx = rq->mq_hctx,
-		.flags = BLK_MQ_REQ_NOWAIT,
-		.cmd_flags = rq->cmd_flags,
-	};
-	bool shared;
-
-	if (rq->tag != BLK_MQ_NO_TAG)
-		return true;
-
-	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
-		data.flags |= BLK_MQ_REQ_RESERVED;
-
-	shared = blk_mq_tag_busy(data.hctx);
-	rq->tag = blk_mq_get_tag(&data);
-	if (rq->tag >= 0) {
-		if (shared) {
-			rq->rq_flags |= RQF_MQ_INFLIGHT;
-			atomic_inc(&data.hctx->nr_active);
-		}
-		data.hctx->tags->rqs[rq->tag] = rq;
-	}
-
-	return rq->tag != BLK_MQ_NO_TAG;
-}
-
 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
 				int flags, void *key)
 {
diff --git a/block/blk-mq.h b/block/blk-mq.h
index a139b0631817..b3ce0f3a2ad2 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -44,7 +44,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
 				bool kick_requeue_list);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
-bool blk_mq_get_driver_tag(struct request *rq);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *start);
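
For readers outside the kernel tree, the pattern this patch applies can be
seen in a minimal standalone C sketch: keep the common "request already
holds a tag" check in a cheap inline wrapper, and move the actual
allocation into an out-of-line slow path. All names below (demo_get_tag,
__demo_get_tag, struct demo_request, struct tag_pool, DEMO_NO_TAG) are
hypothetical stand-ins; the real code operates on struct request and
sbitmap_queue and uses atomic bitmap operations, not this single-threaded
scan.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_NO_TAG  (-1)
#define DEMO_NR_TAGS 8

struct tag_pool {
	unsigned long bits;	/* one bit per tag; stands in for sbitmap_queue */
};

struct demo_request {
	int tag;		/* cached driver tag, DEMO_NO_TAG when unset */
	struct tag_pool *pool;
};

/* Slow path, out of line: scan for a free bit and claim it — the role
 * __blk_mq_get_driver_tag() fills via __sbitmap_queue_get(). */
static bool __demo_get_tag(struct demo_request *rq)
{
	for (int tag = 0; tag < DEMO_NR_TAGS; tag++) {
		if (!(rq->pool->bits & (1UL << tag))) {
			rq->pool->bits |= 1UL << tag;
			rq->tag = tag;
			return true;
		}
	}
	return false;	/* pool exhausted; caller must back off and retry */
}

/* Fast path, inline: the common case costs one comparison, mirroring
 * the new blk_mq_get_driver_tag() wrapper in blk-mq-tag.h. */
static inline bool demo_get_tag(struct demo_request *rq)
{
	if (rq->tag != DEMO_NO_TAG)
		return true;
	return __demo_get_tag(rq);
}

int main(void)
{
	struct tag_pool pool = { .bits = 0 };
	struct demo_request rq = { .tag = DEMO_NO_TAG, .pool = &pool };

	if (demo_get_tag(&rq))
		printf("allocated tag %d\n", rq.tag);
	if (demo_get_tag(&rq))	/* second call takes the inline fast path */
		printf("still holding tag %d\n", rq.tag);
	return 0;
}

The split buys the same two things the patch describes: the hot dispatch
path stays a trivial inline check, and the slow path no longer shares the
flag-driven blk_mq_get_tag() machinery, so it cannot trip over the
inactive-hctx handling that only the normal allocation path needs.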