block: Improve performance for BLK_MQ_F_BLOCKING drivers
blk_mq_run_hw_queue() runs the queue asynchronously if BLK_MQ_F_BLOCKING
has been set. This is suboptimal since running the queue asynchronously
is slower than running the queue synchronously. This patch modifies
blk_mq_run_hw_queue() as follows if BLK_MQ_F_BLOCKING has been set:
- Run the queue synchronously if it is allowed to sleep.
- Run the queue asynchronously if it is not allowed to sleep.

Additionally, blk_mq_run_hw_queue(hctx, false) calls are modified into
blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING) if the caller
may be invoked from atomic context.

The following caller chains have been reviewed:

blk_mq_run_hw_queue(hctx, false)
  blk_mq_get_tag()               /* may sleep, hence the functions it calls may also sleep */
  blk_execute_rq()               /* may sleep */
  blk_mq_run_hw_queues(q, async=false)
    blk_freeze_queue_start()     /* may sleep */
    blk_mq_requeue_work()        /* may sleep */
    scsi_kick_queue()
      scsi_requeue_run_queue()   /* may sleep */
      scsi_run_host_queues()
        scsi_ioctl_reset()       /* may sleep */
  blk_mq_insert_requests(hctx, ctx, list, run_queue_async=false)
    blk_mq_dispatch_plug_list(plug, from_sched=false)
      blk_mq_flush_plug_list(plug, from_schedule=false)
        __blk_flush_plug(plug, from_schedule=false)
        blk_add_rq_to_plug()
          blk_mq_submit_bio()    /* may sleep if REQ_NOWAIT has not been set */
  blk_mq_plug_issue_direct()
    blk_mq_flush_plug_list()     /* see above */
  blk_mq_dispatch_plug_list(plug, from_sched=false)
    blk_mq_flush_plug_list()     /* see above */
  blk_mq_try_issue_directly()
    blk_mq_submit_bio()          /* may sleep if REQ_NOWAIT has not been set */
  blk_mq_try_issue_list_directly(hctx, list)
    blk_mq_insert_requests()     /* see above */

Cc: Christoph Hellwig <hch@lst.de>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20230721172731.955724-4-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: d42e2e3448
Commit: 65a558f66c
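To make the behavioral change concrete before reading the diff, here is a minimal, self-contained C sketch (not kernel code; the flag value, type names, and helpers are stand-ins) of how the "punt to a workqueue" decision in blk_mq_run_hw_queue() changes for a hardware queue with BLK_MQ_F_BLOCKING set, and of the convention the patch adopts for callers that may be in atomic context:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in definitions; the real flag lives in include/linux/blk-mq.h. */
#define BLK_MQ_F_BLOCKING (1U << 5)     /* illustrative value only */

struct hw_ctx {
	unsigned int flags;             /* stand-in for blk_mq_hw_ctx::flags */
};

/* Before the patch: a blocking hctx was always punted to a workqueue. */
static bool punt_to_workqueue_before(const struct hw_ctx *hctx, bool async,
				     bool cpu_in_hctx_mask)
{
	return async || (hctx->flags & BLK_MQ_F_BLOCKING) || !cpu_in_hctx_mask;
}

/*
 * After the patch: the hctx is punted only if the caller asked for an
 * asynchronous run (e.g. because it may be in atomic context) or the
 * current CPU is not in hctx->cpumask; otherwise the queue is run
 * synchronously, which is faster.
 */
static bool punt_to_workqueue_after(bool async, bool cpu_in_hctx_mask)
{
	return async || !cpu_in_hctx_mask;
}

int main(void)
{
	struct hw_ctx hctx = { .flags = BLK_MQ_F_BLOCKING };

	/* Caller that is allowed to sleep: it keeps passing async=false. */
	printf("sleepable caller: punt before=%d, after=%d\n",
	       punt_to_workqueue_before(&hctx, false, true),
	       punt_to_workqueue_after(false, true));

	/*
	 * Caller that may be in atomic context: it now passes
	 * async = hctx->flags & BLK_MQ_F_BLOCKING, so a blocking queue is
	 * still run asynchronously, just as before the patch.
	 */
	bool async = hctx.flags & BLK_MQ_F_BLOCKING;
	printf("atomic caller: punt before=%d, after=%d\n",
	       punt_to_workqueue_before(&hctx, false, true),
	       punt_to_workqueue_after(async, true));
	return 0;
}

With BLK_MQ_F_BLOCKING set, both cases used to punt to the workqueue; after the patch only the caller that may be atomic does, which is the performance win described above.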
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1323,7 +1323,7 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head)
 	}
 
 	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
-	blk_mq_run_hw_queue(hctx, false);
+	blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
@@ -2222,6 +2222,8 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	 */
 	WARN_ON_ONCE(!async && in_interrupt());
 
+	might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
+
 	/*
 	 * When queue is quiesced, we may be switching io scheduler, or
 	 * updating nr_hw_queues, or other things, and we can't run queue
@@ -2237,8 +2239,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	if (!need_run)
 		return;
 
-	if (async || (hctx->flags & BLK_MQ_F_BLOCKING) ||
-	    !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
+	if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
 		blk_mq_delay_run_hw_queue(hctx, 0);
 		return;
 	}
@@ -2373,7 +2374,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
-	blk_mq_run_hw_queue(hctx, false);
+	blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
 
@@ -2403,7 +2404,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 	unsigned long i;
 
 	queue_for_each_hw_ctx(q, hctx, i)
-		blk_mq_start_stopped_hw_queue(hctx, async);
+		blk_mq_start_stopped_hw_queue(hctx, async ||
+					      (hctx->flags & BLK_MQ_F_BLOCKING));
 }
 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
 
@@ -2461,6 +2463,8 @@ static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
 	list_for_each_entry(rq, list, queuelist) {
 		BUG_ON(rq->mq_ctx != ctx);
 		trace_block_rq_insert(rq);
+		if (rq->cmd_flags & REQ_NOWAIT)
+			run_queue_async = true;
 	}
 
 	spin_lock(&ctx->lock);
@@ -2621,7 +2625,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
 	if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) {
 		blk_mq_insert_request(rq, 0);
-		blk_mq_run_hw_queue(hctx, false);
+		blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
 		return;
 	}
 
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -335,7 +335,8 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
 	 * but in most cases, we will be first. Ideally, each LU on the
 	 * target would get some limited time or requests on the target.
 	 */
-	blk_mq_run_hw_queues(current_sdev->request_queue, false);
+	blk_mq_run_hw_queues(current_sdev->request_queue,
+			     shost->queuecommand_may_block);
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	if (!starget->starget_sdev_user)
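For context on where BLK_MQ_F_BLOCKING comes from: a blk-mq driver whose ->queue_rq() may sleep declares that by setting the flag on its tag set, and on the SCSI side the shost->queuecommand_may_block flag used in the scsi_lib.c hunk above feeds the same decision. Below is a rough kernel-style sketch under those assumptions; the example_* names are hypothetical and error handling is omitted.

#include <linux/blk-mq.h>
#include <linux/string.h>

/*
 * Sketch only: a driver whose ->queue_rq() implementation may sleep
 * advertises that with BLK_MQ_F_BLOCKING on its tag set.  Real drivers
 * also fill in cmd_size, ->init_request(), etc.
 */
static struct blk_mq_tag_set example_tag_set;

static int example_setup_tag_set(const struct blk_mq_ops *ops)
{
	struct blk_mq_tag_set *set = &example_tag_set;

	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	/* ->queue_rq() may sleep, so request BLK_MQ_F_BLOCKING. */
	set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}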