block: Remove unnecessary elevator operation checks
The insert_requests and dispatch_request elevator operations are mandatory for the correct execution of an elevator, and all in-tree elevators (bfq, kyber and mq-deadline) implement them. As a result, there is no need to check for these operations before calling them when a queue has an elevator set. This simplifies the code in __blk_mq_sched_dispatch_requests() and blk_mq_sched_insert_request().

To prevent a badly implemented out-of-tree elevator from crashing the kernel, add a check in elv_register() to verify that these operations are implemented.

A small, probably not significant, IOPS improvement of 0.1% is observed with this patch applied (4.117 MIOPS to 4.123 MIOPS, average of 20 fio runs doing 4K random direct reads with psync and 32 jobs).

Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20210618015922.713999-1-damien.lemoal@wdc.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: f0c1c4d286
Commit: e42cfb1da0
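Illustration of the new registration-time check (a minimal sketch, not part of the patch: the "noop-sketch" name, the sketch_* identifiers and the module boilerplate are invented, and the elevator is deliberately incomplete). A hypothetical out-of-tree elevator that fills in insert_requests but leaves dispatch_request NULL would now be refused by elv_register() with -EINVAL instead of being registered with a mandatory operation missing:

#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>

/* Stub insert_requests; a real elevator would queue the requests somewhere. */
static void sketch_insert_requests(struct blk_mq_hw_ctx *hctx,
                                   struct list_head *list, bool at_head)
{
}

/* .dispatch_request is deliberately left NULL to trip the new check. */
static struct elevator_type sketch_elevator = {
        .ops = {
                .insert_requests = sketch_insert_requests,
        },
        .elevator_name  = "noop-sketch",
        .elevator_owner = THIS_MODULE,
};

static int __init sketch_init(void)
{
        /*
         * With this patch applied, elv_register() hits WARN_ON_ONCE() and
         * returns -EINVAL here, so the module fails to load. This protects
         * the dispatch path, which now calls these operations without
         * checking them for NULL first.
         */
        return elv_register(&sketch_elevator);
}

static void __exit sketch_exit(void)
{
        elv_unregister(&sketch_elevator);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");

The in-tree elevators (bfq, kyber and mq-deadline) already set both operations, so they are unaffected by the check.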
block/blk-mq-sched.c

@@ -294,8 +294,7 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
 static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
         struct request_queue *q = hctx->queue;
-        struct elevator_queue *e = q->elevator;
-        const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
+        const bool has_sched = q->elevator;
         int ret = 0;
         LIST_HEAD(rq_list);
 
@@ -326,12 +325,12 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
         if (!list_empty(&rq_list)) {
                 blk_mq_sched_mark_restart_hctx(hctx);
                 if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
-                        if (has_sched_dispatch)
+                        if (has_sched)
                                 ret = blk_mq_do_dispatch_sched(hctx);
                         else
                                 ret = blk_mq_do_dispatch_ctx(hctx);
                 }
-        } else if (has_sched_dispatch) {
+        } else if (has_sched) {
                 ret = blk_mq_do_dispatch_sched(hctx);
         } else if (hctx->dispatch_busy) {
                 /* dequeue request one by one from sw queue if queue is busy */
@@ -463,7 +462,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                 goto run;
         }
 
-        if (e && e->type->ops.insert_requests) {
+        if (e) {
                 LIST_HEAD(list);
 
                 list_add(&rq->queuelist, &list);
@@ -494,9 +493,9 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
         percpu_ref_get(&q->q_usage_counter);
 
         e = hctx->queue->elevator;
-        if (e && e->type->ops.insert_requests)
+        if (e) {
                 e->type->ops.insert_requests(hctx, list, false);
-        else {
+        } else {
                 /*
                  * try to issue requests directly if the hw queue isn't
                  * busy in case of 'none' scheduler, and this way may save
block/elevator.c

@@ -522,6 +522,10 @@ void elv_unregister_queue(struct request_queue *q)
 
 int elv_register(struct elevator_type *e)
 {
+        /* insert_requests and dispatch_request are mandatory */
+        if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
+                return -EINVAL;
+
         /* create icq_cache if requested */
         if (e->icq_size) {
                 if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||