bfq: Relax waker detection for shared queues

[ Upstream commit f950667356 ]

Currently we look for a waker only if the current queue has no requests.
This makes sense for bfq queues with a single process, but for shared
queues with a larger number of processes the condition that the queue
has no requests is difficult to meet, because often at least one process
has a request in flight while all the others are waiting for the waker
to do the work, and this harms throughput. Relax the "no queued request
for bfq queue" condition to "the current task has no queued requests
yet". For this, we also need to start tracking the number of requests in
flight for each task.
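
In essence, the gate in bfq_add_request() changes from a per-queue
emptiness test to a per-task one. Below is a minimal stand-alone C sketch
of the two conditions; the struct fields are illustrative stand-ins, not
the real BFQ definitions:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the real BFQ types. */
struct bfq_io_cq {
	unsigned int requests;	/* requests this task has in flight */
};

struct bfq_queue {
	unsigned int queued;	/* requests queued in this (possibly shared) queue */
	bool sync;
};

/* Old gate: probe for a waker only when the whole queue is empty. */
static bool may_check_waker_old(const struct bfq_queue *bfqq)
{
	return bfqq->queued == 0 && bfqq->sync;
}

/*
 * New gate: probe whenever the submitting task has no other request in
 * flight (<= 1 accounts for the request being added), even if other
 * tasks sharing the queue still have requests pending.
 */
static bool may_check_waker_new(const struct bfq_queue *bfqq,
				const struct bfq_io_cq *bic)
{
	return bfqq->sync && bic->requests <= 1;
}

int main(void)
{
	struct bfq_queue shared = { .queued = 3, .sync = true };
	struct bfq_io_cq bic = { .requests = 1 };

	/* Shared queue with other requests pending: old gate refuses, new gate allows. */
	printf("old=%d new=%d\n", may_check_waker_old(&shared),
	       may_check_waker_new(&shared, &bic));
	return 0;
}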

This patch (together with the following one) restores the dbench
performance with 128 clients that regressed with commit c65e6fd460
("bfq: Do not let waker requests skip proper accounting"), because that
commit makes waker requests properly enter BFQ queues, and these queues
thus become ineligible for the old waker detection logic.
Dbench results:

         Vanilla 5.18-rc3        5.18-rc3 + revert      5.18-rc3 patched
Mean     1237.36 (   0.00%)      950.16 *  23.21%*      988.35 *  20.12%*

Numbers are the time to complete the workload, so lower is better.

Fixes: c65e6fd460 ("bfq: Do not let waker requests skip proper accounting")
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20220519105235.31397-1-jack@suse.cz
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
Jan Kara 2022-05-19 12:52:29 +02:00 committed by Greg Kroah-Hartman
Parent 3ade442ea5
Commit 9de82bfc05
2 changed files with 4 additions and 2 deletions

--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c

@@ -2022,7 +2022,6 @@ static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	if (!bfqd->last_completed_rq_bfqq ||
 	    bfqd->last_completed_rq_bfqq == bfqq ||
 	    bfq_bfqq_has_short_ttime(bfqq) ||
-	    bfqq->dispatched > 0 ||
 	    now_ns - bfqd->last_completion >= 4 * NSEC_PER_MSEC ||
 	    bfqd->last_completed_rq_bfqq == bfqq->waker_bfqq)
 		return;
@@ -2084,7 +2083,7 @@ static void bfq_add_request(struct request *rq)
 	bfqq->queued[rq_is_sync(rq)]++;
 	bfqd->queued++;
 
-	if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_sync(bfqq)) {
+	if (bfq_bfqq_sync(bfqq) && RQ_BIC(rq)->requests <= 1) {
 		bfq_check_waker(bfqd, bfqq, now_ns);
 
 		/*
@@ -6422,6 +6421,7 @@ static void bfq_finish_requeue_request(struct request *rq)
 		bfq_completed_request(bfqq, bfqd);
 	}
 	bfq_finish_requeue_request_body(bfqq);
+	RQ_BIC(rq)->requests--;
 	spin_unlock_irqrestore(&bfqd->lock, flags);
 
 	/*
@@ -6643,6 +6643,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
 
 	bfqq->allocated++;
 	bfqq->ref++;
+	bic->requests++;
 	bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
 		     rq, bfqq, bfqq->ref);

--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h

@@ -466,6 +466,7 @@ struct bfq_io_cq {
 	struct bfq_queue *stable_merge_bfqq;
 
 	bool stably_merged;	/* non splittable if true */
+	unsigned int requests;	/* Number of requests this process has in flight */
 };
 
 /**