block: sum requests in the plug structure
This isn't exactly the same as the previous count, as it includes requests for all devices. But that really doesn't matter: if we have more than the threshold (16) queued up, flush it. It's not worth it to have an expensive list loop for this.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: af78ff7c6e
Commit: 5f0ed774ed
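The idea in a nutshell, shown as a standalone sketch rather than the kernel code (the names below are made up for illustration; the kernel uses struct list_head, per-task plugs, and the real blk_plug fields): keep a running count next to the plugged-request list so the "time to flush?" decision is a single comparison instead of a list walk.

#include <stddef.h>

#define PLUG_FLUSH_THRESHOLD 16	/* mirrors BLK_MAX_REQUEST_COUNT */

struct fake_request {
	struct fake_request *next;	/* stand-in for the queuelist linkage */
};

struct fake_plug {
	struct fake_request *head;	/* stand-in for plug->mq_list */
	unsigned short rq_count;	/* running total, like plug->rq_count */
};

/* Adding a request bumps the counter in lockstep with the list. */
static void plug_add(struct fake_plug *plug, struct fake_request *rq)
{
	rq->next = plug->head;
	plug->head = rq;
	plug->rq_count++;
}

/* The flush decision is now O(1): no list walk needed. */
static int plug_should_flush(const struct fake_plug *plug)
{
	return plug->rq_count >= PLUG_FLUSH_THRESHOLD;
}

/* Handing the list off (cf. list_splice_init in the patch) resets the count. */
static struct fake_request *plug_flush(struct fake_plug *plug)
{
	struct fake_request *list = plug->head;

	plug->head = NULL;
	plug->rq_count = 0;
	return list;
}

As the commit message notes, the count covers requests for all devices on the plug list, not just the current queue, but that is acceptable for a coarse flush threshold like the 16-request limit.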
@@ -736,7 +736,6 @@ no_merge:
  * Caller must ensure !blk_queue_nomerges(q) beforehand.
  */
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-			    unsigned int *request_count,
 			    struct request **same_queue_rq)
 {
 	struct blk_plug *plug;
@@ -746,22 +745,19 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 	plug = current->plug;
 	if (!plug)
 		return false;
-	*request_count = 0;
 
 	plug_list = &plug->mq_list;
 
 	list_for_each_entry_reverse(rq, plug_list, queuelist) {
 		bool merged = false;
 
-		if (rq->q == q) {
-			(*request_count)++;
+		if (rq->q == q && same_queue_rq) {
 			/*
 			 * Only blk-mq multiple hardware queues case checks the
 			 * rq in the same queue, there should be only one such
 			 * rq in a queue
 			 **/
-			if (same_queue_rq)
-				*same_queue_rq = rq;
+			*same_queue_rq = rq;
 		}
 
 		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
@@ -788,26 +784,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 	return false;
 }
 
-unsigned int blk_plug_queued_count(struct request_queue *q)
-{
-	struct blk_plug *plug;
-	struct request *rq;
-	struct list_head *plug_list;
-	unsigned int ret = 0;
-
-	plug = current->plug;
-	if (!plug)
-		goto out;
-
-	plug_list = &plug->mq_list;
-	list_for_each_entry(rq, plug_list, queuelist) {
-		if (rq->q == q)
-			ret++;
-	}
-out:
-	return ret;
-}
-
 void blk_init_request_from_bio(struct request *req, struct bio *bio)
 {
 	if (bio->bi_opf & REQ_RAHEAD)
@@ -1803,6 +1779,8 @@ void blk_start_plug(struct blk_plug *plug)
 
 	INIT_LIST_HEAD(&plug->mq_list);
 	INIT_LIST_HEAD(&plug->cb_list);
+	plug->rq_count = 0;
 
 	/*
 	 * Store ordering should not be needed here, since a potential
 	 * preempt will imply a full memory barrier
@@ -1675,6 +1675,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	unsigned int depth;
 
 	list_splice_init(&plug->mq_list, &list);
+	plug->rq_count = 0;
 
 	list_sort(NULL, &list, plug_rq_cmp);
 
@@ -1871,7 +1872,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	const int is_flush_fua = op_is_flush(bio->bi_opf);
 	struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf };
 	struct request *rq;
-	unsigned int request_count = 0;
 	struct blk_plug *plug;
 	struct request *same_queue_rq = NULL;
 	blk_qc_t cookie;
@@ -1884,7 +1884,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		return BLK_QC_T_NONE;
 
 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
-	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+	    blk_attempt_plug_merge(q, bio, &same_queue_rq))
 		return BLK_QC_T_NONE;
 
 	if (blk_mq_sched_bio_merge(q, bio))
@@ -1915,20 +1915,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		blk_insert_flush(rq);
 		blk_mq_run_hw_queue(data.hctx, true);
 	} else if (plug && q->nr_hw_queues == 1) {
+		unsigned int request_count = plug->rq_count;
 		struct request *last = NULL;
 
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
 
-		/*
-		 * @request_count may become stale because of schedule
-		 * out, so check the list again.
-		 */
-		if (list_empty(&plug->mq_list))
-			request_count = 0;
-		else if (blk_queue_nomerges(q))
-			request_count = blk_plug_queued_count(q);
-
 		if (!request_count)
 			trace_block_plug(q);
 		else
@@ -1941,6 +1933,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		}
 
 		list_add_tail(&rq->queuelist, &plug->mq_list);
+		plug->rq_count++;
 	} else if (plug && !blk_queue_nomerges(q)) {
 		blk_mq_bio_to_request(rq, bio);
 
@@ -1956,6 +1949,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		if (same_queue_rq)
 			list_del_init(&same_queue_rq->queuelist);
 		list_add_tail(&rq->queuelist, &plug->mq_list);
+		plug->rq_count++;
 
 		blk_mq_put_ctx(data.ctx);
 
@@ -161,9 +161,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
 			       struct bio *bio);
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-			    unsigned int *request_count,
 			    struct request **same_queue_rq);
-unsigned int blk_plug_queued_count(struct request_queue *q);
 
 void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);
@@ -1130,6 +1130,7 @@ extern void blk_set_queue_dying(struct request_queue *);
 struct blk_plug {
 	struct list_head mq_list; /* blk-mq requests */
 	struct list_head cb_list; /* md requires an unplug callback */
+	unsigned short rq_count;
 };
 #define BLK_MAX_REQUEST_COUNT 16
 #define BLK_PLUG_FLUSH_SIZE (128 * 1024)