blk-mq: use the I/O scheduler for writes from the flush state machine
Send write requests issued by the flush state machine through the normal I/O submission path, including the I/O scheduler (if present), so that I/O scheduler policies are applied to writes with the FUA flag set. Separate the I/O scheduler members from the flush members in struct request, since a request may now pass through both an I/O scheduler and the flush machinery. Note that the actual flush requests, which have no bio attached to the request, still bypass the I/O schedulers. Signed-off-by: Bart Van Assche <bvanassche@acm.org> [hch: rebased] Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Damien Le Moal <dlemoal@kernel.org> Link: https://lore.kernel.org/r/20230519044050.107790-5-hch@lst.de Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Parent
360f264834
Commit
be4c427809
|
@@ -458,7 +458,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
|
||||||
* Flush/passthrough requests are special and go directly to the
|
* Flush/passthrough requests are special and go directly to the
|
||||||
* dispatch list.
|
* dispatch list.
|
||||||
*/
|
*/
|
||||||
if (!op_is_flush(data->cmd_flags) &&
|
if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
|
||||||
!blk_op_is_passthrough(data->cmd_flags)) {
|
!blk_op_is_passthrough(data->cmd_flags)) {
|
||||||
struct elevator_mq_ops *ops = &q->elevator->type->ops;
|
struct elevator_mq_ops *ops = &q->elevator->type->ops;
|
||||||
|
|
||||||
|
@@ -2497,7 +2497,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
|
||||||
* dispatch it given we prioritize requests in hctx->dispatch.
|
* dispatch it given we prioritize requests in hctx->dispatch.
|
||||||
*/
|
*/
|
||||||
blk_mq_request_bypass_insert(rq, flags);
|
blk_mq_request_bypass_insert(rq, flags);
|
||||||
} else if (rq->rq_flags & RQF_FLUSH_SEQ) {
|
} else if (req_op(rq) == REQ_OP_FLUSH) {
|
||||||
/*
|
/*
|
||||||
* Firstly normal IO request is inserted to scheduler queue or
|
* Firstly normal IO request is inserted to scheduler queue or
|
||||||
* sw queue, meantime we add flush request to dispatch queue(
|
* sw queue, meantime we add flush request to dispatch queue(
|
||||||
|
|
|
@@ -169,25 +169,20 @@ struct request {
|
||||||
void *completion_data;
|
void *completion_data;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Three pointers are available for the IO schedulers, if they need
|
* Three pointers are available for the IO schedulers, if they need
|
||||||
* more they have to dynamically allocate it. Flush requests are
|
* more they have to dynamically allocate it.
|
||||||
* never put on the IO scheduler. So let the flush fields share
|
|
||||||
* space with the elevator data.
|
|
||||||
*/
|
*/
|
||||||
union {
|
struct {
|
||||||
struct {
|
struct io_cq *icq;
|
||||||
struct io_cq *icq;
|
void *priv[2];
|
||||||
void *priv[2];
|
} elv;
|
||||||
} elv;
|
|
||||||
|
|
||||||
struct {
|
struct {
|
||||||
unsigned int seq;
|
unsigned int seq;
|
||||||
struct list_head list;
|
struct list_head list;
|
||||||
rq_end_io_fn *saved_end_io;
|
rq_end_io_fn *saved_end_io;
|
||||||
} flush;
|
} flush;
|
||||||
};
|
|
||||||
|
|
||||||
union {
|
union {
|
||||||
struct __call_single_data csd;
|
struct __call_single_data csd;
|
||||||
|
|
Loading…
Reference in new issue