io_uring: optimise batch completion

First, convert the rest of the iopoll bits to singly linked lists, and
replace the per-request list_add_tail() with splicing off a part of
the slist.

With that done, use io_free_batch_list() to put/free requests. The
main advantage is that io_free_batch_list() is now the only user of
struct req_batch and friends, so they can be inlined. The main
overhead there was the per-request call to the not-inlined
io_req_free_batch(), which is expensive.
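
For illustration only (not part of the patch): a minimal userspace sketch of
the idea, with made-up names (slist, snode, slist_cut_prefix, batch_free)
standing in for io_wq_work_list and friends. Keeping first/last pointers lets
the poll loop detach the completed prefix in O(1) and put/free it in a single
walk, instead of relinking each request with list_add_tail():

#include <stdio.h>
#include <stdlib.h>

struct snode {
	struct snode *next;
};

/* singly linked list with head and tail, in the spirit of io_wq_work_list */
struct slist {
	struct snode *first;
	struct snode *last;
};

struct req {
	struct snode node;	/* first member, so the cast back is valid */
	int id;
};

/* detach the prefix [list->first .. prev] as its own list in O(1) */
static struct slist slist_cut_prefix(struct slist *list, struct snode *prev)
{
	struct slist done = { .first = list->first, .last = prev };

	list->first = prev->next;
	if (!list->first)
		list->last = NULL;
	prev->next = NULL;
	return done;
}

/* put/free every request of a detached list in one walk */
static void batch_free(struct slist *list)
{
	struct snode *node = list->first;

	while (node) {
		struct req *req = (struct req *)node;

		/* fetch the next node before the request is freed */
		node = node->next;
		printf("freeing req %d\n", req->id);
		free(req);
	}
}

int main(void)
{
	struct slist list = { NULL, NULL };

	for (int i = 0; i < 4; i++) {
		struct req *req = calloc(1, sizeof(*req));

		req->id = i;
		if (list.last)
			list.last->next = &req->node;
		else
			list.first = &req->node;
		list.last = &req->node;
	}

	/* pretend polling found the first two requests completed */
	struct slist done = slist_cut_prefix(&list, list.first->next);

	batch_free(&done);	/* reqs 0 and 1, no per-node relinking */
	batch_free(&list);	/* cleanup of the rest */
	return 0;
}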

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/b37fc6d5954b241e025eead7ab92c6f44a42f229.1632516769.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov, 2021-09-24 21:59:52 +01:00, committed by Jens Axboe
Parent: b3fa03fd1b
Commit: f5ed3bcd5b
1 changed file with 10 additions and 28 deletions

fs/io_uring.c

@@ -2421,35 +2421,13 @@ static inline bool io_run_task_work(void)
 	return false;
 }
 
-/*
- * Find and free completed poll iocbs
- */
-static void io_iopoll_complete(struct io_ring_ctx *ctx, struct list_head *done)
-{
-	struct req_batch rb;
-	struct io_kiocb *req;
-
-	io_init_req_batch(&rb);
-	while (!list_empty(done)) {
-		req = list_first_entry(done, struct io_kiocb, inflight_entry);
-		list_del(&req->inflight_entry);
-
-		if (req_ref_put_and_test(req))
-			io_req_free_batch(&rb, req, &ctx->submit_state);
-	}
-
-	io_commit_cqring(ctx);
-	io_cqring_ev_posted_iopoll(ctx);
-	io_req_free_batch_finish(ctx, &rb);
-}
-
 static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 {
 	struct io_wq_work_node *pos, *start, *prev;
 	unsigned int poll_flags = BLK_POLL_NOSLEEP;
+	struct io_wq_work_list list;
 	DEFINE_IO_COMP_BATCH(iob);
 	int nr_events = 0;
-	LIST_HEAD(done);
 
 	/*
 	 * Only spin for completions if we don't have multiple devices hanging
@@ -2496,15 +2474,19 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		if (!smp_load_acquire(&req->iopoll_completed))
 			break;
 		__io_cqring_fill_event(ctx, req->user_data, req->result,
-					io_put_rw_kbuf(req));
-		list_add_tail(&req->inflight_entry, &done);
+				       io_put_rw_kbuf(req));
 		nr_events++;
 	}
 
+	if (unlikely(!nr_events))
+		return 0;
+
+	io_commit_cqring(ctx);
+	io_cqring_ev_posted_iopoll(ctx);
+	list.first = start ? start->next : ctx->iopoll_list.first;
+	list.last = prev;
 	wq_list_cut(&ctx->iopoll_list, prev, start);
-	if (nr_events)
-		io_iopoll_complete(ctx, &done);
-
+	io_free_batch_list(ctx, &list);
 	return nr_events;
 }
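
For context, a hedged sketch of the io_free_batch_list() helper the new hunk
calls, reconstructed from the series description rather than quoted from the
tree (the exact body and field names, e.g. comp_list, may differ): it walks
the spliced list once and feeds each request whose last reference drops into
the req_batch machinery, which after this patch has no other users and can
therefore be inlined.

static void io_free_batch_list(struct io_ring_ctx *ctx,
			       struct io_wq_work_list *list)
	__must_hold(&ctx->uring_lock)
{
	struct io_wq_work_node *node = list->first;
	struct req_batch rb;

	io_init_req_batch(&rb);
	do {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    comp_list);

		/* fetch the next node before the request may be freed */
		node = req->comp_list.next;
		if (req_ref_put_and_test(req))
			io_req_free_batch(&rb, req, &ctx->submit_state);
	} while (node);
	io_req_free_batch_finish(ctx, &rb);
}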