io_uring: extend task put optimisations

Now with IRQ completions done via IRQ, almost all request freeing is done
from the context of the submitter task, so it makes sense to extend the
task_put optimisation from io_req_free_batch_finish() to cover all cases,
including task_work, by moving it into io_put_task().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/824a7cbd745ddeee4a0f3ff85c558a24fd005872.1629302453.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Pavel Begunkov, 2021-08-18 17:01:43 +01:00, committed by Jens Axboe
Parent: 316319e82f
Commit: e98e49b2bb
1 changed file, 9 additions and 7 deletions


--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1623,11 +1623,15 @@ static inline void io_put_task(struct task_struct *task, int nr)
 {
 	struct io_uring_task *tctx = task->io_uring;
 
-	percpu_counter_sub(&tctx->inflight, nr);
-	if (unlikely(atomic_read(&tctx->in_idle)))
-		wake_up(&tctx->wait);
-	put_task_struct_many(task, nr);
+	if (likely(task == current)) {
+		tctx->cached_refs += nr;
+	} else {
+		percpu_counter_sub(&tctx->inflight, nr);
+		if (unlikely(atomic_read(&tctx->in_idle)))
+			wake_up(&tctx->wait);
+		put_task_struct_many(task, nr);
+	}
 }
 
 static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 				     long res, unsigned int cflags)
@@ -2171,9 +2175,7 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
 {
 	if (rb->ctx_refs)
 		percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
-	if (rb->task == current)
-		current->io_uring->cached_refs += rb->task_refs;
-	else if (rb->task)
+	if (rb->task)
 		io_put_task(rb->task, rb->task_refs);
 }
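
For readers skimming the diff, below is a minimal user-space sketch of the fast
path this patch extends. It is not the kernel code: struct tctx, inflight,
cached_refs, usage, current_task and put_task() are illustrative stand-ins for
io_uring_task, its per-cpu inflight counter, the task_struct refcount, current
and io_put_task(). The point it models is that when the task dropping the
references is the submitter itself, the refs go into a plain per-task counter
and no atomic operations are touched.

/*
 * Minimal user-space sketch of the same-task fast path (illustrative
 * names only, not the kernel's symbols). Build with: cc -std=c11
 */
#include <stdatomic.h>
#include <stdio.h>

struct tctx {
	atomic_long inflight;	/* shared counter, atomic updates        */
	long cached_refs;	/* private to the submitter, plain +=    */
};

struct task {
	struct tctx io;
	atomic_int usage;	/* stand-in for the task_struct refcount */
};

static struct task *current_task;	/* stand-in for "current" */

/* Modeled after io_put_task(): same-task puts skip all atomics. */
static void put_task(struct task *task, int nr)
{
	struct tctx *tctx = &task->io;

	if (task == current_task) {
		/* Fast path: refs are parked and reclaimed in batch later. */
		tctx->cached_refs += nr;
	} else {
		/* Slow path: pay for the shared counter and the refcount. */
		atomic_fetch_sub(&tctx->inflight, nr);
		atomic_fetch_sub(&task->usage, nr);
	}
}

int main(void)
{
	static struct task t;

	atomic_init(&t.io.inflight, 10);
	atomic_init(&t.usage, 100);
	current_task = &t;

	put_task(&t, 3);	/* submitter task: only cached_refs moves */
	printf("cached=%ld inflight=%ld usage=%d\n",
	       t.io.cached_refs,
	       (long)atomic_load(&t.io.inflight),
	       atomic_load(&t.usage));
	return 0;
}

The design point is that cached_refs is owned by the submitter task, so the
common free path is a plain addition; the parked references are folded back
into the shared inflight accounting and the task refcount in batch, outside
this diff.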