io_uring: add more likely/unlikely() annotations

Add two extra unlikely() annotations in io_submit_sqes() and one around
io_req_needs_clean() to help the compiler avoid extra jumps in hot
paths.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/88e087afe657e7660194353aada9b00f11d480f9.1633373302.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
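
For context, these hints boil down to GCC/Clang's __builtin_expect(). Below is a minimal sketch of how the kernel typically defines them, mirroring include/linux/compiler.h; the exact definitions can differ under config options such as branch profiling:

/*
 * Branch-prediction hints: tell the compiler which outcome is expected,
 * so it can lay out the expected path as straight-line fall-through code.
 */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

The !!(x) normalizes any truthy value to 0 or 1 before it is passed to __builtin_expect().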
Pavel Begunkov 2021-10-04 20:02:47 +01:00, committed by Jens Axboe
Parent 7e3709d576
Commit 51d48dab62
1 changed file with 3 additions and 3 deletions


@@ -1957,7 +1957,7 @@ static inline void io_dismantle_req(struct io_kiocb *req)
 {
 	unsigned int flags = req->flags;
 
-	if (io_req_needs_clean(req))
+	if (unlikely(io_req_needs_clean(req)))
 		io_clean_op(req);
 	if (!(flags & REQ_F_FIXED_FILE))
 		io_put_file(req->file);
@@ -7198,11 +7198,11 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	unsigned int entries = io_sqring_entries(ctx);
 	int submitted = 0;
 
-	if (!entries)
+	if (unlikely(!entries))
 		return 0;
 	/* make sure SQ entry isn't read before tail */
 	nr = min3(nr, ctx->sq_entries, entries);
-	if (!percpu_ref_tryget_many(&ctx->refs, nr))
+	if (unlikely(!percpu_ref_tryget_many(&ctx->refs, nr)))
 		return -EAGAIN;
 
 	io_get_task_refs(nr);
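
As a usage illustration, here is a small self-contained user-space sketch of the same pattern: wrapping a rarely-true early-exit check in a branch hint so the compiler keeps the common path as fall-through code. The ring_entries()/submit_demo() names are illustrative only, not kernel code:

#include <stdio.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Pretend the submission ring currently holds 16 entries. */
static unsigned int ring_entries(void)
{
	return 16;
}

static int submit_demo(unsigned int nr)
{
	unsigned int entries = ring_entries();

	if (unlikely(!entries))		/* an empty ring is the rare case */
		return 0;

	return nr < entries ? nr : entries;
}

int main(void)
{
	printf("submitted %d\n", submit_demo(8));
	return 0;
}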