io_uring: kill io_wq_current_is_worker() in iopoll

Don't base the locking decision on io_wq_current_is_worker(): it's
inconsistent with the rest of the code and the check is expensive.
Use issue_flags instead.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/7546d5a58efa4360173541c6fe02ee6b8c7b4ea7.1634314022.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Pavel Begunkov 2021-10-15 17:09:12 +01:00
Committed by: Jens Axboe
Parent: 9983028e76
Commit: 9882131cd9
1 changed file: 5 additions and 5 deletions
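
For readers skimming the diff below, here is a minimal standalone sketch of
the pattern in plain C. The pthread mutex and the ring_ctx/iopoll placeholder
names are stand-ins, not kernel API; only IO_URING_F_NONBLOCK and the
need_lock expression mirror the actual patch.

/*
 * Sketch only: flag-based locking decision. Non-blocking issue happens
 * with uring_lock already held by the submitter; a blocking issue from
 * a worker does not hold it. The flags the caller passes down anyway
 * thus encode whether we must lock, with no need to query the current
 * task's worker state on every call.
 */
#include <stdbool.h>
#include <pthread.h>

#define IO_URING_F_NONBLOCK (1U << 0)   /* issuer must not block */

struct ring_ctx {
	pthread_mutex_t uring_lock;     /* protects the iopoll list */
};

static void iopoll_req_issued(struct ring_ctx *ctx, unsigned int issue_flags)
{
	const bool need_lock = !(issue_flags & IO_URING_F_NONBLOCK);

	if (need_lock)
		pthread_mutex_lock(&ctx->uring_lock);

	/* ... queue the request on the ctx's iopoll list ... */

	if (need_lock)
		pthread_mutex_unlock(&ctx->uring_lock);
}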

@@ -2713,13 +2713,13 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
  * find it from a io_do_iopoll() thread before the issuer is done
  * accessing the kiocb cookie.
  */
-static void io_iopoll_req_issued(struct io_kiocb *req)
+static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	const bool in_async = io_wq_current_is_worker();
+	const bool need_lock = !(issue_flags & IO_URING_F_NONBLOCK);
 
 	/* workqueue context doesn't hold uring_lock, grab it now */
-	if (unlikely(in_async))
+	if (unlikely(need_lock))
 		mutex_lock(&ctx->uring_lock);
 
 	/*
@@ -2747,7 +2747,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
 	else
 		wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
 
-	if (unlikely(in_async)) {
+	if (unlikely(need_lock)) {
 		/*
 		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handle
 		 * in sq thread task context or in io worker task context. If
@@ -6715,7 +6715,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 		return ret;
 	/* If the op doesn't have a file, we're not polling for it */
 	if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file)
-		io_iopoll_req_issued(req);
+		io_iopoll_req_issued(req, issue_flags);
 
 	return 0;
 }
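
And a hedged sketch of the caller side, to show why the flag test matches the
old worker check: issue_sqe, queue_sqe_inline and wq_submit_work are
illustrative stand-ins for io_issue_sqe(), io_queue_sqe() and
io_wq_submit_work(); only IO_URING_F_NONBLOCK comes from the patch.

#define IO_URING_F_NONBLOCK (1U << 0)

struct req { int opcode; };

static int issue_sqe(struct req *req, unsigned int issue_flags)
{
	/* ... dispatch by opcode; on IOPOLL rings, finish with
	 * iopoll_req_issued(req, issue_flags) as in the diff above ... */
	(void)req;
	(void)issue_flags;
	return 0;
}

/* inline submission path: uring_lock is already held, must not block */
static int queue_sqe_inline(struct req *req)
{
	return issue_sqe(req, IO_URING_F_NONBLOCK);
}

/* io-wq worker path: may block, does not hold uring_lock */
static int wq_submit_work(struct req *req)
{
	return issue_sqe(req, 0);
}

Each path already knows its own locking state when it builds issue_flags, so
deriving need_lock from the flags is free, whereas io_wq_current_is_worker()
had to inspect the current task's worker state on every polled submission.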