io_uring: pass in EPOLL_URING_WAKE for eventfd signaling and wakeups

[ Upstream commit 4464853277 ]

Pass in EPOLL_URING_WAKE when signaling eventfd or doing poll-related
wakeups, so that we can check for a circular event dependency between
eventfd and epoll. If this flag is set when our wakeup handlers are
called, then we know we have a dependency that needs to terminate
multishot requests.

eventfd and epoll are the only such possible dependencies.

Cc: stable@vger.kernel.org # 6.0
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
Jens Axboe 2022-12-23 07:04:49 -07:00, committed by Greg Kroah-Hartman
Parent 77baf39227
Commit ccf06b5a98
1 changed file: 20 additions and 7 deletions
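For context, the dependency this patch guards against looks like this from
userspace: the ring's registered eventfd is watched by an epoll instance,
and that epoll fd is in turn polled by a multishot request on the same
ring, so every CQE signals the eventfd, which wakes epoll, which completes
the poll, which posts another CQE, and so on. A minimal sketch of that
setup, assuming liburing (>= 2.1 for io_uring_prep_poll_multishot());
error handling omitted:

#include <liburing.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct epoll_event ev = { .events = EPOLLIN };
	int efd, epfd;

	io_uring_queue_init(8, &ring, 0);

	/* CQE completions on the ring signal this eventfd... */
	efd = eventfd(0, EFD_NONBLOCK);
	io_uring_register_eventfd(&ring, efd);

	/* ...the eventfd is watched by an epoll instance... */
	epfd = epoll_create1(0);
	ev.data.fd = efd;
	epoll_ctl(epfd, EPOLL_CTL_ADD, efd, &ev);

	/*
	 * ...and the epoll fd is polled, multishot, by the same ring:
	 * each CQE -> eventfd -> epoll readable -> poll CQE -> ...
	 */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_multishot(sqe, epfd, EPOLLIN);
	io_uring_submit(&ring);

	return 0;
}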

@@ -1629,13 +1629,15 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
-		wake_up_all(&ctx->cq_wait);
+		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
+			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	if (io_should_trigger_evfd(ctx))
-		eventfd_signal(ctx->cq_ev_fd, 1);
+		eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
	if (waitqueue_active(&ctx->poll_wait))
-		wake_up_interruptible(&ctx->poll_wait);
+		__wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
+			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}
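Note that __wake_up() with nr_exclusive == 0 keeps the wake-all behaviour
of the wake_up_all()/wake_up_interruptible() calls it replaces; the only
change is that each waiter's callback now receives a wakeup key instead of
NULL. A sketch of the receiving side (example_poll_wake() is hypothetical,
but key_to_poll() is the standard <linux/poll.h> helper, and this is the
same check hunk 3 below adds to io_poll_wake()):

#include <linux/poll.h>
#include <linux/wait.h>

/* Illustrative wait-queue callback: the shape of what __wake_up() invokes */
static int example_poll_wake(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *key)
{
	__poll_t mask = key_to_poll(key);

	if (mask & EPOLL_URING_WAKE) {
		/*
		 * Wakeup came from io_uring posting a CQE; break the cycle,
		 * e.g. by forcing one-shot semantics as io_poll_wake() does.
		 */
	}
	return 1;
}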
@@ -1645,12 +1647,14 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (waitqueue_active(&ctx->cq_wait))
-			wake_up_all(&ctx->cq_wait);
+			__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
+				  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
	}
	if (io_should_trigger_evfd(ctx))
-		eventfd_signal(ctx->cq_ev_fd, 1);
+		eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
	if (waitqueue_active(&ctx->poll_wait))
-		wake_up_interruptible(&ctx->poll_wait);
+		__wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
+			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}
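eventfd_signal_mask() comes from the companion eventfd/eventpoll patch in
this series: unlike eventfd_signal(), it ORs caller-supplied bits into the
poll mask that eventfd uses to wake its own waiters, so EPOLL_URING_WAKE
propagates through an attached epoll instance. A simplified sketch of that
helper, not the verbatim kernel code (the real version also maintains a
per-task flag to guard eventfd recursion):

#include <linux/eventfd.h>
#include <linux/wait.h>

__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		/* the extra mask bits ride along with EPOLLIN to waiters */
		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
	return n;
}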
@@ -5636,8 +5640,17 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
	if (mask && !(mask & poll->events))
		return 0;

-	if (io_poll_get_ownership(req))
+	if (io_poll_get_ownership(req)) {
+		/*
+		 * If we trigger a multishot poll off our own wakeup path,
+		 * disable multishot as there is a circular dependency between
+		 * CQ posting and triggering the event.
+		 */
+		if (mask & EPOLL_URING_WAKE)
+			poll->events |= EPOLLONESHOT;
+
		__io_poll_execute(req, mask);
+	}
	return 1;
}
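Userspace sees this downgrade through the existing multishot contract: a
completion that arrives without IORING_CQE_F_MORE set means the request
will not post again and must be re-armed. Continuing the liburing sketch
from above (same hypothetical ring, sqe, and epfd):

/* If F_MORE is clear, the multishot poll has terminated: re-arm it. */
struct io_uring_cqe *cqe;

io_uring_wait_cqe(&ring, &cqe);
if (!(cqe->flags & IORING_CQE_F_MORE)) {
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_multishot(sqe, epfd, EPOLLIN);
	io_uring_submit(&ring);
}
io_uring_cqe_seen(&ring, cqe);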