io_uring: check for need to re-wait in polled async handling
We added this for just the regular poll requests in commit a6ba632d2c
("io_uring: retry poll if we got woken with non-matching mask"); we
should do the same for the poll handler used for pollable async
requests. Move the re-wait check and arm into a helper, and call it
from io_async_task_func() as well.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: 8835758085
Commit: 74ce6ce43d
@@ -4156,6 +4156,26 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	return 1;
 }
 
+static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
+	__acquires(&req->ctx->completion_lock)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	if (!req->result && !READ_ONCE(poll->canceled)) {
+		struct poll_table_struct pt = { ._key = poll->events };
+
+		req->result = vfs_poll(req->file, &pt) & poll->events;
+	}
+
+	spin_lock_irq(&ctx->completion_lock);
+	if (!req->result && !READ_ONCE(poll->canceled)) {
+		add_wait_queue(poll->head, &poll->wait);
+		return true;
+	}
+
+	return false;
+}
+
 static void io_async_task_func(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
@@ -4164,14 +4184,16 @@ static void io_async_task_func(struct callback_head *cb)
 
 	trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
 
-	WARN_ON_ONCE(!list_empty(&req->apoll->poll.wait.entry));
-
-	if (hash_hashed(&req->hash_node)) {
-		spin_lock_irq(&ctx->completion_lock);
-		hash_del(&req->hash_node);
-		spin_unlock_irq(&ctx->completion_lock);
+	if (io_poll_rewait(req, &apoll->poll)) {
+		spin_unlock_irq(&ctx->completion_lock);
+		return;
 	}
 
+	if (hash_hashed(&req->hash_node))
+		hash_del(&req->hash_node);
+
+	spin_unlock_irq(&ctx->completion_lock);
+
 	/* restore ->work in case we need to retry again */
 	memcpy(&req->work, &apoll->work, sizeof(req->work));
 
@@ -4436,18 +4458,11 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_poll_iocb *poll = &req->poll;
 
-	if (!req->result && !READ_ONCE(poll->canceled)) {
-		struct poll_table_struct pt = { ._key = poll->events };
-
-		req->result = vfs_poll(req->file, &pt) & poll->events;
-	}
-
-	spin_lock_irq(&ctx->completion_lock);
-	if (!req->result && !READ_ONCE(poll->canceled)) {
-		add_wait_queue(poll->head, &poll->wait);
+	if (io_poll_rewait(req, poll)) {
 		spin_unlock_irq(&ctx->completion_lock);
 		return;
 	}
+
 	hash_del(&req->hash_node);
 	io_poll_complete(req, req->result, 0);
 	req->flags |= REQ_F_COMP_LOCKED;
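For readers following the change outside the kernel tree, below is a small
userspace model of the re-wait pattern the new helper captures: re-poll the
file when woken with a non-matching mask, and re-arm the wait queue instead
of completing if still nothing matches. The types, the poll_rewait() name,
and the current_file_mask stub are simplified stand-ins invented for
illustration, not kernel API; the completion_lock handling is omitted.

/* Minimal sketch, not kernel code: hypothetical stand-ins for the kernel
 * types so the io_poll_rewait() control flow can be followed in isolation. */
#include <stdbool.h>
#include <stdio.h>

struct poll_iocb {
	int events;     /* mask the request is waiting for */
	int canceled;
	bool armed;     /* models add_wait_queue() having been called */
};

struct kiocb {
	int result;     /* mask seen at wakeup time */
	struct poll_iocb poll;
};

/* Stand-in for vfs_poll(): the mask the file would report right now. */
static int current_file_mask;

static bool poll_rewait(struct kiocb *req, struct poll_iocb *poll)
{
	/* Woken with no matching events: re-check the file directly. */
	if (!req->result && !poll->canceled)
		req->result = current_file_mask & poll->events;

	/* Still nothing and not canceled: re-arm and tell the caller to
	 * return early instead of completing the request. */
	if (!req->result && !poll->canceled) {
		poll->armed = true;
		return true;
	}
	return false;
}

int main(void)
{
	struct kiocb req = { .result = 0, .poll = { .events = 1 } };

	current_file_mask = 0;  /* spurious wakeup: nothing is ready yet */
	printf("rewait=%d (re-armed, handler bails)\n",
	       (int)poll_rewait(&req, &req.poll));

	current_file_mask = 1;  /* the event really is there this time */
	printf("rewait=%d (handler goes on to complete)\n",
	       (int)poll_rewait(&req, &req.poll));
	return 0;
}

As in the diff above, both callers follow the same convention: if the helper
reports a re-arm, they only drop the lock and return, leaving the request to
be woken again later.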