io_uring-5.7-2020-05-22
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl7IByoQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpigmEACWJoK7zk5OK2RhavpzOsb2SDu8nz/YAbUe
+R6tXAjwe4Z7lVVa+FW/fmN9/mQcjyYRIbbG564IFs5fe6hPUoOjUzHqvGTOFLHd
Fw8mjVKgWjWAE5GdoX6ATauLhVwwjnImej1PNfO/J5y29o0SQksP8MbM0eGuuNx1
piqxBj0/3h3YyPn1GeJmqxwwcsFhzHDqk7fbkfbQokZk+7SPiKpqWgJBa7AKSlNC
N0WTluT4UOummQZw1RFynPfA4cCuX6XHVgWAa9h7vrJHXigvuMWqLaHG+MBFqeKu
xD6PPnaCnMwcLRe4T2sJvtjxmNSdyr15Q2kGkIi/RhohSIn4u/y8jEA6wTprCP48
rDi30dn1o2LwUj2S1NO3YCOV8jIKWUguztEvKiAXmjf4KDZIDd4/OwrFsJdb4vg9
EuK86SEwXbvFHf9nu1M7pHlGThKfQi0CiK6C6M7Qb/kOthio72wwZ46gGkwLDk5z
DZWHymHBhQw/z1c20loX7pBvFIzLzbuYUThf23UegPzXVqqQfBkqs4BGFcOGuqy6
yfEYF/MAX/O/TQgm2dDQHrhl05AevLu/UQXMXZ8Ha6OrmlC4C2qu3Te/iZO8FUew
YIx5H5XmBh93McjpmJ8VCn7CjE+y/ufNTMdvm8WzCyAIfH40gfcyLangpre26QoJ
CCAARffXrQ==
=ZYUy
-----END PGP SIGNATURE-----

Merge tag 'io_uring-5.7-2020-05-22' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "A small collection of small fixes that should go into this release:

  - Two fixes for async request preparation (Pavel)

  - Busy clear fix for SQPOLL (Xiaoguang)

  - Don't use kiocb->private for O_DIRECT buf index, some file systems
    use it (Bijan)

  - Kill dead check in io_splice()

  - Ensure sqo_wait is initialized early

  - Cancel task_work if we fail adding to original process

  - Only add (IO)pollable requests to iopoll list, fixing a regression
    in this merge window"

* tag 'io_uring-5.7-2020-05-22' of git://git.kernel.dk/linux-block:
  io_uring: reset -EBUSY error when io sq thread is waken up
  io_uring: don't add non-IO requests to iopoll pending list
  io_uring: don't use kiocb.private to store buf_index
  io_uring: cancel work if task_work_add() fails
  io_uring: remove dead check in io_splice()
  io_uring: fix FORCE_ASYNC req preparation
  io_uring: don't prepare DRAIN reqs twice
  io_uring: initialize ctx->sqo_wait earlier
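As background for the buf_index change in this series ("io_uring: don't use
kiocb.private to store buf_index"), the sketch below shows the userspace side
of the path it touches: a fixed-buffer read through liburing, where the last
argument of io_uring_prep_read_fixed() is the registered-buffer index that the
kernel now carries in req->buf_index instead of kiocb.private. This is an
editor-added illustration, not part of the patches; the file path, buffer size,
and terse error handling are assumptions.

/* Minimal fixed-buffer read sketch (illustrative only; link with -luring). */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct iovec iov;
        int fd;

        if (io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        /* Register one 4 KiB buffer; it becomes registered-buffer index 0. */
        iov.iov_len = 4096;
        if (posix_memalign(&iov.iov_base, 4096, iov.iov_len) ||
            io_uring_register_buffers(&ring, &iov, 1) < 0)
                return 1;

        fd = open("/etc/hostname", O_RDONLY);   /* hypothetical input file */
        if (fd < 0)
                return 1;

        sqe = io_uring_get_sqe(&ring);
        /* Final argument is the buf_index selecting the registered buffer. */
        io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
        io_uring_submit(&ring);

        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                printf("read %d bytes\n", cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        return 0;
}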
fs/io_uring.c

@@ -619,6 +619,8 @@ struct io_kiocb {
 	bool			needs_fixed_file;
 	u8			opcode;
 
+	u16			buf_index;
+
 	struct io_ring_ctx	*ctx;
 	struct list_head	list;
 	unsigned int		flags;
@@ -924,6 +926,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 		goto err;
 
 	ctx->flags = p->flags;
+	init_waitqueue_head(&ctx->sqo_wait);
 	init_waitqueue_head(&ctx->cq_wait);
 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
 	init_completion(&ctx->completions[0]);
@@ -2100,9 +2103,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 	req->rw.addr = READ_ONCE(sqe->addr);
 	req->rw.len = READ_ONCE(sqe->len);
-	/* we own ->private, reuse it for the buffer index / buffer ID */
-	req->rw.kiocb.private = (void *) (unsigned long)
-				READ_ONCE(sqe->buf_index);
+	req->buf_index = READ_ONCE(sqe->buf_index);
 	return 0;
 }
 
@@ -2145,7 +2146,7 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
 	struct io_ring_ctx *ctx = req->ctx;
 	size_t len = req->rw.len;
 	struct io_mapped_ubuf *imu;
-	unsigned index, buf_index;
+	u16 index, buf_index;
 	size_t offset;
 	u64 buf_addr;
 
@@ -2153,7 +2154,7 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
 	if (unlikely(!ctx->user_bufs))
 		return -EFAULT;
 
-	buf_index = (unsigned long) req->rw.kiocb.private;
+	buf_index = req->buf_index;
 	if (unlikely(buf_index >= ctx->nr_user_bufs))
 		return -EFAULT;
 
@@ -2269,10 +2270,10 @@ static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
 					  bool needs_lock)
 {
 	struct io_buffer *kbuf;
-	int bgid;
+	u16 bgid;
 
 	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
-	bgid = (int) (unsigned long) req->rw.kiocb.private;
+	bgid = req->buf_index;
 	kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
 	if (IS_ERR(kbuf))
 		return kbuf;
@@ -2363,7 +2364,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 	}
 
 	/* buffer index only valid with fixed read/write, or buffer select */
-	if (req->rw.kiocb.private && !(req->flags & REQ_F_BUFFER_SELECT))
+	if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
 		return -EINVAL;
 
 	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
@@ -2771,11 +2772,8 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock)
 	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
 	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
 
-	if (sp->len) {
+	if (sp->len)
 		ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
-		if (force_nonblock && ret == -EAGAIN)
-			return -EAGAIN;
-	}
 
 	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
 	req->flags &= ~REQ_F_NEED_CLEANUP;
@@ -4137,12 +4135,14 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	req->result = mask;
 	init_task_work(&req->task_work, func);
 	/*
-	 * If this fails, then the task is exiting. Punt to one of the io-wq
-	 * threads to ensure the work gets run, we can't always rely on exit
-	 * cancelation taking care of this.
+	 * If this fails, then the task is exiting. When a task exits, the
+	 * work gets canceled, so just cancel this request as well instead
+	 * of executing it. We can't safely execute it anyway, as we may not
+	 * have the needed state needed for it anyway.
 	 */
 	ret = task_work_add(tsk, &req->task_work, true);
 	if (unlikely(ret)) {
+		WRITE_ONCE(poll->canceled, true);
 		tsk = io_wq_get_task(req->ctx->io_wq);
 		task_work_add(tsk, &req->task_work, true);
 	}
@@ -5013,12 +5013,13 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
 		return 0;
 
-	if (!req->io && io_alloc_async_ctx(req))
-		return -EAGAIN;
-
-	ret = io_req_defer_prep(req, sqe);
-	if (ret < 0)
-		return ret;
+	if (!req->io) {
+		if (io_alloc_async_ctx(req))
+			return -EAGAIN;
+		ret = io_req_defer_prep(req, sqe);
+		if (ret < 0)
+			return ret;
+	}
 
 	spin_lock_irq(&ctx->completion_lock);
 	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
@@ -5305,7 +5306,8 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	if (ret)
 		return ret;
 
-	if (ctx->flags & IORING_SETUP_IOPOLL) {
+	/* If the op doesn't have a file, we're not polling for it */
+	if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
 		const bool in_async = io_wq_current_is_worker();
 
 		if (req->result == -EAGAIN)
@@ -5606,9 +5608,15 @@ fail_req:
 			io_double_put_req(req);
 		}
 	} else if (req->flags & REQ_F_FORCE_ASYNC) {
-		ret = io_req_defer_prep(req, sqe);
-		if (unlikely(ret < 0))
-			goto fail_req;
+		if (!req->io) {
+			ret = -EAGAIN;
+			if (io_alloc_async_ctx(req))
+				goto fail_req;
+			ret = io_req_defer_prep(req, sqe);
+			if (unlikely(ret < 0))
+				goto fail_req;
+		}
+
 		/*
 		 * Never try inline submit of IOSQE_ASYNC is set, go straight
 		 * to async execution.
@@ -6024,6 +6032,7 @@ static int io_sq_thread(void *data)
 				finish_wait(&ctx->sqo_wait, &wait);
 
 				ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+				ret = 0;
 				continue;
 			}
 			finish_wait(&ctx->sqo_wait, &wait);
@@ -6837,7 +6846,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 {
 	int ret;
 
-	init_waitqueue_head(&ctx->sqo_wait);
 	mmgrab(current->mm);
 	ctx->sqo_mm = current->mm;
 
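The last two hunks pair with the SQPOLL-related fixes in the series:
ctx->sqo_wait is now initialized when the ring context is allocated rather than
in io_sq_offload_start(), and io_sq_thread() clears a stale -EBUSY after being
woken. For context, here is an editor-added, hedged sketch of the userspace
setup that relies on this path; the queue depth and idle timeout are arbitrary,
and on 5.7-era kernels SQPOLL generally also requires CAP_SYS_ADMIN and
registered files, which this fragment only notes as an assumption.

/* Illustrative SQPOLL setup (link with -luring); values are arbitrary. */
#include <liburing.h>
#include <string.h>

int main(void)
{
        struct io_uring_params p;
        struct io_uring ring;

        memset(&p, 0, sizeof(p));
        p.flags = IORING_SETUP_SQPOLL;
        p.sq_thread_idle = 2000;  /* ms before the SQ thread sleeps on sqo_wait */

        if (io_uring_queue_init_params(8, &ring, &p) < 0)
                return 1;

        /*
         * Assumed usage: register files and submit SQEs with IOSQE_FIXED_FILE;
         * once the SQ thread has gone idle, liburing's submit path sets
         * IORING_ENTER_SQ_WAKEUP, which wakes the thread sleeping on sqo_wait.
         */

        io_uring_queue_exit(&ring);
        return 0;
}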