io_uring: Fix mm_fault with READ/WRITE_FIXED
Commit fb5ccc9878 ("io_uring: Fix broken links with offloading")
introduced a potential performance regression by unconditionally taking
mm even for READ/WRITE_FIXED operations. Bring back the logic that
handles it. mm-faulted requests will go through the generic submission
path, thus honoring links and drains, but will fail further on the
req->has_user check.

Fixes: fb5ccc9878 ("io_uring: Fix broken links with offloading")
Cc: stable@vger.kernel.org # v5.4
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: fa45622808
Commit: 95a1b3ff9a
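For context: READ_FIXED/WRITE_FIXED operate on buffers that were registered and pinned up front, so the submission path can tell from the SQE opcode alone whether the submitting task's mm is needed. Below is a minimal sketch of that opcode check, modeled on the io_sqe_needs_user() helper the diff calls; treat it as an illustration of the idea, not the verbatim fs/io_uring.c source.

/*
 * Sketch (assumed, simplified): only non-FIXED opcodes need the
 * submitting task's mm. FIXED reads/writes target buffers that were
 * pinned when they were registered, so no access to the user's
 * address space is required at submission time.
 */
static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
{
	u8 opcode = READ_ONCE(sqe->opcode);

	return !(opcode == IORING_OP_READ_FIXED ||
		 opcode == IORING_OP_WRITE_FIXED);
}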
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2726,13 +2726,14 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
 }
 
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
-			  bool has_user, bool mm_fault)
+			  struct mm_struct **mm)
 {
 	struct io_submit_state state, *statep = NULL;
 	struct io_kiocb *link = NULL;
 	struct io_kiocb *shadow_req = NULL;
 	bool prev_was_link = false;
 	int i, submitted = 0;
+	bool mm_fault = false;
 
 	if (nr > IO_PLUG_THRESHOLD) {
 		io_submit_state_start(&state, ctx, nr);
@@ -2745,6 +2746,14 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		if (!io_get_sqring(ctx, &s))
 			break;
 
+		if (io_sqe_needs_user(s.sqe) && !*mm) {
+			mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
+			if (!mm_fault) {
+				use_mm(ctx->sqo_mm);
+				*mm = ctx->sqo_mm;
+			}
+		}
+
 		/*
 		 * If previous wasn't linked and we have a linked command,
 		 * that's the end of the chain. Submit the previous link.
@@ -2768,17 +2777,12 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		}
 
 out:
-		if (unlikely(mm_fault)) {
-			io_cqring_add_event(ctx, s.sqe->user_data,
-						-EFAULT);
-		} else {
-			s.has_user = has_user;
-			s.in_async = true;
-			s.needs_fixed_file = true;
-			trace_io_uring_submit_sqe(ctx, true, true);
-			io_submit_sqe(ctx, &s, statep, &link);
-			submitted++;
-		}
+		s.has_user = *mm != NULL;
+		s.in_async = true;
+		s.needs_fixed_file = true;
+		trace_io_uring_submit_sqe(ctx, true, true);
+		io_submit_sqe(ctx, &s, statep, &link);
+		submitted++;
 	}
 
 	if (link)
@@ -2805,7 +2809,6 @@ static int io_sq_thread(void *data)
 
 	timeout = inflight = 0;
 	while (!kthread_should_park()) {
-		bool mm_fault = false;
 		unsigned int to_submit;
 
 		if (inflight) {
@@ -2890,18 +2893,8 @@ static int io_sq_thread(void *data)
 			ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
 		}
 
-		/* Unless all new commands are FIXED regions, grab mm */
-		if (!cur_mm) {
-			mm_fault = !mmget_not_zero(ctx->sqo_mm);
-			if (!mm_fault) {
-				use_mm(ctx->sqo_mm);
-				cur_mm = ctx->sqo_mm;
-			}
-		}
-
 		to_submit = min(to_submit, ctx->sq_entries);
-		inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL,
-						mm_fault);
+		inflight += io_submit_sqes(ctx, to_submit, &cur_mm);
 
 		/* Commit SQ ring head once we've consumed all SQEs */
 		io_commit_sqring(ctx);
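Note on the failure path: with the mm grab moved inside the submission loop, a failed mmget_not_zero() no longer completes the request early with -EFAULT. Instead *mm stays NULL, s.has_user ends up false, and the request still flows through io_submit_sqe(), so link and drain bookkeeping stays consistent. A hypothetical sketch of the later consumer-side check the commit message refers to (its exact placement in fs/io_uring.c is assumed here):

/*
 * Assumed sketch: a request submitted without a usable mm must not
 * touch user memory, so the read/write path fails it with -EFAULT
 * before dereferencing any user pointers.
 */
if (!req->has_user)
	return -EFAULT;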