io_uring/io-wq: limit retrying worker initialisation
commit 0453aad676ff99787124b9b3af4a5f59fbe808e2 upstream.
If io-wq worker creation fails, we retry it by queueing up a task_work.
task_work is needed because it should be done from the user process
context. The problem is that retries are not limited, and if queueing a
task_work is the reason for the failure, we might get into an infinite
loop.
It doesn't seem to happen now but it would with the following patch
executing task_work in the freezer's loop. For now, arbitrarily limit the
number of attempts to create a worker.
Cc: stable@vger.kernel.org
Fixes: 3146cba99a ("io-wq: make worker creation resilient against signals")
Reported-by: Julian Orth <ju.orth@gmail.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8280436925db88448c7c85c6656edee1a43029ea.1720634146.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Parent
e34191cce3
Commit
5c402f323f
|
@ -19,6 +19,7 @@
|
||||||
#include "io-wq.h"
|
#include "io-wq.h"
|
||||||
|
|
||||||
#define WORKER_IDLE_TIMEOUT (5 * HZ)
|
#define WORKER_IDLE_TIMEOUT (5 * HZ)
|
||||||
|
#define WORKER_INIT_LIMIT 3
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
IO_WORKER_F_UP = 1, /* up and active */
|
IO_WORKER_F_UP = 1, /* up and active */
|
||||||
|
@ -54,6 +55,7 @@ struct io_worker {
|
||||||
unsigned long create_state;
|
unsigned long create_state;
|
||||||
struct callback_head create_work;
|
struct callback_head create_work;
|
||||||
int create_index;
|
int create_index;
|
||||||
|
int init_retries;
|
||||||
|
|
||||||
union {
|
union {
|
||||||
struct rcu_head rcu;
|
struct rcu_head rcu;
|
||||||
|
@ -732,7 +734,7 @@ static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool io_should_retry_thread(long err)
|
static inline bool io_should_retry_thread(struct io_worker *worker, long err)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
* Prevent perpetual task_work retry, if the task (or its group) is
|
* Prevent perpetual task_work retry, if the task (or its group) is
|
||||||
|
@ -740,6 +742,8 @@ static inline bool io_should_retry_thread(long err)
|
||||||
*/
|
*/
|
||||||
if (fatal_signal_pending(current))
|
if (fatal_signal_pending(current))
|
||||||
return false;
|
return false;
|
||||||
|
if (worker->init_retries++ >= WORKER_INIT_LIMIT)
|
||||||
|
return false;
|
||||||
|
|
||||||
switch (err) {
|
switch (err) {
|
||||||
case -EAGAIN:
|
case -EAGAIN:
|
||||||
|
@ -766,7 +770,7 @@ static void create_worker_cont(struct callback_head *cb)
|
||||||
io_init_new_worker(wqe, worker, tsk);
|
io_init_new_worker(wqe, worker, tsk);
|
||||||
io_worker_release(worker);
|
io_worker_release(worker);
|
||||||
return;
|
return;
|
||||||
} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
|
} else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
|
||||||
struct io_wqe_acct *acct = io_wqe_get_acct(worker);
|
struct io_wqe_acct *acct = io_wqe_get_acct(worker);
|
||||||
|
|
||||||
atomic_dec(&acct->nr_running);
|
atomic_dec(&acct->nr_running);
|
||||||
|
@ -831,7 +835,7 @@ fail:
|
||||||
tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
|
tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
|
||||||
if (!IS_ERR(tsk)) {
|
if (!IS_ERR(tsk)) {
|
||||||
io_init_new_worker(wqe, worker, tsk);
|
io_init_new_worker(wqe, worker, tsk);
|
||||||
} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
|
} else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
|
||||||
kfree(worker);
|
kfree(worker);
|
||||||
goto fail;
|
goto fail;
|
||||||
} else {
|
} else {
|
||||||
|
|
Loading…
Reference in new issue