Merge tag 'io_uring-5.17-2022-02-23' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:

 - Add a conditional schedule point in io_add_buffers() (Eric)

 - Fix for a quiesce speedup merged in this release (Dylan)

 - Don't convert to jiffies for event timeout waiting; it's way too
   coarse when we accept a timespec as input (me)

* tag 'io_uring-5.17-2022-02-23' of git://git.kernel.dk/linux-block:
  io_uring: disallow modification of rsrc_data during quiesce
  io_uring: don't convert to jiffies for waiting on timeouts
  io_uring: add a schedule point in io_add_buffers()
This commit is contained in:
Linus Torvalds 2022-02-24 11:08:15 -08:00
Parents: 6c528f34ca 80912cef18
Commit: 3a5f59b17f
1 changed file with 17 additions and 7 deletions

@@ -4567,6 +4567,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
                 } else {
                         list_add_tail(&buf->list, &(*head)->list);
                 }
+                cond_resched();
         }
 
         return i ? i : -ENOMEM;
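For context on the first fix: IORING_OP_PROVIDE_BUFFERS lets userspace register a large number of buffers in a single call, and without a scheduling point this allocation loop could hog the CPU on non-preemptible kernels. Below is a minimal sketch of the idiom, using hypothetical my_item/my_fill_list names rather than the real io_uring structures:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct my_item {
        struct list_head list;
};

/*
 * Sketch only: a long process-context loop gives the scheduler a
 * voluntary preemption point once per iteration.  cond_resched() is
 * nearly free when no reschedule is pending, so calling it per item
 * is fine even for huge batches.
 */
static int my_fill_list(struct list_head *head, unsigned int nitems)
{
        unsigned int i;

        for (i = 0; i < nitems; i++) {
                struct my_item *it = kmalloc(sizeof(*it), GFP_KERNEL);

                if (!it)
                        break;
                list_add_tail(&it->list, head);
                cond_resched();         /* the schedule point added above */
        }
        /* mirror io_add_buffers(): partial success wins, none is -ENOMEM */
        return i ? i : -ENOMEM;
}
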
@@ -7693,7 +7694,7 @@ static int io_run_task_work_sig(void)
 /* when returns >0, the caller should retry */
 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
                                           struct io_wait_queue *iowq,
-                                          signed long *timeout)
+                                          ktime_t timeout)
 {
         int ret;
 
@@ -7705,8 +7706,9 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
         if (test_bit(0, &ctx->check_cq_overflow))
                 return 1;
 
-        *timeout = schedule_timeout(*timeout);
-        return !*timeout ? -ETIME : 1;
+        if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
+                return -ETIME;
+        return 1;
 }
 
 /*
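This hunk carries the heart of the timeout change: schedule_timeout() sleeps for a relative count of jiffies, while schedule_hrtimeout() with HRTIMER_MODE_ABS sleeps until an absolute nanosecond-resolution point on the monotonic clock and returns 0 once that deadline has passed. A condensed sketch of the same shape, with a hypothetical my_wait_until() wrapper:

#include <linux/errno.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * Sketch only: sleep until an absolute deadline.  Returns -ETIME if
 * the deadline passed while we slept, or 1 if we were woken early and
 * the caller should re-check its condition, mirroring the >0 retry
 * convention of io_cqring_wait_schedule().
 */
static int my_wait_until(ktime_t deadline)
{
        set_current_state(TASK_INTERRUPTIBLE);
        if (!schedule_hrtimeout(&deadline, HRTIMER_MODE_ABS))
                return -ETIME;
        return 1;
}

Because the deadline is absolute, a retry loop like the one in io_cqring_wait() below can call this repeatedly without recomputing how much time remains.
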
@@ -7719,7 +7721,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 {
         struct io_wait_queue iowq;
         struct io_rings *rings = ctx->rings;
-        signed long timeout = MAX_SCHEDULE_TIMEOUT;
+        ktime_t timeout = KTIME_MAX;
         int ret;
 
         do {
@@ -7735,7 +7737,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
                 if (get_timespec64(&ts, uts))
                         return -EFAULT;
-                timeout = timespec64_to_jiffies(&ts);
+                timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
         }
 
         if (sig) {
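This is where the coarseness bug lived: timespec64_to_jiffies() rounds the request up to whole scheduler ticks, so at HZ=250 even a 100-microsecond timeout waits at least 4 ms. The replacement converts the user's relative timespec to nanoseconds and adds the current monotonic time, producing the absolute deadline consumed above. A sketch of that conversion, with a hypothetical helper name:

#include <linux/errno.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/timekeeping.h>

/*
 * Sketch only: turn a user-supplied relative timespec into an
 * absolute CLOCK_MONOTONIC deadline suitable for HRTIMER_MODE_ABS.
 */
static int my_deadline_from_user(const struct __kernel_timespec __user *uts,
                                 ktime_t *deadline)
{
        struct timespec64 ts;

        if (get_timespec64(&ts, uts))
                return -EFAULT;         /* bad user pointer */
        /* deadline = now + requested interval, in nanoseconds */
        *deadline = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
        return 0;
}
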
@@ -7767,7 +7769,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                 }
                 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
                                           TASK_INTERRUPTIBLE);
-                ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
+                ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
                 finish_wait(&ctx->cq_wait, &iowq.wq);
                 cond_resched();
         } while (ret > 0);
@@ -7924,7 +7926,15 @@ static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
                 ret = wait_for_completion_interruptible(&data->done);
                 if (!ret) {
                         mutex_lock(&ctx->uring_lock);
-                        break;
+                        if (atomic_read(&data->refs) > 0) {
+                                /*
+                                 * it has been revived by another thread while
+                                 * we were unlocked
+                                 */
+                                mutex_unlock(&ctx->uring_lock);
+                        } else {
+                                break;
+                        }
                 }
 
                 atomic_inc(&data->refs);
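The quiesce fix is a textbook instance of a general locking rule: a condition observed before dropping a lock must be re-validated after reacquiring it. Here the completion fires when the refcount drains to zero, but another thread may revive the data (take new references) in the window where uring_lock is not held. A stripped-down sketch of the recheck-under-lock loop, with a hypothetical my_data type in place of io_rsrc_data:

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/mutex.h>

struct my_data {
        atomic_t refs;                  /* drained when this hits zero */
        struct completion done;         /* completed by the last ref drop */
};

/*
 * Sketch only: wait for 'refs' to drain without holding 'lock' while
 * sleeping.  Because other threads can revive the object whenever the
 * lock is dropped, the drained condition must be re-checked under the
 * lock before teardown may proceed.
 */
static int my_quiesce(struct my_data *data, struct mutex *lock)
{
        int ret;

        for (;;) {
                mutex_unlock(lock);
                ret = wait_for_completion_interruptible(&data->done);
                mutex_lock(lock);
                if (ret)
                        return ret;     /* interrupted by a signal */
                if (atomic_read(&data->refs) == 0)
                        return 0;       /* still drained: safe to tear down */
                /* revived while we were unlocked: arm and wait again */
                reinit_completion(&data->done);
        }
}

The real io_rsrc_ref_quiesce() also re-takes its own reference and re-arms the quiesce state on the retry path; the essential change in this release is the atomic_read() recheck under uring_lock.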