2020-09-13 22:09:39 +03:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
|
|
#ifndef _LINUX_IO_URING_H
|
|
|
|
#define _LINUX_IO_URING_H
|
|
|
|
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/xarray.h>
|
2020-10-14 19:48:51 +03:00
|
|
|
|
2020-09-13 22:09:39 +03:00
|
|
|
#if defined(CONFIG_IO_URING)
|
2020-09-19 05:41:00 +03:00
|
|
|
struct sock *io_uring_get_socket(struct file *file);
|
2021-08-12 07:14:35 +03:00
|
|
|
void __io_uring_cancel(bool cancel_all);
|
2020-09-13 22:09:39 +03:00
|
|
|
void __io_uring_free(struct task_struct *tsk);
|
io_uring: add support for registering ring file descriptors
Lots of workloads use multiple threads, in which case the file table is
shared between them. This makes getting and putting the ring file
descriptor for each io_uring_enter(2) system call more expensive, as it
involves an atomic get and put for each call.
Similarly to how we allow registering normal file descriptors to avoid
this overhead, add support for an io_uring_register(2) API that allows
to register the ring fds themselves:
1) IORING_REGISTER_RING_FDS - takes an array of io_uring_rsrc_update
structs, and registers them with the task.
2) IORING_UNREGISTER_RING_FDS - takes an array of io_uring_src_update
structs, and unregisters them.
When a ring fd is registered, it is internally represented by an offset.
This offset is returned to the application, and the application then
uses this offset and sets IORING_ENTER_REGISTERED_RING for the
io_uring_enter(2) system call. This works just like using a registered
file descriptor, rather than a real one, in an SQE, where
IOSQE_FIXED_FILE gets set to tell io_uring that we're using an internal
offset/descriptor rather than a real file descriptor.
In initial testing, this provides a nice bump in performance for
threaded applications in real world cases where the batch count (eg
number of requests submitted per io_uring_enter(2) invocation) is low.
In a microbenchmark, submitting NOP requests, we see the following
increases in performance:
Requests per syscall Baseline Registered Increase
----------------------------------------------------------------
1 ~7030K ~8080K +15%
2 ~13120K ~14800K +13%
4 ~22740K ~25300K +11%
Co-developed-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-03-04 18:22:22 +03:00
|
|
|
void io_uring_unreg_ringfd(void);
|
2020-09-13 22:09:39 +03:00
|
|
|
|
2021-08-12 07:14:35 +03:00
|
|
|
static inline void io_uring_files_cancel(void)
|
2020-09-13 22:09:39 +03:00
|
|
|
{
|
io_uring: add support for registering ring file descriptors
Lots of workloads use multiple threads, in which case the file table is
shared between them. This makes getting and putting the ring file
descriptor for each io_uring_enter(2) system call more expensive, as it
involves an atomic get and put for each call.
Similarly to how we allow registering normal file descriptors to avoid
this overhead, add support for an io_uring_register(2) API that allows
to register the ring fds themselves:
1) IORING_REGISTER_RING_FDS - takes an array of io_uring_rsrc_update
structs, and registers them with the task.
2) IORING_UNREGISTER_RING_FDS - takes an array of io_uring_src_update
structs, and unregisters them.
When a ring fd is registered, it is internally represented by an offset.
This offset is returned to the application, and the application then
uses this offset and sets IORING_ENTER_REGISTERED_RING for the
io_uring_enter(2) system call. This works just like using a registered
file descriptor, rather than a real one, in an SQE, where
IOSQE_FIXED_FILE gets set to tell io_uring that we're using an internal
offset/descriptor rather than a real file descriptor.
In initial testing, this provides a nice bump in performance for
threaded applications in real world cases where the batch count (eg
number of requests submitted per io_uring_enter(2) invocation) is low.
In a microbenchmark, submitting NOP requests, we see the following
increases in performance:
Requests per syscall Baseline Registered Increase
----------------------------------------------------------------
1 ~7030K ~8080K +15%
2 ~13120K ~14800K +13%
4 ~22740K ~25300K +11%
Co-developed-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-03-04 18:22:22 +03:00
|
|
|
if (current->io_uring) {
|
|
|
|
io_uring_unreg_ringfd();
|
2021-08-12 07:14:35 +03:00
|
|
|
__io_uring_cancel(false);
|
io_uring: add support for registering ring file descriptors
Lots of workloads use multiple threads, in which case the file table is
shared between them. This makes getting and putting the ring file
descriptor for each io_uring_enter(2) system call more expensive, as it
involves an atomic get and put for each call.
Similarly to how we allow registering normal file descriptors to avoid
this overhead, add support for an io_uring_register(2) API that allows
to register the ring fds themselves:
1) IORING_REGISTER_RING_FDS - takes an array of io_uring_rsrc_update
structs, and registers them with the task.
2) IORING_UNREGISTER_RING_FDS - takes an array of io_uring_src_update
structs, and unregisters them.
When a ring fd is registered, it is internally represented by an offset.
This offset is returned to the application, and the application then
uses this offset and sets IORING_ENTER_REGISTERED_RING for the
io_uring_enter(2) system call. This works just like using a registered
file descriptor, rather than a real one, in an SQE, where
IOSQE_FIXED_FILE gets set to tell io_uring that we're using an internal
offset/descriptor rather than a real file descriptor.
In initial testing, this provides a nice bump in performance for
threaded applications in real world cases where the batch count (eg
number of requests submitted per io_uring_enter(2) invocation) is low.
In a microbenchmark, submitting NOP requests, we see the following
increases in performance:
Requests per syscall Baseline Registered Increase
----------------------------------------------------------------
1 ~7030K ~8080K +15%
2 ~13120K ~14800K +13%
4 ~22740K ~25300K +11%
Co-developed-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-03-04 18:22:22 +03:00
|
|
|
}
|
2020-09-13 22:09:39 +03:00
|
|
|
}
|
2021-04-11 03:46:27 +03:00
|
|
|
static inline void io_uring_task_cancel(void)
|
2020-09-13 22:09:39 +03:00
|
|
|
{
|
2021-08-12 07:14:34 +03:00
|
|
|
if (current->io_uring)
|
2021-08-12 07:14:35 +03:00
|
|
|
__io_uring_cancel(true);
|
2020-09-13 22:09:39 +03:00
|
|
|
}
|
|
|
|
static inline void io_uring_free(struct task_struct *tsk)
|
|
|
|
{
|
|
|
|
if (tsk->io_uring)
|
|
|
|
__io_uring_free(tsk);
|
|
|
|
}
|
|
|
|
#else
|
2020-09-19 05:41:00 +03:00
|
|
|
/* CONFIG_IO_URING=n stub: no io_uring files exist, so never a socket. */
static inline struct sock *io_uring_get_socket(struct file *file)
{
	return NULL;
}
|
2020-09-13 22:09:39 +03:00
|
|
|
/* CONFIG_IO_URING=n stub: nothing to cancel. */
static inline void io_uring_task_cancel(void)
{
}
|
2021-08-12 07:14:35 +03:00
|
|
|
/* CONFIG_IO_URING=n stub: nothing to cancel. */
static inline void io_uring_files_cancel(void)
{
}
|
|
|
|
/* CONFIG_IO_URING=n stub: no per-task io_uring state to free. */
static inline void io_uring_free(struct task_struct *tsk)
{
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#endif
|