io_uring: correct fill events helpers types
The CQE result is a 32-bit integer, so the functions generating CQEs should take an int (s32) rather than a long. Convert io_cqring_fill_event() and the other completion helpers accordingly.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/7ca6f15255e9117eae28adcac272744cae29b113.1633373302.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Parent: eb6e6f0690
Commit: 54daa9b2d8
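For reference, the completion queue entry these helpers fill is the userspace-visible struct from include/uapi/linux/io_uring.h, whose result field is only 32 bits wide; quoting its layout as of this series:

	/* IO completion data structure (Completion Queue Entry) */
	struct io_uring_cqe {
		__u64	user_data;	/* sqe->data submission passed back */
		__s32	res;		/* result code for this event */
		__u32	flags;
	};

Since cqe->res is an __s32, a long parameter was always narrowed at the final store anyway; taking s32/u32 in the helpers makes the actual width explicit at every call site.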
fs/io_uring.c
@@ -1072,7 +1072,7 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
 
 static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
-				 long res, unsigned int cflags);
+				 s32 res, u32 cflags);
 static void io_put_req(struct io_kiocb *req);
 static void io_put_req_deferred(struct io_kiocb *req);
 static void io_dismantle_req(struct io_kiocb *req);
@@ -1730,7 +1730,7 @@ static inline void io_get_task_refs(int nr)
 }
 
 static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
-				     long res, unsigned int cflags)
+				     s32 res, u32 cflags)
 {
 	struct io_overflow_cqe *ocqe;
 
@@ -1758,7 +1758,7 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 }
 
 static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
-					  long res, unsigned int cflags)
+					  s32 res, u32 cflags)
 {
 	struct io_uring_cqe *cqe;
 
@@ -1781,13 +1781,13 @@ static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data
 
 /* not as hot to bloat with inlining */
 static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
-					  long res, unsigned int cflags)
+					  s32 res, u32 cflags)
 {
 	return __io_cqring_fill_event(ctx, user_data, res, cflags);
 }
 
-static void io_req_complete_post(struct io_kiocb *req, long res,
-				 unsigned int cflags)
+static void io_req_complete_post(struct io_kiocb *req, s32 res,
+				 u32 cflags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
@@ -1816,8 +1816,8 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
 	io_cqring_ev_posted(ctx);
 }
 
-static inline void io_req_complete_state(struct io_kiocb *req, long res,
-					 unsigned int cflags)
+static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
+					 u32 cflags)
 {
 	req->result = res;
 	req->cflags = cflags;
@@ -1825,7 +1825,7 @@ static inline void io_req_complete_state(struct io_kiocb *req, long res,
 }
 
 static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
-				     long res, unsigned cflags)
+				     s32 res, u32 cflags)
 {
 	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
 		io_req_complete_state(req, res, cflags);
@@ -1833,12 +1833,12 @@ static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
 		io_req_complete_post(req, res, cflags);
 }
 
-static inline void io_req_complete(struct io_kiocb *req, long res)
+static inline void io_req_complete(struct io_kiocb *req, s32 res)
 {
 	__io_req_complete(req, 0, res, 0);
 }
 
-static void io_req_complete_failed(struct io_kiocb *req, long res)
+static void io_req_complete_failed(struct io_kiocb *req, s32 res)
 {
 	req_set_fail(req);
 	io_req_complete_post(req, res, 0);
@@ -2618,7 +2618,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 static void io_req_task_complete(struct io_kiocb *req, bool *locked)
 {
 	unsigned int cflags = io_put_rw_kbuf(req);
-	long res = req->result;
+	int res = req->result;
 
 	if (*locked) {
 		io_req_complete_state(req, res, cflags);
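To illustrate the narrowing the old long-based signatures papered over, here is a minimal standalone sketch (hypothetical userspace code, not from the kernel; the struct and fill_cqe() helper are made up for the demonstration):

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in for the 32-bit result field of a real CQE */
	struct cqe { uint64_t user_data; int32_t res; uint32_t flags; };

	/* old-style signature: takes a long (64-bit on LP64) */
	static void fill_cqe(struct cqe *cqe, uint64_t user_data, long res, unsigned int cflags)
	{
		cqe->user_data = user_data;
		cqe->res = res;		/* implicit conversion: upper 32 bits silently dropped */
		cqe->flags = cflags;
	}

	int main(void)
	{
		struct cqe c;

		fill_cqe(&c, 1, 0x100000001L, 0);	/* res = 2^32 + 1 */
		printf("res = %d\n", (int)c.res);	/* prints 1 on LP64: truncated */
		return 0;
	}

With an s32 parameter instead, any truncation happens visibly at the caller, where the compiler's conversion warnings can point at the right place.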