dm rq: change ->rq_end_io calling conventions

Instead of returning either a DM_ENDIO_* constant or an error code, add
a new DM_ENDIO_DONE value that means: keep the errno as-is.  This allows
us to easily keep the existing error code in cases where we can't push
back, and it also prepares for the new block-level status codes with
strict type checking.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Christoph Hellwig authored on 2017-04-26 09:40:37 +02:00; committed by Mike Snitzer
Parent: b79f10eefd
Commit: 7ed8578a96
3 changed files with 20 additions and 11 deletions
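
For illustration only (not part of this patch): under the new convention, a
request-based target's ->rq_end_io hook has the shape sketched below.  The
target name and its -EAGAIN retry policy are made up for the example; only the
hook signature and the DM_ENDIO_* return values come from the code changed
here.

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/*
 * Hypothetical end_io hook: DM_ENDIO_DONE asks dm core to complete the
 * original request with whatever errno it already carries, while
 * DM_ENDIO_REQUEUE asks dm core to resubmit the original request.
 */
static int example_rq_end_io(struct dm_target *ti, struct request *clone,
			     int error, union map_info *map_context)
{
	/* Illustrative policy: push transient failures back for a retry. */
	if (error == -EAGAIN)
		return DM_ENDIO_REQUEUE;

	/* Complete with the existing error code (0 on success). */
	return DM_ENDIO_DONE;
}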

drivers/md/dm-mpath.c

@@ -1469,6 +1469,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 {
 	struct dm_mpath_io *mpio = get_mpio(map_context);
 	struct pgpath *pgpath = mpio->pgpath;
+	int r = DM_ENDIO_DONE;
 
 	/*
 	 * We don't queue any clone request inside the multipath target
@@ -1484,14 +1485,18 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 	if (error && !noretry_error(error)) {
 		struct multipath *m = ti->private;
 
-		error = DM_ENDIO_REQUEUE;
+		r = DM_ENDIO_REQUEUE;
 
 		if (pgpath)
 			fail_path(pgpath);
 
 		if (atomic_read(&m->nr_valid_paths) == 0 &&
-		    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
-			error = dm_report_EIO(m);
+		    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+			if (error == -EIO)
+				error = dm_report_EIO(m);
+			/* complete with the original error */
+			r = DM_ENDIO_DONE;
+		}
 	}
 
 	if (pgpath) {
@@ -1501,7 +1506,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
 	}
 
-	return error;
+	return r;
 }
 
 static int do_end_io_bio(struct multipath *m, struct bio *clone,

drivers/md/dm-rq.c

@@ -287,7 +287,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
 static void dm_done(struct request *clone, int error, bool mapped)
 {
-	int r = error;
+	int r = DM_ENDIO_DONE;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	dm_request_endio_fn rq_end_io = NULL;
@@ -298,7 +298,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
 			r = rq_end_io(tio->ti, clone, error, &tio->info);
 	}
 
-	if (unlikely(r == -EREMOTEIO)) {
+	if (unlikely(error == -EREMOTEIO)) {
 		if (req_op(clone) == REQ_OP_WRITE_SAME &&
 		    !clone->q->limits.max_write_same_sectors)
 			disable_write_same(tio->md);
@@ -307,16 +307,19 @@ static void dm_done(struct request *clone, int error, bool mapped)
 			disable_write_zeroes(tio->md);
 	}
 
-	if (r <= 0)
+	switch (r) {
+	case DM_ENDIO_DONE:
 		/* The target wants to complete the I/O */
-		dm_end_request(clone, r);
-	else if (r == DM_ENDIO_INCOMPLETE)
+		dm_end_request(clone, error);
+		break;
+	case DM_ENDIO_INCOMPLETE:
 		/* The target will handle the I/O */
 		return;
-	else if (r == DM_ENDIO_REQUEUE)
+	case DM_ENDIO_REQUEUE:
 		/* The target wants to requeue the I/O */
 		dm_requeue_original_request(tio, false);
-	else {
+		break;
+	default:
 		DMWARN("unimplemented target endio return value: %d", r);
 		BUG();
 	}

include/linux/device-mapper.h

@@ -593,6 +593,7 @@ extern struct ratelimit_state dm_ratelimit_state;
 /*
  * Definitions of return values from target end_io function.
  */
+#define DM_ENDIO_DONE		0
 #define DM_ENDIO_INCOMPLETE	1
 #define DM_ENDIO_REQUEUE	2
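
With these definitions in place, dm_done() in the dm-rq.c hunk above maps a
target's return value directly: DM_ENDIO_DONE completes the original request
with whatever errno it already carries, DM_ENDIO_INCOMPLETE means the target
owns completion so dm core just returns, DM_ENDIO_REQUEUE resubmits the
original request via dm_requeue_original_request(), and anything else hits
the DMWARN()/BUG() path.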