[SCSI] block: separate failfast into multiple bits.
Multipath is best at handling transport errors. If it gets a device error, there is not much the multipath layer can do: it will just access the same device via a different path. This patch breaks failfast up into device, transport and driver errors. The multipath layers (md and dm multipath) only ask the lower levels to fast-fail transport errors. The other user of failfast, readahead, asks to fast-fail on all errors.

Note that blk_noretry_request() still returns true if any failfast bit is set. This allows drivers that do not support the multipath failfast bits to continue to fail on any failfast error, as before. Drivers like scsi that are able to fail fast on specific error classes can check for the specific failfast type. The next patch converts scsi.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Parent: 056a448349
Commit: 6000a368cd
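As a rough sketch of the division of labor the new bits enable (hypothetical helpers, not code from this patch, assuming the patched <linux/bio.h> and <linux/blkdev.h> shown below): a multipath-style submitter tags its bios for transport failures only, so the low-level driver still retries device errors, while a driver that can classify errors tests each class independently.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Submission side: let the driver retry device errors as usual, but
 * report transport failures immediately so multipath can switch paths. */
static void submit_via_path(struct bio *bio)
{
	bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
	generic_make_request(bio);
}

/* Completion side: check only the failfast bit matching the error class
 * that was actually seen. */
static int want_fast_fail(struct request *req, int transport_error)
{
	if (transport_error)
		return blk_failfast_transport(req);
	return blk_failfast_dev(req);
}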
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1075,8 +1075,15 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	/*
 	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
 	 */
-	if (bio_rw_ahead(bio) || bio_failfast(bio))
-		req->cmd_flags |= REQ_FAILFAST;
+	if (bio_rw_ahead(bio))
+		req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+				   REQ_FAILFAST_DRIVER);
+	if (bio_failfast_dev(bio))
+		req->cmd_flags |= REQ_FAILFAST_DEV;
+	if (bio_failfast_transport(bio))
+		req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+	if (bio_failfast_driver(bio))
+		req->cmd_flags |= REQ_FAILFAST_DRIVER;
 
 	/*
 	 * REQ_BARRIER implies no merging, but lets make it explicit
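The hunk above becomes the single point where bio failfast hints are translated into request flags, which is why the bio bit positions and the request flag positions no longer need to line up. The same mapping, restated as a hypothetical standalone helper for illustration only:

/* Readahead fans out into all three classes; the explicit per-class
 * bio bits map one-to-one onto the request flags. */
static unsigned int failfast_flags_from_bio(struct bio *bio)
{
	unsigned int flags = 0;

	if (bio_rw_ahead(bio))
		flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			 REQ_FAILFAST_DRIVER;
	if (bio_failfast_dev(bio))
		flags |= REQ_FAILFAST_DEV;
	if (bio_failfast_transport(bio))
		flags |= REQ_FAILFAST_TRANSPORT;
	if (bio_failfast_driver(bio))
		flags |= REQ_FAILFAST_DRIVER;
	return flags;
}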
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -849,7 +849,7 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
 	dm_bio_record(&mpio->details, bio);
 
 	map_context->ptr = mpio;
-	bio->bi_rw |= (1 << BIO_RW_FAILFAST);
+	bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
 	r = map_io(m, bio, mpio, 0);
 	if (r < 0 || r == DM_MAPIO_REQUEUE)
 		mempool_free(mpio, m->mpio_pool);
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -176,7 +176,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
 	mp_bh->bio = *bio;
 	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST);
+	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;
 	generic_make_request(&mp_bh->bio);
@@ -402,7 +402,7 @@ static void multipathd (mddev_t *mddev)
 	*bio = *(mp_bh->master_bio);
 	bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
 	bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-	bio->bi_rw |= (1 << BIO_RW_FAILFAST);
+	bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
 	bio->bi_end_io = multipath_end_request;
 	bio->bi_private = mp_bh;
 	generic_make_request(bio);
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -544,7 +544,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
 	}
 	cqr->retries = DIAG_MAX_RETRIES;
 	cqr->buildclk = get_clock();
-	if (req->cmd_flags & REQ_FAILFAST)
+	if (blk_noretry_request(req))
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->startdev = memdev;
 	cqr->memdev = memdev;
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1700,7 +1700,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
 			recid++;
 		}
 	}
-	if (req->cmd_flags & REQ_FAILFAST)
+	if (blk_noretry_request(req))
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->startdev = startdev;
 	cqr->memdev = startdev;
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -355,7 +355,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
 			recid++;
 		}
 	}
-	if (req->cmd_flags & REQ_FAILFAST)
+	if (blk_noretry_request(req))
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->startdev = memdev;
 	cqr->memdev = memdev;
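The three s390 DASD hunks above are mechanical: the DASD driver treats every failfast class the same, so the old single-bit test becomes blk_noretry_request(), which with its new definition (see the blkdev.h hunk below) is true when any of the three bits is set, preserving the old all-or-nothing behavior. A sketch of the equivalence, using a local helper that is not a kernel macro:

static inline int noretry_equivalent(struct request *req)
{
	const unsigned int any_failfast = REQ_FAILFAST_DEV |
					  REQ_FAILFAST_TRANSPORT |
					  REQ_FAILFAST_DRIVER;

	return (req->cmd_flags & any_failfast) != 0;
}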
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -109,7 +109,8 @@ static struct request *get_alua_req(struct scsi_device *sdev,
 	}
 
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			 REQ_FAILFAST_DRIVER | REQ_NOMERGE;
 	rq->retries = ALUA_FAILOVER_RETRIES;
 	rq->timeout = ALUA_FAILOVER_TIMEOUT;
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -303,7 +303,8 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
 
 	rq->cmd[4] = len;
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	rq->cmd_flags |= REQ_FAILFAST;
+	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			 REQ_FAILFAST_DRIVER;
 	rq->timeout = CLARIION_TIMEOUT;
 	rq->retries = CLARIION_RETRIES;
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -112,7 +112,8 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
 		return SCSI_DH_RES_TEMP_UNAVAIL;
 
 	req->cmd_type = REQ_TYPE_BLOCK_PC;
-	req->cmd_flags |= REQ_FAILFAST;
+	req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			  REQ_FAILFAST_DRIVER;
 	req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
 	req->cmd[0] = TEST_UNIT_READY;
 	req->timeout = HP_SW_TIMEOUT;
@@ -204,7 +205,8 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
 		return SCSI_DH_RES_TEMP_UNAVAIL;
 
 	req->cmd_type = REQ_TYPE_BLOCK_PC;
-	req->cmd_flags |= REQ_FAILFAST;
+	req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			  REQ_FAILFAST_DRIVER;
 	req->cmd_len = COMMAND_SIZE(START_STOP);
 	req->cmd[0] = START_STOP;
 	req->cmd[4] = 1;	/* Start spin cycle */
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -226,7 +226,8 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
 	}
 
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			 REQ_FAILFAST_DRIVER;
 	rq->retries = RDAC_RETRIES;
 	rq->timeout = RDAC_TIMEOUT;
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -109,7 +109,9 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
 	for(i = 0; i < DV_RETRIES; i++) {
 		result = scsi_execute(sdev, cmd, dir, buffer, bufflen,
 				      sense, DV_TIMEOUT, /* retries */ 1,
-				      REQ_FAILFAST);
+				      REQ_FAILFAST_DEV |
+				      REQ_FAILFAST_TRANSPORT |
+				      REQ_FAILFAST_DRIVER);
 		if (result & DRIVER_SENSE) {
 			struct scsi_sense_hdr sshdr_tmp;
 			if (!sshdr)
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -129,25 +129,30 @@ struct bio {
  * bit 2 -- barrier
  *	Insert a serialization point in the IO queue, forcing previously
  *	submitted IO to be completed before this one is issued.
- * bit 3 -- fail fast, don't want low level driver retries
- * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
+ * bit 3 -- synchronous I/O hint: the block layer will unplug immediately
  *	Note that this does NOT indicate that the IO itself is sync, just
  *	that the block layer will not postpone issue of this IO by plugging.
- * bit 5 -- metadata request
+ * bit 4 -- metadata request
  *	Used for tracing to differentiate metadata and data IO. May also
  *	get some preferential treatment in the IO scheduler
- * bit 6 -- discard sectors
+ * bit 5 -- discard sectors
  *	Informs the lower level device that this range of sectors is no longer
  *	used by the file system and may thus be freed by the device. Used
  *	for flash based storage.
+ * bit 6 -- fail fast device errors
+ * bit 7 -- fail fast transport errors
+ * bit 8 -- fail fast driver errors
+ *	Don't want driver retries for any fast fail whatever the reason.
  */
 #define BIO_RW		0	/* Must match RW in req flags (blkdev.h) */
 #define BIO_RW_AHEAD	1	/* Must match FAILFAST in req flags */
 #define BIO_RW_BARRIER	2
-#define BIO_RW_FAILFAST	3
-#define BIO_RW_SYNC	4
-#define BIO_RW_META	5
-#define BIO_RW_DISCARD	6
+#define BIO_RW_SYNC	3
+#define BIO_RW_META	4
+#define BIO_RW_DISCARD	5
+#define BIO_RW_FAILFAST_DEV		6
+#define BIO_RW_FAILFAST_TRANSPORT	7
+#define BIO_RW_FAILFAST_DRIVER		8
 
 /*
  * upper 16 bits of bi_rw define the io priority of this bio
@@ -174,7 +179,10 @@ struct bio {
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
 #define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
 #define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))
-#define bio_failfast(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
+#define bio_failfast_dev(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV))
+#define bio_failfast_transport(bio)	\
+	((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT))
+#define bio_failfast_driver(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DRIVER))
 #define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
 #define bio_rw_meta(bio)	((bio)->bi_rw & (1 << BIO_RW_META))
 #define bio_discard(bio)	((bio)->bi_rw & (1 << BIO_RW_DISCARD))
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -87,7 +87,9 @@ enum {
  */
 enum rq_flag_bits {
 	__REQ_RW,		/* not set, read. set, write */
-	__REQ_FAILFAST,		/* no low level driver retries */
+	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
+	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
+	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
 	__REQ_DISCARD,		/* request to discard sectors */
 	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
@@ -111,8 +113,10 @@ enum rq_flag_bits {
 };
 
 #define REQ_RW		(1 << __REQ_RW)
+#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
 #define REQ_DISCARD	(1 << __REQ_DISCARD)
-#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
 #define REQ_SORTED	(1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
 #define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
@@ -560,7 +564,12 @@ enum {
 #define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
 #define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)
 
-#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
+#define blk_failfast_dev(rq)	((rq)->cmd_flags & REQ_FAILFAST_DEV)
+#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
+#define blk_failfast_driver(rq)	((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
+#define blk_noretry_request(rq)	(blk_failfast_dev(rq) ||	\
+				 blk_failfast_transport(rq) ||	\
+				 blk_failfast_driver(rq))
 #define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
 
 #define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
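The commit message promises a follow-up patch that converts scsi to honor the individual bits; that patch is not shown here. A hypothetical sketch of the kind of check it enables, assuming the completion path has already classified the failure:

enum failure_class { FAIL_DEVICE, FAIL_TRANSPORT, FAIL_DRIVER };

/* Return nonzero if the request asked to fail fast on this error class. */
static int fail_fast_wanted(struct request *req, enum failure_class c)
{
	switch (c) {
	case FAIL_DEVICE:
		return blk_failfast_dev(req);
	case FAIL_TRANSPORT:
		return blk_failfast_transport(req);
	case FAIL_DRIVER:
		return blk_failfast_driver(req);
	}
	return 0;
}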