block: move blk_rq_err_bytes to scsi
blk_rq_err_bytes is only used by the scsi midlayer, so move it there.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20211117061404.331732-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: d58071a8a7
Commit: 79478bf9ea
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1176,47 +1176,6 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
-/**
- * blk_rq_err_bytes - determine number of bytes till the next failure boundary
- * @rq: request to examine
- *
- * Description:
- *     A request could be merge of IOs which require different failure
- *     handling. This function determines the number of bytes which
- *     can be failed from the beginning of the request without
- *     crossing into area which need to be retried further.
- *
- * Return:
- *     The number of bytes to fail.
- */
-unsigned int blk_rq_err_bytes(const struct request *rq)
-{
-	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
-	unsigned int bytes = 0;
-	struct bio *bio;
-
-	if (!(rq->rq_flags & RQF_MIXED_MERGE))
-		return blk_rq_bytes(rq);
-
-	/*
-	 * Currently the only 'mixing' which can happen is between
-	 * different fastfail types. We can safely fail portions
-	 * which have all the failfast bits that the first one has -
-	 * the ones which are at least as eager to fail as the first
-	 * one.
-	 */
-	for (bio = rq->bio; bio; bio = bio->bi_next) {
-		if ((bio->bi_opf & ff) != ff)
-			break;
-		bytes += bio->bi_iter.bi_size;
-	}
-
-	/* this could lead to infinite loop */
-	BUG_ON(blk_rq_bytes(rq) && !bytes);
-	return bytes;
-}
-EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
-
 static void update_io_ticks(struct block_device *part, unsigned long now,
 		bool end)
 {
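The helper being moved is self-contained: it walks the request's bio chain and counts bytes only for bios that carry at least the failfast bits of the request as a whole, stopping at the first bio that is less eager to fail. A minimal userspace sketch of that walk, using simplified stand-in types and flag values rather than the kernel's struct bio, struct request and REQ_FAILFAST_* bits:

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins only -- not the kernel definitions. */
#define TOY_FAILFAST_MASK 0x7u          /* pretend REQ_FAILFAST_* bits */

struct toy_bio {
	unsigned int opf;               /* per-bio flags, including failfast bits */
	unsigned int size;              /* bytes covered by this bio */
	struct toy_bio *next;
};

struct toy_request {
	unsigned int cmd_flags;         /* failfast bits of the request as a whole */
	int mixed_merge;                /* merged bios did not all share the same flags */
	unsigned int total_bytes;
	struct toy_bio *bio;
};

/*
 * Sum bytes from the head of the bio chain while each bio is at least as
 * eager to fail as the request itself; stop at the first weaker bio.
 */
static unsigned int toy_err_bytes(const struct toy_request *rq)
{
	unsigned int ff = rq->cmd_flags & TOY_FAILFAST_MASK;
	unsigned int bytes = 0;
	const struct toy_bio *bio;

	if (!rq->mixed_merge)
		return rq->total_bytes;

	for (bio = rq->bio; bio; bio = bio->next) {
		if ((bio->opf & ff) != ff)
			break;
		bytes += bio->size;
	}
	return bytes;
}

int main(void)
{
	/* Two failfast bios (4 KiB each) followed by one that must be retried. */
	struct toy_bio c = { .opf = 0x0, .size = 4096, .next = NULL };
	struct toy_bio b = { .opf = 0x1, .size = 4096, .next = &c };
	struct toy_bio a = { .opf = 0x1, .size = 4096, .next = &b };
	struct toy_request rq = {
		.cmd_flags = 0x1, .mixed_merge = 1,
		.total_bytes = 12288, .bio = &a,
	};

	assert(toy_err_bytes(&rq) == 8192);
	printf("fail the first %u bytes, retry the rest\n", toy_err_bytes(&rq));
	return 0;
}

With three 4 KiB bios of which only the first two carry the failfast bit, the boundary falls after 8 KiB: that much can be failed immediately, while the remainder has to be retried.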
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -617,6 +617,46 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
 	}
 }
 
+/**
+ * scsi_rq_err_bytes - determine number of bytes till the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ *     A request could be merge of IOs which require different failure
+ *     handling. This function determines the number of bytes which
+ *     can be failed from the beginning of the request without
+ *     crossing into area which need to be retried further.
+ *
+ * Return:
+ *     The number of bytes to fail.
+ */
+static unsigned int scsi_rq_err_bytes(const struct request *rq)
+{
+	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+	unsigned int bytes = 0;
+	struct bio *bio;
+
+	if (!(rq->rq_flags & RQF_MIXED_MERGE))
+		return blk_rq_bytes(rq);
+
+	/*
+	 * Currently the only 'mixing' which can happen is between
+	 * different fastfail types. We can safely fail portions
+	 * which have all the failfast bits that the first one has -
+	 * the ones which are at least as eager to fail as the first
+	 * one.
+	 */
+	for (bio = rq->bio; bio; bio = bio->bi_next) {
+		if ((bio->bi_opf & ff) != ff)
+			break;
+		bytes += bio->bi_iter.bi_size;
+	}
+
+	/* this could lead to infinite loop */
+	BUG_ON(blk_rq_bytes(rq) && !bytes);
+	return bytes;
+}
+
 /* Helper for scsi_io_completion() when "reprep" action required. */
 static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
 				      struct request_queue *q)
@@ -794,7 +834,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
 				scsi_print_command(cmd);
 			}
 		}
-		if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
+		if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req)))
 			return;
 		fallthrough;
 	case ACTION_REPREP:
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -947,7 +947,6 @@ struct req_iterator {
  * blk_rq_pos()			: the current sector
  * blk_rq_bytes()		: bytes left in the entire request
  * blk_rq_cur_bytes()		: bytes left in the current segment
- * blk_rq_err_bytes()		: bytes left till the next error boundary
  * blk_rq_sectors()		: sectors left in the entire request
  * blk_rq_cur_sectors()		: sectors left in the current segment
  * blk_rq_stats_sectors()	: sectors of the entire request used for stats
@@ -971,8 +970,6 @@ static inline int blk_rq_cur_bytes(const struct request *rq)
 	return bio_iovec(rq->bio).bv_len;
 }
 
-unsigned int blk_rq_err_bytes(const struct request *rq);
-
 static inline unsigned int blk_rq_sectors(const struct request *rq)
 {
 	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
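At the one remaining call site (the ACTION_FAIL branch above), the returned count is passed to scsi_end_request() so that only the leading, non-retryable bytes of a mixed-merge request are failed; if that does not finish the whole request, the code falls through to ACTION_REPREP and the remainder is re-prepared and requeued. A rough sketch of that control flow with placeholder helpers (pretend_complete() below is a stand-in, not the SCSI midlayer API):

#include <stdbool.h>
#include <stdio.h>

static unsigned int remaining = 12288;  /* whole mixed-merge request */

/* Stand-in: "complete" nbytes and report whether the request still has data. */
static bool pretend_complete(unsigned int nbytes)
{
	remaining -= nbytes;
	return remaining != 0;
}

int main(void)
{
	unsigned int err_bytes = 8192;  /* what scsi_rq_err_bytes() would return */

	/* ACTION_FAIL shape: fail the leading bytes... */
	if (!pretend_complete(err_bytes))
		return 0;               /* nothing left, request is done */

	/* ...otherwise fall through and re-prepare the remaining bytes. */
	printf("requeue remaining %u bytes\n", remaining);
	return 0;
}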