block: switch bios to blk_status_t

Replace bi_error with a new bi_status to allow for a clear conversion.
Note that device mapper overloaded bi_error with a private value, which
we'll have to keep around at least for now and thus propagate to a
proper blk_status_t value.
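
As a rough sketch of the convention this establishes (illustrative only, not
part of the patch): blk_status_t stays opaque inside the block layer, and
code converts to or from errno values only at boundaries that still speak
errno, via the blk_status_to_errno()/errno_to_blk_status() helpers. The
wait_ctx container and both functions below are hypothetical:

    #include <linux/bio.h>
    #include <linux/blk_types.h>
    #include <linux/completion.h>

    struct wait_ctx {				/* hypothetical; mirrors submit_bio_ret */
            struct completion event;
            int error;
    };

    static void wait_endio(struct bio *bio)
    {
            struct wait_ctx *w = bio->bi_private;

            /* convert the opaque status to an errno only at this boundary */
            w->error = blk_status_to_errno(bio->bi_status);
            complete(&w->event);
    }

    static void fail_bio(struct bio *bio)
    {
            /* inside the block layer, assign blk_status_t codes directly */
            bio->bi_status = BLK_STS_IOERR;
            bio_endio(bio);
    }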

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
Christoph Hellwig, 2017-06-03 09:38:06 +02:00; committed by Jens Axboe
Parent: fc17b6534e
Commit: 4e4cbee93d
106 changed files with 625 additions and 603 deletions


@@ -221,7 +221,7 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
  * @bio:	bio to generate/verify integrity metadata for
  * @proc_fn:	Pointer to the relevant processing function
  */
-static int bio_integrity_process(struct bio *bio,
+static blk_status_t bio_integrity_process(struct bio *bio,
 		integrity_processing_fn *proc_fn)
 {
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
@@ -229,7 +229,7 @@ static int bio_integrity_process(struct bio *bio,
 	struct bvec_iter bviter;
 	struct bio_vec bv;
 	struct bio_integrity_payload *bip = bio_integrity(bio);
-	unsigned int ret = 0;
+	blk_status_t ret = BLK_STS_OK;
 	void *prot_buf = page_address(bip->bip_vec->bv_page) +
 		bip->bip_vec->bv_offset;
@@ -366,7 +366,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
 	struct bio *bio = bip->bip_bio;
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);

-	bio->bi_error = bio_integrity_process(bio, bi->profile->verify_fn);
+	bio->bi_status = bio_integrity_process(bio, bi->profile->verify_fn);

 	/* Restore original bio completion handler */
 	bio->bi_end_io = bip->bip_end_io;
@@ -395,7 +395,7 @@ void bio_integrity_endio(struct bio *bio)
 	 * integrity metadata. Restore original bio end_io handler
 	 * and run it.
 	 */
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		bio->bi_end_io = bip->bip_end_io;
 		bio_endio(bio);


@@ -309,8 +309,8 @@ static struct bio *__bio_chain_endio(struct bio *bio)
 {
 	struct bio *parent = bio->bi_private;

-	if (!parent->bi_error)
-		parent->bi_error = bio->bi_error;
+	if (!parent->bi_status)
+		parent->bi_status = bio->bi_status;
 	bio_put(bio);
 	return parent;
 }
@@ -918,7 +918,7 @@ static void submit_bio_wait_endio(struct bio *bio)
 {
 	struct submit_bio_ret *ret = bio->bi_private;

-	ret->error = bio->bi_error;
+	ret->error = blk_status_to_errno(bio->bi_status);
 	complete(&ret->event);
 }
@@ -1818,7 +1818,7 @@ again:
 	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
 		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev),
-					 bio, bio->bi_error);
+					 bio, bio->bi_status);
 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
 	}


@@ -144,6 +144,9 @@ static const struct {
 	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
 	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },

+	/* device mapper special case, should not leak out: */
+	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },
+
 	/* everything else not covered above: */
 	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
 };
@@ -188,7 +191,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 			  unsigned int nbytes, blk_status_t error)
 {
 	if (error)
-		bio->bi_error = blk_status_to_errno(error);
+		bio->bi_status = error;

 	if (unlikely(rq->rq_flags & RQF_QUIET))
 		bio_set_flag(bio, BIO_QUIET);
@@ -1717,7 +1720,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	blk_queue_split(q, &bio, q->bio_split);

 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
 		return BLK_QC_T_NONE;
 	}
@@ -1775,7 +1778,10 @@ get_rq:
 	req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
 		__wbt_done(q->rq_wb, wb_acct);
-		bio->bi_error = PTR_ERR(req);
+		if (PTR_ERR(req) == -ENOMEM)
+			bio->bi_status = BLK_STS_RESOURCE;
+		else
+			bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
 		goto out_unlock;
 	}
@@ -1930,7 +1936,7 @@ generic_make_request_checks(struct bio *bio)
 {
 	struct request_queue *q;
 	int nr_sectors = bio_sectors(bio);
-	int err = -EIO;
+	blk_status_t status = BLK_STS_IOERR;
 	char b[BDEVNAME_SIZE];
 	struct hd_struct *part;
@@ -1973,7 +1979,7 @@ generic_make_request_checks(struct bio *bio)
 	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
 		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
 		if (!nr_sectors) {
-			err = 0;
+			status = BLK_STS_OK;
 			goto end_io;
 		}
 	}
@@ -2025,9 +2031,9 @@ generic_make_request_checks(struct bio *bio)
 	return true;

 not_supported:
-	err = -EOPNOTSUPP;
+	status = BLK_STS_NOTSUPP;
 end_io:
-	bio->bi_error = err;
+	bio->bi_status = status;
 	bio_endio(bio);
 	return false;
 }
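
The blk_errors[] table in the first hunk above backs both conversion
directions. A minimal sketch of the errno-bound lookup, assuming the table's
field is named errno (the real helpers sit next to the table in blk-core.c):

    /* sketch: blk_status_t values index blk_errors[] directly */
    int blk_status_to_errno_sketch(blk_status_t status)
    {
            int idx = (__force int)status;

            /* out-of-range codes degrade to a plain I/O error */
            if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
                    return -EIO;
            return blk_errors[idx].errno;	/* e.g. BLK_STS_PROTECTION -> -EILSEQ */
    }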


@@ -384,9 +384,9 @@ static struct kobj_type integrity_ktype = {
 	.sysfs_ops	= &integrity_ops,
 };

-static int blk_integrity_nop_fn(struct blk_integrity_iter *iter)
+static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
 {
-	return 0;
+	return BLK_STS_OK;
 }

 static const struct blk_integrity_profile nop_profile = {


@@ -143,7 +143,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
 		mempool_free(bvec->bv_page, pool);
 	}

-	bio_orig->bi_error = bio->bi_error;
+	bio_orig->bi_status = bio->bi_status;
 	bio_endio(bio_orig);
 	bio_put(bio);
 }
@@ -163,7 +163,7 @@ static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
 {
 	struct bio *bio_orig = bio->bi_private;

-	if (!bio->bi_error)
+	if (!bio->bi_status)
 		copy_to_high_bio_irq(bio_orig, bio);

 	bounce_end_io(bio, pool);


@@ -46,8 +46,8 @@ static __be16 t10_pi_ip_fn(void *data, unsigned int len)
  * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
  * tag.
  */
-static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn,
-			   unsigned int type)
+static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
+		csum_fn *fn, unsigned int type)
 {
 	unsigned int i;
@@ -67,11 +67,11 @@ static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn,
 		iter->seed++;
 	}

-	return 0;
+	return BLK_STS_OK;
 }

-static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
-			 unsigned int type)
+static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
+		csum_fn *fn, unsigned int type)
 {
 	unsigned int i;
@@ -108,7 +108,7 @@ static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
 				"(rcvd %04x, want %04x)\n", iter->disk_name,
 				(unsigned long long)iter->seed,
 				be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
-			return -EILSEQ;
+			return BLK_STS_PROTECTION;
 		}

 next:
@@ -117,45 +117,45 @@ next:
 		iter->seed++;
 	}

-	return 0;
+	return BLK_STS_OK;
 }

-static int t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_crc_fn, 1);
 }

-static int t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_ip_fn, 1);
 }

-static int t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_crc_fn, 1);
 }

-static int t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_ip_fn, 1);
 }

-static int t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_crc_fn, 3);
 }

-static int t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_ip_fn, 3);
 }

-static int t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_crc_fn, 3);
 }

-static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_ip_fn, 3);
 }


@@ -1070,7 +1070,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
 	d->ip.rq = NULL;
 	do {
 		bio = rq->bio;
-		bok = !fastfail && !bio->bi_error;
+		bok = !fastfail && !bio->bi_status;
 	} while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));

 	/* cf. http://lkml.org/lkml/2006/10/31/28 */
@@ -1131,7 +1131,7 @@ ktiocomplete(struct frame *f)
 			ahout->cmdstat, ahin->cmdstat,
 			d->aoemajor, d->aoeminor);
 noskb:		if (buf)
-			buf->bio->bi_error = -EIO;
+			buf->bio->bi_status = BLK_STS_IOERR;
 		goto out;
 	}
@@ -1144,7 +1144,7 @@ noskb:	if (buf)
 				"aoe: runt data size in read from",
 				(long) d->aoemajor, d->aoeminor,
 				skb->len, n);
-			buf->bio->bi_error = -EIO;
+			buf->bio->bi_status = BLK_STS_IOERR;
 			break;
 		}
 		if (n > f->iter.bi_size) {
@@ -1152,7 +1152,7 @@ noskb:	if (buf)
 				"aoe: too-large data size in read from",
 				(long) d->aoemajor, d->aoeminor,
 				n, f->iter.bi_size);
-			buf->bio->bi_error = -EIO;
+			buf->bio->bi_status = BLK_STS_IOERR;
 			break;
 		}
 		bvcpy(skb, f->buf->bio, f->iter, n);
@@ -1654,7 +1654,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
 	if (buf == NULL)
 		return;
 	buf->iter.bi_size = 0;
-	buf->bio->bi_error = -EIO;
+	buf->bio->bi_status = BLK_STS_IOERR;
 	if (buf->nframesout == 0)
 		aoe_end_buf(d, buf);
 }


@@ -170,7 +170,7 @@ aoe_failip(struct aoedev *d)
 	if (rq == NULL)
 		return;
 	while ((bio = d->ip.nxbio)) {
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 		d->ip.nxbio = bio->bi_next;
 		n = (unsigned long) rq->special;
 		rq->special = (void *) --n;


@@ -178,7 +178,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
 	else
 		submit_bio(bio);
 	wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
-	if (!bio->bi_error)
+	if (!bio->bi_status)
 		err = device->md_io.error;

  out:


@@ -959,16 +959,16 @@ static void drbd_bm_endio(struct bio *bio)
 	    !bm_test_page_unchanged(b->bm_pages[idx]))
 		drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);

-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		/* ctx error will hold the completed-last non-zero error code,
 		 * in case error codes differ. */
-		ctx->error = bio->bi_error;
+		ctx->error = blk_status_to_errno(bio->bi_status);
 		bm_set_page_io_err(b->bm_pages[idx]);
 		/* Not identical to on disk version of it.
 		 * Is BM_PAGE_IO_ERROR enough? */
 		if (__ratelimit(&drbd_ratelimit_state))
 			drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
-					bio->bi_error, idx);
+					bio->bi_status, idx);
 	} else {
 		bm_clear_page_io_err(b->bm_pages[idx]);
 		dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);


@@ -1627,7 +1627,7 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
 	__release(local);
 	if (!bio->bi_bdev) {
 		drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
-		bio->bi_error = -ENODEV;
+		bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
 		return;
 	}


@@ -1229,9 +1229,9 @@ void one_flush_endio(struct bio *bio)
 	struct drbd_device *device = octx->device;
 	struct issue_flush_context *ctx = octx->ctx;

-	if (bio->bi_error) {
-		ctx->error = bio->bi_error;
-		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_error);
+	if (bio->bi_status) {
+		ctx->error = blk_status_to_errno(bio->bi_status);
+		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
 	}
 	kfree(octx);
 	bio_put(bio);


@@ -203,7 +203,7 @@ void start_new_tl_epoch(struct drbd_connection *connection)
 void complete_master_bio(struct drbd_device *device,
 		struct bio_and_error *m)
 {
-	m->bio->bi_error = m->error;
+	m->bio->bi_status = errno_to_blk_status(m->error);
 	bio_endio(m->bio);
 	dec_ap_bio(device);
 }
@@ -1157,7 +1157,7 @@ static void drbd_process_discard_req(struct drbd_request *req)

 	if (blkdev_issue_zeroout(bdev, req->i.sector, req->i.size >> 9,
 			GFP_NOIO, 0))
-		req->private_bio->bi_error = -EIO;
+		req->private_bio->bi_status = BLK_STS_IOERR;
 	bio_endio(req->private_bio);
 }
@@ -1225,7 +1225,7 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
 		/* only pass the error to the upper layers.
 		 * if user cannot handle io errors, that's not our business. */
 		drbd_err(device, "could not kmalloc() req\n");
-		bio->bi_error = -ENOMEM;
+		bio->bi_status = BLK_STS_RESOURCE;
 		bio_endio(bio);
 		return ERR_PTR(-ENOMEM);
 	}


@@ -63,7 +63,7 @@ void drbd_md_endio(struct bio *bio)
 	struct drbd_device *device;

 	device = bio->bi_private;
-	device->md_io.error = bio->bi_error;
+	device->md_io.error = blk_status_to_errno(bio->bi_status);

 	/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
 	 * to timeout on the lower level device, and eventually detach from it.
@@ -177,13 +177,13 @@ void drbd_peer_request_endio(struct bio *bio)
 	bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES ||
 			  bio_op(bio) == REQ_OP_DISCARD;

-	if (bio->bi_error && __ratelimit(&drbd_ratelimit_state))
+	if (bio->bi_status && __ratelimit(&drbd_ratelimit_state))
 		drbd_warn(device, "%s: error=%d s=%llus\n",
 				is_write ? (is_discard ? "discard" : "write")
-					: "read", bio->bi_error,
+					: "read", bio->bi_status,
 				(unsigned long long)peer_req->i.sector);

-	if (bio->bi_error)
+	if (bio->bi_status)
 		set_bit(__EE_WAS_ERROR, &peer_req->flags);

 	bio_put(bio); /* no need for the bio anymore */
@@ -243,16 +243,16 @@ void drbd_request_endio(struct bio *bio)
 		if (__ratelimit(&drbd_ratelimit_state))
 			drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");

-		if (!bio->bi_error)
+		if (!bio->bi_status)
 			drbd_panic_after_delayed_completion_of_aborted_request(device);
 	}

 	/* to avoid recursion in __req_mod */
-	if (unlikely(bio->bi_error)) {
+	if (unlikely(bio->bi_status)) {
 		switch (bio_op(bio)) {
 		case REQ_OP_WRITE_ZEROES:
 		case REQ_OP_DISCARD:
-			if (bio->bi_error == -EOPNOTSUPP)
+			if (bio->bi_status == BLK_STS_NOTSUPP)
 				what = DISCARD_COMPLETED_NOTSUPP;
 			else
 				what = DISCARD_COMPLETED_WITH_ERROR;
@@ -272,7 +272,7 @@ void drbd_request_endio(struct bio *bio)
 	}

 	bio_put(req->private_bio);
-	req->private_bio = ERR_PTR(bio->bi_error);
+	req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));

 	/* not req_mod(), we need irqsave here! */
 	spin_lock_irqsave(&device->resource->req_lock, flags);


@@ -3780,9 +3780,9 @@ static void floppy_rb0_cb(struct bio *bio)
 	struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
 	int drive = cbdata->drive;

-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		pr_info("floppy: error %d while reading block 0\n",
-			bio->bi_error);
+			bio->bi_status);
 		set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
 	}
 	complete(&cbdata->complete);


@@ -952,9 +952,9 @@ static void pkt_end_io_read(struct bio *bio)

 	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
 		bio, (unsigned long long)pkt->sector,
-		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_error);
+		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);

-	if (bio->bi_error)
+	if (bio->bi_status)
 		atomic_inc(&pkt->io_errors);
 	if (atomic_dec_and_test(&pkt->io_wait)) {
 		atomic_inc(&pkt->run_sm);
@@ -969,7 +969,7 @@ static void pkt_end_io_packet_write(struct bio *bio)
 	struct pktcdvd_device *pd = pkt->pd;
 	BUG_ON(!pd);

-	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_error);
+	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);

 	pd->stats.pkt_ended++;
@@ -1305,16 +1305,16 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 	pkt_queue_bio(pd, pkt->w_bio);
 }

-static void pkt_finish_packet(struct packet_data *pkt, int error)
+static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
 {
 	struct bio *bio;

-	if (error)
+	if (status)
 		pkt->cache_valid = 0;

 	/* Finish all bios corresponding to this packet */
 	while ((bio = bio_list_pop(&pkt->orig_bios))) {
-		bio->bi_error = error;
+		bio->bi_status = status;
 		bio_endio(bio);
 	}
 }
@@ -1349,7 +1349,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
 			if (atomic_read(&pkt->io_wait) > 0)
 				return;

-			if (!pkt->w_bio->bi_error) {
+			if (!pkt->w_bio->bi_status) {
 				pkt_set_state(pkt, PACKET_FINISHED_STATE);
 			} else {
 				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
@@ -1366,7 +1366,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
 			break;

 		case PACKET_FINISHED_STATE:
-			pkt_finish_packet(pkt, pkt->w_bio->bi_error);
+			pkt_finish_packet(pkt, pkt->w_bio->bi_status);
 			return;

 		default:
@@ -2301,7 +2301,7 @@ static void pkt_end_io_read_cloned(struct bio *bio)
 	struct packet_stacked_data *psd = bio->bi_private;
 	struct pktcdvd_device *pd = psd->pd;

-	psd->bio->bi_error = bio->bi_error;
+	psd->bio->bi_status = bio->bi_status;
 	bio_put(bio);
 	bio_endio(psd->bio);
 	mempool_free(psd, psd_pool);


@@ -428,7 +428,7 @@ static void ps3vram_cache_cleanup(struct ps3_system_bus_device *dev)
 	kfree(priv->cache.tags);
 }

-static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
+static blk_status_t ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
 			size_t len, size_t *retlen, u_char *buf)
 {
 	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -438,7 +438,7 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
 		(unsigned int)from, len);

 	if (from >= priv->size)
-		return -EIO;
+		return BLK_STS_IOERR;

 	if (len > priv->size - from)
 		len = priv->size - from;
@@ -472,14 +472,14 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
 	return 0;
 }

-static int ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
+static blk_status_t ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
 			 size_t len, size_t *retlen, const u_char *buf)
 {
 	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	unsigned int cached, count;

 	if (to >= priv->size)
-		return -EIO;
+		return BLK_STS_IOERR;

 	if (len > priv->size - to)
 		len = priv->size - to;
@@ -554,7 +554,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
 	int write = bio_data_dir(bio) == WRITE;
 	const char *op = write ? "write" : "read";
 	loff_t offset = bio->bi_iter.bi_sector << 9;
-	int error = 0;
+	blk_status_t error = 0;
 	struct bio_vec bvec;
 	struct bvec_iter iter;
 	struct bio *next;
@@ -578,7 +578,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,

 		if (retlen != len) {
 			dev_err(&dev->core, "Short %s\n", op);
-			error = -EIO;
+			error = BLK_STS_IOERR;
 			goto out;
 		}
@@ -593,7 +593,7 @@ out:
 	next = bio_list_peek(&priv->list);
 	spin_unlock_irq(&priv->lock);

-	bio->bi_error = error;
+	bio->bi_status = error;
 	bio_endio(bio);
 	return next;
 }


@@ -149,7 +149,7 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct rsxx_cardinfo *card = q->queuedata;
 	struct rsxx_bio_meta *bio_meta;
-	int st = -EINVAL;
+	blk_status_t st = BLK_STS_IOERR;

 	blk_queue_split(q, &bio, q->bio_split);
@@ -161,15 +161,11 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 	if (bio_end_sector(bio) > get_capacity(card->gendisk))
 		goto req_err;

-	if (unlikely(card->halt)) {
-		st = -EFAULT;
+	if (unlikely(card->halt))
 		goto req_err;
-	}

-	if (unlikely(card->dma_fault)) {
-		st = (-EFAULT);
+	if (unlikely(card->dma_fault))
 		goto req_err;
-	}

 	if (bio->bi_iter.bi_size == 0) {
 		dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
@@ -178,7 +174,7 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)

 	bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
 	if (!bio_meta) {
-		st = -ENOMEM;
+		st = BLK_STS_RESOURCE;
 		goto req_err;
 	}
@@ -205,7 +201,7 @@ queue_err:
 	kmem_cache_free(bio_meta_pool, bio_meta);
 req_err:
 	if (st)
-		bio->bi_error = st;
+		bio->bi_status = st;
 	bio_endio(bio);
 	return BLK_QC_T_NONE;
 }


@@ -611,7 +611,7 @@ static void rsxx_schedule_done(struct work_struct *work)
 	mutex_unlock(&ctrl->work_lock);
 }

-static int rsxx_queue_discard(struct rsxx_cardinfo *card,
+static blk_status_t rsxx_queue_discard(struct rsxx_cardinfo *card,
 				  struct list_head *q,
 				  unsigned int laddr,
 				  rsxx_dma_cb cb,
@@ -621,7 +621,7 @@ static int rsxx_queue_discard(struct rsxx_cardinfo *card,

 	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
 	if (!dma)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;

 	dma->cmd = HW_CMD_BLK_DISCARD;
 	dma->laddr = laddr;
@@ -640,7 +640,7 @@ static int rsxx_queue_discard(struct rsxx_cardinfo *card,
 	return 0;
 }

-static int rsxx_queue_dma(struct rsxx_cardinfo *card,
+static blk_status_t rsxx_queue_dma(struct rsxx_cardinfo *card,
 			      struct list_head *q,
 			      int dir,
 			      unsigned int dma_off,
@@ -655,7 +655,7 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,

 	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
 	if (!dma)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;

 	dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
 	dma->laddr = laddr;
@@ -677,7 +677,7 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
 	return 0;
 }

-int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 			   struct bio *bio,
 			   atomic_t *n_dmas,
 			   rsxx_dma_cb cb,
@@ -694,7 +694,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 	unsigned int dma_len;
 	int dma_cnt[RSXX_MAX_TARGETS];
 	int tgt;
-	int st;
+	blk_status_t st;
 	int i;

 	addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
@@ -769,7 +769,6 @@ bvec_err:
 	for (i = 0; i < card->n_targets; i++)
 		rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
 					FREE_DMA);
-
 	return st;
 }


@@ -391,7 +391,7 @@ int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
 void rsxx_dma_cleanup(void);
 void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
 int rsxx_dma_configure(struct rsxx_cardinfo *card);
-int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 			   struct bio *bio,
 			   atomic_t *n_dmas,
 			   rsxx_dma_cb cb,


@@ -454,7 +454,7 @@ static void process_page(unsigned long data)
 				PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
 		if (control & DMASCR_HARD_ERROR) {
 			/* error */
-			bio->bi_error = -EIO;
+			bio->bi_status = BLK_STS_IOERR;
 			dev_printk(KERN_WARNING, &card->dev->dev,
 				   "I/O error on sector %d/%d\n",
 				   le32_to_cpu(desc->local_addr)>>9,


@@ -1069,20 +1069,17 @@ static void xen_blk_drain_io(struct xen_blkif_ring *ring)
 	atomic_set(&blkif->drain, 0);
 }

-/*
- * Completion callback on the bio's. Called as bh->b_end_io()
- */
-static void __end_block_io_op(struct pending_req *pending_req, int error)
+static void __end_block_io_op(struct pending_req *pending_req,
+		blk_status_t error)
 {
 	/* An error fails the entire request. */
-	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
-	    (error == -EOPNOTSUPP)) {
+	if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
+	    error == BLK_STS_NOTSUPP) {
 		pr_debug("flush diskcache op failed, not supported\n");
 		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
-	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
-		   (error == -EOPNOTSUPP)) {
+	} else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
+		   error == BLK_STS_NOTSUPP) {
 		pr_debug("write barrier op failed, not supported\n");
 		xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
@@ -1106,7 +1103,7 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
 */
 static void end_block_io_op(struct bio *bio)
 {
-	__end_block_io_op(bio->bi_private, bio->bi_error);
+	__end_block_io_op(bio->bi_private, bio->bi_status);
 	bio_put(bio);
 }
@@ -1423,7 +1420,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	for (i = 0; i < nbio; i++)
 		bio_put(biolist[i]);
 	atomic_set(&pending_req->pendcnt, 1);
-	__end_block_io_op(pending_req, -EINVAL);
+	__end_block_io_op(pending_req, BLK_STS_RESOURCE);
 	msleep(1); /* back off a bit */
 	return -EIO;
 }


@@ -2006,7 +2006,7 @@ static void split_bio_end(struct bio *bio)

 	if (atomic_dec_and_test(&split_bio->pending)) {
 		split_bio->bio->bi_phys_segments = 0;
-		split_bio->bio->bi_error = bio->bi_error;
+		split_bio->bio->bi_status = bio->bi_status;
 		bio_endio(split_bio->bio);
 		kfree(split_bio);
 	}


@@ -296,8 +296,8 @@ void pblk_flush_writer(struct pblk *pblk)
 			pr_err("pblk: tear down bio failed\n");
 	}

-	if (bio->bi_error)
-		pr_err("pblk: flush sync write failed (%u)\n", bio->bi_error);
+	if (bio->bi_status)
+		pr_err("pblk: flush sync write failed (%u)\n", bio->bi_status);

 	bio_put(bio);
 }


@@ -114,7 +114,7 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
 		pblk_log_read_err(pblk, rqd);
 #ifdef CONFIG_NVM_DEBUG
 	else
-		WARN_ONCE(bio->bi_error, "pblk: corrupted read error\n");
+		WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
 #endif

 	if (rqd->nr_ppas > 1)
@@ -123,7 +123,7 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
 	bio_put(bio);
 	if (r_ctx->orig_bio) {
 #ifdef CONFIG_NVM_DEBUG
-		WARN_ONCE(r_ctx->orig_bio->bi_error,
+		WARN_ONCE(r_ctx->orig_bio->bi_status,
 			  "pblk: corrupted read bio\n");
 #endif
 		bio_endio(r_ctx->orig_bio);


@@ -186,7 +186,7 @@ static void pblk_end_io_write(struct nvm_rq *rqd)
 	}
 #ifdef CONFIG_NVM_DEBUG
 	else
-		WARN_ONCE(rqd->bio->bi_error, "pblk: corrupted write error\n");
+		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
 #endif

 	pblk_complete_write(pblk, rqd, c_ctx);


@@ -279,8 +279,8 @@ static void rrpc_end_sync_bio(struct bio *bio)
 {
 	struct completion *waiting = bio->bi_private;

-	if (bio->bi_error)
-		pr_err("nvm: gc request failed (%u).\n", bio->bi_error);
+	if (bio->bi_status)
+		pr_err("nvm: gc request failed (%u).\n", bio->bi_status);

 	complete(waiting);
 }
@@ -359,7 +359,7 @@ try:
 			goto finished;
 		}
 		wait_for_completion_io(&wait);
-		if (bio->bi_error) {
+		if (bio->bi_status) {
 			rrpc_inflight_laddr_release(rrpc, rqd);
 			goto finished;
 		}
@@ -385,7 +385,7 @@ try:
 		wait_for_completion_io(&wait);

 		rrpc_inflight_laddr_release(rrpc, rqd);
-		if (bio->bi_error)
+		if (bio->bi_status)
 			goto finished;

 		bio_reset(bio);


@@ -849,10 +849,11 @@ static inline void wake_up_allocators(struct cache_set *c)

 /* Forward declarations */

-void bch_count_io_errors(struct cache *, int, const char *);
+void bch_count_io_errors(struct cache *, blk_status_t, const char *);
 void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
-			      int, const char *);
-void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
+			      blk_status_t, const char *);
+void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
+		const char *);
 void bch_bbio_free(struct bio *, struct cache_set *);
 struct bio *bch_bbio_alloc(struct cache_set *);


@@ -307,7 +307,7 @@ static void bch_btree_node_read(struct btree *b)
 	bch_submit_bbio(bio, b->c, &b->key, 0);
 	closure_sync(&cl);

-	if (bio->bi_error)
+	if (bio->bi_status)
 		set_btree_node_io_error(b);

 	bch_bbio_free(bio, b->c);
@@ -374,10 +374,10 @@ static void btree_node_write_endio(struct bio *bio)
 	struct closure *cl = bio->bi_private;
 	struct btree *b = container_of(cl, struct btree, io);

-	if (bio->bi_error)
+	if (bio->bi_status)
 		set_btree_node_io_error(b);

-	bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree");
+	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
 	closure_put(cl);
 }


@@ -50,7 +50,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,

 /* IO errors */

-void bch_count_io_errors(struct cache *ca, int error, const char *m)
+void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
 {
 	/*
 	 * The halflife of an error is:
@@ -103,7 +103,7 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m)
 }

 void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
-			      int error, const char *m)
+			      blk_status_t error, const char *m)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct cache *ca = PTR_CACHE(c, &b->key, 0);
@@ -132,7 +132,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
 }

 void bch_bbio_endio(struct cache_set *c, struct bio *bio,
-		    int error, const char *m)
+		    blk_status_t error, const char *m)
 {
 	struct closure *cl = bio->bi_private;


@@ -549,7 +549,7 @@ static void journal_write_endio(struct bio *bio)
 {
 	struct journal_write *w = bio->bi_private;

-	cache_set_err_on(bio->bi_error, w->c, "journal io error");
+	cache_set_err_on(bio->bi_status, w->c, "journal io error");

 	closure_put(&w->c->journal.io);
 }


@@ -63,14 +63,14 @@ static void read_moving_endio(struct bio *bio)
 	struct moving_io *io = container_of(bio->bi_private,
 					    struct moving_io, cl);

-	if (bio->bi_error)
-		io->op.error = bio->bi_error;
+	if (bio->bi_status)
+		io->op.status = bio->bi_status;
 	else if (!KEY_DIRTY(&b->key) &&
 		 ptr_stale(io->op.c, &b->key, 0)) {
-		io->op.error = -EINTR;
+		io->op.status = BLK_STS_IOERR;
 	}

-	bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move");
+	bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
 }

 static void moving_init(struct moving_io *io)
@@ -92,7 +92,7 @@ static void write_moving(struct closure *cl)
 	struct moving_io *io = container_of(cl, struct moving_io, cl);
 	struct data_insert_op *op = &io->op;

-	if (!op->error) {
+	if (!op->status) {
 		moving_init(io);

 		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);


@@ -81,7 +81,7 @@ static void bch_data_insert_keys(struct closure *cl)
 		if (ret == -ESRCH) {
 			op->replace_collision = true;
 		} else if (ret) {
-			op->error = -ENOMEM;
+			op->status = BLK_STS_RESOURCE;
 			op->insert_data_done = true;
 		}
@@ -178,17 +178,17 @@ static void bch_data_insert_endio(struct bio *bio)
 	struct closure *cl = bio->bi_private;
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		/* TODO: We could try to recover from this. */
 		if (op->writeback)
-			op->error = bio->bi_error;
+			op->status = bio->bi_status;
 		else if (!op->replace)
 			set_closure_fn(cl, bch_data_insert_error, op->wq);
 		else
 			set_closure_fn(cl, NULL, NULL);
 	}

-	bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
+	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 }

 static void bch_data_insert_start(struct closure *cl)
@@ -488,15 +488,15 @@ static void bch_cache_read_endio(struct bio *bio)
 	 * from the backing device.
 	 */

-	if (bio->bi_error)
-		s->iop.error = bio->bi_error;
+	if (bio->bi_status)
+		s->iop.status = bio->bi_status;
 	else if (!KEY_DIRTY(&b->key) &&
 		 ptr_stale(s->iop.c, &b->key, 0)) {
 		atomic_long_inc(&s->iop.c->cache_read_races);
-		s->iop.error = -EINTR;
+		s->iop.status = BLK_STS_IOERR;
 	}

-	bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
+	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
 }

 /*
@@ -593,9 +593,9 @@ static void request_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;

-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		struct search *s = container_of(cl, struct search, cl);
-		s->iop.error = bio->bi_error;
+		s->iop.status = bio->bi_status;
 		/* Only cache read errors are recoverable */
 		s->recoverable = false;
 	}
@@ -611,7 +611,7 @@ static void bio_complete(struct search *s)
 				    &s->d->disk->part0, s->start_time);

 		trace_bcache_request_end(s->d, s->orig_bio);
-		s->orig_bio->bi_error = s->iop.error;
+		s->orig_bio->bi_status = s->iop.status;
 		bio_endio(s->orig_bio);
 		s->orig_bio = NULL;
 	}
@@ -664,7 +664,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->iop.inode = d->id;
 	s->iop.write_point = hash_long((unsigned long) current, 16);
 	s->iop.write_prio = 0;
-	s->iop.error = 0;
+	s->iop.status = 0;
 	s->iop.flags = 0;
 	s->iop.flush_journal = op_is_flush(bio->bi_opf);
 	s->iop.wq = bcache_wq;
@@ -707,7 +707,7 @@ static void cached_dev_read_error(struct closure *cl)
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);

-		s->iop.error = 0;
+		s->iop.status = 0;
 		do_bio_hook(s, s->orig_bio);

 		/* XXX: invalidate cache */
@@ -767,7 +767,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
 				  !s->cache_miss, s->iop.bypass);
 	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

-	if (s->iop.error)
+	if (s->iop.status)
 		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
 	else if (s->iop.bio || verify(dc, &s->bio.bio))
 		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);


@@ -10,7 +10,7 @@ struct data_insert_op {
 	unsigned		inode;
 	uint16_t		write_point;
 	uint16_t		write_prio;
-	short			error;
+	blk_status_t		status;

 	union {
 		uint16_t	flags;


@@ -271,7 +271,7 @@ static void write_super_endio(struct bio *bio)
 {
 	struct cache *ca = bio->bi_private;

-	bch_count_io_errors(ca, bio->bi_error, "writing superblock");
+	bch_count_io_errors(ca, bio->bi_status, "writing superblock");
 	closure_put(&ca->set->sb_write);
 }
@@ -321,7 +321,7 @@ static void uuid_endio(struct bio *bio)
 	struct closure *cl = bio->bi_private;
 	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

-	cache_set_err_on(bio->bi_error, c, "accessing uuids");
+	cache_set_err_on(bio->bi_status, c, "accessing uuids");
 	bch_bbio_free(bio, c);
 	closure_put(cl);
 }
@@ -494,7 +494,7 @@ static void prio_endio(struct bio *bio)
 {
 	struct cache *ca = bio->bi_private;

-	cache_set_err_on(bio->bi_error, ca->set, "accessing priorities");
+	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
 	bch_bbio_free(bio, ca->set);
 	closure_put(&ca->prio);
 }


@@ -167,7 +167,7 @@ static void dirty_endio(struct bio *bio)
 	struct keybuf_key *w = bio->bi_private;
 	struct dirty_io *io = w->private;

-	if (bio->bi_error)
+	if (bio->bi_status)
 		SET_KEY_DIRTY(&w->key, false);

 	closure_put(&io->cl);
@@ -195,7 +195,7 @@ static void read_dirty_endio(struct bio *bio)
 	struct dirty_io *io = w->private;

 	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
-			    bio->bi_error, "reading dirty data from cache");
+			    bio->bi_status, "reading dirty data from cache");

 	dirty_endio(bio);
 }


@@ -229,7 +229,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
 EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);

 void dm_cell_error(struct dm_bio_prison *prison,
-		   struct dm_bio_prison_cell *cell, int error)
+		   struct dm_bio_prison_cell *cell, blk_status_t error)
 {
 	struct bio_list bios;
 	struct bio *bio;
@@ -238,7 +238,7 @@ void dm_cell_error(struct dm_bio_prison *prison,
 	dm_cell_release(prison, cell, &bios);

 	while ((bio = bio_list_pop(&bios))) {
-		bio->bi_error = error;
+		bio->bi_status = error;
 		bio_endio(bio);
 	}
 }


@@ -91,7 +91,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
 			       struct dm_bio_prison_cell *cell,
 			       struct bio_list *inmates);
 void dm_cell_error(struct dm_bio_prison *prison,
-		   struct dm_bio_prison_cell *cell, int error);
+		   struct dm_bio_prison_cell *cell, blk_status_t error);

 /*
  * Visits the cell and then releases. Guarantees no new inmates are


@@ -145,8 +145,8 @@ struct dm_buffer {
 	enum data_mode data_mode;
 	unsigned char list_mode;		/* LIST_* */
 	unsigned hold_count;
-	int read_error;
-	int write_error;
+	blk_status_t read_error;
+	blk_status_t write_error;
 	unsigned long state;
 	unsigned long last_accessed;
 	struct dm_bufio_client *c;
@@ -555,7 +555,7 @@ static void dmio_complete(unsigned long error, void *context)
 {
 	struct dm_buffer *b = context;

-	b->bio.bi_error = error ? -EIO : 0;
+	b->bio.bi_status = error ? BLK_STS_IOERR : 0;
 	b->bio.bi_end_io(&b->bio);
 }
@@ -588,7 +588,7 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 	r = dm_io(&io_req, 1, &region, NULL);
 	if (r) {
-		b->bio.bi_error = r;
+		b->bio.bi_status = errno_to_blk_status(r);
 		end_io(&b->bio);
 	}
 }
@@ -596,7 +596,7 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 static void inline_endio(struct bio *bio)
 {
 	bio_end_io_t *end_fn = bio->bi_private;
-	int error = bio->bi_error;
+	blk_status_t status = bio->bi_status;

 	/*
 	 * Reset the bio to free any attached resources
@@ -604,7 +604,7 @@ static void inline_endio(struct bio *bio)
 	 */
 	bio_reset(bio);

-	bio->bi_error = error;
+	bio->bi_status = status;
 	end_fn(bio);
 }
@@ -685,11 +685,12 @@ static void write_endio(struct bio *bio)
 {
 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

-	b->write_error = bio->bi_error;
-	if (unlikely(bio->bi_error)) {
+	b->write_error = bio->bi_status;
+	if (unlikely(bio->bi_status)) {
 		struct dm_bufio_client *c = b->c;
-		int error = bio->bi_error;
-		(void)cmpxchg(&c->async_write_error, 0, error);
+
+		(void)cmpxchg(&c->async_write_error, 0,
+				blk_status_to_errno(bio->bi_status));
 	}

 	BUG_ON(!test_bit(B_WRITING, &b->state));
@@ -1063,7 +1064,7 @@ static void read_endio(struct bio *bio)
 {
 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

-	b->read_error = bio->bi_error;
+	b->read_error = bio->bi_status;

 	BUG_ON(!test_bit(B_READING, &b->state));
@@ -1107,7 +1108,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
 	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

 	if (b->read_error) {
-		int error = b->read_error;
+		int error = blk_status_to_errno(b->read_error);

 		dm_bufio_release(b);
@@ -1257,7 +1258,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
 */
 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
 {
-	int a, f;
+	blk_status_t a;
+	int f;
 	unsigned long buffers_processed = 0;
 	struct dm_buffer *b, *tmp;


@@ -119,7 +119,7 @@ static void iot_io_end(struct io_tracker *iot, sector_t len)
 */
 struct continuation {
 	struct work_struct ws;
-	int input;
+	blk_status_t input;
 };

 static inline void init_continuation(struct continuation *k,
@@ -145,7 +145,7 @@ struct batcher {
 	/*
 	 * The operation that everyone is waiting for.
 	 */
-	int (*commit_op)(void *context);
+	blk_status_t (*commit_op)(void *context);
 	void *commit_context;

 	/*
@@ -171,8 +171,7 @@ struct batcher {
 static void __commit(struct work_struct *_ws)
 {
 	struct batcher *b = container_of(_ws, struct batcher, commit_work);
-
-	int r;
+	blk_status_t r;
 	unsigned long flags;
 	struct list_head work_items;
 	struct work_struct *ws, *tmp;
@@ -205,7 +204,7 @@ static void __commit(struct work_struct *_ws)

 	while ((bio = bio_list_pop(&bios))) {
 		if (r) {
-			bio->bi_error = r;
+			bio->bi_status = r;
 			bio_endio(bio);
 		} else
 			b->issue_op(bio, b->issue_context);
@@ -213,7 +212,7 @@ static void __commit(struct work_struct *_ws)
 }

 static void batcher_init(struct batcher *b,
-			 int (*commit_op)(void *),
+			 blk_status_t (*commit_op)(void *),
 			 void *commit_context,
 			 void (*issue_op)(struct bio *bio, void *),
 			 void *issue_context,
@@ -955,7 +954,7 @@ static void writethrough_endio(struct bio *bio)

 	dm_unhook_bio(&pb->hook_info, bio);

-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		bio_endio(bio);
 		return;
 	}
@@ -1220,7 +1219,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 	struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);

 	if (read_err || write_err)
-		mg->k.input = -EIO;
+		mg->k.input = BLK_STS_IOERR;

 	queue_continuation(mg->cache->wq, &mg->k);
 }
@@ -1266,8 +1265,8 @@ static void overwrite_endio(struct bio *bio)

 	dm_unhook_bio(&pb->hook_info, bio);

-	if (bio->bi_error)
-		mg->k.input = bio->bi_error;
+	if (bio->bi_status)
+		mg->k.input = bio->bi_status;

 	queue_continuation(mg->cache->wq, &mg->k);
 }
@@ -1323,8 +1322,10 @@ static void mg_complete(struct dm_cache_migration *mg, bool success)
 		if (mg->overwrite_bio) {
 			if (success)
 				force_set_dirty(cache, cblock);
+			else if (mg->k.input)
+				mg->overwrite_bio->bi_status = mg->k.input;
 			else
-				mg->overwrite_bio->bi_error = (mg->k.input ? : -EIO);
+				mg->overwrite_bio->bi_status = BLK_STS_IOERR;
 			bio_endio(mg->overwrite_bio);
 		} else {
 			if (success)
@@ -1504,7 +1505,7 @@ static void mg_copy(struct work_struct *ws)
 		r = copy(mg, is_policy_promote);
 		if (r) {
 			DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
-			mg->k.input = -EIO;
+			mg->k.input = BLK_STS_IOERR;
 			mg_complete(mg, false);
 		}
 	}
@@ -1907,12 +1908,12 @@ static int commit(struct cache *cache, bool clean_shutdown)
 /*
  * Used by the batcher.
 */
-static int commit_op(void *context)
+static blk_status_t commit_op(void *context)
 {
 	struct cache *cache = context;

 	if (dm_cache_changed_this_transaction(cache->cmd))
-		return commit(cache, false);
+		return errno_to_blk_status(commit(cache, false));

 	return 0;
 }
@@ -2018,7 +2019,7 @@ static void requeue_deferred_bios(struct cache *cache)
 	bio_list_init(&cache->deferred_bios);

 	while ((bio = bio_list_pop(&bios))) {
-		bio->bi_error = DM_ENDIO_REQUEUE;
+		bio->bi_status = BLK_STS_DM_REQUEUE;
 		bio_endio(bio);
 	}
 }
@@ -2820,7 +2821,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	return r;
 }

-static int cache_end_io(struct dm_target *ti, struct bio *bio, int *error)
+static int cache_end_io(struct dm_target *ti, struct bio *bio,
+		blk_status_t *error)
 {
 	struct cache *cache = ti->private;
 	unsigned long flags;

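The dm-cache hunks above show the bridging pattern this series leans on throughout: internal helpers keep returning negative errnos, and the result is translated once at the boundary, as commit_op() does with errno_to_blk_status(). Below is a minimal user-space model of that two-way translation, compilable as-is; errno_to_blk_status() and blk_status_to_errno() are real block-layer helpers, but the table and the numeric BLK_STS_* values here are illustrative stand-ins rather than the kernel's definitions.

#include <errno.h>
#include <stdio.h>

typedef unsigned char blk_status_t;      /* modeled on the kernel's u8 status */

#define BLK_STS_OK      0
#define BLK_STS_NOTSUPP 1                /* numeric values are illustrative */
#define BLK_STS_NOSPC   3
#define BLK_STS_IOERR   10

static blk_status_t errno_to_blk_status(int errnum)
{
        switch (errnum) {
        case 0:           return BLK_STS_OK;
        case -EOPNOTSUPP: return BLK_STS_NOTSUPP;
        case -ENOSPC:     return BLK_STS_NOSPC;
        default:          return BLK_STS_IOERR;
        }
}

static int blk_status_to_errno(blk_status_t status)
{
        switch (status) {
        case BLK_STS_OK:      return 0;
        case BLK_STS_NOTSUPP: return -EOPNOTSUPP;
        case BLK_STS_NOSPC:   return -ENOSPC;
        default:              return -EIO;
        }
}

int main(void)
{
        /* commit_op()-style bridge: errno-returning core, status-returning hook */
        int r = -ENOSPC;                 /* pretend the metadata commit failed */
        blk_status_t s = errno_to_blk_status(r);

        printf("errno %d -> status %d -> errno %d\n", r, s, blk_status_to_errno(s));
        return 0;
}

Keeping the status a small dedicated type is the point of the round trip: it cannot be confused with an errno, yet it can always be translated back for errno-based callers.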

@ -71,7 +71,7 @@ struct dm_crypt_io {
struct convert_context ctx; struct convert_context ctx;
atomic_t io_pending; atomic_t io_pending;
int error; blk_status_t error;
sector_t sector; sector_t sector;
struct rb_node rb_node; struct rb_node rb_node;
@ -1292,7 +1292,7 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
/* /*
* Encrypt / decrypt data from one bio to another one (can be the same one) * Encrypt / decrypt data from one bio to another one (can be the same one)
*/ */
static int crypt_convert(struct crypt_config *cc, static blk_status_t crypt_convert(struct crypt_config *cc,
struct convert_context *ctx) struct convert_context *ctx)
{ {
unsigned int tag_offset = 0; unsigned int tag_offset = 0;
@ -1343,13 +1343,13 @@ static int crypt_convert(struct crypt_config *cc,
*/ */
case -EBADMSG: case -EBADMSG:
atomic_dec(&ctx->cc_pending); atomic_dec(&ctx->cc_pending);
return -EILSEQ; return BLK_STS_PROTECTION;
/* /*
* There was an error while processing the request. * There was an error while processing the request.
*/ */
default: default:
atomic_dec(&ctx->cc_pending); atomic_dec(&ctx->cc_pending);
return -EIO; return BLK_STS_IOERR;
} }
} }
@ -1463,7 +1463,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
{ {
struct crypt_config *cc = io->cc; struct crypt_config *cc = io->cc;
struct bio *base_bio = io->base_bio; struct bio *base_bio = io->base_bio;
int error = io->error; blk_status_t error = io->error;
if (!atomic_dec_and_test(&io->io_pending)) if (!atomic_dec_and_test(&io->io_pending))
return; return;
@ -1476,7 +1476,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
else else
kfree(io->integrity_metadata); kfree(io->integrity_metadata);
base_bio->bi_error = error; base_bio->bi_status = error;
bio_endio(base_bio); bio_endio(base_bio);
} }
@ -1502,7 +1502,7 @@ static void crypt_endio(struct bio *clone)
struct dm_crypt_io *io = clone->bi_private; struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->cc; struct crypt_config *cc = io->cc;
unsigned rw = bio_data_dir(clone); unsigned rw = bio_data_dir(clone);
int error; blk_status_t error;
/* /*
* free the processed pages * free the processed pages
@ -1510,7 +1510,7 @@ static void crypt_endio(struct bio *clone)
if (rw == WRITE) if (rw == WRITE)
crypt_free_buffer_pages(cc, clone); crypt_free_buffer_pages(cc, clone);
error = clone->bi_error; error = clone->bi_status;
bio_put(clone); bio_put(clone);
if (rw == READ && !error) { if (rw == READ && !error) {
@ -1570,7 +1570,7 @@ static void kcryptd_io_read_work(struct work_struct *work)
crypt_inc_pending(io); crypt_inc_pending(io);
if (kcryptd_io_read(io, GFP_NOIO)) if (kcryptd_io_read(io, GFP_NOIO))
io->error = -ENOMEM; io->error = BLK_STS_RESOURCE;
crypt_dec_pending(io); crypt_dec_pending(io);
} }
@ -1656,7 +1656,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
sector_t sector; sector_t sector;
struct rb_node **rbp, *parent; struct rb_node **rbp, *parent;
if (unlikely(io->error < 0)) { if (unlikely(io->error)) {
crypt_free_buffer_pages(cc, clone); crypt_free_buffer_pages(cc, clone);
bio_put(clone); bio_put(clone);
crypt_dec_pending(io); crypt_dec_pending(io);
@ -1697,7 +1697,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
struct bio *clone; struct bio *clone;
int crypt_finished; int crypt_finished;
sector_t sector = io->sector; sector_t sector = io->sector;
int r; blk_status_t r;
/* /*
* Prevent io from disappearing until this function completes. * Prevent io from disappearing until this function completes.
@ -1707,7 +1707,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
if (unlikely(!clone)) { if (unlikely(!clone)) {
io->error = -EIO; io->error = BLK_STS_IOERR;
goto dec; goto dec;
} }
@ -1718,7 +1718,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
crypt_inc_pending(io); crypt_inc_pending(io);
r = crypt_convert(cc, &io->ctx); r = crypt_convert(cc, &io->ctx);
if (r < 0) if (r)
io->error = r; io->error = r;
crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
@ -1740,7 +1740,7 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{ {
struct crypt_config *cc = io->cc; struct crypt_config *cc = io->cc;
int r = 0; blk_status_t r;
crypt_inc_pending(io); crypt_inc_pending(io);
@ -1748,7 +1748,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
io->sector); io->sector);
r = crypt_convert(cc, &io->ctx); r = crypt_convert(cc, &io->ctx);
if (r < 0) if (r)
io->error = r; io->error = r;
if (atomic_dec_and_test(&io->ctx.cc_pending)) if (atomic_dec_and_test(&io->ctx.cc_pending))
@ -1781,9 +1781,9 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
if (error == -EBADMSG) { if (error == -EBADMSG) {
DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu", DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
(unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq))); (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
io->error = -EILSEQ; io->error = BLK_STS_PROTECTION;
} else if (error < 0) } else if (error < 0)
io->error = -EIO; io->error = BLK_STS_IOERR;
crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

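dm-crypt now carries a blk_status_t in struct dm_crypt_io and completes the base bio with it once io_pending drops to zero, with crypt_convert() mapping -EBADMSG to BLK_STS_PROTECTION and other crypto failures to BLK_STS_IOERR. Here is a self-contained sketch of that record-the-first-failure, complete-on-the-last-reference pattern; the struct and function names are invented for the sketch and dm-crypt's locking is omitted.

#include <stdatomic.h>
#include <stdio.h>

typedef unsigned char blk_status_t;

#define BLK_STS_OK         0
#define BLK_STS_PROTECTION 8             /* illustrative values */

struct io {
        atomic_int pending;              /* outstanding sub-requests */
        blk_status_t status;             /* sticky: the first failure wins */
};

/* record one sub-request's result; finish the whole io on the last put */
static void io_put(struct io *io, blk_status_t status)
{
        if (status && !io->status)
                io->status = status;
        if (atomic_fetch_sub(&io->pending, 1) == 1)
                printf("base bio completes with status %d\n", io->status);
}

int main(void)
{
        struct io io = { .pending = 2, .status = BLK_STS_OK };

        io_put(&io, BLK_STS_PROTECTION); /* e.g. an AEAD integrity failure */
        io_put(&io, BLK_STS_OK);         /* clean path; the failure still wins */
        return 0;
}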

@ -358,7 +358,8 @@ map_bio:
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;
} }
static int flakey_end_io(struct dm_target *ti, struct bio *bio, int *error) static int flakey_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{ {
struct flakey_c *fc = ti->private; struct flakey_c *fc = ti->private;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
@ -377,7 +378,7 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int *error)
* Error read during the down_interval if drop_writes * Error read during the down_interval if drop_writes
* and error_writes were not configured. * and error_writes were not configured.
*/ */
*error = -EIO; *error = BLK_STS_IOERR;
} }
} }

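flakey_end_io() above illustrates the new per-target completion contract: the hook receives a blk_status_t *error that it may rewrite in place before device-mapper finishes the bio, and returns a DM_ENDIO_* disposition. A user-space sketch of a hook in that shape follows; DM_ENDIO_DONE mirrors the kernel name, while the bio layout and the fault-injection condition are simplified inventions.

#include <stdio.h>

typedef unsigned char blk_status_t;

#define BLK_STS_OK    0
#define BLK_STS_IOERR 10                 /* illustrative value */

enum { DM_ENDIO_DONE = 0 };              /* disposition subset for the sketch */

struct bio { int is_read; };

/* end_io hook in the new shape: may rewrite *error in place */
static int sketch_end_io(struct bio *bio, blk_status_t *error, int drop_reads)
{
        if (drop_reads && bio->is_read && *error == BLK_STS_OK)
                *error = BLK_STS_IOERR;  /* inject a failure, flakey-style */
        return DM_ENDIO_DONE;
}

int main(void)
{
        struct bio bio = { .is_read = 1 };
        blk_status_t sts = BLK_STS_OK;

        sketch_end_io(&bio, &sts, 1);
        printf("status after the hook: %d\n", sts);
        return 0;
}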

@ -246,7 +246,7 @@ struct dm_integrity_io {
unsigned metadata_offset; unsigned metadata_offset;
atomic_t in_flight; atomic_t in_flight;
int bi_error; blk_status_t bi_status;
struct completion *completion; struct completion *completion;
@ -1114,8 +1114,8 @@ static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *
static void do_endio(struct dm_integrity_c *ic, struct bio *bio) static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{ {
int r = dm_integrity_failed(ic); int r = dm_integrity_failed(ic);
if (unlikely(r) && !bio->bi_error) if (unlikely(r) && !bio->bi_status)
bio->bi_error = r; bio->bi_status = errno_to_blk_status(r);
bio_endio(bio); bio_endio(bio);
} }
@ -1123,7 +1123,7 @@ static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *di
{ {
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
if (unlikely(dio->fua) && likely(!bio->bi_error) && likely(!dm_integrity_failed(ic))) if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
submit_flush_bio(ic, dio); submit_flush_bio(ic, dio);
else else
do_endio(ic, bio); do_endio(ic, bio);
@ -1142,9 +1142,9 @@ static void dec_in_flight(struct dm_integrity_io *dio)
bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
if (unlikely(dio->bi_error) && !bio->bi_error) if (unlikely(dio->bi_status) && !bio->bi_status)
bio->bi_error = dio->bi_error; bio->bi_status = dio->bi_status;
if (likely(!bio->bi_error) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) { if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
dio->range.logical_sector += dio->range.n_sectors; dio->range.logical_sector += dio->range.n_sectors;
bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT); bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
INIT_WORK(&dio->work, integrity_bio_wait); INIT_WORK(&dio->work, integrity_bio_wait);
@ -1318,7 +1318,7 @@ skip_io:
dec_in_flight(dio); dec_in_flight(dio);
return; return;
error: error:
dio->bi_error = r; dio->bi_status = errno_to_blk_status(r);
dec_in_flight(dio); dec_in_flight(dio);
} }
@ -1331,7 +1331,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
sector_t area, offset; sector_t area, offset;
dio->ic = ic; dio->ic = ic;
dio->bi_error = 0; dio->bi_status = 0;
if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
submit_flush_bio(ic, dio); submit_flush_bio(ic, dio);


@ -124,7 +124,7 @@ static void complete_io(struct io *io)
fn(error_bits, context); fn(error_bits, context);
} }
static void dec_count(struct io *io, unsigned int region, int error) static void dec_count(struct io *io, unsigned int region, blk_status_t error)
{ {
if (error) if (error)
set_bit(region, &io->error_bits); set_bit(region, &io->error_bits);
@ -137,9 +137,9 @@ static void endio(struct bio *bio)
{ {
struct io *io; struct io *io;
unsigned region; unsigned region;
int error; blk_status_t error;
if (bio->bi_error && bio_data_dir(bio) == READ) if (bio->bi_status && bio_data_dir(bio) == READ)
zero_fill_bio(bio); zero_fill_bio(bio);
/* /*
@ -147,7 +147,7 @@ static void endio(struct bio *bio)
*/ */
retrieve_io_and_region_from_bio(bio, &io, &region); retrieve_io_and_region_from_bio(bio, &io, &region);
error = bio->bi_error; error = bio->bi_status;
bio_put(bio); bio_put(bio);
dec_count(io, region, error); dec_count(io, region, error);
@ -319,7 +319,7 @@ static void do_region(int op, int op_flags, unsigned region,
if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES || if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
op == REQ_OP_WRITE_SAME) && op == REQ_OP_WRITE_SAME) &&
special_cmd_max_sectors == 0) { special_cmd_max_sectors == 0) {
dec_count(io, region, -EOPNOTSUPP); dec_count(io, region, BLK_STS_NOTSUPP);
return; return;
} }

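One detail worth noting in the dm-io endio() above: bio->bi_status is copied into a local before bio_put(), because dropping the last reference may free the bio. The same capture-then-release ordering in a standalone sketch, with the refcounting reduced to a plain counter:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned char blk_status_t;

struct bio {
        blk_status_t bi_status;
        int refcount;
};

static void bio_put(struct bio *bio)
{
        if (--bio->refcount == 0)
                free(bio);               /* the bio is gone after this */
}

static void endio(struct bio *bio)
{
        /* capture the status first: bio_put() may free the bio */
        blk_status_t error = bio->bi_status;

        bio_put(bio);
        printf("completing the region with status %d\n", error);
}

int main(void)
{
        struct bio *bio = malloc(sizeof(*bio));

        bio->bi_status = 10;             /* stand-in for BLK_STS_IOERR */
        bio->refcount = 1;
        endio(bio);
        return 0;
}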

@ -150,10 +150,10 @@ static void log_end_io(struct bio *bio)
{ {
struct log_writes_c *lc = bio->bi_private; struct log_writes_c *lc = bio->bi_private;
if (bio->bi_error) { if (bio->bi_status) {
unsigned long flags; unsigned long flags;
DMERR("Error writing log block, error=%d", bio->bi_error); DMERR("Error writing log block, error=%d", bio->bi_status);
spin_lock_irqsave(&lc->blocks_lock, flags); spin_lock_irqsave(&lc->blocks_lock, flags);
lc->logging_enabled = false; lc->logging_enabled = false;
spin_unlock_irqrestore(&lc->blocks_lock, flags); spin_unlock_irqrestore(&lc->blocks_lock, flags);
@ -664,7 +664,8 @@ map_bio:
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;
} }
static int normal_end_io(struct dm_target *ti, struct bio *bio, int *error) static int normal_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{ {
struct log_writes_c *lc = ti->private; struct log_writes_c *lc = ti->private;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));


@ -565,7 +565,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
mpio->pgpath = pgpath; mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes; mpio->nr_bytes = nr_bytes;
bio->bi_error = 0; bio->bi_status = 0;
bio->bi_bdev = pgpath->path.dev->bdev; bio->bi_bdev = pgpath->path.dev->bdev;
bio->bi_opf |= REQ_FAILFAST_TRANSPORT; bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
@ -623,10 +623,10 @@ static void process_queued_bios(struct work_struct *work)
r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio)); r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
switch (r) { switch (r) {
case DM_MAPIO_KILL: case DM_MAPIO_KILL:
r = -EIO; bio->bi_status = BLK_STS_IOERR;
/*FALLTHRU*/ bio_endio(bio);
case DM_MAPIO_REQUEUE: case DM_MAPIO_REQUEUE:
bio->bi_error = r; bio->bi_status = BLK_STS_DM_REQUEUE;
bio_endio(bio); bio_endio(bio);
break; break;
case DM_MAPIO_REMAPPED: case DM_MAPIO_REMAPPED:
@ -1510,7 +1510,8 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
return r; return r;
} }
static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int *error) static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
blk_status_t *error)
{ {
struct multipath *m = ti->private; struct multipath *m = ti->private;
struct dm_mpath_io *mpio = get_mpio_from_bio(clone); struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
@ -1518,7 +1519,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int *er
unsigned long flags; unsigned long flags;
int r = DM_ENDIO_DONE; int r = DM_ENDIO_DONE;
if (!*error || noretry_error(errno_to_blk_status(*error))) if (!*error || noretry_error(*error))
goto done; goto done;
if (pgpath) if (pgpath)
@ -1527,7 +1528,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int *er
if (atomic_read(&m->nr_valid_paths) == 0 && if (atomic_read(&m->nr_valid_paths) == 0 &&
!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
dm_report_EIO(m); dm_report_EIO(m);
*error = -EIO; *error = BLK_STS_IOERR;
goto done; goto done;
} }

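process_queued_bios() above no longer lets DM_MAPIO_KILL fall through into the requeue case with a shared errno; each disposition now sets its own status and completes the bio itself. A compact, compilable model of that dispatch, with the DM_MAPIO_* names mirroring the diff and the BLK_STS_* values again being stand-ins:

#include <stdio.h>

typedef unsigned char blk_status_t;

#define BLK_STS_IOERR      10            /* illustrative values */
#define BLK_STS_DM_REQUEUE 11

enum dm_mapio { DM_MAPIO_SUBMITTED, DM_MAPIO_REMAPPED, DM_MAPIO_REQUEUE, DM_MAPIO_KILL };

struct bio { blk_status_t bi_status; };

static void bio_endio(struct bio *bio)
{
        printf("bio ended with status %d\n", bio->bi_status);
}

static void dispatch(struct bio *bio, enum dm_mapio r)
{
        switch (r) {
        case DM_MAPIO_KILL:              /* hard failure, no fall-through */
                bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                break;
        case DM_MAPIO_REQUEUE:           /* push back for a later retry */
                bio->bi_status = BLK_STS_DM_REQUEUE;
                bio_endio(bio);
                break;
        default:                         /* remapped/submitted: nothing to end */
                break;
        }
}

int main(void)
{
        struct bio bio = { 0 };

        dispatch(&bio, DM_MAPIO_KILL);
        dispatch(&bio, DM_MAPIO_REQUEUE);
        return 0;
}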

@ -490,9 +490,9 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio)
* If device is suspended, complete the bio. * If device is suspended, complete the bio.
*/ */
if (dm_noflush_suspending(ms->ti)) if (dm_noflush_suspending(ms->ti))
bio->bi_error = DM_ENDIO_REQUEUE; bio->bi_status = BLK_STS_DM_REQUEUE;
else else
bio->bi_error = -EIO; bio->bi_status = BLK_STS_IOERR;
bio_endio(bio); bio_endio(bio);
return; return;
@ -626,7 +626,7 @@ static void write_callback(unsigned long error, void *context)
* degrade the array. * degrade the array.
*/ */
if (bio_op(bio) == REQ_OP_DISCARD) { if (bio_op(bio) == REQ_OP_DISCARD) {
bio->bi_error = -EOPNOTSUPP; bio->bi_status = BLK_STS_NOTSUPP;
bio_endio(bio); bio_endio(bio);
return; return;
} }
@ -1236,7 +1236,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;
} }
static int mirror_end_io(struct dm_target *ti, struct bio *bio, int *error) static int mirror_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{ {
int rw = bio_data_dir(bio); int rw = bio_data_dir(bio);
struct mirror_set *ms = (struct mirror_set *) ti->private; struct mirror_set *ms = (struct mirror_set *) ti->private;
@ -1255,7 +1256,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int *error)
return DM_ENDIO_DONE; return DM_ENDIO_DONE;
} }
if (*error == -EOPNOTSUPP) if (*error == BLK_STS_NOTSUPP)
return DM_ENDIO_DONE; return DM_ENDIO_DONE;
if (bio->bi_opf & REQ_RAHEAD) if (bio->bi_opf & REQ_RAHEAD)
@ -1277,7 +1278,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int *error)
bd = &bio_record->details; bd = &bio_record->details;
dm_bio_restore(bd, bio); dm_bio_restore(bd, bio);
bio->bi_error = 0; bio->bi_status = 0;
queue_bio(ms, bio, rw); queue_bio(ms, bio, rw);
return DM_ENDIO_INCOMPLETE; return DM_ENDIO_INCOMPLETE;


@ -119,7 +119,7 @@ static void end_clone_bio(struct bio *clone)
struct dm_rq_target_io *tio = info->tio; struct dm_rq_target_io *tio = info->tio;
struct bio *bio = info->orig; struct bio *bio = info->orig;
unsigned int nr_bytes = info->orig->bi_iter.bi_size; unsigned int nr_bytes = info->orig->bi_iter.bi_size;
blk_status_t error = errno_to_blk_status(clone->bi_error); blk_status_t error = clone->bi_status;
bio_put(clone); bio_put(clone);


@ -1590,7 +1590,7 @@ static void full_bio_end_io(struct bio *bio)
{ {
void *callback_data = bio->bi_private; void *callback_data = bio->bi_private;
dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0); dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
} }
static void start_full_bio(struct dm_snap_pending_exception *pe, static void start_full_bio(struct dm_snap_pending_exception *pe,
@ -1851,7 +1851,8 @@ out_unlock:
return r; return r;
} }
static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int *error) static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{ {
struct dm_snapshot *s = ti->private; struct dm_snapshot *s = ti->private;


@ -375,7 +375,8 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
} }
} }
static int stripe_end_io(struct dm_target *ti, struct bio *bio, int *error) static int stripe_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{ {
unsigned i; unsigned i;
char major_minor[16]; char major_minor[16];
@ -387,7 +388,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int *error)
if (bio->bi_opf & REQ_RAHEAD) if (bio->bi_opf & REQ_RAHEAD)
return DM_ENDIO_DONE; return DM_ENDIO_DONE;
if (*error == -EOPNOTSUPP) if (*error == BLK_STS_NOTSUPP)
return DM_ENDIO_DONE; return DM_ENDIO_DONE;
memset(major_minor, 0, sizeof(major_minor)); memset(major_minor, 0, sizeof(major_minor));


@ -383,8 +383,8 @@ static void end_discard(struct discard_op *op, int r)
* Even if r is set, there could be sub discards in flight that we * Even if r is set, there could be sub discards in flight that we
* need to wait for. * need to wait for.
*/ */
if (r && !op->parent_bio->bi_error) if (r && !op->parent_bio->bi_status)
op->parent_bio->bi_error = r; op->parent_bio->bi_status = errno_to_blk_status(r);
bio_endio(op->parent_bio); bio_endio(op->parent_bio);
} }
@ -450,22 +450,20 @@ static void cell_release_no_holder(struct pool *pool,
} }
static void cell_error_with_code(struct pool *pool, static void cell_error_with_code(struct pool *pool,
struct dm_bio_prison_cell *cell, int error_code) struct dm_bio_prison_cell *cell, blk_status_t error_code)
{ {
dm_cell_error(pool->prison, cell, error_code); dm_cell_error(pool->prison, cell, error_code);
dm_bio_prison_free_cell(pool->prison, cell); dm_bio_prison_free_cell(pool->prison, cell);
} }
static int get_pool_io_error_code(struct pool *pool) static blk_status_t get_pool_io_error_code(struct pool *pool)
{ {
return pool->out_of_data_space ? -ENOSPC : -EIO; return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR;
} }
static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell) static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{ {
int error = get_pool_io_error_code(pool); cell_error_with_code(pool, cell, get_pool_io_error_code(pool));
cell_error_with_code(pool, cell, error);
} }
static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell) static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
@ -475,7 +473,7 @@ static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell) static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
{ {
cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE); cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);
} }
/*----------------------------------------------------------------*/ /*----------------------------------------------------------------*/
@ -555,17 +553,18 @@ static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
bio_list_init(master); bio_list_init(master);
} }
static void error_bio_list(struct bio_list *bios, int error) static void error_bio_list(struct bio_list *bios, blk_status_t error)
{ {
struct bio *bio; struct bio *bio;
while ((bio = bio_list_pop(bios))) { while ((bio = bio_list_pop(bios))) {
bio->bi_error = error; bio->bi_status = error;
bio_endio(bio); bio_endio(bio);
} }
} }
static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error) static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
blk_status_t error)
{ {
struct bio_list bios; struct bio_list bios;
unsigned long flags; unsigned long flags;
@ -608,11 +607,11 @@ static void requeue_io(struct thin_c *tc)
__merge_bio_list(&bios, &tc->retry_on_resume_list); __merge_bio_list(&bios, &tc->retry_on_resume_list);
spin_unlock_irqrestore(&tc->lock, flags); spin_unlock_irqrestore(&tc->lock, flags);
error_bio_list(&bios, DM_ENDIO_REQUEUE); error_bio_list(&bios, BLK_STS_DM_REQUEUE);
requeue_deferred_cells(tc); requeue_deferred_cells(tc);
} }
static void error_retry_list_with_code(struct pool *pool, int error) static void error_retry_list_with_code(struct pool *pool, blk_status_t error)
{ {
struct thin_c *tc; struct thin_c *tc;
@ -624,9 +623,7 @@ static void error_retry_list_with_code(struct pool *pool, int error)
static void error_retry_list(struct pool *pool) static void error_retry_list(struct pool *pool)
{ {
int error = get_pool_io_error_code(pool); error_retry_list_with_code(pool, get_pool_io_error_code(pool));
error_retry_list_with_code(pool, error);
} }
/* /*
@ -774,7 +771,7 @@ struct dm_thin_new_mapping {
*/ */
atomic_t prepare_actions; atomic_t prepare_actions;
int err; blk_status_t status;
struct thin_c *tc; struct thin_c *tc;
dm_block_t virt_begin, virt_end; dm_block_t virt_begin, virt_end;
dm_block_t data_block; dm_block_t data_block;
@ -814,7 +811,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
{ {
struct dm_thin_new_mapping *m = context; struct dm_thin_new_mapping *m = context;
m->err = read_err || write_err ? -EIO : 0; m->status = read_err || write_err ? BLK_STS_IOERR : 0;
complete_mapping_preparation(m); complete_mapping_preparation(m);
} }
@ -825,7 +822,7 @@ static void overwrite_endio(struct bio *bio)
bio->bi_end_io = m->saved_bi_end_io; bio->bi_end_io = m->saved_bi_end_io;
m->err = bio->bi_error; m->status = bio->bi_status;
complete_mapping_preparation(m); complete_mapping_preparation(m);
} }
@ -925,7 +922,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
struct bio *bio = m->bio; struct bio *bio = m->bio;
int r; int r;
if (m->err) { if (m->status) {
cell_error(pool, m->cell); cell_error(pool, m->cell);
goto out; goto out;
} }
@ -1495,7 +1492,7 @@ static void retry_on_resume(struct bio *bio)
spin_unlock_irqrestore(&tc->lock, flags); spin_unlock_irqrestore(&tc->lock, flags);
} }
static int should_error_unserviceable_bio(struct pool *pool) static blk_status_t should_error_unserviceable_bio(struct pool *pool)
{ {
enum pool_mode m = get_pool_mode(pool); enum pool_mode m = get_pool_mode(pool);
@ -1503,27 +1500,27 @@ static int should_error_unserviceable_bio(struct pool *pool)
case PM_WRITE: case PM_WRITE:
/* Shouldn't get here */ /* Shouldn't get here */
DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode"); DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
return -EIO; return BLK_STS_IOERR;
case PM_OUT_OF_DATA_SPACE: case PM_OUT_OF_DATA_SPACE:
return pool->pf.error_if_no_space ? -ENOSPC : 0; return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
case PM_READ_ONLY: case PM_READ_ONLY:
case PM_FAIL: case PM_FAIL:
return -EIO; return BLK_STS_IOERR;
default: default:
/* Shouldn't get here */ /* Shouldn't get here */
DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode"); DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
return -EIO; return BLK_STS_IOERR;
} }
} }
static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{ {
int error = should_error_unserviceable_bio(pool); blk_status_t error = should_error_unserviceable_bio(pool);
if (error) { if (error) {
bio->bi_error = error; bio->bi_status = error;
bio_endio(bio); bio_endio(bio);
} else } else
retry_on_resume(bio); retry_on_resume(bio);
@ -1533,7 +1530,7 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
{ {
struct bio *bio; struct bio *bio;
struct bio_list bios; struct bio_list bios;
int error; blk_status_t error;
error = should_error_unserviceable_bio(pool); error = should_error_unserviceable_bio(pool);
if (error) { if (error) {
@ -2071,7 +2068,8 @@ static void process_thin_deferred_bios(struct thin_c *tc)
unsigned count = 0; unsigned count = 0;
if (tc->requeue_mode) { if (tc->requeue_mode) {
error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE); error_thin_bio_list(tc, &tc->deferred_bio_list,
BLK_STS_DM_REQUEUE);
return; return;
} }
@ -2322,7 +2320,7 @@ static void do_no_space_timeout(struct work_struct *ws)
if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
pool->pf.error_if_no_space = true; pool->pf.error_if_no_space = true;
notify_of_pool_mode_change_to_oods(pool); notify_of_pool_mode_change_to_oods(pool);
error_retry_list_with_code(pool, -ENOSPC); error_retry_list_with_code(pool, BLK_STS_NOSPC);
} }
} }
@ -2624,7 +2622,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
thin_hook_bio(tc, bio); thin_hook_bio(tc, bio);
if (tc->requeue_mode) { if (tc->requeue_mode) {
bio->bi_error = DM_ENDIO_REQUEUE; bio->bi_status = BLK_STS_DM_REQUEUE;
bio_endio(bio); bio_endio(bio);
return DM_MAPIO_SUBMITTED; return DM_MAPIO_SUBMITTED;
} }
@ -4177,7 +4175,8 @@ static int thin_map(struct dm_target *ti, struct bio *bio)
return thin_bio_map(ti, bio); return thin_bio_map(ti, bio);
} }
static int thin_endio(struct dm_target *ti, struct bio *bio, int *err) static int thin_endio(struct dm_target *ti, struct bio *bio,
blk_status_t *err)
{ {
unsigned long flags; unsigned long flags;
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

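In the dm-thin hunks above, should_error_unserviceable_bio() folds the pool mode directly into a blk_status_t: zero means the bio is retried on resume, anything else is the status to fail it with. A minimal model of that mode-to-status table; the PM_* names mirror the diff, the values are stand-ins.

#include <stdio.h>

typedef unsigned char blk_status_t;

#define BLK_STS_OK    0
#define BLK_STS_NOSPC 3                  /* illustrative values */
#define BLK_STS_IOERR 10

enum pool_mode { PM_WRITE, PM_OUT_OF_DATA_SPACE, PM_READ_ONLY, PM_FAIL };

static blk_status_t should_error(enum pool_mode m, int error_if_no_space)
{
        switch (m) {
        case PM_OUT_OF_DATA_SPACE:
                /* queue for resume unless the pool is set to fail fast */
                return error_if_no_space ? BLK_STS_NOSPC : BLK_STS_OK;
        case PM_READ_ONLY:
        case PM_FAIL:
                return BLK_STS_IOERR;
        default:                         /* PM_WRITE should never ask */
                return BLK_STS_IOERR;
        }
}

int main(void)
{
        printf("out of space, fail fast: %d\n", should_error(PM_OUT_OF_DATA_SPACE, 1));
        printf("out of space, queue:     %d\n", should_error(PM_OUT_OF_DATA_SPACE, 0));
        printf("read-only:               %d\n", should_error(PM_READ_ONLY, 0));
        return 0;
}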

@ -538,13 +538,13 @@ static int verity_verify_io(struct dm_verity_io *io)
/* /*
* End one "io" structure with a given error. * End one "io" structure with a given error.
*/ */
static void verity_finish_io(struct dm_verity_io *io, int error) static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
{ {
struct dm_verity *v = io->v; struct dm_verity *v = io->v;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
bio->bi_end_io = io->orig_bi_end_io; bio->bi_end_io = io->orig_bi_end_io;
bio->bi_error = error; bio->bi_status = status;
verity_fec_finish_io(io); verity_fec_finish_io(io);
@ -555,15 +555,15 @@ static void verity_work(struct work_struct *w)
{ {
struct dm_verity_io *io = container_of(w, struct dm_verity_io, work); struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
verity_finish_io(io, verity_verify_io(io)); verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
} }
static void verity_end_io(struct bio *bio) static void verity_end_io(struct bio *bio)
{ {
struct dm_verity_io *io = bio->bi_private; struct dm_verity_io *io = bio->bi_private;
if (bio->bi_error && !verity_fec_is_enabled(io->v)) { if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
verity_finish_io(io, bio->bi_error); verity_finish_io(io, bio->bi_status);
return; return;
} }

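dm-verity keeps the conversion at a single boundary: verity_verify_io() stays in the errno domain, and verity_work() converts exactly once via errno_to_blk_status() before the status is stored on the bio. A sketch of that single-conversion-point layering, with a deliberately reduced translation table and invented helper names:

#include <errno.h>
#include <stdio.h>

typedef unsigned char blk_status_t;

#define BLK_STS_OK    0
#define BLK_STS_IOERR 10                 /* illustrative */

static blk_status_t errno_to_blk_status(int errnum)
{
        return errnum ? BLK_STS_IOERR : BLK_STS_OK;   /* reduced table */
}

/* the core stays errno-based: easy to test, keeps -EIO conventions */
static int verify_io(int corrupt)
{
        return corrupt ? -EIO : 0;
}

/* the one place where an errno crosses into the bio status domain */
static void finish_io(blk_status_t status)
{
        printf("bi_status = %d\n", status);
}

int main(void)
{
        finish_io(errno_to_blk_status(verify_io(0)));
        finish_io(errno_to_blk_status(verify_io(1)));
        return 0;
}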

@ -63,7 +63,7 @@ static struct workqueue_struct *deferred_remove_workqueue;
*/ */
struct dm_io { struct dm_io {
struct mapped_device *md; struct mapped_device *md;
int error; blk_status_t status;
atomic_t io_count; atomic_t io_count;
struct bio *bio; struct bio *bio;
unsigned long start_time; unsigned long start_time;
@ -768,23 +768,24 @@ static int __noflush_suspending(struct mapped_device *md)
* Decrements the number of outstanding ios that a bio has been * Decrements the number of outstanding ios that a bio has been
* cloned into, completing the original io if necessary. * cloned into, completing the original io if necessary.
*/ */
static void dec_pending(struct dm_io *io, int error) static void dec_pending(struct dm_io *io, blk_status_t error)
{ {
unsigned long flags; unsigned long flags;
int io_error; blk_status_t io_error;
struct bio *bio; struct bio *bio;
struct mapped_device *md = io->md; struct mapped_device *md = io->md;
/* Push-back supersedes any I/O errors */ /* Push-back supersedes any I/O errors */
if (unlikely(error)) { if (unlikely(error)) {
spin_lock_irqsave(&io->endio_lock, flags); spin_lock_irqsave(&io->endio_lock, flags);
if (!(io->error > 0 && __noflush_suspending(md))) if (!(io->status == BLK_STS_DM_REQUEUE &&
io->error = error; __noflush_suspending(md)))
io->status = error;
spin_unlock_irqrestore(&io->endio_lock, flags); spin_unlock_irqrestore(&io->endio_lock, flags);
} }
if (atomic_dec_and_test(&io->io_count)) { if (atomic_dec_and_test(&io->io_count)) {
if (io->error == DM_ENDIO_REQUEUE) { if (io->status == BLK_STS_DM_REQUEUE) {
/* /*
* Target requested pushing back the I/O. * Target requested pushing back the I/O.
*/ */
@ -793,16 +794,16 @@ static void dec_pending(struct dm_io *io, int error)
bio_list_add_head(&md->deferred, io->bio); bio_list_add_head(&md->deferred, io->bio);
else else
/* noflush suspend was interrupted. */ /* noflush suspend was interrupted. */
io->error = -EIO; io->status = BLK_STS_IOERR;
spin_unlock_irqrestore(&md->deferred_lock, flags); spin_unlock_irqrestore(&md->deferred_lock, flags);
} }
io_error = io->error; io_error = io->status;
bio = io->bio; bio = io->bio;
end_io_acct(io); end_io_acct(io);
free_io(md, io); free_io(md, io);
if (io_error == DM_ENDIO_REQUEUE) if (io_error == BLK_STS_DM_REQUEUE)
return; return;
if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
@ -814,7 +815,7 @@ static void dec_pending(struct dm_io *io, int error)
queue_io(md, bio); queue_io(md, bio);
} else { } else {
/* done with normal IO or empty flush */ /* done with normal IO or empty flush */
bio->bi_error = io_error; bio->bi_status = io_error;
bio_endio(bio); bio_endio(bio);
} }
} }
@ -838,14 +839,13 @@ void disable_write_zeroes(struct mapped_device *md)
static void clone_endio(struct bio *bio) static void clone_endio(struct bio *bio)
{ {
int error = bio->bi_error; blk_status_t error = bio->bi_status;
int r = error;
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
struct dm_io *io = tio->io; struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md; struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io; dm_endio_fn endio = tio->ti->type->end_io;
if (unlikely(error == -EREMOTEIO)) { if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_WRITE_SAME && if (bio_op(bio) == REQ_OP_WRITE_SAME &&
!bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors) !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
disable_write_same(md); disable_write_same(md);
@ -855,10 +855,10 @@ static void clone_endio(struct bio *bio)
} }
if (endio) { if (endio) {
r = endio(tio->ti, bio, &error); int r = endio(tio->ti, bio, &error);
switch (r) { switch (r) {
case DM_ENDIO_REQUEUE: case DM_ENDIO_REQUEUE:
error = DM_ENDIO_REQUEUE; error = BLK_STS_DM_REQUEUE;
/*FALLTHRU*/ /*FALLTHRU*/
case DM_ENDIO_DONE: case DM_ENDIO_DONE:
break; break;
@ -1094,11 +1094,11 @@ static void __map_bio(struct dm_target_io *tio)
generic_make_request(clone); generic_make_request(clone);
break; break;
case DM_MAPIO_KILL: case DM_MAPIO_KILL:
r = -EIO; dec_pending(tio->io, BLK_STS_IOERR);
/*FALLTHRU*/ free_tio(tio);
break;
case DM_MAPIO_REQUEUE: case DM_MAPIO_REQUEUE:
/* error the io and bail out, or requeue it if needed */ dec_pending(tio->io, BLK_STS_DM_REQUEUE);
dec_pending(tio->io, r);
free_tio(tio); free_tio(tio);
break; break;
default: default:
@ -1366,7 +1366,7 @@ static void __split_and_process_bio(struct mapped_device *md,
ci.map = map; ci.map = map;
ci.md = md; ci.md = md;
ci.io = alloc_io(md); ci.io = alloc_io(md);
ci.io->error = 0; ci.io->status = 0;
atomic_set(&ci.io->io_count, 1); atomic_set(&ci.io->io_count, 1);
ci.io->bio = bio; ci.io->bio = bio;
ci.io->md = md; ci.io->md = md;

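The dec_pending() hunks above retire the old trick of smuggling DM_ENDIO_REQUEUE through io->error: push-back is now the first-class status BLK_STS_DM_REQUEUE, it is sticky against later errors while a noflush suspend is in progress, it degrades to BLK_STS_IOERR if that suspend is interrupted, and it short-circuits completion so the bio can be deferred. A condensed, compilable model of those rules, with the locking dropped and the suspend check faked by a flag:

#include <stdio.h>

typedef unsigned char blk_status_t;

#define BLK_STS_OK         0
#define BLK_STS_IOERR      10            /* illustrative values */
#define BLK_STS_DM_REQUEUE 11

struct dm_io { blk_status_t status; int pending; };

static int noflush_suspending = 1;       /* stand-in for __noflush_suspending() */

static void dec_pending(struct dm_io *io, blk_status_t error)
{
        /* push-back supersedes later errors, but only during noflush suspend */
        if (error && !(io->status == BLK_STS_DM_REQUEUE && noflush_suspending))
                io->status = error;

        if (--io->pending)
                return;

        if (io->status == BLK_STS_DM_REQUEUE && !noflush_suspending)
                io->status = BLK_STS_IOERR;      /* suspend was interrupted */

        if (io->status == BLK_STS_DM_REQUEUE) {
                printf("deferred for requeue\n");  /* back on the deferred list */
                return;
        }
        printf("bio completes with status %d\n", io->status);
}

int main(void)
{
        struct dm_io io = { .status = BLK_STS_OK, .pending = 2 };

        dec_pending(&io, BLK_STS_DM_REQUEUE);    /* a clone asked for push-back */
        dec_pending(&io, BLK_STS_IOERR);         /* a later error loses to it */
        return 0;
}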

@ -273,7 +273,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
} }
if (mddev->ro == 1 && unlikely(rw == WRITE)) { if (mddev->ro == 1 && unlikely(rw == WRITE)) {
if (bio_sectors(bio) != 0) if (bio_sectors(bio) != 0)
bio->bi_error = -EROFS; bio->bi_status = BLK_STS_IOERR;
bio_endio(bio); bio_endio(bio);
return BLK_QC_T_NONE; return BLK_QC_T_NONE;
} }
@ -719,8 +719,8 @@ static void super_written(struct bio *bio)
struct md_rdev *rdev = bio->bi_private; struct md_rdev *rdev = bio->bi_private;
struct mddev *mddev = rdev->mddev; struct mddev *mddev = rdev->mddev;
if (bio->bi_error) { if (bio->bi_status) {
pr_err("md: super_written gets error=%d\n", bio->bi_error); pr_err("md: super_written gets error=%d\n", bio->bi_status);
md_error(mddev, rdev); md_error(mddev, rdev);
if (!test_bit(Faulty, &rdev->flags) if (!test_bit(Faulty, &rdev->flags)
&& (bio->bi_opf & MD_FAILFAST)) { && (bio->bi_opf & MD_FAILFAST)) {
@ -801,7 +801,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
submit_bio_wait(bio); submit_bio_wait(bio);
ret = !bio->bi_error; ret = !bio->bi_status;
bio_put(bio); bio_put(bio);
return ret; return ret;
} }


@ -73,12 +73,12 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
* operation and are ready to return a success/failure code to the buffer * operation and are ready to return a success/failure code to the buffer
* cache layer. * cache layer.
*/ */
static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err) static void multipath_end_bh_io(struct multipath_bh *mp_bh, blk_status_t status)
{ {
struct bio *bio = mp_bh->master_bio; struct bio *bio = mp_bh->master_bio;
struct mpconf *conf = mp_bh->mddev->private; struct mpconf *conf = mp_bh->mddev->private;
bio->bi_error = err; bio->bi_status = status;
bio_endio(bio); bio_endio(bio);
mempool_free(mp_bh, conf->pool); mempool_free(mp_bh, conf->pool);
} }
@ -89,7 +89,7 @@ static void multipath_end_request(struct bio *bio)
struct mpconf *conf = mp_bh->mddev->private; struct mpconf *conf = mp_bh->mddev->private;
struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev; struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;
if (!bio->bi_error) if (!bio->bi_status)
multipath_end_bh_io(mp_bh, 0); multipath_end_bh_io(mp_bh, 0);
else if (!(bio->bi_opf & REQ_RAHEAD)) { else if (!(bio->bi_opf & REQ_RAHEAD)) {
/* /*
@ -102,7 +102,7 @@ static void multipath_end_request(struct bio *bio)
(unsigned long long)bio->bi_iter.bi_sector); (unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh); multipath_reschedule_retry(mp_bh);
} else } else
multipath_end_bh_io(mp_bh, bio->bi_error); multipath_end_bh_io(mp_bh, bio->bi_status);
rdev_dec_pending(rdev, conf->mddev); rdev_dec_pending(rdev, conf->mddev);
} }
@ -347,7 +347,7 @@ static void multipathd(struct md_thread *thread)
pr_err("multipath: %s: unrecoverable IO read error for block %llu\n", pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
bdevname(bio->bi_bdev,b), bdevname(bio->bi_bdev,b),
(unsigned long long)bio->bi_iter.bi_sector); (unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, -EIO); multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
} else { } else {
pr_err("multipath: %s: redirecting sector %llu to another IO path\n", pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
bdevname(bio->bi_bdev,b), bdevname(bio->bi_bdev,b),


@ -277,7 +277,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
struct r1conf *conf = r1_bio->mddev->private; struct r1conf *conf = r1_bio->mddev->private;
if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
bio->bi_error = -EIO; bio->bi_status = BLK_STS_IOERR;
bio_endio(bio); bio_endio(bio);
/* /*
@ -335,7 +335,7 @@ static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
static void raid1_end_read_request(struct bio *bio) static void raid1_end_read_request(struct bio *bio)
{ {
int uptodate = !bio->bi_error; int uptodate = !bio->bi_status;
struct r1bio *r1_bio = bio->bi_private; struct r1bio *r1_bio = bio->bi_private;
struct r1conf *conf = r1_bio->mddev->private; struct r1conf *conf = r1_bio->mddev->private;
struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
@ -426,12 +426,12 @@ static void raid1_end_write_request(struct bio *bio)
struct md_rdev *rdev = conf->mirrors[mirror].rdev; struct md_rdev *rdev = conf->mirrors[mirror].rdev;
bool discard_error; bool discard_error;
discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD; discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
/* /*
* 'one mirror IO has finished' event handler: * 'one mirror IO has finished' event handler:
*/ */
if (bio->bi_error && !discard_error) { if (bio->bi_status && !discard_error) {
set_bit(WriteErrorSeen, &rdev->flags); set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags)) if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED, & set_bit(MD_RECOVERY_NEEDED, &
@ -802,7 +802,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
bio->bi_next = NULL; bio->bi_next = NULL;
bio->bi_bdev = rdev->bdev; bio->bi_bdev = rdev->bdev;
if (test_bit(Faulty, &rdev->flags)) { if (test_bit(Faulty, &rdev->flags)) {
bio->bi_error = -EIO; bio->bi_status = BLK_STS_IOERR;
bio_endio(bio); bio_endio(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@ -1856,7 +1856,7 @@ static void end_sync_read(struct bio *bio)
* or re-read if the read failed. * or re-read if the read failed.
* We don't do much here, just schedule handling by raid1d * We don't do much here, just schedule handling by raid1d
*/ */
if (!bio->bi_error) if (!bio->bi_status)
set_bit(R1BIO_Uptodate, &r1_bio->state); set_bit(R1BIO_Uptodate, &r1_bio->state);
if (atomic_dec_and_test(&r1_bio->remaining)) if (atomic_dec_and_test(&r1_bio->remaining))
@ -1865,7 +1865,7 @@ static void end_sync_read(struct bio *bio)
static void end_sync_write(struct bio *bio) static void end_sync_write(struct bio *bio)
{ {
int uptodate = !bio->bi_error; int uptodate = !bio->bi_status;
struct r1bio *r1_bio = get_resync_r1bio(bio); struct r1bio *r1_bio = get_resync_r1bio(bio);
struct mddev *mddev = r1_bio->mddev; struct mddev *mddev = r1_bio->mddev;
struct r1conf *conf = mddev->private; struct r1conf *conf = mddev->private;
@ -2058,7 +2058,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
idx ++; idx ++;
} }
set_bit(R1BIO_Uptodate, &r1_bio->state); set_bit(R1BIO_Uptodate, &r1_bio->state);
bio->bi_error = 0; bio->bi_status = 0;
return 1; return 1;
} }
@ -2082,16 +2082,16 @@ static void process_checks(struct r1bio *r1_bio)
for (i = 0; i < conf->raid_disks * 2; i++) { for (i = 0; i < conf->raid_disks * 2; i++) {
int j; int j;
int size; int size;
int error; blk_status_t status;
struct bio_vec *bi; struct bio_vec *bi;
struct bio *b = r1_bio->bios[i]; struct bio *b = r1_bio->bios[i];
struct resync_pages *rp = get_resync_pages(b); struct resync_pages *rp = get_resync_pages(b);
if (b->bi_end_io != end_sync_read) if (b->bi_end_io != end_sync_read)
continue; continue;
/* fixup the bio for reuse, but preserve errno */ /* fixup the bio for reuse, but preserve errno */
error = b->bi_error; status = b->bi_status;
bio_reset(b); bio_reset(b);
b->bi_error = error; b->bi_status = status;
b->bi_vcnt = vcnt; b->bi_vcnt = vcnt;
b->bi_iter.bi_size = r1_bio->sectors << 9; b->bi_iter.bi_size = r1_bio->sectors << 9;
b->bi_iter.bi_sector = r1_bio->sector + b->bi_iter.bi_sector = r1_bio->sector +
@ -2113,7 +2113,7 @@ static void process_checks(struct r1bio *r1_bio)
} }
for (primary = 0; primary < conf->raid_disks * 2; primary++) for (primary = 0; primary < conf->raid_disks * 2; primary++)
if (r1_bio->bios[primary]->bi_end_io == end_sync_read && if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
!r1_bio->bios[primary]->bi_error) { !r1_bio->bios[primary]->bi_status) {
r1_bio->bios[primary]->bi_end_io = NULL; r1_bio->bios[primary]->bi_end_io = NULL;
rdev_dec_pending(conf->mirrors[primary].rdev, mddev); rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
break; break;
@ -2123,7 +2123,7 @@ static void process_checks(struct r1bio *r1_bio)
int j; int j;
struct bio *pbio = r1_bio->bios[primary]; struct bio *pbio = r1_bio->bios[primary];
struct bio *sbio = r1_bio->bios[i]; struct bio *sbio = r1_bio->bios[i];
int error = sbio->bi_error; blk_status_t status = sbio->bi_status;
struct page **ppages = get_resync_pages(pbio)->pages; struct page **ppages = get_resync_pages(pbio)->pages;
struct page **spages = get_resync_pages(sbio)->pages; struct page **spages = get_resync_pages(sbio)->pages;
struct bio_vec *bi; struct bio_vec *bi;
@ -2132,12 +2132,12 @@ static void process_checks(struct r1bio *r1_bio)
if (sbio->bi_end_io != end_sync_read) if (sbio->bi_end_io != end_sync_read)
continue; continue;
/* Now we can 'fixup' the error value */ /* Now we can 'fixup' the error value */
sbio->bi_error = 0; sbio->bi_status = 0;
bio_for_each_segment_all(bi, sbio, j) bio_for_each_segment_all(bi, sbio, j)
page_len[j] = bi->bv_len; page_len[j] = bi->bv_len;
if (!error) { if (!status) {
for (j = vcnt; j-- ; ) { for (j = vcnt; j-- ; ) {
if (memcmp(page_address(ppages[j]), if (memcmp(page_address(ppages[j]),
page_address(spages[j]), page_address(spages[j]),
@ -2149,7 +2149,7 @@ static void process_checks(struct r1bio *r1_bio)
if (j >= 0) if (j >= 0)
atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
&& !error)) { && !status)) {
/* No need to write to this device. */ /* No need to write to this device. */
sbio->bi_end_io = NULL; sbio->bi_end_io = NULL;
rdev_dec_pending(conf->mirrors[i].rdev, mddev); rdev_dec_pending(conf->mirrors[i].rdev, mddev);
@ -2400,11 +2400,11 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
struct bio *bio = r1_bio->bios[m]; struct bio *bio = r1_bio->bios[m];
if (bio->bi_end_io == NULL) if (bio->bi_end_io == NULL)
continue; continue;
if (!bio->bi_error && if (!bio->bi_status &&
test_bit(R1BIO_MadeGood, &r1_bio->state)) { test_bit(R1BIO_MadeGood, &r1_bio->state)) {
rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
} }
if (bio->bi_error && if (bio->bi_status &&
test_bit(R1BIO_WriteError, &r1_bio->state)) { test_bit(R1BIO_WriteError, &r1_bio->state)) {
if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
md_error(conf->mddev, rdev); md_error(conf->mddev, rdev);

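process_checks() above has to bio_reset() each sync-read bio for reuse, and bio_reset() wipes bi_status along with everything else, so the status is saved to a local and written back around the reset. The same save/reset/restore idiom reduced to a standalone sketch (the struct is a tiny stand-in for the real bio):

#include <stdio.h>
#include <string.h>

typedef unsigned char blk_status_t;

struct bio {
        blk_status_t bi_status;
        unsigned int bi_vcnt;
        /* ...many more fields in the real struct... */
};

/* bio_reset() zeroes the bio for reuse, status included */
static void bio_reset(struct bio *bio)
{
        memset(bio, 0, sizeof(*bio));
}

int main(void)
{
        struct bio b = { .bi_status = 10, .bi_vcnt = 4 };  /* a failed sync read */
        blk_status_t status = b.bi_status;   /* preserve the outcome... */

        bio_reset(&b);                       /* ...through the wipe... */
        b.bi_status = status;                /* ...and put it back */
        b.bi_vcnt = 4;                       /* the rest is re-derived */

        printf("status survives the reset: %d\n", b.bi_status);
        return 0;
}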

@ -336,7 +336,7 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
struct r10conf *conf = r10_bio->mddev->private; struct r10conf *conf = r10_bio->mddev->private;
if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
bio->bi_error = -EIO; bio->bi_status = BLK_STS_IOERR;
bio_endio(bio); bio_endio(bio);
/* /*
@ -389,7 +389,7 @@ static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
static void raid10_end_read_request(struct bio *bio) static void raid10_end_read_request(struct bio *bio)
{ {
int uptodate = !bio->bi_error; int uptodate = !bio->bi_status;
struct r10bio *r10_bio = bio->bi_private; struct r10bio *r10_bio = bio->bi_private;
int slot, dev; int slot, dev;
struct md_rdev *rdev; struct md_rdev *rdev;
@ -477,7 +477,7 @@ static void raid10_end_write_request(struct bio *bio)
struct bio *to_put = NULL; struct bio *to_put = NULL;
bool discard_error; bool discard_error;
discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD; discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
@ -491,7 +491,7 @@ static void raid10_end_write_request(struct bio *bio)
/* /*
* this branch is our 'one mirror IO has finished' event handler: * this branch is our 'one mirror IO has finished' event handler:
*/ */
if (bio->bi_error && !discard_error) { if (bio->bi_status && !discard_error) {
if (repl) if (repl)
/* Never record new bad blocks to replacement, /* Never record new bad blocks to replacement,
* just fail it. * just fail it.
@ -913,7 +913,7 @@ static void flush_pending_writes(struct r10conf *conf)
bio->bi_next = NULL; bio->bi_next = NULL;
bio->bi_bdev = rdev->bdev; bio->bi_bdev = rdev->bdev;
if (test_bit(Faulty, &rdev->flags)) { if (test_bit(Faulty, &rdev->flags)) {
bio->bi_error = -EIO; bio->bi_status = BLK_STS_IOERR;
bio_endio(bio); bio_endio(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@ -1098,7 +1098,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
bio->bi_next = NULL; bio->bi_next = NULL;
bio->bi_bdev = rdev->bdev; bio->bi_bdev = rdev->bdev;
if (test_bit(Faulty, &rdev->flags)) { if (test_bit(Faulty, &rdev->flags)) {
bio->bi_error = -EIO; bio->bi_status = BLK_STS_IOERR;
bio_endio(bio); bio_endio(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@ -1888,7 +1888,7 @@ static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
{ {
struct r10conf *conf = r10_bio->mddev->private; struct r10conf *conf = r10_bio->mddev->private;
if (!bio->bi_error) if (!bio->bi_status)
set_bit(R10BIO_Uptodate, &r10_bio->state); set_bit(R10BIO_Uptodate, &r10_bio->state);
else else
/* The write handler will notice the lack of /* The write handler will notice the lack of
@ -1972,7 +1972,7 @@ static void end_sync_write(struct bio *bio)
else else
rdev = conf->mirrors[d].rdev; rdev = conf->mirrors[d].rdev;
if (bio->bi_error) { if (bio->bi_status) {
if (repl) if (repl)
md_error(mddev, rdev); md_error(mddev, rdev);
else { else {
@ -2021,7 +2021,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
/* find the first device with a block */ /* find the first device with a block */
for (i=0; i<conf->copies; i++) for (i=0; i<conf->copies; i++)
if (!r10_bio->devs[i].bio->bi_error) if (!r10_bio->devs[i].bio->bi_status)
break; break;
if (i == conf->copies) if (i == conf->copies)
@ -2050,7 +2050,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
tpages = get_resync_pages(tbio)->pages; tpages = get_resync_pages(tbio)->pages;
d = r10_bio->devs[i].devnum; d = r10_bio->devs[i].devnum;
rdev = conf->mirrors[d].rdev; rdev = conf->mirrors[d].rdev;
if (!r10_bio->devs[i].bio->bi_error) { if (!r10_bio->devs[i].bio->bi_status) {
/* We know that the bi_io_vec layout is the same for /* We know that the bi_io_vec layout is the same for
* both 'first' and 'i', so we just compare them. * both 'first' and 'i', so we just compare them.
* All vec entries are PAGE_SIZE; * All vec entries are PAGE_SIZE;
@ -2633,7 +2633,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev = conf->mirrors[dev].rdev; rdev = conf->mirrors[dev].rdev;
if (r10_bio->devs[m].bio == NULL) if (r10_bio->devs[m].bio == NULL)
continue; continue;
if (!r10_bio->devs[m].bio->bi_error) { if (!r10_bio->devs[m].bio->bi_status) {
rdev_clear_badblocks( rdev_clear_badblocks(
rdev, rdev,
r10_bio->devs[m].addr, r10_bio->devs[m].addr,
@ -2649,7 +2649,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
if (r10_bio->devs[m].repl_bio == NULL) if (r10_bio->devs[m].repl_bio == NULL)
continue; continue;
if (!r10_bio->devs[m].repl_bio->bi_error) { if (!r10_bio->devs[m].repl_bio->bi_status) {
rdev_clear_badblocks( rdev_clear_badblocks(
rdev, rdev,
r10_bio->devs[m].addr, r10_bio->devs[m].addr,
@ -2675,7 +2675,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
r10_bio->devs[m].addr, r10_bio->devs[m].addr,
r10_bio->sectors, 0); r10_bio->sectors, 0);
rdev_dec_pending(rdev, conf->mddev); rdev_dec_pending(rdev, conf->mddev);
} else if (bio != NULL && bio->bi_error) { } else if (bio != NULL && bio->bi_status) {
fail = true; fail = true;
if (!narrow_write_error(r10_bio, m)) { if (!narrow_write_error(r10_bio, m)) {
md_error(conf->mddev, rdev); md_error(conf->mddev, rdev);
@ -3267,7 +3267,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->devs[i].repl_bio->bi_end_io = NULL; r10_bio->devs[i].repl_bio->bi_end_io = NULL;
bio = r10_bio->devs[i].bio; bio = r10_bio->devs[i].bio;
bio->bi_error = -EIO; bio->bi_status = BLK_STS_IOERR;
rcu_read_lock(); rcu_read_lock();
rdev = rcu_dereference(conf->mirrors[d].rdev); rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
@ -3309,7 +3309,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
/* Need to set up for writing to the replacement */ /* Need to set up for writing to the replacement */
bio = r10_bio->devs[i].repl_bio; bio = r10_bio->devs[i].repl_bio;
bio->bi_error = -EIO; bio->bi_status = BLK_STS_IOERR;
sector = r10_bio->devs[i].addr; sector = r10_bio->devs[i].addr;
bio->bi_next = biolist; bio->bi_next = biolist;
@ -3375,7 +3375,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (bio->bi_end_io == end_sync_read) { if (bio->bi_end_io == end_sync_read) {
md_sync_acct(bio->bi_bdev, nr_sectors); md_sync_acct(bio->bi_bdev, nr_sectors);
bio->bi_error = 0; bio->bi_status = 0;
generic_make_request(bio); generic_make_request(bio);
} }
} }
@ -4394,7 +4394,7 @@ read_more:
read_bio->bi_end_io = end_reshape_read; read_bio->bi_end_io = end_reshape_read;
bio_set_op_attrs(read_bio, REQ_OP_READ, 0); bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
read_bio->bi_error = 0; read_bio->bi_status = 0;
read_bio->bi_vcnt = 0; read_bio->bi_vcnt = 0;
read_bio->bi_iter.bi_size = 0; read_bio->bi_iter.bi_size = 0;
r10_bio->master_bio = read_bio; r10_bio->master_bio = read_bio;
@ -4638,7 +4638,7 @@ static void end_reshape_write(struct bio *bio)
rdev = conf->mirrors[d].rdev; rdev = conf->mirrors[d].rdev;
} }
if (bio->bi_error) { if (bio->bi_status) {
/* FIXME should record badblock */ /* FIXME should record badblock */
md_error(mddev, rdev); md_error(mddev, rdev);
} }


@ -572,7 +572,7 @@ static void r5l_log_endio(struct bio *bio)
struct r5l_log *log = io->log; struct r5l_log *log = io->log;
unsigned long flags; unsigned long flags;
if (bio->bi_error) if (bio->bi_status)
md_error(log->rdev->mddev, log->rdev); md_error(log->rdev->mddev, log->rdev);
bio_put(bio); bio_put(bio);
@ -1247,7 +1247,7 @@ static void r5l_log_flush_endio(struct bio *bio)
unsigned long flags; unsigned long flags;
struct r5l_io_unit *io; struct r5l_io_unit *io;
if (bio->bi_error) if (bio->bi_status)
md_error(log->rdev->mddev, log->rdev); md_error(log->rdev->mddev, log->rdev);
spin_lock_irqsave(&log->io_list_lock, flags); spin_lock_irqsave(&log->io_list_lock, flags);


@ -397,7 +397,7 @@ static void ppl_log_endio(struct bio *bio)
pr_debug("%s: seq: %llu\n", __func__, io->seq); pr_debug("%s: seq: %llu\n", __func__, io->seq);
if (bio->bi_error) if (bio->bi_status)
md_error(ppl_conf->mddev, log->rdev); md_error(ppl_conf->mddev, log->rdev);
list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) { list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {


@ -2476,7 +2476,7 @@ static void raid5_end_read_request(struct bio * bi)
pr_debug("end_read_request %llu/%d, count: %d, error %d.\n", pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
(unsigned long long)sh->sector, i, atomic_read(&sh->count), (unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_error); bi->bi_status);
if (i == disks) { if (i == disks) {
bio_reset(bi); bio_reset(bi);
BUG(); BUG();
@ -2496,7 +2496,7 @@ static void raid5_end_read_request(struct bio * bi)
s = sh->sector + rdev->new_data_offset; s = sh->sector + rdev->new_data_offset;
else else
s = sh->sector + rdev->data_offset; s = sh->sector + rdev->data_offset;
if (!bi->bi_error) { if (!bi->bi_status) {
set_bit(R5_UPTODATE, &sh->dev[i].flags); set_bit(R5_UPTODATE, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags)) { if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
/* Note that this cannot happen on a /* Note that this cannot happen on a
@ -2613,7 +2613,7 @@ static void raid5_end_write_request(struct bio *bi)
} }
pr_debug("end_write_request %llu/%d, count %d, error: %d.\n", pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
(unsigned long long)sh->sector, i, atomic_read(&sh->count), (unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_error); bi->bi_status);
if (i == disks) { if (i == disks) {
bio_reset(bi); bio_reset(bi);
BUG(); BUG();
@ -2621,14 +2621,14 @@ static void raid5_end_write_request(struct bio *bi)
} }
if (replacement) { if (replacement) {
if (bi->bi_error) if (bi->bi_status)
md_error(conf->mddev, rdev); md_error(conf->mddev, rdev);
else if (is_badblock(rdev, sh->sector, else if (is_badblock(rdev, sh->sector,
STRIPE_SECTORS, STRIPE_SECTORS,
&first_bad, &bad_sectors)) &first_bad, &bad_sectors))
set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
} else { } else {
if (bi->bi_error) { if (bi->bi_status) {
set_bit(STRIPE_DEGRADED, &sh->state); set_bit(STRIPE_DEGRADED, &sh->state);
set_bit(WriteErrorSeen, &rdev->flags); set_bit(WriteErrorSeen, &rdev->flags);
set_bit(R5_WriteError, &sh->dev[i].flags); set_bit(R5_WriteError, &sh->dev[i].flags);
@ -2649,7 +2649,7 @@ static void raid5_end_write_request(struct bio *bi)
} }
rdev_dec_pending(rdev, conf->mddev); rdev_dec_pending(rdev, conf->mddev);
if (sh->batch_head && bi->bi_error && !replacement) if (sh->batch_head && bi->bi_status && !replacement)
set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
bio_reset(bi); bio_reset(bi);
@ -3381,7 +3381,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) { sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
bi->bi_error = -EIO; bi->bi_status = BLK_STS_IOERR;
md_write_end(conf->mddev); md_write_end(conf->mddev);
bio_endio(bi); bio_endio(bi);
bi = nextbi; bi = nextbi;
@ -3403,7 +3403,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) { sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
bi->bi_error = -EIO; bi->bi_status = BLK_STS_IOERR;
md_write_end(conf->mddev); md_write_end(conf->mddev);
bio_endio(bi); bio_endio(bi);
bi = bi2; bi = bi2;
@ -3429,7 +3429,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
struct bio *nextbi = struct bio *nextbi =
r5_next_bio(bi, sh->dev[i].sector); r5_next_bio(bi, sh->dev[i].sector);
bi->bi_error = -EIO; bi->bi_status = BLK_STS_IOERR;
bio_endio(bi); bio_endio(bi);
bi = nextbi; bi = nextbi;
} }
@ -5144,7 +5144,7 @@ static void raid5_align_endio(struct bio *bi)
struct mddev *mddev; struct mddev *mddev;
struct r5conf *conf; struct r5conf *conf;
struct md_rdev *rdev; struct md_rdev *rdev;
int error = bi->bi_error; blk_status_t error = bi->bi_status;
bio_put(bi); bio_put(bi);
@ -5721,7 +5721,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
release_stripe_plug(mddev, sh); release_stripe_plug(mddev, sh);
} else { } else {
/* cannot get stripe for read-ahead, just give-up */ /* cannot get stripe for read-ahead, just give-up */
bi->bi_error = -EIO; bi->bi_status = BLK_STS_IOERR;
break; break;
} }
} }
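For context: every hunk below trades an int error for the new status type from include/linux/blk_types.h. A minimal sketch of that type as this commit introduces it (the constant list is abridged, and treat the exact numeric values as illustrative rather than authoritative):

/* sparse-checked status cookie replacing raw negative errnos in struct bio */
typedef u8 __bitwise blk_status_t;

#define BLK_STS_OK       0                              /* success */
#define BLK_STS_NOTSUPP  ((__force blk_status_t)1)      /* was -EOPNOTSUPP */
#define BLK_STS_TARGET   ((__force blk_status_t)5)      /* was -EREMOTEIO */
#define BLK_STS_RESOURCE ((__force blk_status_t)9)      /* was -ENOMEM */
#define BLK_STS_IOERR    ((__force blk_status_t)10)     /* was -EIO, catch-all */

The __bitwise annotation is what lets sparse flag any leftover site that still assigns a raw errno to bi_status.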


@ -186,7 +186,7 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
* another kernel subsystem, and we just pass it through. * another kernel subsystem, and we just pass it through.
*/ */
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
bio->bi_error = -EIO; bio->bi_status = BLK_STS_IOERR;
goto out; goto out;
} }
@ -205,7 +205,7 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
"io error in %s sector %lld, len %d,\n", "io error in %s sector %lld, len %d,\n",
(rw == READ) ? "READ" : "WRITE", (rw == READ) ? "READ" : "WRITE",
(unsigned long long) iter.bi_sector, len); (unsigned long long) iter.bi_sector, len);
bio->bi_error = err; bio->bi_status = errno_to_blk_status(err);
break; break;
} }
} }


@ -1210,7 +1210,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
* another kernel subsystem, and we just pass it through. * another kernel subsystem, and we just pass it through.
*/ */
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
bio->bi_error = -EIO; bio->bi_status = BLK_STS_IOERR;
goto out; goto out;
} }
@ -1232,7 +1232,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
(op_is_write(bio_op(bio))) ? "WRITE" : (op_is_write(bio_op(bio))) ? "WRITE" :
"READ", "READ",
(unsigned long long) iter.bi_sector, len); (unsigned long long) iter.bi_sector, len);
bio->bi_error = err; bio->bi_status = errno_to_blk_status(err);
break; break;
} }
} }


@ -49,19 +49,19 @@ static struct nd_region *to_region(struct pmem_device *pmem)
return to_nd_region(to_dev(pmem)->parent); return to_nd_region(to_dev(pmem)->parent);
} }
static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
unsigned int len) phys_addr_t offset, unsigned int len)
{ {
struct device *dev = to_dev(pmem); struct device *dev = to_dev(pmem);
sector_t sector; sector_t sector;
long cleared; long cleared;
int rc = 0; blk_status_t rc = BLK_STS_OK;
sector = (offset - pmem->data_offset) / 512; sector = (offset - pmem->data_offset) / 512;
cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len); cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
if (cleared < len) if (cleared < len)
rc = -EIO; rc = BLK_STS_IOERR;
if (cleared > 0 && cleared / 512) { if (cleared > 0 && cleared / 512) {
cleared /= 512; cleared /= 512;
dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__, dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__,
@ -84,7 +84,7 @@ static void write_pmem(void *pmem_addr, struct page *page,
kunmap_atomic(mem); kunmap_atomic(mem);
} }
static int read_pmem(struct page *page, unsigned int off, static blk_status_t read_pmem(struct page *page, unsigned int off,
void *pmem_addr, unsigned int len) void *pmem_addr, unsigned int len)
{ {
int rc; int rc;
@ -93,15 +93,15 @@ static int read_pmem(struct page *page, unsigned int off,
rc = memcpy_mcsafe(mem + off, pmem_addr, len); rc = memcpy_mcsafe(mem + off, pmem_addr, len);
kunmap_atomic(mem); kunmap_atomic(mem);
if (rc) if (rc)
return -EIO; return BLK_STS_IOERR;
return 0; return BLK_STS_OK;
} }
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
unsigned int len, unsigned int off, bool is_write, unsigned int len, unsigned int off, bool is_write,
sector_t sector) sector_t sector)
{ {
int rc = 0; blk_status_t rc = BLK_STS_OK;
bool bad_pmem = false; bool bad_pmem = false;
phys_addr_t pmem_off = sector * 512 + pmem->data_offset; phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
void *pmem_addr = pmem->virt_addr + pmem_off; void *pmem_addr = pmem->virt_addr + pmem_off;
@ -111,7 +111,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
if (!is_write) { if (!is_write) {
if (unlikely(bad_pmem)) if (unlikely(bad_pmem))
rc = -EIO; rc = BLK_STS_IOERR;
else { else {
rc = read_pmem(page, off, pmem_addr, len); rc = read_pmem(page, off, pmem_addr, len);
flush_dcache_page(page); flush_dcache_page(page);
@ -149,7 +149,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{ {
int rc = 0; blk_status_t rc = 0;
bool do_acct; bool do_acct;
unsigned long start; unsigned long start;
struct bio_vec bvec; struct bio_vec bvec;
@ -166,7 +166,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
bvec.bv_offset, op_is_write(bio_op(bio)), bvec.bv_offset, op_is_write(bio_op(bio)),
iter.bi_sector); iter.bi_sector);
if (rc) { if (rc) {
bio->bi_error = rc; bio->bi_status = rc;
break; break;
} }
} }
@ -184,7 +184,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, bool is_write) struct page *page, bool is_write)
{ {
struct pmem_device *pmem = bdev->bd_queue->queuedata; struct pmem_device *pmem = bdev->bd_queue->queuedata;
int rc; blk_status_t rc;
rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector); rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);
@ -197,7 +197,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
if (rc == 0) if (rc == 0)
page_endio(page, is_write, 0); page_endio(page, is_write, 0);
return rc; return blk_status_to_errno(rc);
} }
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */ /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
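pmem shows the boundary rule of the whole series: bi_status stays a blk_status_t inside the driver and is converted exactly once, at the rw_page/syscall edge. A sketch of the two helpers this depends on; the real implementations in block/blk-core.c go through a shared table of errno/status pairs, so the if-chains here are illustrative only:

static inline blk_status_t errno_to_blk_status(int errno)
{
	if (!errno)
		return BLK_STS_OK;
	if (errno == -ENOMEM)
		return BLK_STS_RESOURCE;
	if (errno == -EREMOTEIO)
		return BLK_STS_TARGET;
	return BLK_STS_IOERR;		/* catch-all, as -EIO was before */
}

static inline int blk_status_to_errno(blk_status_t status)
{
	if (status == BLK_STS_OK)
		return 0;
	if (status == BLK_STS_RESOURCE)
		return -ENOMEM;
	if (status == BLK_STS_TARGET)
		return -EREMOTEIO;
	return -EIO;
}

Callers that still produce errnos internally (nd_blk and btt above) convert with errno_to_blk_status() at the point where the value enters the bio.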


@ -21,7 +21,7 @@ static void nvmet_bio_done(struct bio *bio)
struct nvmet_req *req = bio->bi_private; struct nvmet_req *req = bio->bi_private;
nvmet_req_complete(req, nvmet_req_complete(req,
bio->bi_error ? NVME_SC_INTERNAL | NVME_SC_DNR : 0); bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
if (bio != &req->inline_bio) if (bio != &req->inline_bio)
bio_put(bio); bio_put(bio);
@ -145,7 +145,7 @@ static void nvmet_execute_discard(struct nvmet_req *req)
bio->bi_private = req; bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done; bio->bi_end_io = nvmet_bio_done;
if (status) { if (status) {
bio->bi_error = -EIO; bio->bi_status = BLK_STS_IOERR;
bio_endio(bio); bio_endio(bio);
} else { } else {
submit_bio(bio); submit_bio(bio);
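nvmet stops caring which errno the bio carried: any non-zero bi_status collapses into a single protocol-level failure. A hypothetical helper spelling out the inline mapping above (NVME_SC_SUCCESS assumed to be 0, matching the literal the old code used):

/* hypothetical helper; the mapping is deliberately one-way and lossy */
static u16 example_blk_to_nvme_status(blk_status_t status)
{
	return status ? (NVME_SC_INTERNAL | NVME_SC_DNR) : NVME_SC_SUCCESS;
}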


@ -296,8 +296,8 @@ static void iblock_bio_done(struct bio *bio)
struct se_cmd *cmd = bio->bi_private; struct se_cmd *cmd = bio->bi_private;
struct iblock_req *ibr = cmd->priv; struct iblock_req *ibr = cmd->priv;
if (bio->bi_error) { if (bio->bi_status) {
pr_err("bio error: %p, err: %d\n", bio, bio->bi_error); pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
/* /*
* Bump the ib_bio_err_cnt and release bio. * Bump the ib_bio_err_cnt and release bio.
*/ */
@ -354,11 +354,11 @@ static void iblock_end_io_flush(struct bio *bio)
{ {
struct se_cmd *cmd = bio->bi_private; struct se_cmd *cmd = bio->bi_private;
if (bio->bi_error) if (bio->bi_status)
pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error); pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);
if (cmd) { if (cmd) {
if (bio->bi_error) if (bio->bi_status)
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
else else
target_complete_cmd(cmd, SAM_STAT_GOOD); target_complete_cmd(cmd, SAM_STAT_GOOD);


@ -262,8 +262,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
if (vecs != inline_vecs) if (vecs != inline_vecs)
kfree(vecs); kfree(vecs);
if (unlikely(bio.bi_error)) if (unlikely(bio.bi_status))
return bio.bi_error; return blk_status_to_errno(bio.bi_status);
return ret; return ret;
} }
@ -288,16 +288,18 @@ static void blkdev_bio_end_io(struct bio *bio)
bool should_dirty = dio->should_dirty; bool should_dirty = dio->should_dirty;
if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) { if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
if (bio->bi_error && !dio->bio.bi_error) if (bio->bi_status && !dio->bio.bi_status)
dio->bio.bi_error = bio->bi_error; dio->bio.bi_status = bio->bi_status;
} else { } else {
if (!dio->is_sync) { if (!dio->is_sync) {
struct kiocb *iocb = dio->iocb; struct kiocb *iocb = dio->iocb;
ssize_t ret = dio->bio.bi_error; ssize_t ret;
if (likely(!ret)) { if (likely(!dio->bio.bi_status)) {
ret = dio->size; ret = dio->size;
iocb->ki_pos += ret; iocb->ki_pos += ret;
} else {
ret = blk_status_to_errno(dio->bio.bi_status);
} }
dio->iocb->ki_complete(iocb, ret, 0); dio->iocb->ki_complete(iocb, ret, 0);
@ -363,7 +365,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
ret = bio_iov_iter_get_pages(bio, iter); ret = bio_iov_iter_get_pages(bio, iter);
if (unlikely(ret)) { if (unlikely(ret)) {
bio->bi_error = -EIO; bio->bi_status = BLK_STS_IOERR;
bio_endio(bio); bio_endio(bio);
break; break;
} }
@ -413,7 +415,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
if (!ret) if (!ret)
ret = dio->bio.bi_error; ret = blk_status_to_errno(dio->bio.bi_status);
if (likely(!ret)) if (likely(!ret))
ret = dio->size; ret = dio->size;
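The direct-I/O completion above is the template for multi-bio callers: the first non-zero bi_status is kept, later ones are ignored, and conversion to an errno happens once, when the result is reported back to the submitter. A condensed sketch of that shape, names hypothetical:

struct example_dio {				/* hypothetical */
	struct kiocb *iocb;
	atomic_t ref;				/* one per in-flight bio */
	ssize_t size;
	blk_status_t status;			/* first failure wins */
};

static void example_dio_end_io(struct bio *bio)
{
	struct example_dio *dio = bio->bi_private;

	if (bio->bi_status && !dio->status)	/* keep the first error */
		dio->status = bio->bi_status;

	if (atomic_dec_and_test(&dio->ref)) {
		/* errno leaves the block layer exactly here */
		ssize_t ret = blk_status_to_errno(dio->status);

		if (!ret)
			ret = dio->size;
		dio->iocb->ki_complete(dio->iocb, ret, 0);
		kfree(dio);
	}
	bio_put(bio);
}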


@ -310,7 +310,8 @@ struct btrfs_dio_private {
* The original bio may be split to several sub-bios, this is * The original bio may be split to several sub-bios, this is
* done during endio of sub-bios * done during endio of sub-bios
*/ */
int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int); blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *,
blk_status_t);
}; };
/* /*


@ -2129,7 +2129,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
/* mutex is not held! This is not safe if IO is not yet completed /* mutex is not held! This is not safe if IO is not yet completed
* on umount */ * on umount */
iodone_w_error = 0; iodone_w_error = 0;
if (bp->bi_error) if (bp->bi_status)
iodone_w_error = 1; iodone_w_error = 1;
BUG_ON(NULL == block); BUG_ON(NULL == block);
@ -2143,7 +2143,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
if ((dev_state->state->print_mask & if ((dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
pr_info("bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n", pr_info("bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
bp->bi_error, bp->bi_status,
btrfsic_get_block_type(dev_state->state, block), btrfsic_get_block_type(dev_state->state, block),
block->logical_bytenr, dev_state->name, block->logical_bytenr, dev_state->name,
block->dev_bytenr, block->mirror_num); block->dev_bytenr, block->mirror_num);


@ -155,7 +155,7 @@ static void end_compressed_bio_read(struct bio *bio)
unsigned long index; unsigned long index;
int ret; int ret;
if (bio->bi_error) if (bio->bi_status)
cb->errors = 1; cb->errors = 1;
/* if there are more bios still pending for this compressed /* if there are more bios still pending for this compressed
@ -268,7 +268,7 @@ static void end_compressed_bio_write(struct bio *bio)
struct page *page; struct page *page;
unsigned long index; unsigned long index;
if (bio->bi_error) if (bio->bi_status)
cb->errors = 1; cb->errors = 1;
/* if there are more bios still pending for this compressed /* if there are more bios still pending for this compressed
@ -287,7 +287,7 @@ static void end_compressed_bio_write(struct bio *bio)
cb->start, cb->start,
cb->start + cb->len - 1, cb->start + cb->len - 1,
NULL, NULL,
bio->bi_error ? 0 : 1); bio->bi_status ? 0 : 1);
cb->compressed_pages[0]->mapping = NULL; cb->compressed_pages[0]->mapping = NULL;
end_compressed_writeback(inode, cb); end_compressed_writeback(inode, cb);
@ -320,7 +320,7 @@ out:
* This also checksums the file bytes and gets things ready for * This also checksums the file bytes and gets things ready for
* the end io hooks. * the end io hooks.
*/ */
int btrfs_submit_compressed_write(struct inode *inode, u64 start, blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
unsigned long len, u64 disk_start, unsigned long len, u64 disk_start,
unsigned long compressed_len, unsigned long compressed_len,
struct page **compressed_pages, struct page **compressed_pages,
@ -335,13 +335,13 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
struct page *page; struct page *page;
u64 first_byte = disk_start; u64 first_byte = disk_start;
struct block_device *bdev; struct block_device *bdev;
int ret; blk_status_t ret;
int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
WARN_ON(start & ((u64)PAGE_SIZE - 1)); WARN_ON(start & ((u64)PAGE_SIZE - 1));
cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS); cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
if (!cb) if (!cb)
return -ENOMEM; return BLK_STS_RESOURCE;
refcount_set(&cb->pending_bios, 0); refcount_set(&cb->pending_bios, 0);
cb->errors = 0; cb->errors = 0;
cb->inode = inode; cb->inode = inode;
@ -358,7 +358,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
if (!bio) { if (!bio) {
kfree(cb); kfree(cb);
return -ENOMEM; return BLK_STS_RESOURCE;
} }
bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_private = cb; bio->bi_private = cb;
@ -368,17 +368,17 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
/* create and submit bios for the compressed pages */ /* create and submit bios for the compressed pages */
bytes_left = compressed_len; bytes_left = compressed_len;
for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
int submit = 0;
page = compressed_pages[pg_index]; page = compressed_pages[pg_index];
page->mapping = inode->i_mapping; page->mapping = inode->i_mapping;
if (bio->bi_iter.bi_size) if (bio->bi_iter.bi_size)
ret = io_tree->ops->merge_bio_hook(page, 0, submit = io_tree->ops->merge_bio_hook(page, 0,
PAGE_SIZE, PAGE_SIZE,
bio, 0); bio, 0);
else
ret = 0;
page->mapping = NULL; page->mapping = NULL;
if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) < if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
PAGE_SIZE) { PAGE_SIZE) {
bio_get(bio); bio_get(bio);
@ -400,7 +400,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
ret = btrfs_map_bio(fs_info, bio, 0, 1); ret = btrfs_map_bio(fs_info, bio, 0, 1);
if (ret) { if (ret) {
bio->bi_error = ret; bio->bi_status = ret;
bio_endio(bio); bio_endio(bio);
} }
@ -434,7 +434,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
ret = btrfs_map_bio(fs_info, bio, 0, 1); ret = btrfs_map_bio(fs_info, bio, 0, 1);
if (ret) { if (ret) {
bio->bi_error = ret; bio->bi_status = ret;
bio_endio(bio); bio_endio(bio);
} }
@ -569,7 +569,7 @@ next:
* After the compressed pages are read, we copy the bytes into the * After the compressed pages are read, we copy the bytes into the
* bio we were passed and then call the bio end_io calls * bio we were passed and then call the bio end_io calls
*/ */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags) int mirror_num, unsigned long bio_flags)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@ -586,7 +586,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
u64 em_len; u64 em_len;
u64 em_start; u64 em_start;
struct extent_map *em; struct extent_map *em;
int ret = -ENOMEM; blk_status_t ret = BLK_STS_RESOURCE;
int faili = 0; int faili = 0;
u32 *sums; u32 *sums;
@ -600,7 +600,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
PAGE_SIZE); PAGE_SIZE);
read_unlock(&em_tree->lock); read_unlock(&em_tree->lock);
if (!em) if (!em)
return -EIO; return BLK_STS_IOERR;
compressed_len = em->block_len; compressed_len = em->block_len;
cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS); cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
@ -659,19 +659,19 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
refcount_set(&cb->pending_bios, 1); refcount_set(&cb->pending_bios, 1);
for (pg_index = 0; pg_index < nr_pages; pg_index++) { for (pg_index = 0; pg_index < nr_pages; pg_index++) {
int submit = 0;
page = cb->compressed_pages[pg_index]; page = cb->compressed_pages[pg_index];
page->mapping = inode->i_mapping; page->mapping = inode->i_mapping;
page->index = em_start >> PAGE_SHIFT; page->index = em_start >> PAGE_SHIFT;
if (comp_bio->bi_iter.bi_size) if (comp_bio->bi_iter.bi_size)
ret = tree->ops->merge_bio_hook(page, 0, submit = tree->ops->merge_bio_hook(page, 0,
PAGE_SIZE, PAGE_SIZE,
comp_bio, 0); comp_bio, 0);
else
ret = 0;
page->mapping = NULL; page->mapping = NULL;
if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) < if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
PAGE_SIZE) { PAGE_SIZE) {
bio_get(comp_bio); bio_get(comp_bio);
@ -697,7 +697,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0); ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
if (ret) { if (ret) {
comp_bio->bi_error = ret; comp_bio->bi_status = ret;
bio_endio(comp_bio); bio_endio(comp_bio);
} }
@ -726,7 +726,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0); ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
if (ret) { if (ret) {
comp_bio->bi_error = ret; comp_bio->bi_status = ret;
bio_endio(comp_bio); bio_endio(comp_bio);
} }
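The new `submit` variable in both compression loops is not cosmetic: merge_bio_hook() still returns a plain int, while `ret` is now a __bitwise blk_status_t, so the two value domains may no longer share a variable. A hypothetical illustration of the pitfall the split avoids:

/* hypothetical; shows why the hook's int verdict must stay out of `ret` */
static blk_status_t example_keep_domains_apart(int hook_verdict)
{
	blk_status_t ret = BLK_STS_OK;

	/*
	 * Writing `ret = hook_verdict;` would smuggle a plain 1 into the
	 * status domain (not a valid BLK_STS_* value) and trip sparse's
	 * __bitwise checking; branch on the int, store only BLK_STS_*.
	 */
	if (hook_verdict)
		ret = BLK_STS_IOERR;
	return ret;
}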


@ -48,12 +48,12 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
unsigned long total_out, u64 disk_start, unsigned long total_out, u64 disk_start,
struct bio *bio); struct bio *bio);
int btrfs_submit_compressed_write(struct inode *inode, u64 start, blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
unsigned long len, u64 disk_start, unsigned long len, u64 disk_start,
unsigned long compressed_len, unsigned long compressed_len,
struct page **compressed_pages, struct page **compressed_pages,
unsigned long nr_pages); unsigned long nr_pages);
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags); int mirror_num, unsigned long bio_flags);
enum btrfs_compression_type { enum btrfs_compression_type {


@ -3078,8 +3078,8 @@ int btrfs_find_name_in_ext_backref(struct btrfs_path *path,
struct btrfs_dio_private; struct btrfs_dio_private;
int btrfs_del_csums(struct btrfs_trans_handle *trans, int btrfs_del_csums(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr, u64 len); struct btrfs_fs_info *fs_info, u64 bytenr, u64 len);
int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst); blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst);
int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
u64 logical_offset); u64 logical_offset);
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_root *root,
@ -3094,7 +3094,7 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_root *root,
struct btrfs_ordered_sum *sums); struct btrfs_ordered_sum *sums);
int btrfs_csum_one_bio(struct inode *inode, struct bio *bio, blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
u64 file_start, int contig); u64 file_start, int contig);
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list, int search_commit); struct list_head *list, int search_commit);


@ -87,7 +87,7 @@ struct btrfs_end_io_wq {
bio_end_io_t *end_io; bio_end_io_t *end_io;
void *private; void *private;
struct btrfs_fs_info *info; struct btrfs_fs_info *info;
int error; blk_status_t status;
enum btrfs_wq_endio_type metadata; enum btrfs_wq_endio_type metadata;
struct list_head list; struct list_head list;
struct btrfs_work work; struct btrfs_work work;
@ -131,7 +131,7 @@ struct async_submit_bio {
*/ */
u64 bio_offset; u64 bio_offset;
struct btrfs_work work; struct btrfs_work work;
int error; blk_status_t status;
}; };
/* /*
@ -799,7 +799,7 @@ static void end_workqueue_bio(struct bio *bio)
btrfs_work_func_t func; btrfs_work_func_t func;
fs_info = end_io_wq->info; fs_info = end_io_wq->info;
end_io_wq->error = bio->bi_error; end_io_wq->status = bio->bi_status;
if (bio_op(bio) == REQ_OP_WRITE) { if (bio_op(bio) == REQ_OP_WRITE) {
if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
@ -836,19 +836,19 @@ static void end_workqueue_bio(struct bio *bio)
btrfs_queue_work(wq, &end_io_wq->work); btrfs_queue_work(wq, &end_io_wq->work);
} }
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
enum btrfs_wq_endio_type metadata) enum btrfs_wq_endio_type metadata)
{ {
struct btrfs_end_io_wq *end_io_wq; struct btrfs_end_io_wq *end_io_wq;
end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS); end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
if (!end_io_wq) if (!end_io_wq)
return -ENOMEM; return BLK_STS_RESOURCE;
end_io_wq->private = bio->bi_private; end_io_wq->private = bio->bi_private;
end_io_wq->end_io = bio->bi_end_io; end_io_wq->end_io = bio->bi_end_io;
end_io_wq->info = info; end_io_wq->info = info;
end_io_wq->error = 0; end_io_wq->status = 0;
end_io_wq->bio = bio; end_io_wq->bio = bio;
end_io_wq->metadata = metadata; end_io_wq->metadata = metadata;
@ -868,14 +868,14 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
static void run_one_async_start(struct btrfs_work *work) static void run_one_async_start(struct btrfs_work *work)
{ {
struct async_submit_bio *async; struct async_submit_bio *async;
int ret; blk_status_t ret;
async = container_of(work, struct async_submit_bio, work); async = container_of(work, struct async_submit_bio, work);
ret = async->submit_bio_start(async->inode, async->bio, ret = async->submit_bio_start(async->inode, async->bio,
async->mirror_num, async->bio_flags, async->mirror_num, async->bio_flags,
async->bio_offset); async->bio_offset);
if (ret) if (ret)
async->error = ret; async->status = ret;
} }
static void run_one_async_done(struct btrfs_work *work) static void run_one_async_done(struct btrfs_work *work)
@ -898,8 +898,8 @@ static void run_one_async_done(struct btrfs_work *work)
wake_up(&fs_info->async_submit_wait); wake_up(&fs_info->async_submit_wait);
/* If an error occurred we just want to clean up the bio and move on */ /* If an error occurred we just want to clean up the bio and move on */
if (async->error) { if (async->status) {
async->bio->bi_error = async->error; async->bio->bi_status = async->status;
bio_endio(async->bio); bio_endio(async->bio);
return; return;
} }
@ -916,18 +916,17 @@ static void run_one_async_free(struct btrfs_work *work)
kfree(async); kfree(async);
} }
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
struct bio *bio, int mirror_num, struct inode *inode, struct bio *bio, int mirror_num,
unsigned long bio_flags, unsigned long bio_flags, u64 bio_offset,
u64 bio_offset, extent_submit_bio_hook_t *submit_bio_start,
extent_submit_bio_hook_t *submit_bio_start, extent_submit_bio_hook_t *submit_bio_done)
extent_submit_bio_hook_t *submit_bio_done)
{ {
struct async_submit_bio *async; struct async_submit_bio *async;
async = kmalloc(sizeof(*async), GFP_NOFS); async = kmalloc(sizeof(*async), GFP_NOFS);
if (!async) if (!async)
return -ENOMEM; return BLK_STS_RESOURCE;
async->inode = inode; async->inode = inode;
async->bio = bio; async->bio = bio;
@ -941,7 +940,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
async->bio_flags = bio_flags; async->bio_flags = bio_flags;
async->bio_offset = bio_offset; async->bio_offset = bio_offset;
async->error = 0; async->status = 0;
atomic_inc(&fs_info->nr_async_submits); atomic_inc(&fs_info->nr_async_submits);
@ -959,7 +958,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
return 0; return 0;
} }
static int btree_csum_one_bio(struct bio *bio) static blk_status_t btree_csum_one_bio(struct bio *bio)
{ {
struct bio_vec *bvec; struct bio_vec *bvec;
struct btrfs_root *root; struct btrfs_root *root;
@ -972,12 +971,12 @@ static int btree_csum_one_bio(struct bio *bio)
break; break;
} }
return ret; return errno_to_blk_status(ret);
} }
static int __btree_submit_bio_start(struct inode *inode, struct bio *bio, static blk_status_t __btree_submit_bio_start(struct inode *inode,
int mirror_num, unsigned long bio_flags, struct bio *bio, int mirror_num, unsigned long bio_flags,
u64 bio_offset) u64 bio_offset)
{ {
/* /*
* when we're called for a write, we're already in the async * when we're called for a write, we're already in the async
@ -986,11 +985,11 @@ static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
return btree_csum_one_bio(bio); return btree_csum_one_bio(bio);
} }
static int __btree_submit_bio_done(struct inode *inode, struct bio *bio, static blk_status_t __btree_submit_bio_done(struct inode *inode,
int mirror_num, unsigned long bio_flags, struct bio *bio, int mirror_num, unsigned long bio_flags,
u64 bio_offset) u64 bio_offset)
{ {
int ret; blk_status_t ret;
/* /*
* when we're called for a write, we're already in the async * when we're called for a write, we're already in the async
@ -998,7 +997,7 @@ static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
*/ */
ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1); ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
if (ret) { if (ret) {
bio->bi_error = ret; bio->bi_status = ret;
bio_endio(bio); bio_endio(bio);
} }
return ret; return ret;
@ -1015,13 +1014,13 @@ static int check_async_write(unsigned long bio_flags)
return 1; return 1;
} }
static int btree_submit_bio_hook(struct inode *inode, struct bio *bio, static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags, int mirror_num, unsigned long bio_flags,
u64 bio_offset) u64 bio_offset)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int async = check_async_write(bio_flags); int async = check_async_write(bio_flags);
int ret; blk_status_t ret;
if (bio_op(bio) != REQ_OP_WRITE) { if (bio_op(bio) != REQ_OP_WRITE) {
/* /*
@ -1054,7 +1053,7 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
return 0; return 0;
out_w_error: out_w_error:
bio->bi_error = ret; bio->bi_status = ret;
bio_endio(bio); bio_endio(bio);
return ret; return ret;
} }
@ -1820,7 +1819,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
end_io_wq = container_of(work, struct btrfs_end_io_wq, work); end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
bio = end_io_wq->bio; bio = end_io_wq->bio;
bio->bi_error = end_io_wq->error; bio->bi_status = end_io_wq->status;
bio->bi_private = end_io_wq->private; bio->bi_private = end_io_wq->private;
bio->bi_end_io = end_io_wq->end_io; bio->bi_end_io = end_io_wq->end_io;
kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq); kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
@ -3495,11 +3494,11 @@ static void btrfs_end_empty_barrier(struct bio *bio)
* any device where the flush fails with eopnotsupp is flagged as not-barrier * any device where the flush fails with eopnotsupp is flagged as not-barrier
* capable * capable
*/ */
static int write_dev_flush(struct btrfs_device *device, int wait) static blk_status_t write_dev_flush(struct btrfs_device *device, int wait)
{ {
struct request_queue *q = bdev_get_queue(device->bdev); struct request_queue *q = bdev_get_queue(device->bdev);
struct bio *bio; struct bio *bio;
int ret = 0; blk_status_t ret = 0;
if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
return 0; return 0;
@ -3511,8 +3510,8 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
wait_for_completion(&device->flush_wait); wait_for_completion(&device->flush_wait);
if (bio->bi_error) { if (bio->bi_status) {
ret = bio->bi_error; ret = bio->bi_status;
btrfs_dev_stat_inc_and_print(device, btrfs_dev_stat_inc_and_print(device,
BTRFS_DEV_STAT_FLUSH_ERRS); BTRFS_DEV_STAT_FLUSH_ERRS);
} }
@ -3531,7 +3530,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
device->flush_bio = NULL; device->flush_bio = NULL;
bio = btrfs_io_bio_alloc(GFP_NOFS, 0); bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
if (!bio) if (!bio)
return -ENOMEM; return BLK_STS_RESOURCE;
bio->bi_end_io = btrfs_end_empty_barrier; bio->bi_end_io = btrfs_end_empty_barrier;
bio->bi_bdev = device->bdev; bio->bi_bdev = device->bdev;
@ -3556,7 +3555,7 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
struct btrfs_device *dev; struct btrfs_device *dev;
int errors_send = 0; int errors_send = 0;
int errors_wait = 0; int errors_wait = 0;
int ret; blk_status_t ret;
/* send down all the barriers */ /* send down all the barriers */
head = &info->fs_devices->devices; head = &info->fs_devices->devices;
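The async submit machinery has nobody to return a status to, so the renamed field parks the blk_status_t in the work item: the start hook records it, and the done callback replays it into the bio. A stripped-down sketch of that hand-off, names hypothetical:

struct example_async {			/* hypothetical */
	struct bio *bio;
	blk_status_t status;		/* filled in by the start hook */
};

static void example_async_done(struct example_async *async)
{
	if (async->status) {
		/* replay the deferred failure into the bio itself */
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}
	submit_bio(async->bio);
}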


@ -118,13 +118,13 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid); int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
u32 btrfs_csum_data(const char *data, u32 seed, size_t len); u32 btrfs_csum_data(const char *data, u32 seed, size_t len);
void btrfs_csum_final(u32 crc, u8 *result); void btrfs_csum_final(u32 crc, u8 *result);
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
enum btrfs_wq_endio_type metadata); enum btrfs_wq_endio_type metadata);
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
struct bio *bio, int mirror_num, struct inode *inode, struct bio *bio, int mirror_num,
unsigned long bio_flags, u64 bio_offset, unsigned long bio_flags, u64 bio_offset,
extent_submit_bio_hook_t *submit_bio_start, extent_submit_bio_hook_t *submit_bio_start,
extent_submit_bio_hook_t *submit_bio_done); extent_submit_bio_hook_t *submit_bio_done);
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info); unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
int btrfs_write_tree_block(struct extent_buffer *buf); int btrfs_write_tree_block(struct extent_buffer *buf);
int btrfs_wait_tree_block_writeback(struct extent_buffer *buf); int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);


@ -2399,6 +2399,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct bio *bio; struct bio *bio;
int read_mode = 0; int read_mode = 0;
blk_status_t status;
int ret; int ret;
BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@ -2431,11 +2432,12 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
"Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d", "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
read_mode, failrec->this_mirror, failrec->in_validation); read_mode, failrec->this_mirror, failrec->in_validation);
ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror, status = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
failrec->bio_flags, 0); failrec->bio_flags, 0);
if (ret) { if (status) {
free_io_failure(BTRFS_I(inode), failrec); free_io_failure(BTRFS_I(inode), failrec);
bio_put(bio); bio_put(bio);
ret = blk_status_to_errno(status);
} }
return ret; return ret;
@ -2474,6 +2476,7 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
*/ */
static void end_bio_extent_writepage(struct bio *bio) static void end_bio_extent_writepage(struct bio *bio)
{ {
int error = blk_status_to_errno(bio->bi_status);
struct bio_vec *bvec; struct bio_vec *bvec;
u64 start; u64 start;
u64 end; u64 end;
@ -2503,7 +2506,7 @@ static void end_bio_extent_writepage(struct bio *bio)
start = page_offset(page); start = page_offset(page);
end = start + bvec->bv_offset + bvec->bv_len - 1; end = start + bvec->bv_offset + bvec->bv_len - 1;
end_extent_writepage(page, bio->bi_error, start, end); end_extent_writepage(page, error, start, end);
end_page_writeback(page); end_page_writeback(page);
} }
@ -2536,7 +2539,7 @@ endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
static void end_bio_extent_readpage(struct bio *bio) static void end_bio_extent_readpage(struct bio *bio)
{ {
struct bio_vec *bvec; struct bio_vec *bvec;
int uptodate = !bio->bi_error; int uptodate = !bio->bi_status;
struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
struct extent_io_tree *tree; struct extent_io_tree *tree;
u64 offset = 0; u64 offset = 0;
@ -2556,7 +2559,7 @@ static void end_bio_extent_readpage(struct bio *bio)
btrfs_debug(fs_info, btrfs_debug(fs_info,
"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u", "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
(u64)bio->bi_iter.bi_sector, bio->bi_error, (u64)bio->bi_iter.bi_sector, bio->bi_status,
io_bio->mirror_num); io_bio->mirror_num);
tree = &BTRFS_I(inode)->io_tree; tree = &BTRFS_I(inode)->io_tree;
@ -2615,7 +2618,7 @@ static void end_bio_extent_readpage(struct bio *bio)
ret = bio_readpage_error(bio, offset, page, ret = bio_readpage_error(bio, offset, page,
start, end, mirror); start, end, mirror);
if (ret == 0) { if (ret == 0) {
uptodate = !bio->bi_error; uptodate = !bio->bi_status;
offset += len; offset += len;
continue; continue;
} }
@ -2673,7 +2676,7 @@ readpage_ok:
endio_readpage_release_extent(tree, extent_start, extent_len, endio_readpage_release_extent(tree, extent_start, extent_len,
uptodate); uptodate);
if (io_bio->end_io) if (io_bio->end_io)
io_bio->end_io(io_bio, bio->bi_error); io_bio->end_io(io_bio, blk_status_to_errno(bio->bi_status));
bio_put(bio); bio_put(bio);
} }
@ -2743,7 +2746,7 @@ struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
static int __must_check submit_one_bio(struct bio *bio, int mirror_num, static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
unsigned long bio_flags) unsigned long bio_flags)
{ {
int ret = 0; blk_status_t ret = 0;
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
struct page *page = bvec->bv_page; struct page *page = bvec->bv_page;
struct extent_io_tree *tree = bio->bi_private; struct extent_io_tree *tree = bio->bi_private;
@ -2761,7 +2764,7 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
btrfsic_submit_bio(bio); btrfsic_submit_bio(bio);
bio_put(bio); bio_put(bio);
return ret; return blk_status_to_errno(ret);
} }
static int merge_bio(struct extent_io_tree *tree, struct page *page, static int merge_bio(struct extent_io_tree *tree, struct page *page,
@ -3707,7 +3710,7 @@ static void end_bio_extent_buffer_writepage(struct bio *bio)
BUG_ON(!eb); BUG_ON(!eb);
done = atomic_dec_and_test(&eb->io_pages); done = atomic_dec_and_test(&eb->io_pages);
if (bio->bi_error || if (bio->bi_status ||
test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
ClearPageUptodate(page); ClearPageUptodate(page);
set_btree_ioerr(page); set_btree_ioerr(page);


@ -92,9 +92,9 @@ struct btrfs_inode;
struct btrfs_io_bio; struct btrfs_io_bio;
struct io_failure_record; struct io_failure_record;
typedef int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio, typedef blk_status_t (extent_submit_bio_hook_t)(struct inode *inode,
int mirror_num, unsigned long bio_flags, struct bio *bio, int mirror_num, unsigned long bio_flags,
u64 bio_offset); u64 bio_offset);
struct extent_io_ops { struct extent_io_ops {
/* /*
* The following callbacks must always be defined, the function * The following callbacks must always be defined, the function


@ -160,7 +160,7 @@ static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
kfree(bio->csum_allocated); kfree(bio->csum_allocated);
} }
static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
u64 logical_offset, u32 *dst, int dio) u64 logical_offset, u32 *dst, int dio)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@ -182,7 +182,7 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
path = btrfs_alloc_path(); path = btrfs_alloc_path();
if (!path) if (!path)
return -ENOMEM; return BLK_STS_RESOURCE;
nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
if (!dst) { if (!dst) {
@ -191,7 +191,7 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
csum_size, GFP_NOFS); csum_size, GFP_NOFS);
if (!btrfs_bio->csum_allocated) { if (!btrfs_bio->csum_allocated) {
btrfs_free_path(path); btrfs_free_path(path);
return -ENOMEM; return BLK_STS_RESOURCE;
} }
btrfs_bio->csum = btrfs_bio->csum_allocated; btrfs_bio->csum = btrfs_bio->csum_allocated;
btrfs_bio->end_io = btrfs_io_bio_endio_readpage; btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
@ -303,12 +303,12 @@ next:
return 0; return 0;
} }
int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst) blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{ {
return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0); return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
} }
int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset) blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{ {
return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1); return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
} }
@ -433,7 +433,7 @@ fail:
return ret; return ret;
} }
int btrfs_csum_one_bio(struct inode *inode, struct bio *bio, blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
u64 file_start, int contig) u64 file_start, int contig)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@ -452,7 +452,7 @@ int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size), sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
GFP_NOFS); GFP_NOFS);
if (!sums) if (!sums)
return -ENOMEM; return BLK_STS_RESOURCE;
sums->len = bio->bi_iter.bi_size; sums->len = bio->bi_iter.bi_size;
INIT_LIST_HEAD(&sums->list); INIT_LIST_HEAD(&sums->list);


@ -842,13 +842,12 @@ retry:
NULL, EXTENT_LOCKED | EXTENT_DELALLOC, NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
PAGE_SET_WRITEBACK); PAGE_SET_WRITEBACK);
ret = btrfs_submit_compressed_write(inode, if (btrfs_submit_compressed_write(inode,
async_extent->start, async_extent->start,
async_extent->ram_size, async_extent->ram_size,
ins.objectid, ins.objectid,
ins.offset, async_extent->pages, ins.offset, async_extent->pages,
async_extent->nr_pages); async_extent->nr_pages)) {
if (ret) {
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct page *p = async_extent->pages[0]; struct page *p = async_extent->pages[0];
const u64 start = async_extent->start; const u64 start = async_extent->start;
@ -1901,11 +1900,11 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
* At IO completion time the csums attached to the ordered extent record * At IO completion time the csums attached to the ordered extent record
* are inserted into the btree * are inserted into the btree
*/ */
static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio, static blk_status_t __btrfs_submit_bio_start(struct inode *inode,
int mirror_num, unsigned long bio_flags, struct bio *bio, int mirror_num, unsigned long bio_flags,
u64 bio_offset) u64 bio_offset)
{ {
int ret = 0; blk_status_t ret = 0;
ret = btrfs_csum_one_bio(inode, bio, 0, 0); ret = btrfs_csum_one_bio(inode, bio, 0, 0);
BUG_ON(ret); /* -ENOMEM */ BUG_ON(ret); /* -ENOMEM */
@ -1920,16 +1919,16 @@ static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
* At IO completion time the csums attached to the ordered extent record * At IO completion time the csums attached to the ordered extent record
* are inserted into the btree * are inserted into the btree
*/ */
static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio, static blk_status_t __btrfs_submit_bio_done(struct inode *inode,
int mirror_num, unsigned long bio_flags, struct bio *bio, int mirror_num, unsigned long bio_flags,
u64 bio_offset) u64 bio_offset)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret; blk_status_t ret;
ret = btrfs_map_bio(fs_info, bio, mirror_num, 1); ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
if (ret) { if (ret) {
bio->bi_error = ret; bio->bi_status = ret;
bio_endio(bio); bio_endio(bio);
} }
return ret; return ret;
@ -1939,14 +1938,14 @@ static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio,
* extent_io.c submission hook. This does the right thing for csum calculation * extent_io.c submission hook. This does the right thing for csum calculation
* on write, or reading the csums from the tree before a read * on write, or reading the csums from the tree before a read
*/ */
static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags, int mirror_num, unsigned long bio_flags,
u64 bio_offset) u64 bio_offset)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA; enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
int ret = 0; blk_status_t ret = 0;
int skip_sum; int skip_sum;
int async = !atomic_read(&BTRFS_I(inode)->sync_writers); int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
@ -1991,8 +1990,8 @@ mapit:
ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
out: out:
if (ret < 0) { if (ret) {
bio->bi_error = ret; bio->bi_status = ret;
bio_endio(bio); bio_endio(bio);
} }
return ret; return ret;
@ -8037,7 +8036,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
struct bio_vec *bvec; struct bio_vec *bvec;
int i; int i;
if (bio->bi_error) if (bio->bi_status)
goto end; goto end;
ASSERT(bio->bi_vcnt == 1); ASSERT(bio->bi_vcnt == 1);
@ -8116,7 +8115,7 @@ static void btrfs_retry_endio(struct bio *bio)
int ret; int ret;
int i; int i;
if (bio->bi_error) if (bio->bi_status)
goto end; goto end;
uptodate = 1; uptodate = 1;
@ -8141,8 +8140,8 @@ end:
bio_put(bio); bio_put(bio);
} }
static int __btrfs_subio_endio_read(struct inode *inode, static blk_status_t __btrfs_subio_endio_read(struct inode *inode,
struct btrfs_io_bio *io_bio, int err) struct btrfs_io_bio *io_bio, blk_status_t err)
{ {
struct btrfs_fs_info *fs_info; struct btrfs_fs_info *fs_info;
struct bio_vec *bvec; struct bio_vec *bvec;
@ -8184,7 +8183,7 @@ try_again:
io_bio->mirror_num, io_bio->mirror_num,
btrfs_retry_endio, &done); btrfs_retry_endio, &done);
if (ret) { if (ret) {
err = ret; err = errno_to_blk_status(ret);
goto next; goto next;
} }
@ -8211,8 +8210,8 @@ next:
return err; return err;
} }
static int btrfs_subio_endio_read(struct inode *inode, static blk_status_t btrfs_subio_endio_read(struct inode *inode,
struct btrfs_io_bio *io_bio, int err) struct btrfs_io_bio *io_bio, blk_status_t err)
{ {
bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
@ -8232,7 +8231,7 @@ static void btrfs_endio_direct_read(struct bio *bio)
struct inode *inode = dip->inode; struct inode *inode = dip->inode;
struct bio *dio_bio; struct bio *dio_bio;
struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
int err = bio->bi_error; blk_status_t err = bio->bi_status;
if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
err = btrfs_subio_endio_read(inode, io_bio, err); err = btrfs_subio_endio_read(inode, io_bio, err);
@ -8243,11 +8242,11 @@ static void btrfs_endio_direct_read(struct bio *bio)
kfree(dip); kfree(dip);
dio_bio->bi_error = bio->bi_error; dio_bio->bi_status = bio->bi_status;
dio_end_io(dio_bio); dio_end_io(dio_bio);
if (io_bio->end_io) if (io_bio->end_io)
io_bio->end_io(io_bio, err); io_bio->end_io(io_bio, blk_status_to_errno(err));
bio_put(bio); bio_put(bio);
} }
@ -8299,20 +8298,20 @@ static void btrfs_endio_direct_write(struct bio *bio)
struct bio *dio_bio = dip->dio_bio; struct bio *dio_bio = dip->dio_bio;
__endio_write_update_ordered(dip->inode, dip->logical_offset, __endio_write_update_ordered(dip->inode, dip->logical_offset,
dip->bytes, !bio->bi_error); dip->bytes, !bio->bi_status);
kfree(dip); kfree(dip);
dio_bio->bi_error = bio->bi_error; dio_bio->bi_status = bio->bi_status;
dio_end_io(dio_bio); dio_end_io(dio_bio);
bio_put(bio); bio_put(bio);
} }
static int __btrfs_submit_bio_start_direct_io(struct inode *inode, static blk_status_t __btrfs_submit_bio_start_direct_io(struct inode *inode,
struct bio *bio, int mirror_num, struct bio *bio, int mirror_num,
unsigned long bio_flags, u64 offset) unsigned long bio_flags, u64 offset)
{ {
int ret; blk_status_t ret;
ret = btrfs_csum_one_bio(inode, bio, offset, 1); ret = btrfs_csum_one_bio(inode, bio, offset, 1);
BUG_ON(ret); /* -ENOMEM */ BUG_ON(ret); /* -ENOMEM */
return 0; return 0;
@ -8321,7 +8320,7 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode,
static void btrfs_end_dio_bio(struct bio *bio) static void btrfs_end_dio_bio(struct bio *bio)
{ {
struct btrfs_dio_private *dip = bio->bi_private; struct btrfs_dio_private *dip = bio->bi_private;
int err = bio->bi_error; blk_status_t err = bio->bi_status;
if (err) if (err)
btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
@ -8351,7 +8350,7 @@ static void btrfs_end_dio_bio(struct bio *bio)
if (dip->errors) { if (dip->errors) {
bio_io_error(dip->orig_bio); bio_io_error(dip->orig_bio);
} else { } else {
dip->dio_bio->bi_error = 0; dip->dio_bio->bi_status = 0;
bio_endio(dip->orig_bio); bio_endio(dip->orig_bio);
} }
out: out:
@ -8368,14 +8367,14 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
return bio; return bio;
} }
static inline int btrfs_lookup_and_bind_dio_csum(struct inode *inode, static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
struct btrfs_dio_private *dip, struct btrfs_dio_private *dip,
struct bio *bio, struct bio *bio,
u64 file_offset) u64 file_offset)
{ {
struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio); struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
int ret; blk_status_t ret;
/* /*
* We load all the csum data we need when we submit * We load all the csum data we need when we submit
@ -8406,7 +8405,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_dio_private *dip = bio->bi_private; struct btrfs_dio_private *dip = bio->bi_private;
bool write = bio_op(bio) == REQ_OP_WRITE; bool write = bio_op(bio) == REQ_OP_WRITE;
int ret; blk_status_t ret;
if (async_submit) if (async_submit)
async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
@ -8649,7 +8648,7 @@ free_ordered:
* callbacks - they require an allocated dip and a clone of dio_bio. * callbacks - they require an allocated dip and a clone of dio_bio.
*/ */
if (io_bio && dip) { if (io_bio && dip) {
io_bio->bi_error = -EIO; io_bio->bi_status = BLK_STS_IOERR;
bio_endio(io_bio); bio_endio(io_bio);
/* /*
* The end io callbacks free our dip, do the final put on io_bio * The end io callbacks free our dip, do the final put on io_bio
@ -8668,7 +8667,7 @@ free_ordered:
unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
file_offset + dio_bio->bi_iter.bi_size - 1); file_offset + dio_bio->bi_iter.bi_size - 1);
dio_bio->bi_error = -EIO; dio_bio->bi_status = BLK_STS_IOERR;
/* /*
* Releases and cleans up our dio_bio, no need to bio_put() * Releases and cleans up our dio_bio, no need to bio_put()
* nor bio_endio()/bio_io_error() against dio_bio. * nor bio_endio()/bio_io_error() against dio_bio.


@ -871,7 +871,7 @@ static void free_raid_bio(struct btrfs_raid_bio *rbio)
* this frees the rbio and runs through all the bios in the * this frees the rbio and runs through all the bios in the
* bio_list and calls end_io on them * bio_list and calls end_io on them
*/ */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err) static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{ {
struct bio *cur = bio_list_get(&rbio->bio_list); struct bio *cur = bio_list_get(&rbio->bio_list);
struct bio *next; struct bio *next;
@ -884,7 +884,7 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
while (cur) { while (cur) {
next = cur->bi_next; next = cur->bi_next;
cur->bi_next = NULL; cur->bi_next = NULL;
cur->bi_error = err; cur->bi_status = err;
bio_endio(cur); bio_endio(cur);
cur = next; cur = next;
} }
@ -897,7 +897,7 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
static void raid_write_end_io(struct bio *bio) static void raid_write_end_io(struct bio *bio)
{ {
struct btrfs_raid_bio *rbio = bio->bi_private; struct btrfs_raid_bio *rbio = bio->bi_private;
int err = bio->bi_error; blk_status_t err = bio->bi_status;
int max_errors; int max_errors;
if (err) if (err)
@ -914,7 +914,7 @@ static void raid_write_end_io(struct bio *bio)
max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ? max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
0 : rbio->bbio->max_errors; 0 : rbio->bbio->max_errors;
if (atomic_read(&rbio->error) > max_errors) if (atomic_read(&rbio->error) > max_errors)
err = -EIO; err = BLK_STS_IOERR;
rbio_orig_end_io(rbio, err); rbio_orig_end_io(rbio, err);
} }
@ -1092,7 +1092,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
* devices or if they are not contiguous * devices or if they are not contiguous
*/ */
if (last_end == disk_start && stripe->dev->bdev && if (last_end == disk_start && stripe->dev->bdev &&
!last->bi_error && !last->bi_status &&
last->bi_bdev == stripe->dev->bdev) { last->bi_bdev == stripe->dev->bdev) {
ret = bio_add_page(last, page, PAGE_SIZE, 0); ret = bio_add_page(last, page, PAGE_SIZE, 0);
if (ret == PAGE_SIZE) if (ret == PAGE_SIZE)
@ -1448,7 +1448,7 @@ static void raid_rmw_end_io(struct bio *bio)
{ {
struct btrfs_raid_bio *rbio = bio->bi_private; struct btrfs_raid_bio *rbio = bio->bi_private;
if (bio->bi_error) if (bio->bi_status)
fail_bio_stripe(rbio, bio); fail_bio_stripe(rbio, bio);
else else
set_bio_pages_uptodate(bio); set_bio_pages_uptodate(bio);
@ -1991,7 +1991,7 @@ static void raid_recover_end_io(struct bio *bio)
* we only read stripe pages off the disk, set them * we only read stripe pages off the disk, set them
* up to date if there were no errors * up to date if there were no errors
*/ */
if (bio->bi_error) if (bio->bi_status)
fail_bio_stripe(rbio, bio); fail_bio_stripe(rbio, bio);
else else
set_bio_pages_uptodate(bio); set_bio_pages_uptodate(bio);
@ -2530,7 +2530,7 @@ static void raid56_parity_scrub_end_io(struct bio *bio)
{ {
struct btrfs_raid_bio *rbio = bio->bi_private; struct btrfs_raid_bio *rbio = bio->bi_private;
if (bio->bi_error) if (bio->bi_status)
fail_bio_stripe(rbio, bio); fail_bio_stripe(rbio, bio);
else else
set_bio_pages_uptodate(bio); set_bio_pages_uptodate(bio);


@@ -95,7 +95,7 @@ struct scrub_bio {
 	struct scrub_ctx	*sctx;
 	struct btrfs_device	*dev;
 	struct bio		*bio;
-	int			err;
+	blk_status_t		status;
 	u64			logical;
 	u64			physical;
 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
@@ -1668,14 +1668,14 @@ leave_nomem:
 
 struct scrub_bio_ret {
 	struct completion event;
-	int error;
+	blk_status_t status;
 };
 
 static void scrub_bio_wait_endio(struct bio *bio)
 {
 	struct scrub_bio_ret *ret = bio->bi_private;
 
-	ret->error = bio->bi_error;
+	ret->status = bio->bi_status;
 	complete(&ret->event);
 }
@@ -1693,7 +1693,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
 	int ret;
 
 	init_completion(&done.event);
-	done.error = 0;
+	done.status = 0;
 	bio->bi_iter.bi_sector = page->logical >> 9;
 	bio->bi_private = &done;
 	bio->bi_end_io = scrub_bio_wait_endio;
@@ -1705,7 +1705,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
 		return ret;
 
 	wait_for_completion(&done.event);
-	if (done.error)
+	if (done.status)
 		return -EIO;
 
 	return 0;
@@ -1937,7 +1937,7 @@ again:
 		bio->bi_bdev = sbio->dev->bdev;
 		bio->bi_iter.bi_sector = sbio->physical >> 9;
 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-		sbio->err = 0;
+		sbio->status = 0;
 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 		   spage->physical_for_dev_replace ||
 		   sbio->logical + sbio->page_count * PAGE_SIZE !=
@@ -1992,7 +1992,7 @@ static void scrub_wr_bio_end_io(struct bio *bio)
 	struct scrub_bio *sbio = bio->bi_private;
 	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
 
-	sbio->err = bio->bi_error;
+	sbio->status = bio->bi_status;
 	sbio->bio = bio;
 
 	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
@@ -2007,7 +2007,7 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
 	int i;
 
 	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
-	if (sbio->err) {
+	if (sbio->status) {
 		struct btrfs_dev_replace *dev_replace =
 			&sbio->sctx->fs_info->dev_replace;
 
@@ -2341,7 +2341,7 @@ again:
 		bio->bi_bdev = sbio->dev->bdev;
 		bio->bi_iter.bi_sector = sbio->physical >> 9;
 		bio_set_op_attrs(bio, REQ_OP_READ, 0);
-		sbio->err = 0;
+		sbio->status = 0;
 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 		   spage->physical ||
 		   sbio->logical + sbio->page_count * PAGE_SIZE !=
@@ -2377,7 +2377,7 @@ static void scrub_missing_raid56_end_io(struct bio *bio)
 	struct scrub_block *sblock = bio->bi_private;
 	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		sblock->no_io_error_seen = 0;
 
 	bio_put(bio);
@@ -2588,7 +2588,7 @@ static void scrub_bio_end_io(struct bio *bio)
 	struct scrub_bio *sbio = bio->bi_private;
 	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
 
-	sbio->err = bio->bi_error;
+	sbio->status = bio->bi_status;
 	sbio->bio = bio;
 
 	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
@@ -2601,7 +2601,7 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
 	int i;
 
 	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
-	if (sbio->err) {
+	if (sbio->status) {
 		for (i = 0; i < sbio->page_count; i++) {
 			struct scrub_page *spage = sbio->pagev[i];
 
@@ -3004,7 +3004,7 @@ static void scrub_parity_bio_endio(struct bio *bio)
 	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
 	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
 			  sparity->nsectors);

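The scrub_bio_wait_endio hunks above follow the same completion pattern as submit_bio_wait(): the endio callback only records bio->bi_status and signals a completion, and the submitting thread translates the status afterwards. Below is a minimal sketch of that pattern for an out-of-tree caller; sync_bio_done, sync_bio_endio and sync_submit_bio are illustrative names, not part of this commit.

#include <linux/bio.h>
#include <linux/completion.h>

/* Sketch only: wait for a single bio to complete, mirroring the
 * scrub_submit_raid56_bio_wait() pattern in the hunks above. */
struct sync_bio_done {
	struct completion event;
	blk_status_t status;		/* written by the endio callback */
};

static void sync_bio_endio(struct bio *bio)
{
	struct sync_bio_done *done = bio->bi_private;

	done->status = bio->bi_status;	/* record, do not translate here */
	complete(&done->event);
}

static int sync_submit_bio(struct bio *bio)
{
	struct sync_bio_done done;

	init_completion(&done.event);
	done.status = BLK_STS_OK;
	bio->bi_private = &done;
	bio->bi_end_io = sync_bio_endio;
	submit_bio(bio);
	wait_for_completion(&done.event);

	/* translate to an errno only at the caller-facing boundary */
	return blk_status_to_errno(done.status);
}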
View file

@@ -6042,9 +6042,10 @@ static void btrfs_end_bio(struct bio *bio)
 	struct btrfs_bio *bbio = bio->bi_private;
 	int is_orig_bio = 0;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		atomic_inc(&bbio->error);
-		if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
+		if (bio->bi_status == BLK_STS_IOERR ||
+		    bio->bi_status == BLK_STS_TARGET) {
 			unsigned int stripe_index =
 				btrfs_io_bio(bio)->stripe_index;
 			struct btrfs_device *dev;
@@ -6082,13 +6083,13 @@ static void btrfs_end_bio(struct bio *bio)
 		 * beyond the tolerance of the btrfs bio
 		 */
 		if (atomic_read(&bbio->error) > bbio->max_errors) {
-			bio->bi_error = -EIO;
+			bio->bi_status = BLK_STS_IOERR;
 		} else {
 			/*
 			 * this bio is actually up to date, we didn't
 			 * go over the max number of errors
 			 */
-			bio->bi_error = 0;
+			bio->bi_status = 0;
 		}
 
 		btrfs_end_bbio(bbio, bio);
@@ -6199,7 +6200,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
 
 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
 		bio->bi_iter.bi_sector = logical >> 9;
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 		btrfs_end_bbio(bbio, bio);
 	}
 }

View file

@@ -3038,7 +3038,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
 
 	if (unlikely(bio_flagged(bio, BIO_QUIET)))
 		set_bit(BH_Quiet, &bh->b_state);
 
-	bh->b_end_io(bh, !bio->bi_error);
+	bh->b_end_io(bh, !bio->bi_status);
 	bio_put(bio);
 }

View file

@@ -129,7 +129,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 			goto errout;
 		}
 		err = submit_bio_wait(bio);
-		if ((err == 0) && bio->bi_error)
+		if (err == 0 && bio->bi_status)
 			err = -EIO;
 		bio_put(bio);
 		if (err)

View file

@@ -294,7 +294,7 @@ static void dio_aio_complete_work(struct work_struct *work)
 	dio_complete(dio, 0, true);
 }
 
-static int dio_bio_complete(struct dio *dio, struct bio *bio);
+static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
 
 /*
  * Asynchronous IO callback.
@@ -473,11 +473,11 @@ static struct bio *dio_await_one(struct dio *dio)
 /*
  * Process one completed BIO. No locks are held.
  */
-static int dio_bio_complete(struct dio *dio, struct bio *bio)
+static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
 {
 	struct bio_vec *bvec;
 	unsigned i;
-	int err = bio->bi_error;
+	blk_status_t err = bio->bi_status;
 
 	if (err)
 		dio->io_error = -EIO;
@@ -536,7 +536,7 @@ static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
 			bio = dio->bio_list;
 			dio->bio_list = bio->bi_private;
 			spin_unlock_irqrestore(&dio->bio_lock, flags);
-			ret2 = dio_bio_complete(dio, bio);
+			ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
 			if (ret == 0)
 				ret = ret2;
 		}

View file

@@ -85,7 +85,7 @@ static void ext4_finish_bio(struct bio *bio)
 		}
 #endif
 
-		if (bio->bi_error) {
+		if (bio->bi_status) {
 			SetPageError(page);
 			mapping_set_error(page->mapping, -EIO);
 		}
@@ -104,7 +104,7 @@ static void ext4_finish_bio(struct bio *bio)
 				continue;
 			}
 			clear_buffer_async_write(bh);
-			if (bio->bi_error)
+			if (bio->bi_status)
 				buffer_io_error(bh);
 		} while ((bh = bh->b_this_page) != head);
 		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
@@ -303,24 +303,25 @@ static void ext4_end_bio(struct bio *bio)
 		      bdevname(bio->bi_bdev, b),
 		      (long long) bio->bi_iter.bi_sector,
 		      (unsigned) bio_sectors(bio),
-		      bio->bi_error)) {
+		      bio->bi_status)) {
 		ext4_finish_bio(bio);
 		bio_put(bio);
 		return;
 	}
 	bio->bi_end_io = NULL;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		struct inode *inode = io_end->inode;
 
 		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
-			     bio->bi_error, inode->i_ino,
+			     bio->bi_status, inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
-		mapping_set_error(inode->i_mapping, bio->bi_error);
+		mapping_set_error(inode->i_mapping,
+				blk_status_to_errno(bio->bi_status));
 	}
 
 	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {

View file

@@ -73,7 +73,7 @@ static void mpage_end_io(struct bio *bio)
 	int i;
 
 	if (ext4_bio_encrypted(bio)) {
-		if (bio->bi_error) {
+		if (bio->bi_status) {
 			fscrypt_release_ctx(bio->bi_private);
 		} else {
 			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
@@ -83,7 +83,7 @@ static void mpage_end_io(struct bio *bio)
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
 
-		if (!bio->bi_error) {
+		if (!bio->bi_status) {
 			SetPageUptodate(page);
 		} else {
 			ClearPageUptodate(page);

View file

@@ -58,12 +58,12 @@ static void f2fs_read_end_io(struct bio *bio)
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
 		f2fs_show_injection_info(FAULT_IO);
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 	}
 #endif
 
 	if (f2fs_bio_encrypted(bio)) {
-		if (bio->bi_error) {
+		if (bio->bi_status) {
 			fscrypt_release_ctx(bio->bi_private);
 		} else {
 			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
@@ -74,7 +74,7 @@ static void f2fs_read_end_io(struct bio *bio)
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
 
-		if (!bio->bi_error) {
+		if (!bio->bi_status) {
 			if (!PageUptodate(page))
 				SetPageUptodate(page);
 		} else {
@@ -102,14 +102,14 @@ static void f2fs_write_end_io(struct bio *bio)
 			unlock_page(page);
 			mempool_free(page, sbi->write_io_dummy);
 
-			if (unlikely(bio->bi_error))
+			if (unlikely(bio->bi_status))
 				f2fs_stop_checkpoint(sbi, true);
 			continue;
 		}
 
 		fscrypt_pullback_bio_page(&page, true);
 
-		if (unlikely(bio->bi_error)) {
+		if (unlikely(bio->bi_status)) {
 			mapping_set_error(page->mapping, -EIO);
 			f2fs_stop_checkpoint(sbi, true);
 		}

View file

@@ -749,7 +749,7 @@ static void f2fs_submit_discard_endio(struct bio *bio)
 {
 	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
 
-	dc->error = bio->bi_error;
+	dc->error = blk_status_to_errno(bio->bi_status);
 	dc->state = D_DONE;
 	complete(&dc->wait);
 	bio_put(bio);

View file

@@ -170,7 +170,7 @@ static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
  */
 
 static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
-				  int error)
+				  blk_status_t error)
 {
 	struct buffer_head *bh, *next;
 	struct page *page = bvec->bv_page;
@@ -209,13 +209,13 @@ static void gfs2_end_log_write(struct bio *bio)
 	struct page *page;
 	int i;
 
-	if (bio->bi_error)
-		fs_err(sdp, "Error %d writing to log\n", bio->bi_error);
+	if (bio->bi_status)
+		fs_err(sdp, "Error %d writing to log\n", bio->bi_status);
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		page = bvec->bv_page;
 		if (page_has_buffers(page))
-			gfs2_end_log_write_bh(sdp, bvec, bio->bi_error);
+			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
 		else
 			mempool_free(page, gfs2_page_pool);
 	}

View file

@@ -201,7 +201,7 @@ static void gfs2_meta_read_endio(struct bio *bio)
 		do {
 			struct buffer_head *next = bh->b_this_page;
 			len -= bh->b_size;
-			bh->b_end_io(bh, !bio->bi_error);
+			bh->b_end_io(bh, !bio->bi_status);
 			bh = next;
 		} while (bh && len);
 	}

View file

@@ -176,10 +176,10 @@ static void end_bio_io_page(struct bio *bio)
 {
 	struct page *page = bio->bi_private;
 
-	if (!bio->bi_error)
+	if (!bio->bi_status)
 		SetPageUptodate(page);
 	else
-		pr_warn("error %d reading superblock\n", bio->bi_error);
+		pr_warn("error %d reading superblock\n", bio->bi_status);
 	unlock_page(page);
 }

View file

@@ -672,8 +672,8 @@ static void iomap_dio_bio_end_io(struct bio *bio)
 	struct iomap_dio *dio = bio->bi_private;
 	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
 
-	if (bio->bi_error)
-		iomap_dio_set_error(dio, bio->bi_error);
+	if (bio->bi_status)
+		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
 
 	if (atomic_dec_and_test(&dio->ref)) {
 		if (is_sync_kiocb(dio->iocb)) {

View file

@@ -2205,7 +2205,7 @@ static void lbmIODone(struct bio *bio)
 
 	bp->l_flag |= lbmDONE;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		bp->l_flag |= lbmERROR;
 
 		jfs_err("lbmIODone: I/O error in JFS log");

View file

@@ -280,7 +280,7 @@ static void metapage_read_end_io(struct bio *bio)
 {
 	struct page *page = bio->bi_private;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
 		SetPageError(page);
 	}
@@ -337,7 +337,7 @@ static void metapage_write_end_io(struct bio *bio)
 
 	BUG_ON(!PagePrivate(page));
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
 		SetPageError(page);
 	}

View file

@@ -50,7 +50,8 @@ static void mpage_end_io(struct bio *bio)
 
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
-		page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
+		page_endio(page, op_is_write(bio_op(bio)),
+				blk_status_to_errno(bio->bi_status));
 	}
 
 	bio_put(bio);

View file

@@ -188,7 +188,7 @@ static void bl_end_io_read(struct bio *bio)
 {
 	struct parallel_io *par = bio->bi_private;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		struct nfs_pgio_header *header = par->data;
 
 		if (!header->pnfs_error)
@@ -319,7 +319,7 @@ static void bl_end_io_write(struct bio *bio)
 	struct parallel_io *par = bio->bi_private;
 	struct nfs_pgio_header *header = par->data;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		if (!header->pnfs_error)
 			header->pnfs_error = -EIO;
 		pnfs_set_lo_fail(header->lseg);

View file

@@ -338,7 +338,7 @@ static void nilfs_end_bio_write(struct bio *bio)
 {
 	struct nilfs_segment_buffer *segbuf = bio->bi_private;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		atomic_inc(&segbuf->sb_err);
 
 	bio_put(bio);

View file

@@ -516,9 +516,9 @@ static void o2hb_bio_end_io(struct bio *bio)
 {
 	struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
 
-	if (bio->bi_error) {
-		mlog(ML_ERROR, "IO Error %d\n", bio->bi_error);
-		wc->wc_error = bio->bi_error;
+	if (bio->bi_status) {
+		mlog(ML_ERROR, "IO Error %d\n", bio->bi_status);
+		wc->wc_error = blk_status_to_errno(bio->bi_status);
 	}
 
 	o2hb_bio_wait_dec(wc, 1);

View file

@@ -276,7 +276,7 @@ xfs_end_io(
 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 	xfs_off_t		offset = ioend->io_offset;
 	size_t			size = ioend->io_size;
-	int			error = ioend->io_bio->bi_error;
+	int			error;
 
 	/*
 	 * Just clean up the in-memory strutures if the fs has been shut down.
@@ -289,6 +289,7 @@ xfs_end_io(
 	/*
 	 * Clean up any COW blocks on an I/O error.
 	 */
+	error = blk_status_to_errno(ioend->io_bio->bi_status);
 	if (unlikely(error)) {
 		switch (ioend->io_type) {
 		case XFS_IO_COW:
@@ -332,7 +333,7 @@ xfs_end_bio(
 	else if (ioend->io_append_trans)
 		queue_work(mp->m_data_workqueue, &ioend->io_work);
 	else
-		xfs_destroy_ioend(ioend, bio->bi_error);
+		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
 }
 
 STATIC int
@@ -500,7 +501,7 @@ xfs_submit_ioend(
 	 * time.
 	 */
 	if (status) {
-		ioend->io_bio->bi_error = status;
+		ioend->io_bio->bi_status = errno_to_blk_status(status);
 		bio_endio(ioend->io_bio);
 		return status;
 	}

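A pattern worth noting in the xfs hunks above, and in the iomap, ocfs2 and f2fs ones earlier: blk_status_t stays inside the bio completion path, and the translation happens exactly once at the boundary to errno-based code, via blk_status_to_errno() on completion and errno_to_blk_status() on submission. Below is a hedged sketch of an endio handler that preserves the first error across several in-flight bios, loosely modelled on xfs_buf_bio_end_io; multi_bio_ctx and multi_bio_endio are made-up names, not code from this commit.

#include <linux/atomic.h>
#include <linux/bio.h>

/* Made-up context shared by several in-flight bios. */
struct multi_bio_ctx {
	int first_error;	/* 0 until the first bio fails */
};

static void multi_bio_endio(struct bio *bio)
{
	struct multi_bio_ctx *ctx = bio->bi_private;

	if (bio->bi_status) {
		int error = blk_status_to_errno(bio->bi_status);

		/* keep the first error; later failures must not clobber it */
		cmpxchg(&ctx->first_error, 0, error);
	}
	bio_put(bio);
}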
View file

@@ -1213,8 +1213,11 @@ xfs_buf_bio_end_io(
 	 * don't overwrite existing errors - otherwise we can lose errors on
 	 * buffers that require multiple bios to complete.
 	 */
-	if (bio->bi_error)
-		cmpxchg(&bp->b_io_error, 0, bio->bi_error);
+	if (bio->bi_status) {
+		int error = blk_status_to_errno(bio->bi_status);
+
+		cmpxchg(&bp->b_io_error, 0, error);
+	}
 
 	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
 		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

View file

@@ -414,7 +414,7 @@ extern void bio_endio(struct bio *);
 
 static inline void bio_io_error(struct bio *bio)
 {
-	bio->bi_error = -EIO;
+	bio->bi_status = BLK_STS_IOERR;
 	bio_endio(bio);
 }

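With bio_io_error() now setting BLK_STS_IOERR, code that needs a more specific failure picks another BLK_STS_* value and calls bio_endio() itself. A one-function sketch of that, assuming this commit's blk_types.h; fail_bio_notsupp is an illustrative name, not an in-tree helper.

#include <linux/bio.h>

/* Sketch only: fail a bio with a specific status code
 * instead of the generic bio_io_error(). */
static void fail_bio_notsupp(struct bio *bio)
{
	bio->bi_status = BLK_STS_NOTSUPP;	/* operation not supported */
	bio_endio(bio);
}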
Some files were not shown because too many files changed in this diff.