dm: disable barriers
This patch causes device-mapper to reject any barrier requests. This is done since most of the targets won't handle this correctly anyway. So until the situation improves it is better to reject these requests in the first place. Since barrier requests won't get to the targets, the checks there can be removed.

Cc: stable@kernel.org
Signed-off-by: Stefan Bader <shbader@de.ibm.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Parent
943317efdb
Commit
07a83c47cf
|
@ -941,9 +941,6 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
|
|||
struct crypt_config *cc = ti->private;
|
||||
struct dm_crypt_io *io;
|
||||
|
||||
if (bio_barrier(bio))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
io = mempool_alloc(cc->io_pool, GFP_NOIO);
|
||||
io->target = ti;
|
||||
io->base_bio = bio;
|
||||
|
|
|
@ -798,9 +798,6 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
|
|||
struct dm_mpath_io *mpio;
|
||||
struct multipath *m = (struct multipath *) ti->private;
|
||||
|
||||
if (bio_barrier(bio))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
|
||||
dm_bio_record(&mpio->details, bio);
|
||||
|
||||
|
|
|
@ -889,9 +889,6 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
|
|||
if (!s->valid)
|
||||
return -EIO;
|
||||
|
||||
if (unlikely(bio_barrier(bio)))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* FIXME: should only take write lock if we need
|
||||
* to copy an exception */
|
||||
down_write(&s->lock);
|
||||
|
@ -1162,9 +1159,6 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
|
|||
struct dm_dev *dev = ti->private;
|
||||
bio->bi_bdev = dev->bdev;
|
||||
|
||||
if (unlikely(bio_barrier(bio)))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* Only tell snapshots if this is a write */
|
||||
return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
|
||||
}
|
||||
|
|
|
@ -800,6 +800,15 @@ static int dm_request(request_queue_t *q, struct bio *bio)
|
|||
int rw = bio_data_dir(bio);
|
||||
struct mapped_device *md = q->queuedata;
|
||||
|
||||
/*
|
||||
* There is no use in forwarding any barrier request since we can't
|
||||
* guarantee it is (or can be) handled by the targets correctly.
|
||||
*/
|
||||
if (unlikely(bio_barrier(bio))) {
|
||||
bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
|
||||
return 0;
|
||||
}
|
||||
|
||||
down_read(&md->io_lock);
|
||||
|
||||
disk_stat_inc(dm_disk(md), ios[rw]);
|
||||
|
|
Loading…
Reference in new issue