dm: add zone open, close and finish support

Implement REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH
support to allow explicit control of zone states.

Contains contributions from Matias Bjorling, Hans Holmberg and
Damien Le Moal.

Acked-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ajay Joshi <ajay.joshi@wdc.com>
Signed-off-by: Matias Bjorling <matias.bjorling@wdc.com>
Signed-off-by: Hans Holmberg <hans.holmberg@wdc.com>
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Ajay Joshi 2019-10-27 23:05:48 +09:00, committed by Jens Axboe
Parent 439b84fa17
Commit 2e2d6f7e44
3 changed files with 7 additions and 7 deletions

View file: drivers/md/dm-flakey.c

@@ -280,7 +280,7 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
 	struct flakey_c *fc = ti->private;
 
 	bio_set_dev(bio, fc->dev->bdev);
-	if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
+	if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
 		bio->bi_iter.bi_sector =
 			flakey_map_sector(ti, bio->bi_iter.bi_sector);
 }
@@ -322,8 +322,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
 	pb->bio_submitted = false;
 
-	/* Do not fail reset zone */
-	if (bio_op(bio) == REQ_OP_ZONE_RESET)
+	if (op_is_zone_mgmt(bio_op(bio)))
 		goto map_bio;
 
 	/* Are we alive ? */
@@ -384,7 +383,7 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
 	struct flakey_c *fc = ti->private;
 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
 
-	if (bio_op(bio) == REQ_OP_ZONE_RESET)
+	if (op_is_zone_mgmt(bio_op(bio)))
 		return DM_ENDIO_DONE;
 
 	if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {

View file: drivers/md/dm-linear.c

@@ -90,7 +90,7 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
 	struct linear_c *lc = ti->private;
 
 	bio_set_dev(bio, lc->dev->bdev);
-	if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
+	if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
 		bio->bi_iter.bi_sector =
 			linear_map_sector(ti, bio->bi_iter.bi_sector);
 }

View file: drivers/md/dm.c

@@ -1174,7 +1174,8 @@ static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
 /*
  * A target may call dm_accept_partial_bio only from the map routine. It is
- * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
+ * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
+ * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
  *
  * dm_accept_partial_bio informs the dm that the target only wants to process
  * additional n_sectors sectors of the bio and the rest of the data should be
@@ -1627,7 +1628,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 		ci.sector_count = 0;
 		error = __send_empty_flush(&ci);
 		/* dec_pending submits any data associated with flush */
-	} else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
+	} else if (op_is_zone_mgmt(bio_op(bio))) {
 		ci.bio = bio;
 		ci.sector_count = 0;
 		error = __split_and_process_non_flush(&ci);