scsi: dm: Remove WRITE_SAME support
There are no more end-users of REQ_OP_WRITE_SAME left, so we can start deleting it.

Link: https://lore.kernel.org/r/20220209082828.2629273-7-hch@lst.de
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
Parent
10fa225c33
Commit
a773187e37
|
@ -141,7 +141,6 @@ struct mapped_device {
|
|||
#define DMF_EMULATE_ZONE_APPEND 9
|
||||
|
||||
void disable_discard(struct mapped_device *md);
|
||||
void disable_write_same(struct mapped_device *md);
|
||||
void disable_write_zeroes(struct mapped_device *md);
|
||||
|
||||
static inline sector_t dm_get_size(struct mapped_device *md)
|
||||
|
|
|
@ -2006,7 +2006,6 @@ static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
|
|||
*/
|
||||
switch (bio_op(ctx->bio_in)) {
|
||||
case REQ_OP_WRITE:
|
||||
case REQ_OP_WRITE_SAME:
|
||||
case REQ_OP_WRITE_ZEROES:
|
||||
return true;
|
||||
default:
|
||||
|
|
|
@ -335,7 +335,6 @@ static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
|||
ti->num_flush_bios = 1;
|
||||
ti->num_discard_bios = 1;
|
||||
ti->num_secure_erase_bios = 0;
|
||||
ti->num_write_same_bios = 0;
|
||||
ti->num_write_zeroes_bios = 0;
|
||||
return 0;
|
||||
bad:
|
||||
|
|
|
@ -304,7 +304,6 @@ static void do_region(int op, int op_flags, unsigned region,
|
|||
unsigned num_bvecs;
|
||||
sector_t remaining = where->count;
|
||||
struct request_queue *q = bdev_get_queue(where->bdev);
|
||||
unsigned short logical_block_size = queue_logical_block_size(q);
|
||||
sector_t num_sectors;
|
||||
unsigned int special_cmd_max_sectors;
|
||||
|
||||
|
@ -315,10 +314,8 @@ static void do_region(int op, int op_flags, unsigned region,
|
|||
special_cmd_max_sectors = q->limits.max_discard_sectors;
|
||||
else if (op == REQ_OP_WRITE_ZEROES)
|
||||
special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
|
||||
else if (op == REQ_OP_WRITE_SAME)
|
||||
special_cmd_max_sectors = q->limits.max_write_same_sectors;
|
||||
if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
|
||||
op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
|
||||
if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
|
||||
special_cmd_max_sectors == 0) {
|
||||
atomic_inc(&io->count);
|
||||
dec_count(io, region, BLK_STS_NOTSUPP);
|
||||
return;
|
||||
|
@ -337,9 +334,6 @@ static void do_region(int op, int op_flags, unsigned region,
|
|||
case REQ_OP_WRITE_ZEROES:
|
||||
num_bvecs = 0;
|
||||
break;
|
||||
case REQ_OP_WRITE_SAME:
|
||||
num_bvecs = 1;
|
||||
break;
|
||||
default:
|
||||
num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
|
||||
(PAGE_SIZE >> SECTOR_SHIFT)));
|
||||
|
@ -356,18 +350,6 @@ static void do_region(int op, int op_flags, unsigned region,
|
|||
num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
|
||||
bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
|
||||
remaining -= num_sectors;
|
||||
} else if (op == REQ_OP_WRITE_SAME) {
|
||||
/*
|
||||
* WRITE SAME only uses a single page.
|
||||
*/
|
||||
dp->get_page(dp, &page, &len, &offset);
|
||||
bio_add_page(bio, page, logical_block_size, offset);
|
||||
num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
|
||||
bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
|
||||
|
||||
offset = 0;
|
||||
remaining -= num_sectors;
|
||||
dp->next_page(dp);
|
||||
} else while (remaining) {
|
||||
/*
|
||||
* Try and add as many pages as possible.
|
||||
|
|
|
@ -60,7 +60,6 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
|||
ti->num_flush_bios = 1;
|
||||
ti->num_discard_bios = 1;
|
||||
ti->num_secure_erase_bios = 1;
|
||||
ti->num_write_same_bios = 1;
|
||||
ti->num_write_zeroes_bios = 1;
|
||||
ti->private = lc;
|
||||
return 0;
|
||||
|
|
|
@ -1252,7 +1252,6 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
|
|||
|
||||
ti->num_flush_bios = 1;
|
||||
ti->num_discard_bios = 1;
|
||||
ti->num_write_same_bios = 1;
|
||||
ti->num_write_zeroes_bios = 1;
|
||||
if (m->queue_mode == DM_TYPE_BIO_BASED)
|
||||
ti->per_io_data_size = multipath_per_bio_data_size();
|
||||
|
|
|
@ -217,9 +217,6 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
|
|||
if (req_op(clone) == REQ_OP_DISCARD &&
|
||||
!clone->q->limits.max_discard_sectors)
|
||||
disable_discard(tio->md);
|
||||
else if (req_op(clone) == REQ_OP_WRITE_SAME &&
|
||||
!clone->q->limits.max_write_same_sectors)
|
||||
disable_write_same(tio->md);
|
||||
else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
|
||||
!clone->q->limits.max_write_zeroes_sectors)
|
||||
disable_write_zeroes(tio->md);
|
||||
|
|
|
@ -157,7 +157,6 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
|||
ti->num_flush_bios = stripes;
|
||||
ti->num_discard_bios = stripes;
|
||||
ti->num_secure_erase_bios = stripes;
|
||||
ti->num_write_same_bios = stripes;
|
||||
ti->num_write_zeroes_bios = stripes;
|
||||
|
||||
sc->chunk_size = chunk_size;
|
||||
|
@ -284,8 +283,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
|
|||
}
|
||||
if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
|
||||
unlikely(bio_op(bio) == REQ_OP_SECURE_ERASE) ||
|
||||
unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES) ||
|
||||
unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) {
|
||||
unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES)) {
|
||||
target_bio_nr = dm_bio_get_target_bio_nr(bio);
|
||||
BUG_ON(target_bio_nr >= sc->stripes);
|
||||
return stripe_map_range(sc, bio, target_bio_nr);
|
||||
|
|
|
@ -1822,33 +1822,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
|
|||
return !blk_queue_add_random(q);
|
||||
}
|
||||
|
||||
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
|
||||
sector_t start, sector_t len, void *data)
|
||||
{
|
||||
struct request_queue *q = bdev_get_queue(dev->bdev);
|
||||
|
||||
return !q->limits.max_write_same_sectors;
|
||||
}
|
||||
|
||||
static bool dm_table_supports_write_same(struct dm_table *t)
|
||||
{
|
||||
struct dm_target *ti;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < dm_table_get_num_targets(t); i++) {
|
||||
ti = dm_table_get_target(t, i);
|
||||
|
||||
if (!ti->num_write_same_bios)
|
||||
return false;
|
||||
|
||||
if (!ti->type->iterate_devices ||
|
||||
ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
|
||||
sector_t start, sector_t len, void *data)
|
||||
{
|
||||
|
@ -2027,8 +2000,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
|
|||
else
|
||||
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
|
||||
|
||||
if (!dm_table_supports_write_same(t))
|
||||
q->limits.max_write_same_sectors = 0;
|
||||
if (!dm_table_supports_write_zeroes(t))
|
||||
q->limits.max_write_zeroes_sectors = 0;
|
||||
|
||||
|
|
|
@ -130,7 +130,6 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
|
|||
|
||||
switch (bio_op(bio)) {
|
||||
case REQ_OP_WRITE_ZEROES:
|
||||
case REQ_OP_WRITE_SAME:
|
||||
case REQ_OP_WRITE:
|
||||
return !op_is_flush(bio->bi_opf) && bio_sectors(bio);
|
||||
default:
|
||||
|
@ -390,7 +389,6 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
|
|||
case REQ_OP_ZONE_FINISH:
|
||||
return true;
|
||||
case REQ_OP_WRITE_ZEROES:
|
||||
case REQ_OP_WRITE_SAME:
|
||||
case REQ_OP_WRITE:
|
||||
/* Writes must be aligned to the zone write pointer */
|
||||
if ((clone->bi_iter.bi_sector & (zsectors - 1)) != zwp_offset)
|
||||
|
@ -446,7 +444,6 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
|
|||
blk_queue_zone_sectors(md->queue));
|
||||
return BLK_STS_OK;
|
||||
case REQ_OP_WRITE_ZEROES:
|
||||
case REQ_OP_WRITE_SAME:
|
||||
case REQ_OP_WRITE:
|
||||
WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);
|
||||
return BLK_STS_OK;
|
||||
|
@ -503,7 +500,6 @@ static bool dm_need_zone_wp_tracking(struct bio *orig_bio)
|
|||
return false;
|
||||
switch (bio_op(orig_bio)) {
|
||||
case REQ_OP_WRITE_ZEROES:
|
||||
case REQ_OP_WRITE_SAME:
|
||||
case REQ_OP_WRITE:
|
||||
case REQ_OP_ZONE_RESET:
|
||||
case REQ_OP_ZONE_FINISH:
|
||||
|
|
|
@ -855,14 +855,6 @@ void disable_discard(struct mapped_device *md)
|
|||
blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
|
||||
}
|
||||
|
||||
void disable_write_same(struct mapped_device *md)
|
||||
{
|
||||
struct queue_limits *limits = dm_get_queue_limits(md);
|
||||
|
||||
/* device doesn't really support WRITE SAME, disable it */
|
||||
limits->max_write_same_sectors = 0;
|
||||
}
|
||||
|
||||
void disable_write_zeroes(struct mapped_device *md)
|
||||
{
|
||||
struct queue_limits *limits = dm_get_queue_limits(md);
|
||||
|
@ -889,9 +881,6 @@ static void clone_endio(struct bio *bio)
|
|||
if (bio_op(bio) == REQ_OP_DISCARD &&
|
||||
!q->limits.max_discard_sectors)
|
||||
disable_discard(md);
|
||||
else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
|
||||
!q->limits.max_write_same_sectors)
|
||||
disable_write_same(md);
|
||||
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
|
||||
!q->limits.max_write_zeroes_sectors)
|
||||
disable_write_zeroes(md);
|
||||
|
@ -1370,7 +1359,6 @@ static bool is_abnormal_io(struct bio *bio)
|
|||
switch (bio_op(bio)) {
|
||||
case REQ_OP_DISCARD:
|
||||
case REQ_OP_SECURE_ERASE:
|
||||
case REQ_OP_WRITE_SAME:
|
||||
case REQ_OP_WRITE_ZEROES:
|
||||
r = true;
|
||||
break;
|
||||
|
@ -1392,9 +1380,6 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
|
|||
case REQ_OP_SECURE_ERASE:
|
||||
num_bios = ti->num_secure_erase_bios;
|
||||
break;
|
||||
case REQ_OP_WRITE_SAME:
|
||||
num_bios = ti->num_write_same_bios;
|
||||
break;
|
||||
case REQ_OP_WRITE_ZEROES:
|
||||
num_bios = ti->num_write_zeroes_bios;
|
||||
break;
|
||||
|
|
|
@ -316,12 +316,6 @@ struct dm_target {
|
|||
*/
|
||||
unsigned num_secure_erase_bios;
|
||||
|
||||
/*
|
||||
* The number of WRITE SAME bios that will be submitted to the target.
|
||||
* The bio number can be accessed with dm_bio_get_target_bio_nr.
|
||||
*/
|
||||
unsigned num_write_same_bios;
|
||||
|
||||
/*
|
||||
* The number of WRITE ZEROES bios that will be submitted to the target.
|
||||
* The bio number can be accessed with dm_bio_get_target_bio_nr.
|
||||
|
|
Loading…
Link in new issue