This makes md raid 10 support TRIM.

If one disk supports discard and another does not, or one has
discard_zeroes_data and another does not, the data read back from the
two disks could be inconsistent. But this should not matter: discarded
data is useless anyway. It does add an extra copy during rebuild, though.

Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: NeilBrown <neilb@suse.de>
Shaohua Li 2012-10-11 13:30:52 +11:00, committed by NeilBrown
Parent: 2ff8cc2c6d
Commit: 532a2a3fba
1 changed file: 25 additions and 4 deletions

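As a quick way to exercise the new support from userspace, one can issue a
discard to the assembled array with the BLKDISCARD ioctl. This is a minimal
sketch, not part of the patch; the device path /dev/md0 and the byte range
are assumptions for illustration only.

/* Issue a 1 MiB discard at the start of an md RAID10 array. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKDISCARD */

int main(void)
{
	uint64_t range[2] = { 0, 1024 * 1024 };	/* start, length in bytes */
	int fd = open("/dev/md0", O_WRONLY);	/* assumed device path */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Fails with EOPNOTSUPP when the queue does not advertise discard. */
	if (ioctl(fd, BLKDISCARD, &range) < 0)
		perror("BLKDISCARD");
	close(fd);
	return 0;
}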

@@ -911,6 +911,11 @@ static void flush_pending_writes(struct r10conf *conf)
 		while (bio) { /* submit pending writes */
 			struct bio *next = bio->bi_next;
 			bio->bi_next = NULL;
-			generic_make_request(bio);
+			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+				/* Just ignore it */
+				bio_endio(bio, 0);
+			else
+				generic_make_request(bio);
 			bio = next;
 		}
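For reference, the blk_queue_discard() test used above is not a heavyweight
probe; in kernels of this era it is simply a bit test on the queue flags
(approximate form, from include/linux/blkdev.h):

/* A queue advertises discard support via QUEUE_FLAG_DISCARD; this is
 * what gates whether a REQ_DISCARD bio is forwarded or completed early. */
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)

Completing an unsupported discard with bio_endio(bio, 0) is safe because
discard is purely advisory: dropping it leaves the data in place, which is
always an acceptable result.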
@@ -1061,6 +1066,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	const int rw = bio_data_dir(bio);
 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
 	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
+	const unsigned long do_discard = (bio->bi_rw
+					  & (REQ_DISCARD | REQ_SECURE));
 	unsigned long flags;
 	struct md_rdev *blocked_rdev;
 	int sectors_handled;
@@ -1081,7 +1088,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	    || conf->prev.near_copies < conf->prev.raid_disks))) {
 		struct bio_pair *bp;
 		/* Sanity check -- queue functions should prevent this happening */
-		if (bio->bi_vcnt != 1 ||
+		if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
 		    bio->bi_idx != 0)
 			goto bad_map;
 		/* This is a one page bio that upper layers
@@ -1410,7 +1417,7 @@ retry_write:
 							      conf->mirrors[d].rdev));
 			mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
 			mbio->bi_end_io = raid10_end_write_request;
-			mbio->bi_rw = WRITE | do_sync | do_fua;
+			mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
 			mbio->bi_private = r10_bio;
 
 			atomic_inc(&r10_bio->remaining);
@@ -1439,7 +1446,7 @@ retry_write:
 							      conf->mirrors[d].replacement));
 			mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
 			mbio->bi_end_io = raid10_end_write_request;
-			mbio->bi_rw = WRITE | do_sync | do_fua;
+			mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
 			mbio->bi_private = r10_bio;
 
 			atomic_inc(&r10_bio->remaining);
@@ -1723,6 +1730,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		clear_bit(Unmerged, &rdev->flags);
 	}
 	md_integrity_add_rdev(rdev, mddev);
+	if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+
 	print_conf(conf);
 	return err;
 }
@@ -3480,6 +3490,7 @@ static int run(struct mddev *mddev)
 	sector_t size;
 	sector_t min_offset_diff = 0;
 	int first = 1;
+	bool discard_supported = false;
 
 	if (mddev->private == NULL) {
 		conf = setup_conf(mddev);
@@ -3496,6 +3507,8 @@ static int run(struct mddev *mddev)
 
 	chunk_size = mddev->chunk_sectors << 9;
 	if (mddev->queue) {
+		blk_queue_max_discard_sectors(mddev->queue,
+					      mddev->chunk_sectors);
 		blk_queue_io_min(mddev->queue, chunk_size);
 		if (conf->geo.raid_disks % conf->geo.near_copies)
 			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
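A worked example of the cap set in this hunk, assuming a 512 KiB chunk size
(a common mdadm default, not something this patch dictates): chunk_sectors
is 1024, so no discard bio sent to the array ever exceeds one chunk.

/* Sketch of the arithmetic, with an assumed 512 KiB chunk. */
#include <stdio.h>

int main(void)
{
	unsigned int chunk_sectors = (512 * 1024) / 512;	/* = 1024 */

	/* blk_queue_max_discard_sectors(q, chunk_sectors) caps every
	 * discard bio at one chunk: */
	printf("max discard per bio: %u sectors = %u bytes\n",
	       chunk_sectors, chunk_sectors * 512);
	return 0;
}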
@@ -3541,8 +3554,16 @@ static int run(struct mddev *mddev)
 				  rdev->data_offset << 9);
 
 		disk->head_position = 0;
+
+		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+			discard_supported = true;
 	}
 
+	if (discard_supported)
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+	else
+		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+
 	/* need to check that every block has at least one working mirror */
 	if (!enough(conf, -1)) {
 		printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
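The end result of the run() changes is visible from userspace: the per-bio
limit set by blk_queue_max_discard_sectors() is exported as
discard_max_bytes in sysfs, while acceptance of discards is gated by
QUEUE_FLAG_DISCARD. A hypothetical check, assuming the array is md0:

/* Read the array's advertised discard limit from sysfs. */
#include <stdio.h>

int main(void)
{
	unsigned long long max_bytes = 0;
	FILE *f = fopen("/sys/block/md0/queue/discard_max_bytes", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%llu", &max_bytes) == 1)
		printf("discard_max_bytes: %llu\n", max_bytes);
	fclose(f);
	return 0;
}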