Merge tag 'md/4.1-rc4-fixes' of git://neil.brown.name/md

Pull md bugfixes from Neil Brown:
 "I have a few more raid5 bugfixes pending, but I want them to get a bit
  more review first.  In the meantime:

   - one serious RAID0 data corruption - caused by a recent bugfix that
     wasn't reviewed properly.

   - one raid5 fix in new code (a couple more of those to come).

   - one little fix to stop static analysis complaining about a silly rcu
     annotation"

* tag 'md/4.1-rc4-fixes' of git://neil.brown.name/md:
  md/bitmap: remove rcu annotation from pointer arithmetic.
  md/raid0: fix restore to sector variable in raid0_make_request
  raid5: fix broken async operation chain
Linus Torvalds 2015-05-22 15:10:07 -07:00
Parents: 1d82b0baf9 8532e34390
Commit: a30ec4b347
3 changed files with 10 additions and 3 deletions

drivers/md/bitmap.c

@@ -177,11 +177,16 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
 	 * nr_pending is 0 and In_sync is clear, the entries we return will
 	 * still be in the same position on the list when we re-enter
 	 * list_for_each_entry_continue_rcu.
+	 *
+	 * Note that if entered with 'rdev == NULL' to start at the
+	 * beginning, we temporarily assign 'rdev' to an address which
+	 * isn't really an rdev, but which can be used by
+	 * list_for_each_entry_continue_rcu() to find the first entry.
 	 */
 	rcu_read_lock();
 	if (rdev == NULL)
 		/* start at the beginning */
-		rdev = list_entry_rcu(&mddev->disks, struct md_rdev, same_set);
+		rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
 	else {
 		/* release the previous rdev and start from there. */
 		rdev_dec_pending(rdev, mddev);
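
For readers wondering why dropping the "_rcu" suffix is safe here: the pointer built from &mddev->disks is not a real md_rdev and is never dereferenced as one; it only seeds list_for_each_entry_continue_rcu(), which reads the embedded list node (which aliases the list head) and then advances to the first real entry. Below is a minimal user-space sketch of that seeding pattern, with a simplified list_head/list_entry and a plain loop standing in for the kernel's RCU list API; struct item, list_add_tail() and the other names are illustrative, not the kernel's code.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* Simplified versions of the kernel's container_of()/list_entry(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

struct item {
	int value;
	struct list_head node;
};

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct item a = { .value = 1 }, b = { .value = 2 };
	struct item *cursor;

	list_add_tail(&a.node, &head);
	list_add_tail(&b.node, &head);

	/* Like rdev = list_entry(&mddev->disks, ...): 'cursor' is not a
	 * real item and must never be dereferenced as one. */
	cursor = list_entry(&head, struct item, node);

	/* "continue" iteration: only the embedded node is read (here it
	 * aliases the list head), then we advance to the first real entry. */
	for (cursor = list_entry(cursor->node.next, struct item, node);
	     &cursor->node != &head;
	     cursor = list_entry(cursor->node.next, struct item, node))
		printf("%d\n", cursor->value);	/* prints 1 then 2 */

	return 0;
}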

drivers/md/raid0.c

@@ -524,6 +524,9 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 			 ? (sector & (chunk_sects-1))
 			 : sector_div(sector, chunk_sects));
 
+		/* Restore due to sector_div */
+		sector = bio->bi_iter.bi_sector;
+
 		if (sectors < bio_sectors(bio)) {
 			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
 			bio_chain(split, bio);
@@ -531,7 +534,6 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 			split = bio;
 		}
 
-		sector = bio->bi_iter.bi_sector;
 		zone = find_zone(mddev->private, &sector);
 		tmp_dev = map_sector(mddev, zone, sector, &sector);
 		split->bi_bdev = tmp_dev->bdev;
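
The restore matters because sector_div(), like do_div(), divides its first argument in place and returns only the remainder, so in the non-power-of-2 branch above 'sector' is left holding the quotient rather than the bio's start sector when the later zone/device mapping runs. Below is a minimal user-space sketch of that in-place convention; fake_sector_div() is an illustrative stand-in (written as a GNU C statement expression, as the kernel macros are), not the kernel's implementation.

#include <stdio.h>
#include <stdint.h>

/* In-place divide that returns the remainder, mimicking the calling
 * convention of the kernel's sector_div()/do_div() (stand-in only). */
#define fake_sector_div(n, base) ({			\
	uint32_t _rem = (uint32_t)((n) % (base));	\
	(n) = (n) / (base);				\
	_rem;						\
})

int main(void)
{
	uint64_t bi_sector = 1000003;	/* stand-in for bio->bi_iter.bi_sector */
	uint64_t sector = bi_sector;
	uint32_t chunk_sects = 128;

	/* Offset of the bio inside its chunk; this clobbers 'sector'. */
	uint32_t offset = fake_sector_div(sector, chunk_sects);
	uint32_t sectors = chunk_sects - offset;	/* room left in the chunk */

	printf("sectors left in chunk = %u, clobbered sector = %llu\n",
	       sectors, (unsigned long long)sector);

	/* Without this restore (what the patch adds before the split),
	 * the later zone/device mapping would use the quotient. */
	sector = bi_sector;
	printf("restored sector = %llu\n", (unsigned long long)sector);
	return 0;
}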

drivers/md/raid5.c

@@ -1822,7 +1822,7 @@ again:
 		} else
 			init_async_submit(&submit, 0, tx, NULL, NULL,
 					  to_addr_conv(sh, percpu, j));
-		async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
+		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
 		if (!last_stripe) {
 			j++;
 			sh = list_first_entry(&sh->batch_list, struct stripe_head,
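
The change looks small, but it restores the dependency chain: async_gen_syndrome() returns a descriptor that the next operation submitted for the batch must depend on, and ignoring the return value left later stripes chained to a stale 'tx'. Below is a minimal user-space sketch of the pattern; fake_async_op() and struct fake_tx are illustrative stand-ins, not the async_tx API.

#include <stdio.h>

struct fake_tx { int id; };

/* Stand-in for an async submission: the new operation is ordered after
 * *depends_on and a descriptor for it is returned to the caller. */
static struct fake_tx *fake_async_op(struct fake_tx *depends_on)
{
	static struct fake_tx pool[8];
	static int next_id;
	struct fake_tx *tx = &pool[next_id];

	tx->id = ++next_id;
	printf("op %d depends on op %d\n", tx->id, depends_on ? depends_on->id : 0);
	return tx;
}

int main(void)
{
	struct fake_tx *tx = NULL;
	int j;

	printf("broken chain (return value dropped):\n");
	for (j = 0; j < 3; j++)
		fake_async_op(tx);	/* every op depends on nothing */

	printf("fixed chain (tx = ..., as in the patch):\n");
	tx = NULL;
	for (j = 0; j < 3; j++)
		tx = fake_async_op(tx);	/* each op depends on the previous one */

	return 0;
}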