drbd: Fixed a race condition between detach and start of resync
drbd_state_lock() is only there to serialize cluster wide state changes.
Testing the local disk state needs to happen while holding the
global_state_lock. Otherwise you might see something like this
(Oct 6 on kugel):

14:20:24 drbd0: conn( WFSyncUUID -> Connected ) disk( Inconsistent -> Failed )
14:20:24 drbd0: helper command: /sbin/drbdadm before-resync-target minor-0 exit code 0 (0x0)
14:20:24 drbd0: conn( Connected -> SyncTarget ) disk( Failed -> Inconsistent )

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Parent: 6a9a92f4ef
Commit: aaae506d54
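The patch below takes global_state_lock before the get_ldev_if_state() check, so the local-disk test and the later transition to SyncTarget act as one critical section with respect to a concurrent detach. A minimal user-space sketch of that pattern follows; this is not DRBD code, and the pthread rwlock, the disk_state variable and the function names are stand-ins for global_state_lock and the DRBD state machine.

/*
 * Minimal analogy of the locking change, NOT DRBD code: the disk-state
 * check and the transition to "sync target" happen under the same lock
 * that a detach must take to change the state, so detach can no longer
 * slip in between the check and the transition.
 */
#include <pthread.h>
#include <stdio.h>

enum disk_state { D_FAILED, D_INCONSISTENT, D_SYNC_TARGET };

static enum disk_state disk_state = D_INCONSISTENT;
static pthread_rwlock_t state_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Detach: may run at any time from another thread. */
static void *detach(void *unused)
{
	(void)unused;
	pthread_rwlock_wrlock(&state_lock);
	disk_state = D_FAILED;
	pthread_rwlock_unlock(&state_lock);
	return NULL;
}

/* Start of resync: check and transition form one critical section. */
static void start_resync(void)
{
	pthread_rwlock_wrlock(&state_lock);   /* like write_lock_irq(&global_state_lock) */
	if (disk_state != D_INCONSISTENT) {   /* like the get_ldev_if_state() check */
		pthread_rwlock_unlock(&state_lock);
		printf("local disk gone, not starting resync\n");
		return;
	}
	disk_state = D_SYNC_TARGET;           /* like the Connected -> SyncTarget transition */
	pthread_rwlock_unlock(&state_lock);
	printf("resync started\n");
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, detach, NULL);
	start_resync();
	pthread_join(t, NULL);
	return 0;
}

Build with cc -pthread; because the check and the transition sit under one write lock, detach() can only run entirely before or entirely after start_resync(), never in between, which is exactly the window the log above shows being hit.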
@@ -1537,14 +1537,14 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 	}
 
 	drbd_state_lock(mdev);
-
+	write_lock_irq(&global_state_lock);
 	if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
+		write_unlock_irq(&global_state_lock);
 		drbd_state_unlock(mdev);
 		return;
 	}
 
-	write_lock_irq(&global_state_lock);
-	ns = mdev->state;
+	ns.i = mdev->state.i;
 
 	ns.aftr_isp = !_drbd_may_sync_now(mdev);