rbd: don't assume RBD_LOCK_STATE_LOCKED for exclusive mappings
commit 2237ceb71f89837ac47c5dce2aaa2c2b3a337a3c upstream. Every time a watch is reestablished after getting lost, we need to update the cookie which involves quiescing exclusive lock. For this, we transition from RBD_LOCK_STATE_LOCKED to RBD_LOCK_STATE_QUIESCING roughly for the duration of rbd_reacquire_lock() call. If the mapping is exclusive and I/O happens to arrive in this time window, it's failed with EROFS (later translated to EIO) based on the wrong assumption in rbd_img_exclusive_lock() -- "lock got released?" check there stopped making sense with commit a2b1da0979
("rbd: lock should be quiesced on reacquire"). To make it worse, any such I/O is added to the acquiring list before EROFS is returned and this sets up for violating rbd_lock_del_request() precondition that the request is either on the running list or not on any list at all -- see commit ded080c86b3f ("rbd: don't move requests to the running list on errors"). rbd_lock_del_request() ends up processing these requests as if they were on the running list which screws up quiescing_wait completion counter and ultimately leads to rbd_assert(!completion_done(&rbd_dev->quiescing_wait)); being triggered on the next watch error. Cc: stable@vger.kernel.org # 06ef84c4e9c4: rbd: rename RBD_LOCK_STATE_RELEASING and releasing_wait Cc: stable@vger.kernel.org Fixes: 637cd06053
("rbd: new exclusive lock wait/wake code") Signed-off-by: Ilya Dryomov <idryomov@gmail.com> Reviewed-by: Dongsheng Yang <dongsheng.yang@easystack.cn> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Parent
0a97cc547f
Commit
4cf7e2a305
|
@ -3459,6 +3459,7 @@ static void rbd_lock_del_request(struct rbd_img_request *img_req)
|
||||||
lockdep_assert_held(&rbd_dev->lock_rwsem);
|
lockdep_assert_held(&rbd_dev->lock_rwsem);
|
||||||
spin_lock(&rbd_dev->lock_lists_lock);
|
spin_lock(&rbd_dev->lock_lists_lock);
|
||||||
if (!list_empty(&img_req->lock_item)) {
|
if (!list_empty(&img_req->lock_item)) {
|
||||||
|
rbd_assert(!list_empty(&rbd_dev->running_list));
|
||||||
list_del_init(&img_req->lock_item);
|
list_del_init(&img_req->lock_item);
|
||||||
need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING &&
|
need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING &&
|
||||||
list_empty(&rbd_dev->running_list));
|
list_empty(&rbd_dev->running_list));
|
||||||
|
@ -3478,11 +3479,6 @@ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
|
||||||
if (rbd_lock_add_request(img_req))
|
if (rbd_lock_add_request(img_req))
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
if (rbd_dev->opts->exclusive) {
|
|
||||||
WARN_ON(1); /* lock got released? */
|
|
||||||
return -EROFS;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Note the use of mod_delayed_work() in rbd_acquire_lock()
|
* Note the use of mod_delayed_work() in rbd_acquire_lock()
|
||||||
* and cancel_delayed_work() in wake_lock_waiters().
|
* and cancel_delayed_work() in wake_lock_waiters().
|
||||||
|
@ -4603,6 +4599,10 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
|
||||||
rbd_warn(rbd_dev, "failed to update lock cookie: %d",
|
rbd_warn(rbd_dev, "failed to update lock cookie: %d",
|
||||||
ret);
|
ret);
|
||||||
|
|
||||||
|
if (rbd_dev->opts->exclusive)
|
||||||
|
rbd_warn(rbd_dev,
|
||||||
|
"temporarily releasing lock on exclusive mapping");
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Lock cookie cannot be updated on older OSDs, so do
|
* Lock cookie cannot be updated on older OSDs, so do
|
||||||
* a manual release and queue an acquire.
|
* a manual release and queue an acquire.
|
||||||
|
|
Loading…
Link in new issue