Fix a regression from the original addition of nfs lock support
586759f03e.  When a synchronous
(non-nfs) plock completes, the waiting thread will wake up and
free the op struct.  This races with the user thread in
dev_write() which goes on to read the op's callback field to
check if the lock is async and needs a callback.  This check
can happen on the freed op.  The fix is to note the callback
value before the op can be freed.

Signed-off-by: David Teigland <teigland@redhat.com>
This commit is contained in:
David Teigland 2009-06-18 13:20:24 -05:00
Parent: a566a6b11c
Commit: c78a87d0a1
1 changed file with 10 additions and 7 deletions

View file

@@ -353,7 +353,7 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
 {
 	struct dlm_plock_info info;
 	struct plock_op *op;
-	int found = 0;
+	int found = 0, do_callback = 0;
 
 	if (count != sizeof(info))
 		return -EINVAL;
@@ -366,21 +366,24 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
 
 	spin_lock(&ops_lock);
 	list_for_each_entry(op, &recv_list, list) {
-		if (op->info.fsid == info.fsid && op->info.number == info.number &&
+		if (op->info.fsid == info.fsid &&
+		    op->info.number == info.number &&
 		    op->info.owner == info.owner) {
+			struct plock_xop *xop = (struct plock_xop *)op;
 			list_del_init(&op->list);
-			found = 1;
-			op->done = 1;
 			memcpy(&op->info, &info, sizeof(info));
+			if (xop->callback)
+				do_callback = 1;
+			else
+				op->done = 1;
+			found = 1;
 			break;
 		}
 	}
 	spin_unlock(&ops_lock);
 
 	if (found) {
-		struct plock_xop *xop;
-		xop = (struct plock_xop *)op;
-		if (xop->callback)
+		if (do_callback)
 			dlm_plock_callback(op);
 		else
 			wake_up(&recv_wq);