signal: align __lock_task_sighand() irq disabling and RCU
The __lock_task_sighand() function calls rcu_read_lock() with interrupts and preemption enabled, but later calls rcu_read_unlock() with interrupts disabled. It is therefore possible that this RCU read-side critical section will be preempted and later RCU priority boosted, which means that rcu_read_unlock() will call rt_mutex_unlock() in order to deboost itself, but with interrupts disabled. This results in lockdep splats, so this commit nests the RCU read-side critical section within the interrupt-disabled region of code. This prevents the RCU read-side critical section from being preempted, and thus prevents the attempt to deboost with interrupts disabled.

It is quite possible that a better long-term fix is to make rt_mutex_unlock() disable irqs when acquiring the rt_mutex structure's ->wait_lock.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Parent: ec433f0c51
Commit: a841796f11
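As a rough illustration of the ordering problem the commit message describes (this sketch is not part of the patch, and the two helper functions below are made up for the example), the before and after orderings look roughly like this, assuming a preemptible kernel with RCU priority boosting enabled:

/*
 * Illustrative sketch only -- not kernel source.  With RCU priority
 * boosting, a reader that is preempted inside its critical section may
 * be boosted, and rcu_read_unlock() must then call rt_mutex_unlock()
 * to deboost the task.
 */
#include <linux/irqflags.h>
#include <linux/rcupdate.h>

static void ordering_before(unsigned long *flags)
{
        /* Preemption still enabled: this reader can be preempted
         * and priority boosted while inside the critical section. */
        rcu_read_lock();
        local_irq_save(*flags);
        /* Deboosting would call rt_mutex_unlock() here, but with
         * interrupts disabled -> lockdep splat. */
        rcu_read_unlock();
        local_irq_restore(*flags);
}

static void ordering_after(unsigned long *flags)
{
        /* Interrupts (and hence preemption) off before entering the
         * RCU read-side critical section... */
        local_irq_save(*flags);
        rcu_read_lock();
        /* ...so the reader is never preempted, never boosted, and
         * rcu_read_unlock() has nothing to deboost. */
        rcu_read_unlock();
        local_irq_restore(*flags);
}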
kernel/signal.c
@@ -1178,18 +1178,25 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 {
         struct sighand_struct *sighand;
 
-        rcu_read_lock();
         for (;;) {
+                local_irq_save(*flags);
+                rcu_read_lock();
                 sighand = rcu_dereference(tsk->sighand);
-                if (unlikely(sighand == NULL))
+                if (unlikely(sighand == NULL)) {
+                        rcu_read_unlock();
+                        local_irq_restore(*flags);
                         break;
+                }
 
-                spin_lock_irqsave(&sighand->siglock, *flags);
-                if (likely(sighand == tsk->sighand))
+                spin_lock(&sighand->siglock);
+                if (likely(sighand == tsk->sighand)) {
+                        rcu_read_unlock();
                         break;
-                spin_unlock_irqrestore(&sighand->siglock, *flags);
+                }
+                spin_unlock(&sighand->siglock);
+                rcu_read_unlock();
+                local_irq_restore(*flags);
         }
-        rcu_read_unlock();
 
         return sighand;
 }
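For readability, here is how __lock_task_sighand() reads with the hunk above applied. This is reconstructed from the diff; the comments are added here as explanation and are not present in the kernel source.

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                                           unsigned long *flags)
{
        struct sighand_struct *sighand;

        for (;;) {
                /* Disable irqs first so the RCU reader below cannot be
                 * preempted and therefore never needs priority boosting. */
                local_irq_save(*flags);
                rcu_read_lock();
                sighand = rcu_dereference(tsk->sighand);
                if (unlikely(sighand == NULL)) {
                        /* The task has no sighand; unwind in reverse
                         * order of acquisition and return NULL. */
                        rcu_read_unlock();
                        local_irq_restore(*flags);
                        break;
                }

                /* irqs are already off, so a plain spin_lock() replaces
                 * the earlier spin_lock_irqsave(). */
                spin_lock(&sighand->siglock);
                if (likely(sighand == tsk->sighand)) {
                        /* ->sighand did not change under us; return with
                         * siglock held and irqs still disabled. */
                        rcu_read_unlock();
                        break;
                }
                /* Raced with a sighand switch; drop everything and retry. */
                spin_unlock(&sighand->siglock);
                rcu_read_unlock();
                local_irq_restore(*flags);
        }

        return sighand;
}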