softirq: Fix suspicious RCU usage in __do_softirq()
[ Upstream commit 1dd1eff161bd55968d3d46bc36def62d71fb4785 ]

Currently, the condition "__this_cpu_read(ksoftirqd) == current" is used to
invoke rcu_softirq_qs() in ksoftirqd tasks context for non-RT kernels.

This works correctly as long as the context is actually task context but
this condition is wrong when:

  - the current task is ksoftirqd

  - the task is interrupted in a RCU read side critical section

  - __do_softirq() is invoked on return from interrupt

Syzkaller triggered the following scenario:

  -> finish_task_switch()
    -> put_task_struct_rcu_user()
      -> call_rcu(&task->rcu, delayed_put_task_struct)
        -> __kasan_record_aux_stack()
          -> pfn_valid()
            -> rcu_read_lock_sched()
              <interrupt>
                __irq_exit_rcu()
                -> __do_softirq()
                  -> if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
                         __this_cpu_read(ksoftirqd) == current)
                    -> rcu_softirq_qs()
                      -> RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map))

The rcu quiescent state is reported in the rcu-read critical section, so
the lockdep warning is triggered.

Fix this by splitting out the inner working of __do_softirq() into a helper
function which takes an argument to distinguish between ksoftirqd task
context and interrupted context, invoke it from the relevant call sites
with the proper context information, and use that for the conditional
invocation of rcu_softirq_qs().

Reported-by: syzbot+dce04ed6d1438ad69656@syzkaller.appspotmail.com
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Zqiang <qiang.zhang1211@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240427102808.29356-1-qiang.zhang1211@gmail.com
Link: https://lore.kernel.org/lkml/8f281a10-b85a-4586-9586-5bbc12dc784f@paulmck-laptop/T/#mea8aba4abfcb97bbf499d169ce7f30c4cff1b0e3
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
Parent
1d3ff18aee
Commit
19ef439df2
|
@ -512,7 +512,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
|
||||||
static inline void lockdep_softirq_end(bool in_hardirq) { }
|
static inline void lockdep_softirq_end(bool in_hardirq) { }
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
asmlinkage __visible void __softirq_entry __do_softirq(void)
|
static void handle_softirqs(bool ksirqd)
|
||||||
{
|
{
|
||||||
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
|
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
|
||||||
unsigned long old_flags = current->flags;
|
unsigned long old_flags = current->flags;
|
||||||
|
@ -567,8 +567,7 @@ restart:
|
||||||
pending >>= softirq_bit;
|
pending >>= softirq_bit;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
|
if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
|
||||||
__this_cpu_read(ksoftirqd) == current)
|
|
||||||
rcu_softirq_qs();
|
rcu_softirq_qs();
|
||||||
|
|
||||||
local_irq_disable();
|
local_irq_disable();
|
||||||
|
@ -588,6 +587,11 @@ restart:
|
||||||
current_restore_flags(old_flags, PF_MEMALLOC);
|
current_restore_flags(old_flags, PF_MEMALLOC);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
asmlinkage __visible void __softirq_entry __do_softirq(void)
|
||||||
|
{
|
||||||
|
handle_softirqs(false);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* irq_enter_rcu - Enter an interrupt context with RCU watching
|
* irq_enter_rcu - Enter an interrupt context with RCU watching
|
||||||
*/
|
*/
|
||||||
|
@ -918,7 +922,7 @@ static void run_ksoftirqd(unsigned int cpu)
|
||||||
* We can safely run softirq on inline stack, as we are not deep
|
* We can safely run softirq on inline stack, as we are not deep
|
||||||
* in the task stack here.
|
* in the task stack here.
|
||||||
*/
|
*/
|
||||||
__do_softirq();
|
handle_softirqs(true);
|
||||||
ksoftirqd_run_end();
|
ksoftirqd_run_end();
|
||||||
cond_resched();
|
cond_resched();
|
||||||
return;
|
return;
|
||||||
|
|
Loading…
Reference in new issue