x86/entry: Clean up idtentry_enter/exit() leftovers

Now that everything is converted to conditional RCU handling, remove
idtentry_enter/exit() and tidy up the conditional functions.

This does not remove rcu_irq_exit_preempt() yet, to avoid conflicts with the
RCU tree; it will be removed once all of this hits Linus's tree.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Link: https://lore.kernel.org/r/20200521202117.473597954@linutronix.de
Author: Thomas Gleixner, 2020-05-21 22:05:20 +02:00
Parent: fa95d7dc1a
Commit: 9ee01e0f69
2 changed files with 30 additions and 49 deletions
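
For reference, every converted entry point now brackets its work with the two
remaining helpers, feeding the return value of idtentry_enter_cond_rcu() into
idtentry_exit_cond_rcu(). The sketch below shows that pairing in a hypothetical
open-coded handler (exc_example is illustrative and not part of this commit);
it is roughly what the DEFINE_IDTENTRY() wrapper generates around the actual
handler body:

	/* Illustrative only: exc_example is a made-up handler name */
	__visible noinstr void exc_example(struct pt_regs *regs)
	{
		/* True when RCU had to be adjusted on a kernel mode entry */
		bool rcu_exit = idtentry_enter_cond_rcu(regs);

		instrumentation_begin();
		/* ... the actual exception handling work runs here ... */
		instrumentation_end();

		idtentry_exit_cond_rcu(regs, rcu_exit);
	}

With all users following this pattern, the cond_rcu argument and the
unconditional idtentry_enter()/idtentry_exit() wrappers have no callers left,
which is what the diff below removes.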


@@ -515,7 +515,6 @@ SYSCALL_DEFINE0(ni_syscall)
  * idtentry_enter_cond_rcu - Handle state tracking on idtentry with conditional
  *			     RCU handling
  * @regs:	Pointer to pt_regs of interrupted context
- * @cond_rcu:	Invoke rcu_irq_enter() only if RCU is not watching
  *
  * Invokes:
  *  - lockdep irqflag state tracking as low level ASM entry disabled
@@ -545,14 +544,14 @@ SYSCALL_DEFINE0(ni_syscall)
  * The return value must be fed into the rcu_exit argument of
  * idtentry_exit_cond_rcu().
  */
-bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs, bool cond_rcu)
+bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
 {
 	if (user_mode(regs)) {
 		enter_from_user_mode();
 		return false;
 	}
-	if (!cond_rcu || !__rcu_is_watching()) {
+	if (!__rcu_is_watching()) {
 		/*
 		 * If RCU is not watching then the same careful
 		 * sequence vs. lockdep and tracing is required
@@ -608,52 +607,44 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
 	if (user_mode(regs)) {
 		prepare_exit_to_usermode(regs);
 	} else if (regs->flags & X86_EFLAGS_IF) {
+		/*
+		 * If RCU was not watching on entry this needs to be done
+		 * carefully and needs the same ordering of lockdep/tracing
+		 * and RCU as the return to user mode path.
+		 */
+		if (rcu_exit) {
+			instrumentation_begin();
+			/* Tell the tracer that IRET will enable interrupts */
+			trace_hardirqs_on_prepare();
+			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+			instrumentation_end();
+			rcu_irq_exit();
+			lockdep_hardirqs_on(CALLER_ADDR0);
+			return;
+		}
+		instrumentation_begin();
 		/* Check kernel preemption, if enabled */
 		if (IS_ENABLED(CONFIG_PREEMPTION)) {
-			/*
-			 * This needs to be done very carefully.
-			 * idtentry_enter() invoked rcu_irq_enter(). This
-			 * needs to be undone before scheduling.
-			 *
-			 * Preemption is disabled inside of RCU idle
-			 * sections. When the task returns from
-			 * preempt_schedule_irq(), RCU is still watching.
-			 *
-			 * rcu_irq_exit_preempt() has additional state
-			 * checking if CONFIG_PROVE_RCU=y
-			 */
 			if (!preempt_count()) {
+				/* Sanity check RCU and thread stack */
+				rcu_irq_exit_check_preempt();
 				if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
 					WARN_ON_ONCE(!on_thread_stack());
-				instrumentation_begin();
-				if (rcu_exit)
-					rcu_irq_exit_preempt();
 				if (need_resched())
 					preempt_schedule_irq();
-				/* Covers both tracing and lockdep */
-				trace_hardirqs_on();
-				instrumentation_end();
-				return;
 			}
 		}
-		/*
-		 * If preemption is disabled then this needs to be done
-		 * carefully with respect to RCU. The exception might come
-		 * from a RCU idle section in the idle task due to the fact
-		 * that safe_halt() enables interrupts. So this needs the
-		 * same ordering of lockdep/tracing and RCU as the return
-		 * to user mode path.
-		 */
-		instrumentation_begin();
-		/* Tell the tracer that IRET will enable interrupts */
-		trace_hardirqs_on_prepare();
-		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+		/* Covers both tracing and lockdep */
+		trace_hardirqs_on();
 		instrumentation_end();
-		if (rcu_exit)
-			rcu_irq_exit();
-		lockdep_hardirqs_on(CALLER_ADDR0);
 	} else {
-		/* IRQ flags state is correct already. Just tell RCU. */
+		/*
+		 * IRQ flags state is correct already. Just tell RCU if it
+		 * was not watching on entry.
+		 */
 		if (rcu_exit)
 			rcu_irq_exit();
 	}


@@ -10,19 +10,9 @@
 void idtentry_enter_user(struct pt_regs *regs);
 void idtentry_exit_user(struct pt_regs *regs);
-bool idtentry_enter_cond_rcu(struct pt_regs *regs, bool cond_rcu);
+bool idtentry_enter_cond_rcu(struct pt_regs *regs);
 void idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit);
-static __always_inline void idtentry_enter(struct pt_regs *regs)
-{
-	idtentry_enter_cond_rcu(regs, false);
-}
-static __always_inline void idtentry_exit(struct pt_regs *regs)
-{
-	idtentry_exit_cond_rcu(regs, true);
-}
 /**
  * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
  *		      No error code pushed by hardware