rcu/context-tracking: Move deferred nocb resched to context tracking

To prepare for migrating the RCU extended quiescent state (EQS) accounting code
to context tracking, split the last-resort deferred NOCB resched out of
rcu_user_enter() and move it into a separate call made from context tracking.

Acked-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Nicolas Saenz Julienne <nsaenz@kernel.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Cc: Yu Liao <liaoyu15@huawei.com>
Cc: Phil Auld <pauld@redhat.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Alex Belits <abelits@marvell.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Tested-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Frederic Weisbecker, 2022-06-08 16:40:32 +02:00; committed by Paul E. McKenney
Parent: 95e04f48ec
Commit: 564506495c
3 changed files: 16 additions and 13 deletions

include/linux/rcupdate.h

@@ -112,6 +112,12 @@ static inline void rcu_user_enter(void) { }
 static inline void rcu_user_exit(void) { }
 #endif /* CONFIG_NO_HZ_FULL */
 
+#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
+void rcu_irq_work_resched(void);
+#else
+static inline void rcu_irq_work_resched(void) { }
+#endif
+
 #ifdef CONFIG_RCU_NOCB_CPU
 void rcu_init_nohz(void);
 int rcu_nocb_cpu_offload(int cpu);
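
The new block pairs the real declaration with a no-op static inline stub, the
standard kernel idiom that lets callers such as __ct_user_enter() invoke
rcu_irq_work_resched() unconditionally, with no #ifdef at the call site. A
minimal sketch of the idiom, using hypothetical CONFIG_FOO/foo_kick() names:

#if defined(CONFIG_FOO)
void foo_kick(void);			/* real version defined in a .c file */
#else
static inline void foo_kick(void) { }	/* compiles to nothing when CONFIG_FOO=n */
#endif

static void caller(void)
{
	foo_kick();	/* always compiles; the stub is optimized away */
}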

kernel/context_tracking.c

@@ -177,6 +177,8 @@ static __always_inline void context_tracking_recursion_exit(void)
  */
 void noinstr __ct_user_enter(enum ctx_state state)
 {
+	lockdep_assert_irqs_disabled();
+
 	/* Kernel threads aren't supposed to go to userspace */
 	WARN_ON_ONCE(!current->mm);
 
@@ -198,6 +200,12 @@ void noinstr __ct_user_enter(enum ctx_state state)
 				vtime_user_enter(current);
 				instrumentation_end();
 			}
+			/*
+			 * Other than generic entry implementation, we may be past the last
+			 * rescheduling opportunity in the entry code. Trigger a self IPI
+			 * that will fire and reschedule once we resume in user/guest mode.
+			 */
+			rcu_irq_work_resched();
 			rcu_user_enter();
 		}
 		/*
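
The rcu_irq_work_resched() call added above works by raising a self-IPI
through the irq_work facility: the queued work fires as soon as IRQs are
re-enabled on the way out to user or guest mode, and the interrupt's return
path then honors need_resched(). A minimal sketch of that pattern, mirroring
the late_wakeup_work machinery shown in the kernel/rcu/tree.c hunk below
(queue_resched_ipi() is a hypothetical wrapper, not part of this patch):

#include <linux/irq_work.h>
#include <linux/percpu-defs.h>
#include <linux/sched.h>

/* The handler can stay empty: the IRQ return path itself reschedules. */
static void late_wakeup_func(struct irq_work *work)
{
}

static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
	IRQ_WORK_INIT(late_wakeup_func);

static void queue_resched_ipi(void)
{
	if (need_resched())
		irq_work_queue(this_cpu_ptr(&late_wakeup_work));
}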

kernel/rcu/tree.c

@@ -681,7 +681,7 @@ static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
  * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
  * get re-enabled again.
  */
-noinstr static void rcu_irq_work_resched(void)
+noinstr void rcu_irq_work_resched(void)
 {
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
@@ -697,10 +697,7 @@ noinstr static void rcu_irq_work_resched(void)
 	}
 	instrumentation_end();
 }
-#else
-static inline void rcu_irq_work_resched(void) { }
-#endif
 #endif /* #if !defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK) */
 
 /**
  * rcu_user_enter - inform RCU that we are resuming userspace.
@@ -715,14 +712,6 @@ static inline void rcu_irq_work_resched(void) { }
  */
 noinstr void rcu_user_enter(void)
 {
-	lockdep_assert_irqs_disabled();
-
-	/*
-	 * Other than generic entry implementation, we may be past the last
-	 * rescheduling opportunity in the entry code. Trigger a self IPI
-	 * that will fire and reschedule once we resume in user/guest mode.
-	 */
-	rcu_irq_work_resched();
 	rcu_eqs_enter(true);
 }
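
Net effect: the ordering is now enforced by context tracking rather than by
RCU. A condensed view of the post-patch call flow, simplified from the hunks
above (the context-tracking bookkeeping between the two calls is elided):

void noinstr __ct_user_enter(enum ctx_state state)
{
	lockdep_assert_irqs_disabled();

	/* ... vtime and context-tracking state updates elided ... */

	rcu_irq_work_resched();		/* last-resort deferred NOCB resched */
	rcu_user_enter();		/* now a thin RCU EQS entry */
}

noinstr void rcu_user_enter(void)
{
	rcu_eqs_enter(true);
}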