context_tracking: avoid irq_save/irq_restore on guest entry and exit
guest_enter and guest_exit must be called with interrupts disabled,
since they take the vtime_seqlock with write_seq{lock,unlock}.
Therefore, it is not necessary to check for exceptions, nor to
save/restore the IRQ state, when context tracking functions are
called by guest_enter and guest_exit.

Split the body of context_tracking_enter and context_tracking_exit
out to __-prefixed functions, and use them from KVM.

Rik van Riel has measured this to speed up a tight vmentry/vmexit
loop by about 2%.

Cc: Andy Lutomirski <luto@kernel.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Tested-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent: f70cd6b07e
Commit: d0e536d893
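The change below is an instance of a common kernel idiom: keep an IRQ-safe
wrapper for callers whose IRQ state is unknown, and expose a __-prefixed
worker for callers that already run with interrupts disabled. Here is a
minimal userspace sketch of that idiom — not the kernel code itself; the
names ct_enter/__ct_enter and the boolean stand-in for
local_irq_save()/local_irq_restore() are illustrative only:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the local CPU's interrupt-enable flag. */
static bool irqs_enabled = true;

static void irq_save(bool *flags)
{
        *flags = irqs_enabled;  /* remember the previous state ...   */
        irqs_enabled = false;   /* ... then "disable interrupts"     */
}

static void irq_restore(bool flags)
{
        irqs_enabled = flags;
}

/* Worker: the __-prefix contract is "caller has interrupts disabled". */
static void __ct_enter(void)
{
        printf("tracking enter, irqs_enabled=%d\n", irqs_enabled);
}

/* Wrapper for callers with unknown IRQ state: pays for save/restore. */
static void ct_enter(void)
{
        bool flags;

        irq_save(&flags);
        __ct_enter();
        irq_restore(flags);
}

int main(void)
{
        ct_enter();             /* generic caller: wrapper handles IRQs */

        irqs_enabled = false;   /* KVM-style caller: IRQs already off ... */
        __ct_enter();           /* ... so it calls the worker directly    */
        irqs_enabled = true;
        return 0;
}

KVM sits on the vmentry/vmexit fast path and already holds interrupts off
across both transitions, so calling the worker directly saves two flag
save/restore pairs per guest entry/exit — the ~2% Rik van Riel measured.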
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -10,6 +10,10 @@
 #ifdef CONFIG_CONTEXT_TRACKING
 extern void context_tracking_cpu_set(int cpu);
 
+/* Called with interrupts disabled.  */
+extern void __context_tracking_enter(enum ctx_state state);
+extern void __context_tracking_exit(enum ctx_state state);
+
 extern void context_tracking_enter(enum ctx_state state);
 extern void context_tracking_exit(enum ctx_state state);
 extern void context_tracking_user_enter(void);
@@ -88,13 +92,13 @@ static inline void guest_enter(void)
 		current->flags |= PF_VCPU;
 
 	if (context_tracking_is_enabled())
-		context_tracking_enter(CONTEXT_GUEST);
+		__context_tracking_enter(CONTEXT_GUEST);
 }
 
 static inline void guest_exit(void)
 {
 	if (context_tracking_is_enabled())
-		context_tracking_exit(CONTEXT_GUEST);
+		__context_tracking_exit(CONTEXT_GUEST);
 
 	if (vtime_accounting_enabled())
 		vtime_guest_exit(current);
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -58,27 +58,13 @@ static void context_tracking_recursion_exit(void)
  * instructions to execute won't use any RCU read side critical section
  * because this function sets RCU in extended quiescent state.
  */
-void context_tracking_enter(enum ctx_state state)
+void __context_tracking_enter(enum ctx_state state)
 {
-	unsigned long flags;
-
-	/*
-	 * Some contexts may involve an exception occuring in an irq,
-	 * leading to that nesting:
-	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
-	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
-	 * helpers are enough to protect RCU uses inside the exception. So
-	 * just return immediately if we detect we are in an IRQ.
-	 */
-	if (in_interrupt())
-		return;
-
 	/* Kernel threads aren't supposed to go to userspace */
 	WARN_ON_ONCE(!current->mm);
 
-	local_irq_save(flags);
 	if (!context_tracking_recursion_enter())
-		goto out_irq_restore;
+		return;
 
 	if ( __this_cpu_read(context_tracking.state) != state) {
 		if (__this_cpu_read(context_tracking.active)) {
@@ -111,7 +97,27 @@ void context_tracking_enter(enum ctx_state state)
 		__this_cpu_write(context_tracking.state, state);
 	}
 	context_tracking_recursion_exit();
-out_irq_restore:
+}
+NOKPROBE_SYMBOL(__context_tracking_enter);
+EXPORT_SYMBOL_GPL(__context_tracking_enter);
+
+void context_tracking_enter(enum ctx_state state)
+{
+	unsigned long flags;
+
+	/*
+	 * Some contexts may involve an exception occuring in an irq,
+	 * leading to that nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+	 * helpers are enough to protect RCU uses inside the exception. So
+	 * just return immediately if we detect we are in an IRQ.
+	 */
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+	__context_tracking_enter(state);
 	local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(context_tracking_enter);
@@ -135,16 +141,10 @@ NOKPROBE_SYMBOL(context_tracking_user_enter);
  * This call supports re-entrancy. This way it can be called from any exception
  * handler without needing to know if we came from userspace or not.
  */
-void context_tracking_exit(enum ctx_state state)
+void __context_tracking_exit(enum ctx_state state)
 {
-	unsigned long flags;
-
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
 	if (!context_tracking_recursion_enter())
-		goto out_irq_restore;
+		return;
 
 	if (__this_cpu_read(context_tracking.state) == state) {
 		if (__this_cpu_read(context_tracking.active)) {
@@ -161,7 +161,19 @@ void context_tracking_exit(enum ctx_state state)
 		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
 	}
 	context_tracking_recursion_exit();
-out_irq_restore:
+}
+NOKPROBE_SYMBOL(__context_tracking_exit);
+EXPORT_SYMBOL_GPL(__context_tracking_exit);
+
+void context_tracking_exit(enum ctx_state state)
+{
+	unsigned long flags;
+
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+	__context_tracking_exit(state);
 	local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(context_tracking_exit);
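For context, this is roughly how a hypervisor run loop satisfies the
precondition the patch relies on. A hedged sketch with stubbed-out
primitives, not actual KVM code; vcpu_run() and run_guest() are
hypothetical names:

#include <stdio.h>

/* Stubs standing in for the kernel primitives named in the commit
 * message; the real guest_enter()/guest_exit() live in
 * <linux/context_tracking.h>. */
static void local_irq_disable(void) { puts("irqs off"); }
static void local_irq_enable(void)  { puts("irqs on"); }
static void guest_enter(void)  { puts("guest_enter (interrupts are off)"); }
static void guest_exit(void)   { puts("guest_exit  (interrupts are off)"); }
static int run_guest(void)     { puts("vmentry ... vmexit"); return 0; }

/*
 * Hypothetical vcpu run loop: both guest_enter() and guest_exit() are
 * reached with interrupts disabled, which is why the __-prefixed
 * context-tracking helpers can skip irq_save/irq_restore entirely.
 */
static int vcpu_run(void)
{
        int exit_reason;

        local_irq_disable();
        guest_enter();                  /* accounting + RCU extended QS */
        exit_reason = run_guest();      /* enter the guest              */
        guest_exit();                   /* back to kernel context       */
        local_irq_enable();

        return exit_reason;
}

int main(void)
{
        return vcpu_run();
}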