sched/cputime: Spare a seqcount lock/unlock cycle on context switch
On context switch we are locking the vtime seqcount of the scheduling-out task twice:

 * On vtime_common_task_switch(), when we flush the pending vtime through vtime_account_system()

 * On arch_vtime_task_switch() to reset the vtime state.

This is pointless as these actions can be performed without the need to unlock/lock in the middle. The reason these steps are separated is to consolidate a very small amount of common code between CONFIG_VIRT_CPU_ACCOUNTING_GEN and CONFIG_VIRT_CPU_ACCOUNTING_NATIVE.

Performance in this fast path is definitely a priority over artificial code factorization, so split the task switch code between GEN and NATIVE and mutualize the parts that can run under a single seqcount locked block.

As a side effect, vtime_account_idle() becomes included in the seqcount protection. This happens to be a welcome preparation in order to properly support kcpustat under vtime in the future and fetch CPUTIME_IDLE without race.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wanpeng Li <wanpengli@tencent.com>
Cc: Yauheni Kaliuta <yauheni.kaliuta@redhat.com>
Link: https://lkml.kernel.org/r/20191003161745.28464-3-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: f83eeb1a01
Commit: 8d495477d6
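For orientation before the hunks: on the CONFIG_VIRT_CPU_ACCOUNTING_GEN side, the patch folds the vtime flush and the state reset of the scheduling-out task into one seqcount write section. The sketch below condenses that before/after pattern; the wrapper names sched_out_vtime_before()/sched_out_vtime_after() are hypothetical and the bodies are simplified (the real code is in the kernel/sched/cputime.c hunks further down), so treat it as an illustration rather than the actual source.

/* Simplified sketch, GEN path only; hypothetical wrappers for illustration. */

/* Before: flush, then a second seqcount write section just to reset the state */
static void sched_out_vtime_before(struct task_struct *prev)
{
	struct vtime *vtime = &prev->vtime;

	/* vtime_common_task_switch(): flush the pending vtime */
	if (is_idle_task(prev))
		vtime_account_idle(prev);	/* idle flush ran outside the seqcount */
	else
		vtime_account_kernel(prev);	/* takes seqcount begin/end internally */

	/* arch_vtime_task_switch(): second lock/unlock cycle */
	write_seqcount_begin(&vtime->seqcount);
	vtime->state = VTIME_INACTIVE;
	write_seqcount_end(&vtime->seqcount);
}

/* After: vtime_task_switch_generic() does flush + reset under one section */
static void sched_out_vtime_after(struct task_struct *prev)
{
	struct vtime *vtime = &prev->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (is_idle_task(prev))
		vtime_account_idle(prev);	/* now covered by the seqcount */
	else
		__vtime_account_kernel(prev, vtime);
	vtime->state = VTIME_INACTIVE;
	write_seqcount_end(&vtime->seqcount);
}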
include/linux/vtime.h

@@ -14,8 +14,12 @@ struct task_struct;
  * vtime_accounting_cpu_enabled() definitions/declarations
  */
 #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
+
 static inline bool vtime_accounting_cpu_enabled(void) { return true; }
+extern void vtime_task_switch(struct task_struct *prev);
+
 #elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
+
 /*
  * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
  * in that case and compute the tickless cputime.
@@ -36,33 +40,29 @@ static inline bool vtime_accounting_cpu_enabled(void)
 
 	return false;
 }
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
-static inline bool vtime_accounting_cpu_enabled(void) { return false; }
-#endif
 
+extern void vtime_task_switch_generic(struct task_struct *prev);
 
+static inline void vtime_task_switch(struct task_struct *prev)
+{
+	if (vtime_accounting_cpu_enabled())
+		vtime_task_switch_generic(prev);
+}
+
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+
+static inline bool vtime_accounting_cpu_enabled(void) { return false; }
+static inline void vtime_task_switch(struct task_struct *prev) { }
+
+#endif
 
 /*
  * Common vtime APIs
  */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-
-#ifdef __ARCH_HAS_VTIME_TASK_SWITCH
-extern void vtime_task_switch(struct task_struct *prev);
-#else
-extern void vtime_common_task_switch(struct task_struct *prev);
-static inline void vtime_task_switch(struct task_struct *prev)
-{
-	if (vtime_accounting_cpu_enabled())
-		vtime_common_task_switch(prev);
-}
-#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */
-
 extern void vtime_account_kernel(struct task_struct *tsk);
 extern void vtime_account_idle(struct task_struct *tsk);
-
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
-
-static inline void vtime_task_switch(struct task_struct *prev) { }
 static inline void vtime_account_kernel(struct task_struct *tsk) { }
 #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
kernel/sched/cputime.c

@@ -405,9 +405,10 @@ static inline void irqtime_account_process_tick(struct task_struct *p, int user_
 /*
  * Use precise platform statistics if available:
  */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+
 # ifndef __ARCH_HAS_VTIME_TASK_SWITCH
-void vtime_common_task_switch(struct task_struct *prev)
+void vtime_task_switch(struct task_struct *prev)
 {
 	if (is_idle_task(prev))
 		vtime_account_idle(prev);
@@ -418,10 +419,7 @@ void vtime_common_task_switch(struct task_struct *prev)
 	arch_vtime_task_switch(prev);
 }
 # endif
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
-
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /*
  * Archs that account the whole time spent in the idle task
  * (outside irq) as idle time can rely on this and just implement
@@ -731,6 +729,16 @@ static void vtime_account_guest(struct task_struct *tsk,
 	}
 }
 
+static void __vtime_account_kernel(struct task_struct *tsk,
+				   struct vtime *vtime)
+{
+	/* We might have scheduled out from guest path */
+	if (tsk->flags & PF_VCPU)
+		vtime_account_guest(tsk, vtime);
+	else
+		vtime_account_system(tsk, vtime);
+}
+
 void vtime_account_kernel(struct task_struct *tsk)
 {
 	struct vtime *vtime = &tsk->vtime;
@@ -739,11 +747,7 @@ void vtime_account_kernel(struct task_struct *tsk)
 		return;
 
 	write_seqcount_begin(&vtime->seqcount);
-	/* We might have scheduled out from guest path */
-	if (tsk->flags & PF_VCPU)
-		vtime_account_guest(tsk, vtime);
-	else
-		vtime_account_system(tsk, vtime);
+	__vtime_account_kernel(tsk, vtime);
 	write_seqcount_end(&vtime->seqcount);
 }
 
@@ -804,11 +808,15 @@ void vtime_account_idle(struct task_struct *tsk)
 	account_idle_time(get_vtime_delta(&tsk->vtime));
 }
 
-void arch_vtime_task_switch(struct task_struct *prev)
+void vtime_task_switch_generic(struct task_struct *prev)
 {
 	struct vtime *vtime = &prev->vtime;
 
 	write_seqcount_begin(&vtime->seqcount);
+	if (is_idle_task(prev))
+		vtime_account_idle(prev);
+	else
+		__vtime_account_kernel(prev, vtime);
 	vtime->state = VTIME_INACTIVE;
 	write_seqcount_end(&vtime->seqcount);
 
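On the motivation to "fetch CPUTIME_IDLE without race": with idle accounting now inside the same write section as the state reset, a future reader can sample the task's vtime with the standard seqcount retry loop. The helper below is a hypothetical illustration of that read side (it is not part of this patch or series), using only the generic read_seqcount_begin()/read_seqcount_retry() API:

/*
 * Hypothetical read-side sketch: sample a task's vtime consistently against
 * the single write section taken at context switch. Not part of this patch.
 */
static int vtime_sample_state(struct task_struct *t)
{
	struct vtime *vtime = &t->vtime;
	unsigned int seq;
	int state;

	do {
		seq = read_seqcount_begin(&vtime->seqcount);
		state = vtime->state;
		/* ... a kcpustat reader would also read/extend cputime fields here ... */
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return state;
}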