x86/delay: Preparatory code cleanup
The naming conventions in the delay code are confusing at best. All
delay variants use a "loops" argument and/or variable which originates
from the original delay_loop() implementation. But all variants except
delay_loop() are based on TSC cycles.

Rename the argument to "cycles" and make it type u64 to avoid these
weird expansions to u64 in the functions.

Rename MWAITX_MAX_LOOPS to MWAITX_MAX_WAIT_CYCLES for the same reason
and fixup the comment of delay_mwaitx() as well.

Mark the delay_fn function pointer __ro_after_init and fixup the
comment for it.

No functional change and preparation for the upcoming TPAUSE based
delay variant.

[ Kyung Min Park: Added __init to use_tsc_delay() ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Kyung Min Park <kyung.min.park@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/1587757076-30337-2-git-send-email-kyung.min.park@intel.com
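For orientation, here is a minimal sketch (not part of this patch;
example_udelay() and its tsc_khz parameter are made up for
illustration) of how a microsecond delay request becomes the u64 cycle
count that the new delay_fn signature expects:

/*
 * Illustrative only: convert a microsecond request to TSC cycles and
 * hand a u64 straight to the selected delay function. With the old
 * unsigned long signature, each variant had to widen the value to u64
 * internally; the new signature keeps the full width end to end.
 */
static void (*delay_fn)(u64);	/* selected once during boot */

static void example_udelay(unsigned int usecs, unsigned int tsc_khz)
{
	u64 cycles = (u64)usecs * tsc_khz / 1000;	/* usecs -> TSC cycles */

	delay_fn(cycles);	/* no narrowing through unsigned long */
}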
Parent: 3c40cdb0e9
Commit: e882489024
arch/x86/include/asm/delay.h:

@@ -3,8 +3,9 @@
 #define _ASM_X86_DELAY_H
 
 #include <asm-generic/delay.h>
+#include <linux/init.h>
 
-void use_tsc_delay(void);
+void __init use_tsc_delay(void);
 void use_mwaitx_delay(void);
 
 #endif /* _ASM_X86_DELAY_H */
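A brief aside on the __init annotation added above (explanatory, not
from the patch): __init places use_tsc_delay() in .init.text, which the
kernel frees once boot completes, so the function must only be reached
from boot-time code; on x86 it is called from tsc_init(). A schematic
of the pattern, with example_boot_setup() as a hypothetical caller:

#include <linux/init.h>

void __init use_tsc_delay(void);

static int __init example_boot_setup(void)	/* hypothetical boot-time caller */
{
	use_tsc_delay();	/* fine here: .init.text is still mapped during boot */
	return 0;
}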
arch/x86/include/asm/mwait.h:

@@ -20,7 +20,7 @@
 
 #define MWAIT_ECX_INTERRUPT_BREAK	0x1
 #define MWAITX_ECX_TIMER_ENABLE		BIT(1)
-#define MWAITX_MAX_LOOPS		((u32)-1)
+#define MWAITX_MAX_WAIT_CYCLES		UINT_MAX
 #define MWAITX_DISABLE_CSTATES		0xf0
 
 u32 get_umwait_control_msr(void);
arch/x86/lib/delay.c:

@@ -27,9 +27,19 @@
 # include <asm/smp.h>
 #endif
 
+static void delay_loop(u64 __loops);
+
+/*
+ * Calibration and selection of the delay mechanism happens only once
+ * during boot.
+ */
+static void (*delay_fn)(u64) __ro_after_init = delay_loop;
+
 /* simple loop based delay: */
-static void delay_loop(unsigned long loops)
+static void delay_loop(u64 __loops)
 {
+	unsigned long loops = (unsigned long)__loops;
+
 	asm volatile(
 		"	test %0,%0	\n"
 		"	jz 3f		\n"
@@ -49,9 +59,9 @@ static void delay_loop(unsigned long loops)
 }
 
 /* TSC based delay: */
-static void delay_tsc(unsigned long __loops)
+static void delay_tsc(u64 cycles)
 {
-	u64 bclock, now, loops = __loops;
+	u64 bclock, now;
 	int cpu;
 
 	preempt_disable();
@@ -59,7 +69,7 @@ static void delay_tsc(unsigned long __loops)
 	bclock = rdtsc_ordered();
 	for (;;) {
 		now = rdtsc_ordered();
-		if ((now - bclock) >= loops)
+		if ((now - bclock) >= cycles)
 			break;
 
 		/* Allow RT tasks to run */
@@ -77,7 +87,7 @@ static void delay_tsc(unsigned long __loops)
 	 * counter for this CPU.
 	 */
 	if (unlikely(cpu != smp_processor_id())) {
-		loops -= (now - bclock);
+		cycles -= (now - bclock);
 		cpu = smp_processor_id();
 		bclock = rdtsc_ordered();
 	}
@@ -87,24 +97,24 @@
 
 /*
  * On some AMD platforms, MWAITX has a configurable 32-bit timer, that
- * counts with TSC frequency. The input value is the loop of the
- * counter, it will exit when the timer expires.
+ * counts with TSC frequency. The input value is the number of TSC cycles
+ * to wait. MWAITX will also exit when the timer expires.
  */
-static void delay_mwaitx(unsigned long __loops)
+static void delay_mwaitx(u64 cycles)
 {
-	u64 start, end, delay, loops = __loops;
+	u64 start, end, delay;
 
 	/*
 	 * Timer value of 0 causes MWAITX to wait indefinitely, unless there
 	 * is a store on the memory monitored by MONITORX.
 	 */
-	if (loops == 0)
+	if (!cycles)
 		return;
 
 	start = rdtsc_ordered();
 
 	for (;;) {
-		delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
+		delay = min_t(u64, MWAITX_MAX_WAIT_CYCLES, cycles);
 
 		/*
 		 * Use cpu_tss_rw as a cacheline-aligned, seldomly
@@ -121,22 +131,15 @@ static void delay_mwaitx(unsigned long __loops)
 
 		end = rdtsc_ordered();
 
-		if (loops <= end - start)
+		if (cycles <= end - start)
 			break;
 
-		loops -= end - start;
-
+		cycles -= end - start;
 		start = end;
 	}
 }
 
-/*
- * Since we calibrate only once at boot, this
- * function should be set once at boot and not changed
- */
-static void (*delay_fn)(unsigned long) = delay_loop;
-
-void use_tsc_delay(void)
+void __init use_tsc_delay(void)
 {
 	if (delay_fn == delay_loop)
 		delay_fn = delay_tsc;
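To summarize the logic the last two delay.c hunks touch, here is a
freestanding sketch of the chunked-wait pattern (illustrative names,
not the kernel source): the MWAITX timer is only 32 bits wide, so a
u64 cycle budget is consumed in UINT_MAX-sized slices, re-reading the
timestamp counter after each wait:

#include <stdint.h>
#include <limits.h>

/* Hypothetical stand-ins for rdtsc_ordered() and the MWAITX wait. */
extern uint64_t read_tsc(void);
extern void hw_wait_cycles(uint32_t timer);	/* hardware cap: UINT_MAX */

static void wait_tsc_cycles(uint64_t cycles)
{
	uint64_t start, end, chunk;

	if (!cycles)	/* a timer value of 0 would wait indefinitely */
		return;

	start = read_tsc();
	for (;;) {
		/* The timer is 32-bit: wait at most UINT_MAX cycles at a time. */
		chunk = cycles < UINT_MAX ? cycles : UINT_MAX;
		hw_wait_cycles((uint32_t)chunk);

		end = read_tsc();
		if (cycles <= end - start)	/* full budget elapsed */
			break;

		cycles -= end - start;	/* charge the time actually spent */
		start = end;
	}
}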