x86/vdso: Simplify the invalid vclock case

The code flow for the vclocks is convoluted: vclocks which can be
invalidated separately from the vsyscall_gtod_data sequence have to record
that fact in a separate variable. That's inefficient.

Restructure the code so the vclock readout returns cycles and the
conversion to nanoseconds is handled at the call site.
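
As a rough illustration of the new split, here is a minimal userspace
sketch (struct gtod_snap, vgetcyc_like() and the constants are made up for
the example; the real reader is vgetcyc() in the vDSO and the conversion
uses gtod->cycle_last, gtod->mult and gtod->shift as in the diff below):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the vsyscall_gtod_data snapshot. */
    struct gtod_snap {
            uint64_t cycle_last;
            uint32_t mult, shift;
    };

    /* The reader returns raw cycles only; it no longer scales them. */
    static uint64_t vgetcyc_like(void)
    {
            return 1000100;         /* stand-in for rdtsc_ordered() */
    }

    int main(void)
    {
            struct gtod_snap g = { .cycle_last = 1000000, .mult = 5, .shift = 1 };
            uint64_t cycles = vgetcyc_like();
            /* The call site, not the reader, converts cycles to ns. */
            uint64_t ns = ((cycles - g.cycle_last) * g.mult) >> g.shift;

            printf("ns = %llu\n", (unsigned long long)ns);
            return 0;
    }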

If the clock gets invalidated, or the vclock is already VCLOCK_NONE,
return U64_MAX as the cycle value. U64_MAX is invalid for all clocks, so
the sequence loop can be left immediately in that case by calling the
fallback function directly.
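
The sentinel works because U64_MAX is the only cycle value that is
negative when reinterpreted as signed, so the call site needs just one
test. A small self-contained demo of the pattern (read_cycles() is a
made-up stand-in for a vclock reader):

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up reader: returns raw cycles, or UINT64_MAX when the
     * clock is invalid (the VCLOCK_NONE / invalidated case). */
    static uint64_t read_cycles(int valid)
    {
            return valid ? 123456789ull : UINT64_MAX;
    }

    int main(void)
    {
            uint64_t cycles = read_cycles(0);

            /* Mirrors the kernel's check: (s64)cycles < 0 is true
             * only for the U64_MAX sentinel. */
            if ((int64_t)cycles < 0)
                    puts("invalid vclock - take the syscall fallback");
            else
                    printf("cycles = %llu\n", (unsigned long long)cycles);
            return 0;
    }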

This allows the gettimeofday() fallback to be removed: gettimeofday() now
uses the clock_gettime() fallback and does the nanoseconds to microseconds
conversion in the same way as when the vclock is functional. It makes no
difference whether the division by 1000 happens in the kernel fallback or
in userspace.
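
In userspace terms the simplified __vdso_gettimeofday() flow looks roughly
like this sketch (gettimeofday_like() is a made-up analogue; the real code
reuses the timeval storage as a timespec and divides tv_usec in place, as
the diff below shows):

    #include <stdio.h>
    #include <time.h>
    #include <sys/time.h>

    /* Fill a timespec via the clock_gettime() path, which already
     * falls back to the syscall on an invalid vclock, then convert
     * nanoseconds to microseconds in userspace. */
    static int gettimeofday_like(struct timeval *tv)
    {
            struct timespec ts;

            if (clock_gettime(CLOCK_REALTIME, &ts))
                    return -1;
            tv->tv_sec = ts.tv_sec;
            tv->tv_usec = ts.tv_nsec / 1000;  /* same /1000 either way */
            return 0;
    }

    int main(void)
    {
            struct timeval tv;

            if (!gettimeofday_like(&tv))
                    printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
            return 0;
    }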

Generates way better code and gains a few cycles back.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Matt Rickard <matt@softrans.com.au>
Cc: Stephen Boyd <sboyd@kernel.org>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Florian Weimer <fweimer@redhat.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: devel@linuxdriverproject.org
Cc: virtualization@lists.linux-foundation.org
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Juergen Gross <jgross@suse.com>
Link: https://lkml.kernel.org/r/20180917130707.657928937@linutronix.de
Thomas Gleixner, 2018-09-17 14:45:42 +02:00
Parent: f3e8393841
Commit: 4f72adc506
1 changed file with 21 additions and 61 deletions

@@ -49,17 +49,6 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 	return ret;
 }
 
-notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
-{
-	long ret;
-
-	asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
-	     "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
-	     "memory", "rcx", "r11");
-	return ret;
-}
-
 #else
 
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
@@ -77,21 +66,6 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 	return ret;
 }
 
-notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
-{
-	long ret;
-
-	asm (
-		"mov %%ebx, %%edx \n"
-		"mov %[tv], %%ebx \n"
-		"call __kernel_vsyscall \n"
-		"mov %%edx, %%ebx \n"
-		: "=a" (ret), "=m" (*tv), "=m" (*tz)
-		: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
-		: "memory", "edx");
-	return ret;
-}
-
 #endif
 
 #ifdef CONFIG_PARAVIRT_CLOCK
@@ -100,7 +74,7 @@ static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
 	return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
 }
 
-static notrace u64 vread_pvclock(int *mode)
+static notrace u64 vread_pvclock(void)
 {
 	const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
 	u64 ret;
@@ -132,10 +106,8 @@ static notrace u64 vread_pvclock(int *mode)
 	do {
 		version = pvclock_read_begin(pvti);
 
-		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) {
-			*mode = VCLOCK_NONE;
-			return 0;
-		}
+		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT)))
+			return U64_MAX;
 
 		ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
 	} while (pvclock_read_retry(pvti, version));
@@ -150,17 +122,12 @@ static notrace u64 vread_pvclock(int *mode)
 }
 #endif
 
 #ifdef CONFIG_HYPERV_TSCPAGE
-static notrace u64 vread_hvclock(int *mode)
+static notrace u64 vread_hvclock(void)
 {
 	const struct ms_hyperv_tsc_page *tsc_pg =
 		(const struct ms_hyperv_tsc_page *)&hvclock_page;
-	u64 current_tick = hv_read_tsc_page(tsc_pg);
-
-	if (current_tick != U64_MAX)
-		return current_tick;
 
-	*mode = VCLOCK_NONE;
-	return 0;
+	return hv_read_tsc_page(tsc_pg);
 }
 #endif
 
@@ -184,47 +151,42 @@ notrace static u64 vread_tsc(void)
 	return last;
 }
 
-notrace static inline u64 vgetsns(int *mode)
+notrace static inline u64 vgetcyc(int mode)
 {
-	u64 v;
-	cycles_t cycles;
-
-	if (gtod->vclock_mode == VCLOCK_TSC)
-		cycles = vread_tsc();
+	if (mode == VCLOCK_TSC)
+		return vread_tsc();
 #ifdef CONFIG_PARAVIRT_CLOCK
-	else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
-		cycles = vread_pvclock(mode);
+	else if (mode == VCLOCK_PVCLOCK)
+		return vread_pvclock();
 #endif
 #ifdef CONFIG_HYPERV_TSCPAGE
-	else if (gtod->vclock_mode == VCLOCK_HVCLOCK)
-		cycles = vread_hvclock(mode);
+	else if (mode == VCLOCK_HVCLOCK)
+		return vread_hvclock();
 #endif
-	else
-		return 0;
-
-	v = cycles - gtod->cycle_last;
-	return v * gtod->mult;
+	return U64_MAX;
 }
 
 notrace static int do_hres(clockid_t clk, struct timespec *ts)
 {
 	struct vgtod_ts *base = &gtod->basetime[clk];
 	unsigned int seq;
-	int mode;
-	u64 ns;
+	u64 cycles, ns;
 
 	do {
 		seq = gtod_read_begin(gtod);
-		mode = gtod->vclock_mode;
 		ts->tv_sec = base->sec;
 		ns = base->nsec;
-		ns += vgetsns(&mode);
+		cycles = vgetcyc(gtod->vclock_mode);
+		if (unlikely((s64)cycles < 0))
+			return vdso_fallback_gettime(clk, ts);
+		ns += (cycles - gtod->cycle_last) * gtod->mult;
 		ns >>= gtod->shift;
 	} while (unlikely(gtod_read_retry(gtod, seq)));
 
 	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
 	ts->tv_nsec = ns;
 
-	return mode;
+	return 0;
 }
 
 notrace static void do_coarse(clockid_t clk, struct timespec *ts)
@@ -253,8 +215,7 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 	 */
 	msk = 1U << clock;
 	if (likely(msk & VGTOD_HRES)) {
-		if (do_hres(clock, ts) != VCLOCK_NONE)
-			return 0;
+		return do_hres(clock, ts);
 	} else if (msk & VGTOD_COARSE) {
 		do_coarse(clock, ts);
 		return 0;
@@ -270,8 +231,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 	if (likely(tv != NULL)) {
 		struct timespec *ts = (struct timespec *) tv;
 
-		if (unlikely(do_hres(CLOCK_REALTIME, ts) == VCLOCK_NONE))
-			return vdso_fallback_gtod(tv, tz);
+		do_hres(CLOCK_REALTIME, ts);
 		tv->tv_usec /= 1000;
 	}
 
 	if (unlikely(tz != NULL)) {