x86-64: Optimize vDSO time()
This function just reads a 64-bit variable that's updated atomically, so we don't need any locks. Signed-off-by: Andy Lutomirski <luto@mit.edu> Cc: Andi Kleen <andi@firstfloor.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: "David S. Miller" <davem@davemloft.net> Cc: Eric Dumazet <eric.dumazet@gmail.com> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Borislav Petkov <bp@amd64.org> Link: http://lkml.kernel.org/r/%3C40e2700f8cda4d511e5910be1e633025d28b36c2.1306156808.git.luto%40mit.edu%3E Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
Parent
f144a6b4d1
Commit
973aa8181e
|
@ -180,12 +180,8 @@ notrace time_t __vdso_time(time_t *t)
|
||||||
if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
|
if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
|
||||||
return time_syscall(t);
|
return time_syscall(t);
|
||||||
|
|
||||||
do {
|
/* This is atomic on x86_64 so we don't need any locks. */
|
||||||
seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
|
result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
|
||||||
|
|
||||||
result = VVAR(vsyscall_gtod_data).wall_time_sec;
|
|
||||||
|
|
||||||
} while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));
|
|
||||||
|
|
||||||
if (t)
|
if (t)
|
||||||
*t = result;
|
*t = result;
|
||||||
|
|
Loading…
Link in new issue