x86/segments/64: Rename the GDT PER_CPU entry to CPU_NUMBER

The old 'per CPU' naming was misleading: 64-bit kernels don't use this
GDT entry for per-CPU data; they use it to store the CPU (and node) ID.

[ mingo: Wrote new changelog. ]

Suggested-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Markus T Metzger <markus.t.metzger@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Shankar <ravi.v.shankar@intel.com>
Cc: Rik van Riel <riel@surriel.com>
Link: http://lkml.kernel.org/r/1537312139-5580-7-git-send-email-chang.seok.bae@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Chang S. Bae, 2018-09-18 16:08:57 -07:00, committed by Ingo Molnar
Parent: f4550b52e4
Commit: c4755613a1
3 changed files, 7 additions and 8 deletions


@@ -359,7 +359,7 @@ static void vgetcpu_cpu_init(void *arg)
 	d.p = 1;		/* Present */
 	d.d = 1;		/* 32-bit */
 
-	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
+	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPU_NUMBER, &d, DESCTYPE_S);
 }
 
 static int vgetcpu_online(unsigned int cpu)
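For context, the descriptor written by write_gdt_entry() above carries the CPU and node number in its segment limit, which is what the vDSO later reads back with LSL. Below is a minimal user-space sketch of that packing, assuming the usual (node << 12) | cpu layout; the constants and helper names are illustrative, not kernel code.

/*
 * Minimal sketch of the (assumed) encoding stored in this GDT entry's
 * segment limit: the low 12 bits hold the CPU number, the bits above
 * hold the NUMA node.  The real kernel builds a struct desc_struct;
 * this only models the packed value for illustration.
 */
#include <stdio.h>

#define CPU_BITS	12
#define CPU_MASK	((1u << CPU_BITS) - 1)	/* 0xfff */

/* Pack cpu and node the way the vDSO expects to find them via LSL. */
static unsigned int pack_cpu_node(unsigned int cpu, unsigned int node)
{
	return (node << CPU_BITS) | (cpu & CPU_MASK);
}

int main(void)
{
	unsigned int limit = pack_cpu_node(5, 1);

	printf("limit=0x%x cpu=%u node=%u\n",
	       limit, limit & CPU_MASK, limit >> CPU_BITS);
	return 0;
}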


@@ -186,8 +186,7 @@
 #define GDT_ENTRY_TLS_MIN		12
 #define GDT_ENTRY_TLS_MAX		14
 
-/* Abused to load per CPU data from limit */
-#define GDT_ENTRY_PER_CPU		15
+#define GDT_ENTRY_CPU_NUMBER		15
 
 /*
  * Number of entries in the GDT table:
@@ -207,7 +206,7 @@
 #define __USER_DS			(GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
 #define __USER32_DS			__USER_DS
 #define __USER_CS			(GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
-#define __PER_CPU_SEG			(GDT_ENTRY_PER_CPU*8 + 3)
+#define __CPU_NUMBER_SEG		(GDT_ENTRY_CPU_NUMBER*8 + 3)
 
 #endif
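The "*8 + 3" in the selector definition is the standard x86 selector encoding: the GDT index in the upper bits, TI = 0 for the GDT, and requested privilege level 3 so user space can reference the entry. A small stand-alone check of that arithmetic (illustrative only, not part of the patch):

/* Sketch: an x86 segment selector is (index << 3) | TI | RPL. */
#include <assert.h>

int main(void)
{
	unsigned int gdt_entry_cpu_number = 15;
	unsigned int selector = (gdt_entry_cpu_number << 3) | 3;

	assert(selector == 0x7b);	/* 15*8 + 3 = 123 = 0x7b */
	return 0;
}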


@@ -86,9 +86,9 @@ static inline unsigned int __getcpu(void)
 	unsigned int p;
 
 	/*
-	 * Load per CPU data from GDT. LSL is faster than RDTSCP and
-	 * works on all CPUs. This is volatile so that it orders
-	 * correctly wrt barrier() and to keep gcc from cleverly
+	 * Load CPU (and node) number from GDT. LSL is faster than RDTSCP
+	 * and works on all CPUs. This is volatile so that it orders
+	 * correctly with respect to barrier() and to keep GCC from cleverly
 	 * hoisting it out of the calling function.
 	 *
 	 * If RDPID is available, use it.
@@ -96,7 +96,7 @@ static inline unsigned int __getcpu(void)
 	alternative_io ("lsl %[seg],%[p]",
 			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
 			X86_FEATURE_RDPID,
-			[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
+			[p] "=a" (p), [seg] "r" (__CPU_NUMBER_SEG));
 
 	return p;
 }
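The value __getcpu() returns packs both IDs into one word; callers then split it apart. A hedged sketch of that decode, assuming the 12-bit CPU / upper-bits node layout noted earlier (the mask and function names below are illustrative, not the kernel's):

#include <stdio.h>

#define VGETCPU_CPU_BITS	12
#define VGETCPU_CPU_MASK	((1u << VGETCPU_CPU_BITS) - 1)

/* Split the packed value read via LSL/RDPID into cpu and node. */
static void decode_cpu_number(unsigned int p,
			      unsigned int *cpu, unsigned int *node)
{
	*cpu  = p & VGETCPU_CPU_MASK;
	*node = p >> VGETCPU_CPU_BITS;
}

int main(void)
{
	unsigned int cpu, node;

	decode_cpu_number(0x1005, &cpu, &node);	/* node 1, cpu 5 */
	printf("cpu=%u node=%u\n", cpu, node);
	return 0;
}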