// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>
#include <linux/pgtable.h>

#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

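/* Per-CPU backing store for the entry stack, mapped into each CPU's cpu_entry_area. */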
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
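
/*
 * On 64-bit, the IST exception stacks are backed by this per-CPU storage;
 * cea_exception_stacks records where each CPU's copy is mapped in the
 * cpu_entry_area.
 */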
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
#endif

#ifdef CONFIG_X86_32
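/* Per-CPU page holding the 32-bit #DF stack and TSS; only declared here. */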
DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
#endif

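/* Return the fixed virtual address of @cpu's cpu_entry_area. */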
struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
        unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
        BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

        return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);
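
/*
 * Install a single PTE in the cpu_entry_area, mapping the page at physical
 * address @pa at virtual address @cea_vaddr with protections @flags.
 */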
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
        unsigned long va = (unsigned long) cea_vaddr;
        pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

        /*
         * The cpu_entry_area is shared between the user and kernel
         * page tables. All of its ptes can safely be global.
         * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
         * non-present PTEs, so be careful not to set it in that
         * case to avoid confusion.
         */
        if (boot_cpu_has(X86_FEATURE_PGE) &&
            (pgprot_val(flags) & _PAGE_PRESENT))
                pte = pte_set_flags(pte, _PAGE_GLOBAL);

        set_pte_vaddr(va, pte);
}
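
/* Map @pages per-CPU pages starting at @ptr into the cpu_entry_area at @cea_vaddr. */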
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
        for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
                cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}
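
/*
 * Map the per-CPU Intel debug store (PEBS/BTS) pages into the
 * cpu_entry_area; the buffer area is only reserved with non-present
 * PTEs here and filled in later.
 */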
static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
        unsigned int npages;
        void *cea;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return;

        cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
        npages = sizeof(struct debug_store) / PAGE_SIZE;
        BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
        cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
                             PAGE_KERNEL);

        cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
        /*
         * Force the population of PMDs for not yet allocated per cpu
         * memory like debug store buffers.
         */
        npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
        for (; npages; npages--, cea += PAGE_SIZE)
                cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

#ifdef CONFIG_X86_64

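/*
 * Map one IST stack from the per-CPU exception_stacks backing store into
 * its slot in cea->estacks; npages, estacks and cea come from the
 * enclosing function.
 */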
#define cea_map_stack(name) do {                                        \
        npages = sizeof(estacks->name## _stack) / PAGE_SIZE;            \
        cea_map_percpu_pages(cea->estacks.name## _stack,                \
                        estacks->name## _stack, npages, PAGE_KERNEL);   \
        } while (0)
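
/*
 * Map this CPU's exception stacks (#DF, NMI, #DB, #MC) into its
 * cpu_entry_area and record where they landed.
 */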
static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
        struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
        struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
        unsigned int npages;

        BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);

        per_cpu(cea_exception_stacks, cpu) = &cea->estacks;

        /*
         * The exception stack mappings in the per cpu area are protected
         * by guard pages so each stack must be mapped separately. DB2 is
         * not mapped; it just exists to catch triple nesting of #DB.
         */
        cea_map_stack(DF);
        cea_map_stack(NMI);
        cea_map_stack(DB);
        cea_map_stack(MCE);
}
#else
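/*
 * 32-bit has no IST stacks; only the per-CPU doublefault stack/TSS page
 * needs to be mapped into the cpu_entry_area.
 */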
static inline void percpu_setup_exception_stacks(unsigned int cpu)
{
        struct cpu_entry_area *cea = get_cpu_entry_area(cpu);

        cea_map_percpu_pages(&cea->doublefault_stack,
                             &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
}
#endif

/* Set up the fixmap mappings only once per processor */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
        struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
        /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
        pgprot_t gdt_prot = PAGE_KERNEL_RO;
        pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
        /*
         * On native 32-bit systems, the GDT cannot be read-only because
         * our double fault handler uses a task gate, and entering through
         * a task gate needs to change an available TSS to busy. If the
         * GDT is read-only, that will triple fault. The TSS cannot be
         * read-only because the CPU writes to it on task switches.
         *
         * On Xen PV, the GDT must be read-only because the hypervisor
         * requires it.
         */
        pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
                PAGE_KERNEL_RO : PAGE_KERNEL;
        pgprot_t tss_prot = PAGE_KERNEL;
#endif

        cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

        cea_map_percpu_pages(&cea->entry_stack_page,
                             per_cpu_ptr(&entry_stack_storage, cpu), 1,
                             PAGE_KERNEL);

        /*
         * The Intel SDM says (Volume 3, 7.2.1):
         *
         *  Avoid placing a page boundary in the part of the TSS that the
         *  processor reads during a task switch (the first 104 bytes). The
         *  processor may not correctly perform address translations if a
         *  boundary occurs in this area. During a task switch, the processor
         *  reads and writes into the first 104 bytes of each TSS (using
         *  contiguous physical addresses beginning with the physical address
         *  of the first byte of the TSS). So, after TSS access begins, if
         *  part of the 104 bytes is not physically contiguous, the processor
         *  will access incorrect information without generating a page-fault
         *  exception.
         *
         * There are also a lot of errata involving the TSS spanning a page
         * boundary. Assert that we're not doing that.
         */
        BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
                      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
        BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
        /*
         * VMX changes the host TR limit to 0x67 after a VM exit. This is
         * okay, since 0x67 covers the size of struct x86_hw_tss. Make sure
         * that this is correct.
         */
        BUILD_BUG_ON(offsetof(struct tss_struct, x86_tss) != 0);
        BUILD_BUG_ON(sizeof(struct x86_hw_tss) != 0x68);

        cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
                             sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
        per_cpu(cpu_entry_area, cpu) = cea;
#endif

        percpu_setup_exception_stacks(cpu);

        percpu_setup_debug_store(cpu);
}
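
/*
 * On 32-bit the cpu_entry_area lives in a fixed kernel address range, so
 * populate the intermediate page table levels for that range up front.
 */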
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
        unsigned long start, end;

        /* The +1 is for the readonly IDT: */
        BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
        BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
        BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

        start = CPU_ENTRY_AREA_BASE;
        end = start + CPU_ENTRY_AREA_MAP_SIZE;

        /* Careful here: start + PMD_SIZE might wrap around */
        for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
                populate_extra_pte(start);
#endif
}
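
/* Set up the cpu_entry_area for every possible CPU. */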
void __init setup_cpu_entry_areas(void)
{
        unsigned int cpu;

        setup_cpu_entry_area_ptes();

        for_each_possible_cpu(cpu)
                setup_cpu_entry_area(cpu);

        /*
         * This is the last essential update to swapper_pg_dir which needs
         * to be synchronized to initial_page_table on 32-bit.
         */
        sync_initial_page_table();
}