x86/ldt: Split out sanity check in map_ldt_struct()
Split out the mapping sanity check and the actual mapping of the LDT to user-space from the map_ldt_struct() function, so that both can be re-used for PAE paging.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Pavel Machek <pavel@ucw.cz>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: linux-mm@kvack.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Waiman Long <llong@redhat.com>
Cc: "David H. Gutteridge" <dhgutteridge@sympatico.ca>
Cc: joro@8bytes.org
Link: https://lkml.kernel.org/r/1531906876-13451-36-git-send-email-joro@8bytes.org
Parent: 8195d869d1
Commit: 9bae3197e1
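The value of the split is that do_sanity_check() carries only the policy (which WARN_ON()s fire when the mm already has an LDT vs. when one is mapped for the first time), while the page-table walk that feeds it stays in small helpers. As a rough illustration of the intended re-use, a PAE kernel only has to walk down to the pmd level before invoking the same check. The sketch below is not part of this patch; the pgd_to_pmd_walk() helper and the pmd-level comparisons are assumptions modeled on the direction of this patch series:

/*
 * Illustrative sketch only -- an approximation of how a PAE kernel
 * could re-use do_sanity_check(): on PAE, the LDT mapping has to be
 * compared at the pmd level instead of the pgd level.
 */
static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)
{
	p4d_t *p4d;
	pud_t *pud;

	if (pgd->pgd == 0)
		return NULL;

	p4d = p4d_offset(pgd, va);
	if (p4d_none(*p4d))
		return NULL;

	pud = pud_offset(p4d, va);
	if (pud_none(*pud))
		return NULL;

	return pmd_offset(pud, va);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
	pmd_t *k_pmd, *u_pmd;
	bool had_kernel, had_user;

	k_pmd = pgd_to_pmd_walk(pgd, LDT_BASE_ADDR);
	u_pmd = pgd_to_pmd_walk(kernel_to_user_pgdp(pgd), LDT_BASE_ADDR);

	had_kernel = (k_pmd && pmd_val(*k_pmd) != 0);
	had_user   = (u_pmd && pmd_val(*u_pmd) != 0);

	/* The split-out policy check is re-used unchanged. */
	do_sanity_check(mm, had_kernel, had_user);
}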
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -100,6 +100,49 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
 	return new_ldt;
 }
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+
+static void do_sanity_check(struct mm_struct *mm,
+			    bool had_kernel_mapping,
+			    bool had_user_mapping)
+{
+	if (mm->context.ldt) {
+		/*
+		 * We already had an LDT. The top-level entry should already
+		 * have been allocated and synchronized with the usermode
+		 * tables.
+		 */
+		WARN_ON(!had_kernel_mapping);
+		if (static_cpu_has(X86_FEATURE_PTI))
+			WARN_ON(!had_user_mapping);
+	} else {
+		/*
+		 * This is the first time we're mapping an LDT for this process.
+		 * Sync the pgd to the usermode tables.
+		 */
+		WARN_ON(had_kernel_mapping);
+		if (static_cpu_has(X86_FEATURE_PTI))
+			WARN_ON(had_user_mapping);
+	}
+}
+
+static void map_ldt_struct_to_user(struct mm_struct *mm)
+{
+	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
+
+	if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+		set_pgd(kernel_to_user_pgdp(pgd), *pgd);
+}
+
+static void sanity_check_ldt_mapping(struct mm_struct *mm)
+{
+	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
+	bool had_kernel = (pgd->pgd != 0);
+	bool had_user   = (kernel_to_user_pgdp(pgd)->pgd != 0);
+
+	do_sanity_check(mm, had_kernel, had_user);
+}
+
 /*
  * If PTI is enabled, this maps the LDT into the kernelmode and
  * usermode tables for the given mm.
@@ -115,9 +158,8 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
 static int
 map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 {
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-	bool is_vmalloc, had_top_level_entry;
 	unsigned long va;
+	bool is_vmalloc;
 	spinlock_t *ptl;
 	pgd_t *pgd;
 	int i;
@@ -131,13 +173,15 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 	 */
 	WARN_ON(ldt->slot != -1);
 
+	/* Check if the current mappings are sane */
+	sanity_check_ldt_mapping(mm);
+
 	/*
 	 * Did we already have the top level entry allocated?  We can't
 	 * use pgd_none() for this because it doens't do anything on
 	 * 4-level page table kernels.
 	 */
 	pgd = pgd_offset(mm, LDT_BASE_ADDR);
-	had_top_level_entry = (pgd->pgd != 0);
 
 	is_vmalloc = is_vmalloc_addr(ldt->entries);
 
@@ -172,35 +216,25 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 		pte_unmap_unlock(ptep, ptl);
 	}
 
-	if (mm->context.ldt) {
-		/*
-		 * We already had an LDT. The top-level entry should already
-		 * have been allocated and synchronized with the usermode
-		 * tables.
-		 */
-		WARN_ON(!had_top_level_entry);
-		if (static_cpu_has(X86_FEATURE_PTI))
-			WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
-	} else {
-		/*
-		 * This is the first time we're mapping an LDT for this process.
-		 * Sync the pgd to the usermode tables.
-		 */
-		WARN_ON(had_top_level_entry);
-		if (static_cpu_has(X86_FEATURE_PTI)) {
-			WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
-			set_pgd(kernel_to_user_pgdp(pgd), *pgd);
-		}
-	}
+	/* Propagate LDT mapping to the user page-table */
+	map_ldt_struct_to_user(mm);
 
 	va = (unsigned long)ldt_slot_va(slot);
 	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
 
 	ldt->slot = slot;
-#endif
 	return 0;
 }
 
+#else /* !CONFIG_PAGE_TABLE_ISOLATION */
+
+static int
+map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
+{
+	return 0;
+}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
+
 static void free_ldt_pgtables(struct mm_struct *mm)
 {
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
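For completeness, the path patched above is driven from user-space via modify_ldt(2): write_ldt() allocates the new LDT and calls map_ldt_struct() before installing it. Below is a minimal exerciser, assuming an x86 Linux host; the program is illustrative and not part of the commit:

/*
 * Hypothetical exerciser: installing one LDT entry forces the kernel
 * through write_ldt() -> map_ldt_struct() for this mm.
 */
#include <asm/ldt.h>        /* struct user_desc */
#include <sys/syscall.h>    /* SYS_modify_ldt */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number   = 0;
	desc.base_addr      = 0;
	desc.limit          = 0xfffff;
	desc.seg_32bit      = 1;
	desc.limit_in_pages = 1;
	desc.useable        = 1;

	/* func == 1: write an LDT entry (see write_ldt() in ldt.c) */
	if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) != 0) {
		perror("modify_ldt");
		return 1;
	}

	puts("LDT entry installed; map_ldt_struct() ran for this process");
	return 0;
}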