x86/pgtable/32: Allocate 8k page-tables when PTI is enabled
Allocate a kernel and a user page-table root when PTI is enabled. Also allocate a full page per root for PAE because otherwise the bit to flip in CR3 to switch between them would be non-constant, which creates a lot of hassle. Keep that for a later optimization.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Pavel Machek <pavel@ucw.cz>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: linux-mm@kvack.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Waiman Long <llong@redhat.com>
Cc: "David H . Gutteridge" <dhgutteridge@sympatico.ca>
Cc: joro@8bytes.org
Link: https://lkml.kernel.org/r/1531906876-13451-18-git-send-email-joro@8bytes.org
Parent: 7ffcf1497c
Commit: e3238faf20
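To make the CR3 remark above concrete: when both page-table roots live in one 8 KiB-aligned, 8 KiB block, the user root always sits exactly one page above the kernel root, so the entry code can switch between them by toggling a single, constant bit of the CR3 value. The user-space sketch below only illustrates that address arithmetic; PAGE_SIZE, aligned_alloc() and the names used here are stand-ins, not the kernel's implementation.

/*
 * Minimal sketch, assuming an 8 KiB-aligned pair of 4 KiB roots:
 * the user root is kernel root + PAGE_SIZE, so switching becomes a
 * constant XOR of bit 12 in the (simulated) CR3 value.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* One order-1 allocation: two consecutive pages, 8 KiB aligned. */
	void *pgd_pair = aligned_alloc(2 * PAGE_SIZE, 2 * PAGE_SIZE);
	if (!pgd_pair)
		return 1;

	uintptr_t kernel_pgd = (uintptr_t)pgd_pair;	/* first page  */
	uintptr_t user_pgd   = kernel_pgd + PAGE_SIZE;	/* second page */

	/* Because of the alignment, the two roots differ only in bit 12. */
	uintptr_t cr3_kernel = kernel_pgd;		/* stand-in for a CR3 value */
	uintptr_t cr3_user   = cr3_kernel ^ PAGE_SIZE;	/* constant bit flip */

	printf("kernel pgd %#lx, user pgd %#lx, flip ok: %s\n",
	       (unsigned long)cr3_kernel, (unsigned long)cr3_user,
	       cr3_user == user_pgd ? "yes" : "no");

	free(pgd_pair);
	return 0;
}

If the two roots were only a page apart but not aligned as an 8 KiB pair, the differing CR3 bit would depend on the allocation address, which is the non-constant hassle the message refers to; shrinking the PAE allocation again is left for a later optimization.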
arch/x86/kernel/head_32.S

@@ -512,11 +512,18 @@ ENTRY(initial_code)
 ENTRY(setup_once_ref)
 	.long setup_once
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#define PGD_ALIGN	(2 * PAGE_SIZE)
+#define PTI_USER_PGD_FILL	1024
+#else
+#define PGD_ALIGN	(PAGE_SIZE)
+#define PTI_USER_PGD_FILL	0
+#endif
 /*
  * BSS section
  */
 __PAGE_ALIGNED_BSS
-	.align PAGE_SIZE
+	.align PGD_ALIGN
 #ifdef CONFIG_X86_PAE
 .globl initial_pg_pmd
 initial_pg_pmd:
@@ -526,14 +533,17 @@ initial_pg_pmd:
 initial_page_table:
 	.fill 1024,4,0
 #endif
+	.align PGD_ALIGN
 initial_pg_fixmap:
 	.fill 1024,4,0
+.globl swapper_pg_dir
+	.align PGD_ALIGN
+swapper_pg_dir:
+	.fill 1024,4,0
+	.fill PTI_USER_PGD_FILL,4,0
 .globl empty_zero_page
 empty_zero_page:
 	.fill 4096,1,0
-.globl swapper_pg_dir
-swapper_pg_dir:
-	.fill 1024,4,0
 EXPORT_SYMBOL(empty_zero_page)
 
 /*
@@ -542,7 +552,7 @@ EXPORT_SYMBOL(empty_zero_page)
 #ifdef CONFIG_X86_PAE
 __PAGE_ALIGNED_DATA
 	/* Page-aligned for the benefit of paravirt? */
-	.align PAGE_SIZE
+	.align PGD_ALIGN
 ENTRY(initial_page_table)
 	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
 # if KPMDS == 3
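For the head_32.S hunks above, the sizes work out as follows: .fill 1024,4,0 emits 1024 four-byte entries, i.e. exactly one 4 KiB page, and PTI_USER_PGD_FILL appends another 1024 entries when PTI is enabled, so swapper_pg_dir grows to 8 KiB and PGD_ALIGN of 2 * PAGE_SIZE keeps the kernel/user pair naturally aligned. A small compile-time check of the same arithmetic (plain C11, not kernel code; the macro names merely mirror the assembly above):

/* Hedged size check mirroring the .fill directives above. */
#include <assert.h>

#define PAGE_SIZE		4096
#define PGD_ENTRIES		1024	/* .fill 1024,4,0          */
#define PGD_ENTRY_SIZE		4	/* four bytes per entry    */
#define PTI_USER_PGD_FILL	1024	/* extra entries under PTI */

static_assert(PGD_ENTRIES * PGD_ENTRY_SIZE == PAGE_SIZE,
	      "one .fill 1024,4,0 block is exactly one page");
static_assert((PGD_ENTRIES + PTI_USER_PGD_FILL) * PGD_ENTRY_SIZE == 2 * PAGE_SIZE,
	      "kernel + user root fill the whole 8 KiB PGD_ALIGN slot");

int main(void) { return 0; }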
arch/x86/mm/pgtable.c

@@ -343,7 +343,8 @@ static inline pgd_t *_pgd_alloc(void)
 	 * We allocate one page for pgd.
 	 */
 	if (!SHARED_KERNEL_PMD)
-		return (pgd_t *)__get_free_page(PGALLOC_GFP);
+		return (pgd_t *)__get_free_pages(PGALLOC_GFP,
+						 PGD_ALLOCATION_ORDER);
 
 	/*
 	 * Now PAE kernel is not running as a Xen domain. We can allocate
@@ -355,7 +356,7 @@ static inline pgd_t *_pgd_alloc(void)
 static inline void _pgd_free(pgd_t *pgd)
 {
 	if (!SHARED_KERNEL_PMD)
-		free_page((unsigned long)pgd);
+		free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
 	else
 		kmem_cache_free(pgd_cache, pgd);
 }
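The pgtable.c hunks switch the !SHARED_KERNEL_PMD path from a fixed single page to an order-based allocation and make _pgd_free() pass the same order back. Assuming PGD_ALLOCATION_ORDER is defined elsewhere in the tree as 1 when PTI is enabled and 0 otherwise, the pgd allocation doubles to 8 KiB under PTI. A user-space sketch of that alloc/free pairing (get_free_pages_sketch() and free_pages_sketch() are stand-ins for the kernel's __get_free_pages()/free_pages(), not their implementation):

#include <stdlib.h>

#define PAGE_SIZE		4096UL
#define PGD_ALLOCATION_ORDER	1	/* assumed: 1 with PTI, 0 without */

/* Hand out 2^order contiguous, naturally aligned pages. */
static void *get_free_pages_sketch(unsigned int order)
{
	size_t size = PAGE_SIZE << order;
	return aligned_alloc(size, size);
}

/* Release them again; the order must match the allocation. */
static void free_pages_sketch(void *addr, unsigned int order)
{
	(void)order;	/* free() ignores it, the kernel's free_pages() does not */
	free(addr);
}

int main(void)
{
	void *pgd = get_free_pages_sketch(PGD_ALLOCATION_ORDER);
	if (!pgd)
		return 1;
	/* _pgd_free() mirrors _pgd_alloc(): same order on both sides. */
	free_pages_sketch(pgd, PGD_ALLOCATION_ORDER);
	return 0;
}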