From 6ca055322da8fe25ff9ac50db6f3b7b59b6f961c Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Mon, 29 Jun 2020 11:15:26 +0000
Subject: [PATCH] powerpc/32s: Use dedicated segment for modules with
 STRICT_KERNEL_RWX

When STRICT_KERNEL_RWX is set, we want to set NX bit on vmalloc
segments. But modules require exec.

Use a dedicated segment for modules. There is not much space
above kernel, and we don't waste vmalloc space to do alignment.
Therefore, we take the segment before PAGE_OFFSET for modules.

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/eb8faba9148b6cf17c696ba776b4e8ee2f6313bf.1593428200.git.christophe.leroy@csgroup.eu
---
 arch/powerpc/Kconfig                         |  1 +
 arch/powerpc/include/asm/book3s/32/pgtable.h | 15 +++++----------
 arch/powerpc/mm/ptdump/ptdump.c              |  8 ++++++++
 3 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a751edacf4bc..8c7656cc10eb 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -1213,6 +1213,7 @@ config TASK_SIZE_BOOL
 config TASK_SIZE
 	hex "Size of user task space" if TASK_SIZE_BOOL
 	default "0x80000000" if PPC_8xx
+	default "0xb0000000" if PPC_BOOK3S_32 && STRICT_KERNEL_RWX
 	default "0xc0000000"
 
 endmenu
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 224912432821..36443cda8dcf 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -184,17 +184,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
  */
 #define VMALLOC_OFFSET (0x1000000) /* 16M */
 
-/*
- * With CONFIG_STRICT_KERNEL_RWX, kernel segments are set NX. But when modules
- * are used, NX cannot be set on VMALLOC space. So vmalloc VM space and linear
- * memory shall not share segments.
- */
-#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_MODULES)
-#define VMALLOC_START ((ALIGN((long)high_memory, 256L << 20) + VMALLOC_OFFSET) & \
-		       ~(VMALLOC_OFFSET - 1))
-#else
 #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
-#endif
 
 #ifdef CONFIG_KASAN_VMALLOC
 #define VMALLOC_END	ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
@@ -202,6 +192,11 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 #define VMALLOC_END	ioremap_bot
 #endif
 
+#ifdef CONFIG_STRICT_KERNEL_RWX
+#define MODULES_END	ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
+#define MODULES_VADDR	(MODULES_END - SZ_256M)
+#endif
+
 #ifndef __ASSEMBLY__
 #include
 #include
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 9d942136c7be..b2ed1ca4f254 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -74,6 +74,10 @@ struct addr_marker {
 
 static struct addr_marker address_markers[] = {
 	{ 0,	"Start of kernel VM" },
+#ifdef MODULES_VADDR
+	{ 0,	"modules start" },
+	{ 0,	"modules end" },
+#endif
 	{ 0,	"vmalloc() Area" },
 	{ 0,	"vmalloc() End" },
 #ifdef CONFIG_PPC64
@@ -352,6 +356,10 @@ static void populate_markers(void)
 	address_markers[i++].start_address = PAGE_OFFSET;
 #else
 	address_markers[i++].start_address = TASK_SIZE;
+#endif
+#ifdef MODULES_VADDR
+	address_markers[i++].start_address = MODULES_VADDR;
+	address_markers[i++].start_address = MODULES_END;
 #endif
 	address_markers[i++].start_address = VMALLOC_START;
 	address_markers[i++].start_address = VMALLOC_END;
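
For illustration only (not part of the patch): a minimal user-space sketch of
the segment arithmetic introduced above, assuming the usual book3s/32
PAGE_OFFSET of 0xc0000000 and 256M hash segments. With those values,
MODULES_END aligns down to PAGE_OFFSET itself and MODULES_VADDR lands one
segment below it at 0xb0000000, which is also why TASK_SIZE defaults to
0xb0000000 under STRICT_KERNEL_RWX: user space ends exactly where the module
segment begins.

/*
 * Stand-alone sketch, not kernel code. ALIGN_DOWN is simplified here and
 * assumes a power-of-two alignment; PAGE_OFFSET and SZ_256M are assumed
 * values for a typical book3s/32 configuration.
 */
#include <stdio.h>

#define SZ_256M			0x10000000UL
#define PAGE_OFFSET		0xc0000000UL	/* assumed kernel virtual base */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1UL))

#define MODULES_END		ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_VADDR		(MODULES_END - SZ_256M)

int main(void)
{
	/* Prints: modules span 0xb0000000 - 0xc0000000 */
	printf("modules span 0x%08lx - 0x%08lx\n", MODULES_VADDR, MODULES_END);
	return 0;
}

Keeping the module area inside its own 256M segment lets the hash MMU mark
the vmalloc segments NX while the dedicated module segment stays executable,
which is the point of the patch.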