From 579940bb451c2dd33396d2d56ce6ef5d92154b3b Mon Sep 17 00:00:00 2001
From: Nicholas Piggin <npiggin@gmail.com>
Date: Wed, 29 Apr 2020 16:56:51 +1000
Subject: [PATCH] powerpc/64/kuap: Conditionally restore AMR in interrupt exit

The AMR update is made conditional on AMR actually changing, which
should be the less common case on most workloads (though kernel page
faults on uaccess could be frequent, this doesn't significantly slow
down that case).

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200429065654.1677541-4-npiggin@gmail.com
---
 .../powerpc/include/asm/book3s/64/kup-radix.h | 22 ++++++++++++++++---
 arch/powerpc/kernel/syscall_64.c              | 14 ++++++++----
 2 files changed, 29 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
index 101d60f16d46..820169bac6c4 100644
--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
@@ -62,9 +62,9 @@
 #include <asm/mmu.h>
 #include <asm/ptrace.h>
 
-static inline void kuap_restore_amr(struct pt_regs *regs)
+static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
 {
-	if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) {
+	if (mmu_has_feature(MMU_FTR_RADIX_KUAP) && unlikely(regs->kuap != amr)) {
 		isync();
 		mtspr(SPRN_AMR, regs->kuap);
 		/*
@@ -75,6 +75,17 @@ static inline void kuap_restore_amr(struct pt_regs *regs)
 	}
 }
 
+static inline unsigned long kuap_get_and_check_amr(void)
+{
+	if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) {
+		unsigned long amr = mfspr(SPRN_AMR);
+		if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
+			WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
+		return amr;
+	}
+	return 0;
+}
+
 static inline void kuap_check_amr(void)
 {
 	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP))
@@ -151,13 +162,18 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 		    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
 }
 #else /* CONFIG_PPC_KUAP */
-static inline void kuap_restore_amr(struct pt_regs *regs)
+static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
 {
 }
 
 static inline void kuap_check_amr(void)
 {
 }
+
+static inline unsigned long kuap_get_and_check_amr(void)
+{
+	return 0;
+}
 #endif /* CONFIG_PPC_KUAP */
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
index 613da0d0fa8c..79edba3ab312 100644
--- a/arch/powerpc/kernel/syscall_64.c
+++ b/arch/powerpc/kernel/syscall_64.c
@@ -242,6 +242,10 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 	BUG_ON(!FULL_REGS(regs));
 	BUG_ON(regs->softe != IRQS_ENABLED);
 
+	/*
+	 * We don't need to restore AMR on the way back to userspace for KUAP.
+	 * AMR can only have been unlocked if we interrupted the kernel.
+	 */
 	kuap_check_amr();
 
 	local_irq_save(flags);
@@ -313,13 +317,14 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 	unsigned long *ti_flagsp = &current_thread_info()->flags;
 	unsigned long flags;
 	unsigned long ret = 0;
+	unsigned long amr;
 
 	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && unlikely(!(regs->msr & MSR_RI)))
 		unrecoverable_exception(regs);
 	BUG_ON(regs->msr & MSR_PR);
 	BUG_ON(!FULL_REGS(regs));
 
-	kuap_check_amr();
+	amr = kuap_get_and_check_amr();
 
 	if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
 		clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
@@ -367,10 +372,11 @@ again:
 #endif
 
 	/*
-	 * We don't need to restore AMR on the way back to userspace for KUAP.
-	 * The value of AMR only matters while we're in the kernel.
+	 * Don't want to mfspr(SPRN_AMR) here, because this comes after mtmsr,
+	 * which would cause Read-After-Write stalls. Hence, we take the AMR
+	 * value from the check above.
	 */
-	kuap_restore_amr(regs);
+	kuap_restore_amr(regs, amr);
 
 	return ret;
 }
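
For readers outside the kernel tree, a minimal, self-contained C sketch of
the pattern this patch adopts: snapshot the register once on entry (before
the mtmsr that would make a later mfspr stall), then on exit write it back
only when the interrupted context's saved value actually differs. The names
read_spr(), write_spr(), fake_regs, and fake_amr are hypothetical stand-ins
for mfspr/mtspr, struct pt_regs, and the AMR SPR; nothing below is kernel
API.

#include <stdio.h>

/* Hypothetical stand-in for the AMR special-purpose register. */
static unsigned long fake_amr;

static unsigned long read_spr(void)    { return fake_amr; } /* ~mfspr */
static void write_spr(unsigned long v) { fake_amr = v; }    /* ~mtspr */

/* Mirrors the regs->kuap slot where entry code saves the AMR. */
struct fake_regs {
	unsigned long kuap;
};

/* Interrupt entry: read the register once, while a read is still cheap. */
static unsigned long kuap_get_and_check(void)
{
	return read_spr();
}

/*
 * Interrupt exit: skip the expensive register write in the common case
 * where the value to restore equals what the register already holds.
 * (The real kernel code issues isync() before the mtspr.)
 */
static void kuap_restore(struct fake_regs *regs, unsigned long amr)
{
	if (regs->kuap != amr)
		write_spr(regs->kuap);
}

int main(void)
{
	struct fake_regs regs = { .kuap = 0xc000000000000000UL };

	fake_amr = regs.kuap;	/* common case: AMR unchanged */
	kuap_restore(&regs, kuap_get_and_check());	/* no write issued */

	fake_amr = 0;		/* rare case: AMR was unlocked for uaccess */
	kuap_restore(&regs, kuap_get_and_check());	/* write restores it */

	printf("AMR is %#lx\n", fake_amr);
	return 0;
}

The payoff is in the common case: most interrupts taken from kernel mode
find the AMR still locked, so the comparison matches and the mtspr (with
its isync) is skipped; only paths that unlocked the AMR, such as kernel
page faults during uaccess, pay for the restore.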