diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index f66554cd5c45..3a059cb5e112 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -1 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0
+#
+config LIGHTWEIGHT_SPINLOCK_CHECK
+	bool "Enable lightweight spinlock checks"
+	depends on SMP && !DEBUG_SPINLOCK
+	default y
+	help
+	  Add checks with low performance impact to the spinlock functions
+	  to catch memory overwrites at runtime. For more advanced
+	  spinlock debugging you should choose the DEBUG_SPINLOCK option
+	  which will detect uninitialized spinlocks too.
+	  If unsure say Y here.
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index a6e5d66a7656..edfcb9858bcb 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -7,10 +7,26 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
+#define SPINLOCK_BREAK_INSN	0x0000c006	/* break 6,6 */
+
+static inline void arch_spin_val_check(int lock_val)
+{
+	if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
+		asm volatile(	"andcm,=	%0,%1,%%r0\n"
+				".word %2\n"
+		: : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
+			"i" (SPINLOCK_BREAK_INSN));
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
-	volatile unsigned int *a = __ldcw_align(x);
-	return READ_ONCE(*a) == 0;
+	volatile unsigned int *a;
+	int lock_val;
+
+	a = __ldcw_align(x);
+	lock_val = READ_ONCE(*a);
+	arch_spin_val_check(lock_val);
+	return (lock_val == 0);
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *x)
@@ -18,9 +34,18 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
 	volatile unsigned int *a;
 
 	a = __ldcw_align(x);
-	while (__ldcw(a) == 0)
+	do {
+		int lock_val_old;
+
+		lock_val_old = __ldcw(a);
+		arch_spin_val_check(lock_val_old);
+		if (lock_val_old)
+			return;	/* got lock */
+
+		/* wait until we should try to get lock again */
 		while (*a == 0)
 			continue;
+	} while (1);
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
@@ -29,15 +54,19 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
 
 	a = __ldcw_align(x);
 	/* Release with ordered store. */
-	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+	__asm__ __volatile__("stw,ma %0,0(%1)"
+		: : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
+	int lock_val;
 
 	a = __ldcw_align(x);
-	return __ldcw(a) != 0;
+	lock_val = __ldcw(a);
+	arch_spin_val_check(lock_val);
+	return lock_val != 0;
 }
 
 /*
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index ca39ee350c3f..d65934079ebd 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -2,13 +2,17 @@
 #ifndef __ASM_SPINLOCK_TYPES_H
 #define __ASM_SPINLOCK_TYPES_H
 
+#define __ARCH_SPIN_LOCK_UNLOCKED_VAL	0x1a46
+
 typedef struct {
 #ifdef CONFIG_PA20
 	volatile unsigned int slock;
-# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED_VAL }
 #else
 	volatile unsigned int lock[4];
-# define __ARCH_SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED	\
+	{ { __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL, \
+	    __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL } }
 #endif
 } arch_spinlock_t;
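
For readers not fluent in PA-RISC assembler, here is a stand-alone C sketch of what the arch_spin_val_check() logic above amounts to; it is illustrative only (spin_val_check(), the main() driver and the sample values are made up here, they are not part of the patch). A healthy lock word may only hold 0 (lock held) or __ARCH_SPIN_LOCK_UNLOCKED_VAL (0x1a46, lock free), so "andcm,= %0,%1,%%r0" computes lock_val & ~0x1a46 and nullifies the following ".word 0x0000c006" (break 6,6) when the result is zero; any stray bit outside 0x1a46 lets the break execute and trap into the handler added to traps.c below.

/*
 * Illustrative user-space model of the lightweight spinlock check.
 * The names and values here are hypothetical, not from the patch.
 */
#include <stdio.h>
#include <stdlib.h>

#define __ARCH_SPIN_LOCK_UNLOCKED_VAL	0x1a46

static void spin_val_check(unsigned int lock_val)
{
	/*
	 * Mirrors "andcm,= lock_val,0x1a46,%r0": the break instruction is
	 * nullified (skipped) when lock_val & ~0x1a46 == 0, i.e. for the
	 * two legal values 0 (locked) and 0x1a46 (unlocked).
	 */
	if (lock_val & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL) {
		/* kernel: "break 6,6" traps and handle_break() reports
		 * "Spinlock was trashed" via die_if_kernel() */
		fprintf(stderr, "Spinlock was trashed: 0x%08x\n", lock_val);
		abort();
	}
}

int main(void)
{
	spin_val_check(__ARCH_SPIN_LOCK_UNLOCKED_VAL);	/* free lock: ok */
	spin_val_check(0);				/* held lock: ok */
	spin_val_check(0xdeadbeef);			/* overwrite: caught */
	return 0;
}

As the sketch shows, the check is deliberately cheap: an overwrite whose set bits happen to fall entirely inside 0x1a46 still passes, which is why the Kconfig help text points at DEBUG_SPINLOCK for more thorough debugging (that option also catches uninitialized spinlocks).
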
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index f9696fbf646c..13df6169f9e2 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -47,6 +47,10 @@
 #include <linux/kgdb.h>
 #include <linux/kprobes.h>
 
+#if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)
+#include <asm/spinlock.h>
+#endif
+
 #include "../math-emu/math-emu.h"	/* for handle_fpe() */
 
 static void parisc_show_stack(struct task_struct *task,
@@ -309,6 +313,12 @@ static void handle_break(struct pt_regs *regs)
 	}
 #endif
 
+#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
+	if ((iir == SPINLOCK_BREAK_INSN) && !user_mode(regs)) {
+		die_if_kernel("Spinlock was trashed", regs, 1);
+	}
+#endif
+
 	if (unlikely(iir != GDB_BREAK_INSN))
 		parisc_printk_ratelimited(0, regs, KERN_DEBUG
 			"break %d,%d: pid=%d command='%s'\n",
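
One hedged aside on the magic instruction word, which the patch itself does not spell out: assuming the usual PA-RISC BREAK layout (major opcode 0, the im13 immediate shifted left by 13, im5 in the low five bits), the literal 0x0000c006 emitted by ".word %2" decodes exactly to "break 6,6", which is why handle_break() above can compare the trapping instruction (iir) against SPINLOCK_BREAK_INSN. The helper below is hypothetical and only checks that arithmetic:

/*
 * Hypothetical encoder for the assumed PA-RISC BREAK layout;
 * parisc_break() is not a kernel function.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int parisc_break(unsigned int im13, unsigned int im5)
{
	return (im13 << 13) | (im5 & 0x1f);	/* major opcode 0 */
}

int main(void)
{
	assert(parisc_break(6, 6) == 0x0000c006);	/* SPINLOCK_BREAK_INSN */
	printf("break 6,6 -> 0x%08x\n", parisc_break(6, 6));
	return 0;
}
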