diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index a56b0ccc8a6c..3793ed7feeeb 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -98,6 +98,10 @@ struct ww_acquire_ctx {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
 #endif
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	unsigned deadlock_inject_interval;
+	unsigned deadlock_inject_countdown;
+#endif
 };
 
 struct ww_mutex {
@@ -283,6 +287,10 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
 			 &ww_class->acquire_key, 0);
 	mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
 #endif
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	ctx->deadlock_inject_interval = 1;
+	ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
+#endif
 }
 
 /**
diff --git a/kernel/mutex.c b/kernel/mutex.c
index fc801aafe8fd..e581ada5faf4 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -651,22 +651,60 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 
+static inline int
+ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	unsigned tmp;
+
+	if (ctx->deadlock_inject_countdown-- == 0) {
+		tmp = ctx->deadlock_inject_interval;
+		if (tmp > UINT_MAX/4)
+			tmp = UINT_MAX;
+		else
+			tmp = tmp*2 + tmp + tmp/2;
+
+		ctx->deadlock_inject_interval = tmp;
+		ctx->deadlock_inject_countdown = tmp;
+		ctx->contending_lock = lock;
+
+		ww_mutex_unlock(lock);
+
+		return -EDEADLK;
+	}
+#endif
+
+	return 0;
+}
+
 int __sched
 __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
+	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
 				   0, &ctx->dep_map, _RET_IP_, ctx);
+	if (!ret && ctx->acquired > 0)
+		return ww_mutex_deadlock_injection(lock, ctx);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(__ww_mutex_lock);
 
 int __sched
 __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-				   0, &ctx->dep_map, _RET_IP_, ctx);
+	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
+				  0, &ctx->dep_map, _RET_IP_, ctx);
+
+	if (!ret && ctx->acquired > 0)
+		return ww_mutex_deadlock_injection(lock, ctx);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 566cf2bc08ea..7154f799541a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -547,6 +547,19 @@ config DEBUG_MUTEXES
 	 This feature allows mutex semantics violations to be detected and
 	 reported.
 
+config DEBUG_WW_MUTEX_SLOWPATH
+	bool "Wait/wound mutex debugging: Slowpath testing"
+	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	select DEBUG_LOCK_ALLOC
+	select DEBUG_SPINLOCK
+	select DEBUG_MUTEXES
+	help
+	 This feature enables slowpath testing for w/w mutex users by
+	 injecting additional -EDEADLK wound/backoff cases. Together with
+	 the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
+	 will test all possible w/w mutex interface abuse with the
+	 exception of simply not acquiring all the required locks.
+
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
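
For reference, a minimal caller-side sketch (not part of the patch) of the wound/backoff loop that the injected -EDEADLK is meant to exercise. The demo_obj structure, demo_ww_class and demo_lock_pair() are made-up names for illustration; the ww_mutex_lock()/ww_mutex_lock_slow()/ww_acquire_*() calls are the existing w/w mutex API, and the locks are assumed to have been set up with ww_mutex_init().

#include <linux/kernel.h>	/* swap() */
#include <linux/mutex.h>	/* w/w mutex API lives here as of this patch */

static DEFINE_WW_CLASS(demo_ww_class);	/* hypothetical lock class */

struct demo_obj {
	struct ww_mutex lock;	/* assumed: ww_mutex_init(&obj->lock, &demo_ww_class) */
	/* ... payload ... */
};

/* Acquire both objects' locks in wound/wait style, backing off on -EDEADLK. */
static void demo_lock_pair(struct demo_obj *a, struct demo_obj *b)
{
	struct ww_acquire_ctx ctx;
	struct demo_obj *first = a, *second = b;

	ww_acquire_init(&ctx, &demo_ww_class);

	/* The first lock in a ctx never returns -EDEADLK (ctx->acquired == 0). */
	ww_mutex_lock(&first->lock, &ctx);

	while (ww_mutex_lock(&second->lock, &ctx) == -EDEADLK) {
		/*
		 * Wounded (or the debug injection fired): drop what we hold,
		 * sleep on the contended lock, then retry with the roles
		 * swapped so the lock we just slept on is taken first.
		 */
		ww_mutex_unlock(&first->lock);
		ww_mutex_lock_slow(&second->lock, &ctx);
		swap(first, second);
	}

	ww_acquire_done(&ctx);

	/* ... both locks held: do work, then release ... */

	ww_mutex_unlock(&a->lock);
	ww_mutex_unlock(&b->lock);
	ww_acquire_fini(&ctx);
}

With CONFIG_DEBUG_WW_MUTEX_SLOWPATH enabled, the while loop above gets entered every deadlock_inject_interval acquisitions (growing roughly 3.5x each time it fires) even without any real contention, so backoff paths that would otherwise almost never run are exercised regularly.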