locking/mutexes: Standardize arguments in lock/unlock slowpaths
Just as the locking end does, when unlocking, go ahead and obtain the proper data structure (struct mutex) immediately after the arch-specific (asm-end) call exits and there are (probably) pending waiters. This simplifies the layering a bit.

Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: jason.low2@hp.com
Cc: aswin@hp.com
Cc: mingo@kernel.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1406752916-3341-1-git-send-email-davidlohr@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Parent: 2e39465abc
Commit: 242489cfe9
@@ -679,9 +679,8 @@ EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
  * Release the lock, slowpath:
  */
 static inline void
-__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
+__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
 {
-	struct mutex *lock = container_of(lock_count, struct mutex, count);
 	unsigned long flags;
 
 	/*
@@ -716,7 +715,9 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 __visible void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
-	__mutex_unlock_common_slowpath(lock_count, 1);
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	__mutex_unlock_common_slowpath(lock, 1);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
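
The hunks amount to one layering change: the atomic_t-to-struct-mutex conversion now happens once, in __mutex_unlock_slowpath(), right after the arch fastpath call fails, and the common slowpath receives the full struct mutex. Below is a minimal, self-contained user-space sketch of that container_of pattern; struct toy_mutex, toy_unlock_slowpath() and the simplified container_of macro are hypothetical stand-ins for illustration, not the kernel's definitions.

/*
 * Illustrative only: stand-alone demo of converting a member pointer
 * back to its containing structure at the entry point, then passing
 * the proper type down to the common slowpath.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_mutex {
	int count;		/* stands in for the mutex's atomic counter */
	const char *name;
};

/* Common slowpath: takes the full structure, as in the patch. */
static void toy_unlock_common_slowpath(struct toy_mutex *lock, int nested)
{
	printf("unlocking %s (nested=%d)\n", lock->name, nested);
}

/*
 * Entry point called with only the counter's address, mirroring
 * __mutex_unlock_slowpath(): obtain the containing structure
 * immediately, then hand the standardized argument to common code.
 */
static void toy_unlock_slowpath(int *lock_count)
{
	struct toy_mutex *lock = container_of(lock_count, struct toy_mutex, count);

	toy_unlock_common_slowpath(lock, 1);
}

int main(void)
{
	struct toy_mutex m = { .count = 0, .name = "demo" };

	toy_unlock_slowpath(&m.count);
	return 0;
}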