rwsem: fully separate code paths to wake writers vs readers
This is in preparation for later changes in the series. In __rwsem_do_wake(), the first queued waiter is checked first in order to determine whether it's a writer or a reader. The code paths diverge at this point. The code that checks and increments the rwsem active count is duplicated on both sides - the point is that later changes in the series will be able to independently modify both sides. Signed-off-by: Michel Lespinasse <walken@google.com> Acked-by: David Howells <dhowells@redhat.com> Cc: Mike Waychison <mikew@google.com> Cc: Suleiman Souhlal <suleiman@google.com> Cc: Ying Han <yinghan@google.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Parent
7721fea3d0
Commit
345af7bf33
65
lib/rwsem.c
65
lib/rwsem.c
|
@ -41,7 +41,7 @@ struct rwsem_waiter {
|
|||
* - if we come here from up_xxxx(), then:
|
||||
* - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
|
||||
* - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
|
||||
* - there must be someone on the queue
|
||||
* - there must be someone on the queue
|
||||
* - the spinlock must be held by the caller
|
||||
* - woken process blocks are discarded from the list after having task zeroed
|
||||
* - writers are only woken if downgrading is false
|
||||
|
@ -54,26 +54,23 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
|
|||
struct list_head *next;
|
||||
signed long oldcount, woken, loop;
|
||||
|
||||
if (downgrading)
|
||||
goto dont_wake_writers;
|
||||
waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
|
||||
if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
|
||||
goto readers_only;
|
||||
|
||||
/* if we came through an up_xxxx() call, we only only wake someone up
|
||||
* if we can transition the active part of the count from 0 -> 1
|
||||
if (downgrading)
|
||||
goto out;
|
||||
|
||||
/* There's a writer at the front of the queue - try to grant it the
|
||||
* write lock. However, we only wake this writer if we can transition
|
||||
* the active part of the count from 0 -> 1
|
||||
*/
|
||||
try_again:
|
||||
try_again_write:
|
||||
oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
|
||||
- RWSEM_ACTIVE_BIAS;
|
||||
if (oldcount & RWSEM_ACTIVE_MASK)
|
||||
goto undo;
|
||||
|
||||
waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
|
||||
|
||||
/* try to grant a single write lock if there's a writer at the front
|
||||
* of the queue - note we leave the 'active part' of the count
|
||||
* incremented by 1 and the waiting part incremented by 0x00010000
|
||||
*/
|
||||
if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
|
||||
goto readers_only;
|
||||
/* Someone grabbed the sem already */
|
||||
goto undo_write;
|
||||
|
||||
/* We must be careful not to touch 'waiter' after we set ->task = NULL.
|
||||
* It is an allocated on the waiter's stack and may become invalid at
|
||||
|
@ -87,18 +84,24 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
|
|||
put_task_struct(tsk);
|
||||
goto out;
|
||||
|
||||
/* don't want to wake any writers */
|
||||
dont_wake_writers:
|
||||
waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
|
||||
if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
|
||||
goto out;
|
||||
|
||||
/* grant an infinite number of read locks to the readers at the front
|
||||
* of the queue
|
||||
* - note we increment the 'active part' of the count by the number of
|
||||
* readers before waking any processes up
|
||||
*/
|
||||
readers_only:
|
||||
if (downgrading)
|
||||
goto wake_readers;
|
||||
|
||||
/* if we came through an up_xxxx() call, we only only wake someone up
|
||||
* if we can transition the active part of the count from 0 -> 1 */
|
||||
try_again_read:
|
||||
oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
|
||||
- RWSEM_ACTIVE_BIAS;
|
||||
if (oldcount & RWSEM_ACTIVE_MASK)
|
||||
/* Someone grabbed the sem already */
|
||||
goto undo_read;
|
||||
|
||||
wake_readers:
|
||||
/* Grant an infinite number of read locks to the readers at the front
|
||||
* of the queue. Note we increment the 'active part' of the count by
|
||||
* the number of readers before waking any processes up.
|
||||
*/
|
||||
woken = 0;
|
||||
do {
|
||||
woken++;
|
||||
|
@ -138,10 +141,14 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
|
|||
|
||||
/* undo the change to the active count, but check for a transition
|
||||
* 1->0 */
|
||||
undo:
|
||||
undo_write:
|
||||
if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
|
||||
goto out;
|
||||
goto try_again;
|
||||
goto try_again_write;
|
||||
undo_read:
|
||||
if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
|
||||
goto out;
|
||||
goto try_again_read;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
Loading…
Reference in new issue