locking/percpu-rwsem: Extract __percpu_down_read_trylock()
In preparation for removing the embedded rwsem and building a custom
lock, extract the read-trylock primitive.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Waiman Long <longman@redhat.com>
Tested-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lkml.kernel.org/r/20200131151540.098485539@infradead.org
Parent: 71365d4023
Commit: 75ff64572e
@@ -45,7 +45,7 @@ void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
 }
 EXPORT_SYMBOL_GPL(percpu_free_rwsem);
 
-bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
+static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 {
 	__this_cpu_inc(*sem->read_count);
 
@@ -73,11 +73,18 @@ bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
 	if (likely(!smp_load_acquire(&sem->readers_block)))
 		return true;
 
-	/*
-	 * Per the above comment; we still have preemption disabled and
-	 * will thus decrement on the same CPU as we incremented.
-	 */
-	__percpu_up_read(sem);
+	__this_cpu_dec(*sem->read_count);
+
+	/* Prod writer to re-evaluate readers_active_check() */
+	rcuwait_wake_up(&sem->writer);
+
+	return false;
+}
+
+bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
+{
+	if (__percpu_down_read_trylock(sem))
+		return true;
 
 	if (try)
 		return false;
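Reassembled from the two hunks above, the post-patch shape of the code reads roughly as follows. This is a reconstruction for readability, not part of the commit: the unchanged ordering comments and memory barrier in the middle of the trylock, and the pre-existing sleeping slow path at the end of __percpu_down_read(), are elided because this page does not show them.

static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	__this_cpu_inc(*sem->read_count);

	/* ... unchanged ordering comments and barrier ... */

	if (likely(!smp_load_acquire(&sem->readers_block)))
		return true;

	__this_cpu_dec(*sem->read_count);

	/* Prod writer to re-evaluate readers_active_check() */
	rcuwait_wake_up(&sem->writer);

	return false;
}

bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
{
	if (__percpu_down_read_trylock(sem))
		return true;

	if (try)
		return false;

	/* ... pre-existing wait/slow path, not shown on this page ... */
}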
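For context only (also not part of this commit): a minimal sketch of how the reader/writer API backed by this trylock is typically used. The semaphore name and the critical sections are illustrative; the API itself comes from include/linux/percpu-rwsem.h.

#include <linux/percpu-rwsem.h>

static DEFINE_STATIC_PERCPU_RWSEM(demo_rwsem);

/*
 * Hot path: readers pay only a per-CPU increment while no writer is
 * around; once a writer appears, the slow path runs __percpu_down_read(),
 * whose first step is now the extracted __percpu_down_read_trylock().
 */
static void demo_read_side(void)
{
	percpu_down_read(&demo_rwsem);
	/* ... read-side critical section ... */
	percpu_up_read(&demo_rwsem);
}

/* Cold path: the writer blocks new readers and waits for active ones to drain. */
static void demo_write_side(void)
{
	percpu_down_write(&demo_rwsem);
	/* ... exclusive critical section ... */
	percpu_up_write(&demo_rwsem);
}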