locking/percpu-rwsem: Use this_cpu_{inc,dec}() for read_count
The __this_cpu*() accessors are (in general) IRQ-unsafe which, given
that percpu-rwsem is a blocking primitive, should be just fine.
However, file_end_write() is used from IRQ context and will cause
load-store tearing, and thus lost updates, on architectures where the
per-cpu accessors are not natively IRQ-safe.
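
As an illustration (a sketch of the failure mode, not an actual
trace): assume __this_cpu_inc() compiles to a plain load/add/store
sequence on such an architecture. An IRQ landing between the load and
the store then loses a decrement:

	task context			IRQ context
	percpu_down_read():		file_end_write() -> percpu_up_read():
	  load  read_count (= N)
	  <irq>				  load  read_count (= N)
					  store read_count (= N - 1)
	  </irq>
	  store read_count (= N + 1)	/* the decrement is lost */
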
Fix it by using the IRQ-safe this_cpu_*() for operations on
read_count. This will generate more expensive code on a number of
platforms, which might cause a performance regression for some of the
other percpu-rwsem users.
If any such regression is reported, we can consider alternative
solutions.
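
For reference (simplified from include/asm-generic/percpu.h; the
exact form varies by kernel version): on architectures without native
IRQ-safe per-cpu ops, this_cpu_*() falls back to a generic
implementation that brackets the read-modify-write with an IRQ
disable/enable pair, which is where the extra expense comes from:

	#define this_cpu_generic_to_op(pcp, val, op)			\
	do {								\
		unsigned long __flags;					\
		raw_local_irq_save(__flags);				\
		raw_cpu_generic_to_op(pcp, val, op);			\
		raw_local_irq_restore(__flags);				\
	} while (0)

On x86, by contrast, this_cpu_inc() is a single instruction on a
%gs-relative address and therefore IRQ-safe (and cheap) by
construction.
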
Fixes: 70fe2f4815 ("aio: fix freeze protection of aio writes")
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Link: https://lkml.kernel.org/r/20200915140750.137881-1-houtao1@huawei.com
Parent: 23870f1227
Commit: e6b1a44ecc
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -60,7 +60,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
 	 * anything we did within this RCU-sched read-side critical section.
 	 */
 	if (likely(rcu_sync_is_idle(&sem->rss)))
-		__this_cpu_inc(*sem->read_count);
+		this_cpu_inc(*sem->read_count);
 	else
 		__percpu_down_read(sem, false); /* Unconditional memory barrier */
 	/*
@@ -79,7 +79,7 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 	 * Same as in percpu_down_read().
 	 */
 	if (likely(rcu_sync_is_idle(&sem->rss)))
-		__this_cpu_inc(*sem->read_count);
+		this_cpu_inc(*sem->read_count);
 	else
 		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
 	preempt_enable();
@@ -103,7 +103,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 	 * Same as in percpu_down_read().
 	 */
 	if (likely(rcu_sync_is_idle(&sem->rss))) {
-		__this_cpu_dec(*sem->read_count);
+		this_cpu_dec(*sem->read_count);
 	} else {
 		/*
 		 * slowpath; reader will only ever wake a single blocked
@@ -115,7 +115,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 		 * aggregate zero, as that is the only time it matters) they
 		 * will also see our critical section.
 		 */
-		__this_cpu_dec(*sem->read_count);
+		this_cpu_dec(*sem->read_count);
 		rcuwait_wake_up(&sem->writer);
 	}
 	preempt_enable();
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -45,7 +45,7 @@ EXPORT_SYMBOL_GPL(percpu_free_rwsem);
 
 static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 {
-	__this_cpu_inc(*sem->read_count);
+	this_cpu_inc(*sem->read_count);
 
 	/*
 	 * Due to having preemption disabled the decrement happens on
@@ -71,7 +71,7 @@ static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 	if (likely(!atomic_read_acquire(&sem->block)))
 		return true;
 
-	__this_cpu_dec(*sem->read_count);
+	this_cpu_dec(*sem->read_count);
 
 	/* Prod writer to re-evaluate readers_active_check() */
 	rcuwait_wake_up(&sem->writer);
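
For context, the IRQ-context path that motivated this change is the
aio write-completion side; roughly (call chain sketched from the
v5.9-era fs code, not part of this patch):

	aio_complete_rw()			/* may run from IRQ context */
	  file_end_write(file)
	    __sb_end_write(sb, SB_FREEZE_WRITE)
	      percpu_up_read(sb->s_writers.rw_sem + SB_FREEZE_WRITE - 1)
	        this_cpu_dec(*sem->read_count)	/* now IRQ-safe */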