mmiowb: Hook up mmiowb helpers to spinlocks and generic I/O accessors

Removing explicit calls to mmiowb() from driver code means that we must
now call into the generic mmiowb_spin_{lock,unlock}() functions from the
core spinlock code. In order to elide barriers following critical
sections without any I/O writes, we also hook into the asm-generic I/O
routines.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
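
For drivers, the practical effect is that a trailing mmiowb() before releasing a lock that ordered MMIO writes is no longer needed: the unlock path now issues the barrier itself, and only when a write was actually posted inside the critical section. A minimal sketch of the resulting driver pattern (struct mydev, REG_DOORBELL and its lock/regs fields are hypothetical names used purely for illustration):

#include <linux/io.h>
#include <linux/spinlock.h>

/* Hypothetical device: mydev, REG_DOORBELL, lock and regs are illustrative. */
#define REG_DOORBELL	0x10

struct mydev {
	spinlock_t	lock;
	void __iomem	*regs;
};

static void mydev_kick(struct mydev *dev)
{
	spin_lock(&dev->lock);
	writel(1, dev->regs + REG_DOORBELL);	/* __io_aw() marks an MMIO write as pending */
	/* mmiowb();  -- previously required here, now redundant */
	spin_unlock(&dev->lock);		/* barrier issued here only if a write is pending */
}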
This commit is contained in:
Will Deacon 2019-02-22 12:59:59 +00:00
Parent fdcd06a8ab
Commit 60ca1e5a20
3 changed files with 17 additions and 3 deletions

View file

@@ -19,6 +19,7 @@
 #include <asm-generic/iomap.h>
 #endif
 
+#include <asm/mmiowb.h>
 #include <asm-generic/pci_iomap.h>
 
 #ifndef mmiowb
@@ -49,7 +50,7 @@
 /* serialize device access against a spin_unlock, usually handled there. */
 #ifndef __io_aw
-#define __io_aw()	barrier()
+#define __io_aw()	mmiowb_set_pending()
 #endif
 
 #ifndef __io_pbw
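
For context, the __io_aw() hook redefined above sits at the tail of the asm-generic MMIO write accessors, so every generic writeX() now records a pending write automatically. A simplified sketch of that pattern (the real accessor in this header is guarded by #ifndef writel and also byte-swaps the value via __cpu_to_le32()):

static inline void writel(u32 value, volatile void __iomem *addr)
{
	__io_bw();			/* barrier before the store */
	__raw_writel(value, addr);	/* raw MMIO store */
	__io_aw();			/* after-write hook: now mmiowb_set_pending() */
}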

View file

@@ -57,6 +57,7 @@
 #include <linux/stringify.h>
 #include <linux/bottom_half.h>
 #include <asm/barrier.h>
+#include <asm/mmiowb.h>
 
 /*
@@ -178,6 +179,7 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
 {
 	__acquire(lock);
 	arch_spin_lock(&lock->raw_lock);
+	mmiowb_spin_lock();
 }
 
 #ifndef arch_spin_lock_flags
@@ -189,15 +191,22 @@ do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lo
 {
 	__acquire(lock);
 	arch_spin_lock_flags(&lock->raw_lock, *flags);
+	mmiowb_spin_lock();
 }
 
 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
-	return arch_spin_trylock(&(lock)->raw_lock);
+	int ret = arch_spin_trylock(&(lock)->raw_lock);
+
+	if (ret)
+		mmiowb_spin_lock();
+
+	return ret;
 }
 
 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 {
+	mmiowb_spin_unlock();
 	arch_spin_unlock(&lock->raw_lock);
 	__release(lock);
 }
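
The mmiowb_spin_{lock,unlock}() calls hooked in above pair with the generic tracking introduced earlier in this series. A simplified sketch of how the pieces are expected to interact, assuming the per-CPU state with a nesting count used by asm-generic/mmiowb.h (an approximation for illustration, not the exact implementation):

struct mmiowb_state {
	u16	nesting_count;		/* depth of spinlocks held on this CPU */
	u16	mmiowb_pending;		/* non-zero if MMIO was written under a lock */
};
DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);

static inline void mmiowb_set_pending(void)		/* called from __io_aw() */
{
	struct mmiowb_state *ms = this_cpu_ptr(&__mmiowb_state);

	ms->mmiowb_pending = ms->nesting_count;		/* only sticks inside a critical section */
}

static inline void mmiowb_spin_lock(void)
{
	this_cpu_ptr(&__mmiowb_state)->nesting_count++;
}

static inline void mmiowb_spin_unlock(void)
{
	struct mmiowb_state *ms = this_cpu_ptr(&__mmiowb_state);

	if (unlikely(ms->mmiowb_pending)) {		/* barrier only when a write is pending */
		ms->mmiowb_pending = 0;
		mmiowb();
	}
	ms->nesting_count--;
}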

View file

@@ -111,6 +111,7 @@ void do_raw_spin_lock(raw_spinlock_t *lock)
 {
 	debug_spin_lock_before(lock);
 	arch_spin_lock(&lock->raw_lock);
+	mmiowb_spin_lock();
 	debug_spin_lock_after(lock);
 }
 
@@ -118,8 +119,10 @@ int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int ret = arch_spin_trylock(&lock->raw_lock);
 
-	if (ret)
+	if (ret) {
+		mmiowb_spin_lock();
 		debug_spin_lock_after(lock);
+	}
 #ifndef CONFIG_SMP
 	/*
 	 * Must not happen on UP:
@@ -131,6 +134,7 @@ int do_raw_spin_trylock(raw_spinlock_t *lock)
 void do_raw_spin_unlock(raw_spinlock_t *lock)
 {
+	mmiowb_spin_unlock();
 	debug_spin_unlock(lock);
 	arch_spin_unlock(&lock->raw_lock);
 }