x86/spinlock: Replace ACCESS_ONCE with READ_ONCE
ACCESS_ONCE does not work reliably on non-scalar types. For example, gcc 4.6 and 4.7 might remove the volatile tag for such accesses during the SRA (scalar replacement of aggregates) step (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145).

Change the spinlock code to replace ACCESS_ONCE with READ_ONCE.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Parent: e37c698270
Commit: 4f9d1382e6
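For context, a rough sketch of the difference the commit message describes. This is simplified from the compiler.h definitions of that era, not copied from them, and the fallback behavior shown is only illustrative: ACCESS_ONCE() is a single volatile cast of the lvalue, which gcc's SRA pass may decompose and strip when the type is an aggregate, whereas READ_ONCE() funnels the access through a size-dispatched helper that always performs the load via a volatile scalar pointer.

/*
 * Rough sketch only -- simplified from the 3.19-era macros in
 * include/linux/compiler.h; names and fallbacks are illustrative.
 */
#include <string.h>

/* Volatile cast of the lvalue itself.  For a struct, gcc 4.6/4.7 SRA may
 * split the aggregate into scalars and lose the volatile qualifier. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* Copy the object through a volatile pointer of a scalar type picked by
 * size, so the compiler has to emit a real, single load. */
static inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(unsigned char *)res  = *(volatile unsigned char *)p;  break;
	case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;
	case 4: *(unsigned int *)res   = *(volatile unsigned int *)p;   break;
	case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;
	default: memcpy(res, (const void *)p, size); /* not a single access */
	}
}

#define READ_ONCE(x)						\
({								\
	union { typeof(x) __val; char __c[sizeof(x)]; } __u;	\
	__read_once_size(&(x), __u.__c, sizeof(x));		\
	__u.__val;						\
})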
@@ -92,7 +92,7 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 	unsigned count = SPIN_THRESHOLD;
 
 	do {
-		if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
+		if (READ_ONCE(lock->tickets.head) == inc.tail)
 			goto out;
 		cpu_relax();
 	} while (--count);
@@ -105,7 +105,7 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	arch_spinlock_t old, new;
 
-	old.tickets = ACCESS_ONCE(lock->tickets);
+	old.tickets = READ_ONCE(lock->tickets);
 	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
 		return 0;
 
@@ -162,14 +162,14 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+	struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
 	return tmp.tail != tmp.head;
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
-	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+	struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
 	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
 }
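To make the "non-scalar" point concrete: lock->tickets is a two-field struct, so the readers above fetch head and tail as one snapshot. The following self-contained sketch approximates the x86 ticket-lock layout (type widths and the _sketch function name are assumptions, not taken from this diff) and uses the READ_ONCE sketch shown earlier.

/* Illustrative only: approximates the x86 ticket-lock layout to show the
 * aggregate (non-scalar) read that READ_ONCE must handle. */
typedef unsigned short __ticket_t;

struct __raw_tickets {
	__ticket_t head, tail;
};

typedef struct arch_spinlock {
	union {
		unsigned int head_tail;
		struct __raw_tickets tickets;
	};
} arch_spinlock_t;

static inline int arch_spin_is_locked_sketch(arch_spinlock_t *lock)
{
	/* One READ_ONCE of the whole 4-byte struct: head and tail come from
	 * the same load, which ACCESS_ONCE could not guarantee once SRA
	 * split the aggregate into separate scalar accesses. */
	struct __raw_tickets tmp = READ_ONCE(lock->tickets);

	return tmp.tail != tmp.head;
}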