s390/rwlock: use directed yield for write-locked rwlocks
Add an owner field to the arch_rwlock_t to be able to pass the timeslice
of a virtual CPU with diagnose 0x9c to the lock owner in case the rwlock
is write-locked. The undirected yield in case the rwlock is acquired
writable but the lock is read-locked is removed.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
This commit is contained in:
Parent: 46b05c7bd5
Commit: d59b93da5e
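Editorial summary, not part of the commit: the hunks below apparently touch asm/smp.h, asm/spinlock.h, asm/spinlock_types.h, kernel/smp.c and lib/spinlock.c under arch/s390 (the extraction dropped the file headers). The mechanism can be distilled to the loop below. The helper names match the kernel functions touched by this patch; try_acquire() is a hypothetical stand-in for the compare-and-swap step, and the function as a whole is a simplified sketch of the four _raw_*_lock_wait variants, not code from the patch.

/* Sketch: the directed-yield retry loop this patch adds. */
static void lock_wait_sketch(arch_rwlock_t *rw)
{
        unsigned int owner = 0;
        int count = spin_retry;

        while (!try_acquire(rw)) {              /* hypothetical acquire step */
                if (count-- <= 0) {
                        /* Directed yield: donate the timeslice to the
                         * write owner via diagnose 0x9c, but only if
                         * that virtual CPU is not currently running. */
                        if (owner && !smp_vcpu_scheduled(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                owner = ACCESS_ONCE(rw->owner);
        }
}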
@@ -29,7 +29,6 @@ extern int smp_find_processor_id(u16 address);
 extern int smp_store_status(int cpu);
 extern int smp_vcpu_scheduled(int cpu);
 extern void smp_yield_cpu(int cpu);
-extern void smp_yield(void);
 extern void smp_cpu_set_polarization(int cpu, int val);
 extern int smp_cpu_get_polarization(int cpu);
 extern void smp_fill_possible_mask(void);
@@ -50,7 +49,6 @@ static inline int smp_find_processor_id(u16 address) { return 0; }
 static inline int smp_store_status(int cpu) { return 0; }
 static inline int smp_vcpu_scheduled(int cpu) { return 1; }
 static inline void smp_yield_cpu(int cpu) { }
-static inline void smp_yield(void) { }
 static inline void smp_fill_possible_mask(void) { }
 
 #endif /* CONFIG_SMP */
@@ -37,11 +37,17 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
  * (the type definitions are in asm/spinlock_types.h)
  */
 
+void arch_lock_relax(unsigned int cpu);
+
 void arch_spin_lock_wait(arch_spinlock_t *);
 int arch_spin_trylock_retry(arch_spinlock_t *);
-void arch_spin_relax(arch_spinlock_t *);
 void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 
+static inline void arch_spin_relax(arch_spinlock_t *lock)
+{
+        arch_lock_relax(lock->lock);
+}
+
 static inline u32 arch_spin_lockval(int cpu)
 {
         return ~cpu;
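An editorial aside on the ~cpu encoding above: storing the one's complement means a lock or owner word of 0 always reads as "unlocked / no owner", because even CPU 0 encodes to a non-zero value, and ~owner recovers the CPU number to yield to. A standalone demonstration (plain C, illustration only, not kernel code):

#include <stdio.h>

/* Demonstrates the one's-complement CPU encoding used by
 * arch_spin_lockval(): even CPU 0 encodes to a non-zero value. */
int main(void)
{
        for (int cpu = 0; cpu < 3; cpu++) {
                unsigned int lockval = ~cpu;    /* stored in the lock/owner word */
                printf("cpu %d -> lockval %#x -> decoded %d\n",
                       cpu, lockval, (int)~lockval);
        }
        return 0;
}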
@@ -170,17 +176,21 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 {
         if (!arch_write_trylock_once(rw))
                 _raw_write_lock_wait(rw);
+        rw->owner = SPINLOCK_LOCKVAL;
 }
 
 static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
         if (!arch_write_trylock_once(rw))
                 _raw_write_lock_wait_flags(rw, flags);
+        rw->owner = SPINLOCK_LOCKVAL;
 }
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
         typecheck(unsigned int, rw->lock);
+
+        rw->owner = 0;
         asm volatile(
                 __ASM_BARRIER
                 "st %1,%0\n"
@@ -198,12 +208,20 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-        if (!arch_write_trylock_once(rw))
-                return _raw_write_trylock_retry(rw);
+        if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
+                return 0;
+        rw->owner = SPINLOCK_LOCKVAL;
         return 1;
 }
 
-#define arch_read_relax(lock)   cpu_relax()
-#define arch_write_relax(lock)  cpu_relax()
+static inline void arch_read_relax(arch_rwlock_t *rw)
+{
+        arch_lock_relax(rw->owner);
+}
+
+static inline void arch_write_relax(arch_rwlock_t *rw)
+{
+        arch_lock_relax(rw->owner);
+}
 
 #endif /* __ASM_SPINLOCK_H */
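Editorial note: converting arch_read_relax()/arch_write_relax() from plain cpu_relax() macros into inline functions is what routes the relax callbacks to the directed yield. A hedged sketch of a caller; example_write_lock_slowpath is hypothetical, while the two arch_* functions are the ones defined above:

/* Hypothetical caller, for illustration only. */
static void example_write_lock_slowpath(arch_rwlock_t *rw)
{
        while (!arch_write_trylock(rw))
                arch_write_relax(rw);   /* yields to rw->owner when set */
}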
@@ -13,6 +13,7 @@ typedef struct {
 
 typedef struct {
         unsigned int lock;
+        unsigned int owner;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED         { 0 }
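A small point worth recording here: __ARCH_RW_LOCK_UNLOCKED can stay { 0 } even though the struct gained a member, because C zero-initializes the remaining members of a partially initialized aggregate:

arch_rwlock_t rw = __ARCH_RW_LOCK_UNLOCKED;     /* .lock == 0, .owner == 0 (no owner) */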
@@ -333,12 +333,6 @@ int smp_vcpu_scheduled(int cpu)
         return pcpu_running(pcpu_devices + cpu);
 }
 
-void smp_yield(void)
-{
-        if (MACHINE_HAS_DIAG44)
-                asm volatile("diag 0,0,0x44");
-}
-
 void smp_yield_cpu(int cpu)
 {
         if (MACHINE_HAS_DIAG9C)
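For reference: diagnose 0x44 is an undirected yield, the virtual CPU simply gives up its timeslice to whichever VCPU the hypervisor picks, while diagnose 0x9c, issued by smp_yield_cpu(), hands the timeslice to one specific virtual CPU. Replacing the former with the latter wherever the owner is known is the point of this patch, which is why smp_yield() can be deleted outright.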
@@ -98,17 +98,6 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
-void arch_spin_relax(arch_spinlock_t *lp)
-{
-        unsigned int cpu = lp->lock;
-        if (cpu != 0) {
-                if (MACHINE_IS_VM || MACHINE_IS_KVM ||
-                    !smp_vcpu_scheduled(~cpu))
-                        smp_yield_cpu(~cpu);
-        }
-}
-EXPORT_SYMBOL(arch_spin_relax);
-
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
         int count;
@@ -122,15 +111,18 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
 
 void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
-        unsigned int old;
+        unsigned int owner, old;
         int count = spin_retry;
 
+        owner = 0;
         while (1) {
                 if (count-- <= 0) {
-                        smp_yield();
+                        if (owner && !smp_vcpu_scheduled(~owner))
+                                smp_yield_cpu(~owner);
                         count = spin_retry;
                 }
                 old = ACCESS_ONCE(rw->lock);
+                owner = ACCESS_ONCE(rw->owner);
                 if ((int) old < 0)
                         continue;
                 if (_raw_compare_and_swap(&rw->lock, old, old + 1))
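Editorial observation on the loop above (and its three siblings below): rw->owner is sampled with ACCESS_ONCE separately from rw->lock, so the value used for the yield can be momentarily stale. That appears to be harmless by design: a directed yield to the wrong CPU is only a wasted performance hint, never a correctness problem, and owner starts at 0 so no yield is attempted before an owner has been observed.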
@@ -141,16 +133,19 @@ EXPORT_SYMBOL(_raw_read_lock_wait);
 
 void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-        unsigned int old;
+        unsigned int owner, old;
         int count = spin_retry;
 
         local_irq_restore(flags);
+        owner = 0;
         while (1) {
                 if (count-- <= 0) {
-                        smp_yield();
+                        if (owner && !smp_vcpu_scheduled(~owner))
+                                smp_yield_cpu(~owner);
                         count = spin_retry;
                 }
                 old = ACCESS_ONCE(rw->lock);
+                owner = ACCESS_ONCE(rw->owner);
                 if ((int) old < 0)
                         continue;
                 local_irq_disable();
@@ -179,15 +174,18 @@ EXPORT_SYMBOL(_raw_read_trylock_retry);
 
 void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
-        unsigned int old;
+        unsigned int owner, old;
         int count = spin_retry;
 
+        owner = 0;
         while (1) {
                 if (count-- <= 0) {
-                        smp_yield();
+                        if (owner && !smp_vcpu_scheduled(~owner))
+                                smp_yield_cpu(~owner);
                         count = spin_retry;
                 }
                 old = ACCESS_ONCE(rw->lock);
+                owner = ACCESS_ONCE(rw->owner);
                 if (old)
                         continue;
                 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
@@ -198,16 +196,19 @@ EXPORT_SYMBOL(_raw_write_lock_wait);
 
 void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-        unsigned int old;
+        unsigned int owner, old;
         int count = spin_retry;
 
         local_irq_restore(flags);
+        owner = 0;
         while (1) {
                 if (count-- <= 0) {
-                        smp_yield();
+                        if (owner && !smp_vcpu_scheduled(~owner))
+                                smp_yield_cpu(~owner);
                         count = spin_retry;
                 }
                 old = ACCESS_ONCE(rw->lock);
+                owner = ACCESS_ONCE(rw->owner);
                 if (old)
                         continue;
                 local_irq_disable();
@@ -233,3 +234,13 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
         return 0;
 }
 EXPORT_SYMBOL(_raw_write_trylock_retry);
+
+void arch_lock_relax(unsigned int cpu)
+{
+        if (!cpu)
+                return;
+        if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
+                return;
+        smp_yield_cpu(~cpu);
+}
+EXPORT_SYMBOL(arch_lock_relax);
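Editorial note on arch_lock_relax(): its condition inverts the logic of the removed arch_spin_relax(). The old code yielded when running under z/VM or KVM, or when the owner was not scheduled; the new code returns early, and therefore just keeps spinning, only when running in an LPAR with the owner currently running, and yields in every other case. The !cpu check covers both the unlocked case and the window where the owner field has not been set yet.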