x86: Cleanup rwsem_count_t typedef
Remove the typedef which has no real reason to be there. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: David Howells <dhowells@redhat.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Matt Turner <mattst88@gmail.com> Cc: Tony Luck <tony.luck@intel.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Paul Mundt <lethal@linux-sh.org> Cc: David Miller <davem@davemloft.net> Cc: Chris Zankel <chris@zankel.net> LKML-Reference: <20110126195833.580335506@linutronix.de>
This commit is contained in:
Parent
c16a87ce06
Commit
bde11efbc2
|
@ -68,10 +68,8 @@ extern asmregparm struct rw_semaphore *
|
||||||
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
|
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
|
||||||
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
|
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
|
||||||
|
|
||||||
typedef signed long rwsem_count_t;
|
|
||||||
|
|
||||||
struct rw_semaphore {
|
struct rw_semaphore {
|
||||||
rwsem_count_t count;
|
long count;
|
||||||
spinlock_t wait_lock;
|
spinlock_t wait_lock;
|
||||||
struct list_head wait_list;
|
struct list_head wait_list;
|
||||||
#ifdef CONFIG_DEBUG_LOCK_ALLOC
|
#ifdef CONFIG_DEBUG_LOCK_ALLOC
|
||||||
|
@ -127,7 +125,7 @@ static inline void __down_read(struct rw_semaphore *sem)
|
||||||
*/
|
*/
|
||||||
static inline int __down_read_trylock(struct rw_semaphore *sem)
|
static inline int __down_read_trylock(struct rw_semaphore *sem)
|
||||||
{
|
{
|
||||||
rwsem_count_t result, tmp;
|
long result, tmp;
|
||||||
asm volatile("# beginning __down_read_trylock\n\t"
|
asm volatile("# beginning __down_read_trylock\n\t"
|
||||||
" mov %0,%1\n\t"
|
" mov %0,%1\n\t"
|
||||||
"1:\n\t"
|
"1:\n\t"
|
||||||
|
@ -149,7 +147,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
|
||||||
*/
|
*/
|
||||||
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
|
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
|
||||||
{
|
{
|
||||||
rwsem_count_t tmp;
|
long tmp;
|
||||||
asm volatile("# beginning down_write\n\t"
|
asm volatile("# beginning down_write\n\t"
|
||||||
LOCK_PREFIX " xadd %1,(%2)\n\t"
|
LOCK_PREFIX " xadd %1,(%2)\n\t"
|
||||||
/* adds 0xffff0001, returns the old value */
|
/* adds 0xffff0001, returns the old value */
|
||||||
|
@ -174,9 +172,8 @@ static inline void __down_write(struct rw_semaphore *sem)
|
||||||
*/
|
*/
|
||||||
static inline int __down_write_trylock(struct rw_semaphore *sem)
|
static inline int __down_write_trylock(struct rw_semaphore *sem)
|
||||||
{
|
{
|
||||||
rwsem_count_t ret = cmpxchg(&sem->count,
|
long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
|
||||||
RWSEM_UNLOCKED_VALUE,
|
RWSEM_ACTIVE_WRITE_BIAS);
|
||||||
RWSEM_ACTIVE_WRITE_BIAS);
|
|
||||||
if (ret == RWSEM_UNLOCKED_VALUE)
|
if (ret == RWSEM_UNLOCKED_VALUE)
|
||||||
return 1;
|
return 1;
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -187,7 +184,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
|
||||||
*/
|
*/
|
||||||
static inline void __up_read(struct rw_semaphore *sem)
|
static inline void __up_read(struct rw_semaphore *sem)
|
||||||
{
|
{
|
||||||
rwsem_count_t tmp;
|
long tmp;
|
||||||
asm volatile("# beginning __up_read\n\t"
|
asm volatile("# beginning __up_read\n\t"
|
||||||
LOCK_PREFIX " xadd %1,(%2)\n\t"
|
LOCK_PREFIX " xadd %1,(%2)\n\t"
|
||||||
/* subtracts 1, returns the old value */
|
/* subtracts 1, returns the old value */
|
||||||
|
@ -205,7 +202,7 @@ static inline void __up_read(struct rw_semaphore *sem)
|
||||||
*/
|
*/
|
||||||
static inline void __up_write(struct rw_semaphore *sem)
|
static inline void __up_write(struct rw_semaphore *sem)
|
||||||
{
|
{
|
||||||
rwsem_count_t tmp;
|
long tmp;
|
||||||
asm volatile("# beginning __up_write\n\t"
|
asm volatile("# beginning __up_write\n\t"
|
||||||
LOCK_PREFIX " xadd %1,(%2)\n\t"
|
LOCK_PREFIX " xadd %1,(%2)\n\t"
|
||||||
/* subtracts 0xffff0001, returns the old value */
|
/* subtracts 0xffff0001, returns the old value */
|
||||||
|
@ -241,8 +238,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
|
||||||
/*
|
/*
|
||||||
* implement atomic add functionality
|
* implement atomic add functionality
|
||||||
*/
|
*/
|
||||||
static inline void rwsem_atomic_add(rwsem_count_t delta,
|
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
|
||||||
struct rw_semaphore *sem)
|
|
||||||
{
|
{
|
||||||
asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
|
asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
|
||||||
: "+m" (sem->count)
|
: "+m" (sem->count)
|
||||||
|
@ -252,10 +248,9 @@ static inline void rwsem_atomic_add(rwsem_count_t delta,
|
||||||
/*
|
/*
|
||||||
* implement exchange and add functionality
|
* implement exchange and add functionality
|
||||||
*/
|
*/
|
||||||
static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
|
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
|
||||||
struct rw_semaphore *sem)
|
|
||||||
{
|
{
|
||||||
rwsem_count_t tmp = delta;
|
long tmp = delta;
|
||||||
|
|
||||||
asm volatile(LOCK_PREFIX "xadd %0,%1"
|
asm volatile(LOCK_PREFIX "xadd %0,%1"
|
||||||
: "+r" (tmp), "+m" (sem->count)
|
: "+r" (tmp), "+m" (sem->count)
|
||||||
|
|
Loading…
Reference in new issue