sh: consolidate atomic_cmpxchg()/atomic_add_unless() definitions.

The LL/SC and IRQ versions were using generic stubs while the GRB version
was just reimplementing what it already had for the standard cmpxchg() code.
As we have optimized cmpxchg() implementations that are decoupled from the
atomic code, simply falling back on the generic wrapper does the right thing.
With this in place the GRB case is unaffected while the LL/SC case gets to
use its optimized cmpxchg().

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Parent: 56d45b62ce
Commit: 8c0b8139c8
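For reference, the generic wrapper that the LL/SC and IRQ cases now fall back on is just a cmpxchg() retry loop: reread the counter, bail out if it already holds the forbidden value, otherwise try to install the incremented value and retry on contention. The sketch below is a user-space illustration of that pattern using C11 atomics rather than the kernel's cmpxchg()/atomic_t API; the function name add_unless() is invented for the example and is not part of the kernel change shown in the diff.

/* Stand-alone sketch of the cmpxchg()-based retry loop used by the
 * generic atomic_add_unless().  Uses C11 <stdatomic.h>, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

/* Add @a to @v unless @v currently holds @u; return non-zero if the add
 * happened (i.e. the observed value was not @u). */
static int add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	for (;;) {
		if (c == u)
			break;
		/* Try to swap c -> c + a.  On failure, c is reloaded with the
		 * current value and the loop retries, like the kernel loop. */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			break;
	}

	return c != u;
}

int main(void)
{
	atomic_int v = 1;

	printf("%d\n", add_unless(&v, 1, 0));	/* 1: added, v is now 2 */
	printf("%d\n", add_unless(&v, 1, 2));	/* 0: v was 2, left untouched */
	printf("%d\n", atomic_load(&v));	/* 2 */
	return 0;
}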
arch/sh/include/asm/atomic-grb.h
@@ -120,50 +120,4 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 	: "memory" , "r0", "r1");
 }
 
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-	int ret;
-
-	__asm__ __volatile__ (
-		"   .align 2          \n\t"
-		"   mova    1f,   r0  \n\t"
-		"   nop               \n\t"
-		"   mov    r15,   r1  \n\t"
-		"   mov    #-8,  r15  \n\t"
-		"   mov.l  @%1,   %0  \n\t"
-		"   cmp/eq  %2,   %0  \n\t"
-		"   bf      1f        \n\t"
-		"   mov.l   %3,  @%1  \n\t"
-		"1: mov     r1,  r15  \n\t"
-		: "=&r" (ret)
-		: "r" (v), "r" (old), "r" (new)
-		: "memory" , "r0", "r1" , "t");
-
-	return ret;
-}
-
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int ret;
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-		"   .align 2          \n\t"
-		"   mova    1f,   r0  \n\t"
-		"   nop               \n\t"
-		"   mov    r15,   r1  \n\t"
-		"   mov    #-12, r15  \n\t"
-		"   mov.l  @%2,   %1  \n\t"
-		"   mov     %1,   %0  \n\t"
-		"   cmp/eq  %4,   %0  \n\t"
-		"   bt/s    1f        \n\t"
-		"    add    %3,   %1  \n\t"
-		"   mov.l   %1,  @%2  \n\t"
-		"1: mov     r1,  r15  \n\t"
-		: "=&r" (ret), "=&r" (tmp)
-		: "r" (v), "r" (a), "r" (u)
-		: "memory" , "r0", "r1" , "t");
-
-	return ret != u;
-}
 #endif /* __ASM_SH_ATOMIC_GRB_H */
arch/sh/include/asm/atomic-llsc.h
@@ -104,31 +104,4 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 	: "t");
 }
 
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-
-/**
- * atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
- */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
-	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-
-	return c != (u);
-}
-
 #endif /* __ASM_SH_ATOMIC_LLSC_H */
arch/sh/include/asm/atomic.h
@@ -25,58 +25,43 @@
 #endif
 
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
 #define atomic_dec_return(v)		atomic_sub_return(1, (v))
 #define atomic_inc_return(v)		atomic_add_return(1, (v))
 
 /*
  * atomic_inc_and_test - increment and test
  * @v: pointer of type atomic_t
  *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
  * other cases.
  */
 #define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
 
 #define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
 #define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
 
 #define atomic_inc(v)			atomic_add(1, (v))
 #define atomic_dec(v)			atomic_sub(1, (v))
 
-#if !defined(CONFIG_GUSA_RB) && !defined(CONFIG_CPU_SH4A)
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-	int ret;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	ret = v->counter;
-	if (likely(ret == old))
-		v->counter = new;
-	local_irq_restore(flags);
-
-	return ret;
-}
+#define atomic_xchg(v, new)		(xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, o, n)		(cmpxchg(&((v)->counter), (o), (n)))
 
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int ret;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	ret = v->counter;
-	if (ret != u)
-		v->counter += a;
-	local_irq_restore(flags);
-
-	return ret != u;
-}
-#endif /* !CONFIG_GUSA_RB && !CONFIG_CPU_SH4A */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+
+	return c != (u);
+}
 
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()