[PATCH] m68k: fix cmpxchg compile errors if CONFIG_RMW_INSNS=n
We require that all archs implement atomic_cmpxchg(), for the generic version of atomic_add_unless().

Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Родитель
b707dbe6c5
Коммит
7b61fcda8a
|
@ -55,6 +55,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_RMW_INSNS
|
||||
|
||||
static inline int atomic_add_return(int i, atomic_t *v)
|
||||
{
|
||||
int t, tmp;
|
||||
|
@ -82,7 +83,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
|
|||
: "g" (i), "2" (atomic_read(v)));
|
||||
return t;
|
||||
}
|
||||
|
||||
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
|
||||
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
|
||||
|
||||
#else /* !CONFIG_RMW_INSNS */
|
||||
|
||||
static inline int atomic_add_return(int i, atomic_t * v)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
@ -110,6 +116,32 @@ static inline int atomic_sub_return(int i, atomic_t * v)
|
|||
|
||||
return t;
|
||||
}
|
||||
|
||||
/*
 * Compare-and-exchange emulation for the !CONFIG_RMW_INSNS case:
 * the read/compare/conditional-store sequence is made atomic by
 * disabling local interrupts around it.
 * NOTE(review): irq masking only excludes local preemption/interrupts,
 * not other CPUs — presumably this config is UP-only; confirm.
 *
 * Returns the value of *v observed before any store (callers compare
 * it against @old to detect success).
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = atomic_read(v);
	if (ret == old)
		atomic_set(v, new);
	local_irq_restore(flags);

	return ret;
}
|
||||
|
||||
/*
 * Unconditional exchange emulation for the !CONFIG_RMW_INSNS case:
 * read the old value and store the new one inside an
 * interrupt-disabled critical section.
 *
 * Returns the previous value of *v.
 */
static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);

	return ret;
}
|
||||
|
||||
#endif /* !CONFIG_RMW_INSNS */
|
||||
|
||||
#define atomic_dec_return(v) atomic_sub_return(1, (v))
|
||||
|
@ -139,9 +171,6 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
|
|||
__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
|
||||
}
|
||||
|
||||
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
|
||||
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
|
||||
|
||||
#define atomic_add_unless(v, a, u) \
|
||||
({ \
|
||||
int c, old; \
|
||||
|
|
Загрузка…
Ссылка в новой задаче