sh: Fixup movli.l/movco.l atomic ops for gcc4.

gcc4 gets a bit pissy about the outputs:

include/asm/atomic.h: In function 'atomic_add':
include/asm/atomic.h:37: error: invalid lvalue in asm statement
include/asm/atomic.h:30: error: invalid lvalue in asm output 1
...

This ended up being a thinko anyway, so just fix it up.

Verified for proper behaviour with the older toolchains, too.
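
On SH-4A, movli.l/movco.l are the load-locked/store-conditional pair, so the
store back to the counter happens inside the asm body, not through an output
operand. A minimal standalone sketch of the constraint problem (hypothetical
example, not code from this tree):

	int x;

	void broken(void)
	{
		/* gcc4 rejects this: &x is an address expression, i.e. an
		 * rvalue, so there is no lvalue for the output to be stored
		 * into -- "invalid lvalue in asm output". */
		__asm__ __volatile__ ("" : "=r" (&x));
	}

	void fixed(void)
	{
		/* The address belongs in the input list; any store through
		 * it is performed by the asm body itself. */
		__asm__ __volatile__ ("" : : "r" (&x));
	}

With the bogus output gone, the remaining operands renumber down by one,
which is why %2/%3 become %1/%2 in the asm bodies below.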

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Paul Mundt 2006-12-01 14:32:54 +09:00
Parent: bd156147eb
Commit: c03c69610b
1 changed file with 24 additions and 24 deletions


@@ -28,11 +28,11 @@ static inline void atomic_add(int i, atomic_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0	! atomic_add	\n"
-"	add	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
+"1:	movli.l @%2, %0	! atomic_add	\n"
+"	add	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
 "	bf	1b			\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -50,11 +50,11 @@ static inline void atomic_sub(int i, atomic_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0	! atomic_sub	\n"
-"	sub	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
+"1:	movli.l @%2, %0	! atomic_sub	\n"
+"	sub	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
 "	bf	1b			\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -80,12 +80,12 @@ static inline int atomic_add_return(int i, atomic_t *v)
 
 #ifdef CONFIG_CPU_SH4A
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0	! atomic_add_return	\n"
-"	add	%2, %0				\n"
-"	movco.l	%0, @%3				\n"
+"1:	movli.l @%2, %0	! atomic_add_return	\n"
+"	add	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
 "	bf	1b				\n"
 "	synco					\n"
-	: "=&z" (temp), "=r" (&v->counter)
+	: "=&z" (temp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -109,12 +109,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 #ifdef CONFIG_CPU_SH4A
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0	! atomic_sub_return	\n"
-"	sub	%2, %0				\n"
-"	movco.l	%0, @%3				\n"
+"1:	movli.l @%2, %0	! atomic_sub_return	\n"
+"	sub	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
 "	bf	1b				\n"
 "	synco					\n"
-	: "=&z" (temp), "=r" (&v->counter)
+	: "=&z" (temp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -186,11 +186,11 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0	! atomic_clear_mask	\n"
-"	and	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
+"1:	movli.l @%2, %0	! atomic_clear_mask	\n"
+"	and	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
 "	bf	1b			\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (~mask), "r" (&v->counter)
 	: "t");
 #else
@@ -208,11 +208,11 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0	! atomic_set_mask	\n"
-"	or	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
+"1:	movli.l @%2, %0	! atomic_set_mask	\n"
+"	or	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
 "	bf	1b			\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (mask), "r" (&v->counter)
 	: "t");
 #else