Merge branch 'locking/arch-atomic' into locking/core, because it's ready for upstream
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Commit
f52609fdab
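
The pattern repeated across every architecture in the diff below: the per-arch ATOMIC_OP()-style macros gain an explicit asm_op (or c_op) parameter so that atomic_and(), atomic_or() and atomic_xor() can be stamped out from the same read-modify-write template that add/sub already use, and callers of the old atomic_set_mask()/atomic_clear_mask() interfaces move to atomic_or()/atomic_andnot(). A minimal stand-alone sketch of that macro technique, using C11 atomics as a stand-in for each architecture's ll/sc or interrupt-disable sequence (the my_* names are hypothetical illustrations, not kernel API):

#include <stdatomic.h>

typedef struct { atomic_int counter; } my_atomic_t;

/* One template, instantiated per operation: 'op' names the generated
 * function, 'c_op' is the operator applied inside the retry loop (the
 * loop stands in for the arch-specific ll/sc or cmpxchg sequence). */
#define MY_ATOMIC_OP(op, c_op)						\
static inline void my_atomic_##op(int i, my_atomic_t *v)		\
{									\
	int old = atomic_load_explicit(&v->counter,			\
				       memory_order_relaxed);		\
	while (!atomic_compare_exchange_weak(&v->counter, &old,		\
					     old c_op i))		\
		;							\
}

MY_ATOMIC_OP(and, &)
MY_ATOMIC_OP(or,  |)
MY_ATOMIC_OP(xor, ^)

Instantiating the template once per operator is what the ATOMIC_OPS()/ATOMIC_OP() lines in the hunks below do with the real per-arch primitives.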
@@ -29,13 +29,13 @@
  * branch back to restart the operation.
  */
 
-#define ATOMIC_OP(op) \
+#define ATOMIC_OP(op, asm_op) \
 static __inline__ void atomic_##op(int i, atomic_t * v) \
 { \
 	unsigned long temp; \
 	__asm__ __volatile__( \
 	"1: ldl_l %0,%1\n" \
-	" " #op "l %0,%2,%0\n" \
+	" " #asm_op " %0,%2,%0\n" \
 	" stl_c %0,%1\n" \
 	" beq %0,2f\n" \
 	".subsection 2\n" \
@@ -45,15 +45,15 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
 	:"Ir" (i), "m" (v->counter)); \
 } \
 
-#define ATOMIC_OP_RETURN(op) \
+#define ATOMIC_OP_RETURN(op, asm_op) \
 static inline int atomic_##op##_return(int i, atomic_t *v) \
 { \
 	long temp, result; \
 	smp_mb(); \
 	__asm__ __volatile__( \
 	"1: ldl_l %0,%1\n" \
-	" " #op "l %0,%3,%2\n" \
-	" " #op "l %0,%3,%0\n" \
+	" " #asm_op " %0,%3,%2\n" \
+	" " #asm_op " %0,%3,%0\n" \
 	" stl_c %0,%1\n" \
 	" beq %0,2f\n" \
 	".subsection 2\n" \
@@ -65,13 +65,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	return result; \
 }
 
-#define ATOMIC64_OP(op) \
+#define ATOMIC64_OP(op, asm_op) \
 static __inline__ void atomic64_##op(long i, atomic64_t * v) \
 { \
 	unsigned long temp; \
 	__asm__ __volatile__( \
 	"1: ldq_l %0,%1\n" \
-	" " #op "q %0,%2,%0\n" \
+	" " #asm_op " %0,%2,%0\n" \
 	" stq_c %0,%1\n" \
 	" beq %0,2f\n" \
 	".subsection 2\n" \
@@ -81,15 +81,15 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
 	:"Ir" (i), "m" (v->counter)); \
 } \
 
-#define ATOMIC64_OP_RETURN(op) \
+#define ATOMIC64_OP_RETURN(op, asm_op) \
 static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
 { \
 	long temp, result; \
 	smp_mb(); \
 	__asm__ __volatile__( \
 	"1: ldq_l %0,%1\n" \
-	" " #op "q %0,%3,%2\n" \
-	" " #op "q %0,%3,%0\n" \
+	" " #asm_op " %0,%3,%2\n" \
+	" " #asm_op " %0,%3,%0\n" \
 	" stq_c %0,%1\n" \
 	" beq %0,2f\n" \
 	".subsection 2\n" \
@@ -101,15 +101,27 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
 	return result; \
 }
 
-#define ATOMIC_OPS(opg) \
-	ATOMIC_OP(opg) \
-	ATOMIC_OP_RETURN(opg) \
-	ATOMIC64_OP(opg) \
-	ATOMIC64_OP_RETURN(opg)
+#define ATOMIC_OPS(op) \
+	ATOMIC_OP(op, op##l) \
+	ATOMIC_OP_RETURN(op, op##l) \
+	ATOMIC64_OP(op, op##q) \
+	ATOMIC64_OP_RETURN(op, op##q)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define atomic_andnot atomic_andnot
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, bis)
+ATOMIC_OP(xor, xor)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, bis)
+ATOMIC64_OP(xor, xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP

@@ -143,9 +143,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
-ATOMIC_OP(and, &=, and)
 
-#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(andnot, &= ~, bic)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN

@@ -194,6 +194,13 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(andnot, &= ~, bic)
+ATOMIC_OP(or, |=, orr)
+ATOMIC_OP(xor, ^=, eor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -321,6 +328,13 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
 ATOMIC64_OPS(add, adds, adc)
 ATOMIC64_OPS(sub, subs, sbc)
 
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC64_OP(and, and, and)
+ATOMIC64_OP(andnot, bic, bic)
+ATOMIC64_OP(or, orr, orr)
+ATOMIC64_OP(xor, eor, eor)
+
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP

@@ -85,6 +85,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 ATOMIC_OPS(add, add)
 ATOMIC_OPS(sub, sub)
 
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, orr)
+ATOMIC_OP(xor, eor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -183,6 +190,13 @@ static inline long atomic64_##op##_return(long i, atomic64_t *v) \
 ATOMIC64_OPS(add, add)
 ATOMIC64_OPS(sub, sub)
 
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, orr)
+ATOMIC64_OP(xor, eor)
+
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP

@@ -44,6 +44,18 @@ static inline int __atomic_##op##_return(int i, atomic_t *v) \
 ATOMIC_OP_RETURN(sub, sub, rKs21)
 ATOMIC_OP_RETURN(add, add, r)
 
+#define ATOMIC_OP(op, asm_op) \
+ATOMIC_OP_RETURN(op, asm_op, r) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+	(void)__atomic_##op##_return(i, v); \
+}
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(or, or)
+ATOMIC_OP(xor, eor)
+
+#undef ATOMIC_OP
 #undef ATOMIC_OP_RETURN
 
 /*

@@ -16,19 +16,21 @@
 #include <linux/types.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 
 #define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
 
-#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
-#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
+#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
 
-#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
-#define atomic_set_mask(m, v) __raw_atomic_set_asm(&(v)->counter, m)
+#define atomic_or(i, v) (void)__raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
 
 #endif
 
@@ -83,11 +83,12 @@ EXPORT_SYMBOL(insl);
 EXPORT_SYMBOL(insl_16);
 
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(__raw_atomic_update_asm);
-EXPORT_SYMBOL(__raw_atomic_clear_asm);
-EXPORT_SYMBOL(__raw_atomic_set_asm);
+EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_and_asm);
+EXPORT_SYMBOL(__raw_atomic_or_asm);
+EXPORT_SYMBOL(__raw_atomic_xor_asm);
 EXPORT_SYMBOL(__raw_atomic_test_asm);
 
 EXPORT_SYMBOL(__raw_xchg_1_asm);
 EXPORT_SYMBOL(__raw_xchg_2_asm);
 EXPORT_SYMBOL(__raw_xchg_4_asm);

@@ -587,10 +587,10 @@ ENDPROC(___raw_write_unlock_asm)
  * r0 = ptr
  * r1 = value
  *
- * Add a signed value to a 32bit word and return the new value atomically.
+ * ADD a signed value to a 32bit word and return the new value atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_update_asm)
+ENTRY(___raw_atomic_add_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -603,19 +603,19 @@ ENTRY(___raw_atomic_update_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_update_asm)
+ENDPROC(___raw_atomic_add_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
 *
- * Clear the mask bits from a 32bit word and return the old 32bit value
+ * AND the mask bits from a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_clear_asm)
+ENTRY(___raw_atomic_and_asm)
 	p1 = r0;
-	r3 = ~r1;
+	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
 	r2 = [p1];
@@ -627,17 +627,17 @@ ENTRY(___raw_atomic_clear_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_clear_asm)
+ENDPROC(___raw_atomic_and_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
 *
- * Set the mask bits into a 32bit word and return the old 32bit value
+ * OR the mask bits into a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_set_asm)
+ENTRY(___raw_atomic_or_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -651,7 +651,7 @@ ENTRY(___raw_atomic_set_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_set_asm)
+ENDPROC(___raw_atomic_or_asm)
 
 /*
  * r0 = ptr
@@ -787,7 +787,7 @@ ENTRY(___raw_bit_set_asm)
 	r2 = r1;
 	r1 = 1;
 	r1 <<= r2;
-	jump ___raw_atomic_set_asm
+	jump ___raw_atomic_or_asm
 ENDPROC(___raw_bit_set_asm)
 
 /*
@@ -798,10 +798,10 @@ ENDPROC(___raw_bit_set_asm)
  * Clobbers: r3:0, p1:0
  */
 ENTRY(___raw_bit_clear_asm)
-	r2 = r1;
-	r1 = 1;
-	r1 <<= r2;
-	jump ___raw_atomic_clear_asm
+	r2 = 1;
+	r2 <<= r1;
+	r1 = ~r2;
+	jump ___raw_atomic_and_asm
 ENDPROC(___raw_bit_clear_asm)
 
 /*

@@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 	local_irq_save(flags);
 	for_each_cpu(cpu, cpumask) {
 		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-		atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+		atomic_or((1 << msg), &bfin_ipi_data->bits);
 		atomic_inc(&bfin_ipi_data->count);
 	}
 	local_irq_restore(flags);

@@ -15,7 +15,6 @@
 #define _ASM_ATOMIC_H
 
 #include <linux/types.h>
-#include <asm/spr-regs.h>
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
@@ -23,6 +22,8 @@
 #error not SMP safe
 #endif
 
+#include <asm/atomic_defs.h>
+
 /*
  * Atomic operations that C can't guarantee us. Useful for
  * resource counting etc..
@@ -34,56 +35,26 @@
 #define atomic_read(v) ACCESS_ONCE((v)->counter)
 #define atomic_set(v, i) (((v)->counter) = (i))
 
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-static inline int atomic_add_return(int i, atomic_t *v)
+static inline int atomic_inc_return(atomic_t *v)
+{
+	return __atomic_add_return(1, &v->counter);
+}
+
+static inline int atomic_dec_return(atomic_t *v)
+{
+	return __atomic_sub_return(1, &v->counter);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
 {
-	unsigned long val;
-
-	asm("0: \n"
-	    " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
-	    " ckeq icc3,cc7 \n"
-	    " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
-	    " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
-	    " add%I2 %1,%2,%1 \n"
-	    " cst.p %1,%M0 ,cc3,#1 \n"
-	    " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
-	    " beq icc3,#0,0b \n"
-	    : "+U"(v->counter), "=&r"(val)
-	    : "NPr"(i)
-	    : "memory", "cc7", "cc3", "icc3"
-	    );
-
-	return val;
+	return __atomic_add_return(i, &v->counter);
 }
 
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
-	unsigned long val;
-
-	asm("0: \n"
-	    " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
-	    " ckeq icc3,cc7 \n"
-	    " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
-	    " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
-	    " sub%I2 %1,%2,%1 \n"
-	    " cst.p %1,%M0 ,cc3,#1 \n"
-	    " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
-	    " beq icc3,#0,0b \n"
-	    : "+U"(v->counter), "=&r"(val)
-	    : "NPr"(i)
-	    : "memory", "cc7", "cc3", "icc3"
-	    );
-
-	return val;
+	return __atomic_sub_return(i, &v->counter);
 }
 
-#else
-
-extern int atomic_add_return(int i, atomic_t *v);
-extern int atomic_sub_return(int i, atomic_t *v);
-
-#endif
-
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	return atomic_add_return(i, v) < 0;
@@ -101,17 +72,14 @@ static inline void atomic_sub(int i, atomic_t *v)
 
 static inline void atomic_inc(atomic_t *v)
 {
-	atomic_add_return(1, v);
+	atomic_inc_return(v);
 }
 
 static inline void atomic_dec(atomic_t *v)
 {
-	atomic_sub_return(1, v);
+	atomic_dec_return(v);
 }
 
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
 #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
@@ -120,18 +88,19 @@ static inline void atomic_dec(atomic_t *v)
  * 64-bit atomic ops
  */
 typedef struct {
-	volatile long long counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i) { (i) }
 
-static inline long long atomic64_read(atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
 	long long counter;
 
 	asm("ldd%I1 %M1,%0"
 	    : "=e"(counter)
 	    : "m"(v->counter));
 
 	return counter;
 }
 
@@ -142,10 +111,25 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 	    : "e"(i));
 }
 
-extern long long atomic64_inc_return(atomic64_t *v);
-extern long long atomic64_dec_return(atomic64_t *v);
-extern long long atomic64_add_return(long long i, atomic64_t *v);
-extern long long atomic64_sub_return(long long i, atomic64_t *v);
+static inline long long atomic64_inc_return(atomic64_t *v)
+{
+	return __atomic64_add_return(1, &v->counter);
+}
+
+static inline long long atomic64_dec_return(atomic64_t *v)
+{
+	return __atomic64_sub_return(1, &v->counter);
+}
+
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+	return __atomic64_add_return(i, &v->counter);
+}
+
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+	return __atomic64_sub_return(i, &v->counter);
+}
 
 static inline long long atomic64_add_negative(long long i, atomic64_t *v)
 {
@@ -176,6 +160,7 @@ static inline void atomic64_dec(atomic64_t *v)
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
 
+
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
 #define atomic_xchg(v, new) (xchg(&(v)->counter, new))
 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
@@ -196,5 +181,21 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+	(void)__atomic32_fetch_##op(i, &v->counter); \
+} \
+\
+static inline void atomic64_##op(long long i, atomic64_t *v) \
+{ \
+	(void)__atomic64_fetch_##op(i, &v->counter); \
+}
+
+ATOMIC_OP(or)
+ATOMIC_OP(and)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
 
 #endif /* _ASM_ATOMIC_H */

@@ -0,0 +1,172 @@
+
+#include <asm/spr-regs.h>
+
+#ifdef __ATOMIC_LIB__
+
+#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define ATOMIC_QUALS
+#define ATOMIC_EXPORT(x) EXPORT_SYMBOL(x)
+
+#else /* !OUTOFLINE && LIB */
+
+#define ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)
+
+#endif /* OUTOFLINE */
+
+#else /* !__ATOMIC_LIB__ */
+
+#define ATOMIC_EXPORT(x)
+
+#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define ATOMIC_OP_RETURN(op) \
+extern int __atomic_##op##_return(int i, int *v); \
+extern long long __atomic64_##op##_return(long long i, long long *v);
+
+#define ATOMIC_FETCH_OP(op) \
+extern int __atomic32_fetch_##op(int i, int *v); \
+extern long long __atomic64_fetch_##op(long long i, long long *v);
+
+#else /* !OUTOFLINE && !LIB */
+
+#define ATOMIC_QUALS static inline
+
+#endif /* OUTOFLINE */
+#endif /* __ATOMIC_LIB__ */
+
+
+/*
+ * Note on the 64 bit inline asm variants...
+ *
+ * CSTD is a conditional instruction and needs a constrained memory reference.
+ * Normally 'U' provides the correct constraints for conditional instructions
+ * and this is used for the 32 bit version, however 'U' does not appear to work
+ * for 64 bit values (gcc-4.9)
+ *
+ * The exact constraint is that conditional instructions cannot deal with an
+ * immediate displacement in the memory reference, so what we do is we read the
+ * address through a volatile cast into a local variable in order to insure we
+ * _have_ to compute the correct address without displacement. This allows us
+ * to use the regular 'm' for the memory address.
+ *
+ * Furthermore, the %Ln operand, which prints the low word register (r+1),
+ * really only works for registers, this means we cannot allow immediate values
+ * for the 64 bit versions -- like we do for the 32 bit ones.
+ *
+ */
+
+#ifndef ATOMIC_OP_RETURN
+#define ATOMIC_OP_RETURN(op) \
+ATOMIC_QUALS int __atomic_##op##_return(int i, int *v) \
+{ \
+	int val; \
+ \
+	asm volatile( \
+		"0: \n" \
+		" orcc gr0,gr0,gr0,icc3 \n" \
+		" ckeq icc3,cc7 \n" \
+		" ld.p %M0,%1 \n" \
+		" orcr cc7,cc7,cc3 \n" \
+		" "#op"%I2 %1,%2,%1 \n" \
+		" cst.p %1,%M0 ,cc3,#1 \n" \
+		" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
+		" beq icc3,#0,0b \n" \
+		: "+U"(*v), "=&r"(val) \
+		: "NPr"(i) \
+		: "memory", "cc7", "cc3", "icc3" \
+		); \
+ \
+	return val; \
+} \
+ATOMIC_EXPORT(__atomic_##op##_return); \
+ \
+ATOMIC_QUALS long long __atomic64_##op##_return(long long i, long long *v) \
+{ \
+	long long *__v = READ_ONCE(v); \
+	long long val; \
+ \
+	asm volatile( \
+		"0: \n" \
+		" orcc gr0,gr0,gr0,icc3 \n" \
+		" ckeq icc3,cc7 \n" \
+		" ldd.p %M0,%1 \n" \
+		" orcr cc7,cc7,cc3 \n" \
+		" "#op"cc %L1,%L2,%L1,icc0 \n" \
+		" "#op"x %1,%2,%1,icc0 \n" \
+		" cstd.p %1,%M0 ,cc3,#1 \n" \
+		" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
+		" beq icc3,#0,0b \n" \
+		: "+m"(*__v), "=&e"(val) \
+		: "e"(i) \
+		: "memory", "cc7", "cc3", "icc0", "icc3" \
+		); \
+ \
+	return val; \
+} \
+ATOMIC_EXPORT(__atomic64_##op##_return);
+#endif
+
+#ifndef ATOMIC_FETCH_OP
+#define ATOMIC_FETCH_OP(op) \
+ATOMIC_QUALS int __atomic32_fetch_##op(int i, int *v) \
+{ \
+	int old, tmp; \
+ \
+	asm volatile( \
+		"0: \n" \
+		" orcc gr0,gr0,gr0,icc3 \n" \
+		" ckeq icc3,cc7 \n" \
+		" ld.p %M0,%1 \n" \
+		" orcr cc7,cc7,cc3 \n" \
+		" "#op"%I3 %1,%3,%2 \n" \
+		" cst.p %2,%M0 ,cc3,#1 \n" \
+		" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
+		" beq icc3,#0,0b \n" \
+		: "+U"(*v), "=&r"(old), "=r"(tmp) \
+		: "NPr"(i) \
+		: "memory", "cc7", "cc3", "icc3" \
+		); \
+ \
+	return old; \
+} \
+ATOMIC_EXPORT(__atomic32_fetch_##op); \
+ \
+ATOMIC_QUALS long long __atomic64_fetch_##op(long long i, long long *v) \
+{ \
+	long long *__v = READ_ONCE(v); \
+	long long old, tmp; \
+ \
+	asm volatile( \
+		"0: \n" \
+		" orcc gr0,gr0,gr0,icc3 \n" \
+		" ckeq icc3,cc7 \n" \
+		" ldd.p %M0,%1 \n" \
+		" orcr cc7,cc7,cc3 \n" \
+		" "#op" %L1,%L3,%L2 \n" \
+		" "#op" %1,%3,%2 \n" \
+		" cstd.p %2,%M0 ,cc3,#1 \n" \
+		" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
+		" beq icc3,#0,0b \n" \
+		: "+m"(*__v), "=&e"(old), "=e"(tmp) \
+		: "e"(i) \
+		: "memory", "cc7", "cc3", "icc3" \
+		); \
+ \
+	return old; \
+} \
+ATOMIC_EXPORT(__atomic64_fetch_##op);
+#endif
+
+ATOMIC_FETCH_OP(or)
+ATOMIC_FETCH_OP(and)
+ATOMIC_FETCH_OP(xor)
+
+ATOMIC_OP_RETURN(add)
+ATOMIC_OP_RETURN(sub)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_QUALS
+#undef ATOMIC_EXPORT

@@ -25,109 +25,30 @@
 
 #include <asm-generic/bitops/ffz.h>
 
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-static inline
-unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
-{
-	unsigned long old, tmp;
-
-	asm volatile(
-		"0: \n"
-		" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
-		" ckeq icc3,cc7 \n"
-		" ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
-		" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
-		" and%I3 %1,%3,%2 \n"
-		" cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
-		" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
-		" beq icc3,#0,0b \n"
-		: "+U"(*v), "=&r"(old), "=r"(tmp)
-		: "NPr"(~mask)
-		: "memory", "cc7", "cc3", "icc3"
-		);
-
-	return old;
-}
-
-static inline
-unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
-{
-	unsigned long old, tmp;
-
-	asm volatile(
-		"0: \n"
-		" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
-		" ckeq icc3,cc7 \n"
-		" ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
-		" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
-		" or%I3 %1,%3,%2 \n"
-		" cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
-		" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
-		" beq icc3,#0,0b \n"
-		: "+U"(*v), "=&r"(old), "=r"(tmp)
-		: "NPr"(mask)
-		: "memory", "cc7", "cc3", "icc3"
-		);
-
-	return old;
-}
-
-static inline
-unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
-{
-	unsigned long old, tmp;
-
-	asm volatile(
-		"0: \n"
-		" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
-		" ckeq icc3,cc7 \n"
-		" ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
-		" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
-		" xor%I3 %1,%3,%2 \n"
-		" cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
-		" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
-		" beq icc3,#0,0b \n"
-		: "+U"(*v), "=&r"(old), "=r"(tmp)
-		: "NPr"(mask)
-		: "memory", "cc7", "cc3", "icc3"
-		);
-
-	return old;
-}
-
-#else
-
-extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
-extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
-extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
-
-#endif
-
-#define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v))
-#define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v))
+#include <asm/atomic.h>
 
 static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
 {
-	volatile unsigned long *ptr = addr;
-	unsigned long mask = 1UL << (nr & 31);
+	unsigned int *ptr = (void *)addr;
+	unsigned int mask = 1UL << (nr & 31);
 	ptr += nr >> 5;
-	return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0;
+	return (__atomic32_fetch_and(~mask, ptr) & mask) != 0;
 }
 
 static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
 {
-	volatile unsigned long *ptr = addr;
-	unsigned long mask = 1UL << (nr & 31);
+	unsigned int *ptr = (void *)addr;
+	unsigned int mask = 1UL << (nr & 31);
 	ptr += nr >> 5;
-	return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0;
+	return (__atomic32_fetch_or(mask, ptr) & mask) != 0;
 }
 
 static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
 {
-	volatile unsigned long *ptr = addr;
-	unsigned long mask = 1UL << (nr & 31);
+	unsigned int *ptr = (void *)addr;
+	unsigned int mask = 1UL << (nr & 31);
 	ptr += nr >> 5;
-	return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0;
+	return (__atomic32_fetch_xor(mask, ptr) & mask) != 0;
 }
 
 static inline void clear_bit(unsigned long nr, volatile void *addr)

@@ -109,13 +109,13 @@ static struct frv_dma_channel frv_dma_channels[FRV_DMA_NCHANS] = {
 
 static DEFINE_RWLOCK(frv_dma_channels_lock);
 
-unsigned long frv_dma_inprogress;
+unsigned int frv_dma_inprogress;
 
 #define frv_clear_dma_inprogress(channel) \
-	atomic_clear_mask(1 << (channel), &frv_dma_inprogress);
+	(void)__atomic32_fetch_and(~(1 << (channel)), &frv_dma_inprogress);
 
 #define frv_set_dma_inprogress(channel) \
-	atomic_set_mask(1 << (channel), &frv_dma_inprogress);
+	(void)__atomic32_fetch_or(1 << (channel), &frv_dma_inprogress);
 
 /*****************************************************************************/
 /*

@@ -58,11 +58,6 @@ EXPORT_SYMBOL(__outsl_ns);
 EXPORT_SYMBOL(__insl_ns);
 
 #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
-EXPORT_SYMBOL(atomic_test_and_OR_mask);
-EXPORT_SYMBOL(atomic_test_and_XOR_mask);
-EXPORT_SYMBOL(atomic_add_return);
-EXPORT_SYMBOL(atomic_sub_return);
 EXPORT_SYMBOL(__xchg_32);
 EXPORT_SYMBOL(__cmpxchg_32);
 #endif

@@ -5,4 +5,4 @@
 lib-y := \
 	__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
 	checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
-	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
+	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o atomic-lib.o

@@ -0,0 +1,7 @@
+
+#include <linux/export.h>
+#include <asm/atomic.h>
+
+#define __ATOMIC_LIB__
+
+#include <asm/atomic_defs.h>

@@ -17,116 +17,6 @@
 	.text
 	.balign 4
 
-###############################################################################
-#
-# unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
-	.globl atomic_test_and_ANDNOT_mask
-	.type atomic_test_and_ANDNOT_mask,@function
-atomic_test_and_ANDNOT_mask:
-	not.p gr8,gr10
-0:
-	orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
-	ckeq icc3,cc7
-	ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
-	orcr cc7,cc7,cc3 /* set CC3 to true */
-	and gr8,gr10,gr11
-	cst.p gr11,@(gr9,gr0) ,cc3,#1
-	corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
-	beq icc3,#0,0b
-	bralr
-
-	.size atomic_test_and_ANDNOT_mask, .-atomic_test_and_ANDNOT_mask
-
-###############################################################################
-#
-# unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
-	.globl atomic_test_and_OR_mask
-	.type atomic_test_and_OR_mask,@function
-atomic_test_and_OR_mask:
-	or.p gr8,gr8,gr10
-0:
-	orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
-	ckeq icc3,cc7
-	ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
-	orcr cc7,cc7,cc3 /* set CC3 to true */
-	or gr8,gr10,gr11
-	cst.p gr11,@(gr9,gr0) ,cc3,#1
-	corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
-	beq icc3,#0,0b
-	bralr
-
-	.size atomic_test_and_OR_mask, .-atomic_test_and_OR_mask
-
-###############################################################################
-#
-# unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
-	.globl atomic_test_and_XOR_mask
-	.type atomic_test_and_XOR_mask,@function
-atomic_test_and_XOR_mask:
-	or.p gr8,gr8,gr10
-0:
-	orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
-	ckeq icc3,cc7
-	ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
-	orcr cc7,cc7,cc3 /* set CC3 to true */
-	xor gr8,gr10,gr11
-	cst.p gr11,@(gr9,gr0) ,cc3,#1
-	corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
-	beq icc3,#0,0b
-	bralr
-
-	.size atomic_test_and_XOR_mask, .-atomic_test_and_XOR_mask
-
-###############################################################################
-#
-# int atomic_add_return(int i, atomic_t *v)
-#
-###############################################################################
-	.globl atomic_add_return
-	.type atomic_add_return,@function
-atomic_add_return:
-	or.p gr8,gr8,gr10
-0:
-	orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
-	ckeq icc3,cc7
-	ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
-	orcr cc7,cc7,cc3 /* set CC3 to true */
-	add gr8,gr10,gr8
-	cst.p gr8,@(gr9,gr0) ,cc3,#1
-	corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
-	beq icc3,#0,0b
-	bralr
-
-	.size atomic_add_return, .-atomic_add_return
-
-###############################################################################
-#
-# int atomic_sub_return(int i, atomic_t *v)
-#
-###############################################################################
-	.globl atomic_sub_return
-	.type atomic_sub_return,@function
-atomic_sub_return:
-	or.p gr8,gr8,gr10
-0:
-	orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
-	ckeq icc3,cc7
-	ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
-	orcr cc7,cc7,cc3 /* set CC3 to true */
-	sub gr8,gr10,gr8
-	cst.p gr8,@(gr9,gr0) ,cc3,#1
-	corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
-	beq icc3,#0,0b
-	bralr
-
-	.size atomic_sub_return, .-atomic_sub_return
-
 ###############################################################################
 #
 # uint32_t __xchg_32(uint32_t i, uint32_t *v)

@@ -18,100 +18,6 @@
 	.balign 4
 
 
-###############################################################################
-#
-# long long atomic64_inc_return(atomic64_t *v)
-#
-###############################################################################
-	.globl atomic64_inc_return
-	.type atomic64_inc_return,@function
-atomic64_inc_return:
-	or.p gr8,gr8,gr10
-0:
-	orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
-	ckeq icc3,cc7
-	ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
-	orcr cc7,cc7,cc3 /* set CC3 to true */
-	addicc gr9,#1,gr9,icc0
-	addxi gr8,#0,gr8,icc0
-	cstd.p gr8,@(gr10,gr0) ,cc3,#1
-	corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
-	beq icc3,#0,0b
-	bralr
-
-	.size atomic64_inc_return, .-atomic64_inc_return
-
-###############################################################################
-#
-# long long atomic64_dec_return(atomic64_t *v)
-#
-###############################################################################
-	.globl atomic64_dec_return
-	.type atomic64_dec_return,@function
-atomic64_dec_return:
-	or.p gr8,gr8,gr10
-0:
-	orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
-	ckeq icc3,cc7
-	ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
-	orcr cc7,cc7,cc3 /* set CC3 to true */
-	subicc gr9,#1,gr9,icc0
-	subxi gr8,#0,gr8,icc0
-	cstd.p gr8,@(gr10,gr0) ,cc3,#1
-	corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
-	beq icc3,#0,0b
-	bralr
-
-	.size atomic64_dec_return, .-atomic64_dec_return
-
-###############################################################################
-#
-# long long atomic64_add_return(long long i, atomic64_t *v)
-#
-###############################################################################
-	.globl atomic64_add_return
-	.type atomic64_add_return,@function
-atomic64_add_return:
-	or.p gr8,gr8,gr4
-	or gr9,gr9,gr5
-0:
-	orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
-	ckeq icc3,cc7
-	ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
-	orcr cc7,cc7,cc3 /* set CC3 to true */
-	addcc gr9,gr5,gr9,icc0
-	addx gr8,gr4,gr8,icc0
-	cstd.p gr8,@(gr10,gr0) ,cc3,#1
-	corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
-	beq icc3,#0,0b
-	bralr
-
-	.size atomic64_add_return, .-atomic64_add_return
-
-###############################################################################
-#
-# long long atomic64_sub_return(long long i, atomic64_t *v)
-#
-###############################################################################
-	.globl atomic64_sub_return
-	.type atomic64_sub_return,@function
-atomic64_sub_return:
-	or.p gr8,gr8,gr4
-	or gr9,gr9,gr5
-0:
-	orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
-	ckeq icc3,cc7
-	ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
-	orcr cc7,cc7,cc3 /* set CC3 to true */
-	subcc gr9,gr5,gr9,icc0
-	subx gr8,gr4,gr8,icc0
-	cstd.p gr8,@(gr10,gr0) ,cc3,#1
-	corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
-	beq icc3,#0,0b
-	bralr
-
-	.size atomic64_sub_return, .-atomic64_sub_return
-
 ###############################################################################
 #
 # uint64_t __xchg_64(uint64_t i, uint64_t *v)

@@ -16,83 +16,52 @@
 
 #include <linux/kernel.h>
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	ret = v->counter += i;
-	arch_local_irq_restore(flags);
-	return ret;
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+	h8300flags flags; \
+	int ret; \
+	\
+	flags = arch_local_irq_save(); \
+	ret = v->counter c_op i; \
+	arch_local_irq_restore(flags); \
+	return ret; \
 }
 
-#define atomic_add(i, v) atomic_add_return(i, v)
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+	h8300flags flags; \
+	\
+	flags = arch_local_irq_save(); \
+	v->counter c_op i; \
+	arch_local_irq_restore(flags); \
+}
+
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP_RETURN(sub, -=)
+
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
+
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define atomic_add(i, v) (void)atomic_add_return(i, v)
 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
+#define atomic_sub(i, v) (void)atomic_sub_return(i, v)
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
 
-	flags = arch_local_irq_save();
-	ret = v->counter -= i;
-	arch_local_irq_restore(flags);
-	return ret;
-}
+#define atomic_inc_return(v) atomic_add_return(1, v)
+#define atomic_dec_return(v) atomic_sub_return(1, v)
 
-#define atomic_sub(i, v) atomic_sub_return(i, v)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#define atomic_inc(v) (void)atomic_inc_return(v)
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
 
-static inline int atomic_inc_return(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	v->counter++;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret;
-}
-
-#define atomic_inc(v) atomic_inc_return(v)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-static inline int atomic_dec_return(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	--v->counter;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret;
-}
-
-#define atomic_dec(v) atomic_dec_return(v)
-
-static inline int atomic_dec_and_test(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	--v->counter;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret == 0;
-}
+#define atomic_dec(v) (void)atomic_dec_return(v)
+#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -120,40 +89,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return ret;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
-	unsigned char ccr;
-	unsigned long tmp;
-
-	__asm__ __volatile__("stc ccr,%w3\n\t"
-			     "orc #0x80,ccr\n\t"
-			     "mov.l %0,%1\n\t"
-			     "and.l %2,%1\n\t"
-			     "mov.l %1,%0\n\t"
-			     "ldc %w3,ccr"
-			     : "=m"(*v), "=r"(tmp)
-			     : "g"(~(mask)), "r"(ccr));
-}
-
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
-	unsigned char ccr;
-	unsigned long tmp;
-
-	__asm__ __volatile__("stc ccr,%w3\n\t"
-			     "orc #0x80,ccr\n\t"
-			     "mov.l %0,%1\n\t"
-			     "or.l %2,%1\n\t"
-			     "mov.l %1,%0\n\t"
-			     "ldc %w3,ccr"
-			     : "=m"(*v), "=r"(tmp)
-			     : "g"(~(mask)), "r"(ccr));
-}
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* __ARCH_H8300_ATOMIC __ */

@@ -132,6 +132,10 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP

@@ -45,8 +45,6 @@ ia64_atomic_##op (int i, atomic_t *v) \
 ATOMIC_OP(add, +)
 ATOMIC_OP(sub, -)
 
-#undef ATOMIC_OP
-
 #define atomic_add_return(i,v) \
 ({ \
 	int __ia64_aar_i = (i); \
@@ -71,6 +69,16 @@ ATOMIC_OP(sub, -)
 	: ia64_atomic_sub(__ia64_asr_i, v); \
 })
 
+ATOMIC_OP(and, &)
+ATOMIC_OP(or, |)
+ATOMIC_OP(xor, ^)
+
+#define atomic_and(i,v) (void)ia64_atomic_and(i,v)
+#define atomic_or(i,v) (void)ia64_atomic_or(i,v)
+#define atomic_xor(i,v) (void)ia64_atomic_xor(i,v)
+
+#undef ATOMIC_OP
+
 #define ATOMIC64_OP(op, c_op) \
 static __inline__ long \
 ia64_atomic64_##op (__s64 i, atomic64_t *v) \
@@ -89,8 +97,6 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v) \
 ATOMIC64_OP(add, +)
 ATOMIC64_OP(sub, -)
 
-#undef ATOMIC64_OP
-
 #define atomic64_add_return(i,v) \
 ({ \
 	long __ia64_aar_i = (i); \
@@ -115,6 +121,16 @@ ATOMIC64_OP(sub, -)
 	: ia64_atomic64_sub(__ia64_asr_i, v); \
 })
 
+ATOMIC64_OP(and, &)
+ATOMIC64_OP(or, |)
+ATOMIC64_OP(xor, ^)
+
+#define atomic64_and(i,v) (void)ia64_atomic64_and(i,v)
+#define atomic64_or(i,v) (void)ia64_atomic64_or(i,v)
+#define atomic64_xor(i,v) (void)ia64_atomic64_xor(i,v)
+
+#undef ATOMIC64_OP
+
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
@@ -94,6 +94,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -239,45 +243,4 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-
-static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
-{
-	unsigned long flags;
-	unsigned long tmp;
-
-	local_irq_save(flags);
-	__asm__ __volatile__ (
-		"# atomic_clear_mask \n\t"
-		DCACHE_CLEAR("%0", "r5", "%1")
-		M32R_LOCK" %0, @%1; \n\t"
-		"and %0, %2; \n\t"
-		M32R_UNLOCK" %0, @%1; \n\t"
-		: "=&r" (tmp)
-		: "r" (addr), "r" (~mask)
-		: "memory"
-		__ATOMIC_CLOBBER
-	);
-	local_irq_restore(flags);
-}
-
-static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
-{
-	unsigned long flags;
-	unsigned long tmp;
-
-	local_irq_save(flags);
-	__asm__ __volatile__ (
-		"# atomic_set_mask \n\t"
-		DCACHE_CLEAR("%0", "r5", "%1")
-		M32R_LOCK" %0, @%1; \n\t"
-		"or %0, %2; \n\t"
-		M32R_UNLOCK" %0, @%1; \n\t"
-		: "=&r" (tmp)
-		: "r" (addr), "r" (mask)
-		: "memory"
-		__ATOMIC_CLOBBER
-	);
-	local_irq_restore(flags);
-}
-
 #endif /* _ASM_M32R_ATOMIC_H */

@@ -156,7 +156,7 @@ void smp_flush_cache_all(void)
 	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 	spin_lock(&flushcache_lock);
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+	atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
 	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
@@ -407,7 +407,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	flush_vma = vma;
 	flush_va = va;
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+	atomic_or(*mask, (atomic_t *)&flush_cpumask);
 
 	/*
 	 * We have to send the IPI only to

@@ -77,6 +77,10 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, eor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -170,16 +174,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
 	return c != 0;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
-	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
-}
-
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
-	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
-}
-
 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;

@@ -74,44 +74,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	int temp;
-
-	asm volatile (
-		"1: LNKGETD %0, [%1]\n"
-		" AND %0, %0, %2\n"
-		" LNKSETD [%1] %0\n"
-		" DEFR %0, TXSTAT\n"
-		" ANDT %0, %0, #HI(0x3f000000)\n"
-		" CMPT %0, #HI(0x02000000)\n"
-		" BNZ 1b\n"
-		: "=&d" (temp)
-		: "da" (&v->counter), "bd" (~mask)
-		: "cc");
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	int temp;
-
-	asm volatile (
-		"1: LNKGETD %0, [%1]\n"
-		" OR %0, %0, %2\n"
-		" LNKSETD [%1], %0\n"
-		" DEFR %0, TXSTAT\n"
-		" ANDT %0, %0, #HI(0x3f000000)\n"
-		" CMPT %0, #HI(0x02000000)\n"
-		" BNZ 1b\n"
-		: "=&d" (temp)
-		: "da" (&v->counter), "bd" (mask)
-		: "cc");
-}
-
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int result, temp;

@@ -68,31 +68,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	__global_lock1(flags);
-	fence();
-	v->counter &= ~mask;
-	__global_unlock1(flags);
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	__global_lock1(flags);
-	fence();
-	v->counter |= mask;
-	__global_unlock1(flags);
-}
-
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;

@@ -137,6 +137,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
 ATOMIC_OPS(add, +=, addu)
 ATOMIC_OPS(sub, -=, subu)
 
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -416,6 +420,9 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
 
 ATOMIC64_OPS(add, +=, daddu)
 ATOMIC64_OPS(sub, -=, dsubu)
+ATOMIC64_OP(and, &=, and)
+ATOMIC64_OP(or, |=, or)
+ATOMIC64_OP(xor, ^=, xor)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN

@@ -89,6 +89,10 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -127,73 +131,6 @@ static inline void atomic_dec(atomic_t *v)
 #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
 
-/**
- * atomic_clear_mask - Atomically clear bits in memory
- * @mask: Mask of the bits to be cleared
- * @v: pointer to word in memory
- *
- * Atomically clears the bits set in mask from the memory word specified.
- */
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-#ifdef CONFIG_SMP
-	int status;
-
-	asm volatile(
-		"1: mov %3,(_AAR,%2) \n"
-		" mov (_ADR,%2),%0 \n"
-		" and %4,%0 \n"
-		" mov %0,(_ADR,%2) \n"
-		" mov (_ADR,%2),%0 \n" /* flush */
-		" mov (_ASR,%2),%0 \n"
-		" or %0,%0 \n"
-		" bne 1b \n"
-		: "=&r"(status), "=m"(*addr)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
-		: "memory", "cc");
-#else
-	unsigned long flags;
-
-	mask = ~mask;
-	flags = arch_local_cli_save();
-	*addr &= mask;
-	arch_local_irq_restore(flags);
-#endif
-}
-
-/**
- * atomic_set_mask - Atomically set bits in memory
- * @mask: Mask of the bits to be set
- * @v: pointer to word in memory
- *
- * Atomically sets the bits set in mask from the memory word specified.
- */
-static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
-{
-#ifdef CONFIG_SMP
-	int status;
-
-	asm volatile(
-		"1: mov %3,(_AAR,%2) \n"
-		" mov (_ADR,%2),%0 \n"
-		" or %4,%0 \n"
-		" mov %0,(_ADR,%2) \n"
-		" mov (_ADR,%2),%0 \n" /* flush */
-		" mov (_ASR,%2),%0 \n"
-		" or %0,%0 \n"
-		" bne 1b \n"
-		: "=&r"(status), "=m"(*addr)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
-		: "memory", "cc");
-#else
-	unsigned long flags;
-
-	flags = arch_local_cli_save();
-	*addr |= mask;
-	arch_local_irq_restore(flags);
-#endif
-}
-
 #endif /* __KERNEL__ */
 #endif /* CONFIG_SMP */
 #endif /* _ASM_ATOMIC_H */

@@ -119,7 +119,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	flush_mm = mm;
 	flush_va = va;
 #if NR_CPUS <= BITS_PER_LONG
-	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+	atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]);
 #else
 #error Not supported.
 #endif

@@ -126,6 +126,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
 
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -185,6 +189,9 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
 
 ATOMIC64_OPS(add, +=)
 ATOMIC64_OPS(sub, -=)
+ATOMIC64_OP(and, &=)
+ATOMIC64_OP(or, |=)
+ATOMIC64_OP(xor, ^=)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN

@@ -67,6 +67,10 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
 ATOMIC_OPS(add, add)
 ATOMIC_OPS(sub, subf)
 
+ATOMIC_OP(and, and)
+ATOMIC_OP(or, or)
+ATOMIC_OP(xor, xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -304,6 +308,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
 
 ATOMIC64_OPS(add, add)
 ATOMIC64_OPS(sub, subf)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(or, or)
+ATOMIC64_OP(xor, xor)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN

@@ -595,25 +595,6 @@ _GLOBAL(copy_page)
 	li r11,4
 	b 2b
 
-/*
- * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
- * void atomic_set_mask(atomic_t mask, atomic_t *addr);
- */
-_GLOBAL(atomic_clear_mask)
-10: lwarx r5,0,r4
-	andc r5,r5,r3
-	PPC405_ERR77(0,r4)
-	stwcx. r5,0,r4
-	bne- 10b
-	blr
-_GLOBAL(atomic_set_mask)
-10: lwarx r5,0,r4
-	or r5,r5,r3
-	PPC405_ERR77(0,r4)
-	stwcx. r5,0,r4
-	bne- 10b
-	blr
-
 /*
  * Extended precision shifts.
 *

@@ -27,6 +27,7 @@
 #define __ATOMIC_OR "lao"
 #define __ATOMIC_AND "lan"
 #define __ATOMIC_ADD "laa"
+#define __ATOMIC_XOR "lax"
 #define __ATOMIC_BARRIER "bcr 14,0\n"
 
 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
@@ -49,6 +50,7 @@
 #define __ATOMIC_OR "or"
 #define __ATOMIC_AND "nr"
 #define __ATOMIC_ADD "ar"
+#define __ATOMIC_XOR "xr"
 #define __ATOMIC_BARRIER "\n"
 
 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
@@ -118,15 +120,17 @@ static inline void atomic_add(int i, atomic_t *v)
 #define atomic_dec_return(_v) atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
+#define ATOMIC_OP(op, OP) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \
 }
 
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
-}
+ATOMIC_OP(and, AND)
+ATOMIC_OP(or, OR)
+ATOMIC_OP(xor, XOR)
+
+#undef ATOMIC_OP
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
@@ -167,6 +171,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define __ATOMIC64_OR "laog"
 #define __ATOMIC64_AND "lang"
 #define __ATOMIC64_ADD "laag"
+#define __ATOMIC64_XOR "laxg"
 #define __ATOMIC64_BARRIER "bcr 14,0\n"
 
 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
@@ -189,6 +194,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define __ATOMIC64_OR "ogr"
 #define __ATOMIC64_AND "ngr"
 #define __ATOMIC64_ADD "agr"
+#define __ATOMIC64_XOR "xgr"
 #define __ATOMIC64_BARRIER "\n"
 
 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
@@ -247,16 +253,6 @@ static inline void atomic64_add(long long i, atomic64_t *v)
 	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
 }
 
-static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
-{
-	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
-}
-
-static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
-{
-	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
-}
-
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
 static inline long long atomic64_cmpxchg(atomic64_t *v,
@@ -270,6 +266,17 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 	return old;
 }
 
+#define ATOMIC64_OP(op, OP) \
+static inline void atomic64_##op(long i, atomic64_t *v) \
+{ \
+	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \
+}
+
+ATOMIC64_OP(and, AND)
+ATOMIC64_OP(or, OR)
+ATOMIC64_OP(xor, XOR)
+
+#undef ATOMIC64_OP
 #undef __ATOMIC64_LOOP
 
 static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)

@@ -381,7 +381,7 @@ static void disable_sync_clock(void *dummy)
	 * increase the "sequence" counter to avoid the race of an
	 * etr event and the complete recovery against get_sync_clock.
	 */
	atomic_clear_mask(0x80000000, sw_ptr);
	atomic_andnot(0x80000000, sw_ptr);
	atomic_inc(sw_ptr);
}

@@ -392,7 +392,7 @@ static void disable_sync_clock(void *dummy)
static void enable_sync_clock(void)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
	atomic_set_mask(0x80000000, sw_ptr);
	atomic_or(0x80000000, sw_ptr);
}

/*
@@ -170,20 +170,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		      &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

@@ -196,7 +196,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -919,7 +919,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
	spin_unlock(&li->lock);

	/* clear pending external calls set by sigp interpretation facility */
	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
}

@@ -1020,7 +1020,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

@@ -1035,7 +1035,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
		/* another external call is pending */
		return -EBUSY;
	}
	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	return 0;
}

@@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

@@ -1133,7 +1133,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

@@ -1177,7 +1177,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
				   0, 0, 2);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

@@ -1190,7 +1190,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
				   0, 0, 2);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

@@ -1369,13 +1369,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
	spin_lock(&li->lock);
	switch (type) {
	case KVM_S390_MCHK:
		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
@@ -1215,12 +1215,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	}
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		save_fp_ctl(&vcpu->run->s.regs.fpc);

@@ -1320,9 +1320,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

@@ -1422,24 +1422,24 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
@@ -1448,7 +1448,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

@@ -1672,19 +1672,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
@@ -1771,7 +1771,7 @@ retry:
	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
			atomic_or(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
@@ -1780,7 +1780,7 @@ retry:
	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
			atomic_andnot(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
@@ -2280,7 +2280,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
@@ -2306,7 +2306,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
@@ -48,47 +48,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
ATOMIC_OPS(add)
ATOMIC_OPS(sub)

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	int tmp;
	unsigned int _mask = ~mask;

	__asm__ __volatile__ (
		"   .align 2              \n\t"
		"   mova    1f,   r0      \n\t" /* r0 = end point */
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
		"   mov.l  @%1,   %0      \n\t" /* load old value */
		"   and     %2,   %0      \n\t" /* and */
		"   mov.l   %0,   @%1     \n\t" /* store new value */
		"1: mov     r1,   r15     \n\t" /* LOGOUT */
		: "=&r" (tmp),
		  "+r" (v)
		: "r" (_mask)
		: "memory" , "r0", "r1");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	int tmp;

	__asm__ __volatile__ (
		"   .align 2              \n\t"
		"   mova    1f,   r0      \n\t" /* r0 = end point */
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
		"   mov.l  @%1,   %0      \n\t" /* load old value */
		"   or      %2,   %0      \n\t" /* or */
		"   mov.l   %0,   @%1     \n\t" /* store new value */
		"1: mov     r1,   r15     \n\t" /* LOGOUT */
		: "=&r" (tmp),
		  "+r" (v)
		: "r" (mask)
		: "memory" , "r0", "r1");
}

#endif /* __ASM_SH_ATOMIC_GRB_H */
@@ -37,27 +37,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	v->counter &= ~mask;
	raw_local_irq_restore(flags);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	v->counter |= mask;
	raw_local_irq_restore(flags);
}

#endif /* __ASM_SH_ATOMIC_IRQ_H */
@@ -52,37 +52,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
		"	and	%1, %0					\n"
		"	movco.l	%0, @%2					\n"
		"	bf	1b					\n"
		: "=&z" (tmp)
		: "r" (~mask), "r" (&v->counter)
		: "t");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:	movli.l @%2, %0		! atomic_set_mask	\n"
		"	or	%1, %0					\n"
		"	movco.l	%0, @%2					\n"
		"	bf	1b					\n"
		: "=&z" (tmp)
		: "r" (mask), "r" (&v->counter)
		: "t");
}

#endif /* __ASM_SH_ATOMIC_LLSC_H */
@@ -17,10 +17,12 @@
#include <asm/barrier.h>
#include <asm-generic/atomic64.h>

#define ATOMIC_INIT(i)	{ (i) }

int atomic_add_return(int, atomic_t *);
void atomic_and(int, atomic_t *);
void atomic_or(int, atomic_t *);
void atomic_xor(int, atomic_t *);
int atomic_cmpxchg(atomic_t *, int, int);
int atomic_xchg(atomic_t *, int);
int __atomic_add_unless(atomic_t *, int, int);
@@ -33,6 +33,10 @@ long atomic64_##op##_return(long, atomic64_t *);
ATOMIC_OPS(add)
ATOMIC_OPS(sub)

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -27,22 +27,38 @@ static DEFINE_SPINLOCK(dummy);

#endif /* SMP */

#define ATOMIC_OP(op, cop)					\
#define ATOMIC_OP_RETURN(op, c_op)				\
int atomic_##op##_return(int i, atomic_t *v)			\
{								\
	int ret;						\
	unsigned long flags;					\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);		\
								\
	ret = (v->counter cop i);				\
	ret = (v->counter c_op i);				\
								\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);		\
	return ret;						\
}								\
EXPORT_SYMBOL(atomic_##op##_return);

ATOMIC_OP(add, +=)
#define ATOMIC_OP(op, c_op)					\
void atomic_##op(int i, atomic_t *v)				\
{								\
	unsigned long flags;					\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);		\
								\
	v->counter c_op i;					\
								\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);		\
}								\
EXPORT_SYMBOL(atomic_##op);

ATOMIC_OP_RETURN(add, +=)
ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)

#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

int atomic_xchg(atomic_t *v, int new)
@@ -47,6 +47,9 @@ ENDPROC(atomic_##op##_return);

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN

@@ -84,6 +87,9 @@ ENDPROC(atomic64_##op##_return);

ATOMIC64_OPS(add)
ATOMIC64_OPS(sub)
ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
@@ -111,6 +111,9 @@ EXPORT_SYMBOL(atomic64_##op##_return);

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
@@ -34,6 +34,19 @@ static inline void atomic_add(int i, atomic_t *v)
	_atomic_xchg_add(&v->counter, i);
}

#define ATOMIC_OP(op)						\
unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	_atomic_##op((unsigned long *)&v->counter, i);		\
}

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OP

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t

@@ -113,6 +126,17 @@ static inline void atomic64_add(long long i, atomic64_t *v)
	_atomic64_xchg_add(&v->counter, i);
}

#define ATOMIC64_OP(op)						\
long long _atomic64_##op(long long *v, long long n);		\
static inline void atomic64_##op(long long i, atomic64_t *v)	\
{								\
	_atomic64_##op(&v->counter, i);				\
}

ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t

@@ -225,6 +249,7 @@ extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,

@@ -234,6 +259,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
				     long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
					    int *lock, long long o, long long n);
extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);

/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);
@@ -58,6 +58,26 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
	return oldval;
}

static inline void atomic_and(int i, atomic_t *v)
{
	__insn_fetchand4((void *)&v->counter, i);
}

static inline void atomic_or(int i, atomic_t *v)
{
	__insn_fetchor4((void *)&v->counter, i);
}

static inline void atomic_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
}

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

@@ -91,6 +111,26 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
	return oldval != u;
}

static inline void atomic64_and(long i, atomic64_t *v)
{
	__insn_fetchand((void *)&v->counter, i);
}

static inline void atomic64_or(long i, atomic64_t *v)
{
	__insn_fetchor((void *)&v->counter, i);
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
}

#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
@@ -94,6 +94,12 @@ unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
}
EXPORT_SYMBOL(_atomic_or);

unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_and((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_and);

unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;

@@ -136,6 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n)
}
EXPORT_SYMBOL(_atomic64_cmpxchg);

long long _atomic64_and(long long *v, long long n)
{
	return __atomic64_and(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_and);

long long _atomic64_or(long long *v, long long n)
{
	return __atomic64_or(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_or);

long long _atomic64_xor(long long *v, long long n)
{
	return __atomic64_xor(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xor);

/*
 * If any of the atomic or futex routines hit a bad address (not in
@@ -178,6 +178,7 @@ atomic_op _xchg_add, 32, "add r24, r22, r2"
atomic_op _xchg_add_unless, 32, \
	"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
atomic_op _or, 32, "or r24, r22, r2"
atomic_op _and, 32, "and r24, r22, r2"
atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
atomic_op _xor, 32, "xor r24, r22, r2"

@@ -191,6 +192,9 @@ atomic_op 64_xchg_add_unless, 64, \
	{ bbns r26, 3f; add r24, r22, r4 }; \
	{ bbns r27, 3f; add r25, r23, r5 }; \
	slt_u r26, r24, r22; add r25, r25, r26"
atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"

	jrp     lr              /* happy backtracer */
@@ -182,6 +182,21 @@ static inline int atomic_xchg(atomic_t *v, int new)
	return xchg(&v->counter, new);
}

#define ATOMIC_OP(op)						\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	asm volatile(LOCK_PREFIX #op"l %1,%0"			\
			: "+m" (v->counter)			\
			: "ir" (i)				\
			: "memory");				\
}

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t

@@ -219,16 +234,6 @@ static __always_inline short int atomic_inc_short(short int *v)
	return *v;
}

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr)				\
	asm volatile(LOCK_PREFIX "andl %0,%1"			\
		: : "r" (~(mask)), "m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr)				\
	asm volatile(LOCK_PREFIX "orl %0,%1"			\
		: : "r" ((unsigned)(mask)), "m" (*(addr))	\
		: "memory")

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
@@ -313,4 +313,18 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#undef alternative_atomic64
#undef __alternative_atomic64

#define ATOMIC64_OP(op, c_op)					\
static inline void atomic64_##op(long long i, atomic64_t *v)	\
{								\
	long long old, c = 0;					\
	while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
}

ATOMIC64_OP(and, &)
ATOMIC64_OP(or, |)
ATOMIC64_OP(xor, ^)

#undef ATOMIC64_OP

#endif /* _ASM_X86_ATOMIC64_32_H */
@@ -220,4 +220,19 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
	return dec;
}

#define ATOMIC64_OP(op)						\
static inline void atomic64_##op(long i, atomic64_t *v)	\
{								\
	asm volatile(LOCK_PREFIX #op"q %1,%0"			\
			: "+m" (v->counter)			\
			: "er" (i)				\
			: "memory");				\
}

ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)

#undef ATOMIC64_OP

#endif /* _ASM_X86_ATOMIC64_64_H */
@@ -145,6 +145,10 @@ static inline int atomic_##op##_return(int i, atomic_t * v)	\
ATOMIC_OPS(add)
ATOMIC_OPS(sub)

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

@@ -250,75 +254,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
	return c;
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       and     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (~mask), "a" (v)
			: "memory"
			);
#else
	unsigned int all_f = -1;
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       xor     %1, %4, %3\n"
			"       and     %0, %0, %4\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval), "=a" (mask)
			: "a" (v), "a" (all_f), "1" (mask)
			: "a15", "memory"
			);
#endif
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       or      %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (mask), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       or      %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (mask), "a" (v)
			: "a15", "memory"
			);
#endif
}

#endif /* __KERNEL__ */

#endif /* _XTENSA_ATOMIC_H */
@@ -748,7 +748,7 @@ static int i915_drm_resume(struct drm_device *dev)
	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);

@@ -5091,7 +5091,7 @@ int i915_gem_init(struct drm_device *dev)
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		ret = 0;
	}

@@ -2446,7 +2446,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
	} else {
		atomic_set_mask(I915_WEDGED, &error->reset_counter);
		atomic_or(I915_WEDGED, &error->reset_counter);
	}

	/*
@@ -2574,7 +2574,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
@@ -529,7 +529,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
	list_add_tail(&port->list, &adapter->port_list);
	write_unlock_irq(&adapter->port_list_lock);

	atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
	atomic_or(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);

	return port;
@@ -190,7 +190,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
		if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
			if (scsi_device_get(sdev))
				return NULL;
		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
				&zfcp_sdev->status);
		erp_action = &zfcp_sdev->erp_action;
		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
@@ -206,7 +206,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
		if (!get_device(&port->dev))
			return NULL;
		zfcp_erp_action_dismiss_port(port);
		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
		erp_action = &port->erp_action;
		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
		erp_action->port = port;
@@ -217,7 +217,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		kref_get(&adapter->ref);
		zfcp_erp_action_dismiss_adapter(adapter);
		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
		erp_action = &adapter->erp_action;
		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
		if (!(atomic_read(&adapter->status) &
@@ -254,7 +254,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
	act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
	if (!act)
		goto out;
	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
	atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
	++adapter->erp_total_count;
	list_add_tail(&act->list, &adapter->erp_ready_head);
	wake_up(&adapter->erp_ready_wq);
@@ -486,14 +486,14 @@ static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
{
	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
		zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
}

static void zfcp_erp_port_unblock(struct zfcp_port *port)
{
	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
		zfcp_dbf_rec_run("erpubl1", &port->erp_action);
	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
}

static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
@@ -502,7 +502,7 @@ static void zfcp_erp_lun_unblock(struct scsi_device *sdev)

	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
		zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
}

static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
@@ -642,7 +642,7 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
	read_lock_irqsave(&adapter->erp_lock, flags);
	if (list_empty(&adapter->erp_ready_head) &&
	    list_empty(&adapter->erp_running_head)) {
			atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
			atomic_andnot(ZFCP_STATUS_ADAPTER_ERP_PENDING,
					  &adapter->status);
		wake_up(&adapter->erp_done_wqh);
	}

@@ -665,16 +665,16 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
	int sleep = 1;
	struct zfcp_adapter *adapter = erp_action->adapter;

	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
	atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);

	for (retries = 7; retries; retries--) {
		atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
		atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
				  &adapter->status);
		write_lock_irq(&adapter->erp_lock);
		zfcp_erp_action_to_running(erp_action);
		write_unlock_irq(&adapter->erp_lock);
		if (zfcp_fsf_exchange_config_data(erp_action)) {
			atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
			atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
					  &adapter->status);
			return ZFCP_ERP_FAILED;
		}

@@ -692,7 +692,7 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
		sleep *= 2;
	}

	atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
	atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
			  &adapter->status);

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
@@ -764,7 +764,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
	/* all ports and LUNs are closed */
	zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);

	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
	atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
		      ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
}

@@ -773,7 +773,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
	struct zfcp_adapter *adapter = act->adapter;

	if (zfcp_qdio_open(adapter->qdio)) {
		atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
		atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
			      ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
			      &adapter->status);
		return ZFCP_ERP_FAILED;
@@ -784,7 +784,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
		return ZFCP_ERP_FAILED;
	}

	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
	atomic_or(ZFCP_STATUS_COMMON_OPEN, &adapter->status);

	return ZFCP_ERP_SUCCEEDED;
}
@@ -948,7 +948,7 @@ static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED,
	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED,
		      &zfcp_sdev->status);
}

@@ -1187,18 +1187,18 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
	switch (erp_action->action) {
	case ZFCP_ERP_ACTION_REOPEN_LUN:
		zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
			      &zfcp_sdev->status);
		break;

	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
	case ZFCP_ERP_ACTION_REOPEN_PORT:
		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
			      &erp_action->port->status);
		break;

	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
			      &erp_action->adapter->status);
		break;
	}

@@ -1422,19 +1422,19 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
	unsigned long flags;
	u32 common_mask = mask & ZFCP_COMMON_FLAGS;

	atomic_set_mask(mask, &adapter->status);
	atomic_or(mask, &adapter->status);

	if (!common_mask)
		return;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		atomic_set_mask(common_mask, &port->status);
		atomic_or(common_mask, &port->status);
	read_unlock_irqrestore(&adapter->port_list_lock, flags);

	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
	__shost_for_each_device(sdev, adapter->scsi_host)
		atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
		atomic_or(common_mask, &sdev_to_zfcp(sdev)->status);
	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}

@@ -1453,7 +1453,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;

	atomic_clear_mask(mask, &adapter->status);
	atomic_andnot(mask, &adapter->status);

	if (!common_mask)
		return;
@@ -1463,7 +1463,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list) {
		atomic_clear_mask(common_mask, &port->status);
		atomic_andnot(common_mask, &port->status);
		if (clear_counter)
			atomic_set(&port->erp_counter, 0);
	}

@@ -1471,7 +1471,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)

	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
	__shost_for_each_device(sdev, adapter->scsi_host) {
		atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
		atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status);
		if (clear_counter)
			atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
	}

@@ -1491,7 +1491,7 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
	unsigned long flags;

	atomic_set_mask(mask, &port->status);
	atomic_or(mask, &port->status);

	if (!common_mask)
		return;
@@ -1499,7 +1499,7 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
	__shost_for_each_device(sdev, port->adapter->scsi_host)
		if (sdev_to_zfcp(sdev)->port == port)
			atomic_set_mask(common_mask,
			atomic_or(common_mask,
					&sdev_to_zfcp(sdev)->status);
	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}

@@ -1518,7 +1518,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
	unsigned long flags;

	atomic_clear_mask(mask, &port->status);
	atomic_andnot(mask, &port->status);

	if (!common_mask)
		return;
@@ -1529,7 +1529,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
	__shost_for_each_device(sdev, port->adapter->scsi_host)
		if (sdev_to_zfcp(sdev)->port == port) {
			atomic_clear_mask(common_mask,
			atomic_andnot(common_mask,
				      &sdev_to_zfcp(sdev)->status);
			if (clear_counter)
				atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);

@@ -1546,7 +1546,7 @@ void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

	atomic_set_mask(mask, &zfcp_sdev->status);
	atomic_or(mask, &zfcp_sdev->status);
}

/**
@@ -1558,7 +1558,7 @@ void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

	atomic_clear_mask(mask, &zfcp_sdev->status);
	atomic_andnot(mask, &zfcp_sdev->status);

	if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
		atomic_set(&zfcp_sdev->erp_counter, 0);
@@ -508,7 +508,7 @@ static void zfcp_fc_adisc_handler(void *data)
	/* port is good, unblock rport without going through erp */
	zfcp_scsi_schedule_rport_register(port);
 out:
	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	put_device(&port->dev);
	kmem_cache_free(zfcp_fc_req_cache, fc_req);
}

@@ -564,14 +564,14 @@ void zfcp_fc_link_test_work(struct work_struct *work)
	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
		goto out;

	atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	atomic_or(ZFCP_STATUS_PORT_LINK_TEST, &port->status);

	retval = zfcp_fc_adisc(port);
	if (retval == 0)
		return;

	/* send of ADISC was not possible */
	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");

 out:
@@ -640,7 +640,7 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
	if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
		return;

	atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
	atomic_andnot(ZFCP_STATUS_COMMON_NOESC, &port->status);

	if ((port->supported_classes != 0) ||
	    !list_empty(&port->unit_list))
@@ -114,7 +114,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
		return;

	atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);

	zfcp_scsi_schedule_rports_block(adapter);

@@ -345,7 +345,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
		break;
	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
				&adapter->status);
		break;
	case FSF_PROT_DUPLICATE_REQUEST_ID:

@@ -554,7 +554,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
			return;
		}
		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
				&adapter->status);
		break;
	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
@@ -567,7 +567,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)

		/* avoids adapter shutdown to be able to recognize
		 * events such as LINK UP */
		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
				&adapter->status);
		zfcp_fsf_link_down_info_eval(req,
			&qtcb->header.fsf_status_qual.link_down_info);

@@ -1394,9 +1394,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
		break;
	case FSF_GOOD:
		port->handle = header->port_handle;
		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
		atomic_or(ZFCP_STATUS_COMMON_OPEN |
			  ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
			      &port->status);
		/* check whether D_ID has changed during open */
		/*

@@ -1677,10 +1677,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
	case FSF_PORT_BOXED:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
					      &sdev_to_zfcp(sdev)->status);
		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,

@@ -1700,10 +1700,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
		 */
		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
					      &sdev_to_zfcp(sdev)->status);
		break;
	}

@@ -1766,7 +1766,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)

	zfcp_sdev = sdev_to_zfcp(sdev);

	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
		      ZFCP_STATUS_COMMON_ACCESS_BOXED,
		      &zfcp_sdev->status);

@@ -1822,7 +1822,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)

	case FSF_GOOD:
		zfcp_sdev->lun_handle = header->lun_handle;
		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		break;
	}
}

@@ -1913,7 +1913,7 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
		}
		break;
	case FSF_GOOD:
		atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		break;
	}
}
@@ -349,7 +349,7 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	wake_up(&qdio->req_q_wq);

@@ -384,7 +384,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
		      &qdio->adapter->status);

	zfcp_qdio_setup_init_data(&init_data, qdio);

@@ -396,14 +396,14 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
				&qdio->adapter->status);

	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}

@@ -427,7 +427,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
	/* set index of first available SBALS / number of available SBALS */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	if (adapter->scsi_host) {
		adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;

@@ -499,6 +499,6 @@ void zfcp_qdio_siosl(struct zfcp_adapter *adapter)

	rc = ccw_device_siosl(adapter->ccw_device);
	if (!rc)
		atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
				&adapter->status);
}
@@ -98,15 +98,16 @@ ATOMIC_OP_RETURN(add, +)
ATOMIC_OP_RETURN(sub, -)
#endif

#ifndef atomic_clear_mask
#ifndef atomic_and
ATOMIC_OP(and, &)
#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
#endif

#ifndef atomic_set_mask
#define CONFIG_ARCH_HAS_ATOMIC_OR
#ifndef atomic_or
ATOMIC_OP(or, |)
#define atomic_set_mask(i, v)	atomic_or((i), (v))
#endif

#ifndef atomic_xor
ATOMIC_OP(xor, ^)
#endif

#undef ATOMIC_OP_RETURN
@@ -32,6 +32,10 @@ extern long long atomic64_##op##_return(long long a, atomic64_t *v);
ATOMIC64_OPS(add)
ATOMIC64_OPS(sub)

ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
@@ -28,6 +28,23 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif

#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#endif

static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);
}

static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);
}

/**
 * atomic_inc_not_zero_hint - increment if not null
 * @v: pointer of type atomic_t

@@ -111,21 +128,16 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif

#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
static inline void atomic_or(int i, atomic_t *v)
{
	int old;
	int new;

	do {
		old = atomic_read(v);
		new = old | i;
	} while (atomic_cmpxchg(v, old, new) != old);
}
#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */

#include <asm-generic/atomic-long.h>
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
	atomic64_and(~i, v);
}
#endif

#endif /* _LINUX_ATOMIC_H */
@@ -102,6 +102,9 @@ EXPORT_SYMBOL(atomic64_##op##_return);

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
ATOMIC64_OP(and, &=)
ATOMIC64_OP(or, |=)
ATOMIC64_OP(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
@@ -16,8 +16,39 @@
#include <linux/kernel.h>
#include <linux/atomic.h>

#define TEST(bit, op, c_op, val)				\
do {								\
	atomic##bit##_set(&v, v0);				\
	r = v0;							\
	atomic##bit##_##op(val, &v);				\
	r c_op val;						\
	WARN(atomic##bit##_read(&v) != r, "%Lx != %Lx\n",	\
		(unsigned long long)atomic##bit##_read(&v),	\
		(unsigned long long)r);				\
} while (0)

static __init void test_atomic(void)
{
	int v0 = 0xaaa31337;
	int v1 = 0xdeadbeef;
	int onestwos = 0x11112222;
	int one = 1;

	atomic_t v;
	int r;

	TEST(, add, +=, onestwos);
	TEST(, add, +=, -one);
	TEST(, sub, -=, onestwos);
	TEST(, sub, -=, -one);
	TEST(, or, |=, v1);
	TEST(, and, &=, v1);
	TEST(, xor, ^=, v1);
	TEST(, andnot, &= ~, v1);
}

#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
static __init int test_atomic64(void)
static __init void test_atomic64(void)
{
	long long v0 = 0xaaa31337c001d00dLL;
	long long v1 = 0xdeadbeefdeafcafeLL;

@@ -34,15 +65,14 @@ static __init int test_atomic64(void)
	BUG_ON(v.counter != r);
	BUG_ON(atomic64_read(&v) != r);

	INIT(v0);
	atomic64_add(onestwos, &v);
	r += onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_add(-one, &v);
	r += -one;
	BUG_ON(v.counter != r);
	TEST(64, add, +=, onestwos);
	TEST(64, add, +=, -one);
	TEST(64, sub, -=, onestwos);
	TEST(64, sub, -=, -one);
	TEST(64, or, |=, v1);
	TEST(64, and, &=, v1);
	TEST(64, xor, ^=, v1);
	TEST(64, andnot, &= ~, v1);

	INIT(v0);
	r += onestwos;
@@ -54,16 +84,6 @@ static __init int test_atomic64(void)
	BUG_ON(atomic64_add_return(-one, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_sub(onestwos, &v);
	r -= onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_sub(-one, &v);
	r -= -one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= onestwos;
	BUG_ON(atomic64_sub_return(onestwos, &v) != r);
@@ -147,6 +167,12 @@ static __init int test_atomic64(void)
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);
}

static __init int test_atomics(void)
{
	test_atomic();
	test_atomic64();

#ifdef CONFIG_X86
	pr_info("passed for %s platform %s CX8 and %s SSE\n",
@@ -166,4 +192,4 @@ static __init int test_atomic64(void)
	return 0;
}

core_initcall(test_atomic64);
core_initcall(test_atomics);