percpu: Remove irqsafe_cpu_xxx variants
We simply say that regular this_cpu use must be safe regardless of preemption and interrupt state. That has no material change for x86 and s390 implementations of this_cpu operations. However, arches that do not provide their own implementation for this_cpu operations will now get code generated that disables interrupts instead of preemption.

-tj: This is part of on-going percpu API cleanup. For detailed discussion of the subject, please refer to the following thread.

http://thread.gmane.org/gmane.linux.kernel/1222078

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
LKML-Reference: <alpine.DEB.2.00.1112221154380.11787@router.home>
This commit is contained in:
Parent: ecefc36b41
Commit: 933393f58f
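In practice the change matters only for architectures that fall back to the generic this_cpu implementation: their this_cpu_*() operations become atomic with respect to local interrupts as well as preemption, so the separate irqsafe_cpu_*() spelling is no longer needed. A minimal sketch of the usage pattern this enables (the counter and function names below are illustrative, not taken from this patch):

/* Illustrative per-cpu counter updated from both task and IRQ context. */
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_rx_packets);

/* e.g. called from an interrupt handler */
static void demo_count_from_irq(void)
{
	/* Before this commit this would have been irqsafe_cpu_inc();
	 * this_cpu_inc() is now guaranteed safe against local interrupts too. */
	this_cpu_inc(demo_rx_packets);
}

/* process context; no explicit local_irq_save() needed around the update */
static void demo_count_from_task(unsigned long n)
{
	this_cpu_add(demo_rx_packets, n);
}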
arch/s390/include/asm/percpu.h:
@@ -19,7 +19,7 @@
 #define ARCH_NEEDS_WEAK_PER_CPU
 #endif

-#define arch_irqsafe_cpu_to_op(pcp, val, op) \
+#define arch_this_cpu_to_op(pcp, val, op) \
 do { \
 	typedef typeof(pcp) pcp_op_T__; \
 	pcp_op_T__ old__, new__, prev__; \
@@ -41,27 +41,27 @@ do { \
 	preempt_enable(); \
 } while (0)

-#define irqsafe_cpu_add_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_add_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_add_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_add_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
+#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)

-#define irqsafe_cpu_and_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_and_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_and_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_and_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
+#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &)

-#define irqsafe_cpu_or_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_or_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_or_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_or_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
+#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |)

-#define irqsafe_cpu_xor_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
-#define irqsafe_cpu_xor_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
-#define irqsafe_cpu_xor_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
-#define irqsafe_cpu_xor_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)

-#define arch_irqsafe_cpu_cmpxchg(pcp, oval, nval) \
+#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
 ({ \
 	typedef typeof(pcp) pcp_op_T__; \
 	pcp_op_T__ ret__; \
@@ -79,10 +79,10 @@ do { \
 	ret__; \
 })

-#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_1(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_2(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)

 #include <asm-generic/percpu.h>

arch/x86/include/asm/percpu.h:
@@ -414,22 +414,6 @@ do { \
 #define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)

-#define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
-#define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
-#define irqsafe_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
-#define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
-
 #ifndef CONFIG_M386
 #define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
 #define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
@@ -445,9 +429,6 @@ do { \
 #define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
 #define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)

-#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
 #endif /* !CONFIG_M386 */

 #ifdef CONFIG_X86_CMPXCHG64
@@ -467,7 +448,6 @@ do { \

 #define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
 #define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
-#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
 #endif /* CONFIG_X86_CMPXCHG64 */

 /*
@@ -495,13 +475,6 @@ do { \
 #define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
 #define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)

-#define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
-#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-
 /*
  * Pretty complex macro to generate cmpxchg16 instruction. The instruction
  * is not supported on early AMD64 processors so we must be able to emulate
@@ -532,7 +505,6 @@ do { \

 #define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
 #define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
-#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)

 #endif

include/linux/netdevice.h:
@@ -2115,7 +2115,7 @@ extern void netdev_run_todo(void);
  */
 static inline void dev_put(struct net_device *dev)
 {
-	irqsafe_cpu_dec(*dev->pcpu_refcnt);
+	this_cpu_dec(*dev->pcpu_refcnt);
 }

 /**
@@ -2126,7 +2126,7 @@ static inline void dev_put(struct net_device *dev)
  */
 static inline void dev_hold(struct net_device *dev)
 {
-	irqsafe_cpu_inc(*dev->pcpu_refcnt);
+	this_cpu_inc(*dev->pcpu_refcnt);
 }

 /* Carrier loss detection, dial on demand. The functions netif_carrier_on

include/linux/netfilter/x_tables.h:
@@ -471,7 +471,7 @@ DECLARE_PER_CPU(seqcount_t, xt_recseq);
  *
  * Begin packet processing : all readers must wait the end
  * 1) Must be called with preemption disabled
- * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
+ * 2) softirqs must be disabled too (or we should use this_cpu_add())
  * Returns :
  *  1 if no recursion on this cpu
  *  0 if recursion detected
@@ -503,7 +503,7 @@ static inline unsigned int xt_write_recseq_begin(void)
  *
  * End packet processing : all readers can proceed
  * 1) Must be called with preemption disabled
- * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
+ * 2) softirqs must be disabled too (or we should use this_cpu_add())
  */
 static inline void xt_write_recseq_end(unsigned int addend)
 {

include/linux/percpu.h:
@@ -172,10 +172,10 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
  * equal char, int or long. percpu_read() evaluates to a lvalue and
  * all others to void.
  *
- * These operations are guaranteed to be atomic w.r.t. preemption.
- * The generic versions use plain get/put_cpu_var(). Archs are
+ * These operations are guaranteed to be atomic.
+ * The generic versions disable interrupts. Archs are
  * encouraged to implement single-instruction alternatives which don't
- * require preemption protection.
+ * require protection.
  */
 #ifndef percpu_read
 # define percpu_read(var) \
@@ -347,9 +347,10 @@ do { \

 #define _this_cpu_generic_to_op(pcp, val, op) \
 do { \
-	preempt_disable(); \
+	unsigned long flags; \
+	local_irq_save(flags); \
 	*__this_cpu_ptr(&(pcp)) op val; \
-	preempt_enable(); \
+	local_irq_restore(flags); \
 } while (0)

 #ifndef this_cpu_write
@@ -447,10 +448,11 @@ do { \
 #define _this_cpu_generic_add_return(pcp, val) \
 ({ \
 	typeof(pcp) ret__; \
-	preempt_disable(); \
+	unsigned long flags; \
+	local_irq_save(flags); \
 	__this_cpu_add(pcp, val); \
 	ret__ = __this_cpu_read(pcp); \
-	preempt_enable(); \
+	local_irq_restore(flags); \
 	ret__; \
 })

@@ -476,10 +478,11 @@ do { \

 #define _this_cpu_generic_xchg(pcp, nval) \
 ({ typeof(pcp) ret__; \
-	preempt_disable(); \
+	unsigned long flags; \
+	local_irq_save(flags); \
 	ret__ = __this_cpu_read(pcp); \
 	__this_cpu_write(pcp, nval); \
-	preempt_enable(); \
+	local_irq_restore(flags); \
 	ret__; \
 })

@@ -501,12 +504,14 @@ do { \
 #endif

 #define _this_cpu_generic_cmpxchg(pcp, oval, nval) \
-({ typeof(pcp) ret__; \
-	preempt_disable(); \
+({ \
+	typeof(pcp) ret__; \
+	unsigned long flags; \
+	local_irq_save(flags); \
 	ret__ = __this_cpu_read(pcp); \
 	if (ret__ == (oval)) \
 		__this_cpu_write(pcp, nval); \
-	preempt_enable(); \
+	local_irq_restore(flags); \
 	ret__; \
 })

@@ -538,10 +543,11 @@ do { \
 #define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
 ({ \
 	int ret__; \
-	preempt_disable(); \
+	unsigned long flags; \
+	local_irq_save(flags); \
 	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
 			oval1, oval2, nval1, nval2); \
-	preempt_enable(); \
+	local_irq_restore(flags); \
 	ret__; \
 })

@@ -567,9 +573,9 @@ do { \
 #endif

 /*
- * Generic percpu operations that do not require preemption handling.
+ * Generic percpu operations for context that are safe from preemption/interrupts.
  * Either we do not care about races or the caller has the
- * responsibility of handling preemptions issues. Arch code can still
+ * responsibility of handling preemption/interrupt issues. Arch code can still
  * override these instructions since the arch per cpu code may be more
  * efficient and may actually get race freeness for free (that is the
  * case for x86 for example).
@@ -802,156 +808,4 @@ do { \
 	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
 #endif

-/*
- * IRQ safe versions of the per cpu RMW operations. Note that these operations
- * are *not* safe against modification of the same variable from another
- * processors (which one gets when using regular atomic operations)
- * They are guaranteed to be atomic vs. local interrupts and
- * preemption only.
- */
-#define irqsafe_cpu_generic_to_op(pcp, val, op) \
-do { \
-	unsigned long flags; \
-	local_irq_save(flags); \
-	*__this_cpu_ptr(&(pcp)) op val; \
-	local_irq_restore(flags); \
-} while (0)
-
-#ifndef irqsafe_cpu_add
-# ifndef irqsafe_cpu_add_1
-#  define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef irqsafe_cpu_add_2
-#  define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef irqsafe_cpu_add_4
-#  define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef irqsafe_cpu_add_8
-#  define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
-#endif
-
-#ifndef irqsafe_cpu_sub
-# define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val))
-#endif
-
-#ifndef irqsafe_cpu_inc
-# define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1)
-#endif
-
-#ifndef irqsafe_cpu_dec
-# define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1)
-#endif
-
-#ifndef irqsafe_cpu_and
-# ifndef irqsafe_cpu_and_1
-#  define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef irqsafe_cpu_and_2
-#  define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef irqsafe_cpu_and_4
-#  define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef irqsafe_cpu_and_8
-#  define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (val))
-#endif
-
-#ifndef irqsafe_cpu_or
-# ifndef irqsafe_cpu_or_1
-#  define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef irqsafe_cpu_or_2
-#  define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef irqsafe_cpu_or_4
-#  define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef irqsafe_cpu_or_8
-#  define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (val))
-#endif
-
-#ifndef irqsafe_cpu_xor
-# ifndef irqsafe_cpu_xor_1
-#  define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef irqsafe_cpu_xor_2
-#  define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef irqsafe_cpu_xor_4
-#  define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef irqsafe_cpu_xor_8
-#  define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (val))
-#endif
-
-#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) \
-({ \
-	typeof(pcp) ret__; \
-	unsigned long flags; \
-	local_irq_save(flags); \
-	ret__ = __this_cpu_read(pcp); \
-	if (ret__ == (oval)) \
-		__this_cpu_write(pcp, nval); \
-	local_irq_restore(flags); \
-	ret__; \
-})
-
-#ifndef irqsafe_cpu_cmpxchg
-# ifndef irqsafe_cpu_cmpxchg_1
-#  define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_2
-#  define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_4
-#  define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_8
-#  define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# define irqsafe_cpu_cmpxchg(pcp, oval, nval) \
-	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
-#endif
-
-#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-({ \
-	int ret__; \
-	unsigned long flags; \
-	local_irq_save(flags); \
-	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
-			oval1, oval2, nval1, nval2); \
-	local_irq_restore(flags); \
-	ret__; \
-})
-
-#ifndef irqsafe_cpu_cmpxchg_double
-# ifndef irqsafe_cpu_cmpxchg_double_1
-#  define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_double_2
-#  define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_double_4
-#  define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_double_8
-#  define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-	__pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
-#endif
-
 #endif /* __LINUX_PERCPU_H */

include/net/snmp.h:
@@ -129,33 +129,33 @@ struct linux_xfrm_mib {
 			__this_cpu_inc(mib[0]->mibs[field])

 #define SNMP_INC_STATS_USER(mib, field) \
-			irqsafe_cpu_inc(mib[0]->mibs[field])
+			this_cpu_inc(mib[0]->mibs[field])

 #define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \
 			atomic_long_inc(&mib->mibs[field])

 #define SNMP_INC_STATS(mib, field) \
-			irqsafe_cpu_inc(mib[0]->mibs[field])
+			this_cpu_inc(mib[0]->mibs[field])

 #define SNMP_DEC_STATS(mib, field) \
-			irqsafe_cpu_dec(mib[0]->mibs[field])
+			this_cpu_dec(mib[0]->mibs[field])

 #define SNMP_ADD_STATS_BH(mib, field, addend) \
 			__this_cpu_add(mib[0]->mibs[field], addend)

 #define SNMP_ADD_STATS_USER(mib, field, addend) \
-			irqsafe_cpu_add(mib[0]->mibs[field], addend)
+			this_cpu_add(mib[0]->mibs[field], addend)

 #define SNMP_ADD_STATS(mib, field, addend) \
-			irqsafe_cpu_add(mib[0]->mibs[field], addend)
+			this_cpu_add(mib[0]->mibs[field], addend)
 /*
  * Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr"
  * to make @ptr a non-percpu pointer.
  */
 #define SNMP_UPD_PO_STATS(mib, basefield, addend) \
 	do { \
-		irqsafe_cpu_inc(mib[0]->mibs[basefield##PKTS]); \
-		irqsafe_cpu_add(mib[0]->mibs[basefield##OCTETS], addend); \
+		this_cpu_inc(mib[0]->mibs[basefield##PKTS]); \
+		this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend); \
 	} while (0)
 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
 	do { \

mm/slub.c:
@@ -1978,7 +1978,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->pobjects = pobjects;
 		page->next = oldpage;

-	} while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
 	stat(s, CPU_PARTIAL_FREE);
 	return pobjects;
 }
@@ -2304,7 +2304,7 @@ redo:
 	 * Since this is without lock semantics the protection is only against
 	 * code executing on this cpu *not* from access by other cpus.
 	 */
-	if (unlikely(!irqsafe_cpu_cmpxchg_double(
+	if (unlikely(!this_cpu_cmpxchg_double(
 			s->cpu_slab->freelist, s->cpu_slab->tid,
 			object, tid,
 			get_freepointer_safe(s, object), next_tid(tid)))) {
@@ -2534,7 +2534,7 @@ redo:
 	if (likely(page == c->page)) {
 		set_freepointer(s, object, c->freelist);

-		if (unlikely(!irqsafe_cpu_cmpxchg_double(
+		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				c->freelist, tid,
 				object, next_tid(tid)))) {

net/caif/caif_dev.c:
@@ -69,12 +69,12 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)

 static void caifd_put(struct caif_device_entry *e)
 {
-	irqsafe_cpu_dec(*e->pcpu_refcnt);
+	this_cpu_dec(*e->pcpu_refcnt);
 }

 static void caifd_hold(struct caif_device_entry *e)
 {
-	irqsafe_cpu_inc(*e->pcpu_refcnt);
+	this_cpu_inc(*e->pcpu_refcnt);
 }

 static int caifd_refcnt_read(struct caif_device_entry *e)

net/caif/cffrml.c:
@@ -177,14 +177,14 @@ void cffrml_put(struct cflayer *layr)
 {
 	struct cffrml *this = container_obj(layr);
 	if (layr != NULL && this->pcpu_refcnt != NULL)
-		irqsafe_cpu_dec(*this->pcpu_refcnt);
+		this_cpu_dec(*this->pcpu_refcnt);
 }

 void cffrml_hold(struct cflayer *layr)
 {
 	struct cffrml *this = container_obj(layr);
 	if (layr != NULL && this->pcpu_refcnt != NULL)
-		irqsafe_cpu_inc(*this->pcpu_refcnt);
+		this_cpu_inc(*this->pcpu_refcnt);
 }

 int cffrml_refcnt_read(struct cflayer *layr)
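For quick reference, the core of the generic change in include/linux/percpu.h, condensed from the hunks above: the fallback RMW helper switches from disabling preemption to disabling local interrupts, which is what makes plain this_cpu operations interrupt-safe on architectures without their own single-instruction variants.

/* Old generic helper: protected only against preemption. */
#define _this_cpu_generic_to_op(pcp, val, op)		\
do {							\
	preempt_disable();				\
	*__this_cpu_ptr(&(pcp)) op val;			\
	preempt_enable();				\
} while (0)

/* New generic helper: protected against local interrupts (and hence preemption). */
#define _this_cpu_generic_to_op(pcp, val, op)		\
do {							\
	unsigned long flags;				\
	local_irq_save(flags);				\
	*__this_cpu_ptr(&(pcp)) op val;			\
	local_irq_restore(flags);			\
} while (0)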