WSL2-Linux-Kernel/include/linux/percpu.h

759 строки
25 KiB
C
Исходник Обычный вид История

#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>
#include <asm/percpu.h>
/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE (8 << 10)
#else
#define PERCPU_MODULE_RESERVE 0
#endif
#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM \
(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
PERCPU_MODULE_RESERVE)
#endif
/*
* Must be an lvalue. Since @var must be a simple identifier,
* we force a syntax error here if it isn't.
*/
#define get_cpu_var(var) (*({ \
preempt_disable(); \
&__get_cpu_var(var); }))
/*
* The weird & is necessary because sparse considers (void)(var) to be
* a direct dereference of percpu variable (var).
*/
#define put_cpu_var(var) do { \
(void)&(var); \
preempt_enable(); \
} while (0)
#define get_cpu_ptr(var) ({ \
preempt_disable(); \
this_cpu_ptr(var); })
#define put_cpu_ptr(var) do { \
(void)(var); \
preempt_enable(); \
} while (0)
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
/*
* Percpu allocator can serve percpu allocations before slab is
* initialized which allows slab to depend on the percpu allocator.
* The following two parameters decide how much resource to
* preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
* larger than PERCPU_DYNAMIC_EARLY_SIZE.
*/
#define PERCPU_DYNAMIC_EARLY_SLOTS 128
#define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
/*
* PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
* back on the first chunk for dynamic percpu allocation if arch is
* manually allocating and mapping it for faster access (as a part of
* large page mapping for example).
*
* The following values give between one and two pages of free space
* after typical minimal boot (2-way SMP, single disk and NIC) with
* both defconfig and a distro config on x86_64 and 32. More
* intelligent way to determine this would be nice.
*/
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE (20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE (12 << 10)
#endif
extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;
percpu: introduce pcpu_alloc_info and pcpu_group_info Till now, non-linear cpu->unit map was expressed using an integer array which maps each cpu to a unit and used only by lpage allocator. Although how many units have been placed in a single contiguos area (group) is known while building unit_map, the information is lost when the result is recorded into the unit_map array. For lpage allocator, as all allocations are done by lpages and whether two adjacent lpages are in the same group or not is irrelevant, this didn't cause any problem. Non-linear cpu->unit mapping will be used for sparse embedding and this grouping information is necessary for that. This patch introduces pcpu_alloc_info which contains all the information necessary for initializing percpu allocator. pcpu_alloc_info contains array of pcpu_group_info which describes how units are grouped and mapped to cpus. pcpu_group_info also has base_offset field to specify its offset from the chunk's base address. pcpu_build_alloc_info() initializes this field as if all groups are allocated back-to-back as is currently done but this will be used to sparsely place groups. pcpu_alloc_info is a rather complex data structure which contains a flexible array which in turn points to nested cpu_map arrays. * pcpu_alloc_alloc_info() and pcpu_free_alloc_info() are provided to help dealing with pcpu_alloc_info. * pcpu_lpage_build_unit_map() is updated to build pcpu_alloc_info, generalized and renamed to pcpu_build_alloc_info(). @cpu_distance_fn may be NULL indicating that all cpus are of LOCAL_DISTANCE. * pcpul_lpage_dump_cfg() is updated to process pcpu_alloc_info, generalized and renamed to pcpu_dump_alloc_info(). It now also prints which group each alloc unit belongs to. * pcpu_setup_first_chunk() now takes pcpu_alloc_info instead of the separate parameters. All first chunk allocators are updated to use pcpu_build_alloc_info() to build alloc_info and call pcpu_setup_first_chunk() with it. This has the side effect of packing units for sparse possible cpus. ie. if cpus 0, 2 and 4 are possible, they'll be assigned unit 0, 1 and 2 instead of 0, 2 and 4. * x86 setup_pcpu_lpage() is updated to deal with alloc_info. * sparc64 setup_per_cpu_areas() is updated to build alloc_info. Although the changes made by this patch are pretty pervasive, it doesn't cause any behavior difference other than packing of sparse cpus. It mostly changes how information is passed among initialization functions and makes room for more flexibility. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: David Miller <davem@davemloft.net>
2009-08-14 10:00:51 +04:00
struct pcpu_group_info {
int nr_units; /* aligned # of units */
unsigned long base_offset; /* base address offset */
unsigned int *cpu_map; /* unit->cpu map, empty
* entries contain NR_CPUS */
};
struct pcpu_alloc_info {
size_t static_size;
size_t reserved_size;
size_t dyn_size;
size_t unit_size;
size_t atom_size;
size_t alloc_size;
size_t __ai_size; /* internal, don't use */
int nr_groups; /* 0 if grouping unnecessary */
struct pcpu_group_info groups[];
};
enum pcpu_fc {
PCPU_FC_AUTO,
PCPU_FC_EMBED,
PCPU_FC_PAGE,
PCPU_FC_NR,
};
extern const char * const pcpu_fc_names[PCPU_FC_NR];
extern enum pcpu_fc pcpu_chosen_fc;
typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
percpu: introduce pcpu_alloc_info and pcpu_group_info Till now, non-linear cpu->unit map was expressed using an integer array which maps each cpu to a unit and used only by lpage allocator. Although how many units have been placed in a single contiguos area (group) is known while building unit_map, the information is lost when the result is recorded into the unit_map array. For lpage allocator, as all allocations are done by lpages and whether two adjacent lpages are in the same group or not is irrelevant, this didn't cause any problem. Non-linear cpu->unit mapping will be used for sparse embedding and this grouping information is necessary for that. This patch introduces pcpu_alloc_info which contains all the information necessary for initializing percpu allocator. pcpu_alloc_info contains array of pcpu_group_info which describes how units are grouped and mapped to cpus. pcpu_group_info also has base_offset field to specify its offset from the chunk's base address. pcpu_build_alloc_info() initializes this field as if all groups are allocated back-to-back as is currently done but this will be used to sparsely place groups. pcpu_alloc_info is a rather complex data structure which contains a flexible array which in turn points to nested cpu_map arrays. * pcpu_alloc_alloc_info() and pcpu_free_alloc_info() are provided to help dealing with pcpu_alloc_info. * pcpu_lpage_build_unit_map() is updated to build pcpu_alloc_info, generalized and renamed to pcpu_build_alloc_info(). @cpu_distance_fn may be NULL indicating that all cpus are of LOCAL_DISTANCE. * pcpul_lpage_dump_cfg() is updated to process pcpu_alloc_info, generalized and renamed to pcpu_dump_alloc_info(). It now also prints which group each alloc unit belongs to. * pcpu_setup_first_chunk() now takes pcpu_alloc_info instead of the separate parameters. All first chunk allocators are updated to use pcpu_build_alloc_info() to build alloc_info and call pcpu_setup_first_chunk() with it. This has the side effect of packing units for sparse possible cpus. ie. if cpus 0, 2 and 4 are possible, they'll be assigned unit 0, 1 and 2 instead of 0, 2 and 4. * x86 setup_pcpu_lpage() is updated to deal with alloc_info. * sparc64 setup_per_cpu_areas() is updated to build alloc_info. Although the changes made by this patch are pretty pervasive, it doesn't cause any behavior difference other than packing of sparse cpus. It mostly changes how information is passed among initialization functions and makes room for more flexibility. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: David Miller <davem@davemloft.net>
2009-08-14 10:00:51 +04:00
extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
void *base_addr);
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_t atom_size,
pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
pcpu_fc_alloc_fn_t alloc_fn,
pcpu_fc_free_fn_t free_fn);
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
pcpu_fc_alloc_fn_t alloc_fn,
pcpu_fc_free_fn_t free_fn,
pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif
/*
* Use this to get to a cpu's version of the per-cpu object
* dynamically allocated. Non-atomic access to the current CPU's
* version should probably be combined with get_cpu()/put_cpu().
*/
#ifdef CONFIG_SMP
#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
#else
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
#endif
extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool is_kernel_percpu_address(unsigned long addr);
#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
percpu: use dynamic percpu allocator as the default percpu allocator This patch makes most !CONFIG_HAVE_SETUP_PER_CPU_AREA archs use dynamic percpu allocator. The first chunk is allocated using embedding helper and 8k is reserved for modules. This ensures that the new allocator behaves almost identically to the original allocator as long as static percpu variables are concerned, so it shouldn't introduce much breakage. s390 and alpha use custom SHIFT_PERCPU_PTR() to work around addressing range limit the addressing model imposes. Unfortunately, this breaks if the address is specified using a variable, so for now, the two archs aren't converted. The following architectures are affected by this change. * sh * arm * cris * mips * sparc(32) * blackfin * avr32 * parisc (broken, under investigation) * m32r * powerpc(32) As this change makes the dynamic allocator the default one, CONFIG_HAVE_DYNAMIC_PER_CPU_AREA is replaced with its invert - CONFIG_HAVE_LEGACY_PER_CPU_AREA, which is added to yet-to-be converted archs. These archs implement their own setup_per_cpu_areas() and the conversion is not trivial. * powerpc(64) * sparc(64) * ia64 * alpha * s390 Boot and batch alloc/free tests on x86_32 with debug code (x86_32 doesn't use default first chunk initialization). Compile tested on sparc(32), powerpc(32), arm and alpha. Kyle McMartin reported that this change breaks parisc. The problem is still under investigation and he is okay with pushing this patch forward and fixing parisc later. [ Impact: use dynamic allocator for most archs w/o custom percpu setup ] Signed-off-by: Tejun Heo <tj@kernel.org> Acked-by: Rusty Russell <rusty@rustcorp.com.au> Acked-by: David S. Miller <davem@davemloft.net> Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com> Reviewed-by: Christoph Lameter <cl@linux.com> Cc: Paul Mundt <lethal@linux-sh.org> Cc: Russell King <rmk@arm.linux.org.uk> Cc: Mikael Starvik <starvik@axis.com> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Bryan Wu <cooloney@kernel.org> Cc: Kyle McMartin <kyle@mcmartin.ca> Cc: Matthew Wilcox <matthew@wil.cx> Cc: Grant Grundler <grundler@parisc-linux.org> Cc: Hirokazu Takata <takata@linux-m32r.org> Cc: Richard Henderson <rth@twiddle.net> Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Ingo Molnar <mingo@elte.hu>
2009-03-30 14:07:44 +04:00
extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);
percpu: use dynamic percpu allocator as the default percpu allocator This patch makes most !CONFIG_HAVE_SETUP_PER_CPU_AREA archs use dynamic percpu allocator. The first chunk is allocated using embedding helper and 8k is reserved for modules. This ensures that the new allocator behaves almost identically to the original allocator as long as static percpu variables are concerned, so it shouldn't introduce much breakage. s390 and alpha use custom SHIFT_PERCPU_PTR() to work around addressing range limit the addressing model imposes. Unfortunately, this breaks if the address is specified using a variable, so for now, the two archs aren't converted. The following architectures are affected by this change. * sh * arm * cris * mips * sparc(32) * blackfin * avr32 * parisc (broken, under investigation) * m32r * powerpc(32) As this change makes the dynamic allocator the default one, CONFIG_HAVE_DYNAMIC_PER_CPU_AREA is replaced with its invert - CONFIG_HAVE_LEGACY_PER_CPU_AREA, which is added to yet-to-be converted archs. These archs implement their own setup_per_cpu_areas() and the conversion is not trivial. * powerpc(64) * sparc(64) * ia64 * alpha * s390 Boot and batch alloc/free tests on x86_32 with debug code (x86_32 doesn't use default first chunk initialization). Compile tested on sparc(32), powerpc(32), arm and alpha. Kyle McMartin reported that this change breaks parisc. The problem is still under investigation and he is okay with pushing this patch forward and fixing parisc later. [ Impact: use dynamic allocator for most archs w/o custom percpu setup ] Signed-off-by: Tejun Heo <tj@kernel.org> Acked-by: Rusty Russell <rusty@rustcorp.com.au> Acked-by: David S. Miller <davem@davemloft.net> Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com> Reviewed-by: Christoph Lameter <cl@linux.com> Cc: Paul Mundt <lethal@linux-sh.org> Cc: Russell King <rmk@arm.linux.org.uk> Cc: Mikael Starvik <starvik@axis.com> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Bryan Wu <cooloney@kernel.org> Cc: Kyle McMartin <kyle@mcmartin.ca> Cc: Matthew Wilcox <matthew@wil.cx> Cc: Grant Grundler <grundler@parisc-linux.org> Cc: Hirokazu Takata <takata@linux-m32r.org> Cc: Richard Henderson <rth@twiddle.net> Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Ingo Molnar <mingo@elte.hu>
2009-03-30 14:07:44 +04:00
extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#define alloc_percpu(type) \
(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
/*
* Branching function to split up a function into a set of functions that
* are called for different scalar sizes of the objects handled.
*/
extern void __bad_size_call_parameter(void);
#define __pcpu_size_call_return(stem, variable) \
({ typeof(variable) pscr_ret__; \
__verify_pcpu_ptr(&(variable)); \
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
switch(sizeof(variable)) { \
case 1: pscr_ret__ = stem##1(variable);break; \
case 2: pscr_ret__ = stem##2(variable);break; \
case 4: pscr_ret__ = stem##4(variable);break; \
case 8: pscr_ret__ = stem##8(variable);break; \
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
default: \
__bad_size_call_parameter();break; \
} \
pscr_ret__; \
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
})
#define __pcpu_size_call_return2(stem, variable, ...) \
({ \
typeof(variable) pscr2_ret__; \
__verify_pcpu_ptr(&(variable)); \
switch(sizeof(variable)) { \
case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \
case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \
case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \
default: \
__bad_size_call_parameter(); break; \
} \
pscr2_ret__; \
})
/*
* Special handling for cmpxchg_double. cmpxchg_double is passed two
* percpu variables. The first has to be aligned to a double word
* boundary and the second has to follow directly thereafter.
* We enforce this on all architectures even if they don't support
* a double cmpxchg instruction, since it's a cheap requirement, and it
* avoids breaking the requirement for architectures with the instruction.
*/
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
({ \
bool pdcrb_ret__; \
__verify_pcpu_ptr(&pcp1); \
BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \
VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1))); \
VM_BUG_ON((unsigned long)(&pcp2) != \
(unsigned long)(&pcp1) + sizeof(pcp1)); \
switch(sizeof(pcp1)) { \
case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \
case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \
case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \
case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \
default: \
__bad_size_call_parameter(); break; \
} \
pdcrb_ret__; \
})
#define __pcpu_size_call(stem, variable, ...) \
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
do { \
__verify_pcpu_ptr(&(variable)); \
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
switch(sizeof(variable)) { \
case 1: stem##1(variable, __VA_ARGS__);break; \
case 2: stem##2(variable, __VA_ARGS__);break; \
case 4: stem##4(variable, __VA_ARGS__);break; \
case 8: stem##8(variable, __VA_ARGS__);break; \
default: \
__bad_size_call_parameter();break; \
} \
} while (0)
/*
* Optimized manipulation for memory allocated through the per cpu
* allocator or for addresses of per cpu variables.
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
*
* These operation guarantee exclusivity of access for other operations
* on the *same* processor. The assumption is that per cpu data is only
* accessed by a single processor instance (the current one).
*
* The first group is used for accesses that must be done in a
* preemption safe way since we know that the context is not preempt
* safe. Interrupts may occur. If the interrupt modifies the variable
* too then RMW actions will not be reliable.
*
* The arch code can provide optimized functions in two ways:
*
* 1. Override the function completely. F.e. define this_cpu_add().
* The arch must then ensure that the various scalar format passed
* are handled correctly.
*
* 2. Provide functions for certain scalar sizes. F.e. provide
* this_cpu_add_2() to provide per cpu atomic operations for 2 byte
* sized RMW actions. If arch code does not provide operations for
* a scalar size then the fallback in the generic code will be
* used.
*/
#define _this_cpu_generic_read(pcp) \
({ typeof(pcp) ret__; \
preempt_disable(); \
ret__ = *this_cpu_ptr(&(pcp)); \
preempt_enable(); \
ret__; \
})
#ifndef this_cpu_read
# ifndef this_cpu_read_1
# define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
# define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
# define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
# define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp))
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
#endif
#define _this_cpu_generic_to_op(pcp, val, op) \
do { \
unsigned long flags; \
percpu: use raw_local_irq_* in _this_cpu op It doesn't make sense to trace irq off or do irq flags lock proving inside 'this_cpu' operations, so replace local_irq_* with raw_local_irq_* in 'this_cpu' op. Also the patch fixes onelockdep warning[1] by the replacement, see below: In commit: 933393f58fef9963eac61db8093689544e29a600(percpu: Remove irqsafe_cpu_xxx variants), local_irq_save/restore(flags) are added inside this_cpu_inc operation, so that trace_hardirqs_off_caller will be called by trace_hardirqs_on_caller directly because __debug_atomic_inc is implemented as this_cpu_inc, which may trigger the lockdep warning[1], for example in the below ARM scenary: kernel_thread_helper /*irq disabled*/ ->trace_hardirqs_on_caller /*hardirqs_enabled was set*/ ->trace_hardirqs_off_caller /*hardirqs_enabled cleared*/ __this_cpu_add(redundant_hardirqs_on) ->trace_hardirqs_off_caller /*irq disabled, so call here*/ The 'unannotated irqs-on' warning will be triggered somewhere because irq is just enabled after the irq trace in kernel_thread_helper. [1], [ 0.162841] ------------[ cut here ]------------ [ 0.167694] WARNING: at kernel/lockdep.c:3493 check_flags+0xc0/0x1d0() [ 0.174468] Modules linked in: [ 0.177703] Backtrace: [ 0.180328] [<c00171f0>] (dump_backtrace+0x0/0x110) from [<c0412320>] (dump_stack+0x18/0x1c) [ 0.189086] r6:c051f778 r5:00000da5 r4:00000000 r3:60000093 [ 0.195007] [<c0412308>] (dump_stack+0x0/0x1c) from [<c00410e8>] (warn_slowpath_common+0x54/0x6c) [ 0.204223] [<c0041094>] (warn_slowpath_common+0x0/0x6c) from [<c0041124>] (warn_slowpath_null+0x24/0x2c) [ 0.214111] r8:00000000 r7:00000000 r6:ee069598 r5:60000013 r4:ee082000 [ 0.220825] r3:00000009 [ 0.223693] [<c0041100>] (warn_slowpath_null+0x0/0x2c) from [<c0088f38>] (check_flags+0xc0/0x1d0) [ 0.232910] [<c0088e78>] (check_flags+0x0/0x1d0) from [<c008d348>] (lock_acquire+0x4c/0x11c) [ 0.241668] [<c008d2fc>] (lock_acquire+0x0/0x11c) from [<c0415aa4>] (_raw_spin_lock+0x3c/0x74) [ 0.250610] [<c0415a68>] (_raw_spin_lock+0x0/0x74) from [<c010a844>] (set_task_comm+0x20/0xc0) [ 0.259521] r6:ee069588 r5:ee0691c0 r4:ee082000 [ 0.264404] [<c010a824>] (set_task_comm+0x0/0xc0) from [<c0060780>] (kthreadd+0x28/0x108) [ 0.272857] r8:00000000 r7:00000013 r6:c0044a08 r5:ee0691c0 r4:ee082000 [ 0.279571] r3:ee083fe0 [ 0.282470] [<c0060758>] (kthreadd+0x0/0x108) from [<c0044a08>] (do_exit+0x0/0x6dc) [ 0.290405] r5:c0060758 r4:00000000 [ 0.294189] ---[ end trace 1b75b31a2719ed1c ]--- [ 0.299041] possible reason: unannotated irqs-on. [ 0.303955] irq event stamp: 5 [ 0.307159] hardirqs last enabled at (4): [<c001331c>] no_work_pending+0x8/0x2c [ 0.314880] hardirqs last disabled at (5): [<c0089b08>] trace_hardirqs_on_caller+0x60/0x26c [ 0.323547] softirqs last enabled at (0): [<c003f754>] copy_process+0x33c/0xef4 [ 0.331207] softirqs last disabled at (0): [< (null)>] (null) [ 0.337585] CPU0: thread -1, cpu 0, socket 0, mpidr 80000000 Acked-by: Christoph Lameter <cl@linux.com> Signed-off-by: Ming Lei <tom.leiming@gmail.com> Signed-off-by: Tejun Heo <tj@kernel.org>
2012-02-15 12:54:38 +04:00
raw_local_irq_save(flags); \
*__this_cpu_ptr(&(pcp)) op val; \
percpu: use raw_local_irq_* in _this_cpu op It doesn't make sense to trace irq off or do irq flags lock proving inside 'this_cpu' operations, so replace local_irq_* with raw_local_irq_* in 'this_cpu' op. Also the patch fixes onelockdep warning[1] by the replacement, see below: In commit: 933393f58fef9963eac61db8093689544e29a600(percpu: Remove irqsafe_cpu_xxx variants), local_irq_save/restore(flags) are added inside this_cpu_inc operation, so that trace_hardirqs_off_caller will be called by trace_hardirqs_on_caller directly because __debug_atomic_inc is implemented as this_cpu_inc, which may trigger the lockdep warning[1], for example in the below ARM scenary: kernel_thread_helper /*irq disabled*/ ->trace_hardirqs_on_caller /*hardirqs_enabled was set*/ ->trace_hardirqs_off_caller /*hardirqs_enabled cleared*/ __this_cpu_add(redundant_hardirqs_on) ->trace_hardirqs_off_caller /*irq disabled, so call here*/ The 'unannotated irqs-on' warning will be triggered somewhere because irq is just enabled after the irq trace in kernel_thread_helper. [1], [ 0.162841] ------------[ cut here ]------------ [ 0.167694] WARNING: at kernel/lockdep.c:3493 check_flags+0xc0/0x1d0() [ 0.174468] Modules linked in: [ 0.177703] Backtrace: [ 0.180328] [<c00171f0>] (dump_backtrace+0x0/0x110) from [<c0412320>] (dump_stack+0x18/0x1c) [ 0.189086] r6:c051f778 r5:00000da5 r4:00000000 r3:60000093 [ 0.195007] [<c0412308>] (dump_stack+0x0/0x1c) from [<c00410e8>] (warn_slowpath_common+0x54/0x6c) [ 0.204223] [<c0041094>] (warn_slowpath_common+0x0/0x6c) from [<c0041124>] (warn_slowpath_null+0x24/0x2c) [ 0.214111] r8:00000000 r7:00000000 r6:ee069598 r5:60000013 r4:ee082000 [ 0.220825] r3:00000009 [ 0.223693] [<c0041100>] (warn_slowpath_null+0x0/0x2c) from [<c0088f38>] (check_flags+0xc0/0x1d0) [ 0.232910] [<c0088e78>] (check_flags+0x0/0x1d0) from [<c008d348>] (lock_acquire+0x4c/0x11c) [ 0.241668] [<c008d2fc>] (lock_acquire+0x0/0x11c) from [<c0415aa4>] (_raw_spin_lock+0x3c/0x74) [ 0.250610] [<c0415a68>] (_raw_spin_lock+0x0/0x74) from [<c010a844>] (set_task_comm+0x20/0xc0) [ 0.259521] r6:ee069588 r5:ee0691c0 r4:ee082000 [ 0.264404] [<c010a824>] (set_task_comm+0x0/0xc0) from [<c0060780>] (kthreadd+0x28/0x108) [ 0.272857] r8:00000000 r7:00000013 r6:c0044a08 r5:ee0691c0 r4:ee082000 [ 0.279571] r3:ee083fe0 [ 0.282470] [<c0060758>] (kthreadd+0x0/0x108) from [<c0044a08>] (do_exit+0x0/0x6dc) [ 0.290405] r5:c0060758 r4:00000000 [ 0.294189] ---[ end trace 1b75b31a2719ed1c ]--- [ 0.299041] possible reason: unannotated irqs-on. [ 0.303955] irq event stamp: 5 [ 0.307159] hardirqs last enabled at (4): [<c001331c>] no_work_pending+0x8/0x2c [ 0.314880] hardirqs last disabled at (5): [<c0089b08>] trace_hardirqs_on_caller+0x60/0x26c [ 0.323547] softirqs last enabled at (0): [<c003f754>] copy_process+0x33c/0xef4 [ 0.331207] softirqs last disabled at (0): [< (null)>] (null) [ 0.337585] CPU0: thread -1, cpu 0, socket 0, mpidr 80000000 Acked-by: Christoph Lameter <cl@linux.com> Signed-off-by: Ming Lei <tom.leiming@gmail.com> Signed-off-by: Tejun Heo <tj@kernel.org>
2012-02-15 12:54:38 +04:00
raw_local_irq_restore(flags); \
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
} while (0)
#ifndef this_cpu_write
# ifndef this_cpu_write_1
# define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
# define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
# define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
# define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val))
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
#endif
#ifndef this_cpu_add
# ifndef this_cpu_add_1
# define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
# define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
# define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
# define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val))
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
#endif
#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val))
#endif
#ifndef this_cpu_inc
# define this_cpu_inc(pcp) this_cpu_add((pcp), 1)
#endif
#ifndef this_cpu_dec
# define this_cpu_dec(pcp) this_cpu_sub((pcp), 1)
#endif
#ifndef this_cpu_and
# ifndef this_cpu_and_1
# define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
# define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
# define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
# define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val))
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
#endif
#ifndef this_cpu_or
# ifndef this_cpu_or_1
# define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
# define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
# define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
# define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
#endif
#ifndef this_cpu_xor
# ifndef this_cpu_xor_1
# define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_2
# define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_4
# define this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_8
# define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
#endif
#define _this_cpu_generic_add_return(pcp, val) \
({ \
typeof(pcp) ret__; \
unsigned long flags; \
percpu: use raw_local_irq_* in _this_cpu op It doesn't make sense to trace irq off or do irq flags lock proving inside 'this_cpu' operations, so replace local_irq_* with raw_local_irq_* in 'this_cpu' op. Also the patch fixes onelockdep warning[1] by the replacement, see below: In commit: 933393f58fef9963eac61db8093689544e29a600(percpu: Remove irqsafe_cpu_xxx variants), local_irq_save/restore(flags) are added inside this_cpu_inc operation, so that trace_hardirqs_off_caller will be called by trace_hardirqs_on_caller directly because __debug_atomic_inc is implemented as this_cpu_inc, which may trigger the lockdep warning[1], for example in the below ARM scenary: kernel_thread_helper /*irq disabled*/ ->trace_hardirqs_on_caller /*hardirqs_enabled was set*/ ->trace_hardirqs_off_caller /*hardirqs_enabled cleared*/ __this_cpu_add(redundant_hardirqs_on) ->trace_hardirqs_off_caller /*irq disabled, so call here*/ The 'unannotated irqs-on' warning will be triggered somewhere because irq is just enabled after the irq trace in kernel_thread_helper. [1], [ 0.162841] ------------[ cut here ]------------ [ 0.167694] WARNING: at kernel/lockdep.c:3493 check_flags+0xc0/0x1d0() [ 0.174468] Modules linked in: [ 0.177703] Backtrace: [ 0.180328] [<c00171f0>] (dump_backtrace+0x0/0x110) from [<c0412320>] (dump_stack+0x18/0x1c) [ 0.189086] r6:c051f778 r5:00000da5 r4:00000000 r3:60000093 [ 0.195007] [<c0412308>] (dump_stack+0x0/0x1c) from [<c00410e8>] (warn_slowpath_common+0x54/0x6c) [ 0.204223] [<c0041094>] (warn_slowpath_common+0x0/0x6c) from [<c0041124>] (warn_slowpath_null+0x24/0x2c) [ 0.214111] r8:00000000 r7:00000000 r6:ee069598 r5:60000013 r4:ee082000 [ 0.220825] r3:00000009 [ 0.223693] [<c0041100>] (warn_slowpath_null+0x0/0x2c) from [<c0088f38>] (check_flags+0xc0/0x1d0) [ 0.232910] [<c0088e78>] (check_flags+0x0/0x1d0) from [<c008d348>] (lock_acquire+0x4c/0x11c) [ 0.241668] [<c008d2fc>] (lock_acquire+0x0/0x11c) from [<c0415aa4>] (_raw_spin_lock+0x3c/0x74) [ 0.250610] [<c0415a68>] (_raw_spin_lock+0x0/0x74) from [<c010a844>] (set_task_comm+0x20/0xc0) [ 0.259521] r6:ee069588 r5:ee0691c0 r4:ee082000 [ 0.264404] [<c010a824>] (set_task_comm+0x0/0xc0) from [<c0060780>] (kthreadd+0x28/0x108) [ 0.272857] r8:00000000 r7:00000013 r6:c0044a08 r5:ee0691c0 r4:ee082000 [ 0.279571] r3:ee083fe0 [ 0.282470] [<c0060758>] (kthreadd+0x0/0x108) from [<c0044a08>] (do_exit+0x0/0x6dc) [ 0.290405] r5:c0060758 r4:00000000 [ 0.294189] ---[ end trace 1b75b31a2719ed1c ]--- [ 0.299041] possible reason: unannotated irqs-on. [ 0.303955] irq event stamp: 5 [ 0.307159] hardirqs last enabled at (4): [<c001331c>] no_work_pending+0x8/0x2c [ 0.314880] hardirqs last disabled at (5): [<c0089b08>] trace_hardirqs_on_caller+0x60/0x26c [ 0.323547] softirqs last enabled at (0): [<c003f754>] copy_process+0x33c/0xef4 [ 0.331207] softirqs last disabled at (0): [< (null)>] (null) [ 0.337585] CPU0: thread -1, cpu 0, socket 0, mpidr 80000000 Acked-by: Christoph Lameter <cl@linux.com> Signed-off-by: Ming Lei <tom.leiming@gmail.com> Signed-off-by: Tejun Heo <tj@kernel.org>
2012-02-15 12:54:38 +04:00
raw_local_irq_save(flags); \
__this_cpu_add(pcp, val); \
ret__ = __this_cpu_read(pcp); \
percpu: use raw_local_irq_* in _this_cpu op It doesn't make sense to trace irq off or do irq flags lock proving inside 'this_cpu' operations, so replace local_irq_* with raw_local_irq_* in 'this_cpu' op. Also the patch fixes onelockdep warning[1] by the replacement, see below: In commit: 933393f58fef9963eac61db8093689544e29a600(percpu: Remove irqsafe_cpu_xxx variants), local_irq_save/restore(flags) are added inside this_cpu_inc operation, so that trace_hardirqs_off_caller will be called by trace_hardirqs_on_caller directly because __debug_atomic_inc is implemented as this_cpu_inc, which may trigger the lockdep warning[1], for example in the below ARM scenary: kernel_thread_helper /*irq disabled*/ ->trace_hardirqs_on_caller /*hardirqs_enabled was set*/ ->trace_hardirqs_off_caller /*hardirqs_enabled cleared*/ __this_cpu_add(redundant_hardirqs_on) ->trace_hardirqs_off_caller /*irq disabled, so call here*/ The 'unannotated irqs-on' warning will be triggered somewhere because irq is just enabled after the irq trace in kernel_thread_helper. [1], [ 0.162841] ------------[ cut here ]------------ [ 0.167694] WARNING: at kernel/lockdep.c:3493 check_flags+0xc0/0x1d0() [ 0.174468] Modules linked in: [ 0.177703] Backtrace: [ 0.180328] [<c00171f0>] (dump_backtrace+0x0/0x110) from [<c0412320>] (dump_stack+0x18/0x1c) [ 0.189086] r6:c051f778 r5:00000da5 r4:00000000 r3:60000093 [ 0.195007] [<c0412308>] (dump_stack+0x0/0x1c) from [<c00410e8>] (warn_slowpath_common+0x54/0x6c) [ 0.204223] [<c0041094>] (warn_slowpath_common+0x0/0x6c) from [<c0041124>] (warn_slowpath_null+0x24/0x2c) [ 0.214111] r8:00000000 r7:00000000 r6:ee069598 r5:60000013 r4:ee082000 [ 0.220825] r3:00000009 [ 0.223693] [<c0041100>] (warn_slowpath_null+0x0/0x2c) from [<c0088f38>] (check_flags+0xc0/0x1d0) [ 0.232910] [<c0088e78>] (check_flags+0x0/0x1d0) from [<c008d348>] (lock_acquire+0x4c/0x11c) [ 0.241668] [<c008d2fc>] (lock_acquire+0x0/0x11c) from [<c0415aa4>] (_raw_spin_lock+0x3c/0x74) [ 0.250610] [<c0415a68>] (_raw_spin_lock+0x0/0x74) from [<c010a844>] (set_task_comm+0x20/0xc0) [ 0.259521] r6:ee069588 r5:ee0691c0 r4:ee082000 [ 0.264404] [<c010a824>] (set_task_comm+0x0/0xc0) from [<c0060780>] (kthreadd+0x28/0x108) [ 0.272857] r8:00000000 r7:00000013 r6:c0044a08 r5:ee0691c0 r4:ee082000 [ 0.279571] r3:ee083fe0 [ 0.282470] [<c0060758>] (kthreadd+0x0/0x108) from [<c0044a08>] (do_exit+0x0/0x6dc) [ 0.290405] r5:c0060758 r4:00000000 [ 0.294189] ---[ end trace 1b75b31a2719ed1c ]--- [ 0.299041] possible reason: unannotated irqs-on. [ 0.303955] irq event stamp: 5 [ 0.307159] hardirqs last enabled at (4): [<c001331c>] no_work_pending+0x8/0x2c [ 0.314880] hardirqs last disabled at (5): [<c0089b08>] trace_hardirqs_on_caller+0x60/0x26c [ 0.323547] softirqs last enabled at (0): [<c003f754>] copy_process+0x33c/0xef4 [ 0.331207] softirqs last disabled at (0): [< (null)>] (null) [ 0.337585] CPU0: thread -1, cpu 0, socket 0, mpidr 80000000 Acked-by: Christoph Lameter <cl@linux.com> Signed-off-by: Ming Lei <tom.leiming@gmail.com> Signed-off-by: Tejun Heo <tj@kernel.org>
2012-02-15 12:54:38 +04:00
raw_local_irq_restore(flags); \
ret__; \
})
#ifndef this_cpu_add_return
# ifndef this_cpu_add_return_1
# define this_cpu_add_return_1(pcp, val) _this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_2
# define this_cpu_add_return_2(pcp, val) _this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_4
# define this_cpu_add_return_4(pcp, val) _this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_8
# define this_cpu_add_return_8(pcp, val) _this_cpu_generic_add_return(pcp, val)
# endif
# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif
#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val))
#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1)
#define _this_cpu_generic_xchg(pcp, nval) \
({ typeof(pcp) ret__; \
unsigned long flags; \
percpu: use raw_local_irq_* in _this_cpu op It doesn't make sense to trace irq off or do irq flags lock proving inside 'this_cpu' operations, so replace local_irq_* with raw_local_irq_* in 'this_cpu' op. Also the patch fixes onelockdep warning[1] by the replacement, see below: In commit: 933393f58fef9963eac61db8093689544e29a600(percpu: Remove irqsafe_cpu_xxx variants), local_irq_save/restore(flags) are added inside this_cpu_inc operation, so that trace_hardirqs_off_caller will be called by trace_hardirqs_on_caller directly because __debug_atomic_inc is implemented as this_cpu_inc, which may trigger the lockdep warning[1], for example in the below ARM scenary: kernel_thread_helper /*irq disabled*/ ->trace_hardirqs_on_caller /*hardirqs_enabled was set*/ ->trace_hardirqs_off_caller /*hardirqs_enabled cleared*/ __this_cpu_add(redundant_hardirqs_on) ->trace_hardirqs_off_caller /*irq disabled, so call here*/ The 'unannotated irqs-on' warning will be triggered somewhere because irq is just enabled after the irq trace in kernel_thread_helper. [1], [ 0.162841] ------------[ cut here ]------------ [ 0.167694] WARNING: at kernel/lockdep.c:3493 check_flags+0xc0/0x1d0() [ 0.174468] Modules linked in: [ 0.177703] Backtrace: [ 0.180328] [<c00171f0>] (dump_backtrace+0x0/0x110) from [<c0412320>] (dump_stack+0x18/0x1c) [ 0.189086] r6:c051f778 r5:00000da5 r4:00000000 r3:60000093 [ 0.195007] [<c0412308>] (dump_stack+0x0/0x1c) from [<c00410e8>] (warn_slowpath_common+0x54/0x6c) [ 0.204223] [<c0041094>] (warn_slowpath_common+0x0/0x6c) from [<c0041124>] (warn_slowpath_null+0x24/0x2c) [ 0.214111] r8:00000000 r7:00000000 r6:ee069598 r5:60000013 r4:ee082000 [ 0.220825] r3:00000009 [ 0.223693] [<c0041100>] (warn_slowpath_null+0x0/0x2c) from [<c0088f38>] (check_flags+0xc0/0x1d0) [ 0.232910] [<c0088e78>] (check_flags+0x0/0x1d0) from [<c008d348>] (lock_acquire+0x4c/0x11c) [ 0.241668] [<c008d2fc>] (lock_acquire+0x0/0x11c) from [<c0415aa4>] (_raw_spin_lock+0x3c/0x74) [ 0.250610] [<c0415a68>] (_raw_spin_lock+0x0/0x74) from [<c010a844>] (set_task_comm+0x20/0xc0) [ 0.259521] r6:ee069588 r5:ee0691c0 r4:ee082000 [ 0.264404] [<c010a824>] (set_task_comm+0x0/0xc0) from [<c0060780>] (kthreadd+0x28/0x108) [ 0.272857] r8:00000000 r7:00000013 r6:c0044a08 r5:ee0691c0 r4:ee082000 [ 0.279571] r3:ee083fe0 [ 0.282470] [<c0060758>] (kthreadd+0x0/0x108) from [<c0044a08>] (do_exit+0x0/0x6dc) [ 0.290405] r5:c0060758 r4:00000000 [ 0.294189] ---[ end trace 1b75b31a2719ed1c ]--- [ 0.299041] possible reason: unannotated irqs-on. [ 0.303955] irq event stamp: 5 [ 0.307159] hardirqs last enabled at (4): [<c001331c>] no_work_pending+0x8/0x2c [ 0.314880] hardirqs last disabled at (5): [<c0089b08>] trace_hardirqs_on_caller+0x60/0x26c [ 0.323547] softirqs last enabled at (0): [<c003f754>] copy_process+0x33c/0xef4 [ 0.331207] softirqs last disabled at (0): [< (null)>] (null) [ 0.337585] CPU0: thread -1, cpu 0, socket 0, mpidr 80000000 Acked-by: Christoph Lameter <cl@linux.com> Signed-off-by: Ming Lei <tom.leiming@gmail.com> Signed-off-by: Tejun Heo <tj@kernel.org>
2012-02-15 12:54:38 +04:00
raw_local_irq_save(flags); \
ret__ = __this_cpu_read(pcp); \
__this_cpu_write(pcp, nval); \
percpu: use raw_local_irq_* in _this_cpu op It doesn't make sense to trace irq off or do irq flags lock proving inside 'this_cpu' operations, so replace local_irq_* with raw_local_irq_* in 'this_cpu' op. Also the patch fixes onelockdep warning[1] by the replacement, see below: In commit: 933393f58fef9963eac61db8093689544e29a600(percpu: Remove irqsafe_cpu_xxx variants), local_irq_save/restore(flags) are added inside this_cpu_inc operation, so that trace_hardirqs_off_caller will be called by trace_hardirqs_on_caller directly because __debug_atomic_inc is implemented as this_cpu_inc, which may trigger the lockdep warning[1], for example in the below ARM scenary: kernel_thread_helper /*irq disabled*/ ->trace_hardirqs_on_caller /*hardirqs_enabled was set*/ ->trace_hardirqs_off_caller /*hardirqs_enabled cleared*/ __this_cpu_add(redundant_hardirqs_on) ->trace_hardirqs_off_caller /*irq disabled, so call here*/ The 'unannotated irqs-on' warning will be triggered somewhere because irq is just enabled after the irq trace in kernel_thread_helper. [1], [ 0.162841] ------------[ cut here ]------------ [ 0.167694] WARNING: at kernel/lockdep.c:3493 check_flags+0xc0/0x1d0() [ 0.174468] Modules linked in: [ 0.177703] Backtrace: [ 0.180328] [<c00171f0>] (dump_backtrace+0x0/0x110) from [<c0412320>] (dump_stack+0x18/0x1c) [ 0.189086] r6:c051f778 r5:00000da5 r4:00000000 r3:60000093 [ 0.195007] [<c0412308>] (dump_stack+0x0/0x1c) from [<c00410e8>] (warn_slowpath_common+0x54/0x6c) [ 0.204223] [<c0041094>] (warn_slowpath_common+0x0/0x6c) from [<c0041124>] (warn_slowpath_null+0x24/0x2c) [ 0.214111] r8:00000000 r7:00000000 r6:ee069598 r5:60000013 r4:ee082000 [ 0.220825] r3:00000009 [ 0.223693] [<c0041100>] (warn_slowpath_null+0x0/0x2c) from [<c0088f38>] (check_flags+0xc0/0x1d0) [ 0.232910] [<c0088e78>] (check_flags+0x0/0x1d0) from [<c008d348>] (lock_acquire+0x4c/0x11c) [ 0.241668] [<c008d2fc>] (lock_acquire+0x0/0x11c) from [<c0415aa4>] (_raw_spin_lock+0x3c/0x74) [ 0.250610] [<c0415a68>] (_raw_spin_lock+0x0/0x74) from [<c010a844>] (set_task_comm+0x20/0xc0) [ 0.259521] r6:ee069588 r5:ee0691c0 r4:ee082000 [ 0.264404] [<c010a824>] (set_task_comm+0x0/0xc0) from [<c0060780>] (kthreadd+0x28/0x108) [ 0.272857] r8:00000000 r7:00000013 r6:c0044a08 r5:ee0691c0 r4:ee082000 [ 0.279571] r3:ee083fe0 [ 0.282470] [<c0060758>] (kthreadd+0x0/0x108) from [<c0044a08>] (do_exit+0x0/0x6dc) [ 0.290405] r5:c0060758 r4:00000000 [ 0.294189] ---[ end trace 1b75b31a2719ed1c ]--- [ 0.299041] possible reason: unannotated irqs-on. [ 0.303955] irq event stamp: 5 [ 0.307159] hardirqs last enabled at (4): [<c001331c>] no_work_pending+0x8/0x2c [ 0.314880] hardirqs last disabled at (5): [<c0089b08>] trace_hardirqs_on_caller+0x60/0x26c [ 0.323547] softirqs last enabled at (0): [<c003f754>] copy_process+0x33c/0xef4 [ 0.331207] softirqs last disabled at (0): [< (null)>] (null) [ 0.337585] CPU0: thread -1, cpu 0, socket 0, mpidr 80000000 Acked-by: Christoph Lameter <cl@linux.com> Signed-off-by: Ming Lei <tom.leiming@gmail.com> Signed-off-by: Tejun Heo <tj@kernel.org>
2012-02-15 12:54:38 +04:00
raw_local_irq_restore(flags); \
ret__; \
})
#ifndef this_cpu_xchg
# ifndef this_cpu_xchg_1
# define this_cpu_xchg_1(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_2
# define this_cpu_xchg_2(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_4
# define this_cpu_xchg_4(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_8
# define this_cpu_xchg_8(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
# endif
# define this_cpu_xchg(pcp, nval) \
__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
#endif
#define _this_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
typeof(pcp) ret__; \
unsigned long flags; \
percpu: use raw_local_irq_* in _this_cpu op It doesn't make sense to trace irq off or do irq flags lock proving inside 'this_cpu' operations, so replace local_irq_* with raw_local_irq_* in 'this_cpu' op. Also the patch fixes onelockdep warning[1] by the replacement, see below: In commit: 933393f58fef9963eac61db8093689544e29a600(percpu: Remove irqsafe_cpu_xxx variants), local_irq_save/restore(flags) are added inside this_cpu_inc operation, so that trace_hardirqs_off_caller will be called by trace_hardirqs_on_caller directly because __debug_atomic_inc is implemented as this_cpu_inc, which may trigger the lockdep warning[1], for example in the below ARM scenary: kernel_thread_helper /*irq disabled*/ ->trace_hardirqs_on_caller /*hardirqs_enabled was set*/ ->trace_hardirqs_off_caller /*hardirqs_enabled cleared*/ __this_cpu_add(redundant_hardirqs_on) ->trace_hardirqs_off_caller /*irq disabled, so call here*/ The 'unannotated irqs-on' warning will be triggered somewhere because irq is just enabled after the irq trace in kernel_thread_helper. [1], [ 0.162841] ------------[ cut here ]------------ [ 0.167694] WARNING: at kernel/lockdep.c:3493 check_flags+0xc0/0x1d0() [ 0.174468] Modules linked in: [ 0.177703] Backtrace: [ 0.180328] [<c00171f0>] (dump_backtrace+0x0/0x110) from [<c0412320>] (dump_stack+0x18/0x1c) [ 0.189086] r6:c051f778 r5:00000da5 r4:00000000 r3:60000093 [ 0.195007] [<c0412308>] (dump_stack+0x0/0x1c) from [<c00410e8>] (warn_slowpath_common+0x54/0x6c) [ 0.204223] [<c0041094>] (warn_slowpath_common+0x0/0x6c) from [<c0041124>] (warn_slowpath_null+0x24/0x2c) [ 0.214111] r8:00000000 r7:00000000 r6:ee069598 r5:60000013 r4:ee082000 [ 0.220825] r3:00000009 [ 0.223693] [<c0041100>] (warn_slowpath_null+0x0/0x2c) from [<c0088f38>] (check_flags+0xc0/0x1d0) [ 0.232910] [<c0088e78>] (check_flags+0x0/0x1d0) from [<c008d348>] (lock_acquire+0x4c/0x11c) [ 0.241668] [<c008d2fc>] (lock_acquire+0x0/0x11c) from [<c0415aa4>] (_raw_spin_lock+0x3c/0x74) [ 0.250610] [<c0415a68>] (_raw_spin_lock+0x0/0x74) from [<c010a844>] (set_task_comm+0x20/0xc0) [ 0.259521] r6:ee069588 r5:ee0691c0 r4:ee082000 [ 0.264404] [<c010a824>] (set_task_comm+0x0/0xc0) from [<c0060780>] (kthreadd+0x28/0x108) [ 0.272857] r8:00000000 r7:00000013 r6:c0044a08 r5:ee0691c0 r4:ee082000 [ 0.279571] r3:ee083fe0 [ 0.282470] [<c0060758>] (kthreadd+0x0/0x108) from [<c0044a08>] (do_exit+0x0/0x6dc) [ 0.290405] r5:c0060758 r4:00000000 [ 0.294189] ---[ end trace 1b75b31a2719ed1c ]--- [ 0.299041] possible reason: unannotated irqs-on. [ 0.303955] irq event stamp: 5 [ 0.307159] hardirqs last enabled at (4): [<c001331c>] no_work_pending+0x8/0x2c [ 0.314880] hardirqs last disabled at (5): [<c0089b08>] trace_hardirqs_on_caller+0x60/0x26c [ 0.323547] softirqs last enabled at (0): [<c003f754>] copy_process+0x33c/0xef4 [ 0.331207] softirqs last disabled at (0): [< (null)>] (null) [ 0.337585] CPU0: thread -1, cpu 0, socket 0, mpidr 80000000 Acked-by: Christoph Lameter <cl@linux.com> Signed-off-by: Ming Lei <tom.leiming@gmail.com> Signed-off-by: Tejun Heo <tj@kernel.org>
2012-02-15 12:54:38 +04:00
raw_local_irq_save(flags); \
ret__ = __this_cpu_read(pcp); \
if (ret__ == (oval)) \
__this_cpu_write(pcp, nval); \
percpu: use raw_local_irq_* in _this_cpu op It doesn't make sense to trace irq off or do irq flags lock proving inside 'this_cpu' operations, so replace local_irq_* with raw_local_irq_* in 'this_cpu' op. Also the patch fixes onelockdep warning[1] by the replacement, see below: In commit: 933393f58fef9963eac61db8093689544e29a600(percpu: Remove irqsafe_cpu_xxx variants), local_irq_save/restore(flags) are added inside this_cpu_inc operation, so that trace_hardirqs_off_caller will be called by trace_hardirqs_on_caller directly because __debug_atomic_inc is implemented as this_cpu_inc, which may trigger the lockdep warning[1], for example in the below ARM scenary: kernel_thread_helper /*irq disabled*/ ->trace_hardirqs_on_caller /*hardirqs_enabled was set*/ ->trace_hardirqs_off_caller /*hardirqs_enabled cleared*/ __this_cpu_add(redundant_hardirqs_on) ->trace_hardirqs_off_caller /*irq disabled, so call here*/ The 'unannotated irqs-on' warning will be triggered somewhere because irq is just enabled after the irq trace in kernel_thread_helper. [1], [ 0.162841] ------------[ cut here ]------------ [ 0.167694] WARNING: at kernel/lockdep.c:3493 check_flags+0xc0/0x1d0() [ 0.174468] Modules linked in: [ 0.177703] Backtrace: [ 0.180328] [<c00171f0>] (dump_backtrace+0x0/0x110) from [<c0412320>] (dump_stack+0x18/0x1c) [ 0.189086] r6:c051f778 r5:00000da5 r4:00000000 r3:60000093 [ 0.195007] [<c0412308>] (dump_stack+0x0/0x1c) from [<c00410e8>] (warn_slowpath_common+0x54/0x6c) [ 0.204223] [<c0041094>] (warn_slowpath_common+0x0/0x6c) from [<c0041124>] (warn_slowpath_null+0x24/0x2c) [ 0.214111] r8:00000000 r7:00000000 r6:ee069598 r5:60000013 r4:ee082000 [ 0.220825] r3:00000009 [ 0.223693] [<c0041100>] (warn_slowpath_null+0x0/0x2c) from [<c0088f38>] (check_flags+0xc0/0x1d0) [ 0.232910] [<c0088e78>] (check_flags+0x0/0x1d0) from [<c008d348>] (lock_acquire+0x4c/0x11c) [ 0.241668] [<c008d2fc>] (lock_acquire+0x0/0x11c) from [<c0415aa4>] (_raw_spin_lock+0x3c/0x74) [ 0.250610] [<c0415a68>] (_raw_spin_lock+0x0/0x74) from [<c010a844>] (set_task_comm+0x20/0xc0) [ 0.259521] r6:ee069588 r5:ee0691c0 r4:ee082000 [ 0.264404] [<c010a824>] (set_task_comm+0x0/0xc0) from [<c0060780>] (kthreadd+0x28/0x108) [ 0.272857] r8:00000000 r7:00000013 r6:c0044a08 r5:ee0691c0 r4:ee082000 [ 0.279571] r3:ee083fe0 [ 0.282470] [<c0060758>] (kthreadd+0x0/0x108) from [<c0044a08>] (do_exit+0x0/0x6dc) [ 0.290405] r5:c0060758 r4:00000000 [ 0.294189] ---[ end trace 1b75b31a2719ed1c ]--- [ 0.299041] possible reason: unannotated irqs-on. [ 0.303955] irq event stamp: 5 [ 0.307159] hardirqs last enabled at (4): [<c001331c>] no_work_pending+0x8/0x2c [ 0.314880] hardirqs last disabled at (5): [<c0089b08>] trace_hardirqs_on_caller+0x60/0x26c [ 0.323547] softirqs last enabled at (0): [<c003f754>] copy_process+0x33c/0xef4 [ 0.331207] softirqs last disabled at (0): [< (null)>] (null) [ 0.337585] CPU0: thread -1, cpu 0, socket 0, mpidr 80000000 Acked-by: Christoph Lameter <cl@linux.com> Signed-off-by: Ming Lei <tom.leiming@gmail.com> Signed-off-by: Tejun Heo <tj@kernel.org>
2012-02-15 12:54:38 +04:00
raw_local_irq_restore(flags); \
ret__; \
})
#ifndef this_cpu_cmpxchg
# ifndef this_cpu_cmpxchg_1
# define this_cpu_cmpxchg_1(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_2
# define this_cpu_cmpxchg_2(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_4
# define this_cpu_cmpxchg_4(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_8
# define this_cpu_cmpxchg_8(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define this_cpu_cmpxchg(pcp, oval, nval) \
__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#endif
/*
* cmpxchg_double replaces two adjacent scalars at once. The first
* two parameters are per cpu variables which have to be of the same
* size. A truth value is returned to indicate success or failure
* (since a double register result is difficult to handle). There is
* very limited hardware support for these operations, so only certain
* sizes may work.
*/
#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({ \
int ret__; \
unsigned long flags; \
percpu: use raw_local_irq_* in _this_cpu op It doesn't make sense to trace irq off or do irq flags lock proving inside 'this_cpu' operations, so replace local_irq_* with raw_local_irq_* in 'this_cpu' op. Also the patch fixes onelockdep warning[1] by the replacement, see below: In commit: 933393f58fef9963eac61db8093689544e29a600(percpu: Remove irqsafe_cpu_xxx variants), local_irq_save/restore(flags) are added inside this_cpu_inc operation, so that trace_hardirqs_off_caller will be called by trace_hardirqs_on_caller directly because __debug_atomic_inc is implemented as this_cpu_inc, which may trigger the lockdep warning[1], for example in the below ARM scenary: kernel_thread_helper /*irq disabled*/ ->trace_hardirqs_on_caller /*hardirqs_enabled was set*/ ->trace_hardirqs_off_caller /*hardirqs_enabled cleared*/ __this_cpu_add(redundant_hardirqs_on) ->trace_hardirqs_off_caller /*irq disabled, so call here*/ The 'unannotated irqs-on' warning will be triggered somewhere because irq is just enabled after the irq trace in kernel_thread_helper. [1], [ 0.162841] ------------[ cut here ]------------ [ 0.167694] WARNING: at kernel/lockdep.c:3493 check_flags+0xc0/0x1d0() [ 0.174468] Modules linked in: [ 0.177703] Backtrace: [ 0.180328] [<c00171f0>] (dump_backtrace+0x0/0x110) from [<c0412320>] (dump_stack+0x18/0x1c) [ 0.189086] r6:c051f778 r5:00000da5 r4:00000000 r3:60000093 [ 0.195007] [<c0412308>] (dump_stack+0x0/0x1c) from [<c00410e8>] (warn_slowpath_common+0x54/0x6c) [ 0.204223] [<c0041094>] (warn_slowpath_common+0x0/0x6c) from [<c0041124>] (warn_slowpath_null+0x24/0x2c) [ 0.214111] r8:00000000 r7:00000000 r6:ee069598 r5:60000013 r4:ee082000 [ 0.220825] r3:00000009 [ 0.223693] [<c0041100>] (warn_slowpath_null+0x0/0x2c) from [<c0088f38>] (check_flags+0xc0/0x1d0) [ 0.232910] [<c0088e78>] (check_flags+0x0/0x1d0) from [<c008d348>] (lock_acquire+0x4c/0x11c) [ 0.241668] [<c008d2fc>] (lock_acquire+0x0/0x11c) from [<c0415aa4>] (_raw_spin_lock+0x3c/0x74) [ 0.250610] [<c0415a68>] (_raw_spin_lock+0x0/0x74) from [<c010a844>] (set_task_comm+0x20/0xc0) [ 0.259521] r6:ee069588 r5:ee0691c0 r4:ee082000 [ 0.264404] [<c010a824>] (set_task_comm+0x0/0xc0) from [<c0060780>] (kthreadd+0x28/0x108) [ 0.272857] r8:00000000 r7:00000013 r6:c0044a08 r5:ee0691c0 r4:ee082000 [ 0.279571] r3:ee083fe0 [ 0.282470] [<c0060758>] (kthreadd+0x0/0x108) from [<c0044a08>] (do_exit+0x0/0x6dc) [ 0.290405] r5:c0060758 r4:00000000 [ 0.294189] ---[ end trace 1b75b31a2719ed1c ]--- [ 0.299041] possible reason: unannotated irqs-on. [ 0.303955] irq event stamp: 5 [ 0.307159] hardirqs last enabled at (4): [<c001331c>] no_work_pending+0x8/0x2c [ 0.314880] hardirqs last disabled at (5): [<c0089b08>] trace_hardirqs_on_caller+0x60/0x26c [ 0.323547] softirqs last enabled at (0): [<c003f754>] copy_process+0x33c/0xef4 [ 0.331207] softirqs last disabled at (0): [< (null)>] (null) [ 0.337585] CPU0: thread -1, cpu 0, socket 0, mpidr 80000000 Acked-by: Christoph Lameter <cl@linux.com> Signed-off-by: Ming Lei <tom.leiming@gmail.com> Signed-off-by: Tejun Heo <tj@kernel.org>
2012-02-15 12:54:38 +04:00
raw_local_irq_save(flags); \
ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
oval1, oval2, nval1, nval2); \
percpu: use raw_local_irq_* in _this_cpu op It doesn't make sense to trace irq off or do irq flags lock proving inside 'this_cpu' operations, so replace local_irq_* with raw_local_irq_* in 'this_cpu' op. Also the patch fixes onelockdep warning[1] by the replacement, see below: In commit: 933393f58fef9963eac61db8093689544e29a600(percpu: Remove irqsafe_cpu_xxx variants), local_irq_save/restore(flags) are added inside this_cpu_inc operation, so that trace_hardirqs_off_caller will be called by trace_hardirqs_on_caller directly because __debug_atomic_inc is implemented as this_cpu_inc, which may trigger the lockdep warning[1], for example in the below ARM scenary: kernel_thread_helper /*irq disabled*/ ->trace_hardirqs_on_caller /*hardirqs_enabled was set*/ ->trace_hardirqs_off_caller /*hardirqs_enabled cleared*/ __this_cpu_add(redundant_hardirqs_on) ->trace_hardirqs_off_caller /*irq disabled, so call here*/ The 'unannotated irqs-on' warning will be triggered somewhere because irq is just enabled after the irq trace in kernel_thread_helper. [1], [ 0.162841] ------------[ cut here ]------------ [ 0.167694] WARNING: at kernel/lockdep.c:3493 check_flags+0xc0/0x1d0() [ 0.174468] Modules linked in: [ 0.177703] Backtrace: [ 0.180328] [<c00171f0>] (dump_backtrace+0x0/0x110) from [<c0412320>] (dump_stack+0x18/0x1c) [ 0.189086] r6:c051f778 r5:00000da5 r4:00000000 r3:60000093 [ 0.195007] [<c0412308>] (dump_stack+0x0/0x1c) from [<c00410e8>] (warn_slowpath_common+0x54/0x6c) [ 0.204223] [<c0041094>] (warn_slowpath_common+0x0/0x6c) from [<c0041124>] (warn_slowpath_null+0x24/0x2c) [ 0.214111] r8:00000000 r7:00000000 r6:ee069598 r5:60000013 r4:ee082000 [ 0.220825] r3:00000009 [ 0.223693] [<c0041100>] (warn_slowpath_null+0x0/0x2c) from [<c0088f38>] (check_flags+0xc0/0x1d0) [ 0.232910] [<c0088e78>] (check_flags+0x0/0x1d0) from [<c008d348>] (lock_acquire+0x4c/0x11c) [ 0.241668] [<c008d2fc>] (lock_acquire+0x0/0x11c) from [<c0415aa4>] (_raw_spin_lock+0x3c/0x74) [ 0.250610] [<c0415a68>] (_raw_spin_lock+0x0/0x74) from [<c010a844>] (set_task_comm+0x20/0xc0) [ 0.259521] r6:ee069588 r5:ee0691c0 r4:ee082000 [ 0.264404] [<c010a824>] (set_task_comm+0x0/0xc0) from [<c0060780>] (kthreadd+0x28/0x108) [ 0.272857] r8:00000000 r7:00000013 r6:c0044a08 r5:ee0691c0 r4:ee082000 [ 0.279571] r3:ee083fe0 [ 0.282470] [<c0060758>] (kthreadd+0x0/0x108) from [<c0044a08>] (do_exit+0x0/0x6dc) [ 0.290405] r5:c0060758 r4:00000000 [ 0.294189] ---[ end trace 1b75b31a2719ed1c ]--- [ 0.299041] possible reason: unannotated irqs-on. [ 0.303955] irq event stamp: 5 [ 0.307159] hardirqs last enabled at (4): [<c001331c>] no_work_pending+0x8/0x2c [ 0.314880] hardirqs last disabled at (5): [<c0089b08>] trace_hardirqs_on_caller+0x60/0x26c [ 0.323547] softirqs last enabled at (0): [<c003f754>] copy_process+0x33c/0xef4 [ 0.331207] softirqs last disabled at (0): [< (null)>] (null) [ 0.337585] CPU0: thread -1, cpu 0, socket 0, mpidr 80000000 Acked-by: Christoph Lameter <cl@linux.com> Signed-off-by: Ming Lei <tom.leiming@gmail.com> Signed-off-by: Tejun Heo <tj@kernel.org>
2012-02-15 12:54:38 +04:00
raw_local_irq_restore(flags); \
ret__; \
})
#ifndef this_cpu_cmpxchg_double
# ifndef this_cpu_cmpxchg_double_1
# define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_2
# define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_4
# define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_8
# define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
/*
* Generic percpu operations for context that are safe from preemption/interrupts.
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
* Either we do not care about races or the caller has the
* responsibility of handling preemption/interrupt issues. Arch code can still
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
* override these instructions since the arch per cpu code may be more
* efficient and may actually get race freeness for free (that is the
* case for x86 for example).
*
* If there is no other protection through preempt disable and/or
* disabling interupts then one of these RMW operations can show unexpected
* behavior because the execution thread was rescheduled on another processor
* or an interrupt occurred and the same percpu variable was modified from
* the interrupt context.
*/
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
# define __this_cpu_read_1(pcp) (*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
# define __this_cpu_read_2(pcp) (*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
# define __this_cpu_read_4(pcp) (*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
# define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp) __pcpu_size_call_return(__this_cpu_read_, (pcp))
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
#endif
#define __this_cpu_generic_to_op(pcp, val, op) \
do { \
*__this_cpu_ptr(&(pcp)) op val; \
} while (0)
#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
# define __this_cpu_write_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
# define __this_cpu_write_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
# define __this_cpu_write_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
# define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val) __pcpu_size_call(__this_cpu_write_, (pcp), (val))
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
#endif
#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
# define __this_cpu_add_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
# define __this_cpu_add_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
# define __this_cpu_add_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
# define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val) __pcpu_size_call(__this_cpu_add_, (pcp), (val))
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
#endif
#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val))
#endif
#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1)
#endif
#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1)
#endif
#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
# define __this_cpu_and_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
# define __this_cpu_and_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
# define __this_cpu_and_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
# define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val) __pcpu_size_call(__this_cpu_and_, (pcp), (val))
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
#endif
#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
# define __this_cpu_or_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
# define __this_cpu_or_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
# define __this_cpu_or_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
# define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val))
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
#endif
#ifndef __this_cpu_xor
# ifndef __this_cpu_xor_1
# define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_2
# define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_4
# define __this_cpu_xor_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_8
# define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instruction. The per cpu atomicity is created by the fact that the processor either executes and instruction or not. Embedded in the instruction is the relocation of the per cpu address to the are reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the mids of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions come also come in the two flavors used for this_cpu_ptr(). The firstparameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do. this_cpu_read(scalar) this_cpu_write(scalar, value) this_cpu_add(scale, value) this_cpu_sub(scalar, value) this_cpu_inc(scalar) this_cpu_dec(scalar) this_cpu_and(scalar, value) this_cpu_or(scalar, value) this_cpu_xor(scalar, value) Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt. cc: David Howells <dhowells@redhat.com> cc: Ingo Molnar <mingo@elte.hu> cc: Rusty Russell <rusty@rustcorp.com.au> cc: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
2009-10-03 14:48:22 +04:00
#endif
#define __this_cpu_generic_add_return(pcp, val) \
({ \
__this_cpu_add(pcp, val); \
__this_cpu_read(pcp); \
})
#ifndef __this_cpu_add_return
# ifndef __this_cpu_add_return_1
# define __this_cpu_add_return_1(pcp, val) __this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_2
# define __this_cpu_add_return_2(pcp, val) __this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_4
# define __this_cpu_add_return_4(pcp, val) __this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_8
# define __this_cpu_add_return_8(pcp, val) __this_cpu_generic_add_return(pcp, val)
# endif
# define __this_cpu_add_return(pcp, val) \
__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
#endif
#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(val))
#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
#define __this_cpu_generic_xchg(pcp, nval) \
({ typeof(pcp) ret__; \
ret__ = __this_cpu_read(pcp); \
__this_cpu_write(pcp, nval); \
ret__; \
})
#ifndef __this_cpu_xchg
# ifndef __this_cpu_xchg_1
# define __this_cpu_xchg_1(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_2
# define __this_cpu_xchg_2(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_4
# define __this_cpu_xchg_4(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_8
# define __this_cpu_xchg_8(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
# endif
# define __this_cpu_xchg(pcp, nval) \
__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
#endif
#define __this_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
typeof(pcp) ret__; \
ret__ = __this_cpu_read(pcp); \
if (ret__ == (oval)) \
__this_cpu_write(pcp, nval); \
ret__; \
})
#ifndef __this_cpu_cmpxchg
# ifndef __this_cpu_cmpxchg_1
# define __this_cpu_cmpxchg_1(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_2
# define __this_cpu_cmpxchg_2(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_4
# define __this_cpu_cmpxchg_4(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_8
# define __this_cpu_cmpxchg_8(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define __this_cpu_cmpxchg(pcp, oval, nval) \
__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
#endif
#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({ \
int __ret = 0; \
if (__this_cpu_read(pcp1) == (oval1) && \
__this_cpu_read(pcp2) == (oval2)) { \
__this_cpu_write(pcp1, (nval1)); \
__this_cpu_write(pcp2, (nval2)); \
__ret = 1; \
} \
(__ret); \
})
#ifndef __this_cpu_cmpxchg_double
# ifndef __this_cpu_cmpxchg_double_1
# define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_2
# define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_4
# define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_8
# define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
#endif /* __LINUX_PERCPU_H */