Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu: add __percpu sparse annotations to what's left
  percpu: add __percpu sparse annotations to fs
  percpu: add __percpu sparse annotations to core kernel subsystems
  local_t: Remove leftover local.h
  this_cpu: Remove pageset_notifier
  this_cpu: Page allocator conversion
  percpu, x86: Generic inc / dec percpu instructions
  local_t: Move local.h include to ringbuffer.c and ring_buffer_benchmark.c
  module: Use this_cpu_xx to dynamically allocate counters
  local_t: Remove cpu_local_xx macros
  percpu: refactor the code in pcpu_[de]populate_chunk()
  percpu: remove compile warnings caused by __verify_pcpu_ptr()
  percpu: make accessors check for percpu pointer in sparse
  percpu: add __percpu for sparse.
  percpu: make access macros universal
  percpu: remove per_cpu__ prefix.
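For readers skimming the shortlog, a rough sketch of what converted code looks like after this series (illustrative only; the identifiers below are invented, not taken from the tree): per-CPU variables lose the per_cpu__ name prefix and are referenced by their plain name through the accessors, dynamically allocated per-CPU pointers carry the __percpu annotation so sparse can check them, and the x86 this_cpu operations can emit single inc/add instructions.

/* Hypothetical example, not part of this merge; names are made up. */
#include <linux/percpu.h>
#include <linux/errno.h>

/* Static per-CPU counter: after "percpu: remove per_cpu__ prefix." it is
 * referred to by its plain name through the accessor macros. */
static DEFINE_PER_CPU(unsigned long, example_hits);

/* Dynamic per-CPU data: the __percpu annotation added by this series lets
 * sparse flag direct dereferences that bypass the per-CPU accessors. */
static unsigned long __percpu *example_dyn_hits;

static void example_tick(void)
{
        /* On x86 this can now compile down to a single per-CPU inc/add. */
        this_cpu_inc(example_hits);
}

static int example_setup(void)
{
        example_dyn_hits = alloc_percpu(unsigned long);
        return example_dyn_hits ? 0 : -ENOMEM;
}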
@@ -98,21 +98,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
 #define __local_add(i,l) ((l)->a.counter+=(i))
 #define __local_sub(i,l) ((l)->a.counter-=(i))
 
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- */
-#define cpu_local_read(l) local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
-
-#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
-
-#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
-
 #endif /* _ALPHA_LOCAL_H */
@@ -816,8 +816,8 @@ ENDPROC(_resume)
 
 ENTRY(_ret_from_exception)
 #ifdef CONFIG_IPIPE
-        p2.l = _per_cpu__ipipe_percpu_domain;
-        p2.h = _per_cpu__ipipe_percpu_domain;
+        p2.l = _ipipe_percpu_domain;
+        p2.h = _ipipe_percpu_domain;
         r0.l = _ipipe_root;
         r0.h = _ipipe_root;
         r2 = [p2];
@@ -358,7 +358,7 @@ mmu_bus_fault:
 1: btstq 12, $r1 ; Refill?
         bpl 2f
         lsrq 24, $r1 ; Get PGD index (bit 24-31)
-        move.d [per_cpu__current_pgd], $r0 ; PGD for the current process
+        move.d [current_pgd], $r0 ; PGD for the current process
         move.d [$r0+$r1.d], $r0 ; Get PMD
         beq 2f
         nop
@@ -115,7 +115,7 @@
 #ifdef CONFIG_SMP
         move $s7, $acr ; PGD
 #else
-        move.d per_cpu__current_pgd, $acr ; PGD
+        move.d current_pgd, $acr ; PGD
 #endif
         ; Look up PMD in PGD
         lsrq 24, $r0 ; Get PMD index into PGD (bit 24-31)
@@ -9,7 +9,7 @@
 #define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE
 
 #ifdef __ASSEMBLY__
-# define THIS_CPU(var) (per_cpu__##var) /* use this to mark accesses to per-CPU variables... */
+# define THIS_CPU(var) (var) /* use this to mark accesses to per-CPU variables... */
 #else /* !__ASSEMBLY__ */
 
 
@@ -39,7 +39,7 @@ extern void *per_cpu_init(void);
 * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
 * more efficient.
 */
-#define __ia64_per_cpu_var(var) per_cpu__##var
+#define __ia64_per_cpu_var(var) var
 
 #include <asm-generic/percpu.h>
 
@@ -30,9 +30,9 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic
 #endif
 
 #include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
+EXPORT_SYMBOL(ia64_cpu_info);
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
+EXPORT_SYMBOL(local_per_cpu_offset);
 #endif
 
 #include <asm/uaccess.h>
@@ -459,7 +459,7 @@ static void __init initialize_pernode_data(void)
         cpu = 0;
         node = node_cpuid[cpu].nid;
         cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
-                ((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
+                ((char *)&ia64_cpu_info - __per_cpu_start));
         cpu0_cpu_info->node_data = mem_data[node].node_data;
 }
 #endif /* CONFIG_SMP */
@@ -338,29 +338,4 @@ static inline void local_set_mask(unsigned long mask, local_t *addr)
 * a variable, not an address.
 */
 
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non local way. */
-#define cpu_local_wrap_v(l) \
-({ local_t res__; \
-   preempt_disable(); \
-   res__ = (l); \
-   preempt_enable(); \
-   res__; })
-#define cpu_local_wrap(l) \
-({ preempt_disable(); \
-   l; \
-   preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
 #endif /* __M32R_LOCAL_H */
@@ -21,7 +21,7 @@
 * places
 */
 
-#define PER_CPU(var) per_cpu__##var
+#define PER_CPU(var) var
 
 # ifndef __ASSEMBLY__
 DECLARE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
@@ -193,29 +193,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
 #define __local_add(i, l) ((l)->a.counter+=(i))
 #define __local_sub(i, l) ((l)->a.counter-=(i))
 
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
-({ local_t res__; \
-   preempt_disable(); \
-   res__ = (l); \
-   preempt_enable(); \
-   res__; })
-#define cpu_local_wrap(l) \
-({ preempt_disable(); \
-   l; \
-   preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
 #endif /* _ARCH_MIPS_LOCAL_H */
@@ -36,8 +36,8 @@
 #endif
         /* t2 = &__per_cpu_offset[smp_processor_id()]; */
         LDREGX \t2(\t1),\t2
-        addil LT%per_cpu__exception_data,%r27
-        LDREG RT%per_cpu__exception_data(%r1),\t1
+        addil LT%exception_data,%r27
+        LDREG RT%exception_data(%r1),\t1
         /* t1 = &__get_cpu_var(exception_data) */
         add,l \t1,\t2,\t1
         /* t1 = t1->fault_ip */
@@ -46,8 +46,8 @@
 #else
         .macro get_fault_ip t1 t2
         /* t1 = &__get_cpu_var(exception_data) */
-        addil LT%per_cpu__exception_data,%r27
-        LDREG RT%per_cpu__exception_data(%r1),\t2
+        addil LT%exception_data,%r27
+        LDREG RT%exception_data(%r1),\t2
         /* t1 = t2->fault_ip */
         LDREG EXCDATA_IP(\t2), \t1
         .endm
@@ -172,29 +172,4 @@ static __inline__ long local_dec_if_positive(local_t *l)
 #define __local_add(i,l) ((l)->a.counter+=(i))
 #define __local_sub(i,l) ((l)->a.counter-=(i))
 
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
-({ local_t res__; \
-   preempt_disable(); \
-   res__ = (l); \
-   preempt_enable(); \
-   res__; })
-#define cpu_local_wrap(l) \
-({ preempt_disable(); \
-   l; \
-   preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
 #endif /* _ARCH_POWERPC_LOCAL_H */
@@ -21,7 +21,6 @@
 
 #include <asm/perf_event.h>
 #include <asm/ptrace.h>
-#include <asm/local.h>
 #include <asm/pcr.h>
 
 /* We don't have a real NMI on sparc64, but we can fake one
@@ -113,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
                 touched = 1;
         }
         if (!touched && __get_cpu_var(last_irq_sum) == sum) {
-                __this_cpu_inc(per_cpu_var(alert_counter));
-                if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz)
+                __this_cpu_inc(alert_counter);
+                if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
                         die_nmi("BUG: NMI Watchdog detected LOCKUP",
                                 regs, panic_on_timeout);
         } else {
                 __get_cpu_var(last_irq_sum) = sum;
-                __this_cpu_write(per_cpu_var(alert_counter), 0);
+                __this_cpu_write(alert_counter, 0);
         }
         if (__get_cpu_var(wd_enabled)) {
                 write_pic(picl_value(nmi_hz));
@@ -149,11 +149,11 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
 rtrap_irq:
 rtrap:
 #ifndef CONFIG_SMP
-        sethi %hi(per_cpu____cpu_data), %l0
-        lduw [%l0 + %lo(per_cpu____cpu_data)], %l1
+        sethi %hi(__cpu_data), %l0
+        lduw [%l0 + %lo(__cpu_data)], %l1
 #else
-        sethi %hi(per_cpu____cpu_data), %l0
-        or %l0, %lo(per_cpu____cpu_data), %l0
+        sethi %hi(__cpu_data), %l0
+        or %l0, %lo(__cpu_data), %l0
         lduw [%l0 + %g5], %l1
 #endif
         cmp %l1, 0
@@ -195,41 +195,4 @@ static inline long local_sub_return(long i, local_t *l)
 #define __local_add(i, l) local_add((i), (l))
 #define __local_sub(i, l) local_sub((i), (l))
 
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- *
- * X86_64: This could be done better if we moved the per cpu data directly
- * after GS.
- */
-
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
-({ \
-        local_t res__; \
-        preempt_disable(); \
-        res__ = (l); \
-        preempt_enable(); \
-        res__; \
-})
-#define cpu_local_wrap(l) \
-({ \
-        preempt_disable(); \
-        (l); \
-        preempt_enable(); \
-}) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l))))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l))))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
-
-#define __cpu_local_inc(l) cpu_local_inc((l))
-#define __cpu_local_dec(l) cpu_local_dec((l))
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
 #endif /* _ASM_X86_LOCAL_H */
@@ -25,19 +25,18 @@
 */
 #ifdef CONFIG_SMP
 #define PER_CPU(var, reg) \
-        __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \
-        lea per_cpu__##var(reg), reg
-#define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var
+        __percpu_mov_op %__percpu_seg:this_cpu_off, reg; \
+        lea var(reg), reg
+#define PER_CPU_VAR(var) %__percpu_seg:var
 #else /* ! SMP */
-#define PER_CPU(var, reg) \
-        __percpu_mov_op $per_cpu__##var, reg
-#define PER_CPU_VAR(var) per_cpu__##var
+#define PER_CPU(var, reg) __percpu_mov_op $var, reg
+#define PER_CPU_VAR(var) var
 #endif /* SMP */
 
 #ifdef CONFIG_X86_64_SMP
 #define INIT_PER_CPU_VAR(var) init_per_cpu__##var
 #else
-#define INIT_PER_CPU_VAR(var) per_cpu__##var
+#define INIT_PER_CPU_VAR(var) var
 #endif
 
 #else /* ...!ASSEMBLY */
@@ -60,12 +59,12 @@
 * There also must be an entry in vmlinux_64.lds.S
 */
 #define DECLARE_INIT_PER_CPU(var) \
-        extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
+        extern typeof(var) init_per_cpu_var(var)
 
 #ifdef CONFIG_X86_64_SMP
 #define init_per_cpu_var(var) init_per_cpu__##var
 #else
-#define init_per_cpu_var(var) per_cpu_var(var)
+#define init_per_cpu_var(var) var
 #endif
 
 /* For arch-specific code, we can use direct single-insn ops (they
@@ -104,6 +103,64 @@ do { \
         } \
 } while (0)
 
+/*
+ * Generate a percpu add to memory instruction and optimize code
+ * if a one is added or subtracted.
+ */
+#define percpu_add_op(var, val) \
+do { \
+        typedef typeof(var) pao_T__; \
+        const int pao_ID__ = (__builtin_constant_p(val) && \
+                ((val) == 1 || (val) == -1)) ? (val) : 0; \
+        if (0) { \
+                pao_T__ pao_tmp__; \
+                pao_tmp__ = (val); \
+        } \
+        switch (sizeof(var)) { \
+        case 1: \
+                if (pao_ID__ == 1) \
+                        asm("incb "__percpu_arg(0) : "+m" (var)); \
+                else if (pao_ID__ == -1) \
+                        asm("decb "__percpu_arg(0) : "+m" (var)); \
+                else \
+                        asm("addb %1, "__percpu_arg(0) \
+                            : "+m" (var) \
+                            : "qi" ((pao_T__)(val))); \
+                break; \
+        case 2: \
+                if (pao_ID__ == 1) \
+                        asm("incw "__percpu_arg(0) : "+m" (var)); \
+                else if (pao_ID__ == -1) \
+                        asm("decw "__percpu_arg(0) : "+m" (var)); \
+                else \
+                        asm("addw %1, "__percpu_arg(0) \
+                            : "+m" (var) \
+                            : "ri" ((pao_T__)(val))); \
+                break; \
+        case 4: \
+                if (pao_ID__ == 1) \
+                        asm("incl "__percpu_arg(0) : "+m" (var)); \
+                else if (pao_ID__ == -1) \
+                        asm("decl "__percpu_arg(0) : "+m" (var)); \
+                else \
+                        asm("addl %1, "__percpu_arg(0) \
+                            : "+m" (var) \
+                            : "ri" ((pao_T__)(val))); \
+                break; \
+        case 8: \
+                if (pao_ID__ == 1) \
+                        asm("incq "__percpu_arg(0) : "+m" (var)); \
+                else if (pao_ID__ == -1) \
+                        asm("decq "__percpu_arg(0) : "+m" (var)); \
+                else \
+                        asm("addq %1, "__percpu_arg(0) \
+                            : "+m" (var) \
+                            : "re" ((pao_T__)(val))); \
+                break; \
+        default: __bad_percpu_size(); \
+        } \
+} while (0)
+
 #define percpu_from_op(op, var, constraint) \
 ({ \
         typeof(var) pfo_ret__; \
@@ -142,16 +199,14 @@ do { \
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
-#define percpu_read(var) percpu_from_op("mov", per_cpu__##var, \
-                                "m" (per_cpu__##var))
-#define percpu_read_stable(var) percpu_from_op("mov", per_cpu__##var, \
-                                "p" (&per_cpu__##var))
-#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
-#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val)
-#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val)
-#define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val)
-#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
-#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
+#define percpu_read(var) percpu_from_op("mov", var, "m" (var))
+#define percpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var)))
+#define percpu_write(var, val) percpu_to_op("mov", var, val)
+#define percpu_add(var, val) percpu_add_op(var, val)
+#define percpu_sub(var, val) percpu_add_op(var, -(val))
+#define percpu_and(var, val) percpu_to_op("and", var, val)
+#define percpu_or(var, val) percpu_to_op("or", var, val)
+#define percpu_xor(var, val) percpu_to_op("xor", var, val)
 
 #define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
 #define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
@@ -160,9 +215,9 @@ do { \
 #define __this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
 #define __this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
 #define __this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
-#define __this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
-#define __this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
-#define __this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
+#define __this_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
+#define __this_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
 #define __this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
 #define __this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
 #define __this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
@@ -179,9 +234,9 @@ do { \
 #define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
 #define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
 #define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
-#define this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
-#define this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
-#define this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
+#define this_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
+#define this_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
 #define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
 #define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
 #define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
@@ -192,9 +247,9 @@ do { \
 #define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
 #define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
 
-#define irqsafe_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
-#define irqsafe_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
-#define irqsafe_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
+#define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
+#define irqsafe_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
@@ -212,19 +267,19 @@ do { \
 #ifdef CONFIG_X86_64
 #define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
 #define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
-#define __this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
 #define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
 #define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
 #define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
 
 #define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
-#define this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
 #define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
 #define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
 
-#define irqsafe_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
@@ -236,7 +291,7 @@ do { \
 ({ \
         int old__; \
         asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
-                     : "=r" (old__), "+m" (per_cpu__##var) \
+                     : "=r" (old__), "+m" (var) \
                      : "dIr" (bit)); \
         old__; \
 })
|
@ -32,7 +32,7 @@ extern void show_regs_common(void);
|
||||||
"movl %P[task_canary](%[next]), %%ebx\n\t" \
|
"movl %P[task_canary](%[next]), %%ebx\n\t" \
|
||||||
"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
|
"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
|
||||||
#define __switch_canary_oparam \
|
#define __switch_canary_oparam \
|
||||||
, [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
|
, [stack_canary] "=m" (stack_canary.canary)
|
||||||
#define __switch_canary_iparam \
|
#define __switch_canary_iparam \
|
||||||
, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
|
, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
|
||||||
#else /* CC_STACKPROTECTOR */
|
#else /* CC_STACKPROTECTOR */
|
||||||
|
@ -114,7 +114,7 @@ do { \
|
||||||
"movq %P[task_canary](%%rsi),%%r8\n\t" \
|
"movq %P[task_canary](%%rsi),%%r8\n\t" \
|
||||||
"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
|
"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
|
||||||
#define __switch_canary_oparam \
|
#define __switch_canary_oparam \
|
||||||
, [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
|
, [gs_canary] "=m" (irq_stack_union.stack_canary)
|
||||||
#define __switch_canary_iparam \
|
#define __switch_canary_iparam \
|
||||||
, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
|
, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
|
||||||
#else /* CC_STACKPROTECTOR */
|
#else /* CC_STACKPROTECTOR */
|
||||||
|
@@ -143,7 +143,7 @@ do { \
           [ti_flags] "i" (offsetof(struct thread_info, flags)), \
           [_tif_fork] "i" (_TIF_FORK), \
           [thread_info] "i" (offsetof(struct task_struct, stack)), \
-          [current_task] "m" (per_cpu_var(current_task)) \
+          [current_task] "m" (current_task) \
           __switch_canary_iparam \
         : "memory", "cc" __EXTRA_CLOBBER)
 #endif
@@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                  * Ayiee, looks like this CPU is stuck ...
                  * wait a few IRQs (5 seconds) before doing the oops ...
                  */
-                __this_cpu_inc(per_cpu_var(alert_counter));
-                if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
+                __this_cpu_inc(alert_counter);
+                if (__this_cpu_read(alert_counter) == 5 * nmi_hz)
                         /*
                          * die_nmi will return ONLY if NOTIFY_STOP happens..
                          */
@@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                                 regs, panic_on_timeout);
         } else {
                 __get_cpu_var(last_irq_sum) = sum;
-                __this_cpu_write(per_cpu_var(alert_counter), 0);
+                __this_cpu_write(alert_counter, 0);
         }
 
         /* see if the nmi watchdog went off */
@@ -442,8 +442,8 @@ is386: movl $2,%ecx # set MP
         */
         cmpb $0,ready
         jne 1f
-        movl $per_cpu__gdt_page,%eax
-        movl $per_cpu__stack_canary,%ecx
+        movl $gdt_page,%eax
+        movl $stack_canary,%ecx
         movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
         shrl $16, %ecx
         movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
@@ -706,7 +706,7 @@ idt_descr:
         .word 0 # 32 bit align gdt_desc.address
 ENTRY(early_gdt_descr)
         .word GDT_ENTRIES*8-1
-        .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
+        .long gdt_page /* Overwritten for secondary CPUs */
 
 /*
 * The boot_gdt must mirror the equivalent in setup.S and is
@@ -341,7 +341,7 @@ SECTIONS
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
-#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
+#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
 INIT_PER_CPU(gdt_page);
 INIT_PER_CPU(irq_stack_union);
 
@@ -352,7 +352,7 @@ INIT_PER_CPU(irq_stack_union);
         "kernel image bigger than KERNEL_IMAGE_SIZE");
 
 #ifdef CONFIG_SMP
-. = ASSERT((per_cpu__irq_stack_union == 0),
+. = ASSERT((irq_stack_union == 0),
         "irq_stack_union is not at start of per-cpu area");
 #endif
 
@@ -90,9 +90,9 @@ ENTRY(xen_iret)
         GET_THREAD_INFO(%eax)
         movl TI_cpu(%eax), %eax
         movl __per_cpu_offset(,%eax,4), %eax
-        mov per_cpu__xen_vcpu(%eax), %eax
+        mov xen_vcpu(%eax), %eax
 #else
-        movl per_cpu__xen_vcpu, %eax
+        movl xen_vcpu, %eax
 #endif
 
         /* check IF state we're restoring */
@@ -31,7 +31,7 @@ struct cryptd_cpu_queue {
 };
 
 struct cryptd_queue {
-        struct cryptd_cpu_queue *cpu_queue;
+        struct cryptd_cpu_queue __percpu *cpu_queue;
 };
 
 struct cryptd_instance_ctx {
@@ -561,7 +561,7 @@ end:
 }
 
 int acpi_processor_preregister_performance(
-        struct acpi_processor_performance *performance)
+        struct acpi_processor_performance __percpu *performance)
 {
         int count, count_target;
         int retval = 0;
@@ -284,7 +284,7 @@ struct dma_chan_tbl_ent {
 /**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
-static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
 
 static int __init dma_channel_table_init(void)
 {
@@ -13,7 +13,7 @@ module_param(report_gart_errors, int, 0644);
 static int ecc_enable_override;
 module_param(ecc_enable_override, int, 0644);
 
-static struct msr *msrs;
+static struct msr __percpu *msrs;
 
 /* Lookup table for all possible MC control instances */
 struct amd64_pvt;
@@ -4680,7 +4680,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
 {
         unsigned long cpu;
         struct page *spare_page;
-        struct raid5_percpu *allcpus;
+        struct raid5_percpu __percpu *allcpus;
         void *scribble;
         int err;
 
@@ -405,7 +405,7 @@ struct raid5_private_data {
                 * lists and performing address
                 * conversions
                 */
-        } *percpu;
+        } __percpu *percpu;
         size_t scribble_len; /* size of scribble region must be
                 * associated with conf to handle
                 * cpu hotplug while reshaping
@@ -1014,7 +1014,7 @@ struct ext4_sb_info {
         atomic_t s_lock_busy;
 
         /* locality groups */
-        struct ext4_locality_group *s_locality_groups;
+        struct ext4_locality_group __percpu *s_locality_groups;
 
         /* for write statistics */
         unsigned long s_sectors_written_start;
@@ -57,12 +57,12 @@ static inline void nfs_add_fscache_stats(struct inode *inode,
 }
 #endif
 
-static inline struct nfs_iostats *nfs_alloc_iostats(void)
+static inline struct nfs_iostats __percpu *nfs_alloc_iostats(void)
 {
         return alloc_percpu(struct nfs_iostats);
 }
 
-static inline void nfs_free_iostats(struct nfs_iostats *stats)
+static inline void nfs_free_iostats(struct nfs_iostats __percpu *stats)
 {
         if (stats != NULL)
                 free_percpu(stats);
@@ -245,7 +245,7 @@ typedef struct xfs_mount {
         struct xfs_qmops *m_qm_ops; /* vector of XQM ops */
         atomic_t m_active_trans; /* number trans frozen */
 #ifdef HAVE_PERCPU_SB
-        xfs_icsb_cnts_t *m_sb_cnts; /* per-cpu superblock counters */
+        xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */
         unsigned long m_icsb_counters; /* disabled per-cpu counters */
         struct notifier_block m_icsb_notifier; /* hotplug cpu notifier */
         struct mutex m_icsb_mutex; /* balancer sync lock */
@@ -238,7 +238,7 @@ struct acpi_processor_errata {
 
 extern int acpi_processor_preregister_performance(struct
                 acpi_processor_performance
-                *performance);
+                __percpu *performance);
 
 extern int acpi_processor_register_performance(struct acpi_processor_performance
                 *performance, unsigned int cpu);
@@ -52,23 +52,4 @@ typedef struct
 #define __local_add(i,l) local_set((l), local_read(l) + (i))
 #define __local_sub(i,l) local_set((l), local_read(l) - (i))
 
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable (eg. mystruct.foo), not an address.
- */
-#define cpu_local_read(l) local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
-#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
-
-/* Non-atomic increments, ie. preemption disabled and won't be touched
- * in interrupt, etc. Some archs can optimize this case well.
- */
-#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
-
 #endif /* _ASM_GENERIC_LOCAL_H */
@@ -41,7 +41,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 * Only S390 provides its own means of moving the pointer.
 */
 #ifndef SHIFT_PERCPU_PTR
-#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset))
+/* Weird cast keeps both GCC and sparse happy. */
+#define SHIFT_PERCPU_PTR(__p, __offset) ({ \
+        __verify_pcpu_ptr((__p)); \
+        RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
+})
 #endif
 
 /*
@@ -50,11 +54,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 * offset.
 */
 #define per_cpu(var, cpu) \
-        (*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
+        (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
 #define __get_cpu_var(var) \
-        (*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
+        (*SHIFT_PERCPU_PTR(&(var), my_cpu_offset))
 #define __raw_get_cpu_var(var) \
-        (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+        (*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset))
 
 #define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
 #define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
@@ -66,9 +70,9 @@ extern void setup_per_cpu_areas(void);
 
 #else /* ! SMP */
 
-#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
-#define __get_cpu_var(var) per_cpu_var(var)
-#define __raw_get_cpu_var(var) per_cpu_var(var)
+#define per_cpu(var, cpu) (*((void)(cpu), &(var)))
+#define __get_cpu_var(var) (var)
+#define __raw_get_cpu_var(var) (var)
 #define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
 #define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
 
@@ -150,8 +150,8 @@ struct blk_user_trace_setup {
 struct blk_trace {
         int trace_state;
         struct rchan *rchan;
-        unsigned long *sequence;
-        unsigned char *msg_data;
+        unsigned long __percpu *sequence;
+        unsigned char __percpu *msg_data;
         u16 act_mask;
         u64 start_lba;
         u64 end_lba;
@@ -5,7 +5,7 @@
 
 #ifdef __CHECKER__
 # define __user __attribute__((noderef, address_space(1)))
-# define __kernel /* default address space */
+# define __kernel __attribute__((address_space(0)))
 # define __safe __attribute__((safe))
 # define __force __attribute__((force))
 # define __nocast __attribute__((nocast))
@@ -162,7 +162,7 @@ struct dma_chan {
         struct dma_chan_dev *dev;
 
         struct list_head device_node;
-        struct dma_chan_percpu *local;
+        struct dma_chan_percpu __percpu *local;
         int client_count;
         int table_count;
         void *private;
@@ -101,7 +101,7 @@ struct hd_struct {
         unsigned long stamp;
         int in_flight[2];
 #ifdef CONFIG_SMP
-        struct disk_stats *dkstats;
+        struct disk_stats __percpu *dkstats;
 #else
         struct disk_stats dkstats;
 #endif
@@ -199,7 +199,7 @@ extern struct kimage *kexec_crash_image;
 */
 extern struct resource crashk_res;
 typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4];
-extern note_buf_t *crash_notes;
+extern note_buf_t __percpu *crash_notes;
 extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
 extern size_t vmcoreinfo_size;
 extern size_t vmcoreinfo_max_size;
@@ -1081,11 +1081,7 @@ extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
 extern int after_bootmem;
 
-#ifdef CONFIG_NUMA
 extern void setup_per_cpu_pageset(void);
-#else
-static inline void setup_per_cpu_pageset(void) {}
-#endif
 
 extern void zone_pcp_update(struct zone *zone);
 
@@ -184,13 +184,7 @@ struct per_cpu_pageset {
         s8 stat_threshold;
         s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
 #endif
-} ____cacheline_aligned_in_smp;
+};
 
-#ifdef CONFIG_NUMA
-#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
-#else
-#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
-#endif
-
 #endif /* !__GENERATING_BOUNDS.H */
 
@@ -306,10 +300,8 @@ struct zone {
         */
         unsigned long min_unmapped_pages;
         unsigned long min_slab_pages;
-        struct per_cpu_pageset *pageset[NR_CPUS];
-#else
-        struct per_cpu_pageset pageset[NR_CPUS];
 #endif
+        struct per_cpu_pageset __percpu *pageset;
         /*
         * free areas of different sizes
         */
@@ -17,7 +17,7 @@
 #include <linux/moduleparam.h>
 #include <linux/tracepoint.h>
 
-#include <asm/local.h>
+#include <linux/percpu.h>
 #include <asm/module.h>
 
 #include <trace/events/module.h>
@@ -363,11 +363,9 @@ struct module
         /* Destruction function. */
         void (*exit)(void);
 
-#ifdef CONFIG_SMP
-        char *refptr;
-#else
-        local_t ref;
-#endif
+        struct module_ref {
+                int count;
+        } __percpu *refptr;
 #endif
 
 #ifdef CONFIG_CONSTRUCTORS
@@ -454,25 +452,16 @@ void __symbol_put(const char *symbol);
 #define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
 void symbol_put_addr(void *addr);
 
-static inline local_t *__module_ref_addr(struct module *mod, int cpu)
-{
-#ifdef CONFIG_SMP
-        return (local_t *) (mod->refptr + per_cpu_offset(cpu));
-#else
-        return &mod->ref;
-#endif
-}
-
 /* Sometimes we know we already have a refcount, and it's easier not
    to handle the error case (which only happens with rmmod --wait). */
 static inline void __module_get(struct module *module)
 {
         if (module) {
-                unsigned int cpu = get_cpu();
-                local_inc(__module_ref_addr(module, cpu));
+                preempt_disable();
+                __this_cpu_inc(module->refptr->count);
                 trace_module_get(module, _THIS_IP_,
-                                local_read(__module_ref_addr(module, cpu)));
-                put_cpu();
+                                __this_cpu_read(module->refptr->count));
+                preempt_enable();
         }
 }
 
@@ -481,15 +470,17 @@ static inline int try_module_get(struct module *module)
         int ret = 1;
 
         if (module) {
-                unsigned int cpu = get_cpu();
+                preempt_disable();
 
                 if (likely(module_is_live(module))) {
-                        local_inc(__module_ref_addr(module, cpu));
+                        __this_cpu_inc(module->refptr->count);
                         trace_module_get(module, _THIS_IP_,
-                                local_read(__module_ref_addr(module, cpu)));
+                                __this_cpu_read(module->refptr->count));
                 }
                 else
                         ret = 0;
-                put_cpu();
+
+                preempt_enable();
         }
         return ret;
 }
@@ -66,7 +66,7 @@ struct vfsmount {
         int mnt_pinned;
         int mnt_ghosts;
 #ifdef CONFIG_SMP
-        int *mnt_writers;
+        int __percpu *mnt_writers;
 #else
         int mnt_writers;
 #endif
@@ -105,7 +105,7 @@ struct nfs_server {
         struct rpc_clnt * client; /* RPC client handle */
         struct rpc_clnt * client_acl; /* ACL RPC client handle */
         struct nlm_host *nlm_host; /* NLM client handle */
-        struct nfs_iostats * io_stats; /* I/O statistics */
+        struct nfs_iostats __percpu *io_stats; /* I/O statistics */
         struct backing_dev_info backing_dev_info;
         atomic_long_t writeback; /* number of writeback pages */
         int flags; /* various flags */
@@ -1,12 +1,6 @@
 #ifndef _LINUX_PERCPU_DEFS_H
 #define _LINUX_PERCPU_DEFS_H
 
-/*
- * Determine the real variable name from the name visible in the
- * kernel sources.
- */
-#define per_cpu_var(var) per_cpu__##var
-
 /*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
@@ -18,12 +12,22 @@
 * that section.
 */
 #define __PCPU_ATTRS(sec) \
-        __attribute__((section(PER_CPU_BASE_SECTION sec))) \
+        __percpu __attribute__((section(PER_CPU_BASE_SECTION sec))) \
         PER_CPU_ATTRIBUTES
 
 #define __PCPU_DUMMY_ATTRS \
         __attribute__((section(".discard"), unused))
 
+/*
+ * Macro which verifies @ptr is a percpu pointer without evaluating
+ * @ptr.  This is to be used in percpu accessors to verify that the
+ * input parameter is a percpu pointer.
+ */
+#define __verify_pcpu_ptr(ptr) do { \
+        const void __percpu *__vpp_verify = (typeof(ptr))NULL; \
+        (void)__vpp_verify; \
+} while (0)
+
 /*
 * s390 and alpha modules require percpu variables to be defined as
 * weak to force the compiler to generate GOT based external
@@ -56,24 +60,24 @@
 */
 #define DECLARE_PER_CPU_SECTION(type, name, sec) \
         extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
-        extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+        extern __PCPU_ATTRS(sec) __typeof__(type) name
 
 #define DEFINE_PER_CPU_SECTION(type, name, sec) \
         __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
         extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
         __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
         __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
-        __typeof__(type) per_cpu__##name
+        __typeof__(type) name
 #else
 /*
 * Normal declaration and definition macros.
 */
 #define DECLARE_PER_CPU_SECTION(type, name, sec) \
-        extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+        extern __PCPU_ATTRS(sec) __typeof__(type) name
 
 #define DEFINE_PER_CPU_SECTION(type, name, sec) \
         __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \
-        __typeof__(type) per_cpu__##name
+        __typeof__(type) name
 #endif
 
 /*
|
||||||
__aligned(PAGE_SIZE)
|
__aligned(PAGE_SIZE)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Intermodule exports for per-CPU variables.
|
* Intermodule exports for per-CPU variables. sparse forgets about
|
||||||
|
* address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
|
||||||
|
* noop if __CHECKER__.
|
||||||
*/
|
*/
|
||||||
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
|
#ifndef __CHECKER__
|
||||||
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
|
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
|
||||||
|
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
|
||||||
|
#else
|
||||||
|
#define EXPORT_PER_CPU_SYMBOL(var)
|
||||||
|
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
|
||||||
|
#endif
|
||||||
|
|
||||||
#endif /* _LINUX_PERCPU_DEFS_H */
|
#endif /* _LINUX_PERCPU_DEFS_H */
|
||||||
|
|
|
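The net effect of the percpu-defs.h hunks is that a static per-CPU variable is now defined under its plain name (no per_cpu__ prefix) and the accessors verify the argument is a __percpu pointer under sparse. A minimal sketch of how that looks from a user's side; the variable name my_counter and its surrounding function are invented for illustration, not part of the commit:

/* Illustrative sketch only -- "my_counter" is a made-up example. */
#include <linux/percpu.h>

/* After this series the emitted symbol really is "my_counter";
 * previously the macro would have produced "per_cpu__my_counter". */
static DEFINE_PER_CPU(unsigned long, my_counter);

static void bump_my_counter(void)
{
	/* The accessor runs &my_counter through __verify_pcpu_ptr(), so
	 * sparse warns if a plain (non-__percpu) pointer slips in here. */
	this_cpu_inc(my_counter);
}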
include/linux/percpu.h

@@ -27,10 +27,17 @@
  * we force a syntax error here if it isn't.
  */
 #define get_cpu_var(var) (*({				\
-	extern int simple_identifier_##var(void);	\
 	preempt_disable();				\
 	&__get_cpu_var(var); }))
-#define put_cpu_var(var) preempt_enable()
+
+/*
+ * The weird & is necessary because sparse considers (void)(var) to be
+ * a direct dereference of percpu variable (var).
+ */
+#define put_cpu_var(var) do {				\
+	(void)&(var);					\
+	preempt_enable();				\
+} while (0)

 #ifdef CONFIG_SMP

@@ -127,9 +134,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
  */
 #define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))

-extern void *__alloc_reserved_percpu(size_t size, size_t align);
-extern void *__alloc_percpu(size_t size, size_t align);
-extern void free_percpu(void *__pdata);
+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void __percpu *__pdata);
 extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA

@@ -140,7 +147,7 @@ extern void __init setup_per_cpu_areas(void);

 #define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); (ptr); })

-static inline void *__alloc_percpu(size_t size, size_t align)
+static inline void __percpu *__alloc_percpu(size_t size, size_t align)
 {
 	/*
 	 * Can't easily make larger alignment work with kmalloc.  WARN

@@ -151,7 +158,7 @@ static inline void *__alloc_percpu(size_t size, size_t align)
 	return kzalloc(size, GFP_KERNEL);
 }

-static inline void free_percpu(void *p)
+static inline void free_percpu(void __percpu *p)
 {
 	kfree(p);
 }

@@ -171,7 +178,7 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
 #endif /* CONFIG_SMP */

 #define alloc_percpu(type)	\
-	(typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
+	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))

 /*
  * Optional methods for optimized non-lvalue per-cpu variable access.

@@ -188,17 +195,19 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
 #ifndef percpu_read
 # define percpu_read(var)				\
   ({							\
-	typeof(per_cpu_var(var)) __tmp_var__;		\
-	__tmp_var__ = get_cpu_var(var);			\
-	put_cpu_var(var);				\
-	__tmp_var__;					\
+	typeof(var) *pr_ptr__ = &(var);			\
+	typeof(var) pr_ret__;				\
+	pr_ret__ = get_cpu_var(*pr_ptr__);		\
+	put_cpu_var(*pr_ptr__);				\
+	pr_ret__;					\
   })
 #endif

 #define __percpu_generic_to_op(var, val, op)		\
 do {							\
-	get_cpu_var(var) op val;			\
-	put_cpu_var(var);				\
+	typeof(var) *pgto_ptr__ = &(var);		\
+	get_cpu_var(*pgto_ptr__) op val;		\
+	put_cpu_var(*pgto_ptr__);			\
 } while (0)

 #ifndef percpu_write

@@ -234,6 +243,7 @@ extern void __bad_size_call_parameter(void);

 #define __pcpu_size_call_return(stem, variable)				\
 ({	typeof(variable) pscr_ret__;					\
+	__verify_pcpu_ptr(&(variable));					\
 	switch(sizeof(variable)) {					\
 	case 1: pscr_ret__ = stem##1(variable);break;			\
 	case 2: pscr_ret__ = stem##2(variable);break;			\

@@ -247,6 +257,7 @@ extern void __bad_size_call_parameter(void);

 #define __pcpu_size_call(stem, variable, ...)				\
 do {									\
+	__verify_pcpu_ptr(&(variable));					\
 	switch(sizeof(variable)) {					\
 	case 1: stem##1(variable, __VA_ARGS__);break;			\
 	case 2: stem##2(variable, __VA_ARGS__);break;			\

@@ -259,8 +270,7 @@ do {									\

 /*
  * Optimized manipulation for memory allocated through the per cpu
- * allocator or for addresses of per cpu variables (can be determined
- * using per_cpu_var(xx).
+ * allocator or for addresses of per cpu variables.
  *
  * These operation guarantee exclusivity of access for other operations
  * on the *same* processor. The assumption is that per cpu data is only

@@ -311,7 +321,7 @@ do {									\
 #define _this_cpu_generic_to_op(pcp, val, op)				\
 do {									\
 	preempt_disable();						\
-	*__this_cpu_ptr(&pcp) op val;					\
+	*__this_cpu_ptr(&(pcp)) op val;					\
 	preempt_enable();						\
 } while (0)
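With __alloc_percpu()/free_percpu() now typed as void __percpu *, dynamically allocated per-CPU memory gets the same sparse checking as statically defined variables. A hedged usage sketch of the post-change API; struct foo_stats and its callers are invented for illustration:

/* Illustrative sketch; struct foo_stats is not part of the commit. */
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/errno.h>

struct foo_stats {
	unsigned long packets;
	unsigned long bytes;
};

static struct foo_stats __percpu *foo_stats;	/* __percpu marks the sparse address space */

static int foo_stats_init(void)
{
	foo_stats = alloc_percpu(struct foo_stats);	/* now returns a __percpu pointer */
	return foo_stats ? 0 : -ENOMEM;
}

static void foo_stats_account(unsigned long len)
{
	struct foo_stats *st;

	preempt_disable();
	st = this_cpu_ptr(foo_stats);	/* plain pointer to this CPU's copy */
	st->packets++;
	st->bytes += len;
	preempt_enable();
}

static void foo_stats_exit(void)
{
	free_percpu(foo_stats);		/* free_percpu() now takes void __percpu * */
}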
include/linux/percpu_counter.h

@@ -21,7 +21,7 @@ struct percpu_counter {
 #ifdef CONFIG_HOTPLUG_CPU
 	struct list_head list;	/* All percpu_counters are on a list */
 #endif
-	s32 *counters;
+	s32 __percpu *counters;
 };

 extern int percpu_counter_batch;
include/linux/srcu.h

@@ -33,7 +33,7 @@ struct srcu_struct_array {

 struct srcu_struct {
 	int completed;
-	struct srcu_struct_array *per_cpu_ref;
+	struct srcu_struct_array __percpu *per_cpu_ref;
 	struct mutex mutex;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
include/linux/vmstat.h

@@ -78,22 +78,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

 static inline void __count_vm_event(enum vm_event_item item)
 {
-	__this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
+	__this_cpu_inc(vm_event_states.event[item]);
 }

 static inline void count_vm_event(enum vm_event_item item)
 {
-	this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
+	this_cpu_inc(vm_event_states.event[item]);
 }

 static inline void __count_vm_events(enum vm_event_item item, long delta)
 {
-	__this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
+	__this_cpu_add(vm_event_states.event[item], delta);
 }

 static inline void count_vm_events(enum vm_event_item item, long delta)
 {
-	this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
+	this_cpu_add(vm_event_states.event[item], delta);
 }

 extern void all_vm_events(unsigned long *);
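The vmstat helpers show the pattern used throughout the series: because the accessor macros are now universal, a static per-CPU variable is handed to this_cpu_*() by its plain name instead of being wrapped in per_cpu_var(). A small before/after sketch; the event_counts variable is hypothetical:

/* Sketch only; "event_counts" stands in for something like vm_event_states. */
#include <linux/percpu.h>

#define NR_MY_EVENTS	4

static DEFINE_PER_CPU(unsigned long [NR_MY_EVENTS], event_counts);

static inline void count_my_event(int item)
{
	/* old style:  __this_cpu_inc(per_cpu_var(event_counts)[item]); */
	__this_cpu_inc(event_counts[item]);	/* new style: plain name */
}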
kernel/kexec.c

@@ -41,7 +41,7 @@
 #include <asm/sections.h>

 /* Per cpu memory for storing cpu states in case of system crash. */
-note_buf_t* crash_notes;
+note_buf_t __percpu *crash_notes;

 /* vmcoreinfo stuff */
 static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
kernel/module.c

@@ -474,9 +474,10 @@ static void module_unload_init(struct module *mod)

 	INIT_LIST_HEAD(&mod->modules_which_use_me);
 	for_each_possible_cpu(cpu)
-		local_set(__module_ref_addr(mod, cpu), 0);
+		per_cpu_ptr(mod->refptr, cpu)->count = 0;
+
 	/* Hold reference count during initialization. */
-	local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
+	__this_cpu_write(mod->refptr->count, 1);
 	/* Backwards compatibility macros put refcount during init. */
 	mod->waiter = current;
 }

@@ -619,7 +620,7 @@ unsigned int module_refcount(struct module *mod)
 	int cpu;

 	for_each_possible_cpu(cpu)
-		total += local_read(__module_ref_addr(mod, cpu));
+		total += per_cpu_ptr(mod->refptr, cpu)->count;
 	return total;
 }
 EXPORT_SYMBOL(module_refcount);

@@ -796,14 +797,15 @@ static struct module_attribute refcnt = {
 void module_put(struct module *module)
 {
 	if (module) {
-		unsigned int cpu = get_cpu();
-		local_dec(__module_ref_addr(module, cpu));
+		preempt_disable();
+		__this_cpu_dec(module->refptr->count);
+
 		trace_module_put(module, _RET_IP_,
-				 local_read(__module_ref_addr(module, cpu)));
+				 __this_cpu_read(module->refptr->count));
 		/* Maybe they're waiting for us to drop reference? */
 		if (unlikely(!module_is_live(module)))
 			wake_up_process(module->waiter);
-		put_cpu();
+		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(module_put);

@@ -1397,9 +1399,9 @@ static void free_module(struct module *mod)
 	kfree(mod->args);
 	if (mod->percpu)
 		percpu_modfree(mod->percpu);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+#if defined(CONFIG_MODULE_UNLOAD)
 	if (mod->refptr)
-		percpu_modfree(mod->refptr);
+		free_percpu(mod->refptr);
 #endif
 	/* Free lock-classes: */
 	lockdep_free_key_range(mod->module_core, mod->core_size);

@@ -2162,9 +2164,8 @@ static noinline struct module *load_module(void __user *umod,
 	mod = (void *)sechdrs[modindex].sh_addr;
 	kmemleak_load_module(mod, hdr, sechdrs, secstrings);

-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
-	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
-				      mod->name);
+#if defined(CONFIG_MODULE_UNLOAD)
+	mod->refptr = alloc_percpu(struct module_ref);
 	if (!mod->refptr) {
 		err = -ENOMEM;
 		goto free_init;

@@ -2396,8 +2397,8 @@ static noinline struct module *load_module(void __user *umod,
 	kobject_put(&mod->mkobj.kobj);
  free_unload:
 	module_unload_free(mod);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
-	percpu_modfree(mod->refptr);
+#if defined(CONFIG_MODULE_UNLOAD)
+	free_percpu(mod->refptr);
  free_init:
 #endif
 	module_free(mod, mod->module_init);
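The module refcount conversion replaces the hand-rolled percpu_modalloc() of a local_t per CPU with an ordinary alloc_percpu() of struct module_ref plus this_cpu operations, which also makes the code identical on SMP and UP. A condensed sketch of the lifecycle, assuming only the single count field that the hunks above manipulate (struct my_mod and the helper names are invented):

/* Condensed sketch of the new scheme; error handling trimmed. */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct module_ref {
	int count;			/* the field the diff reads and writes */
};

struct my_mod {				/* stand-in for struct module */
	struct module_ref __percpu *refptr;
};

static int my_mod_init_ref(struct my_mod *mod)
{
	int cpu;

	mod->refptr = alloc_percpu(struct module_ref);
	if (!mod->refptr)
		return -ENOMEM;
	for_each_possible_cpu(cpu)
		per_cpu_ptr(mod->refptr, cpu)->count = 0;
	__this_cpu_write(mod->refptr->count, 1);	/* hold the initial reference */
	return 0;
}

static unsigned int my_mod_refcount(struct my_mod *mod)
{
	unsigned int total = 0;
	int cpu;

	for_each_possible_cpu(cpu)			/* sum the per-CPU counts */
		total += per_cpu_ptr(mod->refptr, cpu)->count;
	return total;
}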
kernel/rcutorture.c

@@ -818,13 +818,13 @@ static void rcu_torture_timer(unsigned long unused)
 		/* Should not happen, but... */
 		pipe_count = RCU_TORTURE_PIPE_LEN;
 	}
-	__this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+	__this_cpu_inc(rcu_torture_count[pipe_count]);
 	completed = cur_ops->completed() - completed;
 	if (completed > RCU_TORTURE_PIPE_LEN) {
 		/* Should not happen, but... */
 		completed = RCU_TORTURE_PIPE_LEN;
 	}
-	__this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+	__this_cpu_inc(rcu_torture_batch[completed]);
 	preempt_enable();
 	cur_ops->readunlock(idx);
 }

@@ -877,13 +877,13 @@ rcu_torture_reader(void *arg)
 		/* Should not happen, but... */
 		pipe_count = RCU_TORTURE_PIPE_LEN;
 	}
-	__this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+	__this_cpu_inc(rcu_torture_count[pipe_count]);
 	completed = cur_ops->completed() - completed;
 	if (completed > RCU_TORTURE_PIPE_LEN) {
 		/* Should not happen, but... */
 		completed = RCU_TORTURE_PIPE_LEN;
 	}
-	__this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+	__this_cpu_inc(rcu_torture_batch[completed]);
 	preempt_enable();
 	cur_ops->readunlock(idx);
 	schedule();
kernel/sched.c

@@ -1521,7 +1521,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)

 #ifdef CONFIG_FAIR_GROUP_SCHED

-static __read_mostly unsigned long *update_shares_data;
+static __read_mostly unsigned long __percpu *update_shares_data;

 static void __set_se_shares(struct sched_entity *se, unsigned long shares);

@@ -8813,7 +8813,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 struct cpuacct {
 	struct cgroup_subsys_state css;
 	/* cpuusage holds pointer to a u64-type object on every cpu */
-	u64 *cpuusage;
+	u64 __percpu *cpuusage;
 	struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
 	struct cpuacct *parent;
 };
kernel/stop_machine.c

@@ -45,7 +45,7 @@ static int refcount;
 static struct workqueue_struct *stop_machine_wq;
 static struct stop_machine_data active, idle;
 static const struct cpumask *active_cpus;
-static void *stop_machine_work;
+static void __percpu *stop_machine_work;

 static void set_state(enum stopmachine_state newstate)
 {
kernel/trace/ring_buffer.c

@@ -20,6 +20,7 @@
 #include <linux/cpu.h>
 #include <linux/fs.h>

+#include <asm/local.h>
 #include "trace.h"

 /*
kernel/trace/ring_buffer_benchmark.c

@@ -8,6 +8,7 @@
 #include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/time.h>
+#include <asm/local.h>

 struct rb_page {
 	u64 ts;
|
||||||
static inline void ftrace_disable_cpu(void)
|
static inline void ftrace_disable_cpu(void)
|
||||||
{
|
{
|
||||||
preempt_disable();
|
preempt_disable();
|
||||||
__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
|
__this_cpu_inc(ftrace_cpu_disabled);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void ftrace_enable_cpu(void)
|
static inline void ftrace_enable_cpu(void)
|
||||||
{
|
{
|
||||||
__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
|
__this_cpu_dec(ftrace_cpu_disabled);
|
||||||
preempt_enable();
|
preempt_enable();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1166,7 +1166,7 @@ trace_function(struct trace_array *tr,
|
||||||
struct ftrace_entry *entry;
|
struct ftrace_entry *entry;
|
||||||
|
|
||||||
/* If we are reading the ring buffer, don't trace */
|
/* If we are reading the ring buffer, don't trace */
|
||||||
if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
|
if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
|
event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
|
||||||
|
|
|
kernel/trace/trace_functions_graph.c

@@ -188,7 +188,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;

-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return 0;

 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,

@@ -247,7 +247,7 @@ static void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;

-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;

 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
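Both tracing files rely on the same guard: a DEFINE_PER_CPU(int, ...) flag bumped around sections that must not recurse into the tracer, now read and updated with __this_cpu_*() on the bare name. A sketch of that pattern in isolation; the names and the event body are illustrative only:

/* Illustrative recursion guard; not the literal tracer code. */
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, my_trace_disabled);

static inline void my_trace_disable(void)
{
	preempt_disable();
	__this_cpu_inc(my_trace_disabled);
}

static inline void my_trace_enable(void)
{
	__this_cpu_dec(my_trace_disabled);
	preempt_enable();
}

static void my_trace_event(void)
{
	if (unlikely(__this_cpu_read(my_trace_disabled)))
		return;		/* already inside the tracer on this CPU */
	/* ... record the event ... */
}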
mm/page_alloc.c (210 lines)

@@ -1009,10 +1009,10 @@ static void drain_pages(unsigned int cpu)
 		struct per_cpu_pageset *pset;
 		struct per_cpu_pages *pcp;

-		pset = zone_pcp(zone, cpu);
+		local_irq_save(flags);
+		pset = per_cpu_ptr(zone->pageset, cpu);

 		pcp = &pset->pcp;
-		local_irq_save(flags);
 		free_pcppages_bulk(zone, pcp->count, pcp);
 		pcp->count = 0;
 		local_irq_restore(flags);

@@ -1096,7 +1096,6 @@ static void free_hot_cold_page(struct page *page, int cold)
 	arch_free_page(page, 0);
 	kernel_map_pages(page, 1, 0);

-	pcp = &zone_pcp(zone, get_cpu())->pcp;
 	migratetype = get_pageblock_migratetype(page);
 	set_page_private(page, migratetype);
 	local_irq_save(flags);

@@ -1119,6 +1118,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 		migratetype = MIGRATE_MOVABLE;
 	}

+	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	if (cold)
 		list_add_tail(&page->lru, &pcp->lists[migratetype]);
 	else

@@ -1131,7 +1131,6 @@ static void free_hot_cold_page(struct page *page, int cold)

 out:
 	local_irq_restore(flags);
-	put_cpu();
 }

 void free_hot_page(struct page *page)

@@ -1181,17 +1180,15 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 	unsigned long flags;
 	struct page *page;
 	int cold = !!(gfp_flags & __GFP_COLD);
-	int cpu;

 again:
-	cpu = get_cpu();
 	if (likely(order == 0)) {
 		struct per_cpu_pages *pcp;
 		struct list_head *list;

-		pcp = &zone_pcp(zone, cpu)->pcp;
-		list = &pcp->lists[migratetype];
 		local_irq_save(flags);
+		pcp = &this_cpu_ptr(zone->pageset)->pcp;
+		list = &pcp->lists[migratetype];
 		if (list_empty(list)) {
 			pcp->count += rmqueue_bulk(zone, 0,
 					pcp->batch, list,

@@ -1232,7 +1229,6 @@ again:
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 	zone_statistics(preferred_zone, zone);
 	local_irq_restore(flags);
-	put_cpu();

 	VM_BUG_ON(bad_range(zone, page));
 	if (prep_new_page(page, order, gfp_flags))

@@ -1241,7 +1237,6 @@ again:

 failed:
 	local_irq_restore(flags);
-	put_cpu();
 	return NULL;
 }

@@ -2180,7 +2175,7 @@ void show_free_areas(void)
 		for_each_online_cpu(cpu) {
 			struct per_cpu_pageset *pageset;

-			pageset = zone_pcp(zone, cpu);
+			pageset = per_cpu_ptr(zone->pageset, cpu);

 			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
 			       cpu, pageset->pcp.high,

@@ -2745,10 +2740,29 @@ static void build_zonelist_cache(pg_data_t *pgdat)

 #endif	/* CONFIG_NUMA */

+/*
+ * Boot pageset table. One per cpu which is going to be used for all
+ * zones and all nodes. The parameters will be set in such a way
+ * that an item put on a list will immediately be handed over to
+ * the buddy list. This is safe since pageset manipulation is done
+ * with interrupts disabled.
+ *
+ * The boot_pagesets must be kept even after bootup is complete for
+ * unused processors and/or zones. They do play a role for bootstrapping
+ * hotplugged processors.
+ *
+ * zoneinfo_show() and maybe other functions do
+ * not check if the processor is online before following the pageset pointer.
+ * Other parts of the kernel may not check if the zone is available.
+ */
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
+static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
+
 /* return values int ....just for stop_machine() */
 static int __build_all_zonelists(void *dummy)
 {
 	int nid;
+	int cpu;

 #ifdef CONFIG_NUMA
 	memset(node_load, 0, sizeof(node_load));

@@ -2759,6 +2773,23 @@ static int __build_all_zonelists(void *dummy)
 		build_zonelists(pgdat);
 		build_zonelist_cache(pgdat);
 	}

+	/*
+	 * Initialize the boot_pagesets that are going to be used
+	 * for bootstrapping processors. The real pagesets for
+	 * each zone will be allocated later when the per cpu
+	 * allocator is available.
+	 *
+	 * boot_pagesets are used also for bootstrapping offline
+	 * cpus if the system is already booted because the pagesets
+	 * are needed to initialize allocators on a specific cpu too.
+	 * F.e. the percpu allocator needs the page allocator which
+	 * needs the percpu allocator in order to allocate its pagesets
+	 * (a chicken-egg dilemma).
+	 */
+	for_each_possible_cpu(cpu)
+		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
+
 	return 0;
 }

@@ -3096,120 +3127,32 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
 	pcp->batch = PAGE_SHIFT * 8;
 }

-
-#ifdef CONFIG_NUMA
 /*
- * Boot pageset table. One per cpu which is going to be used for all
- * zones and all nodes. The parameters will be set in such a way
- * that an item put on a list will immediately be handed over to
- * the buddy list. This is safe since pageset manipulation is done
- * with interrupts disabled.
- *
- * Some NUMA counter updates may also be caught by the boot pagesets.
- *
- * The boot_pagesets must be kept even after bootup is complete for
- * unused processors and/or zones. They do play a role for bootstrapping
- * hotplugged processors.
- *
- * zoneinfo_show() and maybe other functions do
- * not check if the processor is online before following the pageset pointer.
- * Other parts of the kernel may not check if the zone is available.
+ * Allocate per cpu pagesets and initialize them.
+ * Before this call only boot pagesets were available.
+ * Boot pagesets will no longer be used by this processorr
+ * after setup_per_cpu_pageset().
  */
-static struct per_cpu_pageset boot_pageset[NR_CPUS];
-
-/*
- * Dynamically allocate memory for the
- * per cpu pageset array in struct zone.
- */
-static int __cpuinit process_zones(int cpu)
-{
-	struct zone *zone, *dzone;
-	int node = cpu_to_node(cpu);
-
-	node_set_state(node, N_CPU);	/* this node has a cpu */
-
-	for_each_populated_zone(zone) {
-		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
-					 GFP_KERNEL, node);
-		if (!zone_pcp(zone, cpu))
-			goto bad;
-
-		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
-
-		if (percpu_pagelist_fraction)
-			setup_pagelist_highmark(zone_pcp(zone, cpu),
-			    (zone->present_pages / percpu_pagelist_fraction));
-	}
-
-	return 0;
-bad:
-	for_each_zone(dzone) {
-		if (!populated_zone(dzone))
-			continue;
-		if (dzone == zone)
-			break;
-		kfree(zone_pcp(dzone, cpu));
-		zone_pcp(dzone, cpu) = &boot_pageset[cpu];
-	}
-	return -ENOMEM;
-}
-
-static inline void free_zone_pagesets(int cpu)
-{
-	struct zone *zone;
-
-	for_each_zone(zone) {
-		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
-
-		/* Free per_cpu_pageset if it is slab allocated */
-		if (pset != &boot_pageset[cpu])
-			kfree(pset);
-		zone_pcp(zone, cpu) = &boot_pageset[cpu];
-	}
-}
-
-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
-		unsigned long action,
-		void *hcpu)
-{
-	int cpu = (long)hcpu;
-	int ret = NOTIFY_OK;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		if (process_zones(cpu))
-			ret = NOTIFY_BAD;
-		break;
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		free_zone_pagesets(cpu);
-		break;
-	default:
-		break;
-	}
-	return ret;
-}
-
-static struct notifier_block __cpuinitdata pageset_notifier =
-	{ &pageset_cpuup_callback, NULL, 0 };
-
 void __init setup_per_cpu_pageset(void)
 {
-	int err;
-
-	/* Initialize per_cpu_pageset for cpu 0.
-	 * A cpuup callback will do this for every cpu
-	 * as it comes online
-	 */
-	err = process_zones(smp_processor_id());
-	BUG_ON(err);
-	register_cpu_notifier(&pageset_notifier);
+	struct zone *zone;
+	int cpu;
+
+	for_each_populated_zone(zone) {
+		zone->pageset = alloc_percpu(struct per_cpu_pageset);
+
+		for_each_possible_cpu(cpu) {
+			struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
+
+			setup_pageset(pcp, zone_batchsize(zone));
+
+			if (percpu_pagelist_fraction)
+				setup_pagelist_highmark(pcp,
+					(zone->present_pages /
+						percpu_pagelist_fraction));
+		}
+	}
 }
-
-#endif

 static noinline __init_refok
 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)

@@ -3264,7 +3207,7 @@ static int __zone_pcp_update(void *data)
 		struct per_cpu_pageset *pset;
 		struct per_cpu_pages *pcp;

-		pset = zone_pcp(zone, cpu);
+		pset = per_cpu_ptr(zone->pageset, cpu);
 		pcp = &pset->pcp;

 		local_irq_save(flags);

@@ -3282,21 +3225,17 @@ void zone_pcp_update(struct zone *zone)

 static __meminit void zone_pcp_init(struct zone *zone)
 {
-	int cpu;
-	unsigned long batch = zone_batchsize(zone);
+	/*
+	 * per cpu subsystem is not up at this point. The following code
+	 * relies on the ability of the linker to provide the
+	 * offset of a (static) per cpu variable into the per cpu area.
+	 */
+	zone->pageset = &boot_pageset;

-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-#ifdef CONFIG_NUMA
-		/* Early boot. Slab allocator not functional yet */
-		zone_pcp(zone, cpu) = &boot_pageset[cpu];
-		setup_pageset(&boot_pageset[cpu],0);
-#else
-		setup_pageset(zone_pcp(zone,cpu), batch);
-#endif
-	}
 	if (zone->present_pages)
-		printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
-			zone->name, zone->present_pages, batch);
+		printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
+			zone->name, zone->present_pages,
+					 zone_batchsize(zone));
 }

 __meminit int init_currently_empty_zone(struct zone *zone,

@@ -4810,10 +4749,11 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 	if (!write || (ret == -EINVAL))
 		return ret;
 	for_each_populated_zone(zone) {
-		for_each_online_cpu(cpu) {
+		for_each_possible_cpu(cpu) {
 			unsigned long high;
 			high = zone->present_pages / percpu_pagelist_fraction;
-			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
+			setup_pagelist_highmark(
+				per_cpu_ptr(zone->pageset, cpu), high);
 		}
 	}
 	return 0;
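The page allocator change replaces the per-zone zone_pcp(zone, cpu) arrangement (an NR_CPUS-sized table, NUMA-only dynamic allocation, and its own CPU notifier) with a single zone->pageset per-CPU pointer obtained from alloc_percpu() and reached through this_cpu_ptr()/per_cpu_ptr(). A reduced sketch of the new access pattern, with irqs disabled before the this_cpu_ptr() dereference as in free_hot_cold_page() above; the struct and function names are abbreviated stand-ins, not the real mm code:

/* Reduced sketch of the new zone->pageset access pattern. */
#include <linux/percpu.h>
#include <linux/irqflags.h>
#include <linux/errno.h>

struct my_pcp {
	int count;
};

struct my_zone {
	struct my_pcp __percpu *pageset;	/* was: one pageset per CPU in a static table */
};

static int my_zone_setup(struct my_zone *zone)
{
	zone->pageset = alloc_percpu(struct my_pcp);
	return zone->pageset ? 0 : -ENOMEM;
}

static void my_zone_free_one(struct my_zone *zone)
{
	struct my_pcp *pcp;
	unsigned long flags;

	local_irq_save(flags);			/* also keeps us on this CPU */
	pcp = this_cpu_ptr(zone->pageset);	/* no get_cpu()/put_cpu() pair needed */
	pcp->count++;
	local_irq_restore(flags);
}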
mm/percpu.c (28 lines)

@@ -80,13 +80,15 @@
 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
 #ifndef __addr_to_pcpu_ptr
 #define __addr_to_pcpu_ptr(addr)					\
-	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
-		 + (unsigned long)__per_cpu_start)
+	(void __percpu *)((unsigned long)(addr) -			\
+			  (unsigned long)pcpu_base_addr +		\
+			  (unsigned long)__per_cpu_start)
 #endif
 #ifndef __pcpu_ptr_to_addr
 #define __pcpu_ptr_to_addr(ptr)						\
-	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
-		 - (unsigned long)__per_cpu_start)
+	(void __force *)((unsigned long)(ptr) +				\
+			 (unsigned long)pcpu_base_addr -		\
+			 (unsigned long)__per_cpu_start)
 #endif

 struct pcpu_chunk {

@@ -913,11 +915,10 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	int rs, re;

 	/* quick path, check whether it's empty already */
-	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
-		if (rs == page_start && re == page_end)
-			return;
-		break;
-	}
+	rs = page_start;
+	pcpu_next_unpop(chunk, &rs, &re, page_end);
+	if (rs == page_start && re == page_end)
+		return;

 	/* immutable chunks can't be depopulated */
 	WARN_ON(chunk->immutable);

@@ -968,11 +969,10 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	int rs, re, rc;

 	/* quick path, check whether all pages are already there */
-	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
-		if (rs == page_start && re == page_end)
-			goto clear;
-		break;
-	}
+	rs = page_start;
+	pcpu_next_pop(chunk, &rs, &re, page_end);
+	if (rs == page_start && re == page_end)
+		goto clear;

 	/* need to allocate and map pages, this chunk can't be immutable */
 	WARN_ON(chunk->immutable);

@@ -1067,7 +1067,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-static void *pcpu_alloc(size_t size, size_t align, bool reserved)
+static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 {
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;

@@ -1196,7 +1196,7 @@ fail_unlock_mutex:
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_percpu(size_t size, size_t align)
+void __percpu *__alloc_percpu(size_t size, size_t align)
 {
 	return pcpu_alloc(size, align, false);
 }

@@ -1217,7 +1217,7 @@ EXPORT_SYMBOL_GPL(__alloc_percpu);
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_reserved_percpu(size_t size, size_t align)
+void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 {
 	return pcpu_alloc(size, align, true);
 }

@@ -1269,7 +1269,7 @@ static void pcpu_reclaim(struct work_struct *work)
  * CONTEXT:
  * Can be called from atomic context.
  */
-void free_percpu(void *ptr)
+void free_percpu(void __percpu *ptr)
 {
 	void *addr;
 	struct pcpu_chunk *chunk;
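The only functional content in the mm/percpu.c hunks is typing: the allocator's internal address-to-percpu-pointer conversions now cast through __percpu and __force, so the value handed to callers carries the sparse address space while the allocator keeps doing plain arithmetic internally. A small standalone sketch of the same cast dance, using sparse's noderef/address_space attributes directly; the macro names, the base symbol, and the use of address space 3 are assumptions made for the example rather than quotes from the commit:

/* Standalone sketch, checkable with sparse; builds normally without it. */
#ifdef __CHECKER__
# define __my_percpu	__attribute__((noderef, address_space(3)))
# define __my_force	__attribute__((force))
#else
# define __my_percpu
# define __my_force
#endif

extern char __my_base[];	/* stand-in for the pcpu_base_addr/__per_cpu_start math */

/* kernel address -> percpu pointer: attach the address space */
static inline void __my_percpu *my_addr_to_pcpu_ptr(void *addr)
{
	return (void __my_force __my_percpu *)((unsigned long)addr -
					       (unsigned long)__my_base);
}

/* percpu pointer -> kernel address: strip the address space again */
static inline void *my_pcpu_ptr_to_addr(void __my_percpu *ptr)
{
	unsigned long v = (unsigned long)(void __my_force *)ptr;

	return (void *)(v + (unsigned long)__my_base);
}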
mm/vmstat.c (15 lines)

@@ -139,7 +139,8 @@ static void refresh_zone_stat_thresholds(void)
 		threshold = calculate_threshold(zone);

 		for_each_online_cpu(cpu)
-			zone_pcp(zone, cpu)->stat_threshold = threshold;
+			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
+							= threshold;
 	}
 }

@@ -149,7 +150,8 @@ static void refresh_zone_stat_thresholds(void)
 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 				int delta)
 {
-	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
+
 	s8 *p = pcp->vm_stat_diff + item;
 	long x;

@@ -202,7 +204,7 @@ EXPORT_SYMBOL(mod_zone_page_state);
  */
 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
 	s8 *p = pcp->vm_stat_diff + item;

 	(*p)++;

@@ -223,7 +225,7 @@ EXPORT_SYMBOL(__inc_zone_page_state);

 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
 	s8 *p = pcp->vm_stat_diff + item;

 	(*p)--;

@@ -300,7 +302,7 @@ void refresh_cpu_vm_stats(int cpu)
 	for_each_populated_zone(zone) {
 		struct per_cpu_pageset *p;

-		p = zone_pcp(zone, cpu);
+		p = per_cpu_ptr(zone->pageset, cpu);

 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 			if (p->vm_stat_diff[i]) {

@@ -741,7 +743,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 	for_each_online_cpu(i) {
 		struct per_cpu_pageset *pageset;

-		pageset = zone_pcp(zone, i);
+		pageset = per_cpu_ptr(zone->pageset, i);
 		seq_printf(m,
 			   "\n cpu: %i"
 			   "\n count: %i"

@@ -906,6 +908,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		start_cpu_timer(cpu);
+		node_set_state(cpu_to_node(cpu), N_CPU);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
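The vmstat conversion keeps the usual split: the hot path updates a small per-CPU diff through this_cpu_ptr(), and a periodic path walks all CPUs with per_cpu_ptr() to fold the diffs back into global counters. A toy version of that fold, assuming an alloc_percpu-backed layout like zone->pageset above; every name here is invented for illustration:

/* Toy fold of per-CPU deltas into a global counter. */
#include <linux/percpu.h>
#include <linux/cpumask.h>

struct my_diff {
	signed char delta;
};

/* assumed to be set up elsewhere with alloc_percpu(struct my_diff) */
static struct my_diff __percpu *my_diffs;
static long my_global_count;

static void my_fast_path_inc(void)
{
	/* callers are expected to run with preemption disabled, as in vmstat */
	this_cpu_ptr(my_diffs)->delta++;
}

static void my_fold(int cpu)
{
	struct my_diff *d = per_cpu_ptr(my_diffs, cpu);

	if (d->delta) {
		my_global_count += d->delta;
		d->delta = 0;
	}
}

static void my_fold_all(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		my_fold(cpu);
}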