[PATCH] percpu_counters: create lib/percpu_counter.c
- Move percpu_counter routines from mm/swap.c to lib/percpu_counter.c

Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent: d09042da72
Commit: 3cbc564024
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_TEXTSEARCH) += textsearch.o
 obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
 obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
 obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
+obj-$(CONFIG_SMP) += percpu_counter.o
 
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
 
--- /dev/null
+++ b/lib/percpu_counter.c
@@ -0,0 +1,46 @@
+/*
+ * Fast batching percpu counters.
+ */
+
+#include <linux/percpu_counter.h>
+#include <linux/module.h>
+
+void percpu_counter_mod(struct percpu_counter *fbc, long amount)
+{
+	long count;
+	long *pcount;
+	int cpu = get_cpu();
+
+	pcount = per_cpu_ptr(fbc->counters, cpu);
+	count = *pcount + amount;
+	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
+		spin_lock(&fbc->lock);
+		fbc->count += count;
+		*pcount = 0;
+		spin_unlock(&fbc->lock);
+	} else {
+		*pcount = count;
+	}
+	put_cpu();
+}
+EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts, return the result. This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+long percpu_counter_sum(struct percpu_counter *fbc)
+{
+	long ret;
+	int cpu;
+
+	spin_lock(&fbc->lock);
+	ret = fbc->count;
+	for_each_possible_cpu(cpu) {
+		long *pcount = per_cpu_ptr(fbc->counters, cpu);
+		ret += *pcount;
+	}
+	spin_unlock(&fbc->lock);
+	return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
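
Illustrative note, not part of the diff: a minimal sketch of how a caller might use the routines now living in lib/percpu_counter.c. The "widget" names are hypothetical, and percpu_counter_init(), percpu_counter_destroy() and percpu_counter_read_positive() are assumed to be the inline helpers already declared in <linux/percpu_counter.h>, which this patch does not change.

/*
 * Illustrative only -- not part of this patch.  A hypothetical "widget"
 * counter built on the API moved into lib/percpu_counter.c, assuming the
 * helpers declared in <linux/percpu_counter.h> of this era
 * (percpu_counter_init(), percpu_counter_destroy(),
 * percpu_counter_read_positive()).
 */
#include <linux/percpu_counter.h>

static struct percpu_counter nr_widgets;

static void widgets_setup(void)
{
	/* count starts at 0; per-cpu slots are allocated by the helper */
	percpu_counter_init(&nr_widgets);
}

static void widget_created(void)
{
	/* Cheap in the common case: only this CPU's local slot is touched. */
	percpu_counter_mod(&nr_widgets, 1);
}

static void widget_destroyed(void)
{
	percpu_counter_mod(&nr_widgets, -1);
}

static long widgets_estimate(void)
{
	/* Fast read; may lag behind by the not-yet-folded per-cpu deltas. */
	return percpu_counter_read_positive(&nr_widgets);
}

static long widgets_exact(void)
{
	/* Slower but accurate: folds every CPU's slot under fbc->lock. */
	return percpu_counter_sum(&nr_widgets);
}

static void widgets_teardown(void)
{
	percpu_counter_destroy(&nr_widgets);
}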
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -480,48 +480,6 @@ static int cpu_swap_callback(struct notifier_block *nfb,
 #endif /* CONFIG_HOTPLUG_CPU */
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_SMP
-void percpu_counter_mod(struct percpu_counter *fbc, long amount)
-{
-	long count;
-	long *pcount;
-	int cpu = get_cpu();
-
-	pcount = per_cpu_ptr(fbc->counters, cpu);
-	count = *pcount + amount;
-	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
-		spin_lock(&fbc->lock);
-		fbc->count += count;
-		*pcount = 0;
-		spin_unlock(&fbc->lock);
-	} else {
-		*pcount = count;
-	}
-	put_cpu();
-}
-EXPORT_SYMBOL(percpu_counter_mod);
-
-/*
- * Add up all the per-cpu counts, return the result. This is a more accurate
- * but much slower version of percpu_counter_read_positive()
- */
-long percpu_counter_sum(struct percpu_counter *fbc)
-{
-	long ret;
-	int cpu;
-
-	spin_lock(&fbc->lock);
-	ret = fbc->count;
-	for_each_possible_cpu(cpu) {
-		long *pcount = per_cpu_ptr(fbc->counters, cpu);
-		ret += *pcount;
-	}
-	spin_unlock(&fbc->lock);
-	return ret < 0 ? 0 : ret;
-}
-EXPORT_SYMBOL(percpu_counter_sum);
-#endif
-
 /*
  * Perform any setup for the swap system
  */
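
A second illustrative aside, not part of the patch: the batching in percpu_counter_mod() also bounds how far the cheap percpu_counter_read_positive() path can lag percpu_counter_sum(), because each CPU folds its local slot into fbc->count before the slot reaches +/-FBC_BATCH. A sketch of that worst-case drift, assuming CONFIG_SMP and the FBC_BATCH definition in <linux/percpu_counter.h> of this era:

#include <linux/cpumask.h>
#include <linux/percpu_counter.h>

/*
 * Illustrative only: upper bound on |fast read - percpu_counter_sum()|.
 * Each CPU's local slot stays strictly inside (-FBC_BATCH, FBC_BATCH), so
 * the unfolded deltas sum to at most (FBC_BATCH - 1) per possible CPU.
 */
static long percpu_counter_max_drift(void)
{
	return (long)(FBC_BATCH - 1) * num_possible_cpus();
}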