diff --git a/mm/slab.h b/mm/slab.h
index 495008f89bf6..f14e723b9e3c 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -25,10 +25,12 @@ struct slab {
 	union {
 		struct list_head slab_list;
 		struct rcu_head rcu_head;
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 		struct {
 			struct slab *next;
 			int slabs;	/* Nr of slabs left */
 		};
+#endif
 	};
 	struct kmem_cache *slab_cache;
 	/* Double-word boundary */
diff --git a/mm/slub.c b/mm/slub.c
index d08ba1025aae..261474092e43 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5258,6 +5258,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			total += x;
 			nodes[node] += x;
 
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 			slab = slub_percpu_partial_read_once(c);
 			if (slab) {
 				node = slab_nid(slab);
@@ -5270,6 +5271,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 				total += x;
 				nodes[node] += x;
 			}
+#endif
 		}
 	}
 
@@ -5469,9 +5471,10 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 {
 	int objects = 0;
 	int slabs = 0;
-	int cpu;
+	int cpu __maybe_unused;
 	int len = 0;
 
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 	for_each_online_cpu(cpu) {
 		struct slab *slab;
 
@@ -5480,12 +5483,13 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 		if (slab)
 			slabs += slab->slabs;
 	}
+#endif
 
 	/* Approximate half-full slabs, see slub_set_cpu_partial() */
 	objects = (slabs * oo_objects(s->oo)) / 2;
 
 	len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SLUB_CPU_PARTIAL) && defined(CONFIG_SMP)
 	for_each_online_cpu(cpu) {
 		struct slab *slab;