SLUB: move resiliency check into SYSFS section

Move the resiliency check into the SYSFS section, after validate_slab(),
which the resiliency check uses.  This avoids a forward declaration.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
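For readers who want to see the forward-declaration point in isolation, here is a minimal, hypothetical userspace sketch; helper() and caller() are illustrative names only and are not part of the kernel source or this patch:

/*
 * Minimal sketch, not from the patch: helper() and caller() stand in
 * for validate_slab_cache() and resiliency_test().  Because caller()
 * is defined after helper(), no forward declaration such as
 * "static unsigned long helper(int x);" is needed; had caller() come
 * first, that prototype would be required.  Reordering the definitions
 * this way is what the patch does inside mm/slub.c.
 */
#include <stdio.h>

static unsigned long helper(int x)	/* stands in for validate_slab_cache() */
{
	return (unsigned long)x * 2;
}

static void caller(void)		/* stands in for resiliency_test() */
{
	/* helper() is already visible here, so no prototype is needed */
	printf("helper returned %lu\n", helper(21));
}

int main(void)
{
	caller();
	return 0;
}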
Parent: 7656c72b5a
Commit: b345970905
 mm/slub.c | 112 ++++++++++++++++++++++++++++--------------------------------
 1 file changed, 55 insertions(+), 57 deletions(-)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2512,63 +2512,6 @@ static int __init cpucache_init(void)
 __initcall(cpucache_init);
 #endif
 
-#ifdef SLUB_RESILIENCY_TEST
-static unsigned long validate_slab_cache(struct kmem_cache *s);
-
-static void resiliency_test(void)
-{
-	u8 *p;
-
-	printk(KERN_ERR "SLUB resiliency testing\n");
-	printk(KERN_ERR "-----------------------\n");
-	printk(KERN_ERR "A. Corruption after allocation\n");
-
-	p = kzalloc(16, GFP_KERNEL);
-	p[16] = 0x12;
-	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
-			" 0x12->0x%p\n\n", p + 16);
-
-	validate_slab_cache(kmalloc_caches + 4);
-
-	/* Hmmm... The next two are dangerous */
-	p = kzalloc(32, GFP_KERNEL);
-	p[32 + sizeof(void *)] = 0x34;
-	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
-			" 0x34 -> -0x%p\n", p);
-	printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
-
-	validate_slab_cache(kmalloc_caches + 5);
-	p = kzalloc(64, GFP_KERNEL);
-	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
-	*p = 0x56;
-	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
-									p);
-	printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
-	validate_slab_cache(kmalloc_caches + 6);
-
-	printk(KERN_ERR "\nB. Corruption after free\n");
-	p = kzalloc(128, GFP_KERNEL);
-	kfree(p);
-	*p = 0x78;
-	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
-	validate_slab_cache(kmalloc_caches + 7);
-
-	p = kzalloc(256, GFP_KERNEL);
-	kfree(p);
-	p[50] = 0x9a;
-	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
-	validate_slab_cache(kmalloc_caches + 8);
-
-	p = kzalloc(512, GFP_KERNEL);
-	kfree(p);
-	p[512] = 0xab;
-	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
-	validate_slab_cache(kmalloc_caches + 9);
-}
-#else
-static void resiliency_test(void) {};
-#endif
-
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
@@ -2685,6 +2628,61 @@ static unsigned long validate_slab_cache(struct kmem_cache *s)
 	return count;
 }
 
+#ifdef SLUB_RESILIENCY_TEST
+static void resiliency_test(void)
+{
+	u8 *p;
+
+	printk(KERN_ERR "SLUB resiliency testing\n");
+	printk(KERN_ERR "-----------------------\n");
+	printk(KERN_ERR "A. Corruption after allocation\n");
+
+	p = kzalloc(16, GFP_KERNEL);
+	p[16] = 0x12;
+	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
+			" 0x12->0x%p\n\n", p + 16);
+
+	validate_slab_cache(kmalloc_caches + 4);
+
+	/* Hmmm... The next two are dangerous */
+	p = kzalloc(32, GFP_KERNEL);
+	p[32 + sizeof(void *)] = 0x34;
+	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
+			" 0x34 -> -0x%p\n", p);
+	printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
+
+	validate_slab_cache(kmalloc_caches + 5);
+	p = kzalloc(64, GFP_KERNEL);
+	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
+	*p = 0x56;
+	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
+									p);
+	printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
+	validate_slab_cache(kmalloc_caches + 6);
+
+	printk(KERN_ERR "\nB. Corruption after free\n");
+	p = kzalloc(128, GFP_KERNEL);
+	kfree(p);
+	*p = 0x78;
+	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
+	validate_slab_cache(kmalloc_caches + 7);
+
+	p = kzalloc(256, GFP_KERNEL);
+	kfree(p);
+	p[50] = 0x9a;
+	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
+	validate_slab_cache(kmalloc_caches + 8);
+
+	p = kzalloc(512, GFP_KERNEL);
+	kfree(p);
+	p[512] = 0xab;
+	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
+	validate_slab_cache(kmalloc_caches + 9);
+}
+#else
+static void resiliency_test(void) {};
+#endif
+
 /*
  * Generate lists of code addresses where slabcache objects are allocated
  * and freed.