This patch handles the kmemleak operations needed for modules loading so
that memory allocations from inside a module are properly tracked.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
This commit is contained in:
Catalin Marinas 2009-06-11 13:23:20 +01:00
Parent dbb1f81ca6
Commit 4f2294b6dc
1 changed file with 56 additions and 0 deletions

View file

@@ -53,6 +53,7 @@
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#if 0
#define DEBUGP printk
@@ -430,6 +431,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
unsigned long extra;
unsigned int i;
void *ptr;
int cpu;
if (align > PAGE_SIZE) {
printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
@@ -459,6 +461,11 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
if (!split_block(i, size))
return NULL;
/* add the per-cpu scanning areas */
for_each_possible_cpu(cpu)
kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
GFP_KERNEL);
/* Mark allocated */
pcpu_size[i] = -pcpu_size[i];
return ptr;
@@ -473,6 +480,7 @@ static void percpu_modfree(void *freeme)
{
unsigned int i;
void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
int cpu;
/* First entry is core kernel percpu data. */
for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
@@ -484,6 +492,10 @@ static void percpu_modfree(void *freeme)
BUG();
free:
/* remove the per-cpu scanning areas */
for_each_possible_cpu(cpu)
kmemleak_free(freeme + per_cpu_offset(cpu));
/* Merge with previous? */
if (pcpu_size[i-1] >= 0) {
pcpu_size[i-1] += pcpu_size[i];
@@ -1876,6 +1888,36 @@ static void *module_alloc_update_bounds(unsigned long size)
return ret;
}
#ifdef CONFIG_DEBUG_KMEMLEAK
/*
 * Inform kmemleak about the memory regions inside a freshly loaded
 * module that may hold pointers to other allocations, so that objects
 * referenced only from module memory are not reported as leaks.
 */
static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
				 Elf_Shdr *sechdrs, char *secstrings)
{
	unsigned int i;

	/*
	 * only scan the sections containing data
	 *
	 * First register the struct module itself; it is embedded inside
	 * the module_core allocation, hence the offset computation.
	 */
	kmemleak_scan_area(mod->module_core, (unsigned long)mod -
			   (unsigned long)mod->module_core,
			   sizeof(struct module), GFP_KERNEL);
	/* Section index 0 is the reserved SHN_UNDEF entry — skip it. */
	for (i = 1; i < hdr->e_shnum; i++) {
		/* Ignore sections not loaded into memory at runtime. */
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;
		/*
		 * Prefix match: covers ".data", ".bss" and variants such
		 * as ".data.rel". Code/rodata sections hold no live
		 * pointers worth scanning.
		 */
		if (strncmp(secstrings + sechdrs[i].sh_name, ".data", 5) != 0
		    && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
			continue;

		/* sh_addr is absolute here (module already relocated). */
		kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
				   (unsigned long)mod->module_core,
				   sechdrs[i].sh_size, GFP_KERNEL);
	}
}
#else
/* kmemleak disabled: registration is a no-op. */
static inline void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
					Elf_Shdr *sechdrs, char *secstrings)
{
}
#endif
/* Allocate and load the module: note that size of section 0 is always
zero, and we rely on this for optional sections. */
static noinline struct module *load_module(void __user *umod,
@@ -2046,6 +2088,12 @@ static noinline struct module *load_module(void __user *umod,
/* Do the allocs. */
ptr = module_alloc_update_bounds(mod->core_size);
/*
* The pointer to this block is stored in the module structure
* which is inside the block. Just mark it as not being a
* leak.
*/
kmemleak_not_leak(ptr);
if (!ptr) {
err = -ENOMEM;
goto free_percpu;
@@ -2054,6 +2102,13 @@ static noinline struct module *load_module(void __user *umod,
mod->module_core = ptr;
ptr = module_alloc_update_bounds(mod->init_size);
/*
* The pointer to this block is stored in the module structure
* which is inside the block. This block doesn't need to be
* scanned as it contains data and code that will be freed
* after the module is initialized.
*/
kmemleak_ignore(ptr);
if (!ptr && mod->init_size) {
err = -ENOMEM;
goto free_core;
@@ -2084,6 +2139,7 @@ static noinline struct module *load_module(void __user *umod,
}
/* Module has been moved. */
mod = (void *)sechdrs[modindex].sh_addr;
kmemleak_load_module(mod, hdr, sechdrs, secstrings);
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),