kasan: group vmalloc code
This is a preparatory commit for the upcoming addition of a new hardware
tag-based (MTE-based) KASAN mode.

Group all vmalloc-related function declarations in include/linux/kasan.h,
and their implementations in mm/kasan/common.c.

No functional changes.

Link: https://lkml.kernel.org/r/80a6fdd29b039962843bd6cf22ce2643a7c8904e.1606161801.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Marco Elver <elver@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 71f6af6d52
Commit: 3b1a4a8640
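As a condensed orientation for the regrouping described above, the vmalloc-related declarations in include/linux/kasan.h end up split into two guarded blocks after this patch. This is only a sketch; the comments stand in for the declarations shown in full in the diff below.

#ifdef CONFIG_KASAN_VMALLOC
/* real declarations: kasan_populate_vmalloc(), kasan_poison_vmalloc(),
 * kasan_unpoison_vmalloc(), kasan_release_vmalloc() */
#else /* CONFIG_KASAN_VMALLOC */
/* no-op static inline stubs of the same functions */
#endif /* CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)
/* module shadow special case: kasan_module_alloc(), kasan_free_shadow() */
#else /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */
/* no-op static inline stubs of the same functions */
#endif /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */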
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -75,19 +75,6 @@ struct kasan_cache {
 	int free_meta_offset;
 };
 
-/*
- * These functions provide a special case to support backing module
- * allocations with real shadow memory. With KASAN vmalloc, the special
- * case is unnecessary, as the work is handled in the generic case.
- */
-#ifndef CONFIG_KASAN_VMALLOC
-int kasan_module_alloc(void *addr, size_t size);
-void kasan_free_shadow(const struct vm_struct *vm);
-#else
-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
-#endif
-
 int kasan_add_zero_shadow(void *start, unsigned long size);
 void kasan_remove_zero_shadow(void *start, unsigned long size);
 
@@ -156,9 +143,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
 	return false;
 }
 
-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
-
 static inline int kasan_add_zero_shadow(void *start, unsigned long size)
 {
 	return 0;
@@ -211,13 +195,16 @@ static inline void *kasan_reset_tag(const void *addr)
 #endif /* CONFIG_KASAN_SW_TAGS */
 
 #ifdef CONFIG_KASAN_VMALLOC
+
 int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
 void kasan_poison_vmalloc(const void *start, unsigned long size);
 void kasan_unpoison_vmalloc(const void *start, unsigned long size);
 void kasan_release_vmalloc(unsigned long start, unsigned long end,
 			   unsigned long free_region_start,
 			   unsigned long free_region_end);
-#else
+
+#else /* CONFIG_KASAN_VMALLOC */
+
 static inline int kasan_populate_vmalloc(unsigned long start,
 					 unsigned long size)
 {
@@ -232,7 +219,25 @@ static inline void kasan_release_vmalloc(unsigned long start,
 					 unsigned long end,
 					 unsigned long free_region_start,
 					 unsigned long free_region_end) {}
-#endif
+
+#endif /* CONFIG_KASAN_VMALLOC */
+
+#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)
+
+/*
+ * These functions provide a special case to support backing module
+ * allocations with real shadow memory. With KASAN vmalloc, the special
+ * case is unnecessary, as the work is handled in the generic case.
+ */
+int kasan_module_alloc(void *addr, size_t size);
+void kasan_free_shadow(const struct vm_struct *vm);
+
+#else /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */
+
+static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
+static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+
+#endif /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */
 
 #ifdef CONFIG_KASAN_INLINE
 void kasan_non_canonical_hook(unsigned long addr);
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -536,44 +536,6 @@ void kasan_kfree_large(void *ptr, unsigned long ip)
 	/* The object will be poisoned by page_alloc. */
 }
 
-#ifndef CONFIG_KASAN_VMALLOC
-int kasan_module_alloc(void *addr, size_t size)
-{
-	void *ret;
-	size_t scaled_size;
-	size_t shadow_size;
-	unsigned long shadow_start;
-
-	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
-	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
-	shadow_size = round_up(scaled_size, PAGE_SIZE);
-
-	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
-		return -EINVAL;
-
-	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
-			shadow_start + shadow_size,
-			GFP_KERNEL,
-			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
-			__builtin_return_address(0));
-
-	if (ret) {
-		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
-		find_vm_area(addr)->flags |= VM_KASAN;
-		kmemleak_ignore(ret);
-		return 0;
-	}
-
-	return -ENOMEM;
-}
-
-void kasan_free_shadow(const struct vm_struct *vm)
-{
-	if (vm->flags & VM_KASAN)
-		vfree(kasan_mem_to_shadow(vm->addr));
-}
-#endif
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 static bool shadow_mapped(unsigned long addr)
 {
@@ -685,6 +647,7 @@ core_initcall(kasan_memhotplug_init);
 #endif
 
 #ifdef CONFIG_KASAN_VMALLOC
+
 static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 				      void *unused)
 {
@@ -923,4 +886,43 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
 				       (unsigned long)shadow_end);
 	}
 }
+
+#else /* CONFIG_KASAN_VMALLOC */
+
+int kasan_module_alloc(void *addr, size_t size)
+{
+	void *ret;
+	size_t scaled_size;
+	size_t shadow_size;
+	unsigned long shadow_start;
+
+	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
+	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
+	shadow_size = round_up(scaled_size, PAGE_SIZE);
+
+	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
+		return -EINVAL;
+
+	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
+			shadow_start + shadow_size,
+			GFP_KERNEL,
+			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
+			__builtin_return_address(0));
+
+	if (ret) {
+		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
+		find_vm_area(addr)->flags |= VM_KASAN;
+		kmemleak_ignore(ret);
+		return 0;
+	}
+
+	return -ENOMEM;
+}
+
+void kasan_free_shadow(const struct vm_struct *vm)
+{
+	if (vm->flags & VM_KASAN)
+		vfree(kasan_mem_to_shadow(vm->addr));
+}
+
 #endif
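For readers following the moved kasan_module_alloc() code: the shadow sizing it performs is a simple scale-and-round. Below is a minimal userspace sketch of that arithmetic, assuming the generic KASAN constants (KASAN_SHADOW_SCALE_SHIFT == 3, i.e. one shadow byte covers 8 bytes, so KASAN_SHADOW_MASK == 7) and a 4 KiB PAGE_SIZE; the 3-page module size is only an illustrative input.

/* Sketch of the shadow sizing in kasan_module_alloc(); not kernel code. */
#include <stdio.h>

#define PAGE_SIZE                4096UL
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_MASK        ((1UL << KASAN_SHADOW_SCALE_SHIFT) - 1)

static unsigned long round_up_page(unsigned long x)
{
	return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	unsigned long size = 3 * PAGE_SIZE;	/* hypothetical module size */

	/* Same arithmetic as kasan_module_alloc(): shadow bytes, page aligned. */
	unsigned long scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	unsigned long shadow_size = round_up_page(scaled_size);

	printf("module %lu bytes -> %lu shadow bytes -> %lu bytes mapped\n",
	       size, scaled_size, shadow_size);	/* 12288 -> 1536 -> 4096 */
	return 0;
}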