kasan: detect invalid frees for large objects
Patch series "kasan: detect invalid frees".

KASAN detects double-frees, but does not detect invalid-frees (when a
pointer into the middle of a heap object is passed to free). We recently
had a very unpleasant case in crypto code which freed an inner object
inside of a heap allocation. This went unnoticed during free, but totally
corrupted the heap and later led to a bunch of random crashes all over
kernel code.

Detect invalid frees.

This patch (of 5): Detect frees of pointers into the middle of large heap
objects.

I dropped const from kasan_kfree_large() because it starts propagating
through a bunch of functions in kasan_report.c, slab/slub nearest_obj(),
all of their local variables, fixup_red_left(), etc.

Link: http://lkml.kernel.org/r/1b45b4fe1d20fc0de1329aab674c1dd973fee723.1514378558.git.dvyukov@google.com
Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Commit: 47adccce3e
Parent: d321599cf6
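
For illustration, the bug class described in the changelog looks roughly like the sketch below. This is a hedged userspace analogue, not the kernel crypto code referenced above: the struct and field names are invented, and AddressSanitizer stands in for KASAN. Freeing a pointer that lands in the middle of a heap allocation corrupts allocator state in the same way an invalid kfree() corrupts the kernel heap.

/*
 * Userspace analogue of the invalid-free pattern this series teaches
 * KASAN to report (illustrative only; names are made up).
 * Build and run with: gcc -fsanitize=address invalid_free.c && ./a.out
 */
#include <stdlib.h>

struct inner {
	int refcount;
};

struct outer {
	long header;
	struct inner obj;	/* embedded in the middle of the allocation */
};

int main(void)
{
	struct outer *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	/*
	 * Bug: frees a pointer into the middle of the heap object.
	 * AddressSanitizer aborts with an invalid-free report here;
	 * after this series, KASAN reports the analogous kfree().
	 */
	free(&o->obj);
	return 0;
}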

--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -56,7 +56,7 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
 void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
 
 void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
-void kasan_kfree_large(const void *ptr);
+void kasan_kfree_large(void *ptr);
 void kasan_poison_kfree(void *ptr);
 void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
 		  gfp_t flags);
@@ -108,7 +108,7 @@ static inline void kasan_init_slab_obj(struct kmem_cache *cache,
 				const void *object) {}
 
 static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
-static inline void kasan_kfree_large(const void *ptr) {}
+static inline void kasan_kfree_large(void *ptr) {}
 static inline void kasan_poison_kfree(void *ptr) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
 				size_t size, gfp_t flags) {}

--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -94,6 +94,37 @@ static noinline void __init kmalloc_pagealloc_oob_right(void)
 	ptr[size] = 0;
 	kfree(ptr);
 }
+
+static noinline void __init kmalloc_pagealloc_uaf(void)
+{
+	char *ptr;
+	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
+
+	pr_info("kmalloc pagealloc allocation: use-after-free\n");
+	ptr = kmalloc(size, GFP_KERNEL);
+	if (!ptr) {
+		pr_err("Allocation failed\n");
+		return;
+	}
+
+	kfree(ptr);
+	ptr[0] = 0;
+}
+
+static noinline void __init kmalloc_pagealloc_invalid_free(void)
+{
+	char *ptr;
+	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
+
+	pr_info("kmalloc pagealloc allocation: invalid-free\n");
+	ptr = kmalloc(size, GFP_KERNEL);
+	if (!ptr) {
+		pr_err("Allocation failed\n");
+		return;
+	}
+
+	kfree(ptr + 1);
+}
 #endif
 
 static noinline void __init kmalloc_large_oob_right(void)
@@ -505,6 +536,8 @@ static int __init kmalloc_tests_init(void)
 	kmalloc_node_oob_right();
 #ifdef CONFIG_SLUB
 	kmalloc_pagealloc_oob_right();
+	kmalloc_pagealloc_uaf();
+	kmalloc_pagealloc_invalid_free();
 #endif
 	kmalloc_large_oob_right();
 	kmalloc_oob_krealloc_more();

--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -511,8 +511,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
 
 	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
 	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
-		kasan_report_double_free(cache, object,
-					__builtin_return_address(1));
+		kasan_report_invalid_free(object, __builtin_return_address(1));
 		return true;
 	}
 
@@ -602,12 +601,11 @@ void kasan_poison_kfree(void *ptr)
 		kasan_poison_slab_free(page->slab_cache, ptr);
 }
 
-void kasan_kfree_large(const void *ptr)
+void kasan_kfree_large(void *ptr)
 {
-	struct page *page = virt_to_page(ptr);
-
-	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
-			KASAN_FREE_PAGE);
+	if (ptr != page_address(virt_to_head_page(ptr)))
+		kasan_report_invalid_free(ptr, __builtin_return_address(1));
+	/* The object will be poisoned by page_alloc. */
 }
 
 int kasan_module_alloc(void *addr, size_t size)
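
The hunk above is the core of the detection: a large, page_alloc-backed kmalloc object may only be freed through the address of its head page, so any other pointer is reported, and poisoning is left to the page allocator. The sketch below is a simplified userspace model of that check, under the assumption that the object's base and size are tracked explicitly instead of via struct page; it is not kernel API.

/*
 * Simplified model (assumption: userspace, no struct page) of the check
 * added to kasan_kfree_large(): in the kernel,
 * page_address(virt_to_head_page(ptr)) recovers the base of the backing
 * compound page, and any ptr that differs from it is an invalid free.
 */
#include <stdbool.h>
#include <stdio.h>

struct large_object {
	char *base;		/* stands in for page_address() of the head page */
	unsigned long size;	/* stands in for PAGE_SIZE << compound_order(page) */
};

/* Mirrors: ptr != page_address(virt_to_head_page(ptr)) */
static bool large_free_is_invalid(const struct large_object *obj, void *ptr)
{
	return ptr != (void *)obj->base;
}

int main(void)
{
	static char backing[4 * 4096];	/* pretend order-2 compound page */
	struct large_object obj = { backing, sizeof(backing) };

	printf("%d\n", large_free_is_invalid(&obj, backing));		/* 0: valid free */
	printf("%d\n", large_free_is_invalid(&obj, backing + 16));	/* 1: reported */
	printf("%d\n", large_free_is_invalid(&obj, backing + 4096));	/* 1: reported */
	return 0;
}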

--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -107,8 +107,7 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
 
 void kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
-void kasan_report_double_free(struct kmem_cache *cache, void *object,
-			void *ip);
+void kasan_report_invalid_free(void *object, void *ip);
 
 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
 void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);

--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -326,8 +326,7 @@ static void print_shadow_for_address(const void *addr)
 	}
 }
 
-void kasan_report_double_free(struct kmem_cache *cache, void *object,
-				void *ip)
+void kasan_report_invalid_free(void *object, void *ip)
 {
 	unsigned long flags;
 

--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1356,7 +1356,7 @@ static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 	kasan_kmalloc_large(ptr, size, flags);
 }
 
-static inline void kfree_hook(const void *x)
+static inline void kfree_hook(void *x)
 {
 	kmemleak_free(x);
 	kasan_kfree_large(x);
@@ -3910,7 +3910,7 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
-		kfree_hook(x);
+		kfree_hook(object);
 		__free_pages(page, compound_order(page));
 		return;
 	}