mm: page_alloc: add kasan hooks on alloc and free paths

Add kernel address sanitizer hooks to mark allocated pages' addresses as
accessible in the corresponding shadow region.  Mark freed pages as
inaccessible.
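
For context: KASAN keeps one shadow byte for every 8 bytes of kernel memory,
and "accessible"/"inaccessible" means writing 0 or a poison value into those
shadow bytes.  Below is a minimal sketch of the address-to-shadow translation
the hooks rely on; the real helper is kasan_mem_to_shadow() in mm/kasan, and
KASAN_SHADOW_OFFSET is the arch-defined shadow base, so treat this as
illustrative rather than part of the patch itself.

/*
 * Sketch only: map a kernel address to the shadow byte that records its
 * state.  One shadow byte covers 1 << KASAN_SHADOW_SCALE_SHIFT (8) bytes.
 */
static inline void *sketch_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}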

Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrey Konovalov <adech.fo@gmail.com>
Cc: Yuri Gribov <tetra2005@gmail.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Andrey Ryabinin 2015-02-13 14:39:28 -08:00, committed by Linus Torvalds
Parent ef7f0d6a6c
Commit b8c73fc249
6 changed files with 38 additions and 0 deletions

include/linux/kasan.h

@@ -34,6 +34,9 @@ static inline void kasan_disable_current(void)
 void kasan_unpoison_shadow(const void *address, size_t size);
+void kasan_alloc_pages(struct page *page, unsigned int order);
+void kasan_free_pages(struct page *page, unsigned int order);
 #else /* CONFIG_KASAN */
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -41,6 +44,9 @@ static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
 static inline void kasan_enable_current(void) {}
 static inline void kasan_disable_current(void) {}
+static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
+static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 #endif /* CONFIG_KASAN */
 #endif /* LINUX_KASAN_H */
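
The #else stubs above are what let generic MM code call the hooks
unconditionally: with CONFIG_KASAN=n they are empty static inlines and compile
away.  A hypothetical call site, purely for illustration (example_prep_page is
not part of this patch):

#include <linux/kasan.h>

/* Hypothetical caller: no #ifdef CONFIG_KASAN is needed at the call site,
 * because the header provides empty inline stubs when KASAN is disabled. */
static void example_prep_page(struct page *page, unsigned int order)
{
	kasan_alloc_pages(page, order);	/* no-op unless CONFIG_KASAN=y */
}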

mm/compaction.c

@@ -16,6 +16,7 @@
 #include <linux/sysfs.h>
 #include <linux/balloon_compaction.h>
 #include <linux/page-isolation.h>
+#include <linux/kasan.h>
 #include "internal.h"
 #ifdef CONFIG_COMPACTION
@@ -72,6 +73,7 @@ static void map_pages(struct list_head *list)
 	list_for_each_entry(page, list, lru) {
 		arch_alloc_page(page, 0);
 		kernel_map_pages(page, 1, 1);
+		kasan_alloc_pages(page, 0);
 	}
 }

mm/kasan/kasan.c

@@ -254,6 +254,20 @@ static __always_inline void check_memory_region(unsigned long addr,
 	kasan_report(addr, size, write, _RET_IP_);
 }
+void kasan_alloc_pages(struct page *page, unsigned int order)
+{
+	if (likely(!PageHighMem(page)))
+		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
+}
+
+void kasan_free_pages(struct page *page, unsigned int order)
+{
+	if (likely(!PageHighMem(page)))
+		kasan_poison_shadow(page_address(page),
+				PAGE_SIZE << order,
+				KASAN_FREE_PAGE);
+}
 #define DEFINE_ASAN_LOAD_STORE(size)				\
 	void __asan_load##size(unsigned long addr)		\
 	{							\
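
kasan_unpoison_shadow() and kasan_poison_shadow() used above are the existing
low-level helpers in this file; they fill the shadow bytes covering
[addr, addr + size) with 0 or with the given poison value.  A simplified
sketch of what kasan_free_pages() therefore does to the shadow for a lowmem
page range (it assumes the shadow write is a plain memset, as in those
helpers; the function name here is made up):

/* Simplified sketch, not the exact helper: poison the shadow bytes that
 * cover one just-freed, order-N page range. */
static void sketch_poison_freed_pages(struct page *page, unsigned int order)
{
	void *addr = page_address(page);
	void *shadow_start = kasan_mem_to_shadow(addr);
	void *shadow_end = kasan_mem_to_shadow(addr + (PAGE_SIZE << order));

	/* every byte of the freed range now reads back as KASAN_FREE_PAGE */
	memset(shadow_start, KASAN_FREE_PAGE, shadow_end - shadow_start);
}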

mm/kasan/kasan.h

@@ -6,6 +6,8 @@
 #define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
 #define KASAN_SHADOW_MASK       (KASAN_SHADOW_SCALE_SIZE - 1)
+#define KASAN_FREE_PAGE         0xFF  /* page was freed */
 struct kasan_access_info {
 	const void *access_addr;
 	const void *first_bad_addr;
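
On the choice of value: a shadow byte of 0 means the whole 8-byte granule is
accessible, 1..7 mean only the first N bytes are, and values with the top bit
set are poison markers, so 0xFF unambiguously marks freed-page memory and can
be told apart from other poison types when building a report.  A rough,
illustrative decode of one shadow byte (the in-tree checks in mm/kasan/kasan.c
are more involved but follow the same encoding; the function name is made up):

/* Illustrative decode of the shadow byte guarding one address. */
static bool sketch_shadow_allows(s8 shadow, unsigned long addr)
{
	if (shadow == 0)
		return true;				/* all 8 bytes valid */
	if (shadow > 0 && shadow < KASAN_SHADOW_SCALE_SIZE)
		return (addr & KASAN_SHADOW_MASK) < shadow; /* partial granule */
	return false;		/* poison, e.g. KASAN_FREE_PAGE (0xFF == -1) */
}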

mm/kasan/report.c

@@ -54,6 +54,9 @@ static void print_error_description(struct kasan_access_info *info)
 	shadow_val = *(u8 *)kasan_mem_to_shadow(info->first_bad_addr);
 	switch (shadow_val) {
+	case KASAN_FREE_PAGE:
+		bug_type = "use after free";
+		break;
 	case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
 		bug_type = "out of bounds access";
 		break;
@@ -69,6 +72,14 @@ static void print_error_description(struct kasan_access_info *info)
 static void print_address_description(struct kasan_access_info *info)
 {
+	const void *addr = info->access_addr;
+
+	if ((addr >= (void *)PAGE_OFFSET) &&
+	    (addr < high_memory)) {
+		struct page *page = virt_to_head_page(addr);
+		dump_page(page, "kasan: bad access detected");
+	}
+
 	dump_stack();
 }

mm/page_alloc.c

@@ -25,6 +25,7 @@
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/kmemcheck.h>
+#include <linux/kasan.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>
@@ -787,6 +788,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 	trace_mm_page_free(page, order);
 	kmemcheck_free_shadow(page, order);
+	kasan_free_pages(page, order);
 	if (PageAnon(page))
 		page->mapping = NULL;
@@ -970,6 +972,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 	arch_alloc_page(page, order);
 	kernel_map_pages(page, 1 << order, 1);
+	kasan_alloc_pages(page, order);
 	if (gfp_flags & __GFP_ZERO)
 		prep_zero_page(page, order, gfp_flags);
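
With the hooks wired into free_pages_prepare() and prep_new_page(), the page
allocator poisons on free and unpoisons on allocation, so a stale pointer into
a freed page is caught on its next dereference.  An illustrative, test-style
snippet of the bug class this detects (not part of the patch):

/* Illustration only: after __free_pages() the page's shadow holds
 * KASAN_FREE_PAGE, so the final store triggers a "use after free" report. */
static noinline void page_uaf_example(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	char *ptr;

	if (!page)
		return;

	ptr = page_address(page);
	__free_pages(page, 0);
	*ptr = 'x';		/* bad: write into a freed, poisoned page */
}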