kasan: rename KASAN_SHADOW_* to KASAN_GRANULE_*

This is a preparatory commit for the upcoming addition of a new hardware
tag-based (MTE-based) KASAN mode.

The new mode won't be using shadow memory, but will still use the concept
of memory granules.  Each memory granule maps to a single metadata entry:
8 bytes per one shadow byte for generic mode, 16 bytes per one shadow byte
for software tag-based mode, and 16 bytes per one allocation tag for
hardware tag-based mode.
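
As a rough illustration (not part of this patch; the helper name and parameters
below are hypothetical and only sketch the existing shadow-based modes), the
granule-to-metadata mapping amounts to:

    /*
     * Generic mode: scale_shift == 3, i.e. 8-byte granules, one shadow byte each.
     * Software tag-based mode: scale_shift == 4, i.e. 16-byte granules, one
     * shadow byte each. Hardware tag-based mode will instead use one allocation
     * tag per 16-byte granule, with no shadow memory at all.
     */
    static inline void *granule_to_metadata(const void *addr,
                                            unsigned int scale_shift,
                                            unsigned long shadow_offset)
    {
            /* One metadata entry covers one granule of (1UL << scale_shift) bytes. */
            return (void *)(((unsigned long)addr >> scale_shift) + shadow_offset);
    }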

Rename KASAN_SHADOW_SCALE_SIZE to KASAN_GRANULE_SIZE, and
KASAN_SHADOW_MASK to KASAN_GRANULE_MASK.

Also use KASAN_GRANULE_MASK where a mask is actually needed, and KASAN_GRANULE_SIZE otherwise.
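
For example, the mask is what extracts an in-granule offset, while the size is
what rounding is done against (illustrative lines mirroring patterns from the
diff below, not new code added by this patch):

    unsigned long offset = addr & KASAN_GRANULE_MASK;     /* used as a mask */
    size_t aligned = round_up(size, KASAN_GRANULE_SIZE);  /* used as a size */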

No functional changes.

Link: https://lkml.kernel.org/r/939b5754e47f528a6e6a6f28ffc5815d8d128033.1606161801.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Marco Elver <elver@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Authored by Andrey Konovalov on 2020-12-22 12:00:24 -08:00, committed by Linus Torvalds
Parent: cebd0eb29a
Commit: 1f600626b3
10 changed files with 46 additions and 45 deletions

@@ -265,7 +265,7 @@ Most mappings in vmalloc space are small, requiring less than a full
 page of shadow space. Allocating a full shadow page per mapping would
 therefore be wasteful. Furthermore, to ensure that different mappings
 use different shadow pages, mappings would have to be aligned to
-``KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE``.
+``KASAN_GRANULE_SIZE * PAGE_SIZE``.
 Instead, we share backing space across multiple mappings. We allocate
 a backing page when a mapping in vmalloc space uses a particular page

@@ -25,7 +25,7 @@
 #include "../mm/kasan/kasan.h"
-#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
+#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
 /*
  * We assign some test results to these globals to make sure the tests

@@ -15,7 +15,7 @@
 #include "../mm/kasan/kasan.h"
-#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
+#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
 static noinline void __init copy_user_test(void)
 {

@@ -106,7 +106,7 @@ void *memcpy(void *dest, const void *src, size_t len)
 /*
  * Poisons the shadow memory for 'size' bytes starting from 'addr'.
- * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
+ * Memory addresses should be aligned to KASAN_GRANULE_SIZE.
  */
 void poison_range(const void *address, size_t size, u8 value)
 {
@@ -138,13 +138,13 @@ void unpoison_range(const void *address, size_t size)
         poison_range(address, size, tag);
-        if (size & KASAN_SHADOW_MASK) {
+        if (size & KASAN_GRANULE_MASK) {
                 u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
                 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                         *shadow = tag;
                 else
-                        *shadow = size & KASAN_SHADOW_MASK;
+                        *shadow = size & KASAN_GRANULE_MASK;
         }
 }
@@ -301,7 +301,7 @@ void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
 void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 {
         poison_range(object,
-                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
+                        round_up(cache->object_size, KASAN_GRANULE_SIZE),
                         KASAN_KMALLOC_REDZONE);
 }
@@ -373,7 +373,7 @@ static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
 {
         if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                 return shadow_byte < 0 ||
-                        shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
+                        shadow_byte >= KASAN_GRANULE_SIZE;
         /* else CONFIG_KASAN_SW_TAGS: */
         if ((u8)shadow_byte == KASAN_TAG_INVALID)
@@ -412,7 +412,7 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                 return true;
         }
-        rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
+        rounded_up_size = round_up(cache->object_size, KASAN_GRANULE_SIZE);
         poison_range(object, rounded_up_size, KASAN_KMALLOC_FREE);
         if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
@@ -445,9 +445,9 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
                 return NULL;
         redzone_start = round_up((unsigned long)(object + size),
-                        KASAN_SHADOW_SCALE_SIZE);
+                        KASAN_GRANULE_SIZE);
         redzone_end = round_up((unsigned long)object + cache->object_size,
-                        KASAN_SHADOW_SCALE_SIZE);
+                        KASAN_GRANULE_SIZE);
         if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                 tag = assign_tag(cache, object, false, keep_tag);
@@ -491,7 +491,7 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
         page = virt_to_page(ptr);
         redzone_start = round_up((unsigned long)(ptr + size),
-                        KASAN_SHADOW_SCALE_SIZE);
+                        KASAN_GRANULE_SIZE);
         redzone_end = (unsigned long)ptr + page_size(page);
         unpoison_range(ptr, size);
@@ -589,8 +589,8 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
         shadow_size = nr_shadow_pages << PAGE_SHIFT;
         shadow_end = shadow_start + shadow_size;
-        if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
-            WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
+        if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
+            WARN_ON(start_kaddr % (KASAN_GRANULE_SIZE << PAGE_SHIFT)))
                 return NOTIFY_BAD;
         switch (action) {
@@ -748,7 +748,7 @@ void kasan_poison_vmalloc(const void *start, unsigned long size)
         if (!is_vmalloc_or_module_addr(start))
                 return;
-        size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+        size = round_up(size, KASAN_GRANULE_SIZE);
         poison_range(start, size, KASAN_VMALLOC_INVALID);
 }
@@ -861,22 +861,22 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
         unsigned long region_start, region_end;
         unsigned long size;
-        region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
-        region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+        region_start = ALIGN(start, PAGE_SIZE * KASAN_GRANULE_SIZE);
+        region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_GRANULE_SIZE);
         free_region_start = ALIGN(free_region_start,
-                        PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+                        PAGE_SIZE * KASAN_GRANULE_SIZE);
         if (start != region_start &&
             free_region_start < region_start)
-                region_start -= PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
+                region_start -= PAGE_SIZE * KASAN_GRANULE_SIZE;
         free_region_end = ALIGN_DOWN(free_region_end,
-                        PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+                        PAGE_SIZE * KASAN_GRANULE_SIZE);
         if (end != region_end &&
             free_region_end > region_end)
-                region_end += PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
+                region_end += PAGE_SIZE * KASAN_GRANULE_SIZE;
         shadow_start = kasan_mem_to_shadow((void *)region_start);
         shadow_end = kasan_mem_to_shadow((void *)region_end);
@@ -902,7 +902,8 @@ int kasan_module_alloc(void *addr, size_t size)
         unsigned long shadow_start;
         shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
-        scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
+        scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
+                        KASAN_SHADOW_SCALE_SHIFT;
         shadow_size = round_up(scaled_size, PAGE_SIZE);
         if (WARN_ON(!PAGE_ALIGNED(shadow_start)))

@@ -46,7 +46,7 @@ static __always_inline bool memory_is_poisoned_1(unsigned long addr)
         s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
         if (unlikely(shadow_value)) {
-                s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
+                s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
                 return unlikely(last_accessible_byte >= shadow_value);
         }
@@ -62,7 +62,7 @@ static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
          * Access crosses 8(shadow size)-byte boundary. Such access maps
          * into 2 shadow bytes, so we need to check them both.
          */
-        if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
+        if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
                 return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
         return memory_is_poisoned_1(addr + size - 1);
@@ -73,7 +73,7 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr)
         u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
         /* Unaligned 16-bytes access maps into 3 shadow bytes. */
-        if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+        if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE)))
                 return *shadow_addr || memory_is_poisoned_1(addr + 15);
         return *shadow_addr;
@@ -134,7 +134,7 @@ static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                 s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
                 if (unlikely(ret != (unsigned long)last_shadow ||
-                        ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
+                        ((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
                         return true;
         }
         return false;
@@ -200,7 +200,7 @@ void kasan_cache_shutdown(struct kmem_cache *cache)
 static void register_global(struct kasan_global *global)
 {
-        size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+        size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);
         unpoison_range(global->beg, global->size);
@@ -274,10 +274,10 @@ EXPORT_SYMBOL(__asan_handle_no_return);
 /* Emitted by compiler to poison alloca()ed objects. */
 void __asan_alloca_poison(unsigned long addr, size_t size)
 {
-        size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+        size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
         size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
                         rounded_up_size;
-        size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);
+        size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);
         const void *left_redzone = (const void *)(addr -
                         KASAN_ALLOCA_REDZONE_SIZE);

@@ -34,7 +34,7 @@ void *find_first_bad_addr(void *addr, size_t size)
         void *p = addr;
         while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p)))
-                p += KASAN_SHADOW_SCALE_SIZE;
+                p += KASAN_GRANULE_SIZE;
         return p;
 }
@@ -46,14 +46,14 @@ static const char *get_shadow_bug_type(struct kasan_access_info *info)
         shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);
         /*
-         * If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look
+         * If shadow byte value is in [0, KASAN_GRANULE_SIZE) we can look
          * at the next shadow byte to determine the type of the bad access.
          */
-        if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1)
+        if (*shadow_addr > 0 && *shadow_addr <= KASAN_GRANULE_SIZE - 1)
                 shadow_addr++;
         switch (*shadow_addr) {
-        case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
+        case 0 ... KASAN_GRANULE_SIZE - 1:
                 /*
                  * In theory it's still possible to see these shadow values
                  * due to a data race in the kernel code.

@@ -442,8 +442,8 @@ void kasan_remove_zero_shadow(void *start, unsigned long size)
         end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);
         if (WARN_ON((unsigned long)start %
-                        (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
-            WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+                        (KASAN_GRANULE_SIZE * PAGE_SIZE)) ||
+            WARN_ON(size % (KASAN_GRANULE_SIZE * PAGE_SIZE)))
                 return;
         for (; addr < end; addr = next) {
@@ -477,8 +477,8 @@ int kasan_add_zero_shadow(void *start, unsigned long size)
         shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);
         if (WARN_ON((unsigned long)start %
-                        (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
-            WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+                        (KASAN_GRANULE_SIZE * PAGE_SIZE)) ||
+            WARN_ON(size % (KASAN_GRANULE_SIZE * PAGE_SIZE)))
                 return -EINVAL;
         ret = kasan_populate_early_shadow(shadow_start, shadow_end);

@@ -5,8 +5,8 @@
 #include <linux/kasan.h>
 #include <linux/stackdepot.h>
-#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
-#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
+#define KASAN_GRANULE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
+#define KASAN_GRANULE_MASK (KASAN_GRANULE_SIZE - 1)
 #define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
 #define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */

@@ -314,24 +314,24 @@ static bool __must_check get_address_stack_frame_info(const void *addr,
                 return false;
         aligned_addr = round_down((unsigned long)addr, sizeof(long));
-        mem_ptr = round_down(aligned_addr, KASAN_SHADOW_SCALE_SIZE);
+        mem_ptr = round_down(aligned_addr, KASAN_GRANULE_SIZE);
         shadow_ptr = kasan_mem_to_shadow((void *)aligned_addr);
         shadow_bottom = kasan_mem_to_shadow(end_of_stack(current));
         while (shadow_ptr >= shadow_bottom && *shadow_ptr != KASAN_STACK_LEFT) {
                 shadow_ptr--;
-                mem_ptr -= KASAN_SHADOW_SCALE_SIZE;
+                mem_ptr -= KASAN_GRANULE_SIZE;
         }
         while (shadow_ptr >= shadow_bottom && *shadow_ptr == KASAN_STACK_LEFT) {
                 shadow_ptr--;
-                mem_ptr -= KASAN_SHADOW_SCALE_SIZE;
+                mem_ptr -= KASAN_GRANULE_SIZE;
         }
         if (shadow_ptr < shadow_bottom)
                 return false;
-        frame = (const unsigned long *)(mem_ptr + KASAN_SHADOW_SCALE_SIZE);
+        frame = (const unsigned long *)(mem_ptr + KASAN_GRANULE_SIZE);
         if (frame[0] != KASAN_CURRENT_STACK_FRAME_MAGIC) {
                 pr_err("KASAN internal error: frame info validation failed; invalid marker: %lu\n",
                         frame[0]);
@@ -599,6 +599,6 @@ void kasan_non_canonical_hook(unsigned long addr)
         else
                 bug_type = "maybe wild-memory-access";
         pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
-                orig_addr, orig_addr + KASAN_SHADOW_MASK);
+                orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
 }
 #endif

@@ -76,7 +76,7 @@ void *find_first_bad_addr(void *addr, size_t size)
         void *end = p + size;
         while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p))
-                p += KASAN_SHADOW_SCALE_SIZE;
+                p += KASAN_GRANULE_SIZE;
         return p;
 }