kasan: optimize large kmalloc poisoning
Similarly to kasan_kmalloc(), kasan_kmalloc_large() doesn't need to unpoison
the object, as it was already unpoisoned by alloc_pages() (or by ksize() for
krealloc()).

This patch changes kasan_kmalloc_large() to only poison the redzone.

Link: https://lkml.kernel.org/r/33dee5aac0e550ad7f8e26f590c9b02c6129b4a3.1612546384.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: e2db1a9aa3
Commit: 43a219cbe5
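For context, the reason the explicit unpoison is redundant: large kmalloc() allocations are backed directly by the page allocator, and the KASAN hook that runs inside alloc_pages() already unpoisons the freshly allocated pages before kasan_kmalloc_large() is reached. The sketch below illustrates that ordering on the caller side; it is a rough approximation, not the kernel's actual code (the helper name kmalloc_large_sketch and its exact structure are illustrative only).

/*
 * Non-authoritative sketch of the large-kmalloc path. The backing pages come
 * straight from alloc_pages(), whose KASAN hook has already unpoisoned them,
 * so kasan_kmalloc_large() only needs to poison the redzone past 'size'
 * instead of re-unpoisoning the object.
 */
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/mm.h>

static void *kmalloc_large_sketch(size_t size, gfp_t flags, unsigned int order)
{
	struct page *page;
	void *ptr;

	page = alloc_pages(flags, order);	/* unpoisoned via the page allocator's KASAN hook */
	if (unlikely(!page))
		return NULL;

	ptr = page_address(page);
	/* Only marks the [ptr + size, end of backing pages) redzone. */
	return kasan_kmalloc_large(ptr, size, flags);
}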
@@ -494,7 +494,6 @@ EXPORT_SYMBOL(__kasan_kmalloc);
 void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 						gfp_t flags)
 {
-	struct page *page;
 	unsigned long redzone_start;
 	unsigned long redzone_end;
 
@@ -504,12 +503,23 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 	if (unlikely(ptr == NULL))
 		return NULL;
 
-	page = virt_to_page(ptr);
+	/*
+	 * The object has already been unpoisoned by kasan_alloc_pages() for
+	 * alloc_pages() or by ksize() for krealloc().
+	 */
+
+	/*
+	 * The redzone has byte-level precision for the generic mode.
+	 * Partially poison the last object granule to cover the unaligned
+	 * part of the redzone.
+	 */
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+		kasan_poison_last_granule(ptr, size);
+
+	/* Poison the aligned part of the redzone. */
 	redzone_start = round_up((unsigned long)(ptr + size),
 				KASAN_GRANULE_SIZE);
-	redzone_end = (unsigned long)ptr + page_size(page);
-
-	kasan_unpoison(ptr, size);
+	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
 	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
 		     KASAN_PAGE_REDZONE);
 
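To make the redzone arithmetic in the hunk above concrete, here is a small self-contained userspace sketch of the same round_up() math. All names and values (GRANULE_SIZE, BACKING_SIZE, round_up_pow2, the example address and size) are hypothetical stand-ins, not kernel API:

#include <stdio.h>

#define GRANULE_SIZE 8UL     /* stand-in for KASAN_GRANULE_SIZE */
#define BACKING_SIZE 8192UL  /* stand-in for page_size(virt_to_page(ptr)): two 4K pages */

/* Mirrors the kernel's round_up() for power-of-two alignments. */
static unsigned long round_up_pow2(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	unsigned long ptr = 0x10000UL;  /* hypothetical object address */
	unsigned long size = 5003;      /* hypothetical requested size */

	unsigned long redzone_start = round_up_pow2(ptr + size, GRANULE_SIZE);
	unsigned long redzone_end = ptr + BACKING_SIZE;
	unsigned long unaligned = redzone_start - (ptr + size);

	printf("object:            [%#lx, %#lx)\n", ptr, ptr + size);
	/* Generic KASAN covers these bytes via the last granule's shadow value
	 * (the kasan_poison_last_granule() call in the patch). */
	printf("unaligned redzone: %lu byte(s) before %#lx\n", unaligned, redzone_start);
	/* The aligned tail is what kasan_poison() marks as KASAN_PAGE_REDZONE. */
	printf("aligned redzone:   [%#lx, %#lx), %lu bytes\n",
	       redzone_start, redzone_end, redzone_end - redzone_start);
	return 0;
}

With these example numbers, the 5 unaligned bytes between ptr + size and redzone_start are handled by generic KASAN's byte-granular last-granule poisoning, while the aligned remainder up to the end of the backing pages is poisoned as KASAN_PAGE_REDZONE.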