powerpc/8xx: Implement dedicated kasan_init_region()

Implement a kasan_init_region() dedicated to 8xx that
allocates KASAN regions using huge pages.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d2d60202a8821dc81cffe6ff59cc13c15b7e4bb6.1589866984.git.christophe.leroy@csgroup.eu
Christophe Leroy authored on 2020-05-19 05:49:27 +00:00, committed by Michael Ellerman
Parent: fcdafd10a3
Commit: a2feeb2c2e
2 changed files with 75 additions and 0 deletions
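For scale: generic KASAN shadows memory at a ratio of 8 bytes of memory per byte of shadow, so every 8M huge page of shadow set up by this patch covers 64M of lowmem. A minimal host-side sketch of that arithmetic (not kernel code; SHADOW_OFFSET, the lowmem base and the RAM size are illustrative values, not necessarily what a given 8xx configuration uses):

#include <stdio.h>

#define SHADOW_SCALE_SHIFT      3               /* 8 bytes of memory per shadow byte */
#define SHADOW_OFFSET           0xe0000000UL    /* placeholder for CONFIG_KASAN_SHADOW_OFFSET */
#define SZ_8M                   0x00800000UL

/* Same formula as the kernel's kasan_mem_to_shadow(). */
static unsigned long mem_to_shadow(unsigned long addr)
{
        return (addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
        unsigned long start = 0xc0000000UL;     /* example lowmem base (PPC32 PAGE_OFFSET) */
        unsigned long size = 256UL << 20;       /* pretend the board has 256M of RAM */
        unsigned long k_start = mem_to_shadow(start);
        unsigned long k_end = mem_to_shadow(start + size);

        printf("shadow %#lx-%#lx: %lu M, i.e. %lu x 8M huge pages\n",
               k_start, k_end, (k_end - k_start) >> 20, (k_end - k_start) / SZ_8M);
        return 0;
}

With these example numbers the whole 32M shadow region fits in four 8M huge pages instead of thousands of 4K PTEs, which is the point of the dedicated implementation below.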

arch/powerpc/mm/kasan/8xx.c (new file)

@@ -0,0 +1,74 @@
// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>

static int __init
kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block)
{
        pmd_t *pmd = pmd_ptr_k(k_start);
        unsigned long k_cur, k_next;

        for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) {
                pte_basic_t *new;

                k_next = pgd_addr_end(k_cur, k_end);
                k_next = pgd_addr_end(k_next, k_end);
                if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
                        continue;

                new = memblock_alloc(sizeof(pte_basic_t), SZ_4K);
                if (!new)
                        return -ENOMEM;

                *new = pte_val(pte_mkhuge(pfn_pte(PHYS_PFN(__pa(block)), PAGE_KERNEL)));

                hugepd_populate_kernel((hugepd_t *)pmd, (pte_t *)new, PAGE_SHIFT_8M);
                hugepd_populate_kernel((hugepd_t *)pmd + 1, (pte_t *)new, PAGE_SHIFT_8M);
        }
        return 0;
}

int __init kasan_init_region(void *start, size_t size)
{
        unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
        unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
        unsigned long k_cur;
        int ret;
        void *block;

        block = memblock_alloc(k_end - k_start, SZ_8M);
        if (!block)
                return -ENOMEM;

        if (IS_ALIGNED(k_start, SZ_8M)) {
                kasan_init_shadow_8M(k_start, ALIGN_DOWN(k_end, SZ_8M), block);
                k_cur = ALIGN_DOWN(k_end, SZ_8M);
                if (k_cur == k_end)
                        goto finish;
        } else {
                k_cur = k_start;
        }

        ret = kasan_init_shadow_page_tables(k_start, k_end);
        if (ret)
                return ret;

        for (; k_cur < k_end; k_cur += PAGE_SIZE) {
                pmd_t *pmd = pmd_ptr_k(k_cur);
                void *va = block + k_cur - k_start;
                pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);

                if (k_cur < ALIGN_DOWN(k_end, SZ_512K))
                        pte = pte_mkhuge(pte);

                __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
        }
finish:
        flush_tlb_kernel_range(k_start, k_end);
        return 0;
}
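On the 8xx a PGD entry covers 4M, so one 8M page spans two consecutive entries; that is why kasan_init_shadow_8M() advances pmd by two, calls pgd_addr_end() twice per iteration and installs the same hugepd in both slots. Whatever does not fit in 8M-aligned chunks goes through the per-PTE loop of kasan_init_region(), which keeps the huge (512K) bit set while below the last 512K boundary and falls back to plain 4K pages for the rest. The standalone model below only mirrors that carving of a shadow range into the three page sizes; the addresses are made up and ALIGN_DOWN()/IS_ALIGNED() are re-implemented for the sake of the example:

#include <stdio.h>

#define SZ_8M   0x00800000UL
#define SZ_512K 0x00080000UL
#define SZ_4K   0x00001000UL

#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))
#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

/* Mirror of the mapping decisions kasan_init_region() makes for one range. */
static void plan(unsigned long k_start, unsigned long k_end)
{
        unsigned long k_cur = k_start;
        unsigned long n_8m = 0, n_huge_pte = 0, n_small_pte = 0;

        if (IS_ALIGNED(k_start, SZ_8M)) {
                unsigned long huge_end = ALIGN_DOWN(k_end, SZ_8M);

                n_8m = (huge_end - k_start) / SZ_8M;
                k_cur = huge_end;
        }

        /* Tail: PTEs below the last 512K boundary get the huge (512K) bit,
         * the remainder stays as ordinary 4K pages. */
        for (; k_cur < k_end; k_cur += SZ_4K) {
                if (k_cur < ALIGN_DOWN(k_end, SZ_512K))
                        n_huge_pte++;
                else
                        n_small_pte++;
        }

        printf("%#lx-%#lx: %lu x 8M page(s), %lu huge-marked PTEs, %lu plain 4K PTEs\n",
               k_start, k_end, n_8m, n_huge_pte, n_small_pte);
}

int main(void)
{
        plan(0xf8000000UL, 0xf8a83000UL);       /* 8M-aligned start, ragged end */
        return 0;
}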

arch/powerpc/mm/kasan/Makefile

@@ -3,3 +3,4 @@
KASAN_SANITIZE := n

obj-$(CONFIG_PPC32) += kasan_init_32.o
obj-$(CONFIG_PPC_8xx) += 8xx.o
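If I follow the series correctly, the generic kasan_init_region() in kasan_init_32.c was declared weak a few patches earlier, so adding 8xx.o to the link is all it takes for this dedicated version to replace it on 8xx builds. A toy illustration of that weak/strong override pattern follows; init_region_demo() and the file names are invented for the example, and the printf bodies stand in for the real mapping code:

/* weak_default.c: plays the role of the generic, weak kasan_init_region(). */
#include <stdio.h>

int __attribute__((weak)) init_region_demo(unsigned long start, unsigned long size)
{
        printf("generic path: %#lx + %#lx, 4K pages\n", start, size);
        return 0;
}

/* override_8xx.c: plays the role of 8xx.o; a strong definition of the same
 * symbol, picked by the linker whenever this object is part of the link. */
#include <stdio.h>

int init_region_demo(unsigned long start, unsigned long size)
{
        printf("8xx path: %#lx + %#lx, huge pages\n", start, size);
        return 0;
}

/* main.c */
int init_region_demo(unsigned long start, unsigned long size);

int main(void)
{
        return init_region_demo(0xf8000000UL, 0x02000000UL);
}

Linking all three files prints the "8xx path" line; leaving override_8xx.c out falls back to the weak default, which mirrors the effect of the obj-$(CONFIG_PPC_8xx) += 8xx.o line added above.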