2020-01-06 21:38:32 +03:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
// Copyright (C) 2019 Andes Technology Corporation
|
|
|
|
|
|
|
|
#include <linux/pfn.h>
|
|
|
|
#include <linux/init_task.h>
|
|
|
|
#include <linux/kasan.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/memblock.h>
|
2020-06-09 07:32:38 +03:00
|
|
|
#include <linux/pgtable.h>
|
2020-06-09 07:32:42 +03:00
|
|
|
#include <asm/tlbflush.h>
|
2020-01-06 21:38:32 +03:00
|
|
|
#include <asm/fixmap.h>
|
2021-01-16 08:58:35 +03:00
|
|
|
#include <asm/pgalloc.h>
|
|
|
|
|
2021-12-06 13:46:51 +03:00
|
|
|
/*
|
|
|
|
* Kasan shadow region must lie at a fixed address across sv39, sv48 and sv57
|
|
|
|
* which is right before the kernel.
|
|
|
|
*
|
|
|
|
* For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
|
|
|
|
* the page global directory with kasan_early_shadow_pmd.
|
|
|
|
*
|
|
|
|
* For sv48 and sv57, the region is not aligned on PGDIR_SIZE so the mapping
|
|
|
|
* must be divided as follows:
|
|
|
|
* - the first PGD entry, although incomplete, is populated with
|
|
|
|
* kasan_early_shadow_pud/p4d
|
|
|
|
* - the PGD entries in the middle are populated with kasan_early_shadow_pud/p4d
|
|
|
|
* - the last PGD entry is shared with the kernel mapping so populated at the
|
|
|
|
* lower levels pud/p4d
|
|
|
|
*
|
|
|
|
* In addition, when shallow populating a kasan region (for example vmalloc),
|
|
|
|
* this region may also not be aligned on PGDIR size, so we must go down to the
|
|
|
|
* pud level too.
|
|
|
|
*/
|
2020-01-06 21:38:32 +03:00
|
|
|
|
|
|
|
extern pgd_t early_pg_dir[PTRS_PER_PGD];
|
|
|
|
|
2021-03-29 21:22:21 +03:00
|
|
|
/*
 * Populate the PTE level of the shadow mapping for [vaddr, end):
 * reuse (or allocate) the pte table under @pmd, back every still-unmapped
 * page with freshly allocated memory, then link the table into @pmd.
 */
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	/*
	 * Reuse the existing pte table when the pmd entry is already
	 * populated, otherwise allocate a fresh one.
	 * NOTE(review): unlike kasan_populate_pmd(), there is no
	 * lm_alias(kasan_early_shadow_pte) check here -- confirm callers
	 * never pass a pmd still pointing at the shared early shadow table,
	 * or the shared table would be written to below.
	 */
	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	/* Back each page in the range that is not mapped yet. */
	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	/* Publish the pte table in the pmd only once it is fully populated. */
	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}
|
|
|
|
|
2021-12-06 13:46:51 +03:00
|
|
|
/*
 * Populate the PMD level of the shadow mapping for [vaddr, end):
 * map with 2MB hugepages where the range allows it, otherwise recurse
 * into kasan_populate_pte(), then link the pmd table into @pud.
 */
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	} else {
		base_pmd = (pmd_t *)pud_pgtable(*pud);
		/*
		 * The shared early shadow pmd must never be written to:
		 * replace it with a private table before populating.
		 */
		if (base_pmd == lm_alias(kasan_early_shadow_pmd))
			base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	}

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		/* Prefer a 2MB hugepage; fall back to 4K ptes on failure. */
		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole pmd table to be populated before setting it in
	 * the pud, otherwise, if we did set the entry before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	set_pud(pud, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}
|
|
|
|
|
|
|
|
/*
 * Populate the PUD level of the shadow mapping for [vaddr, end).
 * @early: populating early_pg_dir at boot (intermediate tables are looked
 * up through pt_ops and the early shadow pmd is used for whole entries)
 * rather than swapper_pg_dir.
 */
static void __init kasan_populate_pud(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (early) {
		/*
		 * We can't use pgd_page_vaddr here as it would return a linear
		 * mapping address but it is not mapped yet, but when populating
		 * early_pg_dir, we need the physical address and when populating
		 * swapper_pg_dir, we need the kernel virtual address so use
		 * pt_ops facility.
		 */
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
	} else if (pgd_none(*pgd)) {
		/* Fresh entry: start from a copy of the early shadow pud. */
		base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
		memcpy(base_pud, (void *)kasan_early_shadow_pud,
		       sizeof(pud_t) * PTRS_PER_PUD);
	} else {
		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
		/* Never write into the shared early shadow: clone it first. */
		if (base_pud == lm_alias(kasan_early_shadow_pud)) {
			base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
			memcpy(base_pud, (void *)kasan_early_shadow_pud,
			       sizeof(pud_t) * PTRS_PER_PUD);
		}
	}

	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			if (early) {
				/* Whole entry: point it at the early shadow pmd. */
				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pmd));
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else {
				/* Try a PUD-sized hugepage; fall back below. */
				phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
				if (phys_addr) {
					set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PGD to be populated before setting the PGD in
	 * the page table, otherwise, if we did set the PGD before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	if (!early)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
}
|
|
|
|
|
2022-01-27 05:48:44 +03:00
|
|
|
/*
 * Populate the P4D level of the shadow mapping for [vaddr, end) (sv57).
 * Mirrors kasan_populate_pud() one level up: @early selects pt_ops-based
 * table lookup for early_pg_dir vs. normal allocation for swapper_pg_dir.
 */
static void __init kasan_populate_p4d(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	p4d_t *p4dp, *base_p4d;
	unsigned long next;

	if (early) {
		/*
		 * We can't use pgd_page_vaddr here as it would return a linear
		 * mapping address but it is not mapped yet, but when populating
		 * early_pg_dir, we need the physical address and when populating
		 * swapper_pg_dir, we need the kernel virtual address so use
		 * pt_ops facility.
		 */
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgd)));
	} else {
		base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
		/* Never write into the shared early shadow: clone it first. */
		if (base_p4d == lm_alias(kasan_early_shadow_p4d)) {
			base_p4d = memblock_alloc(PTRS_PER_PUD * sizeof(p4d_t), PAGE_SIZE);
			memcpy(base_p4d, (void *)kasan_early_shadow_p4d,
			       sizeof(p4d_t) * PTRS_PER_P4D);
		}
	}

	p4dp = base_p4d + p4d_index(vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
			if (early) {
				/* Whole entry: point it at the early shadow pud. */
				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pud));
				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else {
				/* Try a P4D-sized hugepage; fall back below. */
				phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
				if (phys_addr) {
					set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pud((pgd_t *)p4dp, vaddr, next, early);
	} while (p4dp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole P4D to be populated before setting the P4D in
	 * the page table, otherwise, if we did set the P4D before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	if (!early)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_p4d)), PAGE_TABLE));
}
|
|
|
|
|
|
|
|
/*
 * The early shadow table one level below the PGD, depending on the paging
 * mode: p4d for sv57, pud for sv48, pmd for sv39.
 */
#define kasan_early_shadow_pgd_next			(pgtable_l5_enabled ?	\
				(uintptr_t)kasan_early_shadow_p4d :	\
				(pgtable_l4_enabled ?			\
					(uintptr_t)kasan_early_shadow_pud :	\
					(uintptr_t)kasan_early_shadow_pmd))
|
2021-12-06 13:46:51 +03:00
|
|
|
/*
 * Dispatch to the populate helper one level below the PGD, depending on
 * the paging mode (p4d for sv57, pud for sv48, pmd for sv39).
 */
#define kasan_populate_pgd_next(pgdp, vaddr, next, early)		\
		(pgtable_l5_enabled ?					\
		kasan_populate_p4d(pgdp, vaddr, next, early) :		\
		(pgtable_l4_enabled ?					\
			kasan_populate_pud(pgdp, vaddr, next, early) :	\
			kasan_populate_pmd((pud_t *)pgdp, vaddr, next)))
|
2021-12-06 13:46:51 +03:00
|
|
|
|
2021-12-06 13:46:46 +03:00
|
|
|
/*
 * Populate the top (PGD) level of the shadow mapping for [vaddr, end).
 * @early: boot-time population of early_pg_dir (whole entries are pointed
 * at the shared early shadow) vs. final population of swapper_pg_dir.
 */
static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			if (early) {
				/* Map the whole entry to the shared early shadow. */
				phys_addr = __pa((uintptr_t)kasan_early_shadow_pgd_next);
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else if (pgd_page_vaddr(*pgdp) ==
				   (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
				/*
				 * pgdp can't be none since kasan_early_init
				 * initialized all KASAN shadow region with
				 * kasan_early_shadow_pud: if this is still the
				 * case, that means we can try to allocate a
				 * hugepage as a replacement.
				 */
				phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
				if (phys_addr) {
					set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		/* Partial entry: recurse one level down (p4d/pud/pmd). */
		kasan_populate_pgd_next(pgdp, vaddr, next, early);
	} while (pgdp++, vaddr = next, vaddr != end);
}
|
|
|
|
|
2021-12-06 13:46:46 +03:00
|
|
|
/*
 * Set up the early KASAN shadow: every page of the shadow region is mapped
 * to the single kasan_early_shadow_page through shared early shadow page
 * tables, so KASAN-instrumented code can run before the real shadow memory
 * is allocated.
 */
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	/* The shadow region must end exactly at KASAN_SHADOW_END. */
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	/* Every pte points at the single shared early shadow page. */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

	/* Every pmd entry points at the shared early shadow pte table. */
	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	/* sv48: chain the early shadow pud level in as well. */
	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pmd))),
					PAGE_TABLE));
	}

	/* sv57: chain the early shadow p4d level in as well. */
	if (pgtable_l5_enabled) {
		for (i = 0; i < PTRS_PER_P4D; ++i)
			set_p4d(kasan_early_shadow_p4d + i,
				pfn_p4d(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pud))),
					PAGE_TABLE));
	}

	/* Hook the early shadow into early_pg_dir for the whole shadow range. */
	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}
|
|
|
|
|
|
|
|
/*
 * Hook the early KASAN shadow into swapper_pg_dir for the whole shadow
 * region, before the final shadow is built by kasan_init().
 * NOTE(review): early=true means intermediate tables are resolved through
 * pt_ops (see kasan_populate_pud()) rather than allocated -- confirm this
 * matches the boot ordering at the call site.
 */
void __init kasan_swapper_init(void)
{
	kasan_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}
|
|
|
|
|
2021-02-08 22:30:16 +03:00
|
|
|
/*
 * Fully populate the shadow region [start, end): build real page tables in
 * swapper_pg_dir for the page-aligned range, then zero-initialize the
 * shadow bytes so the covered memory is marked accessible.
 */
static void __init kasan_populate(void *start, void *end)
{
	unsigned long shadow_start = (unsigned long)start & PAGE_MASK;
	unsigned long shadow_end = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(shadow_start), shadow_start, shadow_end, false);

	local_flush_tlb_all();

	/* Only the exact requested byte range is initialized. */
	memset(start, KASAN_SHADOW_INIT, end - start);
}
|
|
|
|
|
2022-01-27 05:48:44 +03:00
|
|
|
/*
 * Shallow population, PMD level: clear every pmd entry that still points at
 * the shared early shadow pte table, so the shadow below it is faulted in
 * on demand instead of being shared.
 */
static void __init kasan_shallow_populate_pmd(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	pmd_t *pmdp, *base_pmd;
	bool is_kasan_pte;

	base_pmd = (pmd_t *)pgd_page_vaddr(*pgdp);
	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);
		is_kasan_pte = (pmd_pgtable(*pmdp) == lm_alias(kasan_early_shadow_pte));

		if (is_kasan_pte)
			pmd_clear(pmdp);
	} while (pmdp++, vaddr = next, vaddr != end);
}
|
|
|
|
|
2021-12-06 13:46:51 +03:00
|
|
|
/*
 * Shallow population, PUD level: replace each pud entry that still points
 * at the shared early shadow pmd with a private pmd table, recursing into
 * the pmd level when the entry is only partially covered by [vaddr, end).
 */
static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	pud_t *pudp, *base_pud;
	pmd_t *base_pmd;
	bool is_kasan_pmd;

	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);
		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));

		/* Entries already pointing at a private table are left alone. */
		if (!is_kasan_pmd)
			continue;

		base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));

		/*
		 * Fully covered entry: leave the fresh table empty
		 * (memblock_alloc() returns zeroed memory) so the shadow is
		 * populated lazily on fault.
		 */
		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE)
			continue;

		/* Partial entry: seed from the early shadow, then recurse. */
		memcpy(base_pmd, (void *)kasan_early_shadow_pmd, PAGE_SIZE);
		kasan_shallow_populate_pmd((pgd_t *)pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);
}
|
|
|
|
|
2022-01-27 05:48:44 +03:00
|
|
|
/*
 * Shallow population, P4D level (sv57): replace each p4d entry that still
 * points at the shared early shadow pud with a private pud table, recursing
 * into the pud level when the entry is only partially covered.
 */
static void __init kasan_shallow_populate_p4d(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	p4d_t *p4dp, *base_p4d;
	pud_t *base_pud;
	bool is_kasan_pud;

	base_p4d = (p4d_t *)pgd_page_vaddr(*pgdp);
	p4dp = base_p4d + p4d_index(vaddr);

	do {
		next = p4d_addr_end(vaddr, end);
		is_kasan_pud = (p4d_pgtable(*p4dp) == lm_alias(kasan_early_shadow_pud));

		/* Entries already pointing at a private table are left alone. */
		if (!is_kasan_pud)
			continue;

		base_pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		set_p4d(p4dp, pfn_p4d(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));

		/*
		 * Fully covered entry: leave the fresh table empty
		 * (memblock_alloc() returns zeroed memory) so the shadow is
		 * populated lazily on fault.
		 */
		if (IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE)
			continue;

		/* Partial entry: seed from the early shadow, then recurse. */
		memcpy(base_pud, (void *)kasan_early_shadow_pud, PAGE_SIZE);
		kasan_shallow_populate_pud((pgd_t *)p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}
|
|
|
|
|
|
|
|
/*
 * Dispatch to the shallow-populate helper one level below the PGD,
 * depending on the paging mode (p4d for sv57, pud for sv48, pmd for sv39).
 */
#define kasan_shallow_populate_pgd_next(pgdp, vaddr, next)		\
		(pgtable_l5_enabled ?					\
		kasan_shallow_populate_p4d(pgdp, vaddr, next) :		\
		(pgtable_l4_enabled ?					\
		kasan_shallow_populate_pud(pgdp, vaddr, next) :		\
		kasan_shallow_populate_pmd(pgdp, vaddr, next)))
|
|
|
|
|
2021-03-13 11:45:05 +03:00
|
|
|
/*
 * Shallow population, PGD level: replace each pgd entry that still points
 * at the shared early shadow with a private table, recursing one level
 * down when the entry is only partially covered by [vaddr, end).
 *
 * Fix: the original unconditionally did
 * "memcpy(p, kasan_early_shadow_pgd_next, PAGE_SIZE)" after the alignment
 * check, even when is_kasan_pgd_next was false -- in that case @p is
 * uninitialized on the first iteration (undefined behavior) or stale from
 * a previous iteration (clobbering a previously installed table). The copy
 * now only runs for a freshly allocated table, matching the structure of
 * kasan_shallow_populate_pud()/_p4d().
 */
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);
	bool is_kasan_pgd_next;

	do {
		next = pgd_addr_end(vaddr, end);
		is_kasan_pgd_next = (pgd_page_vaddr(*pgd_k) ==
				     (unsigned long)lm_alias(kasan_early_shadow_pgd_next));

		if (is_kasan_pgd_next) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
			/*
			 * Partially covered entry: seed the new table from
			 * the early shadow so the untouched part keeps
			 * mapping the zero shadow. A fully covered entry is
			 * left empty (memblock_alloc() zeroes the page) for
			 * lazy population.
			 */
			if (!(IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE))
				memcpy(p, (void *)kasan_early_shadow_pgd_next, PAGE_SIZE);
		}

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
			continue;

		kasan_shallow_populate_pgd_next(pgd_k, vaddr, next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}
|
|
|
|
|
2021-03-17 08:01:04 +03:00
|
|
|
/*
 * Shallow-populate the shadow of [start, end) (used for vmalloc): only the
 * upper page-table levels are made private, leaving leaf entries to be
 * faulted in on demand.
 */
static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long shadow_start = (unsigned long)start & PAGE_MASK;
	unsigned long shadow_end = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(shadow_start, shadow_end);

	local_flush_tlb_all();
}
|
|
|
|
|
|
|
|
/*
 * Build the final KASAN shadow: shallow shadow for vmalloc, fully backed
 * shadow for the linear mapping and the kernel/BPF/modules region, then
 * make the early shadow page read-only and enable KASAN reporting.
 */
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	/*
	 * Remap the early shadow page read-only (no _PAGE_WRITE): from now
	 * on it must only serve as the zero shadow.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	/* Depth 0 enables KASAN report generation for the init task. */
	init_task.kasan_depth = 0;
}
|