powerpc/book3s64/kuap: Use Key 3 for kernel mapping with hash translation
This patch updates kernel hash page table entries to use storage key 3 for their mappings. All kernel accesses are then subject to key 3 for READ/WRITE control. The patch also prevents userspace from allocating key 3, and the UAMOR value is updated so that userspace cannot modify key 3.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Reviewed-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20201127044424.40686-9-aneesh.kumar@linux.ibm.com
This commit is contained in:
Parent: d5b810b5c9
Commit: d94b827e89
@@ -2,6 +2,9 @@
 #ifndef _ASM_POWERPC_BOOK3S_64_HASH_PKEY_H
 #define _ASM_POWERPC_BOOK3S_64_HASH_PKEY_H
 
+/* We use key 3 for KERNEL */
+#define HASH_DEFAULT_KERNEL_KEY (HPTE_R_KEY_BIT0 | HPTE_R_KEY_BIT1)
+
 static inline u64 hash__vmflag_to_pte_pkey_bits(u64 vm_flags)
 {
 	return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT0 : 0x0UL) |
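A note on the encoding above: key 3 is 0b00011 in the five-bit key field, and in this tree's naming HPTE_R_KEY_BIT0 is the low-order key bit, which is what makes (HPTE_R_KEY_BIT0 | HPTE_R_KEY_BIT1) encode key 3. A minimal standalone sketch of that arithmetic (the KEY_BIT* masks here are illustrative stand-ins, not the kernel's actual HPTE bit positions, which are scattered across the HPTE second doubleword):

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for HPTE_R_KEY_BIT0..4; BIT0 is taken to be
 * the low-order bit of the 5-bit key, as the patch's definition implies. */
#define KEY_BIT0 (1ULL << 0)
#define KEY_BIT1 (1ULL << 1)
#define KEY_BIT2 (1ULL << 2)
#define KEY_BIT3 (1ULL << 3)
#define KEY_BIT4 (1ULL << 4)

int main(void)
{
        /* Mirrors HASH_DEFAULT_KERNEL_KEY: bits 0 and 1 set => key 3. */
        uint64_t kernel_key = KEY_BIT0 | KEY_BIT1;

        printf("kernel key = %llu\n", (unsigned long long)kernel_key); /* 3 */
        return 0;
}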
@@ -11,13 +14,23 @@ static inline u64 hash__vmflag_to_pte_pkey_bits(u64 vm_flags)
 		((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT4 : 0x0UL));
 }
 
-static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
+static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
 {
-	return (((pteflags & H_PTE_PKEY_BIT4) ? HPTE_R_KEY_BIT4 : 0x0UL) |
-		((pteflags & H_PTE_PKEY_BIT3) ? HPTE_R_KEY_BIT3 : 0x0UL) |
-		((pteflags & H_PTE_PKEY_BIT2) ? HPTE_R_KEY_BIT2 : 0x0UL) |
-		((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) |
-		((pteflags & H_PTE_PKEY_BIT0) ? HPTE_R_KEY_BIT0 : 0x0UL));
+	unsigned long pte_pkey;
+
+	pte_pkey = (((pteflags & H_PTE_PKEY_BIT4) ? HPTE_R_KEY_BIT4 : 0x0UL) |
+		    ((pteflags & H_PTE_PKEY_BIT3) ? HPTE_R_KEY_BIT3 : 0x0UL) |
+		    ((pteflags & H_PTE_PKEY_BIT2) ? HPTE_R_KEY_BIT2 : 0x0UL) |
+		    ((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) |
+		    ((pteflags & H_PTE_PKEY_BIT0) ? HPTE_R_KEY_BIT0 : 0x0UL));
+
+	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP) ||
+	    mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
+		if ((pte_pkey == 0) && (flags & HPTE_USE_KERNEL_KEY))
+			return HASH_DEFAULT_KERNEL_KEY;
+	}
+
+	return pte_pkey;
 }
 
 static inline u16 hash__pte_to_pkey_bits(u64 pteflags)
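The rewritten helper above can be read as a two-step decision: compute the mapping's own pkey bits first, then fall back to the kernel key only for key-0 mappings whose caller asked for it. A standalone model of that logic, with the mmu_has_feature() checks stubbed out and the key returned as a plain number rather than as HPTE_R_KEY_* bits:

#include <stdio.h>
#include <stdbool.h>

#define HPTE_USE_KERNEL_KEY 0x4   /* matches the flag added in mmu-hash.h below */

/* Stand-in for mmu_has_feature(MMU_FTR_BOOK3S_KUAP/KUEP). */
static bool kuap_or_kuep_enabled = true;

/*
 * Kernel mappings carry no pkey bits in their PTE, so pte_pkey computes
 * to 0; only then, and only when the caller passed HPTE_USE_KERNEL_KEY,
 * does the HPTE fall back to key 3 (HASH_DEFAULT_KERNEL_KEY).
 */
static unsigned long effective_key(unsigned long pte_pkey, unsigned long flags)
{
        if (kuap_or_kuep_enabled && pte_pkey == 0 &&
            (flags & HPTE_USE_KERNEL_KEY))
                return 3;
        return pte_pkey;          /* user mappings keep their own key */
}

int main(void)
{
        printf("kernel mapping: key %lu\n",
               effective_key(0, HPTE_USE_KERNEL_KEY));   /* 3 */
        printf("user pkey 2:    key %lu\n",
               effective_key(2, 0));                     /* 2 */
        return 0;
}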
@@ -145,7 +145,7 @@ extern void hash__mark_initmem_nx(void);
 
 extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 			    pte_t *ptep, unsigned long pte, int huge);
-extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
+unsigned long htab_convert_pte_flags(unsigned long pteflags, unsigned long flags);
 /* Atomic PTE updates */
 static inline unsigned long hash__pte_update(struct mm_struct *mm,
 					     unsigned long addr,
@@ -452,6 +452,7 @@ static inline unsigned long hpt_hash(unsigned long vpn,
 
 #define HPTE_LOCAL_UPDATE	0x1
 #define HPTE_NOHPTE_UPDATE	0x2
+#define HPTE_USE_KERNEL_KEY	0x4
 
 extern int __hash_page_4K(unsigned long ea, unsigned long access,
 			  unsigned long vsid, pte_t *ptep, unsigned long trap,
@@ -286,7 +286,7 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 #define thread_pkey_regs_init(thread)
 #define arch_dup_pkeys(oldmm, mm)
 
-static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
+static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
 {
 	return 0x0UL;
 }
@@ -54,7 +54,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
 	 * need to add in 0x1 if it's a read-only user page
 	 */
-	rflags = htab_convert_pte_flags(new_pte);
+	rflags = htab_convert_pte_flags(new_pte, flags);
 	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
 
 	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
@@ -72,7 +72,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	 * Handle the subpage protection bits
 	 */
 	subpg_pte = new_pte & ~subpg_prot;
-	rflags = htab_convert_pte_flags(subpg_pte);
+	rflags = htab_convert_pte_flags(subpg_pte, flags);
 
 	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
 	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
@@ -260,7 +260,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 		new_pte |= _PAGE_DIRTY;
 	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
-	rflags = htab_convert_pte_flags(new_pte);
+	rflags = htab_convert_pte_flags(new_pte, flags);
 	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
 
 	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
@@ -57,7 +57,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 	if (!(old_pmd & (H_PAGE_THP_HUGE | _PAGE_DEVMAP)))
 		return 0;
 
-	rflags = htab_convert_pte_flags(new_pmd);
+	rflags = htab_convert_pte_flags(new_pmd, flags);
 
 #if 0
 	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
@@ -70,7 +70,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))
 		return 0;
 
-	rflags = htab_convert_pte_flags(new_pte);
+	rflags = htab_convert_pte_flags(new_pte, flags);
 	if (unlikely(mmu_psize == MMU_PAGE_16G))
 		offset = PTRS_PER_PUD;
 	else
@@ -443,7 +443,7 @@ void hash__mark_initmem_nx(void)
 	start = (unsigned long)__init_begin;
 	end = (unsigned long)__init_end;
 
-	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
+	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY);
 
 	WARN_ON(!hash__change_memory_range(start, end, pp));
 }
@@ -186,7 +186,7 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
  * - We make sure R is always set and never lost
  * - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
  */
-unsigned long htab_convert_pte_flags(unsigned long pteflags)
+unsigned long htab_convert_pte_flags(unsigned long pteflags, unsigned long flags)
 {
 	unsigned long rflags = 0;
 
@@ -240,7 +240,7 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
 	 */
 	rflags |= HPTE_R_M;
 
-	rflags |= pte_to_hpte_pkey_bits(pteflags);
+	rflags |= pte_to_hpte_pkey_bits(pteflags, flags);
 	return rflags;
 }
 
@@ -255,7 +255,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 	shift = mmu_psize_defs[psize].shift;
 	step = 1 << shift;
 
-	prot = htab_convert_pte_flags(prot);
+	prot = htab_convert_pte_flags(prot, HPTE_USE_KERNEL_KEY);
 
 	DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
 	    vstart, vend, pstart, prot, psize, ssize);
@@ -1316,12 +1316,14 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
 		psize = mmu_vmalloc_psize;
 		ssize = mmu_kernel_ssize;
+		flags |= HPTE_USE_KERNEL_KEY;
 		break;
 
 	case IO_REGION_ID:
 		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
 		psize = mmu_io_psize;
 		ssize = mmu_kernel_ssize;
+		flags |= HPTE_USE_KERNEL_KEY;
 		break;
 	default:
 		/*
@@ -1900,7 +1902,7 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 	unsigned long hash;
 	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
 	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
-	unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
+	unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY);
 	long ret;
 
 	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
@@ -205,6 +205,18 @@ void __init pkey_early_init_devtree(void)
 	reserved_allocation_mask |= (0x1 << 1);
 	default_uamor &= ~(0x3ul << pkeyshift(1));
 
+	/* handle key which is used by kernel for KUAP */
+	reserved_allocation_mask |= (0x1 << 3);
+	/*
+	 * Mark access for KUAP key in default amr so that
+	 * we continue to operate with that AMR in
+	 * copy_to/from_user().
+	 */
+	default_amr &= ~(0x3ul << pkeyshift(3));
+	default_iamr &= ~(0x1ul << pkeyshift(3));
+	default_uamor &= ~(0x3ul << pkeyshift(3));
+
 	/*
 	 * Prevent the usage of OS reserved keys. Update UAMOR
 	 * for those keys. Also mark the rest of the bits in the
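For reference, the mask arithmetic in the hunk above, worked as a standalone program. The pkeyshift() and AMR_BITS_PER_PKEY definitions here are assumptions intended to mirror the kernel's: each key owns a 2-bit field counted down from the top of the 64-bit register, so key 3's pair sits at shift 56. A set AMR bit denies the corresponding access, which is why the patch clears the pair for key 3 while also clearing key 3's UAMOR bits so userspace can never flip them back:

#include <stdio.h>
#include <stdint.h>

/* Assumed to match the kernel's pkeyshift(): 2 AMR bits per key, with
 * key 0 occupying the most significant bit pair of the 64-bit register. */
#define AMR_BITS_PER_PKEY 2
#define PKEY_REG_BITS     64
#define pkeyshift(pkey)   (PKEY_REG_BITS - ((pkey) + 1) * AMR_BITS_PER_PKEY)

int main(void)
{
        int shift = pkeyshift(3);       /* 56 for key 3 */

        /* The pair the patch clears in default_amr and default_uamor. */
        printf("AMR/UAMOR pair for key 3: 0x%016llx\n",
               (unsigned long long)(0x3ULL << shift));
        /* Only a single bit is cleared in default_iamr. */
        printf("IAMR bit for key 3:       0x%016llx\n",
               (unsigned long long)(0x1ULL << shift));
        return 0;
}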