powerpc/mm: Replace _PAGE_USER with _PAGE_PRIVILEGED
_PAGE_PRIVILEGED means the page can be accessed only by the kernel. This is done to keep the pte bits similar to the PowerISA 3.0 Radix PTE format. User pages are now marked by clearing the _PAGE_PRIVILEGED bit.

Previously we allowed the kernel to have a privileged page in the lower address range (USER_REGION). With this patch such access is denied. We also prevent kernel access to a non-privileged page in the higher address range (ie, REGION_ID != 0). Neither of these access scenarios should ever happen.

Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Jeremy Kerr <jk@ozlabs.org>
Cc: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Acked-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent: e7bfc462d3
Commit: ac29c64089
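For orientation, the change inverts the sense of bit 0x00008 rather than moving it: a page used to be a user page when _PAGE_USER was set, and is now a user page when _PAGE_PRIVILEGED is clear. A minimal sketch of that inversion (illustration only; the bit value is taken from the first hunk below, and pte_is_user() is a hypothetical name, the commit's real helper is pte_user()):

    #define _PAGE_PRIVILEGED    0x00008     /* set: kernel access only */

    /* User pages are now recognised by the *absence* of the privileged bit. */
    static inline bool pte_is_user(unsigned long pteval)
    {
            return !(pteval & _PAGE_PRIVILEGED);
    }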
@@ -20,7 +20,7 @@
 #define _PAGE_READ		0x00004	/* read access allowed */
 #define _PAGE_RW		(_PAGE_READ | _PAGE_WRITE)
 #define _PAGE_RWX		(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
-#define _PAGE_USER		0x00008 /* page may be accessed by userspace */
+#define _PAGE_PRIVILEGED	0x00008 /* kernel access only */
 #define _PAGE_GUARDED		0x00010 /* G: guarded (side-effect) page */
 /* M (memory coherence) is always set in the HPTE, so we don't need it here */
 #define _PAGE_COHERENT		0x0
@@ -114,10 +114,13 @@
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 #endif /* CONFIG_PPC_MM_SLICES */
 
-/* No separate kernel read-only */
-#define _PAGE_KERNEL_RW		(_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
+/*
+ * No separate kernel read-only, user access blocked by key
+ */
+#define _PAGE_KERNEL_RW		(_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
 #define _PAGE_KERNEL_RO		 _PAGE_KERNEL_RW
-#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
+#define _PAGE_KERNEL_RWX	(_PAGE_PRIVILEGED | _PAGE_DIRTY | \
+				 _PAGE_RW | _PAGE_EXEC)
 
 /* Strong Access Ordering */
 #define _PAGE_SAO		(_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
@@ -147,7 +150,7 @@
  */
 #define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
 			 _PAGE_WRITETHRU | _PAGE_4K_PFN | \
-			 _PAGE_USER | _PAGE_ACCESSED | _PAGE_READ |\
+			 _PAGE_PRIVILEGED | _PAGE_ACCESSED | _PAGE_READ |\
 			 _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
 			 _PAGE_SOFT_DIRTY)
 /*
@@ -169,16 +172,13 @@
  *
  * Note due to the way vm flags are laid out, the bits are XWR
  */
-#define PAGE_NONE	__pgprot(_PAGE_BASE)
-#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
-				 _PAGE_EXEC)
-#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ)
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ| \
-				 _PAGE_EXEC)
-#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ)
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ| \
-				 _PAGE_EXEC)
+#define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
+#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW)
+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_READ)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
+#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_READ)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
 
 #define __P000	PAGE_NONE
 #define __P001	PAGE_READONLY
@@ -419,8 +419,8 @@ static inline pte_t pte_clear_soft_dirty(pte_t pte)
  */
 static inline int pte_protnone(pte_t pte)
 {
-	return (pte_val(pte) &
-		(_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
+	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PRIVILEGED)) ==
+		(_PAGE_PRESENT | _PAGE_PRIVILEGED);
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
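With _PAGE_USER gone, PROT_NONE is encoded as present-but-privileged: PAGE_NONE above carries _PAGE_PRIVILEGED, a combination no ordinary user protection (PAGE_SHARED, PAGE_READONLY, ...) can produce, and that is exactly what pte_protnone() tests. A worked example, for illustration only and assuming _PAGE_BASE includes _PAGE_PRESENT as on this platform:

    static inline void protnone_example(void)
    {
            unsigned long none_pte = _PAGE_BASE | _PAGE_PRIVILEGED; /* PAGE_NONE */
            unsigned long user_pte = _PAGE_BASE | _PAGE_READ;       /* PAGE_READONLY */
            unsigned long mask = _PAGE_PRESENT | _PAGE_PRIVILEGED;

            /* (none_pte & mask) == mask            -> protnone */
            /* (user_pte & mask) == _PAGE_PRESENT   -> not protnone */
    }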
@@ -187,7 +187,7 @@ extern struct page *pgd_page(pgd_t pgd);
 
 static inline bool pte_user(pte_t pte)
 {
-	return !!(pte_val(pte) & _PAGE_USER);
+	return !(pte_val(pte) & _PAGE_PRIVILEGED);
 }
 
 #ifdef CONFIG_MEM_SOFT_DIRTY
@@ -211,6 +211,22 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 }
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
+static inline bool check_pte_access(unsigned long access, unsigned long ptev)
+{
+	/*
+	 * This check for _PAGE_RWX and _PAGE_PRESENT bits
+	 */
+	if (access & ~ptev)
+		return false;
+	/*
+	 * This check for access to privilege space
+	 */
+	if ((access & _PAGE_PRIVILEGED) != (ptev & _PAGE_PRIVILEGED))
+		return false;
+
+	return true;
+}
+
 void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
 void pgtable_cache_init(void);
 
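check_pte_access() replaces the old single-mask test `access & ~ptev`, which can no longer express "user only" now that user access is the absence of a bit. A worked example (illustration only, not part of the commit) of the case the commit message describes, a user access to a kernel-only page:

    /* A user fault (privileged bit clear in the request) on a kernel-only
     * PTE passes the RWX check but fails the privilege check. */
    static inline bool user_read_of_kernel_page(void)
    {
            unsigned long access = _PAGE_PRESENT | _PAGE_READ;
            unsigned long ptev = _PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED;

            return check_pte_access(access, ptev);  /* evaluates to false */
    }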
@@ -37,7 +37,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	if (unlikely(old_pte & _PAGE_BUSY))
 		return 0;
 	/* If PTE permissions don't match, take page fault */
-	if (unlikely(access & ~old_pte))
+	if (unlikely(!check_pte_access(access, old_pte)))
 		return 1;
 	/*
 	 * Try to lock the PTE, add ACCESSED and DIRTY if it was
@@ -69,7 +69,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	if (unlikely(old_pte & _PAGE_BUSY))
 		return 0;
 	/* If PTE permissions don't match, take page fault */
-	if (unlikely(access & ~old_pte))
+	if (unlikely(!check_pte_access(access, old_pte)))
 		return 1;
 	/*
 	 * Try to lock the PTE, add ACCESSED and DIRTY if it was
@@ -237,7 +237,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 	if (unlikely(old_pte & _PAGE_BUSY))
 		return 0;
 	/* If PTE permissions don't match, take page fault */
-	if (unlikely(access & ~old_pte))
+	if (unlikely(!check_pte_access(access, old_pte)))
 		return 1;
 	/*
 	 * Check if PTE has the cache-inhibit bit set
@@ -174,7 +174,7 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
 		 * User area is mapped with PP=0x2 for read/write
 		 * or PP=0x3 for read-only (including writeable but clean pages).
 		 */
-		if (pteflags & _PAGE_USER) {
+		if (!(pteflags & _PAGE_PRIVILEGED)) {
 			if (pteflags & _PAGE_RWX)
 				rflags |= 0x2;
 			if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY)))
@@ -1090,7 +1090,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 	/* Pre-check access permissions (will be re-checked atomically
 	 * in __hash_page_XX but this pre-check is a fast path
 	 */
-	if (access & ~pte_val(*ptep)) {
+	if (!check_pte_access(access, pte_val(*ptep))) {
 		DBG_LOW(" no access !\n");
 		rc = 1;
 		goto bail;
@@ -1228,12 +1228,16 @@ int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
 	if (dsisr & DSISR_ISSTORE)
 		access |= _PAGE_WRITE;
 	/*
-	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
-	 * accessing a userspace segment (even from the kernel). We assume
-	 * kernel addresses always have the high bit set.
+	 * We set _PAGE_PRIVILEGED only when
+	 * kernel mode access kernel space.
+	 *
+	 * _PAGE_PRIVILEGED is NOT set
+	 * 1) when kernel mode access user space
+	 * 2) user space access kernel space.
 	 */
+	access |= _PAGE_PRIVILEGED;
 	if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
-		access |= _PAGE_USER;
+		access &= ~_PAGE_PRIVILEGED;
 
 	if (trap == 0x400)
 		access |= _PAGE_EXEC;
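The hash fault path now assumes a privileged (kernel) access by default and clears the bit for the two user cases spelled out in the new comment. A condensed sketch of that logic (illustration only; build_hash_access() is a hypothetical helper, the real code lives inline in __hash_page() as shown above):

    static unsigned long build_hash_access(unsigned long base, unsigned long msr,
                                           unsigned long ea, int is_store)
    {
            unsigned long access = base | _PAGE_PRIVILEGED;

            if (is_store)
                    access |= _PAGE_WRITE;
            /* User-mode faults, or kernel faults on the user region,
             * must not demand a privileged mapping. */
            if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
                    access &= ~_PAGE_PRIVILEGED;
            return access;
    }

check_pte_access() then insists that the _PAGE_PRIVILEGED bit of this mask and of the PTE agree, which is what denies a kernel-only page in the user region and a user page in the kernel region.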
@@ -40,7 +40,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 	if (unlikely(old_pmd & _PAGE_BUSY))
 		return 0;
 	/* If PMD permissions don't match, take page fault */
-	if (unlikely(access & ~old_pmd))
+	if (unlikely(!check_pte_access(access, old_pmd)))
 		return 1;
 	/*
 	 * Try to lock the PTE, add ACCESSED and DIRTY if it was
@@ -50,8 +50,9 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	if (unlikely(old_pte & _PAGE_BUSY))
 		return 0;
 	/* If PTE permissions don't match, take page fault */
-	if (unlikely(access & ~old_pte))
+	if (unlikely(!check_pte_access(access, old_pte)))
 		return 1;
+
 	/* Try to lock the PTE, add ACCESSED and DIRTY if it was
 	 * a write access */
 	new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
@@ -1003,7 +1003,7 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 		end = pte_end;
 
 	pte = READ_ONCE(*ptep);
-	mask = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
+	mask = _PAGE_PRESENT | _PAGE_READ;
 	if (write)
 		mask |= _PAGE_WRITE;
 
@@ -43,9 +43,20 @@ static inline int is_exec_fault(void)
  */
 static inline int pte_looks_normal(pte_t pte)
 {
+
+#if defined(CONFIG_PPC_BOOK3S_64)
+	if ((pte_val(pte) &
+	     (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE)) ==
+	    _PAGE_PRESENT) {
+		if (pte_user(pte))
+			return 1;
+	}
+	return 0;
+#else
 	return (pte_val(pte) &
 	    (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER)) ==
 	    (_PAGE_PRESENT | _PAGE_USER);
+#endif
 }
 
 static struct page *maybe_pte_to_page(pte_t pte)
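On Book3S-64 the old single comparison can no longer be used because "user" is now the absence of _PAGE_PRIVILEGED rather than the presence of _PAGE_USER, so the new branch checks pte_user() separately. Written directly against the new bit, the branch collapses back to one mask test; a sketch for illustration only (not how the commit writes it):

    /* Equivalent form of the Book3S-64 branch above: normal means present,
     * not special, cacheable, and not a kernel-only mapping. */
    static inline int pte_looks_normal_b3s64(unsigned long pteval)
    {
            return (pteval & (_PAGE_PRESENT | _PAGE_SPECIAL |
                              _PAGE_NO_CACHE | _PAGE_PRIVILEGED)) ==
                    _PAGE_PRESENT;
    }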
@@ -280,8 +280,17 @@ void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
 	if (flags & _PAGE_WRITE)
 		flags |= _PAGE_DIRTY;
 
-	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
-	flags &= ~(_PAGE_USER | _PAGE_EXEC);
+	/* we don't want to let _PAGE_EXEC leak out */
+	flags &= ~_PAGE_EXEC;
+	/*
+	 * Force kernel mapping.
+	 */
+#if defined(CONFIG_PPC_BOOK3S_64)
+	flags |= _PAGE_PRIVILEGED;
+#else
+	flags &= ~_PAGE_USER;
+#endif
+
 
 #ifdef _PAGE_BAP_SR
 	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
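ioremap() mappings are always kernel mappings, and how that is expressed now differs per platform: Book3S-64 sets _PAGE_PRIVILEGED, everything else still clears _PAGE_USER. A small helper-style sketch of that split (illustration only; the commit open-codes it in ioremap_prot() as shown above):

    static inline unsigned long force_kernel_mapping(unsigned long flags)
    {
    #if defined(CONFIG_PPC_BOOK3S_64)
            flags |= _PAGE_PRIVILEGED;      /* kernel-only is a set bit here */
    #else
            flags &= ~_PAGE_USER;           /* other platforms keep the old scheme */
    #endif
            return flags;
    }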
@@ -664,7 +673,7 @@ void pmdp_huge_split_prepare(struct vm_area_struct *vma,
 	 * the translation is still valid, because we will withdraw
 	 * pgtable_t after this.
 	 */
-	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_USER, 0);
+	pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);
 }
 
 
@@ -141,7 +141,7 @@ int spufs_handle_class1(struct spu_context *ctx)
 	/* we must not hold the lock when entering copro_handle_mm_fault */
 	spu_release(ctx);
 
-	access = (_PAGE_PRESENT | _PAGE_READ | _PAGE_USER);
+	access = (_PAGE_PRESENT | _PAGE_READ);
 	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_WRITE : 0UL;
 	local_irq_save(flags);
 	ret = hash_page(ea, access, 0x300, dsisr);
@@ -152,8 +152,10 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
 	access = _PAGE_PRESENT | _PAGE_READ;
 	if (dsisr & CXL_PSL_DSISR_An_S)
 		access |= _PAGE_WRITE;
 
+	access |= _PAGE_PRIVILEGED;
 	if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID))
-		access |= _PAGE_USER;
+		access &= ~_PAGE_PRIVILEGED;
 
 	if (dsisr & DSISR_NOHPTE)
 		inv_flags |= HPTE_NOHPTE_UPDATE;