powerpc/mm: Use _PAGE_READ to indicate Read access
This splits the _PAGE_RW bit into _PAGE_READ and _PAGE_WRITE. It also
removes the dependency on _PAGE_USER for implying read-only. One thing
to note is that read permission is implied by write and execute
permission, hence we should always find _PAGE_READ set on a hash pte
fault.
We still can't switch PROT_NONE to !(_PAGE_RWX): automatic NUMA
balancing depends on marking a PROT_NONE pte _PAGE_WRITE. (For details
see commit b191f9b106 "mm: numa: preserve PTE write permissions across
a NUMA hinting fault".)
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Jeremy Kerr <jk@ozlabs.org>
Cc: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Acked-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent: ee3caed37d
Commit: c7d54842de
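To make the new convention concrete before the diff, here is a minimal userspace sketch (editorial, not part of the patch; only the R/W/X bit values match the bit-definition hunk below, and _PAGE_PRESENT uses an illustrative placeholder value, not the kernel's) of how a fault handler now builds its access mask: read is always required, and write is added only for store faults.

#include <stdbool.h>
#include <stdio.h>

#define _PAGE_EXEC    0x00001UL /* values from the bit-definition hunk below */
#define _PAGE_WRITE   0x00002UL
#define _PAGE_READ    0x00004UL
#define _PAGE_RW      (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX     (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRESENT 0x10000UL /* placeholder bit, not the kernel's value */

/* Mirrors the __hash_page() change in this commit: every hash fault
 * demands read permission; a store fault additionally demands write. */
static unsigned long fault_access(bool is_store)
{
	unsigned long access = _PAGE_PRESENT | _PAGE_READ;

	if (is_store)
		access |= _PAGE_WRITE;
	return access;
}

int main(void)
{
	printf("load fault mask:  %#lx\n", fault_access(false));
	printf("store fault mask: %#lx\n", fault_access(true));
	return 0;
}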
@@ -291,10 +291,10 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 				      pmd_t *pmdp)
 {
 
-	if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
+	if ((pmd_val(*pmdp) & _PAGE_WRITE) == 0)
 		return;
 
-	pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
+	pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
 }
 
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -16,8 +16,10 @@
 #define _PAGE_BIT_SWAP_TYPE	0
 
 #define _PAGE_EXEC		0x00001 /* execute permission */
-#define _PAGE_RW		0x00002 /* read & write access allowed */
+#define _PAGE_WRITE		0x00002 /* write access allowed */
+#define _PAGE_READ		0x00004 /* read access allowed */
+#define _PAGE_RW		(_PAGE_READ | _PAGE_WRITE)
+#define _PAGE_RWX		(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
 #define _PAGE_USER		0x00008 /* page may be accessed by userspace */
 #define _PAGE_GUARDED		0x00010 /* G: guarded (side-effect) page */
 /* M (memory coherence) is always set in the HPTE, so we don't need it here */
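One subtlety worth spelling out (an editorial sketch, reusing only the constants from the hunk above): _PAGE_RW is now a composite mask, so a `pte & _PAGE_RW` test is non-zero even for a read-only pte. Write checks must therefore test _PAGE_WRITE alone, which is exactly the substitution the remaining hunks perform.

#define _PAGE_WRITE 0x00002UL /* values from the hunk above */
#define _PAGE_READ  0x00004UL
#define _PAGE_RW    (_PAGE_READ | _PAGE_WRITE)

/* Correct after this patch: write permission is the _PAGE_WRITE bit only. */
static inline int pte_can_write(unsigned long pteval)
{
	return !!(pteval & _PAGE_WRITE);
}

/* Incorrect after this patch: a read-only pte (only _PAGE_READ set)
 * would satisfy this test, because _PAGE_RW now includes _PAGE_READ. */
static inline int pte_can_write_buggy(unsigned long pteval)
{
	return !!(pteval & _PAGE_RW);
}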
@@ -145,8 +147,8 @@
  */
 #define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
 			 _PAGE_WRITETHRU | _PAGE_4K_PFN | \
-			 _PAGE_USER | _PAGE_ACCESSED | \
-			 _PAGE_RW | _PAGE_DIRTY | _PAGE_EXEC | \
+			 _PAGE_USER | _PAGE_ACCESSED | _PAGE_READ |\
+			 _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
 			 _PAGE_SOFT_DIRTY)
 /*
  * We define 2 sets of base prot bits, one for basic pages (ie,
@@ -171,10 +173,12 @@
 #define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
 #define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
 				 _PAGE_EXEC)
-#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER )
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER )
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ| \
+				 _PAGE_EXEC)
+#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ| \
+				 _PAGE_EXEC)
 
 #define __P000	PAGE_NONE
 #define __P001	PAGE_READONLY
@@ -296,19 +300,19 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 				      pte_t *ptep)
 {
 
-	if ((pte_val(*ptep) & _PAGE_RW) == 0)
+	if ((pte_val(*ptep) & _PAGE_WRITE) == 0)
 		return;
 
-	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
+	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
 }
 
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 					   unsigned long addr, pte_t *ptep)
 {
-	if ((pte_val(*ptep) & _PAGE_RW) == 0)
+	if ((pte_val(*ptep) & _PAGE_WRITE) == 0)
 		return;
 
-	pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
+	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
 }
 
 /*
@@ -349,7 +353,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 {
 	__be64 old, tmp, val, mask;
 
-	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
+	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
 			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);
 
 	val = pte_raw(entry) & mask;
@@ -384,7 +388,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
 
 
 /* Generic accessors to PTE bits */
-static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
+static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_WRITE);}
 static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
 static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
 static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
@@ -445,7 +449,7 @@ static inline unsigned long pte_pfn(pte_t pte)
 /* Generic modifiers for PTE bits */
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_RW);
+	return __pte(pte_val(pte) & ~_PAGE_WRITE);
 }
 
 static inline pte_t pte_mkclean(pte_t pte)
@@ -460,6 +464,9 @@ static inline pte_t pte_mkold(pte_t pte)
 
 static inline pte_t pte_mkwrite(pte_t pte)
 {
+	/*
+	 * write implies read, hence set both
+	 */
 	return __pte(pte_val(pte) | _PAGE_RW);
 }
 
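Because _PAGE_RW now expands to _PAGE_READ | _PAGE_WRITE, the unchanged `| _PAGE_RW` line in pte_mkwrite() sets both bits at once, which is what the added comment records. A tiny self-checking userspace sketch of that invariant (editorial, constants copied from the bit-definition hunk):

#include <assert.h>

#define _PAGE_WRITE 0x00002UL
#define _PAGE_READ  0x00004UL
#define _PAGE_RW    (_PAGE_READ | _PAGE_WRITE)

int main(void)
{
	unsigned long pte = 0;

	pte |= _PAGE_RW; /* what pte_mkwrite() does */
	/* write implies read: both bits end up set */
	assert(pte & _PAGE_READ);
	assert(pte & _PAGE_WRITE);
	return 0;
}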
@@ -198,3 +198,8 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 /* Advertise support for _PAGE_SPECIAL */
 #define __HAVE_ARCH_PTE_SPECIAL
 
+#ifndef _PAGE_READ
+/* if not defined, we should not find _PAGE_WRITE too */
+#define _PAGE_READ 0
+#define _PAGE_WRITE _PAGE_RW
+#endif
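The fallback above lets generic powerpc code use the new names unconditionally: on subarchitectures without the split, OR-ing in _PAGE_READ is a no-op and testing _PAGE_WRITE tests the legacy _PAGE_RW bit. A hedged illustration (the 0x2 legacy value is a hypothetical stand-in):

/* Pretend this platform never defined _PAGE_READ. */
#define _PAGE_RW 0x2UL /* hypothetical legacy bit */

#ifndef _PAGE_READ
/* the fallback from the hunk above */
#define _PAGE_READ 0
#define _PAGE_WRITE _PAGE_RW
#endif

/* Generic code written against the new names still does the right
 * thing here: loads add no extra bits, stores add the legacy RW bit. */
static unsigned long access_bits(int is_store)
{
	unsigned long access = _PAGE_READ; /* no-op: _PAGE_READ == 0 */

	if (is_store)
		access |= _PAGE_WRITE; /* == the legacy _PAGE_RW bit */
	return access;
}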
@@ -45,7 +45,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 		 * also add _PAGE_COMBO
 		 */
 		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
-		if (access & _PAGE_RW)
+		if (access & _PAGE_WRITE)
 			new_pte |= _PAGE_DIRTY;
 	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
@@ -77,7 +77,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 		 * also add _PAGE_COMBO
 		 */
 		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_COMBO;
-		if (access & _PAGE_RW)
+		if (access & _PAGE_WRITE)
 			new_pte |= _PAGE_DIRTY;
 	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
@@ -251,7 +251,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 		 * a write access.
 		 */
 		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
-		if (access & _PAGE_RW)
+		if (access & _PAGE_WRITE)
 			new_pte |= _PAGE_DIRTY;
 	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
@@ -175,8 +175,9 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
 	 * or PP=0x3 for read-only (including writeable but clean pages).
 	 */
 	if (pteflags & _PAGE_USER) {
-		rflags |= 0x2;
-		if (!((pteflags & _PAGE_RW) && (pteflags & _PAGE_DIRTY)))
+		if (pteflags & _PAGE_RWX)
+			rflags |= 0x2;
+		if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY)))
 			rflags |= 0x1;
 	}
 	/*
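For readers without the HPTE format in their head: PP=0x2 grants userspace read/write and PP=0x3 read-only, as the context comment above says. A compact model of the patched logic (editorial; _PAGE_DIRTY here is an illustrative placeholder value, the others come from the bit-definition hunk):

#define _PAGE_EXEC  0x00001UL
#define _PAGE_WRITE 0x00002UL
#define _PAGE_READ  0x00004UL
#define _PAGE_RWX   (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_USER  0x00008UL
#define _PAGE_DIRTY 0x10000UL /* placeholder, not the kernel's value */

static unsigned long pp_bits(unsigned long pteflags)
{
	unsigned long rflags = 0;

	if (pteflags & _PAGE_USER) {
		/* grant user access only if the pte allows some access */
		if (pteflags & _PAGE_RWX)
			rflags |= 0x2;
		/* not (writable and dirty): force read-only, PP=0x3 */
		if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY)))
			rflags |= 0x1;
	}
	return rflags;
}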
@@ -1209,7 +1210,7 @@ EXPORT_SYMBOL_GPL(hash_page);
 int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
 		unsigned long dsisr)
 {
-	unsigned long access = _PAGE_PRESENT;
+	unsigned long access = _PAGE_PRESENT | _PAGE_READ;
 	unsigned long flags = 0;
 	struct mm_struct *mm = current->mm;
 
@@ -1220,7 +1221,7 @@ int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
 		flags |= HPTE_NOHPTE_UPDATE;
 
 	if (dsisr & DSISR_ISSTORE)
-		access |= _PAGE_RW;
+		access |= _PAGE_WRITE;
 	/*
 	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
 	 * accessing a userspace segment (even from the kernel). We assume
@@ -47,7 +47,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 		 * a write access
 		 */
 		new_pmd = old_pmd | _PAGE_BUSY | _PAGE_ACCESSED;
-		if (access & _PAGE_RW)
+		if (access & _PAGE_WRITE)
 			new_pmd |= _PAGE_DIRTY;
 	} while (!pmd_xchg(pmdp, __pmd(old_pmd), __pmd(new_pmd)));
 
@@ -55,7 +55,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 		/* Try to lock the PTE, add ACCESSED and DIRTY if it was
 		 * a write access */
 		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
-		if (access & _PAGE_RW)
+		if (access & _PAGE_WRITE)
 			new_pte |= _PAGE_DIRTY;
 	} while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
@@ -1003,9 +1003,9 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 		end = pte_end;
 
 	pte = READ_ONCE(*ptep);
-	mask = _PAGE_PRESENT | _PAGE_USER;
+	mask = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
 	if (write)
-		mask |= _PAGE_RW;
+		mask |= _PAGE_WRITE;
 
 	if ((pte_val(pte) & mask) != mask)
 		return 0;
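The fast-GUP test above requires all bits simultaneously, not any one of them. A short model (editorial; _PAGE_PRESENT is again an illustrative placeholder value):

#define _PAGE_WRITE   0x00002UL
#define _PAGE_READ    0x00004UL
#define _PAGE_USER    0x00008UL
#define _PAGE_PRESENT 0x10000UL /* placeholder, not the kernel's value */

/* Mirrors gup_hugepte(): the pte must be present, user-accessible and
 * readable; for a write access it must be writable as well. */
static int gup_allowed(unsigned long pteval, int write)
{
	unsigned long mask = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;

	if (write)
		mask |= _PAGE_WRITE;
	return (pteval & mask) == mask;
}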
@@ -177,8 +177,8 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 	 * _PAGE_PRESENT, but we can be sure that it is not in hpte.
 	 * Hence we can use set_pte_at for them.
 	 */
-	VM_WARN_ON((pte_val(*ptep) & (_PAGE_PRESENT | _PAGE_USER)) ==
-		(_PAGE_PRESENT | _PAGE_USER));
+	VM_WARN_ON(pte_present(*ptep) && !pte_protnone(*ptep));
 
 	/*
 	 * Add the pte bit when tryint set a pte
 	 */
@@ -277,7 +277,7 @@ void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
 	void *caller = __builtin_return_address(0);
 
 	/* writeable implies dirty for kernel addresses */
-	if (flags & _PAGE_RW)
+	if (flags & _PAGE_WRITE)
 		flags |= _PAGE_DIRTY;
 
 	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
@@ -676,8 +676,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		pmd_t *pmdp, pmd_t pmd)
 {
 #ifdef CONFIG_DEBUG_VM
-	WARN_ON((pmd_val(*pmdp) & (_PAGE_PRESENT | _PAGE_USER)) ==
-		(_PAGE_PRESENT | _PAGE_USER));
+	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
 	assert_spin_locked(&mm->page_table_lock);
 	WARN_ON(!pmd_trans_huge(pmd));
 #endif
@@ -197,7 +197,7 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 	    (REGION_ID(ea) != USER_REGION_ID)) {
 
 		spin_unlock(&spu->register_lock);
-		ret = hash_page(ea, _PAGE_PRESENT, 0x300, dsisr);
+		ret = hash_page(ea, _PAGE_PRESENT | _PAGE_READ, 0x300, dsisr);
 		spin_lock(&spu->register_lock);
 
 		if (!ret) {
@@ -141,8 +141,8 @@ int spufs_handle_class1(struct spu_context *ctx)
 	/* we must not hold the lock when entering copro_handle_mm_fault */
 	spu_release(ctx);
 
-	access = (_PAGE_PRESENT | _PAGE_USER);
-	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
+	access = (_PAGE_PRESENT | _PAGE_READ | _PAGE_USER);
+	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_WRITE : 0UL;
 	local_irq_save(flags);
 	ret = hash_page(ea, access, 0x300, dsisr);
 	local_irq_restore(flags);
@@ -149,9 +149,9 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
 	 * update_mmu_cache() will not have loaded the hash since current->trap
 	 * is not a 0x400 or 0x300, so just call hash_page_mm() here.
 	 */
-	access = _PAGE_PRESENT;
+	access = _PAGE_PRESENT | _PAGE_READ;
 	if (dsisr & CXL_PSL_DSISR_An_S)
-		access |= _PAGE_RW;
+		access |= _PAGE_WRITE;
 	if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID))
 		access |= _PAGE_USER;