powerpc ioremap_prot
This adds ioremap_prot() and pte_pgprot() so that one can extract the protection bits from a PTE and pass them to ioremap_prot() (in order to support ptrace of VM_IO | VM_PFNMAP mappings, as per Rik's patch). It also moves a couple of flag checks around in the ioremap implementations of arch/powerpc. As a side effect, non-cacheable, non-guarded mappings become possible on ppc32, which previously always set _PAGE_GUARDED whenever _PAGE_NO_CACHE was set (standard ioremap will still set _PAGE_GUARDED, but ioremap_prot is capable of creating such a non-guarded mapping).

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 7ae8ed5053
Commit: a1f242ff46
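For orientation, here is a minimal sketch of how these hooks are meant to be consumed by generic code. It is modelled on the access_process_vm() path for VM_IO | VM_PFNMAP vmas from Rik's patch; the access_io_mapping() name and the follow_phys() helper signature are assumptions for illustration, not part of this commit:

/* Sketch: give ptrace access to a VM_IO | VM_PFNMAP mapping by
 * recreating the user PTE's protection in a temporary kernel mapping. */
static int access_io_mapping(struct vm_area_struct *vma, unsigned long addr,
			     void *buf, int len, int write)
{
	resource_size_t phys_addr;
	unsigned long prot = 0;
	void __iomem *maddr;
	int offset = addr & (PAGE_SIZE - 1);

	/* Assumed helper: walks the page tables and returns the physical
	 * address plus the PTE's protection bits via pte_pgprot(). */
	if (follow_phys(vma, addr, write, &prot, &phys_addr))
		return -EINVAL;

	/* Map with the same attributes the user mapping uses, so e.g. a
	 * non-cacheable framebuffer PTE stays non-cacheable. */
	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
	if (!maddr)
		return -ENOMEM;

	if (write)
		memcpy_toio(maddr + offset, buf, len);
	else
		memcpy_fromio(buf, maddr + offset, len);
	iounmap(maddr);

	return len;
}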
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -111,6 +111,7 @@ config PPC
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE
 	select HAVE_IDE
+	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
 	select HAVE_ARCH_KGDB
 	select HAVE_KRETPROBES
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -145,13 +145,20 @@ void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
-	return __ioremap(addr, size, _PAGE_NO_CACHE);
+	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
 }
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *
 ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
+	/* writeable implies dirty for kernel addresses */
+	if (flags & _PAGE_RW)
+		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
+
+	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
+	flags &= ~(_PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC);
+
 	return __ioremap(addr, size, flags);
 }
 EXPORT_SYMBOL(ioremap_flags);
@@ -163,6 +170,14 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 	phys_addr_t p;
 	int err;
 
+	/* Make sure we have the base flags */
+	if ((flags & _PAGE_PRESENT) == 0)
+		flags |= _PAGE_KERNEL;
+
+	/* Non-cacheable page cannot be coherent */
+	if (flags & _PAGE_NO_CACHE)
+		flags &= ~_PAGE_COHERENT;
+
 	/*
 	 * Choose an address to map it to.
 	 * Once the vmalloc system is running, we use it.
@@ -219,11 +234,6 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 		v = (ioremap_bot -= size);
 	}
 
-	if ((flags & _PAGE_PRESENT) == 0)
-		flags |= _PAGE_KERNEL;
-	if (flags & _PAGE_NO_CACHE)
-		flags |= _PAGE_GUARDED;
-
 	/*
 	 * Should check if it is a candidate for a BAT mapping
 	 */
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -107,9 +107,18 @@ void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
 {
 	unsigned long i;
 
+	/* Make sure we have the base flags */
 	if ((flags & _PAGE_PRESENT) == 0)
 		flags |= pgprot_val(PAGE_KERNEL);
 
+	/* Non-cacheable page cannot be coherent */
+	if (flags & _PAGE_NO_CACHE)
+		flags &= ~_PAGE_COHERENT;
+
+	/* We don't support the 4K PFN hack with ioremap */
+	if (flags & _PAGE_4K_PFN)
+		return NULL;
+
 	WARN_ON(pa & ~PAGE_MASK);
 	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
 	WARN_ON(size & ~PAGE_MASK);
@@ -190,6 +199,13 @@ void __iomem * ioremap(phys_addr_t addr, unsigned long size)
 void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
 			     unsigned long flags)
 {
+	/* writeable implies dirty for kernel addresses */
+	if (flags & _PAGE_RW)
+		flags |= _PAGE_DIRTY;
+
+	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
+	flags &= ~(_PAGE_USER | _PAGE_EXEC);
+
 	if (ppc_md.ioremap)
 		return ppc_md.ioremap(addr, size, flags);
 	return __ioremap(addr, size, flags);
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -617,7 +617,8 @@ static inline void iosync(void)
  * and can be hooked by the platform via ppc_md
  *
  * * ioremap_flags allows to specify the page flags as an argument and can
- * also be hooked by the platform via ppc_md
+ * also be hooked by the platform via ppc_md. ioremap_prot is the exact
+ * same thing as ioremap_flags.
  *
  * * ioremap_nocache is identical to ioremap
  *
@@ -639,6 +640,8 @@ extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
 extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size,
 				   unsigned long flags);
 #define ioremap_nocache(addr, size)	ioremap((addr), (size))
+#define ioremap_prot(addr, size, prot)	ioremap_flags((addr), (size), (prot))
+
 extern void iounmap(volatile void __iomem *addr);
 
 extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
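The define above makes ioremap_prot() available wherever the platform selects HAVE_IOREMAP_PROT. As a hedged usage sketch (hypothetical driver code, not from this commit): since ioremap_prot() feeds the sanitized flags straight to __ioremap(), a ppc32 driver could now request a non-cacheable but non-guarded mapping, something plain ioremap() never produces because it always ORs in _PAGE_GUARDED:

/* Hypothetical: map a prefetch-safe device aperture non-cacheable but
 * without _PAGE_GUARDED; __ioremap() adds _PAGE_KERNEL itself because
 * _PAGE_PRESENT is not set in the flags we pass. */
static void __iomem *map_aperture(phys_addr_t base, unsigned long size)
{
	return ioremap_prot(base, size, _PAGE_NO_CACHE);
}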
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -51,6 +51,9 @@
 #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
 			 _PAGE_SECONDARY | _PAGE_GROUP_IX)
 
+/* There is no 4K PFN hack on 4K pages */
+#define _PAGE_4K_PFN	0
+
 /* PAGE_MASK gives the right answer below, but only by accident */
 /* It should be preserving the high 48 bits and then specifically */
 /* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -395,6 +395,12 @@ extern int icache_44x_need_flush;
 #ifndef _PAGE_EXEC
 #define _PAGE_EXEC	0
 #endif
+#ifndef _PAGE_ENDIAN
+#define _PAGE_ENDIAN	0
+#endif
+#ifndef _PAGE_COHERENT
+#define _PAGE_COHERENT	0
+#endif
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK _PMD_PRESENT
 #endif
@@ -405,6 +411,12 @@ extern int icache_44x_need_flush;
 
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 
+
+#define PAGE_PROT_BITS	__pgprot(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+				 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
+				 _PAGE_USER | _PAGE_ACCESSED | \
+				 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
+				 _PAGE_EXEC | _PAGE_HWEXEC)
 /*
  * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
  * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
@@ -538,6 +550,10 @@ static inline pte_t pte_mkyoung(pte_t pte) {
 	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
 	return pte; }
+static inline unsigned long pte_pgprot(pte_t pte)
+{
+	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+}
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -117,6 +117,10 @@
 #define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
 #define HAVE_PAGE_AGP
 
+#define PAGE_PROT_BITS	__pgprot(_PAGE_GUARDED | _PAGE_COHERENT | \
+				 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
+				 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
+				 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
 /* PTEIDX nibble */
 #define _PTEIDX_SECONDARY	0x8
 #define _PTEIDX_GROUP_IX	0x7
@@ -262,6 +266,10 @@ static inline pte_t pte_mkhuge(pte_t pte) {
 	return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
 	return pte; }
+static inline unsigned long pte_pgprot(pte_t pte)
+{
+	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+}
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
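Finally, a hedged sketch of what the new pte_pgprot() accessor is for (remap_like_pte() is a hypothetical helper, assuming the caller already holds a valid PTE): it keeps only the PAGE_PROT_BITS (cacheability, guarding, endianness and access bits) so they can be fed back into ioremap_prot():

/* Hypothetical: recreate a PTE's memory attributes in a kernel mapping. */
static void __iomem *remap_like_pte(pte_t pte, unsigned long size)
{
	/* Cast before shifting: the physical address can exceed 32 bits. */
	phys_addr_t pa = (phys_addr_t)pte_pfn(pte) << PAGE_SHIFT;

	/* pte_pgprot() masks the PTE down to PAGE_PROT_BITS;
	 * ioremap_prot() then strips _PAGE_USER/_PAGE_EXEC again. */
	return ioremap_prot(pa, size, pte_pgprot(pte));
}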