mm/thp: define default pmd_pgtable()
Currently most platforms define pmd_pgtable() as pmd_page(), duplicating the same code all over. Instead, define a default pmd_pgtable() as pmd_page() and let platforms override it when required via <asm/pgtable.h>. All existing platform overrides of pmd_pgtable() have been moved into their respective <asm/pgtable.h> headers so that they precede the new generic definition. This makes the code much cleaner and smaller.

Link: https://lkml.kernel.org/r/1623646133-20306-1-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Guo Ren <guoren@kernel.org>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Cc: Stafford Horne <shorne@gmail.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 351de44fde
Commit: 1c2f7d14d8
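The change relies on a simple preprocessor fallback: the generic header supplies pmd_pgtable() only when no architecture header has already defined it. Below is a minimal standalone sketch of that pattern in plain userspace C, not kernel code; the types, the shift value, and the ARCH_HAS_CUSTOM_PMD_PGTABLE switch are illustrative stand-ins, with the stand-in default playing the role of pmd_page().

/* Standalone sketch of the #ifndef fallback used by the generic header. */
#include <stdio.h>

typedef unsigned long pmd_t;
typedef unsigned long pgtable_t;

/* "arch" header: a platform needing a non-default helper defines it first. */
#ifdef ARCH_HAS_CUSTOM_PMD_PGTABLE
#define pmd_pgtable(pmd) ((pgtable_t)((pmd) & ~0xfffUL))  /* stand-in override */
#endif

/* "generic" header: provides the default only when nothing is defined yet. */
#ifndef pmd_pgtable
#define pmd_pgtable(pmd) ((pgtable_t)((pmd) >> 12))       /* stand-in for pmd_page() */
#endif

int main(void)
{
	pmd_t pmd = 0x1234000UL;

	printf("pmd_pgtable(pmd) = %#lx\n", (unsigned long)pmd_pgtable(pmd));
	return 0;
}

Built as-is, the generic default applies; built with -DARCH_HAS_CUSTOM_PMD_PGTABLE, the earlier definition wins, mirroring how an override in <asm/pgtable.h> now takes precedence over the #ifndef block added to include/linux/pgtable.h.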
@@ -18,7 +18,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
 {
 	pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
 }
-#define pmd_pgtable(pmd) pmd_page(pmd)
 
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
@@ -129,6 +129,4 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
 
 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
 
-#define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd))
-
 #endif /* _ASM_ARC_PGALLOC_H */
@@ -350,6 +350,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 
 #define kern_addr_valid(addr) (1)
 
+#define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd))
+
 /*
  * remap a physical page `pfn' of size `size' with page protection `prot'
  * into virtual address `from'
@@ -143,7 +143,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
 
 	__pmd_populate(pmdp, page_to_phys(ptep), prot);
 }
-#define pmd_pgtable(pmd) pmd_page(pmd)
 
 #endif /* CONFIG_MMU */
 
@@ -86,6 +86,5 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
 	VM_BUG_ON(mm == &init_mm);
 	__pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE | PMD_TABLE_PXN);
 }
-#define pmd_pgtable(pmd) pmd_page(pmd)
 
 #endif
@@ -22,8 +22,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 	set_pmd(pmd, __pmd(__pa(page_address(pte))));
 }
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
 extern void pgd_init(unsigned long *p);
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
@@ -239,7 +239,6 @@ static inline int pmd_bad(pmd_t pmd)
  * pmd_page - converts a PMD entry to a page pointer
  */
 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-#define pmd_pgtable(pmd) pmd_page(pmd)
 
 /**
  * pte_none - check if pte is mapped
@@ -52,7 +52,6 @@ pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
 {
 	pmd_val(*pmd_entry) = page_to_phys(pte);
 }
-#define pmd_pgtable(pmd) pmd_page(pmd)
 
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
@@ -32,8 +32,6 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 
 #define pmd_populate_kernel pmd_populate
 
-#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
-
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
 	unsigned long address)
 {
@@ -150,6 +150,8 @@
 
 #ifndef __ASSEMBLY__
 
+#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -88,7 +88,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
 {
 	pmd_set(pmd, page);
 }
-#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
@@ -105,6 +105,8 @@ extern unsigned long mm_cachebits;
 #define __S110 PAGE_SHARED_C
 #define __S111 PAGE_SHARED_C
 
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -32,7 +32,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
 {
 	pmd_val(*pmd) = __pa((unsigned long)page_address(page));
 }
-#define pmd_pgtable(pmd) pmd_page(pmd)
 
 /*
  * allocating and freeing a pmd is trivial: the 1-entry pmd is
@@ -28,8 +28,6 @@ static inline pgd_t *get_pgd(void)
 
 #define pgd_alloc(mm) get_pgd()
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
 
 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
@@ -28,7 +28,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 {
 	set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
 }
-#define pmd_pgtable(pmd) pmd_page(pmd)
 
 /*
  * Initialize a new pmd table with invalid pointers.
@@ -12,11 +12,6 @@
 #define __HAVE_ARCH_PTE_ALLOC_ONE
 #include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
 
-/*
- * Since we have only two-level page tables, these are trivial
- */
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t * pgd);
 
@@ -25,7 +25,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 {
 	set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
 }
-#define pmd_pgtable(pmd) pmd_page(pmd)
 
 /*
  * Initialize a new pmd table with invalid pointers.
@@ -72,6 +72,4 @@ do { \
 	tlb_remove_page((tlb), (pte)); \
 } while (0)
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
 #endif
@@ -69,6 +69,5 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 
 #define pmd_populate(mm, pmd, pte_page) \
 	pmd_populate_kernel(mm, pmd, page_address(pte_page))
-#define pmd_pgtable(pmd) pmd_page(pmd)
 
 #endif
@@ -70,9 +70,4 @@ extern struct kmem_cache *pgtable_cache[];
 #include <asm/nohash/pgalloc.h>
 #endif
 
-static inline pgtable_t pmd_pgtable(pmd_t pmd)
-{
-	return (pgtable_t)pmd_page_vaddr(pmd);
-}
-
 #endif /* _ASM_POWERPC_PGALLOC_H */
@@ -152,6 +152,12 @@ static inline bool p4d_is_leaf(p4d_t p4d)
 }
 #endif
 
+#define pmd_pgtable pmd_pgtable
+static inline pgtable_t pmd_pgtable(pmd_t pmd)
+{
+	return (pgtable_t)pmd_page_vaddr(pmd);
+}
+
 #ifdef CONFIG_PPC64
 #define is_ioremap_addr is_ioremap_addr
 static inline bool is_ioremap_addr(const void *x)
@@ -38,8 +38,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;
@@ -134,9 +134,6 @@ static inline void pmd_populate(struct mm_struct *mm,
 
 #define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
 
-#define pmd_pgtable(pmd) \
-	((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
-
 /*
  * page table entry allocation/free routines.
 */
@@ -1709,4 +1709,7 @@ extern void s390_reset_cmma(struct mm_struct *mm);
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
+#define pmd_pgtable(pmd) \
+	((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
+
 #endif /* _S390_PAGE_H */
@@ -30,7 +30,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 {
 	set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
 }
-#define pmd_pgtable(pmd) pmd_page(pmd)
 
 #define __pte_free_tlb(tlb,pte,addr) \
 do { \
@@ -51,7 +51,6 @@ static inline void free_pmd_fast(pmd_t * pmd)
 #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
 
 #define pmd_populate(mm, pmd, pte) pmd_set(pmd, pte)
-#define pmd_pgtable(pmd) (pgtable_t)__pmd_page(pmd)
 
 void pmd_set(pmd_t *pmdp, pte_t *ptep);
 #define pmd_populate_kernel pmd_populate
@@ -67,7 +67,6 @@ void pte_free(struct mm_struct *mm, pgtable_t ptepage);
 
 #define pmd_populate_kernel(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
 #define pmd_populate(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
-#define pmd_pgtable(PMD) ((pte_t *)pmd_page_vaddr(PMD))
 
 void pgtable_free(void *table, bool is_page);
 
@@ -432,4 +432,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 /* We provide our own get_unmapped_area to cope with VA holes for userland */
 #define HAVE_ARCH_UNMAPPED_AREA
 
+#define pmd_pgtable(pmd) ((pgtable_t)__pmd_page(pmd))
+
 #endif /* !(_SPARC_PGTABLE_H) */
@@ -1117,6 +1117,8 @@ extern unsigned long cmdline_memory_size;
 
 asmlinkage void do_sparc64_fault(struct pt_regs *regs);
 
+#define pmd_pgtable(PMD) ((pte_t *)pmd_page_vaddr(PMD))
+
 #ifdef CONFIG_HUGETLB_PAGE
 
 #define pud_leaf_size pud_leaf_size
@@ -19,7 +19,6 @@
 	set_pmd(pmd, __pmd(_PAGE_TABLE + \
 		((unsigned long long)page_to_pfn(pte) << \
 		(unsigned long long) PAGE_SHIFT)))
-#define pmd_pgtable(pmd) pmd_page(pmd)
 
 /*
  * Allocate and free page tables.
@@ -84,8 +84,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
 }
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
 #if CONFIG_PGTABLE_LEVELS > 2
 extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
 
@@ -25,7 +25,6 @@
 	(pmd_val(*(pmdp)) = ((unsigned long)ptep))
 #define pmd_populate(mm, pmdp, page) \
 	(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
-#define pmd_pgtable(pmd) pmd_page(pmd)
 
 static inline pgd_t*
 pgd_alloc(struct mm_struct *mm)
@@ -63,7 +62,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 	return page;
 }
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
 #endif /* CONFIG_MMU */
 
 #endif /* _XTENSA_PGALLOC_H */
@@ -37,6 +37,15 @@
 #define FIRST_USER_ADDRESS 0UL
 #endif
 
+/*
+ * This defines the generic helper for accessing PMD page
+ * table page. Although platforms can still override this
+ * via their respective <asm/pgtable.h>.
+ */
+#ifndef pmd_pgtable
+#define pmd_pgtable(pmd) pmd_page(pmd)
+#endif
+
 /*
  * A page table page can be thought of an array like this: pXd_t[PTRS_PER_PxD]
 *