powerpc/8xx: reintroduce 16K pages with HW assistance
Using this HW assistance implies some constraints on the page table structure:
- Regardless of the main page size used (4k or 16k), the level 1 table (PGD) contains 1024 entries and each PGD entry covers a 4 Mbytes area which is managed by a level 2 table (PTE) containing also 1024 entries, each describing a 4k page.
- 16k pages require 4 identical entries in the L2 table.
- 512k page PTEs have to be spread every 128 bytes in the L2 table.
- 8M page PTEs are at the address pointed to by the L1 entry, and each 8M page requires 2 identical entries in the PGD.

In order to use hardware assistance with 16K pages, this patch makes the following modifications:
- Make PGD size independent of the main page size.
- In 16k pages mode, redefine pte_t as a struct with 4 elements, and populate those 4 elements in __set_pte_at() and pte_update().
- Adapt the size of the hugepage tables.
- Define a PTE_FRAG_NR so that a 16k page contains 4 page tables.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent: 3fb69c6a1a
Commit: 55c8fc3f49
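Before the diff, a minimal user-space sketch of the idea (ordinary C, not kernel code; pte_basic_t, the field names and the hypothetical set_pte() helper simply mirror the patch): with 16k pages the software pte_t spans four identical 4-byte hardware slots, and every store replicates the value so the 8xx tablewalk, which still reads 4k entries, sees a consistent mapping for the whole 16k page.

	#include <assert.h>
	#include <stdio.h>

	typedef unsigned long pte_basic_t;

	/* 16k-page layout: one software PTE covers four identical 4k HW entries */
	typedef struct { pte_basic_t pte, pte1, pte2, pte3; } pte_t;

	/* Mirrors what __set_pte_at()/pte_update() do in the patch: replicate
	 * the value into all four slots of the 16k page. */
	static void set_pte(pte_t *ptep, pte_basic_t val)
	{
		ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = val;
	}

	int main(void)
	{
		pte_t pte;

		set_pte(&pte, 0x1000UL | 0x1);	/* arbitrary pfn/flags for the demo */
		assert(pte.pte == pte.pte1 && pte.pte1 == pte.pte2 && pte.pte2 == pte.pte3);
		printf("all four 4k slots: %#lx\n", pte.pte);
		return 0;
	}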
@@ -689,7 +689,7 @@ config PPC_4K_PAGES
 
 config PPC_16K_PAGES
 	bool "16k page size"
-	depends on 44x
+	depends on 44x || PPC_8xx
 
 config PPC_64K_PAGES
 	bool "64k page size"
@@ -190,6 +190,7 @@ typedef struct {
 	struct slice_mask mask_8m;
 # endif
 #endif
+	void *pte_frag;
 } mm_context_t;
 
 #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
@@ -244,6 +245,9 @@ extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
 #define mmu_virtual_psize	MMU_PAGE_4K
 #elif defined(CONFIG_PPC_16K_PAGES)
 #define mmu_virtual_psize	MMU_PAGE_16K
+#define PTE_FRAG_NR		4
+#define PTE_FRAG_SIZE_SHIFT	12
+#define PTE_FRAG_SIZE		(1UL << 12)
 #else
 #error "Unsupported PAGE_SIZE"
 #endif
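As a consistency check on the fragment numbers above (an illustrative sketch with assumed values, not part of the patch): with 16k pages a level-2 table covering 4M is grouped into 256 pte_t of 16 bytes, i.e. still 1024 hardware 4-byte entries, so the table shrinks to 4k and four of them fit in one 16k page, which is what PTE_FRAG_NR and PTE_FRAG_SIZE express.

	/* Plain C11 sanity check, reusing the names from the hunk above. */
	#define PAGE_SHIFT		14		/* 16k pages */
	#define PTE_FRAG_SIZE_SHIFT	12		/* 4k per L2 table */
	#define PTE_FRAG_NR		4

	_Static_assert((1UL << PTE_FRAG_SIZE_SHIFT) == 1024 * 4,
		       "an L2 table is 1024 HW entries of 4 bytes");
	_Static_assert((PTE_FRAG_NR << PTE_FRAG_SIZE_SHIFT) == (1UL << PAGE_SHIFT),
		       "four fragments fill one 16k page");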
@@ -232,7 +232,13 @@ static inline unsigned long pte_update(pte_t *p,
 	: "cc" );
 #else /* PTE_ATOMIC_UPDATES */
 	unsigned long old = pte_val(*p);
-	*p = __pte((old & ~clr) | set);
+	unsigned long new = (old & ~clr) | set;
+
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
+	p->pte = p->pte1 = p->pte2 = p->pte3 = new;
+#else
+	*p = __pte(new);
+#endif
 #endif /* !PTE_ATOMIC_UPDATES */
 
 #ifdef CONFIG_44x
@@ -209,7 +209,11 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	/* Anything else just stores the PTE normally. That covers all 64-bit
 	 * cases, and 32-bit non-hash with 32-bit PTEs.
 	 */
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
+	ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = pte_val(pte);
+#else
 	*ptep = pte;
+#endif
 
 	/*
 	 * With hardware tablewalk, a sync is needed to ensure that
@@ -22,7 +22,8 @@
 #define PTE_FLAGS_OFFSET	0
 #endif
 
-#ifdef CONFIG_PPC_256K_PAGES
+#if defined(CONFIG_PPC_256K_PAGES) || \
+	(defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
 #define PTE_SHIFT	(PAGE_SHIFT - PTE_T_LOG2 - 2)	/* 1/4 of a page */
 #else
 #define PTE_SHIFT	(PAGE_SHIFT - PTE_T_LOG2)	/* full page */
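An illustrative aside on the "1/4 of a page" branch, with assumed values rather than anything taken from the diff: on the 8xx with 16k pages PAGE_SHIFT is 14 and pte_t is 16 bytes (PTE_T_LOG2 is 4), so PTE_SHIFT works out to 14 - 4 - 2 = 8.

	/* PTE_SHIFT = 8 means 256 pte_t per level-2 table, and since each pte_t
	 * carries four 4-byte hardware entries that is still the 1024 4k entries
	 * per 4M area required by the tablewalk assistance. */
	#define PAGE_SHIFT	14
	#define PTE_T_LOG2	4
	#define PTE_SHIFT	(PAGE_SHIFT - PTE_T_LOG2 - 2)

	_Static_assert((1 << PTE_SHIFT) == 256, "256 pte_t per level-2 table");
	_Static_assert((1 << PTE_SHIFT) * 4 == 1024, "1024 HW 4k entries per 4M");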
@@ -3,7 +3,11 @@
 #define _ASM_POWERPC_PGTABLE_TYPES_H
 
 /* PTE level */
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
+typedef struct { pte_basic_t pte, pte1, pte2, pte3; } pte_t;
+#else
 typedef struct { pte_basic_t pte; } pte_t;
+#endif
 #define __pte(x)	((pte_t) { (x) })
 static inline pte_basic_t pte_val(pte_t x)
 {