ARC: mm: switch pgtable_t back to struct page *
So far ARC pgtable_t has not been struct page based, to avoid the extra
page_address() calls involved. However the differences are down to noise
and get in the way of using generic code, hence this patch.

This also allows us to reuse the generic THP deposit/withdraw code.

There's some additional consideration for PGDIR_SHIFT in the 4K page
config: now that a page table is only PAGE_SIZE deep, the address split
can't be entirely arbitrary.

Tested-by: kernel test robot <lkp@intel.com>
Suggested-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Vineet Gupta <vgupta@kernel.org>
Parent: f35534a2bc
Commit: d9820ff76f
@@ -58,14 +58,6 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                                  pmd_t *pmd);
 
-/* Generic variants assume pgtable_t is struct page *, hence need for these */
-#define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-                                       pgtable_t pgtable);
-
-#define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
-
 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                                 unsigned long end);
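With __HAVE_ARCH_PGTABLE_DEPOSIT and __HAVE_ARCH_PGTABLE_WITHDRAW gone, THP
falls back to the generic deposit/withdraw helpers in mm/pgtable-generic.c.
Those track a deposited table through its struct page's ->lru list, which is
exactly why they need pgtable_t to be a struct page *. The deposit side,
paraphrased from the generic code of that era (not part of this patch):

/* Generic pgtable_trans_huge_deposit(), paraphrased: the deposited
 * table is chained via its struct page's ->lru, so pgtable_t must be
 * a struct page * for this code to be reusable.
 */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(&pgtable->lru);
        else
                list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
        pmd_huge_pte(mm, pmdp) = pgtable;
}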
@@ -60,7 +60,7 @@ typedef struct {
 #define __pgprot(x)     ((pgprot_t) { (x) })
 #define pte_pgprot(x)   __pgprot(pte_val(x))
 
-typedef pte_t * pgtable_t;
+typedef struct page *pgtable_t;
 
 /*
  * Use virt_to_pfn with caution:
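In practice the typedef switch just changes which view of the table travels
through the pgalloc/THP interfaces; page_address() and virt_to_page() are
the standard helpers for moving between the two. An illustration only, not
from the patch (pgtable_views_sketch is a hypothetical name):

/* Converting between the two views now that pgtable_t is struct page * */
static void pgtable_views_sketch(struct mm_struct *mm)
{
        pgtable_t pg = pte_alloc_one(mm);               /* struct page *    */
        pte_t *ptes;
        struct page *back;

        if (!pg)
                return;

        ptes = (pte_t *)page_address(pg);               /* vaddr of entries */
        back = virt_to_page(ptes);                      /* back == pg       */
        (void)back;

        pte_free(mm, pg);                               /* undo ctor + free */
}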
@@ -45,22 +45,17 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
         set_pmd(pmd, __pmd((unsigned long)pte));
 }
 
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_page)
 {
-        set_pmd(pmd, __pmd((unsigned long)pte));
-}
-
-static inline int __get_order_pgd(void)
-{
-        return get_order(PTRS_PER_PGD * sizeof(pgd_t));
+        set_pmd(pmd, __pmd((unsigned long)page_address(pte_page)));
 }
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-        int num, num2;
-        pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
+        pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL);
 
         if (ret) {
+                int num, num2;
                 num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
                 memzero(ret, num * sizeof(pgd_t));
 
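With the address splits this series settles on, every table fits in one
page, so the order computed by __get_order_pgd() was a constant 0 and a
plain __get_free_page() suffices. A worked example (my arithmetic, assuming
32-bit virtual addresses and 4-byte table entries on ARC):

/* 4K pages, PGDIR_SHIFT = 22 (set later in this patch):
 *   PTRS_PER_PGD = 1 << (32 - 22)       = 1024 entries
 *   table size   = 1024 * sizeof(pgd_t) = 1024 * 4 = 4096 = PAGE_SIZE
 * 8K pages, PGDIR_SHIFT = 21:
 *   PTRS_PER_PGD = 1 << (32 - 21)       = 2048 entries
 *   table size   = 2048 * 4             = 8192 = PAGE_SIZE
 * Either way, exactly one page.
 */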
@@ -76,61 +71,43 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-        free_pages((unsigned long)pgd, __get_order_pgd());
-}
-
-/*
- * With software-only page-tables, addr-split for traversal is tweakable and
- * that directly governs how big tables would be at each level.
- * Further, the MMU page size is configurable.
- * Thus we need to programatically assert the size constraint
- * All of this is const math, allowing gcc to do constant folding/propagation.
- */
-
-static inline int __get_order_pte(void)
-{
-        return get_order(PTRS_PER_PTE * sizeof(pte_t));
+        free_page((unsigned long)pgd);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
         pte_t *pte;
 
-        pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                         __get_order_pte());
+        pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_ZERO);
 
         return pte;
 }
 
-static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-        pgtable_t pte_pg;
         struct page *page;
 
-        pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
-        if (!pte_pg)
-                return 0;
-        memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
-        page = virt_to_page(pte_pg);
+        page = (pgtable_t)alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
+        if (!page)
+                return NULL;
         if (!pgtable_pte_page_ctor(page)) {
                 __free_page(page);
-                return 0;
+                return NULL;
         }
 
-        return pte_pg;
+        return page;
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-        free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
+        free_page((unsigned long)pte);
 }
 
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte_page)
 {
-        pgtable_pte_page_dtor(virt_to_page(ptep));
-        free_pages((unsigned long)ptep, __get_order_pte());
+        pgtable_pte_page_dtor(pte_page);
+        __free_page(pte_page);
 }
 
 #define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)
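Beyond the type change, two details are worth noting in pte_alloc_one():
__GFP_ZERO replaces the open-coded memzero(), and __GFP_ACCOUNT newly
charges user page tables to the allocating cgroup. A simplified sketch
(hypothetical helper name, not verbatim mm/memory.c) of how generic code
consumes these helpers once pgtable_t is a struct page *:

static int pte_alloc_sketch(struct mm_struct *mm, pmd_t *pmd)
{
        pgtable_t new = pte_alloc_one(mm);      /* struct page *, or NULL */

        if (!new)
                return -ENOMEM;

        if (pmd_none(*pmd))
                pmd_populate(mm, pmd, new);     /* page_address() inside  */
        else
                pte_free(mm, new);              /* raced: dtor + free     */

        return 0;
}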
@@ -35,9 +35,15 @@
 #else
 /*
  * No Super page case
- * Default value provides 11:8:13 (8K), 11:9:12 (4K)
+ * Default value provides 11:8:13 (8K), 10:10:12 (4K)
+ * Limits imposed by pgtable_t only PAGE_SIZE long
+ * (so 4K page can only have 1K entries: or 10 bits)
  */
+#ifdef CONFIG_ARC_PAGE_SIZE_4K
+#define PGDIR_SHIFT     22
+#else
 #define PGDIR_SHIFT     21
+#endif
 
 #endif
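The splits quoted in the comment follow directly from the one-page limit.
My arithmetic, assuming 32-bit virtual addresses and 4-byte PTEs:

/* 4K pages: PAGE_SHIFT = 12, max PTE entries = 4096 / 4 = 1024 -> 10 bits
 *           PGDIR_SHIFT = 12 + 10 = 22, PGD gets 32 - 22 = 10 bits
 *           => 10:10:12 split
 * 8K pages: PAGE_SHIFT = 13, PGDIR_SHIFT stays 21
 *           PTE level: 21 - 13 = 8 bits (256 entries, well under the
 *           8192 / 4 = 2048 that would fit), PGD: 32 - 21 = 11 bits
 *           => 11:8:13 split
 */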
@@ -189,6 +189,9 @@ void __init mem_init(void)
 {
         memblock_free_all();
         highmem_init();
+
+        BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE);
+        BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE);
 }
 
 #ifdef CONFIG_HIGHMEM
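These BUILD_BUG_ON()s turn the "each table fits in one page" assumption into
a compile-time failure, so a bad PGDIR_SHIFT/PAGE_SIZE combination cannot
even build; both operands are constants, so there is no runtime cost. For
illustration, one classic shape of the macro (a sketch of the idea, not the
current include/linux/build_bug.h definition):

/* A negative array size is a compile error, so this fails the build
 * whenever cond evaluates to true at compile time.
 */
#define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))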
@@ -534,43 +534,6 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
         update_mmu_cache(vma, addr, &pte);
 }
 
-void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-                                pgtable_t pgtable)
-{
-        struct list_head *lh = (struct list_head *) pgtable;
-
-        assert_spin_locked(&mm->page_table_lock);
-
-        /* FIFO */
-        if (!pmd_huge_pte(mm, pmdp))
-                INIT_LIST_HEAD(lh);
-        else
-                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
-        pmd_huge_pte(mm, pmdp) = pgtable;
-}
-
-pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
-{
-        struct list_head *lh;
-        pgtable_t pgtable;
-
-        assert_spin_locked(&mm->page_table_lock);
-
-        pgtable = pmd_huge_pte(mm, pmdp);
-        lh = (struct list_head *) pgtable;
-        if (list_empty(lh))
-                pmd_huge_pte(mm, pmdp) = NULL;
-        else {
-                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
-                list_del(lh);
-        }
-
-        pte_val(pgtable[0]) = 0;
-        pte_val(pgtable[1]) = 0;
-
-        return pgtable;
-}
-
 void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                                unsigned long end)
 {
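The deleted ARC copies existed only because pgtable_t used to be a pte_t *:
they overlaid a struct list_head on the table memory itself, and so had to
clear pgtable[0]/pgtable[1] on withdraw. Their generic replacements chain
through page->lru instead; the withdraw side, paraphrased like the deposit
sketch earlier (not part of this patch):

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pgtable_t pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO: pop the head, promote the next deposited page, if any */
        pgtable = pmd_huge_pte(mm, pmdp);
        pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
                                                          struct page, lru);
        if (pmd_huge_pte(mm, pmdp))
                list_del(&pgtable->lru);
        return pgtable;
}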