ARC: mm: switch pgtable_t back to struct page *

So far, ARC's pgtable_t has not been struct page based, in order to avoid the
extra page_address() calls involved. However, the difference is down to noise
and gets in the way of using generic code, hence this patch.

This also allows us to reuse the generic THP deposit/withdraw code.

There is an additional consideration for PGDIR_SHIFT in the 4K page config:
since page tables are now only PAGE_SIZE deep, the address split can no
longer be fully arbitrary.
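
For illustration, a condensed sketch of the tradeoff the first paragraph refers to, drawn from this commit's pgalloc hunk (function names suffixed _before/_after here purely for side-by-side readability):

/* before: pgtable_t == pte_t *, the table's kernel virtual address */
static inline void pmd_populate_before(struct mm_struct *mm, pmd_t *pmd,
				       pgtable_t pte)
{
	set_pmd(pmd, __pmd((unsigned long)pte));
}

/* after: pgtable_t == struct page *, one extra page_address() hop */
static inline void pmd_populate_after(struct mm_struct *mm, pmd_t *pmd,
				      pgtable_t pte_page)
{
	set_pmd(pmd, __pmd((unsigned long)page_address(pte_page)));
}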

Tested-by: kernel test robot <lkp@intel.com>
Suggested-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Vineet Gupta <vgupta@kernel.org>
Vineet Gupta 2021-08-12 12:54:43 -07:00
Parent f35534a2bc
Commit d9820ff76f
6 changed files with 28 additions and 87 deletions

View file

@@ -58,14 +58,6 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
 				 pmd_t *pmd);
 
-/* Generic variants assume pgtable_t is struct page *, hence need for these */
-#define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-				       pgtable_t pgtable);
-
-#define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
-
 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 				unsigned long end);
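
With __HAVE_ARCH_PGTABLE_DEPOSIT gone, ARC picks up the generic deposit from mm/pgtable-generic.c, which chains spare page tables through struct page's lru field; this only works because pgtable_t is now a struct page *. Lightly abridged from the generic code of this era:

/* Generic variant in mm/pgtable-generic.c (lightly abridged) */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO: spare tables are chained through struct page's lru field */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}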

View file

@@ -60,7 +60,7 @@ typedef struct {
 #define __pgprot(x)	((pgprot_t) { (x) })
 #define pte_pgprot(x)	__pgprot(pte_val(x))
 
-typedef pte_t * pgtable_t;
+typedef struct page *pgtable_t;
 
 /*
  * Use virt_to_pfn with caution:
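
This typedef flip is what forces the page_address()/virt_to_page() pairs in the pgalloc hunk below; for lowmem page tables the two views round-trip cleanly. A minimal sketch (pgtable_views() is illustrative, not from the patch):

/* Illustrative only: the two views of one page table interconvert */
static void pgtable_views(pte_t *pte_va)
{
	pgtable_t pg = virt_to_page(pte_va);	/* kernel VA -> struct page * */
	pte_t *va = page_address(pg);		/* struct page * -> kernel VA */

	WARN_ON(va != pte_va);	/* round-trips for lowmem page tables */
}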

View file

@@ -45,22 +45,17 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 	set_pmd(pmd, __pmd((unsigned long)pte));
 }
 
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_page)
 {
-	set_pmd(pmd, __pmd((unsigned long)pte));
-}
-
-static inline int __get_order_pgd(void)
-{
-	return get_order(PTRS_PER_PGD * sizeof(pgd_t));
+	set_pmd(pmd, __pmd((unsigned long)page_address(pte_page)));
 }
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	int num, num2;
-	pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
+	pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL);
 
 	if (ret) {
+		int num, num2;
 		num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
 		memzero(ret, num * sizeof(pgd_t));
@@ -76,61 +71,43 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	free_pages((unsigned long)pgd, __get_order_pgd());
-}
-
-/*
- * With software-only page-tables, addr-split for traversal is tweakable and
- * that directly governs how big tables would be at each level.
- * Further, the MMU page size is configurable.
- * Thus we need to programatically assert the size constraint
- * All of this is const math, allowing gcc to do constant folding/propagation.
- */
-
-static inline int __get_order_pte(void)
-{
-	return get_order(PTRS_PER_PTE * sizeof(pte_t));
+	free_page((unsigned long)pgd);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
 	pte_t *pte;
 
-	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					 __get_order_pte());
+	pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_ZERO);
 
 	return pte;
 }
 
-static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-	pgtable_t pte_pg;
 	struct page *page;
 
-	pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
-	if (!pte_pg)
-		return 0;
-	memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
-	page = virt_to_page(pte_pg);
+	page = (pgtable_t)alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
+	if (!page)
+		return NULL;
 	if (!pgtable_pte_page_ctor(page)) {
 		__free_page(page);
-		return 0;
+		return NULL;
 	}
 
-	return pte_pg;
+	return page;
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
+	free_page((unsigned long)pte);
 }
 
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte_page)
 {
-	pgtable_pte_page_dtor(virt_to_page(ptep));
-	free_pages((unsigned long)ptep, __get_order_pte());
+	pgtable_pte_page_dtor(pte_page);
+	__free_page(pte_page);
 }
 
 #define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)
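
A hypothetical caller (example_map_pte_table() is illustrative, not from the patch) showing how the reworked allocator pairs up: pte_alloc_one() hands back a ctor-initialized page, pmd_populate() installs its kernel address, and pte_free() undoes both the ctor and the allocation:

/* Hypothetical caller, not from the patch: alloc/populate lifecycle */
static int example_map_pte_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t pg = pte_alloc_one(mm);	/* alloc_page + pgtable_pte_page_ctor */

	if (!pg)
		return -ENOMEM;

	pmd_populate(mm, pmd, pg);	/* installs page_address(pg) into the pmd */
	return 0;			/* teardown would call pte_free(mm, pg) */
}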

View file

@@ -35,9 +35,15 @@
 #else
 /*
  * No Super page case
- * Default value provides 11:8:13 (8K), 11:9:12 (4K)
+ * Default value provides 11:8:13 (8K), 10:10:12 (4K)
+ * Limits imposed by pgtable_t only PAGE_SIZE long
+ * (so 4K page can only have 1K entries: or 10 bits)
  */
+#ifdef CONFIG_ARC_PAGE_SIZE_4K
+#define PGDIR_SHIFT		22
+#else
 #define PGDIR_SHIFT		21
+#endif
 
 #endif
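
The a:b:c triples in the comment are the (PGD : PTE : page-offset) bit split of a 32-bit virtual address, and the new constraint is that each table fit the single page its allocator now hands out. Spelled out, assuming 4-byte table entries:

8K pages, PGDIR_SHIFT = 21  ->  11:8:13
  PTRS_PER_PGD = 1 << (32 - 21) = 2048 entries * 4B = 8K  == PAGE_SIZE
  PTRS_PER_PTE = 1 << (21 - 13) =  256 entries * 4B = 1K  <= PAGE_SIZE

4K pages, PGDIR_SHIFT = 22  ->  10:10:12
  PTRS_PER_PGD = 1 << (32 - 22) = 1024 entries * 4B = 4K  == PAGE_SIZE
  PTRS_PER_PTE = 1 << (22 - 12) = 1024 entries * 4B = 4K  == PAGE_SIZE

Old 4K default (11:9:12, PGDIR_SHIFT = 21) would have needed an 8K PGD:
  1 << (32 - 21) = 2048 entries * 4B = 8K  >  4K PAGE_SIZE

These are exactly the conditions the BUILD_BUG_ON pair in the mem_init() hunk below checks at compile time.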

View file

@@ -189,6 +189,9 @@ void __init mem_init(void)
 {
 	memblock_free_all();
 	highmem_init();
+
+	BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE);
+	BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE);
 }
 
 #ifdef CONFIG_HIGHMEM

View file

@@ -534,43 +534,6 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
 	update_mmu_cache(vma, addr, &pte);
 }
 
-void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-				pgtable_t pgtable)
-{
-	struct list_head *lh = (struct list_head *) pgtable;
-
-	assert_spin_locked(&mm->page_table_lock);
-
-	/* FIFO */
-	if (!pmd_huge_pte(mm, pmdp))
-		INIT_LIST_HEAD(lh);
-	else
-		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
-	pmd_huge_pte(mm, pmdp) = pgtable;
-}
-
-pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
-{
-	struct list_head *lh;
-	pgtable_t pgtable;
-
-	assert_spin_locked(&mm->page_table_lock);
-
-	/* FIFO */
-	pgtable = pmd_huge_pte(mm, pmdp);
-	lh = (struct list_head *) pgtable;
-	if (list_empty(lh))
-		pmd_huge_pte(mm, pmdp) = NULL;
-	else {
-		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
-		list_del(lh);
-	}
-
-	pte_val(pgtable[0]) = 0;
-	pte_val(pgtable[1]) = 0;
-
-	return pgtable;
-}
-
 void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			       unsigned long end)
 {
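
These ARC-private copies had to overlay a list_head onto the page-table memory itself, which is why the withdraw path scrubbed pte_val(pgtable[0]) and pgtable[1] on the way out. The generic replacement in mm/pgtable-generic.c threads the FIFO through struct page's lru field instead, leaving the table contents untouched. Lightly abridged from the generic code of this era:

/* Generic variant in mm/pgtable-generic.c (lightly abridged) */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO: unchain the most recently deposited table */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);

	return pgtable;
}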