powerpc/mm: refactor definition of pgtable_cache[]
pgtable_cache[] is the same for the 4 subarches, let's make it common.

Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent: dc096864ba
Commit: e80789a3c1
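All four removed copies carry the same explanatory comment: a table's "index_size" is small enough to live in the low bits of an aligned page-table pointer, with MAX_PGTABLE_INDEX_SIZE doubling as the extraction mask, which is why it must be one less than a power of two. A minimal user-space sketch of that pack/unpack trick (pack_table and unpack_table are illustrative names, not kernel helpers):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_PGTABLE_INDEX_SIZE 0xf	/* mask: one less than a power of two */

/* Illustrative only: stash index_size in the pointer's low zero bits. */
static void *pack_table(void *table, unsigned int index_size)
{
	unsigned long pgf = (unsigned long)table;

	/* alignment guarantees the low bits are free */
	assert((pgf & MAX_PGTABLE_INDEX_SIZE) == 0);
	assert(index_size <= MAX_PGTABLE_INDEX_SIZE);
	return (void *)(pgf | index_size);
}

static void unpack_table(void *packed, void **table, unsigned int *index_size)
{
	*index_size = (unsigned long)packed & MAX_PGTABLE_INDEX_SIZE;
	*table = (void *)((unsigned long)packed & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE);
}

int main(void)
{
	/* a "page table" of 2^3 pointers, aligned so its low bits are zero */
	unsigned int index_size = 3;
	void *table = aligned_alloc(MAX_PGTABLE_INDEX_SIZE + 1,
				    (1UL << index_size) * sizeof(void *));
	void *t;
	unsigned int sz;

	unpack_table(pack_table(table, index_size), &t, &sz);
	printf("table=%p index_size=%u\n", t, sz);
	assert(t == table && sz == index_size);
	free(table);
	return 0;
}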
arch/powerpc/include/asm/book3s/32/pgalloc.h

@@ -5,26 +5,6 @@
 #include <linux/threads.h>
 #include <linux/slab.h>
 
-/*
- * Functions that deal with pagetables that could be at any level of
- * the table need to be passed an "index_size" so they know how to
- * handle allocation. For PTE pages (which are linked to a struct
- * page for now, and drawn from the main get_free_pages() pool), the
- * allocation size will be (2^index_size * sizeof(pointer)) and
- * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
- *
- * The maximum index size needs to be big enough to allow any
- * pagetable sizes we need, but small enough to fit in the low bits of
- * any page table pointer. In other words all pagetables, even tiny
- * ones, must be aligned to allow at least enough low 0 bits to
- * contain this value. This value is also used as a mask, so it must
- * be one less than a power of two.
- */
-#define MAX_PGTABLE_INDEX_SIZE	0xf
-
-extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) pgtable_cache[shift]
-
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
@@ -69,7 +49,6 @@ static inline void pgtable_free(void *table, unsigned index_size)
 	}
 }
 
-#define check_pgt_cache()	do { } while (0)
 #define get_hugepd_cache_index(x)	(x)
 
 #ifdef CONFIG_SMP
arch/powerpc/include/asm/book3s/64/pgalloc.h

@@ -19,26 +19,6 @@ struct vmemmap_backing {
 };
 extern struct vmemmap_backing *vmemmap_list;
 
-/*
- * Functions that deal with pagetables that could be at any level of
- * the table need to be passed an "index_size" so they know how to
- * handle allocation. For PTE pages (which are linked to a struct
- * page for now, and drawn from the main get_free_pages() pool), the
- * allocation size will be (2^index_size * sizeof(pointer)) and
- * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
- *
- * The maximum index size needs to be big enough to allow any
- * pagetable sizes we need, but small enough to fit in the low bits of
- * any page table pointer. In other words all pagetables, even tiny
- * ones, must be aligned to allow at least enough low 0 bits to
- * contain this value. This value is also used as a mask, so it must
- * be one less than a power of two.
- */
-#define MAX_PGTABLE_INDEX_SIZE	0xf
-
-extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) pgtable_cache[shift]
-
 extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
 extern void pmd_fragment_free(unsigned long *);
 extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
@@ -199,8 +179,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 	pgtable_free_tlb(tlb, table, PTE_INDEX);
 }
 
-#define check_pgt_cache()	do { } while (0)
-
 extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
 static inline void update_page_count(int psize, long count)
 {
arch/powerpc/include/asm/nohash/32/pgalloc.h

@@ -5,26 +5,6 @@
 #include <linux/threads.h>
 #include <linux/slab.h>
 
-/*
- * Functions that deal with pagetables that could be at any level of
- * the table need to be passed an "index_size" so they know how to
- * handle allocation. For PTE pages (which are linked to a struct
- * page for now, and drawn from the main get_free_pages() pool), the
- * allocation size will be (2^index_size * sizeof(pointer)) and
- * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
- *
- * The maximum index size needs to be big enough to allow any
- * pagetable sizes we need, but small enough to fit in the low bits of
- * any page table pointer. In other words all pagetables, even tiny
- * ones, must be aligned to allow at least enough low 0 bits to
- * contain this value. This value is also used as a mask, so it must
- * be one less than a power of two.
- */
-#define MAX_PGTABLE_INDEX_SIZE	0xf
-
-extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) pgtable_cache[shift]
-
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
@@ -87,7 +67,6 @@ static inline void pgtable_free(void *table, unsigned index_size)
 	}
 }
 
-#define check_pgt_cache()	do { } while (0)
 #define get_hugepd_cache_index(x)	(x)
 
 #ifdef CONFIG_SMP
arch/powerpc/include/asm/nohash/64/pgalloc.h

@@ -18,26 +18,6 @@ struct vmemmap_backing {
 };
 extern struct vmemmap_backing *vmemmap_list;
 
-/*
- * Functions that deal with pagetables that could be at any level of
- * the table need to be passed an "index_size" so they know how to
- * handle allocation. For PTE pages (which are linked to a struct
- * page for now, and drawn from the main get_free_pages() pool), the
- * allocation size will be (2^index_size * sizeof(pointer)) and
- * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
- *
- * The maximum index size needs to be big enough to allow any
- * pagetable sizes we need, but small enough to fit in the low bits of
- * any page table pointer. In other words all pagetables, even tiny
- * ones, must be aligned to allow at least enough low 0 bits to
- * contain this value. This value is also used as a mask, so it must
- * be one less than a power of two.
- */
-#define MAX_PGTABLE_INDEX_SIZE	0xf
-
-extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) pgtable_cache[shift]
-
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
@@ -140,6 +120,4 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 #define __pud_free_tlb(tlb, pud, addr)	\
 	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
 
-#define check_pgt_cache()	do { } while (0)
-
 #endif /* _ASM_POWERPC_PGALLOC_64_H */
arch/powerpc/include/asm/pgalloc.h

@@ -45,6 +45,27 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 	pte_fragment_free((unsigned long *)ptepage, 0);
 }
 
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation. For PTE pages, the allocation size will be
+ * (2^index_size * sizeof(pointer)) and allocations are drawn from
+ * the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer. In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value. This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE	0xf
+
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) pgtable_cache[shift]
+
+static inline void check_pgt_cache(void) { }
+
 #ifdef CONFIG_PPC_BOOK3S
 #include <asm/book3s/pgalloc.h>
 #else
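With the array defined once, every slot keeps the contract the comment states: PGT_CACHE(shift) serves tables of 2^shift * sizeof(pointer) bytes, aligned so the low bits stay free for the index mask. A small sketch of the sizing rule this implies, one cache per populated index (the minimum-alignment rule here is an assumption for illustration; in the kernel each slot is backed by a kmem_cache):

#include <stdio.h>

#define MAX_PGTABLE_INDEX_SIZE 0xf

int main(void)
{
	/* Index 0 is the PTE-page case drawn from get_free_pages(), not a cache. */
	for (unsigned int shift = 1; shift <= MAX_PGTABLE_INDEX_SIZE; shift++) {
		unsigned long size = (1UL << shift) * sizeof(void *);
		/* assumed rule: at least mask-width alignment so low bits are zero */
		unsigned long align = size < MAX_PGTABLE_INDEX_SIZE + 1 ?
				      MAX_PGTABLE_INDEX_SIZE + 1 : size;

		printf("PGT_CACHE(%2u): %6lu-byte tables, align >= %lu\n",
		       shift, size, align);
	}
	return 0;
}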