powerpc/mm: Add radix support for hugetlb
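With radix we want the generic hugetlb code to do most of the work. This adds the radix-specific pieces: a new asm/book3s/64/hugetlb-radix.h declaring radix__flush_hugetlb_page(), radix__local_flush_hugetlb_page() and radix__hugetlb_get_unmapped_area(), and a new hugetlbpage-radix.c implementing them for 2M and 1G pages with a top-down unmapped-area search. The generic hugetlb_get_unmapped_area() and the radix TLB flush paths dispatch to these helpers when radix_enabled(), hugetlbpage_init() no longer requires MMU_FTR_16M_PAGE on radix, and HPAGE_SHIFT falls back to the 2M page size.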
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent: 2f5f0dfd1e
Commit: 484837601d
@@ -0,0 +1,14 @@
+#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_RADIX_H
+#define _ASM_POWERPC_BOOK3S_64_HUGETLB_RADIX_H
+/*
+ * For radix we want generic code to handle hugetlb. But then if we want
+ * both hash and radix to be enabled together we need to workaround the
+ * limitations.
+ */
+void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern unsigned long
+radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+				 unsigned long len, unsigned long pgoff,
+				 unsigned long flags);
+#endif
@@ -8,6 +8,8 @@
 extern struct kmem_cache *hugepte_cache;
 
 #ifdef CONFIG_PPC_BOOK3S_64
+
+#include <asm/book3s/64/hugetlb-radix.h>
 /*
  * This should work for other subarchs too. But right now we use the
  * new format only for 64bit book3s
@@ -31,7 +33,19 @@ static inline unsigned int hugepd_shift(hugepd_t hpd)
 {
 	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
 }
-
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+				      unsigned long vmaddr)
+{
+	if (radix_enabled())
+		return radix__flush_hugetlb_page(vma, vmaddr);
+}
+
+static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
+					      unsigned long vmaddr)
+{
+	if (radix_enabled())
+		return radix__local_flush_hugetlb_page(vma, vmaddr);
+}
 #else
 
 static inline pte_t *hugepd_page(hugepd_t hpd)
@@ -34,6 +34,7 @@ obj-$(CONFIG_PPC_MM_SLICES) += slice.o
 obj-y += hugetlbpage.o
 ifeq ($(CONFIG_HUGETLB_PAGE),y)
 obj-$(CONFIG_PPC_STD_MMU_64) += hugetlbpage-hash64.o
+obj-$(CONFIG_PPC_RADIX_MMU) += hugetlbpage-radix.o
 obj-$(CONFIG_PPC_BOOK3E_MMU) += hugetlbpage-book3e.o
 endif
 obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hugepage-hash64.o
@ -0,0 +1,87 @@
|
||||||
|
#include <linux/mm.h>
|
||||||
|
#include <linux/hugetlb.h>
|
||||||
|
#include <asm/pgtable.h>
|
||||||
|
#include <asm/pgalloc.h>
|
||||||
|
#include <asm/cacheflush.h>
|
||||||
|
#include <asm/machdep.h>
|
||||||
|
#include <asm/mman.h>
|
||||||
|
|
||||||
|
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
|
||||||
|
{
|
||||||
|
unsigned long ap, shift;
|
||||||
|
struct hstate *hstate = hstate_file(vma->vm_file);
|
||||||
|
|
||||||
|
shift = huge_page_shift(hstate);
|
||||||
|
if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
|
||||||
|
ap = mmu_get_ap(MMU_PAGE_2M);
|
||||||
|
else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
|
||||||
|
ap = mmu_get_ap(MMU_PAGE_1G);
|
||||||
|
else {
|
||||||
|
WARN(1, "Wrong huge page shift\n");
|
||||||
|
return ;
|
||||||
|
}
|
||||||
|
radix___flush_tlb_page(vma->vm_mm, vmaddr, ap, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
|
||||||
|
{
|
||||||
|
unsigned long ap, shift;
|
||||||
|
struct hstate *hstate = hstate_file(vma->vm_file);
|
||||||
|
|
||||||
|
shift = huge_page_shift(hstate);
|
||||||
|
if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
|
||||||
|
ap = mmu_get_ap(MMU_PAGE_2M);
|
||||||
|
else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
|
||||||
|
ap = mmu_get_ap(MMU_PAGE_1G);
|
||||||
|
else {
|
||||||
|
WARN(1, "Wrong huge page shift\n");
|
||||||
|
return ;
|
||||||
|
}
|
||||||
|
radix___local_flush_tlb_page(vma->vm_mm, vmaddr, ap, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* A vairant of hugetlb_get_unmapped_area doing topdown search
|
||||||
|
* FIXME!! should we do as x86 does or non hugetlb area does ?
|
||||||
|
* ie, use topdown or not based on mmap_is_legacy check ?
|
||||||
|
*/
|
||||||
|
unsigned long
|
||||||
|
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
|
||||||
|
unsigned long len, unsigned long pgoff,
|
||||||
|
unsigned long flags)
|
||||||
|
{
|
||||||
|
struct mm_struct *mm = current->mm;
|
||||||
|
struct vm_area_struct *vma;
|
||||||
|
struct hstate *h = hstate_file(file);
|
||||||
|
struct vm_unmapped_area_info info;
|
||||||
|
|
||||||
|
if (len & ~huge_page_mask(h))
|
||||||
|
return -EINVAL;
|
||||||
|
if (len > TASK_SIZE)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
if (flags & MAP_FIXED) {
|
||||||
|
if (prepare_hugepage_range(file, addr, len))
|
||||||
|
return -EINVAL;
|
||||||
|
return addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (addr) {
|
||||||
|
addr = ALIGN(addr, huge_page_size(h));
|
||||||
|
vma = find_vma(mm, addr);
|
||||||
|
if (TASK_SIZE - len >= addr &&
|
||||||
|
(!vma || addr + len <= vma->vm_start))
|
||||||
|
return addr;
|
||||||
|
}
|
||||||
|
/*
|
||||||
|
* We are always doing an topdown search here. Slice code
|
||||||
|
* does that too.
|
||||||
|
*/
|
||||||
|
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
|
||||||
|
info.length = len;
|
||||||
|
info.low_limit = PAGE_SIZE;
|
||||||
|
info.high_limit = current->mm->mmap_base;
|
||||||
|
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
|
||||||
|
info.align_offset = 0;
|
||||||
|
return vm_unmapped_area(&info);
|
||||||
|
}
|
|
@@ -711,6 +711,9 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	struct hstate *hstate = hstate_file(file);
 	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
 
+	if (radix_enabled())
+		return radix__hugetlb_get_unmapped_area(file, addr, len,
+						       pgoff, flags);
 	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
 }
 #endif
@@ -823,7 +826,7 @@ static int __init hugetlbpage_init(void)
 {
 	int psize;
 
-	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
+	if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
 		return -ENODEV;
 
 	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
@@ -863,6 +866,9 @@ static int __init hugetlbpage_init(void)
 		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
 	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
 		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
+	else if (mmu_psize_defs[MMU_PAGE_2M].shift)
+		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
+
 
 	return 0;
 }
@@ -141,6 +141,11 @@ void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 
 void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
+#ifdef CONFIG_HUGETLB_PAGE
+	/* need the return fix for nohash.c */
+	if (vma && is_vm_hugetlb_page(vma))
+		return __local_flush_hugetlb_page(vma, vmaddr);
+#endif
 	radix___local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
 			       mmu_get_ap(mmu_virtual_psize), 0);
 }
@@ -202,6 +207,10 @@ bail:
 
 void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
+#ifdef CONFIG_HUGETLB_PAGE
+	if (vma && is_vm_hugetlb_page(vma))
+		return flush_hugetlb_page(vma, vmaddr);
+#endif
 	radix___flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
 			       mmu_get_ap(mmu_virtual_psize), 0);
 }
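For reference, a minimal user-space sketch, assuming a radix-enabled kernel with this patch, a 2M default huge page size, and huge pages reserved (e.g. via /proc/sys/vm/nr_hugepages), that exercises the new unmapped-area path through an anonymous MAP_HUGETLB mapping:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define LENGTH (2UL * 1024 * 1024)	/* one 2M huge page (assumed default size) */

int main(void)
{
	/*
	 * addr == NULL lets the kernel choose the placement; for hugetlb
	 * mappings that goes through hugetlb_get_unmapped_area(), which on
	 * a radix MMU now dispatches to radix__hugetlb_get_unmapped_area().
	 */
	void *p = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(p, 0xaa, LENGTH);	/* touch the mapping so the huge page is faulted in */
	printf("huge page mapped at %p\n", p);
	munmap(p, LENGTH);
	return 0;
}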