mm: Add vmf_insert_pfn_xxx_prot() for huge page-table entries

For graphics drivers that need to modify the page protection, add
huge page-table entry counterparts to vmf_insert_pfn_prot().

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Thomas Hellstrom (VMware) <thomas_os@shipmail.org>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Author: Thomas Hellstrom (VMware), 2020-03-24 18:48:09 +01:00
Parent: 327e9fd489
Commit: 9a9731b18c
2 changed files with 71 additions and 8 deletions

--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h

@@ -47,8 +47,45 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, pgprot_t newprot,
 			int prot_numa);
-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
+
+vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
+				   pgprot_t pgprot, bool write);
+
+/**
+ * vmf_insert_pfn_pmd - insert a pmd size pfn
+ * @vmf: Structure describing the fault
+ * @pfn: pfn to insert
+ * @pgprot: page protection to use
+ * @write: whether it's a write fault
+ *
+ * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
+ *
+ * Return: vm_fault_t value.
+ */
+static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
+					    bool write)
+{
+	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
+}
+vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
+				   pgprot_t pgprot, bool write);
+
+/**
+ * vmf_insert_pfn_pud - insert a pud size pfn
+ * @vmf: Structure describing the fault
+ * @pfn: pfn to insert
+ * @pgprot: page protection to use
+ * @write: whether it's a write fault
+ *
+ * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
+ *
+ * Return: vm_fault_t value.
+ */
+static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
+					    bool write)
+{
+	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
+}
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,
 	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
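
The old entry points survive as static inline wrappers that pass vma->vm_page_prot, so existing callers keep compiling and behaving exactly as before; only callers that want a non-default protection switch to the _prot variants. Below is a minimal sketch, not part of the patch, of how a driver's PMD fault path might use the new declaration; my_gpu_pmd_fault() and my_gpu_bo_pfn() are hypothetical names, and write-combine is just one plausible protection choice:

#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/pfn_t.h>

static vm_fault_t my_gpu_pmd_fault(struct vm_fault *vmf)
{
	/* Hypothetical helper resolving the buffer object's pfn. */
	pfn_t pfn = pfn_to_pfn_t(my_gpu_bo_pfn(vmf));
	/* Insert write-combined rather than vma->vm_page_prot. */
	pgprot_t prot = pgprot_writecombine(vmf->vma->vm_page_prot);

	return vmf_insert_pfn_pmd_prot(vmf, pfn, prot,
				       vmf->flags & FAULT_FLAG_WRITE);
}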

--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c

@@ -824,11 +824,24 @@ out_unlock:
 	pte_free(mm, pgtable);
 }

-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
+/**
+ * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
+ * @vmf: Structure describing the fault
+ * @pfn: pfn to insert
+ * @pgprot: page protection to use
+ * @write: whether it's a write fault
+ *
+ * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
+ * also consult the vmf_insert_mixed_prot() documentation when
+ * @pgprot != @vmf->vma->vm_page_prot.
+ *
+ * Return: vm_fault_t value.
+ */
+vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
+				   pgprot_t pgprot, bool write)
 {
 	unsigned long addr = vmf->address & PMD_MASK;
 	struct vm_area_struct *vma = vmf->vma;
-	pgprot_t pgprot = vma->vm_page_prot;
 	pgtable_t pgtable = NULL;

 	/*
@@ -856,7 +869,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
 	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
 	return VM_FAULT_NOPAGE;
 }
-EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
+EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot);

 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
@@ -902,11 +915,24 @@ out_unlock:
 	spin_unlock(ptl);
 }

-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
+/**
+ * vmf_insert_pfn_pud_prot - insert a pud size pfn
+ * @vmf: Structure describing the fault
+ * @pfn: pfn to insert
+ * @pgprot: page protection to use
+ * @write: whether it's a write fault
+ *
+ * Insert a pud size pfn. See vmf_insert_pfn() for additional info and
+ * also consult the vmf_insert_mixed_prot() documentation when
+ * @pgprot != @vmf->vma->vm_page_prot.
+ *
+ * Return: vm_fault_t value.
+ */
+vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
+				   pgprot_t pgprot, bool write)
 {
 	unsigned long addr = vmf->address & PUD_MASK;
 	struct vm_area_struct *vma = vmf->vma;
-	pgprot_t pgprot = vma->vm_page_prot;

 	/*
 	 * If we had pud_special, we could avoid all these restrictions,
@@ -927,7 +953,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
 	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
 	return VM_FAULT_NOPAGE;
 }
-EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
+EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
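
As the #ifdef above shows, the PUD variant only exists when the architecture selects CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD, so a driver's huge_fault handler would typically dispatch on the fault size and fall back when a level is unavailable. A hedged sketch, reusing the hypothetical driver names from the earlier example:

static vm_fault_t my_gpu_huge_fault(struct vm_fault *vmf,
				    enum page_entry_size pe_size)
{
	pfn_t pfn = pfn_to_pfn_t(my_gpu_bo_pfn(vmf)); /* hypothetical helper */
	pgprot_t prot = pgprot_writecombine(vmf->vma->vm_page_prot);
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	switch (pe_size) {
	case PE_SIZE_PMD:
		return vmf_insert_pfn_pmd_prot(vmf, pfn, prot, write);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	case PE_SIZE_PUD:
		return vmf_insert_pfn_pud_prot(vmf, pfn, prot, write);
#endif
	default:
		return VM_FAULT_FALLBACK;	/* let the core map a PTE */
	}
}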