paravirt: add an "mm" argument to alloc_pt
It's useful to know which mm is allocating a pagetable. Xen uses this to determine whether the pagetable being added to is pinned or not.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Parent: 810bab448e
Commit: fdb4c338c8
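The point of the new argument shows up in the hook it feeds: a backend such as Xen keeps some pagetables pinned (registered read-only with the hypervisor), so a freshly allocated pte page may need different treatment depending on whether the mm it will be installed into is pinned. Below is a minimal, hypothetical sketch of a backend hook with the new signature; pagetable_is_pinned() and pin_pte_page() are made-up helpers used only for illustration and are not part of this patch.

/*
 * Hypothetical backend hook, sketched against the new interface.
 * pagetable_is_pinned() and pin_pte_page() are assumed helpers;
 * they do not exist in this patch or in the kernel tree.
 */
static void example_alloc_pt(struct mm_struct *mm, u32 pfn)
{
	if (pagetable_is_pinned(mm)) {
		/* pinned mm: prepare the new pte page (e.g. make it
		 * read-only) before it is wired into the pagetable */
		pin_pte_page(pfn);
	}
	/* unpinned mm: nothing to do until the pagetable is pinned */
}

A backend would install a function with this signature through the alloc_pt field of struct paravirt_ops, which is exactly the field whose prototype changes below.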
@@ -362,7 +362,7 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
 }
 #endif
 
-static void vmi_allocate_pt(u32 pfn)
+static void vmi_allocate_pt(struct mm_struct *mm, u32 pfn)
 {
 	vmi_set_page_type(pfn, VMI_PAGE_L1);
 	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
@@ -87,7 +87,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
 		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
 
-		paravirt_alloc_pt(__pa(page_table) >> PAGE_SHIFT);
+		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
 		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
 	}
@@ -60,7 +60,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 	address = __pa(address);
 	addr = address & LARGE_PAGE_MASK;
 	pbase = (pte_t *)page_address(base);
-	paravirt_alloc_pt(page_to_pfn(base));
+	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
 		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
 					   addr == address ? prot : ref_prot));
@@ -173,7 +173,7 @@ struct paravirt_ops
 			   unsigned long va);
 
 	/* Hooks for allocating/releasing pagetable pages */
-	void (*alloc_pt)(u32 pfn);
+	void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
 	void (*alloc_pd)(u32 pfn);
 	void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
 	void (*release_pt)(u32 pfn);
@@ -725,9 +725,9 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va);
 }
 
-static inline void paravirt_alloc_pt(unsigned pfn)
+static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
 {
-	PVOP_VCALL1(alloc_pt, pfn);
+	PVOP_VCALL2(alloc_pt, mm, pfn);
 }
 static inline void paravirt_release_pt(unsigned pfn)
 {
@@ -7,7 +7,7 @@
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define paravirt_alloc_pt(pfn) do { } while (0)
+#define paravirt_alloc_pt(mm, pfn) do { } while (0)
 #define paravirt_alloc_pd(pfn) do { } while (0)
 #define paravirt_alloc_pd(pfn) do { } while (0)
 #define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
@@ -17,13 +17,13 @@
 
 #define pmd_populate_kernel(mm, pmd, pte)			\
 do {								\
-	paravirt_alloc_pt(__pa(pte) >> PAGE_SHIFT);		\
+	paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);		\
 	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)));		\
 } while (0)
 
 #define pmd_populate(mm, pmd, pte)				\
 do {								\
-	paravirt_alloc_pt(page_to_pfn(pte));			\
+	paravirt_alloc_pt(mm, page_to_pfn(pte));		\
 	set_pmd(pmd, __pmd(_PAGE_TABLE +			\
 		((unsigned long long)page_to_pfn(pte) <<	\
 			(unsigned long long) PAGE_SHIFT)));	\