From 814f91bf3ea0962e4f802324766bf301ef6f5431 Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Thu, 2 Feb 2017 01:21:35 +0000
Subject: [PATCH 1/5] MIPS: Move pgd_alloc() out of header
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

pgd_alloc() references init_mm, which is not exported to modules. In
order for KVM to be able to use pgd_alloc() to allocate GVA page
tables, move pgd_alloc() into a new pgtable.c file and export it to
modules.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/include/asm/pgalloc.h | 16 +---------------
 arch/mips/mm/Makefile           |  2 +-
 arch/mips/mm/pgtable.c          | 25 +++++++++++++++++++++++++
 3 files changed, 27 insertions(+), 16 deletions(-)
 create mode 100644 arch/mips/mm/pgtable.c

diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index a03e86969f78..a8705f6c8180 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -43,21 +43,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
  * Initialize a new pgd / pmd table with invalid pointers.
  */
 extern void pgd_init(unsigned long page);
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	pgd_t *ret, *init;
-
-	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
-	if (ret) {
-		init = pgd_offset(&init_mm, 0UL);
-		pgd_init((unsigned long)ret);
-		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
-		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-	}
-
-	return ret;
-}
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index b4c64bd3f723..b4cc8811a664 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -4,7 +4,7 @@
 
 obj-y				+= cache.o dma-default.o extable.o fault.o \
 				   gup.o init.o mmap.o page.o page-funcs.o \
-				   tlbex.o tlbex-fault.o tlb-funcs.o
+				   pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o
 
 ifdef CONFIG_CPU_MICROMIPS
 obj-y				+= uasm-micromips.o
diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c
new file mode 100644
index 000000000000..05560b042d82
--- /dev/null
+++ b/arch/mips/mm/pgtable.c
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <asm/pgalloc.h>
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *ret, *init;
+
+	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+	if (ret) {
+		init = pgd_offset(&init_mm, 0UL);
+		pgd_init((unsigned long)ret);
+		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pgd_alloc);
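What this buys a module: with pgd_alloc() exported, GPL code outside the
core kernel can allocate a page table root whose kernel half is already
populated from init_mm. A minimal sketch, hypothetical module code rather
than anything in this series (note the mm argument is unused by the MIPS
implementation above, so NULL is tolerated here):

	#include <linux/module.h>
	#include <asm/pgalloc.h>

	static pgd_t *gva_pgd;

	static int __init gva_demo_init(void)
	{
		/* pgd_alloc() copies init_mm's kernel half into the new pgd */
		gva_pgd = pgd_alloc(NULL);
		return gva_pgd ? 0 : -ENOMEM;
	}

	static void __exit gva_demo_exit(void)
	{
		pgd_free(NULL, gva_pgd);	/* frees the PGD_ORDER pages */
	}

	module_init(gva_demo_init);
	module_exit(gva_demo_exit);
	MODULE_LICENSE("GPL");
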
From ccf015166ddbbd4c43a16d6871ea15f1fcf51ccc Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Fri, 16 Oct 2015 16:33:13 +0100
Subject: [PATCH 2/5] MIPS: Export pgd/pmd symbols for KVM
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Export pmd_init(), invalid_pmd_table and tlbmiss_handler_setup_pgd to
GPL kernel modules so that MIPS KVM can use the inline page table
management functions and switch between page tables:

- pmd_init() will be used directly by KVM to initialise newly allocated
  pmd tables with invalid lower level table pointers.

- invalid_pmd_table is used by pud_present(), pud_none(), and
  pud_clear(), which KVM will use to test and clear pud entries.

- tlbmiss_handler_setup_pgd() will be called by KVM entry code to
  switch to the appropriate GVA page tables.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/mm/init.c       | 1 +
 arch/mips/mm/pgtable-64.c | 2 ++
 arch/mips/mm/tlbex.c      | 5 ++++-
 3 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index e86ebcf5c071..653569bc0da7 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -538,5 +538,6 @@ unsigned long pgd_current[NR_CPUS];
 pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
 #ifndef __PAGETABLE_PMD_FOLDED
 pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+EXPORT_SYMBOL_GPL(invalid_pmd_table);
 #endif
 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index ce4473e7c0d2..0ae7b28b4db5 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -6,6 +6,7 @@
  * Copyright (C) 1999, 2000 by Silicon Graphics
  * Copyright (C) 2003 by Ralf Baechle
  */
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <asm/fixmap.h>
@@ -60,6 +61,7 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
 		p[-1] = pagetable;
 	} while (p != end);
 }
+EXPORT_SYMBOL_GPL(pmd_init);
 #endif
 
 pmd_t mk_pmd(struct page *page, pgprot_t prot)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 55ce39606cb8..dc7bb1506103 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -22,6 +22,7 @@
  */
 
 #include <linux/bug.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/smp.h>
@@ -1536,7 +1537,9 @@ static void build_loongson3_tlb_refill_handler(void)
 extern u32 handle_tlbl[], handle_tlbl_end[];
 extern u32 handle_tlbs[], handle_tlbs_end[];
 extern u32 handle_tlbm[], handle_tlbm_end[];
-extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[];
+extern u32 tlbmiss_handler_setup_pgd_start[];
+extern u32 tlbmiss_handler_setup_pgd[];
+EXPORT_SYMBOL_GPL(tlbmiss_handler_setup_pgd);
 extern u32 tlbmiss_handler_setup_pgd_end[];
 
 static void build_setup_pgd(void)
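Taken together these symbols let a module build its own GVA tables:
pmd_init() fills a fresh pmd page, invalid_pmd_table backs the
pud_present()/pud_none()/pud_clear() tests, and tlbmiss_handler_setup_pgd
is invoked from generated entry code to switch roots. A hedged sketch of
the allocation side, assuming a 64-bit configuration; the helper name is
hypothetical and PMD_ORDER/invalid_pte_table come from the MIPS headers:

	#include <linux/mm.h>
	#include <asm/pgalloc.h>

	#ifndef __PAGETABLE_PMD_FOLDED
	/* hypothetical helper: hang a fresh, invalid pmd table off a pud */
	static pmd_t *alloc_gva_pmd(pud_t *pud)
	{
		pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);

		if (!pmd)
			return NULL;
		/* newly exported: point every entry at the invalid pte table */
		pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
		pud_populate(NULL, pud, pmd);	/* pud_* tests now see it */
		return pmd;
	}
	#endif
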
From 93a93c2461f9416a58c7d1e32fec201af8cc3aad Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Sat, 10 Sep 2016 23:53:57 +0100
Subject: [PATCH 3/5] MIPS: uasm: Add include guards in asm/uasm.h
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add include guards in asm/uasm.h so that it can safely be included by a
new header, asm/tlbex.h, in the next patch, which exposes TLB exception
building functions for KVM to use.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/include/asm/uasm.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index f7929f65f7ca..e9a9e2ade1d2 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -9,6 +9,9 @@
  * Copyright (C) 2012, 2013 MIPS Technologies, Inc.  All rights reserved.
  */
 
+#ifndef __ASM_UASM_H
+#define __ASM_UASM_H
+
 #include <linux/types.h>
 
 #ifdef CONFIG_EXPORT_UASM
@@ -309,3 +312,5 @@ void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
 void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
 		 unsigned int reg2, int lid);
 void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+
+#endif /* __ASM_UASM_H */
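The effect is easiest to see from the consumer side: after this patch a
translation unit can pull in uasm.h both directly and indirectly without
redefinition errors, which is exactly what the next patch's new header
relies on:

	#include <asm/uasm.h>	/* direct inclusion */
	#include <asm/tlbex.h>	/* pulls in asm/uasm.h again; now harmless */
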
From 722b45443146f425453525d3c2270ff2733f5dc4 Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Sat, 10 Sep 2016 23:55:07 +0100
Subject: [PATCH 4/5] MIPS: Export some tlbex internals for KVM to use
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Export the TLB exception code generating functions so that KVM can
construct a fast TLB refill handler for guest context without
reinventing the wheel quite so much.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/include/asm/tlbex.h | 26 ++++++++++++++++++++++++++
 arch/mips/mm/tlbex.c          | 33 ++++++++++++++++-----------------
 2 files changed, 42 insertions(+), 17 deletions(-)
 create mode 100644 arch/mips/include/asm/tlbex.h

diff --git a/arch/mips/include/asm/tlbex.h b/arch/mips/include/asm/tlbex.h
new file mode 100644
index 000000000000..53050e9dd2c9
--- /dev/null
+++ b/arch/mips/include/asm/tlbex.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_TLBEX_H
+#define __ASM_TLBEX_H
+
+#include <asm/uasm.h>
+
+/*
+ * Write random or indexed TLB entry, and care about the hazards from
+ * the preceding mtc0 and for the following eret.
+ */
+enum tlb_write_entry {
+	tlb_random,
+	tlb_indexed
+};
+
+extern int pgd_reg;
+
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+		      unsigned int tmp, unsigned int ptr);
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep);
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+			   struct uasm_reloc **r,
+			   enum tlb_write_entry wmode);
+
+#endif /* __ASM_TLBEX_H */
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index dc7bb1506103..2465f83c79c3 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -35,6 +35,7 @@
 #include <asm/war.h>
 #include <asm/uasm.h>
 #include <asm/setup.h>
+#include <asm/tlbex.h>
 
 static int mips_xpa_disabled;
 
@@ -345,7 +346,8 @@ static int allocate_kscratch(void)
 }
 
 static int scratch_reg;
-static int pgd_reg;
+int pgd_reg;
+EXPORT_SYMBOL_GPL(pgd_reg);
 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 
 static struct work_registers build_get_work_registers(u32 **p)
@@ -497,15 +499,9 @@ static void __maybe_unused build_tlb_probe_entry(u32 **p)
 	}
 }
 
-/*
- * Write random or indexed TLB entry, and care about the hazards from
- * the preceding mtc0 and for the following eret.
- */
-enum tlb_write_entry { tlb_random, tlb_indexed };
-
-static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
-				  struct uasm_reloc **r,
-				  enum tlb_write_entry wmode)
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+			   struct uasm_reloc **r,
+			   enum tlb_write_entry wmode)
 {
 	void(*tlbw)(u32 **) = NULL;
 
@@ -628,6 +624,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		break;
 	}
 }
+EXPORT_SYMBOL_GPL(build_tlb_write_entry);
 
 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 							unsigned int reg)
@@ -782,9 +779,8 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pmd entry.
  */
-static void
-build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-		 unsigned int tmp, unsigned int ptr)
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+		      unsigned int tmp, unsigned int ptr)
 {
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 	long pgdc = (long)pgd_current;
@@ -860,6 +856,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 #endif
 }
+EXPORT_SYMBOL_GPL(build_get_pmde64);
 
 /*
  * BVADDR is the faulting address, PTR is scratch.
@@ -935,8 +932,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pgd entry.
  */
-static void __maybe_unused
-build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	if (pgd_reg != -1) {
 		/* pgd is in pgd_reg */
@@ -961,6 +957,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
 	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
 }
+EXPORT_SYMBOL_GPL(build_get_pgde32);
 
 #endif /* !CONFIG_64BIT */
 
@@ -990,7 +987,7 @@ static void build_adjust_context(u32 **p, unsigned int ctx)
 	uasm_i_andi(p, ctx, ctx, mask);
 }
 
-static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	/*
 	 * Bug workaround for the Nevada. It seems as if under certain
@@ -1014,8 +1011,9 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 	build_adjust_context(p, tmp);
 	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
+EXPORT_SYMBOL_GPL(build_get_ptep);
 
-static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
 	int pte_off_even = 0;
 	int pte_off_odd = sizeof(pte_t);
@@ -1064,6 +1062,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
 	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 }
+EXPORT_SYMBOL_GPL(build_update_entries);
 
 struct mips_huge_tlb_info {
 	int huge_pte;
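To see how these builders compose, here is a hedged sketch of the core of
a guest TLB refill handler in the style KVM ends up using; the buffer
size, register numbers, and function name are all illustrative:

	#include <asm/tlbex.h>
	#include <asm/uasm.h>

	#define K0 26				/* $k0, kernel scratch */
	#define K1 27				/* $k1, kernel scratch */

	static u32 refill_buf[128];		/* illustrative code buffer */
	static struct uasm_label labels[8];
	static struct uasm_reloc relocs[8];

	static void build_gva_refill(void)
	{
		struct uasm_label *l = labels;
		struct uasm_reloc *r = relocs;
		u32 *p = refill_buf;

	#ifdef CONFIG_64BIT
		build_get_pmde64(&p, &l, &r, K0, K1);	/* K1 <- pmd entry */
	#else
		build_get_pgde32(&p, K0, K1);		/* K1 <- pgd entry */
	#endif
		build_get_ptep(&p, K0, K1);		/* K1 <- pte pair */
		build_update_entries(&p, K0, K1);	/* fill EntryLo0/1 */
		build_tlb_write_entry(&p, &l, &r, tlb_random);
		uasm_i_eret(&p);			/* leave the exception */
	}
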
From 7170bdc777556dad87f92e6a73a6cc557bf3290e Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Mon, 28 Nov 2016 16:38:01 +0000
Subject: [PATCH 5/5] MIPS: Add return errors to protected cache ops
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The protected cache ops contain no out-of-line fixup code to return an
error code in the event of a fault; the cache op is simply skipped in
that case. For KVM, however, we'd like to detect this case: page
faulting is disabled there, so a fault can happen during normal
operation if the GVA page tables have been flushed, and it needs to be
handled by the caller.

Add out-of-line fixup code to load the error value -EFAULT into the
return variable, and adapt the protected cache line functions to pass
the error back to the caller.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/include/asm/r4kcache.h | 55 ++++++++++++++++++++------------
 1 file changed, 35 insertions(+), 20 deletions(-)

diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index b42b513007a2..7227c158cbf8 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -147,49 +147,64 @@ static inline void flush_scache_line(unsigned long addr)
 }
 
 #define protected_cache_op(op,addr)				\
+({								\
+	int __err = 0;						\
 	__asm__ __volatile__(					\
 	"	.set	push			\n"		\
 	"	.set	noreorder		\n"		\
 	"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"		\
-	"1:	cache	%0, (%1)		\n"		\
+	"1:	cache	%1, (%2)		\n"		\
 	"2:	.set	pop			\n"		\
+	"	.section .fixup,\"ax\"		\n"		\
+	"3:	li	%0, %3			\n"		\
+	"	j	2b			\n"		\
+	"	.previous			\n"		\
 	"	.section __ex_table,\"a\"	\n"		\
-	"	"STR(PTR)" 1b, 2b		\n"		\
+	"	"STR(PTR)" 1b, 3b		\n"		\
 	"	.previous"				\
-	:						\
-	: "i" (op), "r" (addr))
+	: "+r" (__err)					\
+	: "i" (op), "r" (addr), "i" (-EFAULT));		\
+	__err;						\
+})
+
 #define protected_cachee_op(op,addr)				\
+({								\
+	int __err = 0;						\
 	__asm__ __volatile__(					\
 	"	.set	push			\n"		\
 	"	.set	noreorder		\n"		\
 	"	.set	mips0			\n"		\
 	"	.set	eva			\n"		\
-	"1:	cachee	%0, (%1)		\n"		\
+	"1:	cachee	%1, (%2)		\n"		\
 	"2:	.set	pop			\n"		\
+	"	.section .fixup,\"ax\"		\n"		\
+	"3:	li	%0, %3			\n"		\
+	"	j	2b			\n"		\
+	"	.previous			\n"		\
 	"	.section __ex_table,\"a\"	\n"		\
-	"	"STR(PTR)" 1b, 2b		\n"		\
+	"	"STR(PTR)" 1b, 3b		\n"		\
 	"	.previous"				\
-	:						\
-	: "i" (op), "r" (addr))
+	: "+r" (__err)					\
+	: "i" (op), "r" (addr), "i" (-EFAULT));		\
+	__err;						\
+})
 
 /*
  * The next two are for badland addresses like signal trampolines.
  */
-static inline void protected_flush_icache_line(unsigned long addr)
+static inline int protected_flush_icache_line(unsigned long addr)
 {
 	switch (boot_cpu_type()) {
 	case CPU_LOONGSON2:
-		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
-		break;
+		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
 
 	default:
 #ifdef CONFIG_EVA
-		protected_cachee_op(Hit_Invalidate_I, addr);
+		return protected_cachee_op(Hit_Invalidate_I, addr);
 #else
-		protected_cache_op(Hit_Invalidate_I, addr);
+		return protected_cache_op(Hit_Invalidate_I, addr);
 #endif
-		break;
 	}
 }
 
@@ -199,21 +214,21 @@ static inline void protected_flush_icache_line(unsigned long addr)
  * caches.  We're talking about one cacheline unnecessarily getting invalidated
  * here so the penalty isn't overly hard.
  */
-static inline void protected_writeback_dcache_line(unsigned long addr)
+static inline int protected_writeback_dcache_line(unsigned long addr)
 {
 #ifdef CONFIG_EVA
-	protected_cachee_op(Hit_Writeback_Inv_D, addr);
+	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
 #else
-	protected_cache_op(Hit_Writeback_Inv_D, addr);
+	return protected_cache_op(Hit_Writeback_Inv_D, addr);
 #endif
 }
 
-static inline void protected_writeback_scache_line(unsigned long addr)
+static inline int protected_writeback_scache_line(unsigned long addr)
 {
 #ifdef CONFIG_EVA
-	protected_cachee_op(Hit_Writeback_Inv_SD, addr);
+	return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
 #else
-	protected_cache_op(Hit_Writeback_Inv_SD, addr);
+	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
 #endif
 }
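With the ops returning int, a caller can finally distinguish "line
flushed" from "mapping gone". A short usage sketch with a hypothetical
helper name, in the style the commit message describes:

	#include <asm/r4kcache.h>

	/* hypothetical: returns 0, or -EFAULT if the GVA mapping vanished
	 * and the cache op faulted; the caller is expected to refault the
	 * mapping and retry */
	static int flush_gva_line(unsigned long addr)
	{
		int err;

		err = protected_writeback_dcache_line(addr);
		if (!err)
			err = protected_flush_icache_line(addr);
		return err;
	}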