From 34b4d20399e6fad2e3379b11e68dff1d1549274e Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 13 Sep 2022 09:44:34 +0000 Subject: [PATCH 01/57] KVM: arm64: Use visibility hook to treat ID regs as RAZ The generic id reg accessors already handle RAZ registers by way of the visibility hook. Add a visibility hook that returns REG_RAZ unconditionally and throw out the RAZ specific accessors. Reviewed-by: Reiji Watanabe Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220913094441.3957645-2-oliver.upton@linux.dev --- arch/arm64/kvm/sys_regs.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 3234f50b8c4b..e18efb9211f0 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1145,6 +1145,12 @@ static unsigned int id_visibility(const struct kvm_vcpu *vcpu, return 0; } +static unsigned int raz_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *r) +{ + return REG_RAZ; +} + /* cpufeature ID register access trap handlers */ static bool __access_id_reg(struct kvm_vcpu *vcpu, @@ -1168,13 +1174,6 @@ static bool access_id_reg(struct kvm_vcpu *vcpu, return __access_id_reg(vcpu, p, r, raz); } -static bool access_raz_id_reg(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *r) -{ - return __access_id_reg(vcpu, p, r, true); -} - /* Visibility overrides for SVE-specific control registers */ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) @@ -1262,12 +1261,6 @@ static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, return __set_id_reg(vcpu, rd, val, raz); } -static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, - u64 val) -{ - return __set_id_reg(vcpu, rd, val, true); -} - static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, u64 *val) { @@ -1374,9 +1367,10 @@ static unsigned int mte_visibility(const struct kvm_vcpu *vcpu, */ #define ID_UNALLOCATED(crm, op2) { \ Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \ - .access = access_raz_id_reg, \ - .get_user = get_raz_reg, \ - .set_user = set_raz_id_reg, \ + .access = access_id_reg, \ + .get_user = get_id_reg, \ + .set_user = set_id_reg, \ + .visibility = raz_visibility \ } /* @@ -1386,9 +1380,10 @@ static unsigned int mte_visibility(const struct kvm_vcpu *vcpu, */ #define ID_HIDDEN(name) { \ SYS_DESC(SYS_##name), \ - .access = access_raz_id_reg, \ - .get_user = get_raz_reg, \ - .set_user = set_raz_id_reg, \ + .access = access_id_reg, \ + .get_user = get_id_reg, \ + .set_user = set_id_reg, \ + .visibility = raz_visibility, \ } /* From 4782ccc8ef50fabb70bab9fa73186285dba6d91d Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 13 Sep 2022 09:44:35 +0000 Subject: [PATCH 02/57] KVM: arm64: Remove internal accessor helpers for id regs The internal accessors are only ever called once. Dump out their contents in the caller. No functional change intended. 
Signed-off-by: Oliver Upton Reviewed-by: Reiji Watanabe Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220913094441.3957645-3-oliver.upton@linux.dev --- arch/arm64/kvm/sys_regs.c | 46 ++++++++++----------------------------- 1 file changed, 12 insertions(+), 34 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e18efb9211f0..26210f3a0b27 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1153,25 +1153,17 @@ static unsigned int raz_visibility(const struct kvm_vcpu *vcpu, /* cpufeature ID register access trap handlers */ -static bool __access_id_reg(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *r, - bool raz) -{ - if (p->is_write) - return write_to_read_only(vcpu, p, r); - - p->regval = read_id_reg(vcpu, r, raz); - return true; -} - static bool access_id_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { bool raz = sysreg_visible_as_raz(vcpu, r); - return __access_id_reg(vcpu, p, r, raz); + if (p->is_write) + return write_to_read_only(vcpu, p, r); + + p->regval = read_id_reg(vcpu, r, raz); + return true; } /* Visibility overrides for SVE-specific control registers */ @@ -1226,31 +1218,13 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, * are stored, and for set_id_reg() we don't allow the effective value * to be changed. */ -static int __get_id_reg(const struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd, u64 *val, - bool raz) -{ - *val = read_id_reg(vcpu, rd, raz); - return 0; -} - -static int __set_id_reg(const struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd, u64 val, - bool raz) -{ - /* This is what we mean by invariant: you can't change it. */ - if (val != read_id_reg(vcpu, rd, raz)) - return -EINVAL; - - return 0; -} - static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, u64 *val) { bool raz = sysreg_visible_as_raz(vcpu, rd); - return __get_id_reg(vcpu, rd, val, raz); + *val = read_id_reg(vcpu, rd, raz); + return 0; } static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, @@ -1258,7 +1232,11 @@ static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, { bool raz = sysreg_visible_as_raz(vcpu, rd); - return __set_id_reg(vcpu, rd, val, raz); + /* This is what we mean by invariant: you can't change it. */ + if (val != read_id_reg(vcpu, rd, raz)) + return -EINVAL; + + return 0; } static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, From cdd5036d048ca96ef5212fb37f4f56db40cb1bc2 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 13 Sep 2022 09:44:36 +0000 Subject: [PATCH 03/57] KVM: arm64: Drop raz parameter from read_id_reg() There is no longer a need for caller-specified RAZ visibility. Hoist the call to sysreg_visible_as_raz() into read_id_reg() and drop the parameter. No functional change intended. 
Suggested-by: Reiji Watanabe Signed-off-by: Oliver Upton Reviewed-by: Reiji Watanabe Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220913094441.3957645-4-oliver.upton@linux.dev --- arch/arm64/kvm/sys_regs.c | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 26210f3a0b27..0e20a311ea20 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1063,13 +1063,12 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu, } /* Read a sanitised cpufeature ID register by sys_reg_desc */ -static u64 read_id_reg(const struct kvm_vcpu *vcpu, - struct sys_reg_desc const *r, bool raz) +static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r) { u32 id = reg_to_encoding(r); u64 val; - if (raz) + if (sysreg_visible_as_raz(vcpu, r)) return 0; val = read_sanitised_ftr_reg(id); @@ -1157,12 +1156,10 @@ static bool access_id_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { - bool raz = sysreg_visible_as_raz(vcpu, r); - if (p->is_write) return write_to_read_only(vcpu, p, r); - p->regval = read_id_reg(vcpu, r, raz); + p->regval = read_id_reg(vcpu, r); return true; } @@ -1199,7 +1196,7 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, return -EINVAL; /* We can only differ with CSV[23], and anything else is an error */ - val ^= read_id_reg(vcpu, rd, false); + val ^= read_id_reg(vcpu, rd); val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) | (0xFUL << ID_AA64PFR0_CSV3_SHIFT)); if (val) @@ -1221,19 +1218,15 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, u64 *val) { - bool raz = sysreg_visible_as_raz(vcpu, rd); - - *val = read_id_reg(vcpu, rd, raz); + *val = read_id_reg(vcpu, rd); return 0; } static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, u64 val) { - bool raz = sysreg_visible_as_raz(vcpu, rd); - /* This is what we mean by invariant: you can't change it. */ - if (val != read_id_reg(vcpu, rd, raz)) + if (val != read_id_reg(vcpu, rd)) return -EINVAL; return 0; From 5d9a718b64e428a40939806873ecf16f072008b3 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 13 Sep 2022 09:44:37 +0000 Subject: [PATCH 04/57] KVM: arm64: Spin off helper for calling visibility hook No functional change intended. 
Reviewed-by: Reiji Watanabe Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220913094441.3957645-5-oliver.upton@linux.dev --- arch/arm64/kvm/sys_regs.h | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index a8c4cc32eb9a..e78b51059622 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h @@ -136,22 +136,25 @@ static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r __vcpu_sys_reg(vcpu, r->reg) = r->val; } +static inline unsigned int sysreg_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *r) +{ + if (likely(!r->visibility)) + return 0; + + return r->visibility(vcpu, r); +} + static inline bool sysreg_hidden(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { - if (likely(!r->visibility)) - return false; - - return r->visibility(vcpu, r) & REG_HIDDEN; + return sysreg_visibility(vcpu, r) & REG_HIDDEN; } static inline bool sysreg_visible_as_raz(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { - if (likely(!r->visibility)) - return false; - - return r->visibility(vcpu, r) & REG_RAZ; + return sysreg_visibility(vcpu, r) & REG_RAZ; } static inline int cmp_sys_reg(const struct sys_reg_desc *i1, From 4de06e4c1dc949c35c16e4423b4ccd735264b0a9 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 13 Sep 2022 09:44:38 +0000 Subject: [PATCH 05/57] KVM: arm64: Add a visibility bit to ignore user writes We're about to ignore writes to AArch32 ID registers on AArch64-only systems. Add a bit to indicate a register is handled as write ignore when accessed from userspace. Signed-off-by: Oliver Upton Reviewed-by: Reiji Watanabe Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220913094441.3957645-6-oliver.upton@linux.dev --- arch/arm64/kvm/sys_regs.c | 3 +++ arch/arm64/kvm/sys_regs.h | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 0e20a311ea20..6d0511247df4 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2775,6 +2775,9 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, if (!r) return -ENOENT; + if (sysreg_user_write_ignore(vcpu, r)) + return 0; + if (r->set_user) { ret = (r->set_user)(vcpu, r, val); } else { diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index e78b51059622..e4ebb3a379fd 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h @@ -86,6 +86,7 @@ struct sys_reg_desc { #define REG_HIDDEN (1 << 0) /* hidden from userspace and guest */ #define REG_RAZ (1 << 1) /* RAZ from userspace and guest */ +#define REG_USER_WI (1 << 2) /* WI from userspace only */ static __printf(2, 3) inline void print_sys_reg_msg(const struct sys_reg_params *p, @@ -157,6 +158,12 @@ static inline bool sysreg_visible_as_raz(const struct kvm_vcpu *vcpu, return sysreg_visibility(vcpu, r) & REG_RAZ; } +static inline bool sysreg_user_write_ignore(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *r) +{ + return sysreg_visibility(vcpu, r) & REG_USER_WI; +} + static inline int cmp_sys_reg(const struct sys_reg_desc *i1, const struct sys_reg_desc *i2) { From d5efec7ed826b3b29c6847bf59383d8d07347a4e Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 13 Sep 2022 09:44:39 +0000 Subject: [PATCH 06/57] KVM: arm64: Treat 32bit ID registers as RAZ/WI on 64bit-only system One of the oddities of the architecture is that the AArch64 views of the 
AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any EL. Nonetheless, KVM exposes these registers to userspace for the sake of save/restore. It is possible that the UNKNOWN value could differ between systems, leading to a rejected write from userspace. Avoid the issue altogether by handling the AArch32 ID registers as RAZ/WI when on an AArch64-only system. Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220913094441.3957645-7-oliver.upton@linux.dev --- arch/arm64/kvm/sys_regs.c | 63 ++++++++++++++++++++++++++------------- 1 file changed, 43 insertions(+), 20 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 6d0511247df4..9569772cf09a 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1144,6 +1144,20 @@ static unsigned int id_visibility(const struct kvm_vcpu *vcpu, return 0; } +static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *r) +{ + /* + * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any + * EL. Promote to RAZ/WI in order to guarantee consistency between + * systems. + */ + if (!kvm_supports_32bit_el0()) + return REG_RAZ | REG_USER_WI; + + return id_visibility(vcpu, r); +} + static unsigned int raz_visibility(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { @@ -1331,6 +1345,15 @@ static unsigned int mte_visibility(const struct kvm_vcpu *vcpu, .visibility = id_visibility, \ } +/* sys_reg_desc initialiser for known cpufeature ID registers */ +#define AA32_ID_SANITISED(name) { \ + SYS_DESC(SYS_##name), \ + .access = access_id_reg, \ + .get_user = get_id_reg, \ + .set_user = set_id_reg, \ + .visibility = aa32_id_visibility, \ +} + /* * sys_reg_desc initialiser for architecturally unallocated cpufeature ID * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2 @@ -1418,33 +1441,33 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* AArch64 mappings of the AArch32 ID registers */ /* CRm=1 */ - ID_SANITISED(ID_PFR0_EL1), - ID_SANITISED(ID_PFR1_EL1), - ID_SANITISED(ID_DFR0_EL1), + AA32_ID_SANITISED(ID_PFR0_EL1), + AA32_ID_SANITISED(ID_PFR1_EL1), + AA32_ID_SANITISED(ID_DFR0_EL1), ID_HIDDEN(ID_AFR0_EL1), - ID_SANITISED(ID_MMFR0_EL1), - ID_SANITISED(ID_MMFR1_EL1), - ID_SANITISED(ID_MMFR2_EL1), - ID_SANITISED(ID_MMFR3_EL1), + AA32_ID_SANITISED(ID_MMFR0_EL1), + AA32_ID_SANITISED(ID_MMFR1_EL1), + AA32_ID_SANITISED(ID_MMFR2_EL1), + AA32_ID_SANITISED(ID_MMFR3_EL1), /* CRm=2 */ - ID_SANITISED(ID_ISAR0_EL1), - ID_SANITISED(ID_ISAR1_EL1), - ID_SANITISED(ID_ISAR2_EL1), - ID_SANITISED(ID_ISAR3_EL1), - ID_SANITISED(ID_ISAR4_EL1), - ID_SANITISED(ID_ISAR5_EL1), - ID_SANITISED(ID_MMFR4_EL1), - ID_SANITISED(ID_ISAR6_EL1), + AA32_ID_SANITISED(ID_ISAR0_EL1), + AA32_ID_SANITISED(ID_ISAR1_EL1), + AA32_ID_SANITISED(ID_ISAR2_EL1), + AA32_ID_SANITISED(ID_ISAR3_EL1), + AA32_ID_SANITISED(ID_ISAR4_EL1), + AA32_ID_SANITISED(ID_ISAR5_EL1), + AA32_ID_SANITISED(ID_MMFR4_EL1), + AA32_ID_SANITISED(ID_ISAR6_EL1), /* CRm=3 */ - ID_SANITISED(MVFR0_EL1), - ID_SANITISED(MVFR1_EL1), - ID_SANITISED(MVFR2_EL1), + AA32_ID_SANITISED(MVFR0_EL1), + AA32_ID_SANITISED(MVFR1_EL1), + AA32_ID_SANITISED(MVFR2_EL1), ID_UNALLOCATED(3,3), - ID_SANITISED(ID_PFR2_EL1), + AA32_ID_SANITISED(ID_PFR2_EL1), ID_HIDDEN(ID_DFR1_EL1), - ID_SANITISED(ID_MMFR5_EL1), + AA32_ID_SANITISED(ID_MMFR5_EL1), ID_UNALLOCATED(3,7), /* AArch64 ID registers */ From 797b84517c190053597e3f7e03ead15da872e04d Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 13 Sep 
2022 09:44:40 +0000 Subject: [PATCH 07/57] KVM: selftests: Add test for AArch32 ID registers Add a test to assert that KVM handles the AArch64 views of the AArch32 ID registers as RAZ/WI (writable only from userspace). For registers that were already hidden or unallocated, expect RAZ + invariant behavior. Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220913094441.3957645-8-oliver.upton@linux.dev --- tools/testing/selftests/kvm/.gitignore | 1 + tools/testing/selftests/kvm/Makefile | 1 + .../selftests/kvm/aarch64/aarch32_id_regs.c | 169 ++++++++++++++++++ 3 files changed, 171 insertions(+) create mode 100644 tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index d625a3f83780..87d1a0b1bae0 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only +/aarch64/aarch32_id_regs /aarch64/arch_timer /aarch64/debug-exceptions /aarch64/get-reg-list diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 4c122f1b1737..784abe7f0962 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -144,6 +144,7 @@ TEST_GEN_PROGS_x86_64 += system_counter_offset_test # Compiled outputs used by test targets TEST_GEN_PROGS_EXTENDED_x86_64 += x86_64/nx_huge_pages_test +TEST_GEN_PROGS_aarch64 += aarch64/aarch32_id_regs TEST_GEN_PROGS_aarch64 += aarch64/arch_timer TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list diff --git a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c new file mode 100644 index 000000000000..6f9c1f19c7f6 --- /dev/null +++ b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * aarch32_id_regs - Test for ID register behavior on AArch64-only systems + * + * Copyright (c) 2022 Google LLC. + * + * Test that KVM handles the AArch64 views of the AArch32 ID registers as RAZ + * and WI from userspace. 
+ */ + +#include + +#include "kvm_util.h" +#include "processor.h" +#include "test_util.h" + +#define BAD_ID_REG_VAL 0x1badc0deul + +#define GUEST_ASSERT_REG_RAZ(reg) GUEST_ASSERT_EQ(read_sysreg_s(reg), 0) + +static void guest_main(void) +{ + GUEST_ASSERT_REG_RAZ(SYS_ID_PFR0_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_PFR1_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_DFR0_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_AFR0_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR0_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR1_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR2_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR3_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR0_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR1_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR2_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR3_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR4_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR5_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR4_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR6_EL1); + GUEST_ASSERT_REG_RAZ(SYS_MVFR0_EL1); + GUEST_ASSERT_REG_RAZ(SYS_MVFR1_EL1); + GUEST_ASSERT_REG_RAZ(SYS_MVFR2_EL1); + GUEST_ASSERT_REG_RAZ(sys_reg(3, 0, 0, 3, 3)); + GUEST_ASSERT_REG_RAZ(SYS_ID_PFR2_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_DFR1_EL1); + GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR5_EL1); + GUEST_ASSERT_REG_RAZ(sys_reg(3, 0, 0, 3, 7)); + + GUEST_DONE(); +} + +static void test_guest_raz(struct kvm_vcpu *vcpu) +{ + struct ucall uc; + + vcpu_run(vcpu); + + switch (get_ucall(vcpu, &uc)) { + case UCALL_ABORT: + REPORT_GUEST_ASSERT(uc); + break; + case UCALL_DONE: + break; + default: + TEST_FAIL("Unexpected ucall: %lu", uc.cmd); + } +} + +static uint64_t raz_wi_reg_ids[] = { + KVM_ARM64_SYS_REG(SYS_ID_PFR0_EL1), + KVM_ARM64_SYS_REG(SYS_ID_PFR1_EL1), + KVM_ARM64_SYS_REG(SYS_ID_DFR0_EL1), + KVM_ARM64_SYS_REG(SYS_ID_MMFR0_EL1), + KVM_ARM64_SYS_REG(SYS_ID_MMFR1_EL1), + KVM_ARM64_SYS_REG(SYS_ID_MMFR2_EL1), + KVM_ARM64_SYS_REG(SYS_ID_MMFR3_EL1), + KVM_ARM64_SYS_REG(SYS_ID_ISAR0_EL1), + KVM_ARM64_SYS_REG(SYS_ID_ISAR1_EL1), + KVM_ARM64_SYS_REG(SYS_ID_ISAR2_EL1), + KVM_ARM64_SYS_REG(SYS_ID_ISAR3_EL1), + KVM_ARM64_SYS_REG(SYS_ID_ISAR4_EL1), + KVM_ARM64_SYS_REG(SYS_ID_ISAR5_EL1), + KVM_ARM64_SYS_REG(SYS_ID_MMFR4_EL1), + KVM_ARM64_SYS_REG(SYS_ID_ISAR6_EL1), + KVM_ARM64_SYS_REG(SYS_MVFR0_EL1), + KVM_ARM64_SYS_REG(SYS_MVFR1_EL1), + KVM_ARM64_SYS_REG(SYS_MVFR2_EL1), + KVM_ARM64_SYS_REG(SYS_ID_PFR2_EL1), + KVM_ARM64_SYS_REG(SYS_ID_MMFR5_EL1), +}; + +static void test_user_raz_wi(struct kvm_vcpu *vcpu) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(raz_wi_reg_ids); i++) { + uint64_t reg_id = raz_wi_reg_ids[i]; + uint64_t val; + + vcpu_get_reg(vcpu, reg_id, &val); + ASSERT_EQ(val, 0); + + /* + * Expect the ioctl to succeed with no effect on the register + * value. 
+ */ + vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL); + + vcpu_get_reg(vcpu, reg_id, &val); + ASSERT_EQ(val, 0); + } +} + +static uint64_t raz_invariant_reg_ids[] = { + KVM_ARM64_SYS_REG(SYS_ID_AFR0_EL1), + KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 3)), + KVM_ARM64_SYS_REG(SYS_ID_DFR1_EL1), + KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 7)), +}; + +static void test_user_raz_invariant(struct kvm_vcpu *vcpu) +{ + int i, r; + + for (i = 0; i < ARRAY_SIZE(raz_invariant_reg_ids); i++) { + uint64_t reg_id = raz_invariant_reg_ids[i]; + uint64_t val; + + vcpu_get_reg(vcpu, reg_id, &val); + ASSERT_EQ(val, 0); + + r = __vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL); + TEST_ASSERT(r < 0 && errno == EINVAL, + "unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno); + + vcpu_get_reg(vcpu, reg_id, &val); + ASSERT_EQ(val, 0); + } +} + + + +static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu) +{ + uint64_t val, el0; + + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val); + + el0 = (val & ARM64_FEATURE_MASK(ID_AA64PFR0_EL0)) >> ID_AA64PFR0_EL0_SHIFT; + return el0 == ID_AA64PFR0_ELx_64BIT_ONLY; +} + +int main(void) +{ + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + + vm = vm_create_with_one_vcpu(&vcpu, guest_main); + + TEST_REQUIRE(vcpu_aarch64_only(vcpu)); + + ucall_init(vm, NULL); + + test_user_raz_wi(vcpu); + test_user_raz_invariant(vcpu); + test_guest_raz(vcpu); + + ucall_uninit(vm); + kvm_vm_free(vm); +} From 34fbdee086cfcc20fe889d2b83afddfbe2ac3096 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 16 Sep 2022 18:05:57 -0700 Subject: [PATCH 08/57] KVM: arm64: Preserve PSTATE.SS for the guest while single-step is enabled Preserve the PSTATE.SS value for the guest while userspace enables single-step (i.e. while KVM manipulates the PSTATE.SS) for the vCPU. Currently, while userspace enables single-step for the vCPU (with KVM_GUESTDBG_SINGLESTEP), KVM sets PSTATE.SS to 1 on every guest entry, not saving its original value. When userspace disables single-step, KVM doesn't restore the original value for the subsequent guest entry (use the current value instead). Exception return instructions copy PSTATE.SS from SPSR_ELx.SS only in certain cases when single-step is enabled (and set it to 0 in other cases). So, the value matters only when the guest enables single-step (and when the guest's Software step state isn't affected by single-step enabled by userspace, practically), though. Fix this by preserving the original PSTATE.SS value while userspace enables single-step, and restoring the value once it is disabled. This fix modifies the behavior of GET_ONE_REG/SET_ONE_REG for the PSTATE.SS while single-step is enabled by userspace. Presently, GET_ONE_REG/SET_ONE_REG gets/sets the current PSTATE.SS value, which KVM will override on the next guest entry (i.e. the value userspace gets/sets is not used for the next guest entry). With this patch, GET_ONE_REG/SET_ONE_REG will get/set the guest's preserved value, which KVM will preserve and try to restore after single-step is disabled. 
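
[Illustrative note] For context, a minimal userspace sketch of the interface involved here, i.e. toggling single-step with KVM_SET_GUEST_DEBUG. This assumes an already-created vCPU fd; error handling and the surrounding run loop are omitted, and it is only a sketch, not kernel or selftest code:

	#include <stdbool.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Illustrative only: enable or disable single-step on a vCPU fd. */
	static int set_single_step(int vcpu_fd, bool enable)
	{
		struct kvm_guest_debug dbg = { 0 };

		if (enable)
			dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

		/*
		 * With this patch, the guest's PSTATE.SS is preserved while
		 * this control is enabled and restored once it is disabled.
		 */
		return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
	}

While this control is set, GET_ONE_REG/SET_ONE_REG on PSTATE now operate on the preserved SS value rather than the value KVM overrides on each guest entry.
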
Fixes: 337b99bf7edf ("KVM: arm64: guest debug, add support for single-step") Signed-off-by: Reiji Watanabe Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220917010600.532642-2-reijiw@google.com --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/debug.c | 12 ++++++++++++ 2 files changed, 13 insertions(+) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index e9c9388ccc02..ccf8a144f009 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -393,6 +393,7 @@ struct kvm_vcpu_arch { */ struct { u32 mdscr_el1; + bool pstate_ss; } guest_debug_preserved; /* vcpu power state */ diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 0b28d7db7c76..1bd2a1aee11c 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -32,6 +32,10 @@ static DEFINE_PER_CPU(u64, mdcr_el2); * * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled * after we have restored the preserved value to the main context. + * + * When single-step is enabled by userspace, we tweak PSTATE.SS on every + * guest entry. Preserve PSTATE.SS so we can restore the original value + * for the vcpu after the single-step is disabled. */ static void save_guest_debug_regs(struct kvm_vcpu *vcpu) { @@ -41,6 +45,9 @@ static void save_guest_debug_regs(struct kvm_vcpu *vcpu) trace_kvm_arm_set_dreg32("Saved MDSCR_EL1", vcpu->arch.guest_debug_preserved.mdscr_el1); + + vcpu->arch.guest_debug_preserved.pstate_ss = + (*vcpu_cpsr(vcpu) & DBG_SPSR_SS); } static void restore_guest_debug_regs(struct kvm_vcpu *vcpu) @@ -51,6 +58,11 @@ static void restore_guest_debug_regs(struct kvm_vcpu *vcpu) trace_kvm_arm_set_dreg32("Restored MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1)); + + if (vcpu->arch.guest_debug_preserved.pstate_ss) + *vcpu_cpsr(vcpu) |= DBG_SPSR_SS; + else + *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS; } /** From 370531d1e95be57c62fdf065fb04fd8db7ade8f9 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 16 Sep 2022 18:05:58 -0700 Subject: [PATCH 09/57] KVM: arm64: Clear PSTATE.SS when the Software Step state was Active-pending While userspace enables single-step, if the Software Step state at the last guest exit was "Active-pending", clear PSTATE.SS on guest entry to restore the state. Currently, KVM sets PSTATE.SS to 1 on every guest entry while userspace enables single-step for the vCPU (with KVM_GUESTDBG_SINGLESTEP). It means KVM always makes the vCPU's Software Step state "Active-not-pending" on the guest entry, which lets the VCPU perform single-step (then Software Step exception is taken). This could cause extra single-step (without returning to userspace) if the Software Step state at the last guest exit was "Active-pending" (i.e. the last exit was triggered by an asynchronous exception after the single-step is performed, but before the Software Step exception is taken. See "Figure D2-3 Software step state machine" and "D2.12.7 Behavior in the active-pending state" in ARM DDI 0487I.a for more info about this behavior). Fix this by clearing PSTATE.SS on guest entry if the Software Step state at the last exit was "Active-pending" so that KVM restore the state (and the exception is taken before further single-step is performed). 
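
[Illustrative note] The entry-time behaviour described above can be modelled in a few lines. This is illustrative C only, not the kernel code (the real logic lives in kvm_arm_setup_debug(), modified below); bit 21 matches the SPSR_SS definition used by the selftests later in this series:

	#include <stdio.h>

	#define PSTATE_SS	(1UL << 21)	/* SPSR_SS / DBG_SPSR_SS bit position */

	enum ss_state { ACTIVE_NOT_PENDING, ACTIVE_PENDING };

	/*
	 * Rough model of guest-entry PSTATE.SS handling while userspace
	 * single-step is enabled; illustration only.
	 */
	static unsigned long entry_pstate(unsigned long pstate, enum ss_state state)
	{
		if (state == ACTIVE_PENDING)
			return pstate & ~PSTATE_SS;	/* take the pending step exception first */
		return pstate | PSTATE_SS;		/* arm exactly one more single step */
	}

	int main(void)
	{
		printf("not-pending: SS=%d\n", !!(entry_pstate(0, ACTIVE_NOT_PENDING) & PSTATE_SS));
		printf("pending:     SS=%d\n", !!(entry_pstate(PSTATE_SS, ACTIVE_PENDING) & PSTATE_SS));
		return 0;
	}
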
Fixes: 337b99bf7edf ("KVM: arm64: guest debug, add support for single-step") Signed-off-by: Reiji Watanabe Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220917010600.532642-3-reijiw@google.com --- arch/arm64/include/asm/kvm_host.h | 3 +++ arch/arm64/kvm/debug.c | 22 +++++++++++++++++++++- arch/arm64/kvm/guest.c | 1 + arch/arm64/kvm/handle_exit.c | 8 +++++++- 4 files changed, 32 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index ccf8a144f009..45e2136322ba 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -536,6 +536,9 @@ struct kvm_vcpu_arch { #define IN_WFIT __vcpu_single_flag(sflags, BIT(3)) /* vcpu system registers loaded on physical CPU */ #define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(4)) +/* Software step state is Active-pending */ +#define DBG_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(5)) + /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \ diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 1bd2a1aee11c..56361e512b8a 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -200,7 +200,18 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) * debugging the system. */ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { - *vcpu_cpsr(vcpu) |= DBG_SPSR_SS; + /* + * If the software step state at the last guest exit + * was Active-pending, we don't set DBG_SPSR_SS so + * that the state is maintained (to not run another + * single-step until the pending Software Step + * exception is taken). + */ + if (!vcpu_get_flag(vcpu, DBG_SS_ACTIVE_PENDING)) + *vcpu_cpsr(vcpu) |= DBG_SPSR_SS; + else + *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS; + mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1); mdscr |= DBG_MDSCR_SS; vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1); @@ -274,6 +285,15 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) * Restore the guest's debug registers if we were using them. */ if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) { + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { + if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)) + /* + * Mark the vcpu as ACTIVE_PENDING + * until Software Step exception is taken. 
+ */ + vcpu_set_flag(vcpu, DBG_SS_ACTIVE_PENDING); + } + restore_guest_debug_regs(vcpu); /* diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index f802a3b3f8db..2ff13a3f8479 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -937,6 +937,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, } else { /* If not enabled clear all flags */ vcpu->guest_debug = 0; + vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING); } out: diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index bbe5b393d689..e778eefcf214 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -152,8 +152,14 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu) run->debug.arch.hsr_high = upper_32_bits(esr); run->flags = KVM_DEBUG_ARCH_HSR_HIGH_VALID; - if (ESR_ELx_EC(esr) == ESR_ELx_EC_WATCHPT_LOW) + switch (ESR_ELx_EC(esr)) { + case ESR_ELx_EC_WATCHPT_LOW: run->debug.arch.far = vcpu->arch.fault.far_el2; + break; + case ESR_ELx_EC_SOFTSTP_LOW: + vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING); + break; + } return 0; } From ff00e737090e0f015059e59829aaa58565b16321 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 16 Sep 2022 18:05:59 -0700 Subject: [PATCH 10/57] KVM: arm64: selftests: Refactor debug-exceptions to make it amenable to new test cases Split up the current test into a helper, but leave the debug version checking in main(), to make it convenient to add a new debug exception test case in a subsequent patch. Signed-off-by: Reiji Watanabe Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220917010600.532642-4-reijiw@google.com --- .../selftests/kvm/aarch64/debug-exceptions.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c index 2ee35cf9801e..e6e83b895fd5 100644 --- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c +++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c @@ -246,7 +246,7 @@ static int debug_version(struct kvm_vcpu *vcpu) return id_aa64dfr0 & 0xf; } -int main(int argc, char *argv[]) +static void test_guest_debug_exceptions(void) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -259,9 +259,6 @@ int main(int argc, char *argv[]) vm_init_descriptor_tables(vm); vcpu_init_descriptor_tables(vcpu); - __TEST_REQUIRE(debug_version(vcpu) >= 6, - "Armv8 debug architecture not supported."); - vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_EC_BRK_INS, guest_sw_bp_handler); vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, @@ -294,5 +291,18 @@ int main(int argc, char *argv[]) done: kvm_vm_free(vm); +} + +int main(int argc, char *argv[]) +{ + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + + vm = vm_create_with_one_vcpu(&vcpu, guest_code); + __TEST_REQUIRE(debug_version(vcpu) >= 6, + "Armv8 debug architecture not supported."); + kvm_vm_free(vm); + test_guest_debug_exceptions(); + return 0; } From b18e4d4aebdddd05810ceb2f73d7f72afcd11b41 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 16 Sep 2022 18:06:00 -0700 Subject: [PATCH 11/57] KVM: arm64: selftests: Add a test case for KVM_GUESTDBG_SINGLESTEP Add a test case for KVM_GUESTDBG_SINGLESTEP to the debug-exceptions test. The test enables single-step execution from userspace, and check if the exit to userspace occurs for each instruction that is stepped. 
Set the default number of the test iterations to a number of iterations sufficient to always reproduce the problem that the previous patch fixes on an Ampere Altra machine. Signed-off-by: Reiji Watanabe Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220917010600.532642-5-reijiw@google.com --- .../selftests/kvm/aarch64/debug-exceptions.c | 131 ++++++++++++++++++ 1 file changed, 131 insertions(+) diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c index e6e83b895fd5..947bd201435c 100644 --- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c +++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c @@ -22,6 +22,7 @@ #define SPSR_SS (1 << 21) extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start; +extern unsigned char iter_ss_begin, iter_ss_end; static volatile uint64_t sw_bp_addr, hw_bp_addr; static volatile uint64_t wp_addr, wp_data_addr; static volatile uint64_t svc_addr; @@ -238,6 +239,46 @@ static void guest_svc_handler(struct ex_regs *regs) svc_addr = regs->pc; } +enum single_step_op { + SINGLE_STEP_ENABLE = 0, + SINGLE_STEP_DISABLE = 1, +}; + +static void guest_code_ss(int test_cnt) +{ + uint64_t i; + uint64_t bvr, wvr, w_bvr, w_wvr; + + for (i = 0; i < test_cnt; i++) { + /* Bits [1:0] of dbg{b,w}vr are RES0 */ + w_bvr = i << 2; + w_wvr = i << 2; + + /* Enable Single Step execution */ + GUEST_SYNC(SINGLE_STEP_ENABLE); + + /* + * The userspace will veriry that the pc is as expected during + * single step execution between iter_ss_begin and iter_ss_end. + */ + asm volatile("iter_ss_begin:nop\n"); + + write_sysreg(w_bvr, dbgbvr0_el1); + write_sysreg(w_wvr, dbgwvr0_el1); + bvr = read_sysreg(dbgbvr0_el1); + wvr = read_sysreg(dbgwvr0_el1); + + asm volatile("iter_ss_end:\n"); + + /* Disable Single Step execution */ + GUEST_SYNC(SINGLE_STEP_DISABLE); + + GUEST_ASSERT(bvr == w_bvr); + GUEST_ASSERT(wvr == w_wvr); + } + GUEST_DONE(); +} + static int debug_version(struct kvm_vcpu *vcpu) { uint64_t id_aa64dfr0; @@ -293,16 +334,106 @@ done: kvm_vm_free(vm); } +void test_single_step_from_userspace(int test_cnt) +{ + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + struct ucall uc; + struct kvm_run *run; + uint64_t pc, cmd; + uint64_t test_pc = 0; + bool ss_enable = false; + struct kvm_guest_debug debug = {}; + + vm = vm_create_with_one_vcpu(&vcpu, guest_code_ss); + ucall_init(vm, NULL); + run = vcpu->run; + vcpu_args_set(vcpu, 1, test_cnt); + + while (1) { + vcpu_run(vcpu); + if (run->exit_reason != KVM_EXIT_DEBUG) { + cmd = get_ucall(vcpu, &uc); + if (cmd == UCALL_ABORT) { + REPORT_GUEST_ASSERT(uc); + /* NOT REACHED */ + } else if (cmd == UCALL_DONE) { + break; + } + + TEST_ASSERT(cmd == UCALL_SYNC, + "Unexpected ucall cmd 0x%lx", cmd); + + if (uc.args[1] == SINGLE_STEP_ENABLE) { + debug.control = KVM_GUESTDBG_ENABLE | + KVM_GUESTDBG_SINGLESTEP; + ss_enable = true; + } else { + debug.control = SINGLE_STEP_DISABLE; + ss_enable = false; + } + + vcpu_guest_debug_set(vcpu, &debug); + continue; + } + + TEST_ASSERT(ss_enable, "Unexpected KVM_EXIT_DEBUG"); + + /* Check if the current pc is expected. */ + vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc); + TEST_ASSERT(!test_pc || pc == test_pc, + "Unexpected pc 0x%lx (expected 0x%lx)", + pc, test_pc); + + /* + * If the current pc is between iter_ss_bgin and + * iter_ss_end, the pc for the next KVM_EXIT_DEBUG should + * be the current pc + 4. 
+ */ + if ((pc >= (uint64_t)&iter_ss_begin) && + (pc < (uint64_t)&iter_ss_end)) + test_pc = pc + 4; + else + test_pc = 0; + } + + kvm_vm_free(vm); +} + +static void help(char *name) +{ + puts(""); + printf("Usage: %s [-h] [-i iterations of the single step test]\n", name); + puts(""); + exit(0); +} + int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; + int opt; + int ss_iteration = 10000; vm = vm_create_with_one_vcpu(&vcpu, guest_code); __TEST_REQUIRE(debug_version(vcpu) >= 6, "Armv8 debug architecture not supported."); kvm_vm_free(vm); + + while ((opt = getopt(argc, argv, "i:")) != -1) { + switch (opt) { + case 'i': + ss_iteration = atoi(optarg); + break; + case 'h': + default: + help(argv[0]); + break; + } + } + test_guest_debug_exceptions(); + test_single_step_from_userspace(ss_iteration); return 0; } From 096560dd13251e351176aef54b7aee91c99920a3 Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Fri, 23 Sep 2022 14:54:47 +0800 Subject: [PATCH 12/57] KVM: arm64: vgic: Remove duplicate check in update_affinity_collection() The 'coll' parameter to update_affinity_collection() is never NULL, so comparing it with 'ite->collection' is enough to cover both the NULL case and the "another collection" case. Remove the duplicate check in update_affinity_collection(). Signed-off-by: Gavin Shan [maz: repainted commit message] Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220923065447.323445-1-gshan@redhat.com --- arch/arm64/kvm/vgic/vgic-its.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 9d3299a70242..24d7778d1ce6 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -406,7 +406,7 @@ static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its, struct its_ite *ite; for_each_lpi_its(device, ite, its) { - if (!ite->collection || coll != ite->collection) + if (ite->collection != coll) continue; update_affinity_ite(kvm, ite); From b2a4d007c347b4cb4c60f7512733c3f8300a129c Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Tue, 20 Sep 2022 12:06:58 -0700 Subject: [PATCH 13/57] KVM: arm64: Ignore kvm-arm.mode if !is_hyp_mode_available() Ignore kvm-arm.mode if !is_hyp_mode_available(). Specifically, we want to avoid switching kvm_mode to KVM_MODE_PROTECTED if hypervisor mode is not available. This prevents "Protected KVM" cpu capability being reported when Linux is booting in EL1 and would not have KVM enabled. Reasonably though, we should warn if the command line is requesting a KVM mode at all if KVM isn't actually available. Allow "kvm-arm.mode=none" to skip the warning since this would disable KVM anyway. Signed-off-by: Elliot Berman Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220920190658.2880184-1-quic_eberman@quicinc.com --- arch/arm64/kvm/arm.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 2ff0ef62abad..c7fb2ad8be9f 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2270,6 +2270,16 @@ static int __init early_kvm_mode_cfg(char *arg) if (!arg) return -EINVAL; + if (strcmp(arg, "none") == 0) { + kvm_mode = KVM_MODE_NONE; + return 0; + } + + if (!is_hyp_mode_available()) { + pr_warn_once("KVM is not available. 
Ignoring kvm-arm.mode\n"); + return 0; + } + if (strcmp(arg, "protected") == 0) { if (!is_kernel_in_hyp_mode()) kvm_mode = KVM_MODE_PROTECTED; @@ -2284,11 +2294,6 @@ static int __init early_kvm_mode_cfg(char *arg) return 0; } - if (strcmp(arg, "none") == 0) { - kvm_mode = KVM_MODE_NONE; - return 0; - } - return -EINVAL; } early_param("kvm-arm.mode", early_kvm_mode_cfg); From 448e711693e48d03f7933ab3673334701b0c3f41 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Fri, 19 Aug 2022 16:21:00 +0000 Subject: [PATCH 14/57] KVM: selftests: Update top-of-file comment in psci_test Fix the comment to accurately describe the test and recently added SYSTEM_SUSPEND test case. What was once psci_cpu_on_test was renamed and extended to squeeze in a test case for PSCI SYSTEM_SUSPEND. Nonetheless, the author of those changes (whoever they may be...) failed to update the file comment to reflect what had changed. Reported-by: Reiji Watanabe Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220819162100.213854-1-oliver.upton@linux.dev --- tools/testing/selftests/kvm/aarch64/psci_test.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/psci_test.c b/tools/testing/selftests/kvm/aarch64/psci_test.c index f7621f6e938e..e0b9e81a3e09 100644 --- a/tools/testing/selftests/kvm/aarch64/psci_test.c +++ b/tools/testing/selftests/kvm/aarch64/psci_test.c @@ -1,12 +1,14 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * psci_cpu_on_test - Test that the observable state of a vCPU targeted by the - * CPU_ON PSCI call matches what the caller requested. + * psci_test - Tests relating to KVM's PSCI implementation. * * Copyright (c) 2021 Google LLC. * - * This is a regression test for a race between KVM servicing the PSCI call and - * userspace reading the vCPUs registers. + * This test includes: + * - A regression test for a race between KVM servicing the PSCI CPU_ON call + * and userspace reading the targeted vCPU's registers. + * - A test for KVM's handling of PSCI SYSTEM_SUSPEND and the associated + * KVM_SYSTEM_EVENT_SUSPEND UAPI. */ #define _GNU_SOURCE From bf3f11581893494a5fb01eb87b99627edc2a85ff Mon Sep 17 00:00:00 2001 From: Vipin Sharma Date: Wed, 21 Sep 2022 23:24:51 -0700 Subject: [PATCH 15/57] KVM: selftests: Check result in hyperv_features for successful hypercalls Commit cc5851c6be86 ("KVM: selftests: Use exception fixup for #UD/#GP Hyper-V MSR/hcall tests") introduced a wrong guest assert in guest_hcall(). It is not checking the successful hypercall results and only checks the result when a fault happens. GUEST_ASSERT_2(!hcall->ud_expected || res == hcall->expect, hcall->expect, res); Correct the assertion by only checking results of the successful hypercalls. This issue was observed when this test started failing after building it in Clang. Above guest assert statement fails because "res" is not equal to "hcall->expect" when "hcall->ud_expected" is true. "res" gets some garbage value in Clang from the RAX register. In GCC, RAX is 0 because it using RAX for @output_address in the asm statement and resetting it to 0 before using it as output operand in the same asm statement. Clang is not using RAX for @output_address. 
Fixes: cc5851c6be86 ("KVM: selftests: Use exception fixup for #UD/#GP Hyper-V MSR/hcall tests") Signed-off-by: Vipin Sharma Suggested-by: Sean Christopherson Reviewed-by: Jim Mattson Reviewed-by: Vitaly Kuznetsov Link: https://lore.kernel.org/r/20220922062451.2927010-1-vipinsh@google.com [sean: wrap changelog at ~75 chars, move -EFAULT change to separate patch] Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/hyperv_features.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c index 79ab0152d281..ad01868548f9 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c @@ -81,13 +81,13 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall) } vector = hypercall(hcall->control, input, output, &res); - if (hcall->ud_expected) + if (hcall->ud_expected) { GUEST_ASSERT_2(vector == UD_VECTOR, hcall->control, vector); - else + } else { GUEST_ASSERT_2(!vector, hcall->control, vector); + GUEST_ASSERT_2(res == hcall->expect, hcall->expect, res); + } - GUEST_ASSERT_2(!hcall->ud_expected || res == hcall->expect, - hcall->expect, res); GUEST_DONE(); } From dfb45db43e9f6283a79230c8ea9cb589f14791b0 Mon Sep 17 00:00:00 2001 From: Vipin Sharma Date: Wed, 21 Sep 2022 23:24:51 -0700 Subject: [PATCH 16/57] KVM: selftests: Load RAX with -EFAULT before Hyper-V hypercall Load RAX with -EFAULT prior to making a Hyper-V hypercall so that tests can't get false negatives due to the compiler coincidentally loading the "right" value into RAX, i.e. to ensure that _KVM_ and not the compiler is correctly clearing RAX on a successful hypercall. Note, initializing *hv_status (in C code) to -EFAULT is not sufficient to avoid false negatives, as the compiler can still "clobber" RAX and thus load garbage into *hv_status if the hypercall faults (or if KVM doesn't set RAX). Suggested-by: Sean Christopherson Signed-off-by: Vipin Sharma Reviewed-by: Vitaly Kuznetsov Link: https://lore.kernel.org/r/20220922062451.2927010-1-vipinsh@google.com [sean: move to separate patch, massage changelog] Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/hyperv_features.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c index ad01868548f9..4d55e038c2d7 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c @@ -26,7 +26,8 @@ static inline uint8_t hypercall(u64 control, vm_vaddr_t input_address, : "=a" (*hv_status), "+c" (control), "+d" (input_address), KVM_ASM_SAFE_OUTPUTS(vector) - : [output_address] "r"(output_address) + : [output_address] "r"(output_address), + "a" (-EFAULT) : "cc", "memory", "r8", KVM_ASM_SAFE_CLOBBERS); return vector; } From 31d3b871f5ee9b195d90b4d14b74a7864209c6e8 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Thu, 22 Sep 2022 10:39:41 +0200 Subject: [PATCH 17/57] KVM: selftests: Don't set reserved bits for invalid Hyper-V hypercall number Bits 27 through 31 in Hyper-V hypercall 'control' are reserved (see HV_HYPERCALL_RSVD0_MASK) but '0xdeadbeef' includes them. This causes KVM to return HV_STATUS_INVALID_HYPERCALL_INPUT instead of the expected HV_STATUS_INVALID_HYPERCALL_CODE. 
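
[Illustrative note] The distinction is easy to check in isolation. The mask below simply encodes "bits 27 through 31" from the paragraph above; treat it as an assumption rather than the exact kernel definition of HV_HYPERCALL_RSVD0_MASK:

	#include <stdint.h>
	#include <stdio.h>

	/* Bits 27..31 of the hypercall 'control' word are reserved, per the text above. */
	#define HCALL_RSVD_BITS	(0x1fULL << 27)

	int main(void)
	{
		uint64_t old_ctrl = 0xdeadbeef;	/* reserved bits set   -> HV_STATUS_INVALID_HYPERCALL_INPUT */
		uint64_t new_ctrl = 0xbeef;	/* reserved bits clear -> HV_STATUS_INVALID_HYPERCALL_CODE  */

		printf("0x%llx: reserved bits %s\n", (unsigned long long)old_ctrl,
		       (old_ctrl & HCALL_RSVD_BITS) ? "set" : "clear");
		printf("0x%llx: reserved bits %s\n", (unsigned long long)new_ctrl,
		       (new_ctrl & HCALL_RSVD_BITS) ? "set" : "clear");
		return 0;
	}
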
Signed-off-by: Vitaly Kuznetsov Link: https://lore.kernel.org/all/87fsgjol20.fsf@redhat.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/hyperv_features.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c index 4d55e038c2d7..05b32e550a80 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c @@ -508,7 +508,7 @@ static void guest_test_hcalls_access(void) switch (stage) { case 0: feat->eax |= HV_MSR_HYPERCALL_AVAILABLE; - hcall->control = 0xdeadbeef; + hcall->control = 0xbeef; hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE; break; From c23981df6642eec1da94a8125ec0ec402f7b1b7b Mon Sep 17 00:00:00 2001 From: Like Xu Date: Wed, 31 Aug 2022 16:53:22 +0800 Subject: [PATCH 18/57] KVM: x86/pmu: Avoid setting BIT_ULL(-1) to pmu->host_cross_mapped_mask In the extreme case of host counters multiplexing and contention, the perf_event requested by the guest's pebs counter is not allocated to any actual physical counter, in which case hw.idx is bookkept as -1, resulting in an out-of-bounds access to host_cross_mapped_mask. Fixes: 854250329c02 ("KVM: x86/pmu: Disable guest PEBS temporarily in two rare situations") Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20220831085328.45489-2-likexu@tencent.com [sean: expand comment to explain how a negative idx can be encountered] Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/pmu_intel.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index c399637a3a79..78dec4dc6e6f 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -776,20 +776,23 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu) void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu) { struct kvm_pmc *pmc = NULL; - int bit; + int bit, hw_idx; for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl, X86_PMC_IDX_MAX) { pmc = intel_pmc_idx_to_pmc(pmu, bit); if (!pmc || !pmc_speculative_in_use(pmc) || - !intel_pmc_is_enabled(pmc)) + !intel_pmc_is_enabled(pmc) || !pmc->perf_event) continue; - if (pmc->perf_event && pmc->idx != pmc->perf_event->hw.idx) { - pmu->host_cross_mapped_mask |= - BIT_ULL(pmc->perf_event->hw.idx); - } + /* + * A negative index indicates the event isn't mapped to a + * physical counter in the host, e.g. due to contention. + */ + hw_idx = pmc->perf_event->hw.idx; + if (hw_idx != pmc->idx && hw_idx > -1) + pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx); } } From f331601c65ad217a5c000ce20c26266d3f0aceb3 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Wed, 31 Aug 2022 16:53:23 +0800 Subject: [PATCH 19/57] KVM: x86/pmu: Don't generate PEBS records for emulated instructions KVM will accumulate an enabled counter for at least INSTRUCTIONS or BRANCH_INSTRUCTION hw event from any KVM emulated instructions, generating emulated overflow interrupt on counter overflow, which in theory should also happen when the PEBS counter overflows but it currently lacks this part of the underlying support (e.g. through software injection of records in the irq context or a lazy approach). In this case, KVM skips the injection of this BUFFER_OVF PMI (effectively dropping one PEBS record) and let the overflow counter move on. The loss of a single sample does not introduce a loss of accuracy, but is easily noticeable for certain specific instructions. 
This issue is expected to be addressed along with the issue of PEBS cross-mapped counters with a slow-path proposal. Fixes: 79f3e3b58386 ("KVM: x86/pmu: Reprogram PEBS event to emulate guest PEBS counter") Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20220831085328.45489-3-likexu@tencent.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/pmu.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 02f9e4f245bd..390d697efde1 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -106,9 +106,19 @@ static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi) return; if (pmc->perf_event && pmc->perf_event->attr.precise_ip) { - /* Indicate PEBS overflow PMI to guest. */ - skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, - (unsigned long *)&pmu->global_status); + if (!in_pmi) { + /* + * TODO: KVM is currently _choosing_ to not generate records + * for emulated instructions, avoiding BUFFER_OVF PMI when + * there are no records. Strictly speaking, it should be done + * as well in the right context to improve sampling accuracy. + */ + skip_pmi = true; + } else { + /* Indicate PEBS overflow PMI to guest. */ + skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, + (unsigned long *)&pmu->global_status); + } } else { __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); } From c0245b774203f7341ddb1cce29a6ee607857f325 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Thu, 22 Sep 2022 13:40:38 -0700 Subject: [PATCH 20/57] KVM: x86/pmu: Refactor PERF_GLOBAL_CTRL update helper for reuse by PEBS Extract the "global ctrl" specific bits out of global_ctrl_changed() so that the helper only deals with reprogramming general purpose counters, and rename the helper accordingly. PEBS needs the same logic, i.e needs to reprogram counters associated when PEBS_ENABLE bits are toggled, and will use the helper in a future fix. No functional change intended. Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20220831085328.45489-4-likexu@tencent.com [sean: split to separate patch, write changelog] Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/pmu_intel.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 78dec4dc6e6f..5592b1259e1b 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -68,15 +68,11 @@ static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) } } -/* function is called when global control register has been updated. 
*/ -static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data) +static void reprogram_counters(struct kvm_pmu *pmu, u64 diff) { int bit; - u64 diff = pmu->global_ctrl ^ data; struct kvm_pmc *pmc; - pmu->global_ctrl = data; - for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) { pmc = intel_pmc_idx_to_pmc(pmu, bit); if (pmc) @@ -397,7 +393,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) struct kvm_pmc *pmc; u32 msr = msr_info->index; u64 data = msr_info->data; - u64 reserved_bits; + u64 reserved_bits, diff; switch (msr) { case MSR_CORE_PERF_FIXED_CTR_CTRL: @@ -418,7 +414,9 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (pmu->global_ctrl == data) return 0; if (kvm_valid_perf_global_ctrl(pmu, data)) { - global_ctrl_changed(pmu, data); + diff = pmu->global_ctrl ^ data; + pmu->global_ctrl = data; + reprogram_counters(pmu, diff); return 0; } break; From cf52de619c67bd1f6b1cf2751c3827815f74a5a5 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Wed, 31 Aug 2022 16:53:24 +0800 Subject: [PATCH 21/57] KVM: x86/pmu: Avoid using PEBS perf_events for normal counters The check logic in the pmc_resume_counter() to determine whether a perf_event is reusable is partial and flawed, especially when it comes to a pseudocode sequence (contrived, but valid) like: - enabling a counter and its PEBS bit - enable global_ctrl - run workload - disable only the PEBS bit, leaving the global_ctrl bit enabled In this corner case, a perf_event created for PEBS can be reused by a normal counter before it has been released and recreated, and when this normal counter overflows, it triggers a PEBS interrupt (precise_ip != 0). To address this issue, reprogram all affected counters when PEBS_ENABLE change and reuse a counter if and only if PEBS exactly matches precise. 
Fixes: 79f3e3b58386 ("KVM: x86/pmu: Reprogram PEBS event to emulate guest PEBS counter") Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20220831085328.45489-4-likexu@tencent.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/pmu.c | 4 ++-- arch/x86/kvm/vmx/pmu_intel.c | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 390d697efde1..d9b9a0f0db17 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -237,8 +237,8 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc) get_sample_period(pmc, pmc->counter))) return false; - if (!test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) && - pmc->perf_event->attr.precise_ip) + if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) != + (!!pmc->perf_event->attr.precise_ip)) return false; /* reuse perf_event to serve as pmc_reprogram_counter() does*/ diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 5592b1259e1b..25b70a85bef5 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -431,7 +431,9 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (pmu->pebs_enable == data) return 0; if (!(data & pmu->pebs_enable_mask)) { + diff = pmu->pebs_enable ^ data; pmu->pebs_enable = data; + reprogram_counters(pmu, diff); return 0; } break; From 5c6a67f4f265f84e1b8582f82562dda2a53f52d1 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Wed, 31 Aug 2022 16:53:27 +0800 Subject: [PATCH 22/57] KVM: x86/svm/pmu: Direct access pmu->gp_counter[] to implement amd_*_to_pmc() Access PMU counters on AMD by directly indexing the array of general purpose counters instead of translating the PMC index to an MSR index. AMD only supports gp counters, there's no need to translate a PMC index to an MSR index and back to a PMC index. Opportunistically apply array_index_nospec() to reduce the attack surface for speculative execution and remove the dead code. Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20220831085328.45489-7-likexu@tencent.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/pmu.c | 41 +++++------------------------------------ 1 file changed, 5 insertions(+), 36 deletions(-) diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index f24613a108c5..d1c3b766841e 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -33,23 +33,6 @@ enum index { INDEX_ERROR, }; -static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type) -{ - struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); - - if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) { - if (type == PMU_TYPE_COUNTER) - return MSR_F15H_PERF_CTR; - else - return MSR_F15H_PERF_CTL; - } else { - if (type == PMU_TYPE_COUNTER) - return MSR_K7_PERFCTR0; - else - return MSR_K7_EVNTSEL0; - } -} - static enum index msr_to_index(u32 msr) { switch (msr) { @@ -141,18 +124,12 @@ static bool amd_pmc_is_enabled(struct kvm_pmc *pmc) static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) { - unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER); - struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); + unsigned int num_counters = pmu->nr_arch_gp_counters; - if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) { - /* - * The idx is contiguous. The MSRs are not. The counter MSRs - * are interleaved with the event select MSRs. 
- */ - pmc_idx *= 2; - } + if (pmc_idx >= num_counters) + return NULL; - return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER); + return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)]; } static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx) @@ -168,15 +145,7 @@ static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx) static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu, unsigned int idx, u64 *mask) { - struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); - struct kvm_pmc *counters; - - idx &= ~(3u << 30); - if (idx >= pmu->nr_arch_gp_counters) - return NULL; - counters = pmu->gp_counters; - - return &counters[idx]; + return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30)); } static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) From ea5cbc9ff839091a86558d4e2c082225b13e0055 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Wed, 31 Aug 2022 16:53:28 +0800 Subject: [PATCH 23/57] KVM: x86/svm/pmu: Rewrite get_gp_pmc_amd() for more counters scalability If the number of AMD gp counters continues to grow, the code will be very clumsy and the switch-case design of inline get_gp_pmc_amd() will also bloat the kernel text size. The target code is taught to manage two groups of MSRs, each representing a different version of the AMD PMU counter MSRs. The MSR addresses of each group are contiguous, with no holes, and there is no intersection between two sets of addresses, but they are discrete in functionality by design like this: [Group A : All counter MSRs are tightly bound to all event select MSRs ] MSR_K7_EVNTSEL0 0xc0010000 MSR_K7_EVNTSELi 0xc0010000 + i ... MSR_K7_EVNTSEL3 0xc0010003 MSR_K7_PERFCTR0 0xc0010004 MSR_K7_PERFCTRi 0xc0010004 + i ... MSR_K7_PERFCTR3 0xc0010007 [Group B : The counter MSRs are interleaved with the event select MSRs ] MSR_F15H_PERF_CTL0 0xc0010200 MSR_F15H_PERF_CTR0 (0xc0010200 + 1) ... MSR_F15H_PERF_CTLi (0xc0010200 + 2 * i) MSR_F15H_PERF_CTRi (0xc0010200 + 2 * i + 1) ... MSR_F15H_PERF_CTL5 (0xc0010200 + 2 * 5) MSR_F15H_PERF_CTR5 (0xc0010200 + 2 * 5 + 1) Rewrite get_gp_pmc_amd() in this way: first determine which group of registers is accessed, then determine if it matches its requested type, applying different scaling ratios respectively, and finally get pmc_idx to pass into amd_pmc_idx_to_pmc(). 
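As a rough sketch of the index math described above (the helper name gp_msr_to_index() is hypothetical and assumes the MSR has already been validated against one of the two groups):

static unsigned int gp_msr_to_index(u32 msr)
{
	/* Group B: CTL/CTR pairs, so halve the offset; the low bit picks CTL vs. CTR. */
	if (msr >= MSR_F15H_PERF_CTL0 && msr <= MSR_F15H_PERF_CTR5)
		return (msr - MSR_F15H_PERF_CTL0) / 2;

	/* Group A: event selects and counters each map 1:1 onto counter indices. */
	if (msr >= MSR_K7_EVNTSEL0 && msr <= MSR_K7_EVNTSEL3)
		return msr - MSR_K7_EVNTSEL0;

	return msr - MSR_K7_PERFCTR0;
}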
Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20220831085328.45489-8-likexu@tencent.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/pmu.c | 88 ++++++++++-------------------------------- 1 file changed, 20 insertions(+), 68 deletions(-) diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index d1c3b766841e..b68956299fa8 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -23,90 +23,52 @@ enum pmu_type { PMU_TYPE_EVNTSEL, }; -enum index { - INDEX_ZERO = 0, - INDEX_ONE, - INDEX_TWO, - INDEX_THREE, - INDEX_FOUR, - INDEX_FIVE, - INDEX_ERROR, -}; - -static enum index msr_to_index(u32 msr) +static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) { - switch (msr) { - case MSR_F15H_PERF_CTL0: - case MSR_F15H_PERF_CTR0: - case MSR_K7_EVNTSEL0: - case MSR_K7_PERFCTR0: - return INDEX_ZERO; - case MSR_F15H_PERF_CTL1: - case MSR_F15H_PERF_CTR1: - case MSR_K7_EVNTSEL1: - case MSR_K7_PERFCTR1: - return INDEX_ONE; - case MSR_F15H_PERF_CTL2: - case MSR_F15H_PERF_CTR2: - case MSR_K7_EVNTSEL2: - case MSR_K7_PERFCTR2: - return INDEX_TWO; - case MSR_F15H_PERF_CTL3: - case MSR_F15H_PERF_CTR3: - case MSR_K7_EVNTSEL3: - case MSR_K7_PERFCTR3: - return INDEX_THREE; - case MSR_F15H_PERF_CTL4: - case MSR_F15H_PERF_CTR4: - return INDEX_FOUR; - case MSR_F15H_PERF_CTL5: - case MSR_F15H_PERF_CTR5: - return INDEX_FIVE; - default: - return INDEX_ERROR; - } + unsigned int num_counters = pmu->nr_arch_gp_counters; + + if (pmc_idx >= num_counters) + return NULL; + + return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)]; } static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, enum pmu_type type) { struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); + unsigned int idx; if (!vcpu->kvm->arch.enable_pmu) return NULL; switch (msr) { - case MSR_F15H_PERF_CTL0: - case MSR_F15H_PERF_CTL1: - case MSR_F15H_PERF_CTL2: - case MSR_F15H_PERF_CTL3: - case MSR_F15H_PERF_CTL4: - case MSR_F15H_PERF_CTL5: + case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5: if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) return NULL; - fallthrough; + /* + * Each PMU counter has a pair of CTL and CTR MSRs. CTLn + * MSRs (accessed via EVNTSEL) are even, CTRn MSRs are odd. + */ + idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2); + if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL)) + return NULL; + break; case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: if (type != PMU_TYPE_EVNTSEL) return NULL; + idx = msr - MSR_K7_EVNTSEL0; break; - case MSR_F15H_PERF_CTR0: - case MSR_F15H_PERF_CTR1: - case MSR_F15H_PERF_CTR2: - case MSR_F15H_PERF_CTR3: - case MSR_F15H_PERF_CTR4: - case MSR_F15H_PERF_CTR5: - if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) - return NULL; - fallthrough; case MSR_K7_PERFCTR0 ... 
MSR_K7_PERFCTR3: if (type != PMU_TYPE_COUNTER) return NULL; + idx = msr - MSR_K7_PERFCTR0; break; default: return NULL; } - return &pmu->gp_counters[msr_to_index(msr)]; + return amd_pmc_idx_to_pmc(pmu, idx); } static bool amd_hw_event_available(struct kvm_pmc *pmc) @@ -122,16 +84,6 @@ static bool amd_pmc_is_enabled(struct kvm_pmc *pmc) return true; } -static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) -{ - unsigned int num_counters = pmu->nr_arch_gp_counters; - - if (pmc_idx >= num_counters) - return NULL; - - return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)]; -} - static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); From 43b233b1582de501e441deb7c4ed1f944e60b1f9 Mon Sep 17 00:00:00 2001 From: Wei-Lin Chang Date: Thu, 29 Sep 2022 12:28:39 +0800 Subject: [PATCH 24/57] KVM: arm64: Fix comment typo in nvhe/switch.c Fix the comment of __hyp_vgic_restore_state() to say VHE instead of VEH, and change the underscore to a dash to match the comment above __hyp_vgic_save_state(). Signed-off-by: Wei-Lin Chang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220929042839.24277-1-r09922117@csie.ntu.edu.tw --- arch/arm64/kvm/hyp/nvhe/switch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 9f6385702061..8e9d49a964be 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -143,7 +143,7 @@ static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu) } } -/* Restore VGICv3 state on non_VEH systems */ +/* Restore VGICv3 state on non-VHE systems */ static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu) { if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) { From 8929bc9659640f35dd2ef8373263cbd885b4a072 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 26 Sep 2022 15:51:15 +0100 Subject: [PATCH 25/57] KVM: Use acquire/release semantics when accessing dirty ring GFN state The current implementation of the dirty ring has an implicit requirement that stores to the dirty ring from userspace must: - be ordered with one another - be visible to another CPU executing a ring reset While these implicit requirements work well for x86 (and any other TSO-like architecture), they do not work for more relaxed architectures such as arm64, where stores to different addresses can be freely reordered and loads from these addresses may not observe writes from another CPU unless the required barriers (or acquire/release semantics) are used. In order to start fixing this, upgrade the ring reset accesses: - the kvm_dirty_gfn_harvested() helper now uses acquire semantics so it is ordered after all previous writes, including those from userspace - the kvm_dirty_gfn_set_invalid() helper now uses release semantics so that the next_slot and next_offset reads don't drift past the entry invalidation This is only a partial fix as the userspace side also needs upgrading.
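For completeness, a hedged sketch of what the matching userspace upgrade could look like (illustrative only; the helper names are hypothetical and it uses compiler atomics rather than any particular VMM's barrier helpers):

static inline bool vmm_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
	return __atomic_load_n(&gfn->flags, __ATOMIC_ACQUIRE) == KVM_DIRTY_GFN_F_DIRTY;
}

static inline void vmm_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
	__atomic_store_n(&gfn->flags, KVM_DIRTY_GFN_F_RESET, __ATOMIC_RELEASE);
}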
Signed-off-by: Marc Zyngier Reviewed-by: Gavin Shan Reviewed-by: Peter Xu Link: https://lore.kernel.org/r/20220926145120.27974-2-maz@kernel.org --- virt/kvm/dirty_ring.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c index f4c2a6eb1666..d6fabf238032 100644 --- a/virt/kvm/dirty_ring.c +++ b/virt/kvm/dirty_ring.c @@ -74,7 +74,7 @@ int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size) static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn) { - gfn->flags = 0; + smp_store_release(&gfn->flags, 0); } static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn) @@ -84,7 +84,7 @@ static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn) static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn) { - return gfn->flags & KVM_DIRTY_GFN_F_RESET; + return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET; } int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring) From 17601bfed909fa080fcfd227b57da2bd4dc2d2a6 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 26 Sep 2022 15:51:16 +0100 Subject: [PATCH 26/57] KVM: Add KVM_CAP_DIRTY_LOG_RING_ACQ_REL capability and config option In order to differentiate between architectures that require no extra synchronisation when accessing the dirty ring and those that do, add a new capability (KVM_CAP_DIRTY_LOG_RING_ACQ_REL) that identifies the latter sort. TSO architectures can obviously advertise both, while relaxed architectures must only advertise the ACQ_REL version. This requires some configuration symbol rejigging, with HAVE_KVM_DIRTY_RING being only indirectly selected by two top-level config symbols: - HAVE_KVM_DIRTY_RING_TSO for strongly ordered architectures (x86) - HAVE_KVM_DIRTY_RING_ACQ_REL for weakly ordered architectures (arm64) Suggested-by: Paolo Bonzini Signed-off-by: Marc Zyngier Reviewed-by: Gavin Shan Reviewed-by: Peter Xu Link: https://lore.kernel.org/r/20220926145120.27974-3-maz@kernel.org --- arch/x86/kvm/Kconfig | 2 +- include/uapi/linux/kvm.h | 1 + virt/kvm/Kconfig | 14 ++++++++++++++ virt/kvm/kvm_main.c | 9 ++++++++- 4 files changed, 24 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index e3cbd7706136..876748b236ff 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -28,7 +28,7 @@ config KVM select HAVE_KVM_IRQCHIP select HAVE_KVM_PFNCACHE select HAVE_KVM_IRQFD - select HAVE_KVM_DIRTY_RING + select HAVE_KVM_DIRTY_RING_TSO select IRQ_BYPASS_MANAGER select HAVE_KVM_IRQ_BYPASS select HAVE_KVM_IRQ_ROUTING diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index eed0315a77a6..0d5d4419139a 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1177,6 +1177,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220 #define KVM_CAP_S390_ZPCI_OP 221 #define KVM_CAP_S390_CPU_TOPOLOGY 222 +#define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index a8c5c9f06b3c..800f9470e36b 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -19,6 +19,20 @@ config HAVE_KVM_IRQ_ROUTING config HAVE_KVM_DIRTY_RING bool +# Only strongly ordered architectures can select this, as it doesn't +# put any explicit constraint on userspace ordering. They can also +# select the _ACQ_REL version.
+config HAVE_KVM_DIRTY_RING_TSO + bool + select HAVE_KVM_DIRTY_RING + depends on X86 + +# Weakly ordered architectures can only select this, advertising +# to userspace the additional ordering requirements. +config HAVE_KVM_DIRTY_RING_ACQ_REL + bool + select HAVE_KVM_DIRTY_RING + config HAVE_KVM_EVENTFD bool select EVENTFD diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 584a5bab3af3..5b064dbadaf4 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -4475,7 +4475,13 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) case KVM_CAP_NR_MEMSLOTS: return KVM_USER_MEM_SLOTS; case KVM_CAP_DIRTY_LOG_RING: -#ifdef CONFIG_HAVE_KVM_DIRTY_RING +#ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO + return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); +#else + return 0; +#endif + case KVM_CAP_DIRTY_LOG_RING_ACQ_REL: +#ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); #else return 0; @@ -4580,6 +4586,7 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, return 0; } case KVM_CAP_DIRTY_LOG_RING: + case KVM_CAP_DIRTY_LOG_RING_ACQ_REL: return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); default: return kvm_vm_ioctl_enable_cap(kvm, cap); From fc0693d4e5afe3c110503c3afa9f60600f9e964b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 26 Sep 2022 15:51:17 +0100 Subject: [PATCH 27/57] KVM: x86: Select CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL Since x86 is TSO (give or take), allow it to advertise the new ACQ_REL version of the dirty ring capability. No other change is required for it. Signed-off-by: Marc Zyngier Reviewed-by: Gavin Shan Reviewed-by: Peter Xu Link: https://lore.kernel.org/r/20220926145120.27974-4-maz@kernel.org --- arch/x86/kvm/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 876748b236ff..67be7f217e37 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -29,6 +29,7 @@ config KVM select HAVE_KVM_PFNCACHE select HAVE_KVM_IRQFD select HAVE_KVM_DIRTY_RING_TSO + select HAVE_KVM_DIRTY_RING_ACQ_REL select IRQ_BYPASS_MANAGER select HAVE_KVM_IRQ_BYPASS select HAVE_KVM_IRQ_ROUTING From 671c8c7f9f2349d8b2176ad810f1406794011f63 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 26 Sep 2022 15:51:18 +0100 Subject: [PATCH 28/57] KVM: Document weakly ordered architecture requirements for dirty ring Now that the kernel can expose to userspace that its dirty ring management relies on explicit ordering, document these new requirements for VMMs to do the right thing. Signed-off-by: Marc Zyngier Reviewed-by: Gavin Shan Reviewed-by: Peter Xu Link: https://lore.kernel.org/r/20220926145120.27974-5-maz@kernel.org --- Documentation/virt/kvm/api.rst | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index abd7c32126ce..32427ea160df 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -8019,8 +8019,8 @@ guest according to the bits in the KVM_CPUID_FEATURES CPUID leaf (0x40000001). Otherwise, a guest may use the paravirtual features regardless of what has actually been exposed through the CPUID leaf. -8.29 KVM_CAP_DIRTY_LOG_RING ---------------------------- +8.29 KVM_CAP_DIRTY_LOG_RING/KVM_CAP_DIRTY_LOG_RING_ACQ_REL +---------------------------------------------------------- :Architectures: x86 :Parameters: args[0] - size of the dirty log ring @@ -8078,6 +8078,11 @@ on to the next GFN. 
The userspace should continue to do this until the flags of a GFN have the DIRTY bit cleared, meaning that it has harvested all the dirty GFNs that were available. +Note that on weakly ordered architectures, userspace accesses to the +ring buffer (and more specifically the 'flags' field) must be ordered, +using load-acquire/store-release accessors when available, or any +other memory barrier that will ensure this ordering. + It's not necessary for userspace to harvest the all dirty GFNs at once. However it must collect the dirty GFNs in sequence, i.e., the userspace program cannot skip one dirty GFN to collect the one next to it. @@ -8106,6 +8111,14 @@ KVM_CAP_DIRTY_LOG_RING with an acceptable dirty ring size, the virtual machine will switch to ring-buffer dirty page tracking and further KVM_GET_DIRTY_LOG or KVM_CLEAR_DIRTY_LOG ioctls will fail. +NOTE: KVM_CAP_DIRTY_LOG_RING_ACQ_REL is the only capability that +should be exposed by weakly ordered architecture, in order to indicate +the additional memory ordering requirements imposed on userspace when +reading the state of an entry and mutating it from DIRTY to HARVESTED. +Architecture with TSO-like ordering (such as x86) are allowed to +expose both KVM_CAP_DIRTY_LOG_RING and KVM_CAP_DIRTY_LOG_RING_ACQ_REL +to userspace. + 8.30 KVM_CAP_XEN_HVM -------------------- From 4eb6486cb43c93382c27a2659ba978c660e98498 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 26 Sep 2022 15:51:19 +0100 Subject: [PATCH 29/57] KVM: selftests: dirty-log: Upgrade flag accesses to acquire/release semantics In order to preserve ordering, make sure that the flag accesses in the dirty log are done using acquire/release accessors. Signed-off-by: Marc Zyngier Reviewed-by: Gavin Shan Reviewed-by: Peter Xu Link: https://lore.kernel.org/r/20220926145120.27974-6-maz@kernel.org --- tools/testing/selftests/kvm/dirty_log_test.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c index 9c883c94d478..53627add8a7c 100644 --- a/tools/testing/selftests/kvm/dirty_log_test.c +++ b/tools/testing/selftests/kvm/dirty_log_test.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "kvm_util.h" #include "test_util.h" @@ -279,12 +280,12 @@ static void dirty_ring_create_vm_done(struct kvm_vm *vm) static inline bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn) { - return gfn->flags == KVM_DIRTY_GFN_F_DIRTY; + return smp_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY; } static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn) { - gfn->flags = KVM_DIRTY_GFN_F_RESET; + smp_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET); } static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns, From 4b3402f1f4d9860301d6d5cd7aff3b67f678d577 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 26 Sep 2022 15:51:20 +0100 Subject: [PATCH 30/57] KVM: selftests: dirty-log: Use KVM_CAP_DIRTY_LOG_RING_ACQ_REL if available Pick KVM_CAP_DIRTY_LOG_RING_ACQ_REL if exposed by the kernel. 
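A hedged sketch of the same preference logic in a generic VMM (illustrative only; raw ioctls on the VM fd, error handling omitted):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_dirty_ring(int vm_fd, __u32 ring_bytes)
{
	struct kvm_enable_cap cap = { .args = { ring_bytes } };

	/* Prefer the ACQ_REL flavour whenever the kernel advertises it. */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING_ACQ_REL) > 0)
		cap.cap = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
	else
		cap.cap = KVM_CAP_DIRTY_LOG_RING;

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}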
Signed-off-by: Marc Zyngier Reviewed-by: Gavin Shan Reviewed-by: Peter Xu Link: https://lore.kernel.org/r/20220926145120.27974-7-maz@kernel.org --- tools/testing/selftests/kvm/dirty_log_test.c | 3 ++- tools/testing/selftests/kvm/lib/kvm_util.c | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c index 53627add8a7c..b5234d6efbe1 100644 --- a/tools/testing/selftests/kvm/dirty_log_test.c +++ b/tools/testing/selftests/kvm/dirty_log_test.c @@ -265,7 +265,8 @@ static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err) static bool dirty_ring_supported(void) { - return kvm_has_cap(KVM_CAP_DIRTY_LOG_RING); + return (kvm_has_cap(KVM_CAP_DIRTY_LOG_RING) || + kvm_has_cap(KVM_CAP_DIRTY_LOG_RING_ACQ_REL)); } static void dirty_ring_create_vm_done(struct kvm_vm *vm) diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 9889fe0d8919..411a4c0bc81c 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -82,7 +82,10 @@ unsigned int kvm_check_cap(long cap) void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size) { - vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size); + if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL)) + vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size); + else + vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size); vm->dirty_ring_size = ring_size; } From 04f2f60befc9af274c1790e626cc79334b1f4489 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 28 Sep 2022 23:36:48 +0000 Subject: [PATCH 31/57] KVM: selftests: Remove unnecessary register shuffling in fix_hypercall_test Use input constraints to load RAX and RBX when testing that KVM correctly does/doesn't patch the "wrong" hypercall. There's no need to manually load RAX and RBX, and no reason to clobber them either (KVM is not supposed to modify anything other than RAX). 
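As a standalone illustration of the constraint usage (a sketch, not the test code itself): "a" and "b" place the inputs in RAX and RBX, and "=a" reads the result back from RAX, so no explicit mov instructions or clobbers of those registers are needed.

static inline uint64_t sched_yield_hypercall(uint64_t apic_id)
{
	uint64_t ret;

	asm volatile("vmcall"
		     : "=a"(ret)
		     : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"(apic_id)
		     : "memory");
	return ret;
}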
Signed-off-by: Sean Christopherson Reviewed-by: Oliver Upton Message-Id: <20220928233652.783504-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- .../selftests/kvm/x86_64/fix_hypercall_test.c | 22 +++++++------------ 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c index e0004bd26536..6864eb0d5d14 100644 --- a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c +++ b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c @@ -30,14 +30,11 @@ static uint64_t svm_do_sched_yield(uint8_t apic_id) { uint64_t ret; - asm volatile("mov %1, %%rax\n\t" - "mov %2, %%rbx\n\t" - "svm_hypercall_insn:\n\t" + asm volatile("svm_hypercall_insn:\n\t" "vmmcall\n\t" - "mov %%rax, %0\n\t" - : "=r"(ret) - : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id) - : "rax", "rbx", "memory"); + : "=a"(ret) + : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id) + : "memory"); return ret; } @@ -47,14 +44,11 @@ static uint64_t vmx_do_sched_yield(uint8_t apic_id) { uint64_t ret; - asm volatile("mov %1, %%rax\n\t" - "mov %2, %%rbx\n\t" - "vmx_hypercall_insn:\n\t" + asm volatile("vmx_hypercall_insn:\n\t" "vmcall\n\t" - "mov %%rax, %0\n\t" - : "=r"(ret) - : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id) - : "rax", "rbx", "memory"); + : "=a"(ret) + : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id) + : "memory"); return ret; } From fca6d06cd164c6c3029be6323ed06020fca0d933 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 28 Sep 2022 23:36:49 +0000 Subject: [PATCH 32/57] KVM: selftests: Hardcode VMCALL/VMMCALL opcodes in "fix hypercall" test Hardcode the VMCALL/VMMCALL opcodes in dedicated arrays instead of extracting the opcodes from inline asm, and patch in the "other" opcode so as to preserve the original opcode, i.e. the opcode that the test executes in the guest. Preserving the original opcode (by not patching the source), will make it easier to implement a check that KVM doesn't modify the opcode (the test currently only verifies that a #UD occurred). Use INT3 (0xcc) as the placeholder so that the guest will likely die a horrible death if the test's patching goes awry. As a bonus, patching from within the test dedups a decent chunk of code. 
Signed-off-by: Sean Christopherson Message-Id: <20220928233652.783504-5-seanjc@google.com> Signed-off-by: Paolo Bonzini --- .../selftests/kvm/x86_64/fix_hypercall_test.c | 45 +++++++------------ 1 file changed, 17 insertions(+), 28 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c index 6864eb0d5d14..cebc84b26352 100644 --- a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c +++ b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c @@ -25,27 +25,16 @@ static void guest_ud_handler(struct ex_regs *regs) GUEST_DONE(); } -extern uint8_t svm_hypercall_insn[HYPERCALL_INSN_SIZE]; -static uint64_t svm_do_sched_yield(uint8_t apic_id) +static const uint8_t vmx_vmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xc1 }; +static const uint8_t svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 }; + +extern uint8_t hypercall_insn[HYPERCALL_INSN_SIZE]; +static uint64_t do_sched_yield(uint8_t apic_id) { uint64_t ret; - asm volatile("svm_hypercall_insn:\n\t" - "vmmcall\n\t" - : "=a"(ret) - : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id) - : "memory"); - - return ret; -} - -extern uint8_t vmx_hypercall_insn[HYPERCALL_INSN_SIZE]; -static uint64_t vmx_do_sched_yield(uint8_t apic_id) -{ - uint64_t ret; - - asm volatile("vmx_hypercall_insn:\n\t" - "vmcall\n\t" + asm volatile("hypercall_insn:\n\t" + ".byte 0xcc,0xcc,0xcc\n\t" : "=a"(ret) : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id) : "memory"); @@ -55,25 +44,25 @@ static uint64_t vmx_do_sched_yield(uint8_t apic_id) static void guest_main(void) { - uint8_t *native_hypercall_insn, *hypercall_insn; - uint8_t apic_id; - - apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID)); + const uint8_t *native_hypercall_insn; + const uint8_t *other_hypercall_insn; if (is_intel_cpu()) { - native_hypercall_insn = vmx_hypercall_insn; - hypercall_insn = svm_hypercall_insn; - svm_do_sched_yield(apic_id); + native_hypercall_insn = vmx_vmcall; + other_hypercall_insn = svm_vmmcall; } else if (is_amd_cpu()) { - native_hypercall_insn = svm_hypercall_insn; - hypercall_insn = vmx_hypercall_insn; - vmx_do_sched_yield(apic_id); + native_hypercall_insn = svm_vmmcall; + other_hypercall_insn = vmx_vmcall; } else { GUEST_ASSERT(0); /* unreachable */ return; } + memcpy(hypercall_insn, other_hypercall_insn, HYPERCALL_INSN_SIZE); + + do_sched_yield(GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID))); + /* * The hypercall didn't #UD (guest_ud_handler() signals "done" if a #UD * occurs). Verify that a #UD is NOT expected and that KVM patched in From b7ab6d7d2cf7d5400592d1ff0448e8e0a09e5188 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 28 Sep 2022 23:36:50 +0000 Subject: [PATCH 33/57] KVM: selftests: Explicitly verify KVM doesn't patch hypercall if quirk==off Explicitly verify that KVM doesn't patch in the native hypercall if the FIX_HYPERCALL_INSN quirk is disabled. The test currently verifies that a #UD occurred, but doesn't actually verify that no patching occurred. 
Signed-off-by: Sean Christopherson Message-Id: <20220928233652.783504-6-seanjc@google.com> Signed-off-by: Paolo Bonzini --- .../selftests/kvm/x86_64/fix_hypercall_test.c | 26 +++++++++++++------ 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c index cebc84b26352..10b9482fc4d7 100644 --- a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c +++ b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c @@ -21,8 +21,8 @@ static bool ud_expected; static void guest_ud_handler(struct ex_regs *regs) { - GUEST_ASSERT(ud_expected); - GUEST_DONE(); + regs->rax = -EFAULT; + regs->rip += HYPERCALL_INSN_SIZE; } static const uint8_t vmx_vmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xc1 }; @@ -46,6 +46,7 @@ static void guest_main(void) { const uint8_t *native_hypercall_insn; const uint8_t *other_hypercall_insn; + uint64_t ret; if (is_intel_cpu()) { native_hypercall_insn = vmx_vmcall; @@ -61,15 +62,24 @@ static void guest_main(void) memcpy(hypercall_insn, other_hypercall_insn, HYPERCALL_INSN_SIZE); - do_sched_yield(GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID))); + ret = do_sched_yield(GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID))); /* - * The hypercall didn't #UD (guest_ud_handler() signals "done" if a #UD - * occurs). Verify that a #UD is NOT expected and that KVM patched in - * the native hypercall. + * If the quirk is disabled, verify that guest_ud_handler() "returned" + * -EFAULT and that KVM did NOT patch the hypercall. If the quirk is + * enabled, verify that the hypercall succeeded and that KVM patched in + * the "right" hypercall. */ - GUEST_ASSERT(!ud_expected); - GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn, HYPERCALL_INSN_SIZE)); + if (ud_expected) { + GUEST_ASSERT(ret == (uint64_t)-EFAULT); + GUEST_ASSERT(!memcmp(other_hypercall_insn, hypercall_insn, + HYPERCALL_INSN_SIZE)); + } else { + GUEST_ASSERT(!ret); + GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn, + HYPERCALL_INSN_SIZE)); + } + GUEST_DONE(); } From 53c9bdb922f40a7abf3b1642f8a39d3b94d10d62 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 28 Sep 2022 23:36:51 +0000 Subject: [PATCH 34/57] KVM: selftests: Dedup subtests of fix_hypercall_test Combine fix_hypercall_test's two subtests into a common routine, the only difference between the two is whether or not the quirk is disabled. Passing a boolean is a little gross, but using an enum to make it super obvious that the callers are enabling/disabling the quirk seems like overkill. No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Oliver Upton Message-Id: <20220928233652.783504-7-seanjc@google.com> Signed-off-by: Paolo Bonzini --- .../selftests/kvm/x86_64/fix_hypercall_test.c | 45 ++++++------------- 1 file changed, 13 insertions(+), 32 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c index 10b9482fc4d7..32f7e09ef67c 100644 --- a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c +++ b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c @@ -17,7 +17,7 @@ /* VMCALL and VMMCALL are both 3-byte opcodes. */ #define HYPERCALL_INSN_SIZE 3 -static bool ud_expected; +static bool quirk_disabled; static void guest_ud_handler(struct ex_regs *regs) { @@ -70,7 +70,7 @@ static void guest_main(void) * enabled, verify that the hypercall succeeded and that KVM patched in * the "right" hypercall. 
*/ - if (ud_expected) { + if (quirk_disabled) { GUEST_ASSERT(ret == (uint64_t)-EFAULT); GUEST_ASSERT(!memcmp(other_hypercall_insn, hypercall_insn, HYPERCALL_INSN_SIZE)); @@ -83,13 +83,6 @@ static void guest_main(void) GUEST_DONE(); } -static void setup_ud_vector(struct kvm_vcpu *vcpu) -{ - vm_init_descriptor_tables(vcpu->vm); - vcpu_init_descriptor_tables(vcpu); - vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler); -} - static void enter_guest(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; @@ -110,35 +103,23 @@ static void enter_guest(struct kvm_vcpu *vcpu) } } -static void test_fix_hypercall(void) +static void test_fix_hypercall(bool disable_quirk) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; vm = vm_create_with_one_vcpu(&vcpu, guest_main); - setup_ud_vector(vcpu); - ud_expected = false; - sync_global_to_guest(vm, ud_expected); + vm_init_descriptor_tables(vcpu->vm); + vcpu_init_descriptor_tables(vcpu); + vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler); - virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA); + if (disable_quirk) + vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, + KVM_X86_QUIRK_FIX_HYPERCALL_INSN); - enter_guest(vcpu); -} - -static void test_fix_hypercall_disabled(void) -{ - struct kvm_vcpu *vcpu; - struct kvm_vm *vm; - - vm = vm_create_with_one_vcpu(&vcpu, guest_main); - setup_ud_vector(vcpu); - - vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, - KVM_X86_QUIRK_FIX_HYPERCALL_INSN); - - ud_expected = true; - sync_global_to_guest(vm, ud_expected); + quirk_disabled = disable_quirk; + sync_global_to_guest(vm, quirk_disabled); virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA); @@ -149,6 +130,6 @@ int main(void) { TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN); - test_fix_hypercall(); - test_fix_hypercall_disabled(); + test_fix_hypercall(false); + test_fix_hypercall(true); } From c96409d1e58905bfc8c73b630481228382ab8846 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 28 Sep 2022 23:36:52 +0000 Subject: [PATCH 35/57] Revert "KVM: selftests: Fix nested SVM tests when built with clang" Revert back to using memset() in generic_svm_setup() now that KVM selftests override memset() and friends specifically to prevent the compiler from generating fancy code and/or linking to the libc implementation. This reverts commit ed290e1c20da19fa100a3e0f421aa31b65984960. Suggested-by: Jim Mattson Signed-off-by: Sean Christopherson Message-Id: <20220928233652.783504-8-seanjc@google.com> Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/lib/x86_64/svm.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/x86_64/svm.c b/tools/testing/selftests/kvm/lib/x86_64/svm.c index 6d445886e16c..5495a92dfd5a 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/svm.c +++ b/tools/testing/selftests/kvm/lib/x86_64/svm.c @@ -60,18 +60,6 @@ static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector, seg->base = base; } -/* - * Avoid using memset to clear the vmcb, since libc may not be - * available in L1 (and, even if it is, features that libc memset may - * want to use, like AVX, may not be enabled). 
- */ -static void clear_vmcb(struct vmcb *vmcb) -{ - int n = sizeof(*vmcb) / sizeof(u32); - - asm volatile ("rep stosl" : "+c"(n), "+D"(vmcb) : "a"(0) : "memory"); -} - void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp) { struct vmcb *vmcb = svm->vmcb; @@ -88,7 +76,7 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r wrmsr(MSR_EFER, efer | EFER_SVME); wrmsr(MSR_VM_HSAVE_PA, svm->save_area_gpa); - clear_vmcb(vmcb); + memset(vmcb, 0, sizeof(*vmcb)); asm volatile ("vmsave %0\n\t" : : "a" (vmcb_gpa) : "memory"); vmcb_set_seg(&save->es, get_es(), 0, -1U, data_seg_attr); vmcb_set_seg(&save->cs, get_cs(), 0, -1U, code_seg_attr); From 62ece2c5a95cc989648c39155173d3bae27e89a3 Mon Sep 17 00:00:00 2001 From: David Matlack Date: Thu, 29 Sep 2022 11:12:05 -0700 Subject: [PATCH 36/57] KVM: selftests: Tell the compiler that code after TEST_FAIL() is unreachable Add __builtin_unreachable() to TEST_FAIL() so that the compiler knows that any code after a TEST_FAIL() is unreachable. Signed-off-by: David Matlack Message-Id: <20220929181207.2281449-2-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/include/test_util.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index 5c5a88180b6c..befc754ce9b3 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h @@ -63,8 +63,10 @@ void test_assert(bool exp, const char *exp_str, #a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \ } while (0) -#define TEST_FAIL(fmt, ...) \ - TEST_ASSERT(false, fmt, ##__VA_ARGS__) +#define TEST_FAIL(fmt, ...) do { \ + TEST_ASSERT(false, fmt, ##__VA_ARGS__); \ + __builtin_unreachable(); \ +} while (0) size_t parse_size(const char *size); From 4d2bd14319e4e0a49fc868c50ca0b2a747b58208 Mon Sep 17 00:00:00 2001 From: David Matlack Date: Thu, 29 Sep 2022 11:12:06 -0700 Subject: [PATCH 37/57] KVM: selftests: Add helpers to read kvm_{intel,amd} boolean module parameters Add helper functions for reading the value of kvm_intel and kvm_amd boolean module parameters. Use the kvm_intel variant in vm_is_unrestricted_guest() to simplify the check for kvm_intel.unrestricted_guest. No functional change intended. 
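A usage sketch (hypothetical, but using the TEST_REQUIRE() idiom already present in the selftests) of how a test can now gate itself on a module parameter; under the hood this reads the single 'Y'/'N' character exposed at /sys/module/kvm_intel/parameters/unrestricted_guest:

	/* Skip the test unless kvm_intel was loaded with unrestricted_guest=1. */
	TEST_REQUIRE(get_kvm_intel_param_bool("unrestricted_guest"));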
Signed-off-by: David Matlack Message-Id: <20220929181207.2281449-3-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- .../selftests/kvm/include/kvm_util_base.h | 4 ++ tools/testing/selftests/kvm/lib/kvm_util.c | 39 +++++++++++++++++++ .../selftests/kvm/lib/x86_64/processor.c | 13 +------ 3 files changed, 44 insertions(+), 12 deletions(-) diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h index 24fde97f6121..e42a09cd24a0 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_base.h +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h @@ -175,6 +175,10 @@ extern const struct vm_guest_mode_params vm_guest_mode_params[]; int open_path_or_exit(const char *path, int flags); int open_kvm_dev_path_or_exit(void); + +bool get_kvm_intel_param_bool(const char *param); +bool get_kvm_amd_param_bool(const char *param); + unsigned int kvm_check_cap(long cap); static inline bool kvm_has_cap(long cap) diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 9889fe0d8919..504c1e1355c3 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -50,6 +50,45 @@ int open_kvm_dev_path_or_exit(void) return _open_kvm_dev_path_or_exit(O_RDONLY); } +static bool get_module_param_bool(const char *module_name, const char *param) +{ + const int path_size = 128; + char path[path_size]; + char value; + ssize_t r; + int fd; + + r = snprintf(path, path_size, "/sys/module/%s/parameters/%s", + module_name, param); + TEST_ASSERT(r < path_size, + "Failed to construct sysfs path in %d bytes.", path_size); + + fd = open_path_or_exit(path, O_RDONLY); + + r = read(fd, &value, 1); + TEST_ASSERT(r == 1, "read(%s) failed", path); + + r = close(fd); + TEST_ASSERT(!r, "close(%s) failed", path); + + if (value == 'Y') + return true; + else if (value == 'N') + return false; + + TEST_FAIL("Unrecognized value '%c' for boolean module param", value); +} + +bool get_kvm_intel_param_bool(const char *param) +{ + return get_module_param_bool("kvm_intel", param); +} + +bool get_kvm_amd_param_bool(const char *param) +{ + return get_module_param_bool("kvm_amd", param); +} + /* * Capability * diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 2e6e61bbe81b..fab0f526fb81 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1294,20 +1294,9 @@ done: /* Returns true if kvm_intel was loaded with unrestricted_guest=1. */ bool vm_is_unrestricted_guest(struct kvm_vm *vm) { - char val = 'N'; - size_t count; - FILE *f; - /* Ensure that a KVM vendor-specific module is loaded. */ if (vm == NULL) close(open_kvm_dev_path_or_exit()); - f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r"); - if (f) { - count = fread(&val, sizeof(char), 1, f); - TEST_ASSERT(count == 1, "Unable to read from param file."); - fclose(f); - } - - return val == 'Y'; + return get_kvm_intel_param_bool("unrestricted_guest"); } From 458e98746fa852d744d34b5a8d0b1673959efc2f Mon Sep 17 00:00:00 2001 From: David Matlack Date: Thu, 29 Sep 2022 11:12:07 -0700 Subject: [PATCH 38/57] KVM: selftests: Fix nx_huge_pages_test on TDP-disabled hosts Map the test's huge page region with 2MiB virtual mappings when TDP is disabled so that KVM can shadow the region with huge pages. This fixes nx_huge_pages_test on hosts where TDP hardware support is disabled. 
Purposely do not skip this test on TDP-disabled hosts. While we don't care about NX Huge Pages on TDP-disabled hosts from a security perspective, KVM does support it, and so we should test it. For TDP-enabled hosts, continue mapping the region with 4KiB pages to ensure that KVM can map it with huge pages irrespective of the guest mappings. Fixes: 8448ec5993be ("KVM: selftests: Add NX huge pages test") Signed-off-by: David Matlack Message-Id: <20220929181207.2281449-4-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- .../selftests/kvm/include/x86_64/processor.h | 4 +++ .../selftests/kvm/lib/x86_64/processor.c | 27 +++++++++++++++++++ .../selftests/kvm/x86_64/nx_huge_pages_test.c | 19 +++++++++++-- 3 files changed, 48 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 0cbc71b7af50..e8ca0d8a6a7e 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -825,6 +825,8 @@ static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val) return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr)); } +bool kvm_is_tdp_enabled(void); + uint64_t vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu, uint64_t vaddr); void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu, @@ -855,6 +857,8 @@ enum pg_level { #define PG_SIZE_1G PG_LEVEL_SIZE(PG_LEVEL_1G) void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level); +void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, + uint64_t nr_bytes, int level); /* * Basic CPU control in CR0 diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index fab0f526fb81..39c4409ef56a 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -111,6 +111,14 @@ static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent) } } +bool kvm_is_tdp_enabled(void) +{ + if (is_intel_cpu()) + return get_kvm_intel_param_bool("ept"); + else + return get_kvm_amd_param_bool("npt"); +} + void virt_arch_pgd_alloc(struct kvm_vm *vm) { TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use " @@ -214,6 +222,25 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) __virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K); } +void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, + uint64_t nr_bytes, int level) +{ + uint64_t pg_size = PG_LEVEL_SIZE(level); + uint64_t nr_pages = nr_bytes / pg_size; + int i; + + TEST_ASSERT(nr_bytes % pg_size == 0, + "Region size not aligned: nr_bytes: 0x%lx, page size: 0x%lx", + nr_bytes, pg_size); + + for (i = 0; i < nr_pages; i++) { + __virt_pg_map(vm, vaddr, paddr, level); + + vaddr += pg_size; + paddr += pg_size; + } +} + static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu, uint64_t vaddr) diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c index cc6421716400..8c1181a5ba56 100644 --- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c +++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c @@ -112,6 +112,7 @@ void run_test(int reclaim_period_ms, bool disable_nx_huge_pages, { struct kvm_vcpu *vcpu; struct kvm_vm *vm; + uint64_t nr_bytes; void *hva; int r; @@ -141,10 +142,24 @@ void run_test(int reclaim_period_ms, bool 
disable_nx_huge_pages, HPAGE_GPA, HPAGE_SLOT, HPAGE_SLOT_NPAGES, 0); - virt_map(vm, HPAGE_GVA, HPAGE_GPA, HPAGE_SLOT_NPAGES); + nr_bytes = HPAGE_SLOT_NPAGES * vm->page_size; + + /* + * Ensure that KVM can map HPAGE_SLOT with huge pages by mapping the + * region into the guest with 2MiB pages whenever TDP is disabled (i.e. + * whenever KVM is shadowing the guest page tables). + * + * When TDP is enabled, KVM should be able to map HPAGE_SLOT with huge + * pages irrespective of the guest page size, so map with 4KiB pages + * to test that that is the case. + */ + if (kvm_is_tdp_enabled()) + virt_map_level(vm, HPAGE_GVA, HPAGE_GPA, nr_bytes, PG_LEVEL_4K); + else + virt_map_level(vm, HPAGE_GVA, HPAGE_GPA, nr_bytes, PG_LEVEL_2M); hva = addr_gpa2hva(vm, HPAGE_GPA); - memset(hva, RETURN_OPCODE, HPAGE_SLOT_NPAGES * PAGE_SIZE); + memset(hva, RETURN_OPCODE, nr_bytes); check_2m_page_count(vm, 0); check_split_count(vm, 0); From f96c48e9ddf40f6abf0a67aa94642701294daf79 Mon Sep 17 00:00:00 2001 From: Peng Hao Date: Fri, 23 Sep 2022 23:05:36 +0800 Subject: [PATCH 39/57] kvm: mmu: fix typos in struct kvm_arch No 'kvmp_mmu_pages', it should be 'kvm_mmu_page'. And struct kvm_mmu_pages and struct kvm_mmu_page are different structures, here should be kvm_mmu_page. kvm_mmu_pages is defined in arch/x86/kvm/mmu/mmu.c. Suggested-by: David Matlack Signed-off-by: Peng Hao Reviewed-by: David Matlack Message-Id: Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 61b9dd34d333..7551b6f9c31c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1280,8 +1280,8 @@ struct kvm_arch { bool tdp_mmu_enabled; /* - * List of struct kvm_mmu_pages being used as roots. - * All struct kvm_mmu_pages in the list should have + * List of kvm_mmu_page structs being used as roots. + * All kvm_mmu_page structs in the list should have * tdp_mmu_page set. * * For reads, this list is protected by: @@ -1300,8 +1300,8 @@ struct kvm_arch { struct list_head tdp_mmu_roots; /* - * List of struct kvmp_mmu_pages not being used as roots. - * All struct kvm_mmu_pages in the list should have + * List of kvm_mmu_page structs not being used as roots. + * All kvm_mmu_page structs in the list should have * tdp_mmu_page set and a tdp_mmu_root_count of 0. */ struct list_head tdp_mmu_pages; @@ -1311,9 +1311,9 @@ struct kvm_arch { * is held in read mode: * - tdp_mmu_roots (above) * - tdp_mmu_pages (above) - * - the link field of struct kvm_mmu_pages used by the TDP MMU + * - the link field of kvm_mmu_page structs used by the TDP MMU * - lpage_disallowed_mmu_pages - * - the lpage_disallowed_link field of struct kvm_mmu_pages used + * - the lpage_disallowed_link field of kvm_mmu_page structs used * by the TDP MMU * It is acceptable, but not necessary, to acquire this lock when * the thread holds the MMU lock in write mode. From e779ce9d17c44a338b4fa3be8715e3b7eb9706f0 Mon Sep 17 00:00:00 2001 From: Peng Hao Date: Fri, 23 Sep 2022 23:03:03 +0800 Subject: [PATCH 40/57] kvm: vmx: keep constant definition format consistent Keep all constants using lowercase "x". 
Signed-off-by: Peng Hao Message-Id: Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/vmx.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index c371ef695fcc..498dc600bd5c 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -309,7 +309,7 @@ enum vmcs_field { GUEST_LDTR_AR_BYTES = 0x00004820, GUEST_TR_AR_BYTES = 0x00004822, GUEST_INTERRUPTIBILITY_INFO = 0x00004824, - GUEST_ACTIVITY_STATE = 0X00004826, + GUEST_ACTIVITY_STATE = 0x00004826, GUEST_SYSENTER_CS = 0x0000482A, VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, HOST_IA32_SYSENTER_CS = 0x00004c00, From ac107abef197660c9db529fe550080ad07b46a67 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 1 Oct 2022 10:12:45 +0100 Subject: [PATCH 41/57] KVM: arm64: Advertise new kvmarm mailing list As announced on the kvmarm list, we're moving the mailing list over to kvmarm@lists.linux.dev: As you probably all know, the kvmarm mailing has been hosted on Columbia's machines for as long as the project existed (over 13 years). After all this time, the university has decided to retire the list infrastructure and asked us to find a new hosting. A new mailing list has been created on lists.linux.dev[1], and I'm kindly asking everyone interested in following the KVM/arm64 developments to start subscribing to it (and start posting your patches there). I hope that people will move over to it quickly enough that we can soon give Columbia the green light to turn their systems off. Note that the new list will only get archived automatically once we fully switch over, but I'll make sure we fill any gap and not lose any message. In the meantime, please Cc both lists. [...] [1] https://subspace.kernel.org/lists.linux.dev.html Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20221001091245.3900668-1-maz@kernel.org --- MAINTAINERS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 589517372408..f29f27717de4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11124,7 +11124,8 @@ R: Alexandru Elisei R: Suzuki K Poulose R: Oliver Upton L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -L: kvmarm@lists.cs.columbia.edu (moderated for non-subscribers) +L: kvmarm@lists.linux.dev +L: kvmarm@lists.cs.columbia.edu (deprecated, moderated for non-subscribers) S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git F: arch/arm64/include/asm/kvm* From 7fc4426959e17178654404e6bde4b920b5fee7c7 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Sun, 2 Oct 2022 10:17:58 +0530 Subject: [PATCH 42/57] riscv: Add X register names to gpr-nums When encoding instructions it's sometimes necessary to set a register field to a precise number. This is easiest to do using the x naming. 
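Concretely (an illustrative expansion, not literal patch content), the added .irp loop is equivalent to defining:

	.equ .L__gpr_num_x0, 0
	.equ .L__gpr_num_x1, 1
	...
	.equ .L__gpr_num_x31, 31

so an encoding macro can refer to, say, .L__gpr_num_x5 to hardwire a register field to x5 without going through an ABI register name.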
Signed-off-by: Andrew Jones Reviewed-by: Anup Patel Signed-off-by: Anup Patel --- arch/riscv/include/asm/gpr-num.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/riscv/include/asm/gpr-num.h b/arch/riscv/include/asm/gpr-num.h index dfee2829fc7c..efeb5edf8a3a 100644 --- a/arch/riscv/include/asm/gpr-num.h +++ b/arch/riscv/include/asm/gpr-num.h @@ -3,6 +3,11 @@ #define __ASM_GPR_NUM_H #ifdef __ASSEMBLY__ + + .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + .equ .L__gpr_num_x\num, \num + .endr + .equ .L__gpr_num_zero, 0 .equ .L__gpr_num_ra, 1 .equ .L__gpr_num_sp, 2 @@ -39,6 +44,9 @@ #else /* __ASSEMBLY__ */ #define __DEFINE_ASM_GPR_NUMS \ +" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \ +" .equ .L__gpr_num_x\\num, \\num\n" \ +" .endr\n" \ " .equ .L__gpr_num_zero, 0\n" \ " .equ .L__gpr_num_ra, 1\n" \ " .equ .L__gpr_num_sp, 2\n" \ From 5ac43ab2e3fe4e5d48ef313a99d0591021c3bbdd Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Sun, 2 Oct 2022 10:18:07 +0530 Subject: [PATCH 43/57] riscv: Introduce support for defining instructions When compiling with toolchains that haven't yet been taught about new instructions we need to encode them ourselves. Create a new file where support for instruction definitions will evolve. We initiate the file with a macro called INSN_R(), which implements the R-type instruction encoding. INSN_R() will use the assembler's .insn directive when available, which should give the assembler a chance to do some validation. When .insn is not available we fall back to manual encoding. Not only should using instruction encoding macros improve readability and maintainability of code over the alternative of inserting instructions directly (e.g. '.word 0xc0de'), but we should also gain potential for more optimized code after compilation because the compiler will have control over the input and output registers used. 
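For a concrete flavour of the intended usage, the sketch below mirrors the hfence.gvma definition and call site that a later patch in this series adopts (OPCODE_SYSTEM is the SYSTEM opcode, 0x73; an instruction is described purely by its fields and then emitted from inline asm):

#define HFENCE_GVMA(gaddr, vmid)				\
	INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(49),		\
	       __RD(0), RS1(gaddr), RS2(vmid))

	asm volatile(HFENCE_GVMA(%0, %1)
		     : : "r" (gpa >> 2), "r" (vmid) : "memory");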
Signed-off-by: Andrew Jones Reviewed-by: Anup Patel Signed-off-by: Anup Patel --- arch/riscv/Kconfig | 3 ++ arch/riscv/include/asm/insn-def.h | 90 +++++++++++++++++++++++++++++++ 2 files changed, 93 insertions(+) create mode 100644 arch/riscv/include/asm/insn-def.h diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 59d18881f35b..d6b0ffd9bf00 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -227,6 +227,9 @@ config RISCV_DMA_NONCOHERENT select ARCH_HAS_SETUP_DMA_OPS select DMA_DIRECT_REMAP +config AS_HAS_INSN + def_bool $(as-instr,.insn r 51$(comma) 0$(comma) 0$(comma) t0$(comma) t0$(comma) zero) + source "arch/riscv/Kconfig.socs" source "arch/riscv/Kconfig.erratas" diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h new file mode 100644 index 000000000000..635612e59b25 --- /dev/null +++ b/arch/riscv/include/asm/insn-def.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_INSN_DEF_H +#define __ASM_INSN_DEF_H + +#include + +#define INSN_R_FUNC7_SHIFT 25 +#define INSN_R_RS2_SHIFT 20 +#define INSN_R_RS1_SHIFT 15 +#define INSN_R_FUNC3_SHIFT 12 +#define INSN_R_RD_SHIFT 7 +#define INSN_R_OPCODE_SHIFT 0 + +#ifdef __ASSEMBLY__ + +#ifdef CONFIG_AS_HAS_INSN + + .macro insn_r, opcode, func3, func7, rd, rs1, rs2 + .insn r \opcode, \func3, \func7, \rd, \rs1, \rs2 + .endm + +#else + +#include + + .macro insn_r, opcode, func3, func7, rd, rs1, rs2 + .4byte ((\opcode << INSN_R_OPCODE_SHIFT) | \ + (\func3 << INSN_R_FUNC3_SHIFT) | \ + (\func7 << INSN_R_FUNC7_SHIFT) | \ + (.L__gpr_num_\rd << INSN_R_RD_SHIFT) | \ + (.L__gpr_num_\rs1 << INSN_R_RS1_SHIFT) | \ + (.L__gpr_num_\rs2 << INSN_R_RS2_SHIFT)) + .endm + +#endif + +#define __INSN_R(...) insn_r __VA_ARGS__ + +#else /* ! __ASSEMBLY__ */ + +#ifdef CONFIG_AS_HAS_INSN + +#define __INSN_R(opcode, func3, func7, rd, rs1, rs2) \ + ".insn r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n" + +#else + +#include +#include + +#define DEFINE_INSN_R \ + __DEFINE_ASM_GPR_NUMS \ +" .macro insn_r, opcode, func3, func7, rd, rs1, rs2\n" \ +" .4byte ((\\opcode << " __stringify(INSN_R_OPCODE_SHIFT) ") |" \ +" (\\func3 << " __stringify(INSN_R_FUNC3_SHIFT) ") |" \ +" (\\func7 << " __stringify(INSN_R_FUNC7_SHIFT) ") |" \ +" (.L__gpr_num_\\rd << " __stringify(INSN_R_RD_SHIFT) ") |" \ +" (.L__gpr_num_\\rs1 << " __stringify(INSN_R_RS1_SHIFT) ") |" \ +" (.L__gpr_num_\\rs2 << " __stringify(INSN_R_RS2_SHIFT) "))\n" \ +" .endm\n" + +#define UNDEFINE_INSN_R \ +" .purgem insn_r\n" + +#define __INSN_R(opcode, func3, func7, rd, rs1, rs2) \ + DEFINE_INSN_R \ + "insn_r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n" \ + UNDEFINE_INSN_R + +#endif + +#endif /* ! __ASSEMBLY__ */ + +#define INSN_R(opcode, func3, func7, rd, rs1, rs2) \ + __INSN_R(RV_##opcode, RV_##func3, RV_##func7, \ + RV_##rd, RV_##rs1, RV_##rs2) + +#define RV_OPCODE(v) __ASM_STR(v) +#define RV_FUNC3(v) __ASM_STR(v) +#define RV_FUNC7(v) __ASM_STR(v) +#define RV_RD(v) __ASM_STR(v) +#define RV_RS1(v) __ASM_STR(v) +#define RV_RS2(v) __ASM_STR(v) +#define __RV_REG(v) __ASM_STR(x ## v) +#define RV___RD(v) __RV_REG(v) +#define RV___RS1(v) __RV_REG(v) +#define RV___RS2(v) __RV_REG(v) + +#endif /* __ASM_INSN_DEF_H */ From bb233a11dc6b3774fd46087242d7627ecf5293ed Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Sun, 2 Oct 2022 10:18:14 +0530 Subject: [PATCH 44/57] riscv: KVM: Apply insn-def to hfence encodings Introduce hfence instruction encodings and apply them to KVM's use. 
With the self-documenting nature of the instruction encoding macros, and a spec always within arm's reach, it's safe to remove the comments, so we do that too. Signed-off-by: Andrew Jones Reviewed-by: Anup Patel Signed-off-by: Anup Patel --- arch/riscv/include/asm/insn-def.h | 10 +++ arch/riscv/kvm/tlb.c | 129 ++++-------------------------- 2 files changed, 27 insertions(+), 112 deletions(-) diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h index 635612e59b25..c8aca3c27433 100644 --- a/arch/riscv/include/asm/insn-def.h +++ b/arch/riscv/include/asm/insn-def.h @@ -87,4 +87,14 @@ #define RV___RS1(v) __RV_REG(v) #define RV___RS2(v) __RV_REG(v) +#define RV_OPCODE_SYSTEM RV_OPCODE(115) + +#define HFENCE_VVMA(vaddr, asid) \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(17), \ + __RD(0), RS1(vaddr), RS2(asid)) + +#define HFENCE_GVMA(gaddr, vmid) \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(49), \ + __RD(0), RS1(gaddr), RS2(vmid)) + #endif /* __ASM_INSN_DEF_H */ diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c index 1a76d0b1907d..1ce3394b3acf 100644 --- a/arch/riscv/kvm/tlb.c +++ b/arch/riscv/kvm/tlb.c @@ -12,22 +12,7 @@ #include #include #include - -/* - * Instruction encoding of hfence.gvma is: - * HFENCE.GVMA rs1, rs2 - * HFENCE.GVMA zero, rs2 - * HFENCE.GVMA rs1 - * HFENCE.GVMA - * - * rs1!=zero and rs2!=zero ==> HFENCE.GVMA rs1, rs2 - * rs1==zero and rs2!=zero ==> HFENCE.GVMA zero, rs2 - * rs1!=zero and rs2==zero ==> HFENCE.GVMA rs1 - * rs1==zero and rs2==zero ==> HFENCE.GVMA - * - * Instruction encoding of HFENCE.GVMA is: - * 0110001 rs2(5) rs1(5) 000 00000 1110011 - */ +#include void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid, gpa_t gpa, gpa_t gpsz, @@ -40,32 +25,14 @@ void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid, return; } - for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) { - /* - * rs1 = a0 (GPA >> 2) - * rs2 = a1 (VMID) - * HFENCE.GVMA a0, a1 - * 0110001 01011 01010 000 00000 1110011 - */ - asm volatile ("srli a0, %0, 2\n" - "add a1, %1, zero\n" - ".word 0x62b50073\n" - :: "r" (pos), "r" (vmid) - : "a0", "a1", "memory"); - } + for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) + asm volatile (HFENCE_GVMA(%0, %1) + : : "r" (pos >> 2), "r" (vmid) : "memory"); } void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid) { - /* - * rs1 = zero - * rs2 = a0 (VMID) - * HFENCE.GVMA zero, a0 - * 0110001 01010 00000 000 00000 1110011 - */ - asm volatile ("add a0, %0, zero\n" - ".word 0x62a00073\n" - :: "r" (vmid) : "a0", "memory"); + asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory"); } void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz, @@ -78,46 +45,16 @@ void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz, return; } - for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) { - /* - * rs1 = a0 (GPA >> 2) - * rs2 = zero - * HFENCE.GVMA a0 - * 0110001 00000 01010 000 00000 1110011 - */ - asm volatile ("srli a0, %0, 2\n" - ".word 0x62050073\n" - :: "r" (pos) : "a0", "memory"); - } + for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) + asm volatile(HFENCE_GVMA(%0, zero) + : : "r" (pos >> 2) : "memory"); } void kvm_riscv_local_hfence_gvma_all(void) { - /* - * rs1 = zero - * rs2 = zero - * HFENCE.GVMA - * 0110001 00000 00000 000 00000 1110011 - */ - asm volatile (".word 0x62000073" ::: "memory"); + asm volatile(HFENCE_GVMA(zero, zero) : : : "memory"); } -/* - * Instruction encoding of hfence.gvma is: - * HFENCE.VVMA rs1, rs2 - * HFENCE.VVMA zero, rs2 - * HFENCE.VVMA rs1 - * HFENCE.VVMA - * - * 
rs1!=zero and rs2!=zero ==> HFENCE.VVMA rs1, rs2 - * rs1==zero and rs2!=zero ==> HFENCE.VVMA zero, rs2 - * rs1!=zero and rs2==zero ==> HFENCE.VVMA rs1 - * rs1==zero and rs2==zero ==> HFENCE.VVMA - * - * Instruction encoding of HFENCE.VVMA is: - * 0010001 rs2(5) rs1(5) 000 00000 1110011 - */ - void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid, unsigned long asid, unsigned long gva, @@ -133,19 +70,9 @@ void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid, hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT); - for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) { - /* - * rs1 = a0 (GVA) - * rs2 = a1 (ASID) - * HFENCE.VVMA a0, a1 - * 0010001 01011 01010 000 00000 1110011 - */ - asm volatile ("add a0, %0, zero\n" - "add a1, %1, zero\n" - ".word 0x22b50073\n" - :: "r" (pos), "r" (asid) - : "a0", "a1", "memory"); - } + for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) + asm volatile(HFENCE_VVMA(%0, %1) + : : "r" (pos), "r" (asid) : "memory"); csr_write(CSR_HGATP, hgatp); } @@ -157,15 +84,7 @@ void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid, hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT); - /* - * rs1 = zero - * rs2 = a0 (ASID) - * HFENCE.VVMA zero, a0 - * 0010001 01010 00000 000 00000 1110011 - */ - asm volatile ("add a0, %0, zero\n" - ".word 0x22a00073\n" - :: "r" (asid) : "a0", "memory"); + asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory"); csr_write(CSR_HGATP, hgatp); } @@ -183,17 +102,9 @@ void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid, hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT); - for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) { - /* - * rs1 = a0 (GVA) - * rs2 = zero - * HFENCE.VVMA a0 - * 0010001 00000 01010 000 00000 1110011 - */ - asm volatile ("add a0, %0, zero\n" - ".word 0x22050073\n" - :: "r" (pos) : "a0", "memory"); - } + for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) + asm volatile(HFENCE_VVMA(%0, zero) + : : "r" (pos) : "memory"); csr_write(CSR_HGATP, hgatp); } @@ -204,13 +115,7 @@ void kvm_riscv_local_hfence_vvma_all(unsigned long vmid) hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT); - /* - * rs1 = zero - * rs2 = zero - * HFENCE.VVMA - * 0010001 00000 00000 000 00000 1110011 - */ - asm volatile (".word 0x22000073" ::: "memory"); + asm volatile(HFENCE_VVMA(zero, zero) : : : "memory"); csr_write(CSR_HGATP, hgatp); } From 26b73f14933e9c0beb88bb2fcee69d93572558ef Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Sun, 2 Oct 2022 10:18:20 +0530 Subject: [PATCH 45/57] riscv: KVM: Apply insn-def to hlv encodings Introduce hlv instruction encodings and apply them to KVM's use. We're careful not to introduce hlv.d to 32-bit builds. Indeed, we ensure the build fails if someone tries to use it. 
Signed-off-by: Andrew Jones Reviewed-by: Anup Patel Signed-off-by: Anup Patel --- arch/riscv/include/asm/insn-def.h | 17 ++++++++++++++ arch/riscv/kvm/vcpu_exit.c | 39 +++++++------------------------ 2 files changed, 25 insertions(+), 31 deletions(-) diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h index c8aca3c27433..af7b0b55815c 100644 --- a/arch/riscv/include/asm/insn-def.h +++ b/arch/riscv/include/asm/insn-def.h @@ -97,4 +97,21 @@ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(49), \ __RD(0), RS1(gaddr), RS2(vmid)) +#define HLVX_HU(dest, addr) \ + INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(50), \ + RD(dest), RS1(addr), __RS2(3)) + +#define HLV_W(dest, addr) \ + INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(52), \ + RD(dest), RS1(addr), __RS2(0)) + +#ifdef CONFIG_64BIT +#define HLV_D(dest, addr) \ + INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(54), \ + RD(dest), RS1(addr), __RS2(0)) +#else +#define HLV_D(dest, addr) \ + __ASM_STR(.error "hlv.d requires 64-bit support") +#endif + #endif /* __ASM_INSN_DEF_H */ diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c index d5c36386878a..c9f741ab26f5 100644 --- a/arch/riscv/kvm/vcpu_exit.c +++ b/arch/riscv/kvm/vcpu_exit.c @@ -8,6 +8,7 @@ #include #include +#include static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run, struct kvm_cpu_trap *trap) @@ -62,11 +63,7 @@ unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu, { register unsigned long taddr asm("a0") = (unsigned long)trap; register unsigned long ttmp asm("a1"); - register unsigned long val asm("t0"); - register unsigned long tmp asm("t1"); - register unsigned long addr asm("t2") = guest_addr; - unsigned long flags; - unsigned long old_stvec, old_hstatus; + unsigned long flags, val, tmp, old_stvec, old_hstatus; local_irq_save(flags); @@ -82,29 +79,19 @@ unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu, ".option push\n" ".option norvc\n" "add %[ttmp], %[taddr], 0\n" - /* - * HLVX.HU %[val], (%[addr]) - * HLVX.HU t0, (t2) - * 0110010 00011 00111 100 00101 1110011 - */ - ".word 0x6433c2f3\n" + HLVX_HU(%[val], %[addr]) "andi %[tmp], %[val], 3\n" "addi %[tmp], %[tmp], -3\n" "bne %[tmp], zero, 2f\n" "addi %[addr], %[addr], 2\n" - /* - * HLVX.HU %[tmp], (%[addr]) - * HLVX.HU t1, (t2) - * 0110010 00011 00111 100 00110 1110011 - */ - ".word 0x6433c373\n" + HLVX_HU(%[tmp], %[addr]) "sll %[tmp], %[tmp], 16\n" "add %[val], %[val], %[tmp]\n" "2:\n" ".option pop" : [val] "=&r" (val), [tmp] "=&r" (tmp), [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp), - [addr] "+&r" (addr) : : "memory"); + [addr] "+&r" (guest_addr) : : "memory"); if (trap->scause == EXC_LOAD_PAGE_FAULT) trap->scause = EXC_INST_PAGE_FAULT; @@ -121,24 +108,14 @@ unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu, ".option norvc\n" "add %[ttmp], %[taddr], 0\n" #ifdef CONFIG_64BIT - /* - * HLV.D %[val], (%[addr]) - * HLV.D t0, (t2) - * 0110110 00000 00111 100 00101 1110011 - */ - ".word 0x6c03c2f3\n" + HLV_D(%[val], %[addr]) #else - /* - * HLV.W %[val], (%[addr]) - * HLV.W t0, (t2) - * 0110100 00000 00111 100 00101 1110011 - */ - ".word 0x6803c2f3\n" + HLV_W(%[val], %[addr]) #endif ".option pop" : [val] "=&r" (val), [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp) - : [addr] "r" (addr) : "memory"); + : [addr] "r" (guest_addr) : "memory"); } csr_write(CSR_STVEC, old_stvec); From d837f19195e77f7c89b6645c8311f5ea6ff67905 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Sun, 2 Oct 2022 10:18:25 +0530 Subject: [PATCH 46/57] RISC-V: KVM: Change the SBI specification version to v1.0 
The SBI v1.0 specification is functionally the same as the SBI v0.3 specification except that the SBI v1.0 specification went through the full RISC-V International ratification process. Let us change the SBI specification version to v1.0. Signed-off-by: Anup Patel Reviewed-by: Atish Patra Signed-off-by: Anup Patel --- arch/riscv/include/asm/kvm_vcpu_sbi.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h index 26a446a34057..d4e3e600beef 100644 --- a/arch/riscv/include/asm/kvm_vcpu_sbi.h +++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h @@ -11,8 +11,8 @@ #define KVM_SBI_IMPID 3 -#define KVM_SBI_VERSION_MAJOR 0 -#define KVM_SBI_VERSION_MINOR 3 +#define KVM_SBI_VERSION_MAJOR 1 +#define KVM_SBI_VERSION_MINOR 0 struct kvm_vcpu_sbi_extension { unsigned long extid_start; From 122979aa26cd4a314aae889a0496eb829d50bc9e Mon Sep 17 00:00:00 2001 From: Mayuresh Chitale Date: Sun, 2 Oct 2022 10:18:31 +0530 Subject: [PATCH 47/57] RISC-V: Probe Svinval extension from ISA string Just like other ISA extensions, we allow callers/users to detect the presence of the Svinval extension from the ISA string. Signed-off-by: Mayuresh Chitale Signed-off-by: Anup Patel Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/include/asm/hwcap.h | 4 ++++ arch/riscv/kernel/cpu.c | 1 + arch/riscv/kernel/cpufeature.c | 1 + 3 files changed, 6 insertions(+) diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 6f59ec64175e..b22525290073 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -58,6 +58,7 @@ enum riscv_isa_ext_id { RISCV_ISA_EXT_ZICBOM, RISCV_ISA_EXT_ZIHINTPAUSE, RISCV_ISA_EXT_SSTC, + RISCV_ISA_EXT_SVINVAL, RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX, }; @@ -69,6 +70,7 @@ enum riscv_isa_ext_id { enum riscv_isa_ext_key { RISCV_ISA_EXT_KEY_FPU, /* For 'F' and 'D' */ RISCV_ISA_EXT_KEY_ZIHINTPAUSE, + RISCV_ISA_EXT_KEY_SVINVAL, RISCV_ISA_EXT_KEY_MAX, }; @@ -90,6 +92,8 @@ static __always_inline int riscv_isa_ext2key(int num) return RISCV_ISA_EXT_KEY_FPU; case RISCV_ISA_EXT_ZIHINTPAUSE: return RISCV_ISA_EXT_KEY_ZIHINTPAUSE; + case RISCV_ISA_EXT_SVINVAL: + return RISCV_ISA_EXT_KEY_SVINVAL; default: return -EINVAL; } diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index 0be8a2403212..7d1cd653ca02 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c @@ -96,6 +96,7 @@ static struct riscv_isa_ext_data isa_ext_arr[] = { __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM), __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE), __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC), + __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL), __RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX), }; diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 3b5583db9d80..9774f1271f93 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -204,6 +204,7 @@ void __init riscv_fill_hwcap(void) SET_ISA_EXT_MAP("zicbom", RISCV_ISA_EXT_ZICBOM); SET_ISA_EXT_MAP("zihintpause", RISCV_ISA_EXT_ZIHINTPAUSE); SET_ISA_EXT_MAP("sstc", RISCV_ISA_EXT_SSTC); + SET_ISA_EXT_MAP("svinval", RISCV_ISA_EXT_SVINVAL); } #undef SET_ISA_EXT_MAP } From 5ff112484f2e63c5cac9f865181ca7ce467d0f89 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Sun, 2 Oct 2022 10:18:37 +0530 Subject: [PATCH 48/57] RISC-V: KVM: Use Svinval for local TLB maintenance when available We should prefer the HINVAL.GVMA and HINVAL.VVMA instructions for local TLB maintenance when the underlying
host supports Svinval extension. Signed-off-by: Anup Patel Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/include/asm/insn-def.h | 20 +++++++++++ arch/riscv/kvm/tlb.c | 60 ++++++++++++++++++++++++------- 2 files changed, 68 insertions(+), 12 deletions(-) diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h index af7b0b55815c..16044affa57c 100644 --- a/arch/riscv/include/asm/insn-def.h +++ b/arch/riscv/include/asm/insn-def.h @@ -114,4 +114,24 @@ __ASM_STR(.error "hlv.d requires 64-bit support") #endif +#define SINVAL_VMA(vaddr, asid) \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(11), \ + __RD(0), RS1(vaddr), RS2(asid)) + +#define SFENCE_W_INVAL() \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(12), \ + __RD(0), __RS1(0), __RS2(0)) + +#define SFENCE_INVAL_IR() \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(12), \ + __RD(0), __RS1(0), __RS2(1)) + +#define HINVAL_VVMA(vaddr, asid) \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(19), \ + __RD(0), RS1(vaddr), RS2(asid)) + +#define HINVAL_GVMA(gaddr, vmid) \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(51), \ + __RD(0), RS1(gaddr), RS2(vmid)) + #endif /* __ASM_INSN_DEF_H */ diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c index 1ce3394b3acf..309d79b3e5cd 100644 --- a/arch/riscv/kvm/tlb.c +++ b/arch/riscv/kvm/tlb.c @@ -12,8 +12,12 @@ #include #include #include +#include #include +#define has_svinval() \ + static_branch_unlikely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_SVINVAL]) + void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid, gpa_t gpa, gpa_t gpsz, unsigned long order) @@ -25,9 +29,17 @@ void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid, return; } - for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) - asm volatile (HFENCE_GVMA(%0, %1) - : : "r" (pos >> 2), "r" (vmid) : "memory"); + if (has_svinval()) { + asm volatile (SFENCE_W_INVAL() ::: "memory"); + for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) + asm volatile (HINVAL_GVMA(%0, %1) + : : "r" (pos >> 2), "r" (vmid) : "memory"); + asm volatile (SFENCE_INVAL_IR() ::: "memory"); + } else { + for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) + asm volatile (HFENCE_GVMA(%0, %1) + : : "r" (pos >> 2), "r" (vmid) : "memory"); + } } void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid) @@ -45,9 +57,17 @@ void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz, return; } - for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) - asm volatile(HFENCE_GVMA(%0, zero) - : : "r" (pos >> 2) : "memory"); + if (has_svinval()) { + asm volatile (SFENCE_W_INVAL() ::: "memory"); + for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) + asm volatile(HINVAL_GVMA(%0, zero) + : : "r" (pos >> 2) : "memory"); + asm volatile (SFENCE_INVAL_IR() ::: "memory"); + } else { + for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) + asm volatile(HFENCE_GVMA(%0, zero) + : : "r" (pos >> 2) : "memory"); + } } void kvm_riscv_local_hfence_gvma_all(void) @@ -70,9 +90,17 @@ void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid, hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT); - for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) - asm volatile(HFENCE_VVMA(%0, %1) - : : "r" (pos), "r" (asid) : "memory"); + if (has_svinval()) { + asm volatile (SFENCE_W_INVAL() ::: "memory"); + for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) + asm volatile(HINVAL_VVMA(%0, %1) + : : "r" (pos), "r" (asid) : "memory"); + asm volatile (SFENCE_INVAL_IR() ::: "memory"); + } else { + for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) + asm 
volatile(HFENCE_VVMA(%0, %1) + : : "r" (pos), "r" (asid) : "memory"); + } csr_write(CSR_HGATP, hgatp); } @@ -102,9 +130,17 @@ void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid, hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT); - for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) - asm volatile(HFENCE_VVMA(%0, zero) - : : "r" (pos) : "memory"); + if (has_svinval()) { + asm volatile (SFENCE_W_INVAL() ::: "memory"); + for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) + asm volatile(HINVAL_VVMA(%0, zero) + : : "r" (pos) : "memory"); + asm volatile (SFENCE_INVAL_IR() ::: "memory"); + } else { + for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) + asm volatile(HFENCE_VVMA(%0, zero) + : : "r" (pos) : "memory"); + } csr_write(CSR_HGATP, hgatp); } From bad6ea07c876a67c4d8f46b0c565ab500150720f Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Sun, 2 Oct 2022 10:18:42 +0530 Subject: [PATCH 49/57] RISC-V: KVM: Allow Guest use Svinval extension We should advertise Svinval ISA extension to KVM user-space whenever host supports it. This will allow KVM user-space (i.e. QEMU or KVMTOOL) to pass on this information to Guest via ISA string. Signed-off-by: Anup Patel Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/include/uapi/asm/kvm.h | 1 + arch/riscv/kvm/vcpu.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 7351417afd62..b6770ee08872 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -98,6 +98,7 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_M, KVM_RISCV_ISA_EXT_SVPBMT, KVM_RISCV_ISA_EXT_SSTC, + KVM_RISCV_ISA_EXT_SVINVAL, KVM_RISCV_ISA_EXT_MAX, }; diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index d0f08d5b4282..901bb5c0cb50 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -53,6 +53,7 @@ static const unsigned long kvm_isa_ext_arr[] = { RISCV_ISA_EXT_m, RISCV_ISA_EXT_SVPBMT, RISCV_ISA_EXT_SSTC, + RISCV_ISA_EXT_SVINVAL, }; static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext) @@ -87,6 +88,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext) case KVM_RISCV_ISA_EXT_I: case KVM_RISCV_ISA_EXT_M: case KVM_RISCV_ISA_EXT_SSTC: + case KVM_RISCV_ISA_EXT_SVINVAL: return false; default: break; From 0bba48978f6b63aee0fa4ee3a8097ec94e75f7f2 Mon Sep 17 00:00:00 2001 From: Mayuresh Chitale Date: Sun, 2 Oct 2022 10:18:48 +0530 Subject: [PATCH 50/57] RISC-V: KVM: Allow Guest use Zihintpause extension We should advertise Zihintpause ISA extension to KVM user-space whenever host supports it. This will allow KVM user-space (i.e. QEMU or KVMTOOL) to pass on this information to Guest via ISA string. 
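As a rough sketch of the user-space side (hypothetical VMM code, not part of this
patch; vcpu_fd is assumed to be an open vCPU file descriptor), the extension is
toggled through the ISA extension ONE_REG interface:

	unsigned long enabled = 1;
	struct kvm_one_reg reg = {
		/* KVM_REG_SIZE_U64 on rv64 hosts; rv32 would use KVM_REG_SIZE_U32 */
		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE,
		.addr = (unsigned long)&enabled,
	};

	/* succeeds only when the host supports Zihintpause; the VMM can then
	 * append "_zihintpause" to the ISA string it advertises to the guest */
	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);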
Signed-off-by: Mayuresh Chitale Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/include/uapi/asm/kvm.h | 1 + arch/riscv/kvm/vcpu.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index b6770ee08872..9085b90cf324 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -99,6 +99,7 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_SVPBMT, KVM_RISCV_ISA_EXT_SSTC, KVM_RISCV_ISA_EXT_SVINVAL, + KVM_RISCV_ISA_EXT_ZIHINTPAUSE, KVM_RISCV_ISA_EXT_MAX, }; diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 901bb5c0cb50..0de0dd22e734 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -54,6 +54,7 @@ static const unsigned long kvm_isa_ext_arr[] = { RISCV_ISA_EXT_SVPBMT, RISCV_ISA_EXT_SSTC, RISCV_ISA_EXT_SVINVAL, + RISCV_ISA_EXT_ZIHINTPAUSE, }; static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext) @@ -89,6 +90,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext) case KVM_RISCV_ISA_EXT_M: case KVM_RISCV_ISA_EXT_SSTC: case KVM_RISCV_ISA_EXT_SVINVAL: + case KVM_RISCV_ISA_EXT_ZIHINTPAUSE: return false; default: break; From 1b5cbb8733f924c99bc48a8e4c2a95449f0f514d Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Sun, 2 Oct 2022 10:18:54 +0530 Subject: [PATCH 51/57] RISC-V: KVM: Make ISA ext mappings explicit While adding new extensions at the bottom of the array isn't hard to do, it's a pain to review in order to ensure we're not missing any. Also, resolving merge conflicts for multiple new ISA extensions can be error-prone. To make adding new mappings foolproof, explicitly assign the array elements. And, now that the order doesn't matter, we can alphabetize the extensions, so we do that too. Signed-off-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/kvm/vcpu.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 0de0dd22e734..61fe1604e8ea 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -42,19 +42,22 @@ const struct kvm_stats_header kvm_vcpu_stats_header = { #define KVM_RISCV_BASE_ISA_MASK GENMASK(25, 0) +#define KVM_ISA_EXT_ARR(ext) [KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext + /* Mapping between KVM ISA Extension ID & Host ISA extension ID */ static const unsigned long kvm_isa_ext_arr[] = { - RISCV_ISA_EXT_a, - RISCV_ISA_EXT_c, - RISCV_ISA_EXT_d, - RISCV_ISA_EXT_f, - RISCV_ISA_EXT_h, - RISCV_ISA_EXT_i, - RISCV_ISA_EXT_m, - RISCV_ISA_EXT_SVPBMT, - RISCV_ISA_EXT_SSTC, - RISCV_ISA_EXT_SVINVAL, - RISCV_ISA_EXT_ZIHINTPAUSE, + [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a, + [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c, + [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d, + [KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f, + [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h, + [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i, + [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m, + + KVM_ISA_EXT_ARR(SSTC), + KVM_ISA_EXT_ARR(SVINVAL), + KVM_ISA_EXT_ARR(SVPBMT), + KVM_ISA_EXT_ARR(ZIHINTPAUSE), }; static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext) From afd5dde9a186b8fc5742fff707f184760c4af1a9 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Sun, 2 Oct 2022 10:18:59 +0530 Subject: [PATCH 52/57] RISC-V: KVM: Provide UAPI for Zicbom block size We're about to allow guests to use the Zicbom extension. KVM userspace needs to know the cache block size in order to properly advertise it to the guest. 
Provide a virtual config register for userspace to get it with the GET_ONE_REG API, but setting it cannot be supported, so disallow SET_ONE_REG. Signed-off-by: Andrew Jones Reviewed-by: Conor Dooley Reviewed-by: Atish Patra Signed-off-by: Anup Patel --- arch/riscv/include/uapi/asm/kvm.h | 1 + arch/riscv/kvm/vcpu.c | 8 ++++++++ arch/riscv/mm/dma-noncoherent.c | 2 ++ 3 files changed, 11 insertions(+) diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 9085b90cf324..3d7771300567 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -48,6 +48,7 @@ struct kvm_sregs { /* CONFIG registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ struct kvm_riscv_config { unsigned long isa; + unsigned long zicbom_block_size; }; /* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 61fe1604e8ea..b0a0ce6d16ef 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -18,6 +18,7 @@ #include #include #include +#include #include const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { @@ -261,6 +262,11 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu, case KVM_REG_RISCV_CONFIG_REG(isa): reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK; break; + case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): + if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM)) + return -EINVAL; + reg_val = riscv_cbom_block_size; + break; default: return -EINVAL; } @@ -318,6 +324,8 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu, return -EOPNOTSUPP; } break; + case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): + return -EOPNOTSUPP; default: return -EINVAL; } diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c index e3f9bdf47c5f..b0add983530a 100644 --- a/arch/riscv/mm/dma-noncoherent.c +++ b/arch/riscv/mm/dma-noncoherent.c @@ -13,6 +13,8 @@ #include unsigned int riscv_cbom_block_size; +EXPORT_SYMBOL_GPL(riscv_cbom_block_size); + static bool noncoherent_supported; void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, From 56852c6211971798dfbe4098c8a8528b59234de2 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Sun, 2 Oct 2022 10:19:05 +0530 Subject: [PATCH 53/57] RISC-V: KVM: Expose Zicbom to the guest Guests may use the cbo.inval,clean,flush instructions when the CPU has the Zicbom extension and the hypervisor sets henvcfg.CBIE (for cbo.inval) and henvcfg.CBCFE (for cbo.clean,flush). Add Zicbom support for KVM guests which may be enabled and disabled from KVM userspace using the ISA extension ONE_REG API. Also opportunistically switch the other isa extension checks in kvm_riscv_vcpu_update_config() to riscv_isa_extension_available(). 
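For illustration, a user-space sketch (hypothetical VMM code, not part of this
patch; vcpu_fd is assumed to be an open vCPU file descriptor) would read the block
size exposed by the previous patch and then enable the extension for the vCPU:

	unsigned long blksz, on = 1;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |	/* KVM_REG_SIZE_U32 on rv32 */
			KVM_REG_RISCV_CONFIG |
			KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
		.addr = (unsigned long)&blksz,
	};

	if (!ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg)) {
		reg.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			   KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM;
		reg.addr = (unsigned long)&on;
		ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
		/* advertise "zicbom" in the guest ISA string, and blksz via,
		 * e.g., the riscv,cbom-block-size DT property */
	}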
Signed-off-by: Andrew Jones Reviewed-by: Conor Dooley Reviewed-by: Atish Patra Signed-off-by: Anup Patel --- arch/riscv/include/uapi/asm/kvm.h | 1 + arch/riscv/kvm/vcpu.c | 9 +++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 3d7771300567..8985ff234c01 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -101,6 +101,7 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_SSTC, KVM_RISCV_ISA_EXT_SVINVAL, KVM_RISCV_ISA_EXT_ZIHINTPAUSE, + KVM_RISCV_ISA_EXT_ZICBOM, KVM_RISCV_ISA_EXT_MAX, }; diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index b0a0ce6d16ef..f55d15a8a410 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -59,6 +59,7 @@ static const unsigned long kvm_isa_ext_arr[] = { KVM_ISA_EXT_ARR(SVINVAL), KVM_ISA_EXT_ARR(SVPBMT), KVM_ISA_EXT_ARR(ZIHINTPAUSE), + KVM_ISA_EXT_ARR(ZICBOM), }; static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext) @@ -799,11 +800,15 @@ static void kvm_riscv_vcpu_update_config(const unsigned long *isa) { u64 henvcfg = 0; - if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SVPBMT)) + if (riscv_isa_extension_available(isa, SVPBMT)) henvcfg |= ENVCFG_PBMTE; - if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SSTC)) + if (riscv_isa_extension_available(isa, SSTC)) henvcfg |= ENVCFG_STCE; + + if (riscv_isa_extension_available(isa, ZICBOM)) + henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE); + csr_write(CSR_HENVCFG, henvcfg); #ifdef CONFIG_32BIT csr_write(CSR_HENVCFGH, henvcfg >> 32); From f493cdc92d9b9e9a0db0a9049609457e43a56066 Mon Sep 17 00:00:00 2001 From: Xiu Jianfeng Date: Sun, 2 Oct 2022 10:19:11 +0530 Subject: [PATCH 54/57] RISC-V: KVM: add __init annotation to riscv_kvm_init() The riscv_kvm_init() is a module_init entry so let us add __init annotation to it. Signed-off-by: Xiu Jianfeng Signed-off-by: Anup Patel --- arch/riscv/kvm/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c index 1549205fe5fe..df2d8716851f 100644 --- a/arch/riscv/kvm/main.c +++ b/arch/riscv/kvm/main.c @@ -122,7 +122,7 @@ void kvm_arch_exit(void) { } -static int riscv_kvm_init(void) +static int __init riscv_kvm_init(void) { return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); } From 54ce3f7ff3395f12ad142d46b628606ab1e926ef Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Sun, 2 Oct 2022 10:19:16 +0530 Subject: [PATCH 55/57] RISC-V: KVM: Record number of signal exits as a vCPU stat Record a statistic indicating the number of times a vCPU has exited due to a pending signal. Signed-off-by: Jisheng Zhang Reviewed-by: Guo Ren Reviewed-by: Andrew Jones Signed-off-by: Anup Patel exit_reason = KVM_EXIT_INTR; + ++vcpu->stat.signal_exits; } /* From 9c00fbdd93a22a6657378292f2eb29e9754cde7f Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Sun, 2 Oct 2022 10:19:25 +0530 Subject: [PATCH 56/57] RISC-V: KVM: Use generic guest entry infrastructure Use generic guest entry infrastructure to properly handle TIF_NOTIFY_RESUME. 
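Note that the open-coded signal check removed below is not lost: once
KVM_XFER_TO_GUEST_WORK is selected, xfer_to_guest_mode_handle_work() performs the
equivalent handling in the generic entry code, roughly (a simplified paraphrase of
kernel/entry/kvm.c, not code added by this patch):

	if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
		/* sets run->exit_reason = KVM_EXIT_INTR and bumps
		 * vcpu->stat.signal_exits, as the removed code did */
		kvm_handle_signal_exit(vcpu);
		return -EINTR;
	}
	if (ti_work & _TIF_NEED_RESCHED)
		schedule();
	if (ti_work & _TIF_NOTIFY_RESUME)
		resume_user_mode_work(NULL);

so rescheduling, pending signals, and TIF_NOTIFY_RESUME are all handled before
every guest entry, and the signal_exits statistic keeps working.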
Signed-off-by: Jisheng Zhang Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/kvm/Kconfig | 1 + arch/riscv/kvm/vcpu.c | 18 ++++++------------ 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig index f5a342fa1b1d..f36a737d5f96 100644 --- a/arch/riscv/kvm/Kconfig +++ b/arch/riscv/kvm/Kconfig @@ -24,6 +24,7 @@ config KVM select PREEMPT_NOTIFIERS select KVM_MMIO select KVM_GENERIC_DIRTYLOG_READ_PROTECT + select KVM_XFER_TO_GUEST_WORK select HAVE_KVM_VCPU_ASYNC_IOCTL select HAVE_KVM_EVENTFD select SRCU diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 5414bf56bce5..a032c4f0d600 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -979,7 +980,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) run->exit_reason = KVM_EXIT_UNKNOWN; while (ret > 0) { /* Check conditions before entering the guest */ - cond_resched(); + ret = xfer_to_guest_mode_handle_work(vcpu); + if (!ret) + ret = 1; kvm_riscv_gstage_vmid_update(vcpu); @@ -987,16 +990,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) local_irq_disable(); - /* - * Exit if we have a signal pending so that we can deliver - * the signal to user space. - */ - if (signal_pending(current)) { - ret = -EINTR; - run->exit_reason = KVM_EXIT_INTR; - ++vcpu->stat.signal_exits; - } - /* * Ensure we set mode to IN_GUEST_MODE after we disable * interrupts and before the final VCPU requests check. @@ -1019,7 +1012,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) if (ret <= 0 || kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) || - kvm_request_pending(vcpu)) { + kvm_request_pending(vcpu) || + xfer_to_guest_mode_work_pending()) { vcpu->mode = OUTSIDE_GUEST_MODE; local_irq_enable(); kvm_vcpu_srcu_read_lock(vcpu); From b60ca69715fcc39a5f4bdd56ca2ea691b7358455 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Sun, 2 Oct 2022 10:19:31 +0530 Subject: [PATCH 57/57] riscv: select HAVE_POSIX_CPU_TIMERS_TASK_WORK Move POSIX CPU timer expiry and signal delivery into task context to allow PREEMPT_RT setups to coexist with KVM. Signed-off-by: Jisheng Zhang Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index d6b0ffd9bf00..74082e2d7ce8 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -103,6 +103,7 @@ config RISCV select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_POSIX_CPU_TIMERS_TASK_WORK select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_STACKPROTECTOR