From 022ecb960c89faad42ff0b417a71d9255dd115a3 Mon Sep 17 00:00:00 2001
From: Nicholas Piggin <npiggin@gmail.com>
Date: Tue, 23 Nov 2021 19:52:15 +1000
Subject: [PATCH] KVM: PPC: Book3S HV P9: Demand fault TM facility registers

Use HFSCR facility disabling to implement demand faulting for TM, with
a hysteresis counter similar to the load_fp etc. counters used in
context switching, which implement the equivalent demand faulting for
userspace facilities.

This speeds up guest entry/exit by avoiding the TM register
save/restore when a guest is not frequently using them. When a guest
does use them often, there will be some additional demand-fault
overhead, but these are not commonly used facilities.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-38-npiggin@gmail.com
---
 arch/powerpc/include/asm/kvm_host.h   |  3 +++
 arch/powerpc/kvm/book3s_hv.c          | 26 ++++++++++++++++++++------
 arch/powerpc/kvm/book3s_hv_p9_entry.c | 15 +++++++++++----
 3 files changed, 34 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 7a55b19eb6c0..d7004412b859 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -580,6 +580,9 @@ struct kvm_vcpu_arch {
 	ulong ppr;
 	u32 pspb;
 	u8 load_ebb;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	u8 load_tm;
+#endif
 	ulong fscr;
 	ulong shadow_fscr;
 	ulong ebbhr;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index da29cf9236c8..198f6d997330 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1446,6 +1446,16 @@ static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu)
 	return RESUME_GUEST;
 }
 
+static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu)
+{
+	if (!(vcpu->arch.hfscr_permitted & HFSCR_TM))
+		return EMULATE_FAIL;
+
+	vcpu->arch.hfscr |= HFSCR_TM;
+
+	return RESUME_GUEST;
+}
+
 static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 				 struct task_struct *tsk)
 {
@@ -1739,6 +1749,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 			r = kvmppc_pmu_unavailable(vcpu);
 		if (cause == FSCR_EBB_LG)
 			r = kvmppc_ebb_unavailable(vcpu);
+		if (cause == FSCR_TM_LG)
+			r = kvmppc_tm_unavailable(vcpu);
 	}
 	if (r == EMULATE_FAIL) {
 		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
@@ -2783,9 +2795,9 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
 	vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
 
 	/*
-	 * PM, EBB is demand-faulted so start with it clear.
+	 * PM, EBB, TM are demand-faulted so start with them clear.
 	 */
-	vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB);
+	vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM);
 
 	kvmppc_mmu_book3s_hv_init(vcpu);
 
@@ -3868,8 +3880,9 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 		msr |= MSR_VEC;
 	if (cpu_has_feature(CPU_FTR_VSX))
 		msr |= MSR_VSX;
-	if (cpu_has_feature(CPU_FTR_TM) ||
-	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+	if ((cpu_has_feature(CPU_FTR_TM) ||
+	     cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+	    (vcpu->arch.hfscr & HFSCR_TM))
 		msr |= MSR_TM;
 	msr = msr_check_and_set(msr);
 
@@ -4608,8 +4621,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 		msr |= MSR_VEC;
 	if (cpu_has_feature(CPU_FTR_VSX))
 		msr |= MSR_VSX;
-	if (cpu_has_feature(CPU_FTR_TM) ||
-	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+	if ((cpu_has_feature(CPU_FTR_TM) ||
+	     cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+	    (vcpu->arch.hfscr & HFSCR_TM))
 		msr |= MSR_TM;
 	msr = msr_check_and_set(msr);
 
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 929a7c336b09..8499e8a9ca8f 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -310,7 +310,7 @@ bool load_vcpu_state(struct kvm_vcpu *vcpu,
 		if (MSR_TM_ACTIVE(guest_msr)) {
 			kvmppc_restore_tm_hv(vcpu, guest_msr, true);
 			ret = true;
-		} else {
+		} else if (vcpu->arch.hfscr & HFSCR_TM) {
 			mtspr(SPRN_TEXASR, vcpu->arch.texasr);
 			mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
 			mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
@@ -346,10 +346,16 @@ void store_vcpu_state(struct kvm_vcpu *vcpu)
 		unsigned long guest_msr = vcpu->arch.shregs.msr;
 		if (MSR_TM_ACTIVE(guest_msr)) {
 			kvmppc_save_tm_hv(vcpu, guest_msr, true);
-		} else {
+		} else if (vcpu->arch.hfscr & HFSCR_TM) {
 			vcpu->arch.texasr = mfspr(SPRN_TEXASR);
 			vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
 			vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
+
+			if (!vcpu->arch.nested) {
+				vcpu->arch.load_tm++; /* see load_ebb comment */
+				if (!vcpu->arch.load_tm)
+					vcpu->arch.hfscr &= ~HFSCR_TM;
+			}
 		}
 	}
 #endif
@@ -641,8 +647,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 		msr |= MSR_VEC;
 	if (cpu_has_feature(CPU_FTR_VSX))
 		msr |= MSR_VSX;
-	if (cpu_has_feature(CPU_FTR_TM) ||
-	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+	if ((cpu_has_feature(CPU_FTR_TM) ||
+	     cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+	    (vcpu->arch.hfscr & HFSCR_TM))
 		msr |= MSR_TM;
 	msr = msr_check_and_set(msr);
 	/* Save MSR for restore. This is after hard disable, so EE is clear. */
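
For readers unfamiliar with the load_fp-style hysteresis the commit
message refers to, the following standalone sketch models the
demand-fault flow this patch adds. It is illustrative only: the struct
and function names are hypothetical, the nested-guest check and the
actual SPR accesses are elided, and HFSCR_TM here mirrors the kernel's
FSCR_TM_LG = 5 bit position.

/*
 * Sketch of the HFSCR demand-fault + hysteresis pattern
 * (hypothetical names; not the kernel code itself).
 */
#include <stdint.h>
#include <stdio.h>

#define HFSCR_TM	(1UL << 5)	/* facility bit, cf. FSCR_TM_LG */

struct vcpu {
	uint64_t hfscr;			/* facilities currently granted */
	uint64_t hfscr_permitted;	/* facilities the VM may ever use */
	uint8_t load_tm;		/* hysteresis counter, wraps at 256 */
};

/* TM Facility Unavailable interrupt: grant the facility and resume. */
static int tm_unavailable(struct vcpu *v)
{
	if (!(v->hfscr_permitted & HFSCR_TM))
		return -1;		/* EMULATE_FAIL analogue */
	v->hfscr |= HFSCR_TM;
	return 0;			/* RESUME_GUEST analogue */
}

/* Guest exit: TM state is only saved while the facility is granted. */
static void vcpu_exit(struct vcpu *v)
{
	if (!(v->hfscr & HFSCR_TM))
		return;			/* nothing to save or restore */
	/* ... save TEXASR/TFHAR/TFIAR here ... */
	v->load_tm++;			/* u8: wraps to 0 every 256 exits */
	if (!v->load_tm)
		v->hfscr &= ~HFSCR_TM;	/* revoke; next TM use faults again */
}

int main(void)
{
	struct vcpu v = { .hfscr_permitted = HFSCR_TM };
	int i;

	tm_unavailable(&v);		/* guest touched TM: fault it in */
	for (i = 0; i < 256; i++)	/* 256 exits with no further fault */
		vcpu_exit(&v);
	printf("TM granted after 256 exits: %s\n",
	       (v.hfscr & HFSCR_TM) ? "yes" : "no");	/* prints "no" */
	return 0;
}

The u8 counter gives roughly 256 exits of hysteresis: a guest that
keeps using TM keeps the facility (one fault per revocation at most),
while an idle guest eventually loses it and stops paying the three SPR
save/restores on every entry/exit.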