KVM: PPC: Book3S HV P9: Demand fault TM facility registers
Use HFSCR facility disabling to implement demand faulting for TM, with a hysteresis counter similar to the load_fp etc counters in context switching that implement the equivalent demand faulting for userspace facilities.

This speeds up guest entry/exit by avoiding the register save/restore when a guest is not frequently using them. When a guest does use them often, there will be some additional demand fault overhead, but these are not commonly used facilities.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-38-npiggin@gmail.com
Parent: a3e18ca8ab
Commit: 022ecb960c
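Before the per-file hunks, here is a minimal, self-contained sketch of the HFSCR demand-fault pattern with a hysteresis counter, as this patch applies it to TM. It is an illustration only, not kernel code: struct vcpu_model, vcpu_create(), tm_unavailable() and vcpu_exit_save() are invented stand-ins, and the HFSCR_TM bit position below is a placeholder; the real definitions live in the kernel headers and in the hunks that follow.

/* Sketch of HFSCR-based demand faulting with an 8-bit hysteresis counter.
 * Field names mirror the kernel (hfscr, hfscr_permitted, load_tm), but the
 * struct and helpers are simplified stand-ins, not the kernel implementation.
 */
#include <stdbool.h>
#include <stdint.h>

#define HFSCR_TM	(1ULL << 5)	/* placeholder for the TM facility bit */

struct vcpu_model {
	uint64_t hfscr;			/* facilities currently granted to the guest */
	uint64_t hfscr_permitted;	/* facilities the guest may use at all */
	uint8_t  load_tm;		/* hysteresis counter, wraps after 256 exits */
	bool     nested;
};

/* vcpu creation: start with TM disabled so entry/exit can skip the TM SPRs. */
static void vcpu_create(struct vcpu_model *v)
{
	v->hfscr_permitted = v->hfscr;
	v->hfscr &= ~HFSCR_TM;
}

/* HV facility-unavailable interrupt for TM: grant the facility and re-enter
 * the guest, which retries the faulting instruction. */
static bool tm_unavailable(struct vcpu_model *v)
{
	if (!(v->hfscr_permitted & HFSCR_TM))
		return false;	/* never permitted: treat as illegal instruction */
	v->hfscr |= HFSCR_TM;
	return true;		/* resume guest; TM state is switched from now on */
}

/* Exit path: while TM stays granted, bump the counter on each state save;
 * when the u8 wraps back to zero, drop the facility so an idle guest stops
 * paying the TM SPR switch cost and the next TM use demand-faults again. */
static void vcpu_exit_save(struct vcpu_model *v)
{
	if ((v->hfscr & HFSCR_TM) && !v->nested) {
		v->load_tm++;
		if (!v->load_tm)
			v->hfscr &= ~HFSCR_TM;
	}
}

The net effect is the one the commit message describes: a guest that never touches TM pays no TM save/restore on the entry/exit fast path, while a guest that uses it heavily takes an occasional facility-unavailable fault (roughly once per 256 exits with the counter sketched above).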
arch/powerpc/include/asm/kvm_host.h
@@ -580,6 +580,9 @@ struct kvm_vcpu_arch {
 	ulong ppr;
 	u32 pspb;
 	u8 load_ebb;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	u8 load_tm;
+#endif
 	ulong fscr;
 	ulong shadow_fscr;
 	ulong ebbhr;
arch/powerpc/kvm/book3s_hv.c
@@ -1446,6 +1446,16 @@ static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu)
 	return RESUME_GUEST;
 }
 
+static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu)
+{
+	if (!(vcpu->arch.hfscr_permitted & HFSCR_TM))
+		return EMULATE_FAIL;
+
+	vcpu->arch.hfscr |= HFSCR_TM;
+
+	return RESUME_GUEST;
+}
+
 static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 				 struct task_struct *tsk)
 {
@@ -1739,6 +1749,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 			r = kvmppc_pmu_unavailable(vcpu);
 		if (cause == FSCR_EBB_LG)
 			r = kvmppc_ebb_unavailable(vcpu);
+		if (cause == FSCR_TM_LG)
+			r = kvmppc_tm_unavailable(vcpu);
 	}
 	if (r == EMULATE_FAIL) {
 		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
@@ -2783,9 +2795,9 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
 	vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
 
 	/*
-	 * PM, EBB is demand-faulted so start with it clear.
+	 * PM, EBB, TM are demand-faulted so start with it clear.
 	 */
-	vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB);
+	vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM);
 
 	kvmppc_mmu_book3s_hv_init(vcpu);
 
@@ -3868,8 +3880,9 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 		msr |= MSR_VEC;
 	if (cpu_has_feature(CPU_FTR_VSX))
 		msr |= MSR_VSX;
-	if (cpu_has_feature(CPU_FTR_TM) ||
-	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+	if ((cpu_has_feature(CPU_FTR_TM) ||
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+	    (vcpu->arch.hfscr & HFSCR_TM))
 		msr |= MSR_TM;
 	msr = msr_check_and_set(msr);
 
@@ -4608,8 +4621,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 		msr |= MSR_VEC;
 	if (cpu_has_feature(CPU_FTR_VSX))
 		msr |= MSR_VSX;
-	if (cpu_has_feature(CPU_FTR_TM) ||
-	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+	if ((cpu_has_feature(CPU_FTR_TM) ||
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+	    (vcpu->arch.hfscr & HFSCR_TM))
 		msr |= MSR_TM;
 	msr = msr_check_and_set(msr);
 
arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -310,7 +310,7 @@ bool load_vcpu_state(struct kvm_vcpu *vcpu,
 	if (MSR_TM_ACTIVE(guest_msr)) {
 		kvmppc_restore_tm_hv(vcpu, guest_msr, true);
 		ret = true;
-	} else {
+	} else if (vcpu->arch.hfscr & HFSCR_TM) {
 		mtspr(SPRN_TEXASR, vcpu->arch.texasr);
 		mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
 		mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
@@ -346,10 +346,16 @@ void store_vcpu_state(struct kvm_vcpu *vcpu)
 		unsigned long guest_msr = vcpu->arch.shregs.msr;
 		if (MSR_TM_ACTIVE(guest_msr)) {
 			kvmppc_save_tm_hv(vcpu, guest_msr, true);
-		} else {
+		} else if (vcpu->arch.hfscr & HFSCR_TM) {
 			vcpu->arch.texasr = mfspr(SPRN_TEXASR);
 			vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
 			vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
+
+			if (!vcpu->arch.nested) {
+				vcpu->arch.load_tm++; /* see load_ebb comment */
+				if (!vcpu->arch.load_tm)
+					vcpu->arch.hfscr &= ~HFSCR_TM;
+			}
 		}
 	}
 #endif
@@ -641,8 +647,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 		msr |= MSR_VEC;
 	if (cpu_has_feature(CPU_FTR_VSX))
 		msr |= MSR_VSX;
-	if (cpu_has_feature(CPU_FTR_TM) ||
-	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+	if ((cpu_has_feature(CPU_FTR_TM) ||
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+	    (vcpu->arch.hfscr & HFSCR_TM))
 		msr |= MSR_TM;
 	msr = msr_check_and_set(msr);
 	/* Save MSR for restore. This is after hard disable, so EE is clear. */