KVM: PPC: Book3S HV P9: Move nested guest entry into its own function
Move the part of the guest entry which is specific to nested HV into its own function. This is just refactoring.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-31-npiggin@gmail.com
Parent: aabcaf6ae2
Commit: 08b3f08af5
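For orientation before the hunks, here is a condensed sketch of the resulting shape of the code. This is an illustrative simplification, not the literal patch: only the function names and the call into the new helper are taken from the change below; the elided parts are summarised in comments.

	/* Sketch only: the real definitions are in the diff that follows. */
	static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit,
					      unsigned long lpcr, u64 *tb)
	{
		int trap = 0;	/* set by the H_ENTER_NESTED hcall in the real code */

		/*
		 * Body moved verbatim from the old if (kvmhv_on_pseries()) branch:
		 * swap PSSCR, fill struct hv_guest_state, make the H_ENTER_NESTED
		 * hcall, then restore the return state and decrementer bookkeeping.
		 */
		return trap;
	}

	static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
					unsigned long lpcr, u64 *tb)
	{
		int trap;

		/* ... host SPR save, PMU switch, etc. (unchanged by this patch) ... */

		if (kvmhv_on_pseries()) {
			trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
			/* H_CEDE is still handled here, as before */
		} else {
			/* native HV path, kvmhv_vcpu_entry_p9() (unchanged by this patch) */
			trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb);
		}

		/* ... exit path (unchanged by this patch) ... */
		return trap;
	}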
@@ -3822,6 +3822,72 @@ static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu)
 	}
 }
 
+/* call our hypervisor to load up HV regs and go */
+static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
+{
+	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long host_psscr;
+	struct hv_guest_state hvregs;
+	int trap;
+	s64 dec;
+
+	/*
+	 * We need to save and restore the guest visible part of the
+	 * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
+	 * doesn't do this for us. Note only required if pseries since
+	 * this is done in kvmhv_vcpu_entry_p9() below otherwise.
+	 */
+	host_psscr = mfspr(SPRN_PSSCR_PR);
+	mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
+	kvmhv_save_hv_regs(vcpu, &hvregs);
+	hvregs.lpcr = lpcr;
+	vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
+	hvregs.version = HV_GUEST_STATE_VERSION;
+	if (vcpu->arch.nested) {
+		hvregs.lpid = vcpu->arch.nested->shadow_lpid;
+		hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
+	} else {
+		hvregs.lpid = vcpu->kvm->arch.lpid;
+		hvregs.vcpu_token = vcpu->vcpu_id;
+	}
+	hvregs.hdec_expiry = time_limit;
+
+	/*
+	 * When setting DEC, we must always deal with irq_work_raise
+	 * via NMI vs setting DEC. The problem occurs right as we
+	 * switch into guest mode if a NMI hits and sets pending work
+	 * and sets DEC, then that will apply to the guest and not
+	 * bring us back to the host.
+	 *
+	 * irq_work_raise could check a flag (or possibly LPCR[HDICE]
+	 * for example) and set HDEC to 1? That wouldn't solve the
+	 * nested hv case which needs to abort the hcall or zero the
+	 * time limit.
+	 *
+	 * XXX: Another day's problem.
+	 */
+	mtspr(SPRN_DEC, kvmppc_dec_expires_host_tb(vcpu) - *tb);
+
+	mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
+	mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
+	trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
+				  __pa(&vcpu->arch.regs));
+	kvmhv_restore_hv_return_state(vcpu, &hvregs);
+	vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
+	vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
+	vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
+	vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
+	mtspr(SPRN_PSSCR_PR, host_psscr);
+
+	dec = mfspr(SPRN_DEC);
+	if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
+		dec = (s32) dec;
+	*tb = mftb();
+	vcpu->arch.dec_expires = dec + (*tb + vc->tb_offset);
+
+	return trap;
+}
+
 /*
  * Guest entry for POWER9 and later CPUs.
  */
@@ -3830,7 +3896,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	struct p9_host_os_sprs host_os_sprs;
-	s64 dec;
 	u64 next_timer;
 	unsigned long msr;
 	int trap;
@@ -3883,63 +3948,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	switch_pmu_to_guest(vcpu, &host_os_sprs);
 
 	if (kvmhv_on_pseries()) {
-		/*
-		 * We need to save and restore the guest visible part of the
-		 * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
-		 * doesn't do this for us. Note only required if pseries since
-		 * this is done in kvmhv_vcpu_entry_p9() below otherwise.
-		 */
-		unsigned long host_psscr;
-		/* call our hypervisor to load up HV regs and go */
-		struct hv_guest_state hvregs;
-
-		host_psscr = mfspr(SPRN_PSSCR_PR);
-		mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
-		kvmhv_save_hv_regs(vcpu, &hvregs);
-		hvregs.lpcr = lpcr;
-		vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
-		hvregs.version = HV_GUEST_STATE_VERSION;
-		if (vcpu->arch.nested) {
-			hvregs.lpid = vcpu->arch.nested->shadow_lpid;
-			hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
-		} else {
-			hvregs.lpid = vcpu->kvm->arch.lpid;
-			hvregs.vcpu_token = vcpu->vcpu_id;
-		}
-		hvregs.hdec_expiry = time_limit;
-
-		/*
-		 * When setting DEC, we must always deal with irq_work_raise
-		 * via NMI vs setting DEC. The problem occurs right as we
-		 * switch into guest mode if a NMI hits and sets pending work
-		 * and sets DEC, then that will apply to the guest and not
-		 * bring us back to the host.
-		 *
-		 * irq_work_raise could check a flag (or possibly LPCR[HDICE]
-		 * for example) and set HDEC to 1? That wouldn't solve the
-		 * nested hv case which needs to abort the hcall or zero the
-		 * time limit.
-		 *
-		 * XXX: Another day's problem.
-		 */
-		mtspr(SPRN_DEC, kvmppc_dec_expires_host_tb(vcpu) - *tb);
-
-		mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
-		mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
-		trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
-					  __pa(&vcpu->arch.regs));
-		kvmhv_restore_hv_return_state(vcpu, &hvregs);
-		vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
-		vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
-		vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
-		vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
-		mtspr(SPRN_PSSCR_PR, host_psscr);
-
-		dec = mfspr(SPRN_DEC);
-		if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
-			dec = (s32) dec;
-		*tb = mftb();
-		vcpu->arch.dec_expires = dec + (*tb + vc->tb_offset);
+		trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
 
 		/* H_CEDE has to be handled now, not later */
 		if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&