KVM: PPC: Book3S HV P9: Optimise loads around context switch
It is better to get all loads for the register values in flight before starting to switch LPID, PID, and LPCR because those mtSPRs are expensive and serialising. This also just tidies up the code for a potential future change to the context switching sequence.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220123114725.3549202-1-npiggin@gmail.com
Parent: 861604614a
Commit: 361234d7a1
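The change in miniature (an illustrative sketch of the scheduling idea, not a quote of the kernel code; all names are taken from the diff below): each of these mtSPRs is execution-serialising, so a load issued between them cannot overlap their cost, while a load issued before the sequence starts can complete while the first mtspr drains.

	/* Before: the PID value is loaded between serialising mtSPRs. */
	mtspr(SPRN_LPID, lpid);
	mtspr(SPRN_LPCR, lpcr);
	mtspr(SPRN_PID, vcpu->arch.pid);	/* load starts only after the mtsprs drain */

	/* After: the load is already in flight when the sequence begins. */
	pid = vcpu->arch.pid;			/* issued early, overlaps the mtspr cost */
	mtspr(SPRN_LPID, lpid);
	mtspr(SPRN_LPCR, lpcr);
	mtspr(SPRN_PID, pid);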
@@ -539,8 +539,10 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
 {
 	struct kvm_nested_guest *nested = vcpu->arch.nested;
 	u32 lpid;
+	u32 pid;
 
 	lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
+	pid = vcpu->arch.pid;
 
 	/*
 	 * Prior memory accesses to host PID Q3 must be completed before we
@@ -551,7 +553,7 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
 	isync();
 	mtspr(SPRN_LPID, lpid);
 	mtspr(SPRN_LPCR, lpcr);
-	mtspr(SPRN_PID, vcpu->arch.pid);
+	mtspr(SPRN_PID, pid);
 	/*
 	 * isync not required here because we are HRFID'ing to guest before
 	 * any guest context access, which is context synchronising.
@@ -561,9 +563,11 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
 static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
 {
 	u32 lpid;
+	u32 pid;
 	int i;
 
 	lpid = kvm->arch.lpid;
+	pid = vcpu->arch.pid;
 
 	/*
 	 * See switch_mmu_to_guest_radix. ptesync should not be required here
@@ -574,7 +578,7 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
 	isync();
 	mtspr(SPRN_LPID, lpid);
 	mtspr(SPRN_LPCR, lpcr);
-	mtspr(SPRN_PID, vcpu->arch.pid);
+	mtspr(SPRN_PID, pid);
 
 	for (i = 0; i < vcpu->arch.slb_max; i++)
 		mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
@@ -585,6 +589,9 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
 
 static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
 {
+	u32 lpid = kvm->arch.host_lpid;
+	u64 lpcr = kvm->arch.host_lpcr;
+
 	/*
 	 * The guest has exited, so guest MMU context is no longer being
 	 * non-speculatively accessed, but a hwsync is needed before the
@@ -594,8 +601,8 @@ static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
 	asm volatile("hwsync" ::: "memory");
 	isync();
 	mtspr(SPRN_PID, pid);
-	mtspr(SPRN_LPID, kvm->arch.host_lpid);
-	mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
+	mtspr(SPRN_LPID, lpid);
+	mtspr(SPRN_LPCR, lpcr);
 	/*
 	 * isync is not required after the switch, because mtmsrd with L=0
 	 * is performed after this switch, which is context synchronising.
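For readability, here is switch_mmu_to_host() as it reads after the patch, assembled from the last two hunks above. Spans that fall between or beyond the hunks are marked elided rather than reconstructed:

	static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
	{
		u32 lpid = kvm->arch.host_lpid;
		u64 lpcr = kvm->arch.host_lpcr;

		/*
		 * The guest has exited, so guest MMU context is no longer being
		 * non-speculatively accessed, but a hwsync is needed before the
		 * ... (remainder of this comment falls between the hunks) ...
		 */
		asm volatile("hwsync" ::: "memory");
		isync();
		mtspr(SPRN_PID, pid);
		mtspr(SPRN_LPID, lpid);
		mtspr(SPRN_LPCR, lpcr);
		/*
		 * isync is not required after the switch, because mtmsrd with L=0
		 * is performed after this switch, which is context synchronising.
		 */
		/* ... (rest of the function lies outside the diff context) ... */
	}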