diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index d123813296ba..8fa48ba01f79 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -487,10 +487,22 @@ static void __accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator
 #define accumulate_time(vcpu, next) do {} while (0)
 #endif
 
-static inline void mfslb(unsigned int idx, u64 *slbee, u64 *slbev)
+static inline u64 mfslbv(unsigned int idx)
 {
-	asm volatile("slbmfev %0,%1" : "=r" (*slbev) : "r" (idx));
-	asm volatile("slbmfee %0,%1" : "=r" (*slbee) : "r" (idx));
+	u64 slbev;
+
+	asm volatile("slbmfev %0,%1" : "=r" (slbev) : "r" (idx));
+
+	return slbev;
+}
+
+static inline u64 mfslbe(unsigned int idx)
+{
+	u64 slbee;
+
+	asm volatile("slbmfee %0,%1" : "=r" (slbee) : "r" (idx));
+
+	return slbee;
 }
 
 static inline void mtslb(u64 slbee, u64 slbev)
@@ -620,8 +632,10 @@ static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
 	 */
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
 		u64 slbee, slbev;
-		mfslb(i, &slbee, &slbev);
+
+		slbee = mfslbe(i);
 		if (slbee & SLB_ESID_V) {
+			slbev = mfslbv(i);
 			vcpu->arch.slb[nr].orige = slbee | i;
 			vcpu->arch.slb[nr].origv = slbev;
 			nr++;