powerpc/kvm: Fix magic page vs. 32-bit RTAS on ppc64

When the kernel calls into RTAS, it switches to 32-bit mode. The
magic page is no longer accessible in that case, causing the
patched instructions in the RTAS call wrapper to crash.

This fixes it by making available a 32-bit mapping of the magic
page in that case. This mapping is flushed whenever we switch
the kernel back to 64-bit mode.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[agraf: add a check if the magic page is mapped]
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Benjamin Herrenschmidt 2012-03-13 21:52:44 +00:00 committed by Avi Kivity
Parent 966cd0f3bd
Commit bbcc9c0669
2 changed files with 21 additions and 0 deletions

View File

@@ -291,6 +291,9 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa;
 
+	if (!(vcpu->arch.shared->msr & MSR_SF))
+		mp_pa = (uint32_t)mp_pa;
+
 	/* Magic page override */
 	if (unlikely(mp_pa) &&
 	    unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
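
The hunk above is the gfn-to-pfn side of the fix: with MSR[SF] clear the guest runs in 32-bit mode and only presents the low 32 bits of an address, so the magic page also has to be matched against its truncated alias. As an illustration only (not part of the commit), here is a minimal user-space sketch of that check; PAGE_SHIFT, KVM_PAM, the MSR_SF bit position and the sample magic-page address are stand-in values chosen for the example:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define KVM_PAM    0x0fffffffffffffffULL   /* sample physical-address mask */
#define MSR_SF     (1ULL << 63)            /* sample "64-bit mode" MSR bit */

/* Does guest frame 'gfn' hit the magic page, given the current MSR? */
static int magic_page_hit(uint64_t msr, uint64_t magic_page_pa, uint64_t gfn)
{
	uint64_t mp_pa = magic_page_pa;

	/* In 32-bit mode the guest only sees the low 32 bits of the address. */
	if (!(msr & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	return mp_pa &&
	       ((gfn << PAGE_SHIFT) & KVM_PAM) == (mp_pa & KVM_PAM);
}

int main(void)
{
	/* Sample: magic page registered near the top of the 64-bit space. */
	uint64_t magic_pa = 0xfffffffffffff000ULL;
	uint64_t gfn32 = 0xfffff000ULL >> PAGE_SHIFT; /* 32-bit RTAS access */

	/*
	 * A 32-bit access to 0xfffff000 only matches the magic page once
	 * the registered address is truncated, i.e. when MSR[SF] is clear.
	 */
	printf("match in 64-bit mode: %d\n", magic_page_hit(MSR_SF, magic_pa, gfn32));
	printf("match in 32-bit mode: %d\n", magic_page_hit(0, magic_pa, gfn32));
	return 0;
}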

View File

@@ -145,6 +145,21 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 		}
 	}
 
+	/*
+	 * When switching from 32 to 64-bit, we may have a stale 32-bit
+	 * magic page around, we need to flush it. Typically 32-bit magic
+	 * page will be instanciated when calling into RTAS. Note: We
+	 * assume that such transition only happens while in kernel mode,
+	 * ie, we never transition from user 32-bit to kernel 64-bit with
+	 * a 32-bit magic page around.
+	 */
+	if (vcpu->arch.magic_page_pa &&
+	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
+		/* going from RTAS to normal kernel code */
+		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
+				     ~0xFFFUL);
+	}
+
 	/* Preload FPU if it's enabled */
 	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
@@ -252,6 +267,9 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa;
 
+	if (!(vcpu->arch.shared->msr & MSR_SF))
+		mp_pa = (uint32_t)mp_pa;
+
 	if (unlikely(mp_pa) &&
 	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
 		return 1;
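
The first hunk above adds the actual flush: when a magic page is registered and the guest kernel goes from 32-bit back to 64-bit mode while staying in kernel mode, the 32-bit alias is stale, so its shadow translations are dropped via kvmppc_mmu_pte_flush() with a ~0xFFFUL mask (compare everything but the page-offset bits, i.e. flush just that one page). The second hunk mirrors the kvmppc_gfn_to_pfn change for kvmppc_visible_gfn, so the truncated alias is also accepted there. As an illustration only (not part of the commit), a small user-space sketch of the flush trigger condition; the MSR bit positions and the sample magic-page address are assumptions made for the example, the real definitions live in the kernel's register headers:

#include <stdint.h>
#include <stdio.h>

#define MSR_SF (1ULL << 63)   /* 1 = 64-bit mode (sample position) */
#define MSR_PR (1ULL << 14)   /* 1 = user mode, 0 = kernel (sample position) */

/*
 * Nonzero when the guest kernel switches from 32-bit to 64-bit mode with a
 * magic page registered -- the point where the 32-bit alias of the magic
 * page becomes stale and its shadow mappings must be flushed.
 */
static int must_flush_magic_page(uint64_t magic_page_pa,
				 uint64_t old_msr, uint64_t new_msr)
{
	return magic_page_pa &&
	       !(old_msr & MSR_PR) &&   /* was in kernel mode */
	       !(old_msr & MSR_SF) &&   /* was 32-bit (e.g. inside RTAS) */
	       (new_msr & MSR_SF);      /* now 64-bit again */
}

int main(void)
{
	uint64_t magic_pa = 0xfffffffffffff000ULL;  /* sample address */

	/* RTAS (32-bit kernel) returning to the 64-bit kernel: flush. */
	printf("%d\n", must_flush_magic_page(magic_pa, 0, MSR_SF));

	/* 64-bit kernel staying 64-bit: nothing to do. */
	printf("%d\n", must_flush_magic_page(magic_pa, MSR_SF, MSR_SF));
	return 0;
}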