diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
index 3033a9585b43..50204e228f16 100644
--- a/arch/powerpc/include/asm/kvm_book3s_uvmem.h
+++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
@@ -19,6 +19,8 @@ unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
 unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
+void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
+			     struct kvm *kvm);
 #else
 static inline int kvmppc_uvmem_init(void)
 {
@@ -64,5 +66,9 @@ static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
 {
 	return -EFAULT;
 }
+
+static inline void
+kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
+			struct kvm *kvm) { }
 #endif /* CONFIG_PPC_UV */
 #endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h
index e774274ab30e..4b0d044caa2a 100644
--- a/arch/powerpc/include/asm/ultravisor-api.h
+++ b/arch/powerpc/include/asm/ultravisor-api.h
@@ -27,6 +27,7 @@
 #define UV_RETURN			0xF11C
 #define UV_ESM				0xF110
 #define UV_REGISTER_MEM_SLOT		0xF120
+#define UV_UNREGISTER_MEM_SLOT		0xF124
 #define UV_PAGE_IN			0xF128
 #define UV_PAGE_OUT			0xF12C
 #define UV_SHARE_PAGE			0xF130
diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h
index 40cc8bace654..b8e59b7b4ac8 100644
--- a/arch/powerpc/include/asm/ultravisor.h
+++ b/arch/powerpc/include/asm/ultravisor.h
@@ -67,6 +67,11 @@ static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size,
 			   size, flags, slotid);
 }
 
+static inline int uv_unregister_mem_slot(u64 lpid, u64 slotid)
+{
+	return ucall_norets(UV_UNREGISTER_MEM_SLOT, lpid, slotid);
+}
+
 static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift)
 {
 	return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 9f6ba113ffe3..da857c8ba6e4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -1101,6 +1101,9 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
 	unsigned long gpa;
 	unsigned int shift;
 
+	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
+		kvmppc_uvmem_drop_pages(memslot, kvm);
+
 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
 		return;
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 03d56aeec714..a8e815648b0a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -74,6 +74,7 @@
 #include <asm/xive.h>
 #include <asm/hw_breakpoint.h>
 #include <asm/kvm_book3s_uvmem.h>
+#include <asm/ultravisor.h>
 
 #include "book3s.h"
 
@@ -4515,6 +4516,29 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
 	if (change == KVM_MR_FLAGS_ONLY && kvm_is_radix(kvm) &&
 	    ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES))
 		kvmppc_radix_flush_memslot(kvm, old);
+	/*
+	 * If UV hasn't yet called H_SVM_INIT_START, don't register memslots.
+	 */
+	if (!kvm->arch.secure_guest)
+		return;
+
+	switch (change) {
+	case KVM_MR_CREATE:
+		if (kvmppc_uvmem_slot_init(kvm, new))
+			return;
+		uv_register_mem_slot(kvm->arch.lpid,
+				     new->base_gfn << PAGE_SHIFT,
+				     new->npages * PAGE_SIZE,
+				     0, new->id);
+		break;
+	case KVM_MR_DELETE:
+		uv_unregister_mem_slot(kvm->arch.lpid, old->id);
+		kvmppc_uvmem_slot_free(kvm, old);
+		break;
+	default:
+		/* TODO: Handle KVM_MR_MOVE */
+		break;
+	}
 }
 
 /*
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index ed51498b20ee..2de264fc3156 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -249,6 +249,43 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
 	return H_SUCCESS;
 }
 
+/*
+ * Drop device pages that we maintain for the secure guest
+ *
+ * We first mark the pages to be skipped from UV_PAGE_OUT when there
+ * is HV side fault on these pages. Next we *get* these pages, forcing
+ * fault on them, do fault time migration to replace the device PTEs in
+ * QEMU page table with normal PTEs from newly allocated pages.
+ */
+void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
+			     struct kvm *kvm)
+{
+	int i;
+	struct kvmppc_uvmem_page_pvt *pvt;
+	unsigned long pfn, uvmem_pfn;
+	unsigned long gfn = free->base_gfn;
+
+	for (i = free->npages; i; --i, ++gfn) {
+		struct page *uvmem_page;
+
+		mutex_lock(&kvm->arch.uvmem_lock);
+		if (!kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
+			mutex_unlock(&kvm->arch.uvmem_lock);
+			continue;
+		}
+
+		uvmem_page = pfn_to_page(uvmem_pfn);
+		pvt = uvmem_page->zone_device_data;
+		pvt->skip_page_out = true;
+		mutex_unlock(&kvm->arch.uvmem_lock);
+
+		pfn = gfn_to_pfn(kvm, gfn);
+		if (is_error_noslot_pfn(pfn))
+			continue;
+		kvm_release_pfn_clean(pfn);
+	}
+}
+
 /*
  * Get a free device PFN from the pool
  *