KVM: Synchronize guest physical memory map to host virtual memory map

Synchronize changes to host virtual addresses which are part of
a KVM memory slot to the KVM shadow mmu.  This allows pte operations
like swapping, page migration, and madvise() to transparently work
with KVM.

Signed-off-by: Andrea Arcangeli <andrea@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Author: Andrea Arcangeli, 2008-07-25 16:24:52 +02:00 (committed by Avi Kivity)
Parent: 604b38ac03
Commit: e930bffe95
5 changed files: 277 additions and 0 deletions
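Before the per-file changes, it may help to see the shape of the retry protocol the patch introduces. The sketch below is a condensed, schematic restatement of the fault paths in the diff (nonpaging_map(), tdp_page_fault(), FNAME(page_fault)); map_the_spte() is a hypothetical stand-in for __direct_map()/FNAME(fetch), and error handling and the largepage logic are elided.

/*
 * Sketch of the fault-path side of the sequence/count protocol,
 * condensed from the fault handlers in the diff below.
 */
static int fault_path_sketch(struct kvm_vcpu *vcpu, gva_t addr, gfn_t gfn)
{
	unsigned long mmu_seq;
	pfn_t pfn;
	int r;

	/* Snapshot the notifier sequence before translating gfn to pfn. */
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/* gfn_to_pfn() implies a barrier: the snapshot must happen first. */
	pfn = gfn_to_pfn(vcpu->kvm, gfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq)) {
		/* An invalidate ran (or is running): pfn may be stale. */
		spin_unlock(&vcpu->kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return 0;	/* guest re-executes the access and re-faults */
	}
	/* Hypothetical stand-in for __direct_map()/FNAME(fetch). */
	r = map_the_spte(vcpu, addr, gfn, pfn);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return r;
}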

arch/x86/kvm/mmu.c

@@ -653,6 +653,84 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
		account_shadowed(kvm, gfn);
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
{
	u64 *spte;
	int need_tlb_flush = 0;

	while ((spte = rmap_next(kvm, rmapp, NULL))) {
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
		rmap_remove(kvm, spte);
		set_shadow_pte(spte, shadow_trap_nonpresent_pte);
		need_tlb_flush = 1;
	}
	return need_tlb_flush;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp))
{
	int i;
	int retval = 0;

	/*
	 * If mmap_sem isn't taken, we can look at the memslots with only
	 * the mmu_lock by skipping over the slots with userspace_addr == 0.
	 */
	for (i = 0; i < kvm->nmemslots; i++) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];
		unsigned long start = memslot->userspace_addr;
		unsigned long end;

		/* mmu_lock protects userspace_addr */
		if (!start)
			continue;

		end = start + (memslot->npages << PAGE_SHIFT);
		if (hva >= start && hva < end) {
			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
			retval |= handler(kvm, &memslot->rmap[gfn_offset]);
			retval |= handler(kvm,
					  &memslot->lpage_info[
						  gfn_offset /
						  KVM_PAGES_PER_HPAGE].rmap_pde);
		}
	}

	return retval;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
{
	u64 *spte;
	int young = 0;

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		int _young;
		u64 _spte = *spte;
		BUG_ON(!(_spte & PT_PRESENT_MASK));
		_young = _spte & PT_ACCESSED_MASK;
		if (_young) {
			young = 1;
			clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	return young;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{

@@ -1203,6 +1281,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
	int r;
	int largepage = 0;
	pfn_t pfn;
	unsigned long mmu_seq;

	down_read(&current->mm->mmap_sem);
	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {

@@ -1210,6 +1289,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
		largepage = 1;
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/* implicit mb(), we'll read before PT lock is unlocked */
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

@@ -1220,6 +1301,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
			 PT32E_ROOT_LEVEL);

@@ -1227,6 +1310,11 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

@@ -1345,6 +1433,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
	int r;
	int largepage = 0;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	unsigned long mmu_seq;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

@@ -1358,6 +1447,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		largepage = 1;
	}
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/* implicit mb(), we'll read before PT lock is unlocked */
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);
	if (is_error_pfn(pfn)) {

@@ -1365,12 +1456,19 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
			 largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

static void nonpaging_free(struct kvm_vcpu *vcpu)

@@ -1670,6 +1768,8 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		vcpu->arch.update_pte.largepage = 1;
	}
	vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/* implicit mb(), we'll read before PT lock is unlocked */
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

arch/x86/kvm/paging_tmpl.h

@@ -263,6 +263,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),

@@ -380,6 +382,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
	int r;
	pfn_t pfn;
	int largepage = 0;
	unsigned long mmu_seq;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

@@ -413,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			largepage = 1;
		}
	}
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/* implicit mb(), we'll read before PT lock is unlocked */
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
	up_read(&current->mm->mmap_sem);

@@ -424,6 +429,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  largepage, &write_pt, pfn);

@@ -439,6 +446,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)

include/asm-x86/kvm_host.h

@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

@@ -251,6 +252,7 @@ struct kvm_vcpu_arch {
		gfn_t gfn;	/* presumed gfn during guest pte update */
		pfn_t pfn;	/* pfn corresponding to that gfn */
		int largepage;
		unsigned long mmu_seq;
	} update_pte;

	struct i387_fxsave_struct host_fx_image;

@@ -729,4 +731,8 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
	KVM_EX_ENTRY " 666b, 667b \n\t" \
	".popsection"

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_age_hva(struct kvm *kvm, unsigned long hva);

#endif

include/linux/kvm_host.h

@@ -121,6 +121,12 @@ struct kvm {
	struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif
#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
};

/* The guest did something we don't support. */

@@ -332,4 +338,22 @@ int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
#define kvm_trace_cleanup() ((void)0)
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Both reads happen under the mmu_lock and both values are
	 * modified under mmu_lock, so there's no need for smp_rmb()
	 * in between; otherwise mmu_notifier_count would have to be
	 * read before mmu_notifier_seq. See the
	 * mmu_notifier_invalidate_range_end() write side.
	 */
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif

#endif

virt/kvm/kvm_main.c

@@ -192,6 +192,123 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, but both values are read by the kvm
	 * page fault under the mmu_lock spinlock, so we don't need
	 * an smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();

@@ -212,6 +329,21 @@ static struct kvm *kvm_create_vm(void)
		(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		int err;
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (err) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			kfree(kvm);
			return ERR_PTR(err);
		}
	}
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);

@@ -271,6 +403,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#endif
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
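
For orientation, here is one possible interleaving of a page fault racing with an invalidate. This is a schematic illustration, not code from the patch; it shows why the fault is forced to retry rather than install a spte for a page the mm is about to free.

vcpu thread (page fault)                 mm thread (munmap/migration)
------------------------                 ----------------------------
mmu_seq = kvm->mmu_notifier_seq
pfn = gfn_to_pfn(kvm, gfn)
                                         invalidate_range_start():
                                             mmu_notifier_count++
                                             kvm_unmap_hva()  /* zaps sptes */
spin_lock(&kvm->mmu_lock)
mmu_notifier_retry() -> 1                /* count != 0, invalidate in flight */
unlock, kvm_release_pfn_clean(), return 0
                                         invalidate_range_end():
                                             mmu_notifier_seq++
                                             mmu_notifier_count--
guest re-faults; a fresh gfn_to_pfn()
now observes the updated host mapping

Had the fault snapshotted mmu_seq before range_start and reached mmu_lock only after range_end, the count would be zero again, but the sequence bump makes mmu_notifier_retry() return 1 all the same.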