KVM: MMU: Add MMU helper functions to support PML

This patch adds new MMU-layer functions to clear/set the D-bit for a memory slot, and to write-protect superpages for a memory slot.

With PML, the CPU logs the dirty GPA into the PML buffer automatically whenever it updates a D-bit from 0 to 1, so we do not have to write-protect 4K pages; clearing the D-bit is enough to get that GPA logged again. Superpages are still write-protected so that the page fault path handles dirty-page logging, since PML requires splitting superpages into 4K pages anyway.

Because PML is enabled for the guest's entire lifetime, we set the D-bit manually for slots with dirty logging disabled, which eliminates unnecessary PML GPA logging for them.

Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
Reviewed-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent: 3b0f1d01e5
Commit: f4b4b18086
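Before the diff, a quick sketch of how these helpers fit together. Later patches in this series are expected to hook them into the per-memslot dirty-logging toggles; the callback names below (vmx_slot_enable_log_dirty, vmx_slot_disable_log_dirty) are illustrative assumptions, not part of this patch:

/* Sketch only -- assumed callback names, not part of this patch. */
static void vmx_slot_enable_log_dirty(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	/* 4K pages: clear D-bits so the CPU re-logs dirty GPAs via PML */
	kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
	/* superpages: write protect so the fault path splits them to 4K */
	kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
}

static void vmx_slot_disable_log_dirty(struct kvm *kvm,
				       struct kvm_memory_slot *slot)
{
	/*
	 * PML stays on for the guest's lifetime; setting D-bits up front
	 * stops the CPU from logging GPAs for a slot nobody is tracking.
	 */
	kvm_mmu_slot_set_dirty(kvm, slot);
}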
arch/x86/include/asm/kvm_host.h:

@@ -835,6 +835,15 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+				   struct kvm_memory_slot *memslot);
+void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
+					struct kvm_memory_slot *memslot);
+void kvm_mmu_slot_set_dirty(struct kvm *kvm,
+			    struct kvm_memory_slot *memslot);
+void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+				   struct kvm_memory_slot *slot,
+				   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
arch/x86/kvm/mmu.c:

@@ -1215,6 +1215,60 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
 	return flush;
 }
 
+static bool spte_clear_dirty(struct kvm *kvm, u64 *sptep)
+{
+	u64 spte = *sptep;
+
+	rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
+
+	spte &= ~shadow_dirty_mask;
+
+	return mmu_spte_update(sptep, spte);
+}
+
+static bool __rmap_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
+{
+	u64 *sptep;
+	struct rmap_iterator iter;
+	bool flush = false;
+
+	for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+		BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+		flush |= spte_clear_dirty(kvm, sptep);
+		sptep = rmap_get_next(&iter);
+	}
+
+	return flush;
+}
+
+static bool spte_set_dirty(struct kvm *kvm, u64 *sptep)
+{
+	u64 spte = *sptep;
+
+	rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
+
+	spte |= shadow_dirty_mask;
+
+	return mmu_spte_update(sptep, spte);
+}
+
+static bool __rmap_set_dirty(struct kvm *kvm, unsigned long *rmapp)
+{
+	u64 *sptep;
+	struct rmap_iterator iter;
+	bool flush = false;
+
+	for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+		BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+		flush |= spte_set_dirty(kvm, sptep);
+		sptep = rmap_get_next(&iter);
+	}
+
+	return flush;
+}
+
 /**
  * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
  * @kvm: kvm instance
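The two spte_*_dirty() helpers are single-bit edits routed through mmu_spte_update(), whose return value tells the caller whether a remote TLB flush is needed; __rmap_clear_dirty()/__rmap_set_dirty() OR those results into flush. A standalone sketch of that accumulate-the-flush pattern (bit 9 models the EPT dirty bit under A/D-enabled EPT; the helper name and values here are made up for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_DIRTY_MASK (1ULL << 9)	/* assumed EPT D-bit position */

/* Mirrors spte_clear_dirty(): returns true if the SPTE actually changed,
 * i.e. the caller must eventually flush TLBs (mmu_spte_update()'s role). */
static bool demo_clear_dirty(uint64_t *sptep)
{
	uint64_t old = *sptep;

	*sptep = old & ~DEMO_DIRTY_MASK;
	return old != *sptep;
}

int main(void)
{
	uint64_t sptes[] = { 0x1000 | DEMO_DIRTY_MASK, 0x2000,
			     0x3000 | DEMO_DIRTY_MASK };
	bool flush = false;

	/* same accumulation pattern as __rmap_clear_dirty() */
	for (int i = 0; i < 3; i++)
		flush |= demo_clear_dirty(&sptes[i]);

	printf("flush needed: %d\n", flush);	/* 1: two D-bits were cleared */
	return 0;
}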
@@ -1241,6 +1295,32 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 	}
 }
 
+/**
+ * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages
+ * @kvm: kvm instance
+ * @slot: slot to clear D-bit
+ * @gfn_offset: start of the BITS_PER_LONG pages we care about
+ * @mask: indicates which pages we should clear D-bit
+ *
+ * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
+ */
+void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+				   struct kvm_memory_slot *slot,
+				   gfn_t gfn_offset, unsigned long mask)
+{
+	unsigned long *rmapp;
+
+	while (mask) {
+		rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+				      PT_PAGE_TABLE_LEVEL, slot);
+		__rmap_clear_dirty(kvm, rmapp);
+
+		/* clear the first set bit */
+		mask &= mask - 1;
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);
+
 /**
  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
  * PT level pages.
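The while (mask) loop in kvm_mmu_clear_dirty_pt_masked() visits exactly the set bits of mask: __ffs() returns the index of the lowest set bit and mask &= mask - 1 clears it, so each iteration handles one dirty page in the 64-page window. A user-space rendering of the same walk (a sketch; __builtin_ctzl() stands in for the kernel's __ffs(), and the values are examples, not from the patch):

#include <stdio.h>

int main(void)
{
	unsigned long gfn_offset = 64;	/* example window base */
	unsigned long mask = 0x29;	/* bits 0, 3 and 5: three dirty pages */

	while (mask) {
		/* lowest set bit == next dirty gfn within this window */
		unsigned long gfn = gfn_offset + __builtin_ctzl(mask);

		printf("clear D-bit for gfn %lu\n", gfn);
		mask &= mask - 1;	/* clear the lowest set bit */
	}
	return 0;
}

Running this prints gfns 64, 67 and 69, i.e. one __rmap_clear_dirty() call per set bit.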
@@ -4368,6 +4448,121 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 	kvm_flush_remote_tlbs(kvm);
 }
 
+void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+				   struct kvm_memory_slot *memslot)
+{
+	gfn_t last_gfn;
+	unsigned long *rmapp;
+	unsigned long last_index, index;
+	bool flush = false;
+
+	last_gfn = memslot->base_gfn + memslot->npages - 1;
+
+	spin_lock(&kvm->mmu_lock);
+
+	rmapp = memslot->arch.rmap[PT_PAGE_TABLE_LEVEL - 1];
+	last_index = gfn_to_index(last_gfn, memslot->base_gfn,
+				  PT_PAGE_TABLE_LEVEL);
+
+	for (index = 0; index <= last_index; ++index, ++rmapp) {
+		if (*rmapp)
+			flush |= __rmap_clear_dirty(kvm, rmapp);
+
+		if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+			cond_resched_lock(&kvm->mmu_lock);
+	}
+
+	spin_unlock(&kvm->mmu_lock);
+
+	lockdep_assert_held(&kvm->slots_lock);
+
+	/*
+	 * It's also safe to flush TLBs out of mmu lock here as currently this
+	 * function is only used for dirty logging, in which case flushing TLB
+	 * out of mmu lock also guarantees no dirty pages will be lost in
+	 * dirty_bitmap.
+	 */
+	if (flush)
+		kvm_flush_remote_tlbs(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
+
+void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
+					struct kvm_memory_slot *memslot)
+{
+	gfn_t last_gfn;
+	int i;
+	bool flush = false;
+
+	last_gfn = memslot->base_gfn + memslot->npages - 1;
+
+	spin_lock(&kvm->mmu_lock);
+
+	for (i = PT_PAGE_TABLE_LEVEL + 1; /* skip rmap for 4K page */
+	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+		unsigned long *rmapp;
+		unsigned long last_index, index;
+
+		rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
+		last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
+
+		for (index = 0; index <= last_index; ++index, ++rmapp) {
+			if (*rmapp)
+				flush |= __rmap_write_protect(kvm, rmapp,
+						false);
+
+			if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+				cond_resched_lock(&kvm->mmu_lock);
+		}
+	}
+	spin_unlock(&kvm->mmu_lock);
+
+	/* see kvm_mmu_slot_remove_write_access */
+	lockdep_assert_held(&kvm->slots_lock);
+
+	if (flush)
+		kvm_flush_remote_tlbs(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
+
+void kvm_mmu_slot_set_dirty(struct kvm *kvm,
+			    struct kvm_memory_slot *memslot)
+{
+	gfn_t last_gfn;
+	int i;
+	bool flush = false;
+
+	last_gfn = memslot->base_gfn + memslot->npages - 1;
+
+	spin_lock(&kvm->mmu_lock);
+
+	for (i = PT_PAGE_TABLE_LEVEL;
+	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+		unsigned long *rmapp;
+		unsigned long last_index, index;
+
+		rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
+		last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
+
+		for (index = 0; index <= last_index; ++index, ++rmapp) {
+			if (*rmapp)
+				flush |= __rmap_set_dirty(kvm, rmapp);
+
+			if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+				cond_resched_lock(&kvm->mmu_lock);
+		}
+	}
+
+	spin_unlock(&kvm->mmu_lock);
+
+	lockdep_assert_held(&kvm->slots_lock);
+
+	/* see kvm_mmu_slot_leaf_clear_dirty */
+	if (flush)
+		kvm_flush_remote_tlbs(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
+
 #define BATCH_ZAP_PAGES 10
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
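For context on how the exports get used: when userspace harvests the dirty log, the generic code invokes the arch hook kvm_arch_mmu_enable_log_dirty_pt_masked() (named in the context lines above) for each word of dirty_bitmap, and with PML that hook can clear D-bits instead of write-protecting. A sketch of the expected dispatch, assuming the rest of the series adds a kvm_x86_ops callback for this (the enable_log_dirty_pt_masked member name is an assumption):

/* Sketch of the expected arch-level dispatch -- not part of this patch. */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask)
{
	if (kvm_x86_ops->enable_log_dirty_pt_masked)
		/* PML: clearing D-bits is enough to re-arm logging */
		kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot,
							gfn_offset, mask);
	else
		/* no PML: fall back to write protection */
		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

Note also the design point shared by the three slot-wide walkers: the TLB flush happens after mmu_lock is dropped, which the in-code comment argues is safe for dirty logging since a write that slips through a stale TLB entry hits a page already reported in dirty_bitmap.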