KVM: MMU: apply page track notifier
Register the notifier to receive write-track events so that we can update our shadow page table. This makes kvm_mmu_pte_write() the callback of the notifier; no functionality is changed.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent: 5c520e90af
Commit: 13d268ca2c
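For context, the sketch below is a minimal user-space model of the flow this patch establishes: the instruction emulator calls the write-track path, which fans the write out to every registered notifier node, and the MMU's node now points at kvm_mmu_pte_write(). All types and names in the sketch (struct vm, struct track_node, mmu_pte_write, and so on) are hypothetical stand-ins for illustration, not the kernel API.

/*
 * Minimal user-space model of the write-track notifier flow wired up by
 * this patch.  Every type and name here is an illustrative stand-in.
 */
#include <stdio.h>
#include <stdint.h>

struct vm;

/* Analogue of kvm_page_track_notifier_node: one callback per consumer. */
struct track_node {
	void (*track_write)(struct vm *vm, uint64_t gpa,
			    const uint8_t *new, int bytes);
	struct track_node *next;
};

/* Analogue of kvm_arch: the VM owns the notifier list and the MMU's node. */
struct vm {
	struct track_node *track_head;		/* registered notifiers */
	struct track_node mmu_sp_tracker;	/* the MMU's own node */
};

/* Analogue of kvm_page_track_register_notifier(). */
static void track_register(struct vm *vm, struct track_node *n)
{
	n->next = vm->track_head;
	vm->track_head = n;
}

/* Analogue of kvm_page_track_write(): fan the write out to every notifier. */
static void track_write(struct vm *vm, uint64_t gpa,
			const uint8_t *new, int bytes)
{
	struct track_node *n;

	for (n = vm->track_head; n; n = n->next)
		if (n->track_write)
			n->track_write(vm, gpa, new, bytes);
}

/* Stand-in for kvm_mmu_pte_write(): now reached only via the notifier. */
static void mmu_pte_write(struct vm *vm, uint64_t gpa,
			  const uint8_t *new, int bytes)
{
	printf("MMU: update shadow pages for a %d-byte write at gpa 0x%llx\n",
	       bytes, (unsigned long long)gpa);
}

/* Analogue of kvm_mmu_init_vm(): hook the MMU callback into the notifier. */
static void mmu_init_vm(struct vm *vm)
{
	vm->mmu_sp_tracker.track_write = mmu_pte_write;
	track_register(vm, &vm->mmu_sp_tracker);
}

int main(void)
{
	struct vm vm = { 0 };
	uint8_t val[4] = { 1, 2, 3, 4 };

	mmu_init_vm(&vm);
	/* Emulated guest write: the emulator calls only the track path,
	 * and the MMU is notified through its registered callback. */
	track_write(&vm, 0x1000, val, sizeof(val));
	return 0;
}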
arch/x86/include/asm/kvm_host.h
@@ -704,6 +704,7 @@ struct kvm_arch {
 	 */
 	struct list_head active_mmu_pages;
 	struct list_head zapped_obsolete_pages;
+	struct kvm_page_track_notifier_node mmu_sp_tracker;
 	struct kvm_page_track_notifier_head track_notifier_head;
 
 	struct list_head assigned_dev_head;
@@ -1001,6 +1002,8 @@ void kvm_mmu_module_exit(void);
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 void kvm_mmu_setup(struct kvm_vcpu *vcpu);
+void kvm_mmu_init_vm(struct kvm *kvm);
+void kvm_mmu_uninit_vm(struct kvm *kvm);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask);
@@ -1140,8 +1143,6 @@ void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu);
 
-void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-		       const u8 *new, int bytes);
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
arch/x86/kvm/mmu.c
@@ -4302,7 +4302,7 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
 	return spte;
 }
 
-void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes)
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
@@ -4517,6 +4517,21 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
 	init_kvm_mmu(vcpu);
 }
 
+void kvm_mmu_init_vm(struct kvm *kvm)
+{
+	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+
+	node->track_write = kvm_mmu_pte_write;
+	kvm_page_track_register_notifier(kvm, node);
+}
+
+void kvm_mmu_uninit_vm(struct kvm *kvm)
+{
+	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+
+	kvm_page_track_unregister_notifier(kvm, node);
+}
+
 /* The return value indicates if tlb flush on all vcpus is needed. */
 typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
arch/x86/kvm/x86.c
@@ -4345,7 +4345,6 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
 	if (ret < 0)
 		return 0;
-	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
 	kvm_page_track_write(vcpu, gpa, val, bytes);
 	return 1;
 }
@@ -4604,7 +4603,6 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 		return X86EMUL_CMPXCHG_FAILED;
 
 	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
-	kvm_mmu_pte_write(vcpu, gpa, new, bytes);
 	kvm_page_track_write(vcpu, gpa, new, bytes);
 
 	return X86EMUL_CONTINUE;
@@ -7727,6 +7725,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
 
 	kvm_page_track_init(kvm);
+	kvm_mmu_init_vm(kvm);
 
 	return 0;
 }
@@ -7854,6 +7853,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kfree(kvm->arch.vioapic);
 	kvm_free_vcpus(kvm);
 	kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
+	kvm_mmu_uninit_vm(kvm);
 }
 
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,