KVM: x86: Avoid guest page table walk when gpa_available is set
When a guest causes a page fault which requires emulation, the
vcpu->arch.gpa_available flag is set to indicate that cr2 contains a valid
GPA. Currently, emulator_read_write_onepage() uses the gpa_available flag
only to avoid a guest page table walk for known MMIO regions. Let's not
limit the gpa_available optimization to just MMIO regions: extend the check
so that the page walk is skipped whenever the gpa_available flag is set.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
[Fix EPT=0 according to Wanpeng Li's fix, plus ensure VMX also uses the new
 code. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
[Moved "ret < 0" to the else branch, as per David's review. - Radim]
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Parent: e08d26f071
Commit: 618232e219
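For orientation before the hunks, the resulting flow is condensed below. This is only a sketch assembled from the diff itself (error handling and the MMIO dispatch are omitted); field and function names come from the patch:

    /* vcpu_enter_guest(), x86.c: the cached GPA is only valid for the
     * current exit, so clear the flag before the exit handler runs. */
    vcpu->arch.gpa_available = false;
    r = kvm_x86_ops->handle_exit(vcpu);

    /* kvm_mmu_page_fault(), mmu.c: with a direct-map (TDP) MMU, cr2
     * already holds the faulting GPA, so cache it for the emulator. */
    if (vcpu->arch.mmu.direct_map) {
            vcpu->arch.gpa_available = true;
            vcpu->arch.gpa_val = cr2;
    }

    /* emulator_read_write_onepage(), x86.c: if the cached GPA covers this
     * access, use it and skip the guest page table walk; otherwise fall
     * back to vcpu_mmio_gva_to_gpa(). */
    if (vcpu->arch.gpa_available &&
        emulator_can_use_gpa(ctxt) &&
        (addr & ~PAGE_MASK) == (vcpu->arch.gpa_val & ~PAGE_MASK))
            gpa = vcpu->arch.gpa_val;
    else
            ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);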
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -685,8 +685,9 @@ struct kvm_vcpu_arch {
 	int pending_ioapic_eoi;
 	int pending_external_vector;
 
-	/* GPA available (AMD only) */
+	/* GPA available */
 	bool gpa_available;
+	gpa_t gpa_val;
 
 	/* be preempted when it's in kernel-mode(cpl=0) */
 	bool preempted_in_kernel;
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4843,6 +4843,12 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 	enum emulation_result er;
 	bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu);
 
+	/* With shadow page tables, fault_address contains a GVA or nGPA. */
+	if (vcpu->arch.mmu.direct_map) {
+		vcpu->arch.gpa_available = true;
+		vcpu->arch.gpa_val = cr2;
+	}
+
 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
 		r = handle_mmio_page_fault(vcpu, cr2, direct);
 		if (r == RET_MMIO_PF_EMULATE) {
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4236,8 +4236,6 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 
 	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
 
-	vcpu->arch.gpa_available = (exit_code == SVM_EXIT_NPF);
-
 	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
 		vcpu->arch.cr0 = svm->vmcb->save.cr0;
 	if (npt_enabled)
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6393,9 +6393,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 	error_code |= (exit_qualification & 0x100) != 0 ?
 	       PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
 
-	vcpu->arch.gpa_available = true;
 	vcpu->arch.exit_qualification = exit_qualification;
-
 	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
 }
 
@@ -6410,7 +6408,6 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 		return kvm_skip_emulated_instruction(vcpu);
 	}
 
-	vcpu->arch.gpa_available = true;
 	ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
 	if (ret >= 0)
 		return ret;
@@ -8644,7 +8641,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
 	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
-	vcpu->arch.gpa_available = false;
 
 	/*
 	 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4657,25 +4657,18 @@ static int emulator_read_write_onepage(unsigned long addr, void *val,
 	 */
 	if (vcpu->arch.gpa_available &&
 	    emulator_can_use_gpa(ctxt) &&
-	    vcpu_is_mmio_gpa(vcpu, addr, exception->address, write) &&
-	    (addr & ~PAGE_MASK) == (exception->address & ~PAGE_MASK)) {
-		gpa = exception->address;
-		goto mmio;
+	    (addr & ~PAGE_MASK) == (vcpu->arch.gpa_val & ~PAGE_MASK)) {
+		gpa = vcpu->arch.gpa_val;
+		ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
+	} else {
+		ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
+		if (ret < 0)
+			return X86EMUL_PROPAGATE_FAULT;
 	}
 
-	ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
-
-	if (ret < 0)
-		return X86EMUL_PROPAGATE_FAULT;
-
-	/* For APIC access vmexit */
-	if (ret)
-		goto mmio;
-
-	if (ops->read_write_emulate(vcpu, gpa, val, bytes))
+	if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
 		return X86EMUL_CONTINUE;
 
 mmio:
 	/*
 	 * Is this MMIO handled locally?
 	 */
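As a reading aid for the merged return-value handling above (not part of the patch): after the if/else, ret < 0 is only possible on the vcpu_mmio_gva_to_gpa() path, which is why the fault check sits in the else branch (per the review note in the commit message). The final test then behaves as annotated here:

    /*
     * ret == 0: ordinary guest memory -> try ops->read_write_emulate()
     * ret != 0: the GPA is MMIO (e.g. an APIC access), as in the old
     *           "if (ret) goto mmio" -> fall through to the mmio: label
     */
    if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
            return X86EMUL_CONTINUE;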
@@ -7002,6 +6995,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.apic_attention)
 		kvm_lapic_sync_from_vapic(vcpu);
 
+	vcpu->arch.gpa_available = false;
 	r = kvm_x86_ops->handle_exit(vcpu);
 	return r;