KVM: VMX: Introduce generic fastpath handler
Introduce a generic fastpath handler to handle the MSR fastpath, VMX-preemption timer fastpath, etc.; move it after vmx_complete_interrupts() so that events delivered to the guest are caught, and so that later patches can abort the fast path. While at it, move the kvm_exit tracepoint so that it is printed for fastpath vmexits as well.

There is no observed performance effect for the IPI fastpath after this patch.

Tested-by: Haiwei Li <lihaiwei@tencent.com>
Cc: Haiwei Li <lihaiwei@tencent.com>
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <1588055009-12677-2-git-send-email-wanpengli@tencent.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent: 9e826feb8f
Commit: dcf068da7e
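For context on the "VMX-preemption timer fastpath etc." mentioned above: the diff below only wires up the MSR-write case, but the switch in the new vmx_exit_handlers_fastpath() is the intended extension point. A rough sketch of how a later patch might hook in another exit reason follows; the EXIT_REASON_PREEMPTION_TIMER case and the handle_fastpath_preemption_timer() helper are hypothetical here and are not part of this patch.

/*
 * Illustrative sketch only, not part of this patch: shows how additional
 * exit reasons could be dispatched from the same switch that this patch
 * introduces with just the MSR-write case.
 * handle_fastpath_preemption_timer() is a hypothetical helper standing in
 * for what a later patch may add.
 */
static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
	switch (to_vmx(vcpu)->exit_reason) {
	case EXIT_REASON_MSR_WRITE:
		/* Added by this patch: IPI-style WRMSR fastpath. */
		return handle_fastpath_set_msr_irqoff(vcpu);
	case EXIT_REASON_PREEMPTION_TIMER:
		/* Hypothetical: VMX-preemption timer fastpath from a later patch. */
		return handle_fastpath_preemption_timer(vcpu);
	default:
		return EXIT_FASTPATH_NONE;
	}
}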
@@ -5933,8 +5933,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu,
 	u32 exit_reason = vmx->exit_reason;
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
-	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
-
 	/*
 	 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
 	 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@@ -6630,6 +6628,16 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 	}
 }
 
+static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+{
+	switch (to_vmx(vcpu)->exit_reason) {
+	case EXIT_REASON_MSR_WRITE:
+		return handle_fastpath_set_msr_irqoff(vcpu);
+	default:
+		return EXIT_FASTPATH_NONE;
+	}
+}
+
 bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
 
 static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
@@ -6784,20 +6792,21 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY))
 		kvm_machine_check();
 
+	trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
+
 	if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
 		return EXIT_FASTPATH_NONE;
 
-	if (!is_guest_mode(vcpu) && vmx->exit_reason == EXIT_REASON_MSR_WRITE)
-		exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
-	else
-		exit_fastpath = EXIT_FASTPATH_NONE;
-
 	vmx->loaded_vmcs->launched = 1;
 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 
 	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
 
+	if (is_guest_mode(vcpu))
+		return EXIT_FASTPATH_NONE;
+
+	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
 	return exit_fastpath;
 }
 