KVM: x86: allow KVM_REQ_GET_NESTED_STATE_PAGES outside guest mode for VMX
VMX also uses KVM_REQ_GET_NESTED_STATE_PAGES for the Hyper-V eVMCS, which
may need to be loaded outside guest mode. Therefore we cannot WARN in that
case.

However, that part of nested_get_vmcs12_pages is _not_ needed at vmentry
time. Split it out of KVM_REQ_GET_NESTED_STATE_PAGES handling, so that
both vmentry and migration (and in the latter case, independent of
is_guest_mode) do the parts that are needed.

Cc: <stable@vger.kernel.org> # 5.10.x: f2c7ef3ba: KVM: nSVM: cancel KVM_REQ_GET_NESTED_STATE_PAGES
Cc: <stable@vger.kernel.org> # 5.10.x
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent: aed89418de
Commit: 9a78e15802
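In short, the VMX side ends up with two helpers and a new
KVM_REQ_GET_NESTED_STATE_PAGES callback that calls both. The condensed
sketch below is distilled from the hunks that follow; the comments are
annotations added here, not part of the patch:

static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	/* nested_get_evmcs_page(): map the Hyper-V enlightened VMCS when
	 * enlightened VMCS is enabled.  This is the part that must also run
	 * when the vCPU is *not* in guest mode, e.g. right after migration,
	 * so it cannot be guarded by is_guest_mode().
	 */
	if (!nested_get_evmcs_page(vcpu))
		return false;

	/* nested_get_vmcs12_pages(): (re)map the vmcs12 pages (APIC-access
	 * page, posted-interrupt descriptor, MSR bitmap).  Only meaningful
	 * while L2 is running, and the only part needed at vmentry time.
	 */
	if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
		return false;

	return true;
}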
arch/x86/kvm/svm/nested.c

@@ -200,6 +200,9 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	if (WARN_ON(!is_guest_mode(vcpu)))
+		return true;
+
 	if (!nested_svm_vmrun_msrpm(svm)) {
 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		vcpu->run->internal.suberror =
arch/x86/kvm/vmx/nested.c

@@ -3124,13 +3124,9 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
 {
-	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	struct kvm_host_map *map;
-	struct page *page;
-	u64 hpa;
 
 	/*
 	 * hv_evmcs may end up being not mapped after migration (when
@@ -3153,6 +3149,17 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 		}
 	}
 
+	return true;
+}
+
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+{
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_host_map *map;
+	struct page *page;
+	u64 hpa;
+
 	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
 		/*
 		 * Translate L1 physical address to host physical
@@ -3221,6 +3228,18 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 		exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
 	else
 		exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
 
 	return true;
 }
+
+static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
+{
+	if (!nested_get_evmcs_page(vcpu))
+		return false;
+
+	if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
+		return false;
+
+	return true;
+}
@@ -6605,7 +6624,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
 	.hv_timer_pending = nested_vmx_preemption_timer_pending,
 	.get_state = vmx_get_nested_state,
 	.set_state = vmx_set_nested_state,
-	.get_nested_state_pages = nested_get_vmcs12_pages,
+	.get_nested_state_pages = vmx_get_nested_state_pages,
 	.write_log_dirty = nested_vmx_write_pml_buffer,
 	.enable_evmcs = nested_enable_evmcs,
 	.get_evmcs_version = nested_get_evmcs_version,
arch/x86/kvm/x86.c

@@ -8806,9 +8806,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	if (kvm_request_pending(vcpu)) {
 		if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
-			if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
-				;
-			else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
+			if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
 				r = 0;
 				goto out;
 			}
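For context on why the request can be pending outside guest mode on VMX at
all: the restore path queues it for the eVMCS case before it knows whether
the vCPU is in guest mode. A minimal sketch, reconstructed from the
5.10-era vmx_set_nested_state() rather than taken from this diff:

	/* Excerpt of vmx_set_nested_state(), simplified and from memory. */
	if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
		/*
		 * The eVMCS cannot be mapped here because the vp_assist page
		 * MSR may not have been restored yet; defer the mapping to
		 * KVM_REQ_GET_NESTED_STATE_PAGES processing.
		 */
		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	}

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return 0;	/* the request may stay pending with !is_guest_mode() */

With the old code in vcpu_enter_guest(), that combination would hit the
WARN_ON_ONCE and skip the ->get_nested_state_pages() call entirely, leaving
the eVMCS unmapped.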