KVM: VMX: introduce vmx_need_pf_intercept
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200710154811.418214-7-mgamal@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent: 32de2b5ee3
Commit: a0c134347b
@@ -2438,22 +2438,28 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 	/*
 	 * Whether page-faults are trapped is determined by a combination of
-	 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
-	 * If enable_ept, L0 doesn't care about page faults and we should
-	 * set all of these to L1's desires. However, if !enable_ept, L0 does
-	 * care about (at least some) page faults, and because it is not easy
-	 * (if at all possible?) to merge L0 and L1's desires, we simply ask
-	 * to exit on each and every L2 page fault. This is done by setting
-	 * MASK=MATCH=0 and (see below) EB.PF=1.
+	 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0
+	 * doesn't care about page faults then we should set all of these to
+	 * L1's desires. However, if L0 does care about (some) page faults, it
+	 * is not easy (if at all possible?) to merge L0 and L1's desires, we
+	 * simply ask to exit on each and every L2 page fault. This is done by
+	 * setting MASK=MATCH=0 and (see below) EB.PF=1.
 	 * Note that below we don't need special code to set EB.PF beyond the
 	 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
 	 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
 	 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
 	 */
-	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
-		enable_ept ? vmcs12->page_fault_error_code_mask : 0);
-	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
-		enable_ept ? vmcs12->page_fault_error_code_match : 0);
+	if (vmx_need_pf_intercept(&vmx->vcpu)) {
+		/*
+		 * TODO: if both L0 and L1 need the same MASK and MATCH,
+		 * go ahead and use it?
+		 */
+		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
+		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
+	} else {
+		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask);
+		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match);
+	}
 
 	if (cpu_has_vmx_apicv()) {
 		vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
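For reference, the hardware rule the comment above relies on (Intel SDM, VMX page-fault filtering): a #PF whose error code is pfec causes a VM exit exactly when the result of the (pfec & PFEC_MASK) == PFEC_MATCH test agrees with the PF bit of the exception bitmap. A minimal self-contained sketch of that decision, with hypothetical names, not kernel code:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * VMX #PF filtering: a page fault causes a VM exit iff the masked
     * error-code match has the same value as EB.PF.
     */
    static bool pf_causes_vmexit(uint32_t pfec, uint32_t pfec_mask,
                                 uint32_t pfec_match, bool eb_pf)
    {
            bool match = (pfec & pfec_mask) == pfec_match;

            return match == eb_pf;
    }

With MASK=MATCH=0 the test matches every error code, so EB.PF=1 intercepts each and every L2 page fault, which is the configuration the vmx_need_pf_intercept() branch above selects.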
@@ -780,7 +780,7 @@ void update_exception_bitmap(struct kvm_vcpu *vcpu)
 		eb |= 1u << BP_VECTOR;
 	if (to_vmx(vcpu)->rmode.vm86_active)
 		eb = ~0;
-	if (enable_ept)
+	if (!vmx_need_pf_intercept(vcpu))
 		eb &= ~(1u << PF_VECTOR);
 
 	/* When we are running a nested L2 guest and L1 specified for it a
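This hunk and the comment in the first hunk depend on the same merge property: the exception bitmap L0 programs for L2 is the "or" of vmcs01's and vmcs12's bitmaps, so no special handling of EB.PF is needed. An illustrative sketch of that property, not the kernel's actual code:

    #include <stdint.h>

    /*
     * L0 exits on any exception that either L0 (vmcs01) or L1 (vmcs12)
     * wants intercepted.  When #PF need not be intercepted, vmcs01's
     * EB.PF is 0 and vmcs12's value wins; otherwise vmcs01's EB.PF is 1
     * and the merged bit is always 1.
     */
    static uint32_t merged_exception_bitmap(uint32_t eb_vmcs01, uint32_t eb_vmcs12)
    {
            return eb_vmcs01 | eb_vmcs12;
    }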
@@ -550,6 +550,11 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
 		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
 }
 
+static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
+{
+	return !enable_ept;
+}
+
 void dump_vmcs(void);
 
 #endif /* __KVM_X86_VMX_H */
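The new helper centralizes the "does L0 need to intercept #PF?" decision: today it is just !enable_ept, but because call sites such as update_exception_bitmap() and prepare_vmcs02_rare() no longer test enable_ept directly, the predicate can later grow additional conditions in one place. A hedged sketch of a typical call site, hypothetical and not part of this patch:

    /*
     * Hypothetical caller: all #PF-interception decisions funnel
     * through vmx_need_pf_intercept().
     */
    static void vmx_update_pf_intercept(struct kvm_vcpu *vcpu, u32 *eb)
    {
            if (vmx_need_pf_intercept(vcpu))
                    *eb |= 1u << PF_VECTOR;    /* L0 wants to see every #PF */
            else
                    *eb &= ~(1u << PF_VECTOR); /* hardware handles guest #PFs */
    }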