KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653)
* Invert the mask of bits that we pick from L2 in
  nested_vmcb02_prepare_control

* Invert and explicitly use the VIRQ-related bitmask in svm_clear_vintr

This fixes a security issue that allowed a malicious L1 to run L2 with
AVIC enabled; L2 could then exploit the uninitialized (yet enabled) AVIC
to read/write host physical memory at some offsets.

Fixes: 3d6368ef58 ("KVM: SVM: Add VMRUN handler")
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent: 6e949ddb0a
Commit: 0f923e0712
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -184,6 +184,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define V_IGN_TPR_SHIFT 20
 #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
 
+#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
+
 #define V_INTR_MASKING_SHIFT 24
 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
 
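The new macro groups exactly the int_ctl fields tied to virtual interrupt injection: V_IRQ (bit 8), V_INTR_PRIO (bits 16-19) and V_IGN_TPR (bit 20). As a quick sanity check, here is a minimal userspace sketch (not kernel code; the neighbouring svm.h constants are repeated so it compiles on its own) that verifies the composed value:

#include <assert.h>
#include <stdio.h>

/* Repeated from arch/x86/include/asm/svm.h. */
#define V_IRQ_SHIFT		8
#define V_IRQ_MASK		(1 << V_IRQ_SHIFT)
#define V_INTR_PRIO_SHIFT	16
#define V_INTR_PRIO_MASK	(0x0f << V_INTR_PRIO_SHIFT)
#define V_IGN_TPR_SHIFT		20
#define V_IGN_TPR_MASK		(1 << V_IGN_TPR_SHIFT)

#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)

int main(void)
{
	/* Bits 8, 16-19 and 20 together are 0x001f0100. */
	assert(V_IRQ_INJECTION_BITS_MASK == 0x001f0100);
	printf("V_IRQ_INJECTION_BITS_MASK = %#010x\n",
	       (unsigned)V_IRQ_INJECTION_BITS_MASK);
	return 0;
}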
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -503,7 +503,11 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
 
 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
 {
-	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
+	const u32 int_ctl_vmcb01_bits =
+		V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;
+
+	const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
+
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 
 	/*
@@ -535,8 +539,8 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
 		vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
 
 	svm->vmcb->control.int_ctl =
-		(svm->nested.ctl.int_ctl & ~mask) |
-		(svm->vmcb01.ptr->control.int_ctl & mask);
+		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
+		(svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
 
 	svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
 	svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
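To see why inverting the mask closes the hole, compare the old blocklist merge with the new allowlist merge on a hypothetical L2-controlled int_ctl that has the unsupported AVIC enable bit (bit 31) set. This is a standalone sketch with made-up input values, not kernel code; the constants are repeated from svm.h:

#include <assert.h>
#include <stdint.h>

#define V_TPR_MASK		0x0f
#define V_IRQ_MASK		(1u << 8)
#define V_GIF_MASK		(1u << 9)
#define V_INTR_PRIO_MASK	(0x0fu << 16)
#define V_IGN_TPR_MASK		(1u << 20)
#define V_INTR_MASKING_MASK	(1u << 24)
#define V_GIF_ENABLE_MASK	(1u << 25)
#define AVIC_ENABLE_MASK	(1u << 31)

#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)

int main(void)
{
	uint32_t vmcb12 = AVIC_ENABLE_MASK | V_IRQ_MASK;	/* malicious L1 input */
	uint32_t vmcb01 = V_INTR_MASKING_MASK;			/* host state */

	/* Old blocklist: everything not in `mask` is taken from L2,
	 * so the AVIC enable bit leaks into vmcb02. */
	uint32_t mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
	uint32_t old_int_ctl = (vmcb12 & ~mask) | (vmcb01 & mask);
	assert(old_int_ctl & AVIC_ENABLE_MASK);

	/* New allowlist: only TPR and the VIRQ injection bits may come from L2. */
	uint32_t new_int_ctl = (vmcb12 & (V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK)) |
			       (vmcb01 & mask);
	assert(!(new_int_ctl & AVIC_ENABLE_MASK));
	return 0;
}

Under the old scheme every bit outside the three vmcb01 bits was taken verbatim from vmcb12, so AVIC_ENABLE leaked into the running vmcb02; the allowlist simply drops it.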
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1589,17 +1589,18 @@ static void svm_set_vintr(struct vcpu_svm *svm)
 
 static void svm_clear_vintr(struct vcpu_svm *svm)
 {
-	const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK;
 	svm_clr_intercept(svm, INTERCEPT_VINTR);
 
 	/* Drop int_ctl fields related to VINTR injection. */
-	svm->vmcb->control.int_ctl &= mask;
+	svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
 	if (is_guest_mode(&svm->vcpu)) {
-		svm->vmcb01.ptr->control.int_ctl &= mask;
+		svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
 
 		WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
 			(svm->nested.ctl.int_ctl & V_TPR_MASK));
-		svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask;
+
+		svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
+			V_IRQ_INJECTION_BITS_MASK;
 	}
 
 	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
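The same inversion applies to the guest-mode path of svm_clear_vintr(): the old code kept a fixed set of bits and then re-merged every nested int_ctl bit outside that set, re-injecting L2's unsupported bits, while the new code clears and restores only the VIRQ injection fields. A hedged userspace sketch of that path, again with repeated constants and made-up inputs:

#include <assert.h>
#include <stdint.h>

#define V_TPR_MASK		0x0f
#define V_IRQ_MASK		(1u << 8)
#define V_GIF_MASK		(1u << 9)
#define V_INTR_PRIO_MASK	(0x0fu << 16)
#define V_IGN_TPR_MASK		(1u << 20)
#define V_INTR_MASKING_MASK	(1u << 24)
#define V_GIF_ENABLE_MASK	(1u << 25)
#define AVIC_ENABLE_MASK	(1u << 31)

#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)

int main(void)
{
	uint32_t nested_int_ctl = AVIC_ENABLE_MASK | V_IRQ_MASK; /* from L1 */
	uint32_t int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK;	 /* vmcb02 */

	/* Old code: keep-list, then re-merge every nested bit outside it,
	 * which re-injects the unsupported AVIC enable bit. */
	uint32_t keep = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK |
			V_INTR_MASKING_MASK;
	uint32_t old_ctl = (int_ctl & keep) | (nested_int_ctl & ~keep);
	assert(old_ctl & AVIC_ENABLE_MASK);

	/* New code: drop only the VIRQ injection fields, then restore just
	 * those fields from the nested state. */
	uint32_t new_ctl = (int_ctl & ~V_IRQ_INJECTION_BITS_MASK) |
			   (nested_int_ctl & V_IRQ_INJECTION_BITS_MASK);
	assert(!(new_ctl & AVIC_ENABLE_MASK));
	return 0;
}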