KVM: allow compiling out SMM support
Some users of KVM implement the UEFI variable store through a paravirtual
device that does not require the "SMM lockbox" component of edk2; allow
them to compile out system management mode, which is not a full
implementation especially in how it interacts with nested virtualization.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20220929172016.319443-6-pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Родитель
1d0da94cda
Коммит
4b8e1b3201
|
@ -118,6 +118,17 @@ config KVM_AMD_SEV
|
||||||
Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
|
Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
|
||||||
with Encrypted State (SEV-ES) on AMD processors.
|
with Encrypted State (SEV-ES) on AMD processors.
|
||||||
|
|
||||||
|
config KVM_SMM
|
||||||
|
bool "System Management Mode emulation"
|
||||||
|
default y
|
||||||
|
depends on KVM
|
||||||
|
help
|
||||||
|
Provides support for KVM to emulate System Management Mode (SMM)
|
||||||
|
in virtual machines. This can be used by the virtual machine
|
||||||
|
firmware to implement UEFI secure boot.
|
||||||
|
|
||||||
|
If unsure, say Y.
|
||||||
|
|
||||||
config KVM_XEN
|
config KVM_XEN
|
||||||
bool "Support for Xen hypercall interface"
|
bool "Support for Xen hypercall interface"
|
||||||
depends on KVM
|
depends on KVM
|
||||||
|
|
|
@ -20,7 +20,7 @@ endif
|
||||||
|
|
||||||
kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
|
kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
|
||||||
kvm-$(CONFIG_KVM_XEN) += xen.o
|
kvm-$(CONFIG_KVM_XEN) += xen.o
|
||||||
kvm-y += smm.o
|
kvm-$(CONFIG_KVM_SMM) += smm.o
|
||||||
|
|
||||||
kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
|
kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
|
||||||
vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
|
vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
|
||||||
|
|
|
@ -8,6 +8,7 @@
|
||||||
#define PUT_SMSTATE(type, buf, offset, val) \
|
#define PUT_SMSTATE(type, buf, offset, val) \
|
||||||
*(type *)((buf) + (offset) - 0x7e00) = val
|
*(type *)((buf) + (offset) - 0x7e00) = val
|
||||||
|
|
||||||
|
#ifdef CONFIG_KVM_SMM
|
||||||
static inline int kvm_inject_smi(struct kvm_vcpu *vcpu)
|
static inline int kvm_inject_smi(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
kvm_make_request(KVM_REQ_SMI, vcpu);
|
kvm_make_request(KVM_REQ_SMI, vcpu);
|
||||||
|
@ -23,5 +24,16 @@ void kvm_smm_changed(struct kvm_vcpu *vcpu, bool in_smm);
|
||||||
void enter_smm(struct kvm_vcpu *vcpu);
|
void enter_smm(struct kvm_vcpu *vcpu);
|
||||||
int emulator_leave_smm(struct x86_emulate_ctxt *ctxt);
|
int emulator_leave_smm(struct x86_emulate_ctxt *ctxt);
|
||||||
void process_smi(struct kvm_vcpu *vcpu);
|
void process_smi(struct kvm_vcpu *vcpu);
|
||||||
|
#else
|
||||||
|
static inline int kvm_inject_smi(struct kvm_vcpu *vcpu) { return -ENOTTY; }
|
||||||
|
static inline bool is_smm(struct kvm_vcpu *vcpu) { return false; }
|
||||||
|
static inline void enter_smm(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }
|
||||||
|
static inline void process_smi(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }
|
||||||
|
|
||||||
|
/*
|
||||||
|
* emulator_leave_smm is used as a function pointer, so the
|
||||||
|
* stub is defined in x86.c.
|
||||||
|
*/
|
||||||
|
#endif
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -4115,6 +4115,8 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
|
||||||
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
|
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
|
||||||
return false;
|
return false;
|
||||||
case MSR_IA32_SMBASE:
|
case MSR_IA32_SMBASE:
|
||||||
|
if (!IS_ENABLED(CONFIG_KVM_SMM))
|
||||||
|
return false;
|
||||||
/* SEV-ES guests do not support SMM, so report false */
|
/* SEV-ES guests do not support SMM, so report false */
|
||||||
if (kvm && sev_es_guest(kvm))
|
if (kvm && sev_es_guest(kvm))
|
||||||
return false;
|
return false;
|
||||||
|
|
|
@ -6842,6 +6842,8 @@ static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
|
||||||
{
|
{
|
||||||
switch (index) {
|
switch (index) {
|
||||||
case MSR_IA32_SMBASE:
|
case MSR_IA32_SMBASE:
|
||||||
|
if (!IS_ENABLED(CONFIG_KVM_SMM))
|
||||||
|
return false;
|
||||||
/*
|
/*
|
||||||
* We cannot do SMM unless we can run the guest in big
|
* We cannot do SMM unless we can run the guest in big
|
||||||
* real mode.
|
* real mode.
|
||||||
|
|
|
@ -3642,7 +3642,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case MSR_IA32_SMBASE:
|
case MSR_IA32_SMBASE:
|
||||||
if (!msr_info->host_initiated)
|
if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated)
|
||||||
return 1;
|
return 1;
|
||||||
vcpu->arch.smbase = data;
|
vcpu->arch.smbase = data;
|
||||||
break;
|
break;
|
||||||
|
@ -4058,7 +4058,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
||||||
msr_info->data = vcpu->arch.ia32_misc_enable_msr;
|
msr_info->data = vcpu->arch.ia32_misc_enable_msr;
|
||||||
break;
|
break;
|
||||||
case MSR_IA32_SMBASE:
|
case MSR_IA32_SMBASE:
|
||||||
if (!msr_info->host_initiated)
|
if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated)
|
||||||
return 1;
|
return 1;
|
||||||
msr_info->data = vcpu->arch.smbase;
|
msr_info->data = vcpu->arch.smbase;
|
||||||
break;
|
break;
|
||||||
|
@ -4432,6 +4432,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
||||||
r |= KVM_X86_DISABLE_EXITS_MWAIT;
|
r |= KVM_X86_DISABLE_EXITS_MWAIT;
|
||||||
break;
|
break;
|
||||||
case KVM_CAP_X86_SMM:
|
case KVM_CAP_X86_SMM:
|
||||||
|
if (!IS_ENABLED(CONFIG_KVM_SMM))
|
||||||
|
break;
|
||||||
|
|
||||||
/* SMBASE is usually relocated above 1M on modern chipsets,
|
/* SMBASE is usually relocated above 1M on modern chipsets,
|
||||||
* and SMM handlers might indeed rely on 4G segment limits,
|
* and SMM handlers might indeed rely on 4G segment limits,
|
||||||
* so do not report SMM to be available if real mode is
|
* so do not report SMM to be available if real mode is
|
||||||
|
@ -5182,6 +5185,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
|
||||||
vcpu->arch.apic->sipi_vector = events->sipi_vector;
|
vcpu->arch.apic->sipi_vector = events->sipi_vector;
|
||||||
|
|
||||||
if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
|
if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
|
||||||
|
#ifdef CONFIG_KVM_SMM
|
||||||
if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
|
if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
|
||||||
kvm_x86_ops.nested_ops->leave_nested(vcpu);
|
kvm_x86_ops.nested_ops->leave_nested(vcpu);
|
||||||
kvm_smm_changed(vcpu, events->smi.smm);
|
kvm_smm_changed(vcpu, events->smi.smm);
|
||||||
|
@ -5196,6 +5200,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
|
||||||
vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
|
vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#else
|
||||||
|
if (events->smi.smm || events->smi.pending ||
|
||||||
|
events->smi.smm_inside_nmi)
|
||||||
|
return -EINVAL;
|
||||||
|
#endif
|
||||||
|
|
||||||
if (lapic_in_kernel(vcpu)) {
|
if (lapic_in_kernel(vcpu)) {
|
||||||
if (events->smi.latched_init)
|
if (events->smi.latched_init)
|
||||||
set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
|
set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
|
||||||
|
@ -8121,6 +8131,14 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
|
||||||
return emul_to_vcpu(ctxt)->arch.hflags;
|
return emul_to_vcpu(ctxt)->arch.hflags;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifndef CONFIG_KVM_SMM
|
||||||
|
static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
|
||||||
|
{
|
||||||
|
WARN_ON_ONCE(1);
|
||||||
|
return X86EMUL_UNHANDLEABLE;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
|
static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
|
||||||
{
|
{
|
||||||
kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
|
kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
|
||||||
|
|
|
@ -137,6 +137,8 @@ int main(int argc, char *argv[])
|
||||||
struct kvm_x86_state *state;
|
struct kvm_x86_state *state;
|
||||||
int stage, stage_reported;
|
int stage, stage_reported;
|
||||||
|
|
||||||
|
TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM));
|
||||||
|
|
||||||
/* Create VM */
|
/* Create VM */
|
||||||
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
|
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
|
||||||
|
|
||||||
|
|
Загрузка…
Ссылка в новой задаче