KVM: SVM: Provide support for SEV-ES vCPU loading

An SEV-ES vCPU imposes additional VMCB vCPU load/put requirements: SEV-ES
hardware restores certain registers on VMEXIT but does not save them on
VMRUN (see Table B-3 and Table B-4 of the AMD64 APM Volume 2), so make the
following changes (a sketch of the resulting flow follows the list below):

General vCPU load changes:
  - During vCPU loading, perform a VMSAVE to the per-CPU SVM save area and
    save the current values of XCR0, XSS and PKRU to the per-CPU SVM save
    area as these registers will be restored on VMEXIT.

General vCPU put changes:
  - Do not attempt to restore registers that SEV-ES hardware has already
    restored on VMEXIT.
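
To make the asymmetry concrete, here is a minimal sketch of the resulting
flow (an editor's illustration, not part of the commit; it reuses the
patch's own names svm_cpu_data, save_area and __sme_page_pa()):

/*
 * Sketch only: what KVM must do by hand for an SEV-ES guest, and what
 * the hardware does on its own.
 */
static void sev_es_host_state_sketch(struct svm_cpu_data *sd)
{
        /* At vCPU load: hardware will NOT do this VMSAVE on VMRUN. */
        asm volatile("vmsave %%rax"
                     : : "a" (__sme_page_pa(sd->save_area)) : "memory");

        /* ... VMRUN, guest executes, #VMEXIT ... */

        /*
         * Nothing to reload here for VMSAVE-class state: on VMEXIT the
         * hardware performs the equivalent of a VMLOAD from the area
         * saved above, and also restores XCR0, XSS and PKRU from it.
         */
}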

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Message-Id: <019390e9cb5e93cd73014fa5a040c17d42588733.1607620209.git.thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Tom Lendacky 2020-12-10 11:10:07 -06:00, committed by Paolo Bonzini
Parent 376c6d2850
Commit 861377730a
6 changed files with 103 additions and 23 deletions

arch/x86/include/asm/svm.h

@@ -234,7 +234,8 @@ struct vmcb_save_area {
 	u8 cpl;
 	u8 reserved_2[4];
 	u64 efer;
-	u8 reserved_3[112];
+	u8 reserved_3[104];
+	u64 xss;		/* Valid for SEV-ES only */
 	u64 cr4;
 	u64 cr3;
 	u64 cr0;
@@ -265,9 +266,12 @@ struct vmcb_save_area {
 
 	/*
	 * The following part of the save area is valid only for
-	 * SEV-ES guests when referenced through the GHCB.
+	 * SEV-ES guests when referenced through the GHCB or for
+	 * saving to the host save area.
	 */
-	u8 reserved_7[104];
+	u8 reserved_7[80];
+	u32 pkru;
+	u8 reserved_7a[20];
 	u64 reserved_8;		/* rax already available at 0x01f8 */
 	u64 rcx;
 	u64 rdx;
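
Splitting the reserved regions must leave every architecturally placed
neighbor at its old offset. A compile-time check along these lines would
catch a mis-sized split (editor's sketch, not in the patch; the offsets
are computed from the full struct declaration, of which only excerpts are
shown above, anchored by the APM's placement of efer at 0xd0 and cr4 at
0x148):

#include <stddef.h>	/* assumes <asm/svm.h> is also in scope */

/* 104 + 8 == 112, so cr4 keeps its architectural offset. */
_Static_assert(offsetof(struct vmcb_save_area, xss) == 0x140, "xss slot");
_Static_assert(offsetof(struct vmcb_save_area, cr4) == 0x148, "cr4 moved");
/* 80 + 4 + 20 == 104, so everything after pkru keeps its offset too. */
_Static_assert(offsetof(struct vmcb_save_area, pkru) == 0x2e8, "pkru slot");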

arch/x86/kvm/svm/sev.c

@@ -16,12 +16,15 @@
 #include <linux/swap.h>
 #include <linux/processor.h>
 #include <linux/trace_events.h>
+#include <asm/fpu/internal.h>
 
 #include "x86.h"
 #include "svm.h"
 #include "cpuid.h"
 #include "trace.h"
 
+#define __ex(x) __kvm_handle_fault_on_reboot(x)
+
 static u8 sev_enc_bit;
 static int sev_flush_asids(void);
 static DECLARE_RWSEM(sev_deactivate_lock);
@@ -1852,3 +1855,54 @@ void sev_es_create_vcpu(struct vcpu_svm *svm)
 					    GHCB_VERSION_MIN,
 					    sev_enc_bit));
 }
+
+void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
+{
+	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+	struct vmcb_save_area *hostsa;
+	unsigned int i;
+
+	/*
+	 * As an SEV-ES guest, hardware will restore the host state on VMEXIT,
+	 * of which one step is to perform a VMLOAD. Since hardware does not
+	 * perform a VMSAVE on VMRUN, the host save area must be updated.
+	 */
+	asm volatile(__ex("vmsave") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
+
+	/*
+	 * Certain MSRs are restored on VMEXIT; only save the ones that
+	 * aren't restored.
+	 */
+	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
+		if (host_save_user_msrs[i].sev_es_restored)
+			continue;
+
+		rdmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
+	}
+
+	/* XCR0 is restored on VMEXIT, save the current host value */
+	hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
+	hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+
+	/* PKRU is restored on VMEXIT, save the current host value */
+	hostsa->pkru = read_pkru();
+
+	/* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
+	hostsa->xss = host_xss;
+}
+
+void sev_es_vcpu_put(struct vcpu_svm *svm)
+{
+	unsigned int i;
+
+	/*
+	 * Certain MSRs are restored on VMEXIT and were saved with vmsave in
+	 * sev_es_vcpu_load() above. Only restore the ones that weren't.
+	 */
+	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
+		if (host_save_user_msrs[i].sev_es_restored)
+			continue;
+
+		wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
+	}
+}
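
A note on the "+ 0x400" above (editor's annotation): sd->save_area is the
page whose physical address KVM writes to MSR_VM_HSAVE_PA when enabling
SVM on each CPU, so it is exactly the host save area that SEV-ES hardware
consults on VMEXIT, and in a VMCB-format page the control area occupies
the first 0x400 bytes with the state-save area starting at offset 0x400.
With the magic number given a name, the assignment could read
(VMCB_SAVE_AREA_OFFSET is a hypothetical name, not from the patch):

#define VMCB_SAVE_AREA_OFFSET	0x400	/* control area fills bytes 0-0x3ff */

	hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) +
					   VMCB_SAVE_AREA_OFFSET);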

arch/x86/kvm/svm/svm.c

@@ -1417,6 +1417,9 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		vmcb_mark_all_dirty(svm->vmcb);
 	}
 
+	if (sev_es_guest(svm->vcpu.kvm)) {
+		sev_es_vcpu_load(svm, cpu);
+	} else {
 #ifdef CONFIG_X86_64
 		rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
 #endif
@@ -1425,7 +1428,9 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		svm->host.ldt = kvm_read_ldt();
 
 		for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-			rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
+			rdmsrl(host_save_user_msrs[i].index,
+			       svm->host_user_msrs[i]);
+	}
 
 	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
 		u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
@@ -1453,6 +1458,9 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	avic_vcpu_put(vcpu);
 
 	++vcpu->stat.host_state_reload;
+	if (sev_es_guest(svm->vcpu.kvm)) {
+		sev_es_vcpu_put(svm);
+	} else {
 		kvm_load_ldt(svm->host.ldt);
 #ifdef CONFIG_X86_64
 		loadsegment(fs, svm->host.fs);
@@ -1465,7 +1473,9 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 #endif
 
 		for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-			wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
+			wrmsrl(host_save_user_msrs[i].index,
+			       svm->host_user_msrs[i]);
+	}
 }
 
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)

arch/x86/kvm/svm/svm.h

@@ -23,15 +23,23 @@
 
 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
 
-static const u32 host_save_user_msrs[] = {
+static const struct svm_host_save_msrs {
+	u32 index;		/* Index of the MSR */
+	bool sev_es_restored;	/* True if MSR is restored on SEV-ES VMEXIT */
+} host_save_user_msrs[] = {
 #ifdef CONFIG_X86_64
-	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
-	MSR_FS_BASE,
+	{ .index = MSR_STAR,			.sev_es_restored = true },
+	{ .index = MSR_LSTAR,			.sev_es_restored = true },
+	{ .index = MSR_CSTAR,			.sev_es_restored = true },
+	{ .index = MSR_SYSCALL_MASK,		.sev_es_restored = true },
+	{ .index = MSR_KERNEL_GS_BASE,		.sev_es_restored = true },
+	{ .index = MSR_FS_BASE,			.sev_es_restored = true },
 #endif
-	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
-	MSR_TSC_AUX,
+	{ .index = MSR_IA32_SYSENTER_CS,	.sev_es_restored = true },
+	{ .index = MSR_IA32_SYSENTER_ESP,	.sev_es_restored = true },
+	{ .index = MSR_IA32_SYSENTER_EIP,	.sev_es_restored = true },
+	{ .index = MSR_TSC_AUX,			.sev_es_restored = false },
 };
 #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
 
 #define MAX_DIRECT_ACCESS_MSRS 18
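
With the flags above, MSR_TSC_AUX is the only entry the SEV-ES hardware
does not restore, so the filtered loops in sev_es_vcpu_load() and
sev_es_vcpu_put() each reduce to a single MSR access for this table. The
put path's effective behavior (editor's sketch, relying on MSR_TSC_AUX
being the last array entry):

	/* Only the final entry has .sev_es_restored == false. */
	wrmsrl(MSR_TSC_AUX, svm->host_user_msrs[NR_HOST_SAVE_USER_MSRS - 1]);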
@@ -580,5 +588,7 @@ int sev_handle_vmgexit(struct vcpu_svm *svm);
 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
 void sev_es_init_vmcb(struct vcpu_svm *svm);
 void sev_es_create_vcpu(struct vcpu_svm *svm);
+void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
+void sev_es_vcpu_put(struct vcpu_svm *svm);
 
 #endif

arch/x86/kvm/x86.c

@@ -197,7 +197,8 @@ EXPORT_SYMBOL_GPL(host_efer);
 bool __read_mostly allow_smaller_maxphyaddr = 0;
 EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
 
-static u64 __read_mostly host_xss;
+u64 __read_mostly host_xss;
+EXPORT_SYMBOL_GPL(host_xss);
 
 u64 __read_mostly supported_xss;
 EXPORT_SYMBOL_GPL(supported_xss);

arch/x86/kvm/x86.h

@@ -279,6 +279,7 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
 
 extern u64 host_xcr0;
 extern u64 supported_xcr0;
+extern u64 host_xss;
 extern u64 supported_xss;
 
 static inline bool kvm_mpx_supported(void)