- Enable -Wundef for the compressed kernel build stage
- Reorganize SEV code to streamline and simplify future development

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAmCg1XQACgkQEsHwGGHe
VUpRKA//dwzDD1QU16JucfhgFlv/9OTm48ukSwAb9lZjDEy4H1CtVL3xEHFd7L3G
LJp0LTW+OQf0/0aGlQp/cP6sBF6G9Bf4mydx70Id4SyCQt8eZDodB+ZOOWbeteWq
p92fJPbX8CzAglutbE+3v/MD8CCAllTiLZnJZPVj4Kux2/wF6EryDgF1+rb5q8jp
ObTT9817mHVwWVUYzbgceZtd43IocOlKZRmF1qivwScMGylQTe1wfMjunpD5pVt8
Zg4UDNknNfYduqpaG546E6e1zerGNaJK7SHnsuzHRUVU5icNqtgBk061CehP9Ksq
DvYXLUl4xF16j6xJAqIZPNrBkJGdQf4q1g5x2FiBm7rSQU5owzqh5rkVk4EBFFzn
UtzeXpqbStbsZHXycyxBNdq2HXxkFPf2NXZ+bkripPg+DifOGots1uwvAft+6iAE
GudK6qxAvr8phR1cRyy6BahGtgOStXbZYEz0ZdU6t7qFfZMz+DomD5Jimj0kAe6B
s6ras5xm8q3/Py87N/KNjKtSEpgsHv/7F+idde7ODtHhpRL5HCBqhkZOSRkMMZqI
ptX1oSTvBXwRKyi5x9YhkKHUFqfFSUTfJhiRFCWK+IEAv3Y7SipJtfkqxRbI6fEV
FfCeueKDDdViBtseaRceVLJ8Tlr6Qjy27fkPPTqJpthqPpCdoZ0=
=ENfF
-----END PGP SIGNATURE-----

Merge tag 'x86_urgent_for_v5.13_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:
 "The three SEV commits are not really urgent material. But we figured
  since getting them in now will avoid a huge amount of conflicts between
  future SEV changes touching tip, the kvm and probably other trees,
  sending them to you now would be best.

  The idea is that the tip, kvm etc branches for 5.14 will all base
  ontop of -rc2 and thus everything will be peachy. What is more, those
  changes are purely mechanical and defines movement so they should be
  fine to go now (famous last words).

  Summary:

   - Enable -Wundef for the compressed kernel build stage

   - Reorganize SEV code to streamline and simplify future development"

* tag 'x86_urgent_for_v5.13_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/boot/compressed: Enable -Wundef
  x86/msr: Rename MSR_K8_SYSCFG to MSR_AMD64_SYSCFG
  x86/sev: Move GHCB MSR protocol and NAE definitions in a common header
  x86/sev-es: Rename sev-es.{ch} to sev.{ch}
Commit: ccb013c29d
@@ -22,7 +22,7 @@ to SEV::
 		  [ecx]:
 			Bits[31:0]  Number of encrypted guests supported simultaneously
 
-If support for SEV is present, MSR 0xc001_0010 (MSR_K8_SYSCFG) and MSR 0xc001_0015
+If support for SEV is present, MSR 0xc001_0010 (MSR_AMD64_SYSCFG) and MSR 0xc001_0015
 (MSR_K7_HWCR) can be used to determine if it can be enabled::
 
 	0xc001_0010:

@@ -53,7 +53,7 @@ CPUID function 0x8000001f reports information related to SME::
 			system physical addresses, not guest physical
 			addresses)
 
-If support for SME is present, MSR 0xc00100010 (MSR_K8_SYSCFG) can be used to
+If support for SME is present, MSR 0xc00100010 (MSR_AMD64_SYSCFG) can be used to
 determine if SME is enabled and/or to enable memory encryption::
 
 	0xc0010010:

@@ -79,7 +79,7 @@ The state of SME in the Linux kernel can be documented as follows:
 	    The CPU supports SME (determined through CPUID instruction).
 
 	  - Enabled:
-	    Supported and bit 23 of MSR_K8_SYSCFG is set.
+	    Supported and bit 23 of MSR_AMD64_SYSCFG is set.
 
 	  - Active:
 	    Supported, Enabled and the Linux kernel is actively applying

@@ -89,7 +89,7 @@ The state of SME in the Linux kernel can be documented as follows:
 SME can also be enabled and activated in the BIOS. If SME is enabled and
 activated in the BIOS, then all memory accesses will be encrypted and it will
 not be necessary to activate the Linux memory encryption support. If the BIOS
-merely enables SME (sets bit 23 of the MSR_K8_SYSCFG), then Linux can activate
+merely enables SME (sets bit 23 of the MSR_AMD64_SYSCFG), then Linux can activate
 memory encryption by default (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y) or
 by supplying mem_encrypt=on on the kernel command line. However, if BIOS does
 not enable SME, then Linux will not be able to activate memory encryption, even
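The documentation hunks above describe the check that the rest of this series performs in code: SME is enabled by the BIOS when bit 23 of SYSCFG (MSR 0xc0010010, now named MSR_AMD64_SYSCFG) is set. Purely as a hedged illustration, and not part of this commit, the same check can be made from user space through the msr driver (assumes the msr module is loaded and root privileges; the file and program names below are just examples):

    /* sme_check.c - user-space sketch: read SYSCFG (MSR 0xc0010010) via the
     * msr driver and test the memory-encryption-enable bit (bit 23).
     * Build: gcc -o sme_check sme_check.c   Run as root with the msr module loaded.
     */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_AMD64_SYSCFG        0xc0010010
    #define SYSCFG_MEM_ENCRYPT_BIT  23

    int main(void)
    {
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {
            perror("open /dev/cpu/0/msr");
            return 1;
        }
        /* The msr driver returns the 8-byte MSR value at the offset equal to its address. */
        if (pread(fd, &val, sizeof(val), MSR_AMD64_SYSCFG) != sizeof(val)) {
            perror("pread");
            close(fd);
            return 1;
        }
        close(fd);

        printf("SYSCFG = %#018llx, SME %s by BIOS\n",
               (unsigned long long)val,
               (val >> SYSCFG_MEM_ENCRYPT_BIT) & 1 ? "enabled" : "not enabled");
        return 0;
    }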
@@ -30,6 +30,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
 
 KBUILD_CFLAGS := -m$(BITS) -O2
 KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
+KBUILD_CFLAGS += -Wundef
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 cflags-$(CONFIG_X86_32) := -march=i386
 cflags-$(CONFIG_X86_64) := -mcmodel=small -mno-red-zone

@@ -48,10 +49,10 @@ KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
 KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
 KBUILD_CFLAGS += $(CLANG_FLAGS)
 
-# sev-es.c indirectly inludes inat-table.h which is generated during
+# sev.c indirectly inludes inat-table.h which is generated during
 # compilation and stored in $(objtree). Add the directory to the includes so
 # that the compiler finds it even with out-of-tree builds (make O=/some/path).
-CFLAGS_sev-es.o += -I$(objtree)/arch/x86/lib/
+CFLAGS_sev.o += -I$(objtree)/arch/x86/lib/
 
 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n

@@ -93,7 +94,7 @@ ifdef CONFIG_X86_64
 	vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o
 	vmlinux-objs-y += $(obj)/mem_encrypt.o
 	vmlinux-objs-y += $(obj)/pgtable_64.o
-	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev-es.o
+	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
 endif
 
 vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o

@@ -172,7 +172,7 @@ void __puthex(unsigned long value)
 	}
 }
 
-#if CONFIG_X86_NEED_RELOCS
+#ifdef CONFIG_X86_NEED_RELOCS
 static void handle_relocations(void *output, unsigned long output_len,
 			       unsigned long virt_addr)
 {

@@ -79,7 +79,7 @@ struct mem_vector {
 	u64 size;
 };
 
-#if CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_RANDOMIZE_BASE
 /* kaslr.c */
 void choose_random_location(unsigned long input,
 			    unsigned long input_size,
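The two #if to #ifdef hunks above are what makes the newly added -Wundef flag viable for this build stage: with -Wundef, an undefined identifier used in an #if expression produces a warning instead of silently evaluating to 0, while #ifdef only tests whether the macro is defined. A stand-alone illustration (not taken from the kernel tree):

    /* wundef_demo.c - compile with: gcc -Wundef -c wundef_demo.c
     * CONFIG_X86_NEED_RELOCS is deliberately left undefined here.
     */

    #if CONFIG_X86_NEED_RELOCS    /* warning: "CONFIG_X86_NEED_RELOCS" is not defined, evaluates to 0 [-Wundef] */
    static void handle_relocations(void) { }
    #endif

    #ifdef CONFIG_X86_NEED_RELOCS /* no warning: #ifdef only asks whether the macro exists */
    static void handle_relocations_ok(void) { }
    #endif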
@@ -13,7 +13,7 @@
 #include "misc.h"
 
 #include <asm/pgtable_types.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
 #include <asm/trapnr.h>
 #include <asm/trap_pf.h>
 #include <asm/msr-index.h>

@@ -117,7 +117,7 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 #include "../../lib/insn.c"
 
 /* Include code for early handlers */
-#include "../../kernel/sev-es-shared.c"
+#include "../../kernel/sev-shared.c"
 
 static bool early_setup_sev_es(void)
 {
@@ -537,9 +537,9 @@
 /* K8 MSRs */
 #define MSR_K8_TOP_MEM1			0xc001001a
 #define MSR_K8_TOP_MEM2			0xc001001d
-#define MSR_K8_SYSCFG			0xc0010010
-#define MSR_K8_SYSCFG_MEM_ENCRYPT_BIT	23
-#define MSR_K8_SYSCFG_MEM_ENCRYPT	BIT_ULL(MSR_K8_SYSCFG_MEM_ENCRYPT_BIT)
+#define MSR_AMD64_SYSCFG		0xc0010010
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT	23
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT	BIT_ULL(MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT)
 #define MSR_K8_INT_PENDING_MSG		0xc0010055
 /* C1E active bits in int pending message */
 #define K8_INTP_C1E_ACTIVE_MASK		0x18000000
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD SEV header common between the guest and the hypervisor.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#ifndef __ASM_X86_SEV_COMMON_H
+#define __ASM_X86_SEV_COMMON_H
+
+#define GHCB_MSR_INFO_POS		0
+#define GHCB_MSR_INFO_MASK		(BIT_ULL(12) - 1)
+
+#define GHCB_MSR_SEV_INFO_RESP		0x001
+#define GHCB_MSR_SEV_INFO_REQ		0x002
+#define GHCB_MSR_VER_MAX_POS		48
+#define GHCB_MSR_VER_MAX_MASK		0xffff
+#define GHCB_MSR_VER_MIN_POS		32
+#define GHCB_MSR_VER_MIN_MASK		0xffff
+#define GHCB_MSR_CBIT_POS		24
+#define GHCB_MSR_CBIT_MASK		0xff
+#define GHCB_MSR_SEV_INFO(_max, _min, _cbit)				\
+	((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) |	\
+	 (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) |	\
+	 (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) |	\
+	 GHCB_MSR_SEV_INFO_RESP)
+#define GHCB_MSR_INFO(v)		((v) & 0xfffUL)
+#define GHCB_MSR_PROTO_MAX(v)		(((v) >> GHCB_MSR_VER_MAX_POS) & GHCB_MSR_VER_MAX_MASK)
+#define GHCB_MSR_PROTO_MIN(v)		(((v) >> GHCB_MSR_VER_MIN_POS) & GHCB_MSR_VER_MIN_MASK)
+
+#define GHCB_MSR_CPUID_REQ		0x004
+#define GHCB_MSR_CPUID_RESP		0x005
+#define GHCB_MSR_CPUID_FUNC_POS		32
+#define GHCB_MSR_CPUID_FUNC_MASK	0xffffffff
+#define GHCB_MSR_CPUID_VALUE_POS	32
+#define GHCB_MSR_CPUID_VALUE_MASK	0xffffffff
+#define GHCB_MSR_CPUID_REG_POS		30
+#define GHCB_MSR_CPUID_REG_MASK		0x3
+#define GHCB_CPUID_REQ_EAX		0
+#define GHCB_CPUID_REQ_EBX		1
+#define GHCB_CPUID_REQ_ECX		2
+#define GHCB_CPUID_REQ_EDX		3
+#define GHCB_CPUID_REQ(fn, reg)						\
+	(GHCB_MSR_CPUID_REQ |						\
+	 (((unsigned long)reg & GHCB_MSR_CPUID_REG_MASK) << GHCB_MSR_CPUID_REG_POS) | \
+	 (((unsigned long)fn) << GHCB_MSR_CPUID_FUNC_POS))
+
+#define GHCB_MSR_TERM_REQ		0x100
+#define GHCB_MSR_TERM_REASON_SET_POS	12
+#define GHCB_MSR_TERM_REASON_SET_MASK	0xf
+#define GHCB_MSR_TERM_REASON_POS	16
+#define GHCB_MSR_TERM_REASON_MASK	0xff
+#define GHCB_SEV_TERM_REASON(reason_set, reason_val)						  \
+	(((((u64)reason_set) & GHCB_MSR_TERM_REASON_SET_MASK) << GHCB_MSR_TERM_REASON_SET_POS) | \
+	 ((((u64)reason_val) & GHCB_MSR_TERM_REASON_MASK) << GHCB_MSR_TERM_REASON_POS))
+
+#define GHCB_SEV_ES_REASON_GENERAL_REQUEST	0
+#define GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED	1
+
+#define GHCB_RESP_CODE(v)		((v) & GHCB_MSR_INFO_MASK)
+
+#endif
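The new common header above is pure bit-packing for the GHCB MSR protocol: bits 11:0 carry the request/response code and the upper bits carry the payload (protocol versions, C-bit position, CPUID data, termination reason). A small stand-alone demo of the SEV_INFO layout, with the positions and masks mirrored from the hunk; the program itself is illustrative and not kernel code:

    /* ghcb_msr_demo.c - illustration of the GHCB MSR value layout.
     * Build: gcc -o ghcb_msr_demo ghcb_msr_demo.c
     */
    #include <stdint.h>
    #include <stdio.h>

    #define GHCB_MSR_INFO_MASK      ((1ULL << 12) - 1)  /* bits 11:0 - request/response code */
    #define GHCB_MSR_SEV_INFO_RESP  0x001ULL
    #define GHCB_MSR_VER_MAX_POS    48
    #define GHCB_MSR_VER_MIN_POS    32
    #define GHCB_MSR_CBIT_POS       24

    int main(void)
    {
        /* Pack a SEV_INFO response the way GHCB_MSR_SEV_INFO() does:
         * GHCB protocol versions 1..1, C-bit at physical address bit 51.
         */
        uint64_t resp = (1ULL  << GHCB_MSR_VER_MAX_POS) |
                        (1ULL  << GHCB_MSR_VER_MIN_POS) |
                        (51ULL << GHCB_MSR_CBIT_POS)    |
                        GHCB_MSR_SEV_INFO_RESP;

        printf("raw GHCB MSR value : %#018llx\n", (unsigned long long)resp);
        printf("response code      : %#05llx\n",                  /* GHCB_MSR_INFO() / GHCB_RESP_CODE() */
               (unsigned long long)(resp & GHCB_MSR_INFO_MASK));
        printf("proto max / min    : %llu / %llu\n",               /* GHCB_MSR_PROTO_MAX()/_MIN() */
               (unsigned long long)((resp >> GHCB_MSR_VER_MAX_POS) & 0xffff),
               (unsigned long long)((resp >> GHCB_MSR_VER_MIN_POS) & 0xffff));
        printf("C-bit position     : %llu\n",
               (unsigned long long)((resp >> GHCB_MSR_CBIT_POS) & 0xff));
        return 0;
    }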
@@ -10,34 +10,12 @@
 
 #include <linux/types.h>
 #include <asm/insn.h>
+#include <asm/sev-common.h>
 
-#define GHCB_SEV_INFO		0x001UL
-#define GHCB_SEV_INFO_REQ	0x002UL
-#define	GHCB_INFO(v)		((v) & 0xfffUL)
-#define	GHCB_PROTO_MAX(v)	(((v) >> 48) & 0xffffUL)
-#define	GHCB_PROTO_MIN(v)	(((v) >> 32) & 0xffffUL)
-#define	GHCB_PROTO_OUR		0x0001UL
-#define GHCB_SEV_CPUID_REQ	0x004UL
-#define		GHCB_CPUID_REQ_EAX	0
-#define		GHCB_CPUID_REQ_EBX	1
-#define		GHCB_CPUID_REQ_ECX	2
-#define		GHCB_CPUID_REQ_EDX	3
-#define		GHCB_CPUID_REQ(fn, reg) (GHCB_SEV_CPUID_REQ | \
-					(((unsigned long)reg & 3) << 30) | \
-					(((unsigned long)fn) << 32))
+#define GHCB_PROTO_OUR		0x0001UL
+#define GHCB_PROTOCOL_MAX	1ULL
+#define GHCB_DEFAULT_USAGE	0ULL
 
-#define	GHCB_PROTOCOL_MAX	0x0001UL
-#define	GHCB_DEFAULT_USAGE	0x0000UL
-
-#define GHCB_SEV_CPUID_RESP	0x005UL
-#define GHCB_SEV_TERMINATE	0x100UL
-#define		GHCB_SEV_TERMINATE_REASON(reason_set, reason_val) \
-			(((((u64)reason_set) & 0x7) << 12) |	  \
-			 ((((u64)reason_val) & 0xff) << 16))
-#define	GHCB_SEV_ES_REASON_GENERAL_REQUEST	0
-#define	GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED	1
-
-#define GHCB_SEV_GHCB_RESP_CODE(v)	((v) & 0xfff)
 #define	VMGEXIT()			{ asm volatile("rep; vmmcall\n\r"); }
 
 enum es_result {
@@ -20,7 +20,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
 CFLAGS_REMOVE_head64.o = -pg
-CFLAGS_REMOVE_sev-es.o = -pg
+CFLAGS_REMOVE_sev.o = -pg
 endif
 
 KASAN_SANITIZE_head$(BITS).o := n

@@ -28,7 +28,7 @@ KASAN_SANITIZE_dumpstack.o := n
 KASAN_SANITIZE_dumpstack_$(BITS).o := n
 KASAN_SANITIZE_stacktrace.o := n
 KASAN_SANITIZE_paravirt.o := n
-KASAN_SANITIZE_sev-es.o := n
+KASAN_SANITIZE_sev.o := n
 
 # With some compiler versions the generated code results in boot hangs, caused
 # by several compilation units. To be safe, disable all instrumentation.

@@ -148,7 +148,7 @@ obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o
 obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o
 obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
 
-obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev-es.o
+obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev.o
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
@@ -593,8 +593,8 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 	 */
 	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
 		/* Check if memory encryption is enabled */
-		rdmsrl(MSR_K8_SYSCFG, msr);
-		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+		rdmsrl(MSR_AMD64_SYSCFG, msr);
+		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
 			goto clear_all;
 
 		/*
@@ -836,7 +836,7 @@ int __init amd_special_default_mtrr(void)
 	if (boot_cpu_data.x86 < 0xf)
 		return 0;
 	/* In case some hypervisor doesn't pass SYSCFG through: */
-	if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
+	if (rdmsr_safe(MSR_AMD64_SYSCFG, &l, &h) < 0)
 		return 0;
 	/*
 	 * Memory between 4GB and top of mem is forced WB by this magic bit.

@@ -53,13 +53,13 @@ static inline void k8_check_syscfg_dram_mod_en(void)
 	      (boot_cpu_data.x86 >= 0x0f)))
 		return;
 
-	rdmsr(MSR_K8_SYSCFG, lo, hi);
+	rdmsr(MSR_AMD64_SYSCFG, lo, hi);
 	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
 		pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
 		       " not cleared by BIOS, clearing this bit\n",
 		       smp_processor_id());
 		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
-		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
+		mtrr_wrmsr(MSR_AMD64_SYSCFG, lo, hi);
 	}
 }
 
@@ -39,7 +39,7 @@
 #include <asm/realmode.h>
 #include <asm/extable.h>
 #include <asm/trapnr.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
 
 /*
  * Manage page tables very early on.

@@ -95,7 +95,7 @@ static void get_fam10h_pci_mmconf_base(void)
 		return;
 
 	/* SYS_CFG */
-	address = MSR_K8_SYSCFG;
+	address = MSR_AMD64_SYSCFG;
 	rdmsrl(address, val);
 
 	/* TOP_MEM2 is not enabled? */

@@ -33,7 +33,7 @@
 #include <asm/reboot.h>
 #include <asm/cache.h>
 #include <asm/nospec-branch.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/nmi.h>
@@ -26,13 +26,13 @@ static bool __init sev_es_check_cpu_features(void)
 
 static void __noreturn sev_es_terminate(unsigned int reason)
 {
-	u64 val = GHCB_SEV_TERMINATE;
+	u64 val = GHCB_MSR_TERM_REQ;
 
 	/*
 	 * Tell the hypervisor what went wrong - only reason-set 0 is
 	 * currently supported.
 	 */
-	val |= GHCB_SEV_TERMINATE_REASON(0, reason);
+	val |= GHCB_SEV_TERM_REASON(0, reason);
 
 	/* Request Guest Termination from Hypvervisor */
 	sev_es_wr_ghcb_msr(val);

@@ -47,15 +47,15 @@ static bool sev_es_negotiate_protocol(void)
 	u64 val;
 
 	/* Do the GHCB protocol version negotiation */
-	sev_es_wr_ghcb_msr(GHCB_SEV_INFO_REQ);
+	sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
 	VMGEXIT();
 	val = sev_es_rd_ghcb_msr();
 
-	if (GHCB_INFO(val) != GHCB_SEV_INFO)
+	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
 		return false;
 
-	if (GHCB_PROTO_MAX(val) < GHCB_PROTO_OUR ||
-	    GHCB_PROTO_MIN(val) > GHCB_PROTO_OUR)
+	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTO_OUR ||
+	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTO_OUR)
 		return false;
 
 	return true;

@@ -153,28 +153,28 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
 	sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EAX));
 	VMGEXIT();
 	val = sev_es_rd_ghcb_msr();
-	if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+	if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
 		goto fail;
 	regs->ax = val >> 32;
 
 	sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EBX));
 	VMGEXIT();
 	val = sev_es_rd_ghcb_msr();
-	if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+	if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
 		goto fail;
 	regs->bx = val >> 32;
 
 	sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_ECX));
 	VMGEXIT();
 	val = sev_es_rd_ghcb_msr();
-	if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+	if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
 		goto fail;
 	regs->cx = val >> 32;
 
 	sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EDX));
 	VMGEXIT();
 	val = sev_es_rd_ghcb_msr();
-	if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+	if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
 		goto fail;
 	regs->dx = val >> 32;
 
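The hunk above repeats the same four-step sequence for EAX/EBX/ECX/EDX: write a CPUID request to the GHCB MSR, VMGEXIT, read the MSR back, verify the response code, then take the payload from bits 63:32. A hedged sketch of how that sequence could be folded into one helper; the accessors and macros are the ones visible in the hunks, but ghcb_msr_cpuid_read() itself is a hypothetical name used only for illustration:

    /* Sketch only - not part of this series. */
    static bool ghcb_msr_cpuid_read(unsigned int fn, unsigned int reg, u32 *value)
    {
        u64 val;

        sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, reg));  /* e.g. reg == GHCB_CPUID_REQ_EAX */
        VMGEXIT();                                    /* exit to the hypervisor */
        val = sev_es_rd_ghcb_msr();

        if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
            return false;

        *value = val >> 32;                           /* payload lives in bits 63:32 */
        return true;
    }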
@@ -22,7 +22,7 @@
 
 #include <asm/cpu_entry_area.h>
 #include <asm/stacktrace.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
 #include <asm/insn-eval.h>
 #include <asm/fpu/internal.h>
 #include <asm/processor.h>

@@ -459,7 +459,7 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
 }
 
 /* Include code shared with pre-decompression boot stage */
-#include "sev-es-shared.c"
+#include "sev-shared.c"
 
 void noinstr __sev_es_nmi_complete(void)
 {
@@ -863,8 +863,8 @@ static __init void svm_adjust_mmio_mask(void)
 		return;
 
 	/* If memory encryption is not enabled, use existing mask */
-	rdmsrl(MSR_K8_SYSCFG, msr);
-	if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+	rdmsrl(MSR_AMD64_SYSCFG, msr);
+	if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
 		return;
 
 	enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
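Besides the SYSCFG check, the KVM hunk above reads the C-bit position from CPUID leaf 0x8000001f (EBX bits 5:0). The same leaf can be queried from user space; a small illustrative program (not kernel code), using GCC's <cpuid.h>:

    /* sme_cpuid.c - user-space look at CPUID leaf 0x8000001f: SME/SEV support
     * in EAX and the C-bit (encryption bit) position in EBX[5:0].
     * Build: gcc -o sme_cpuid sme_cpuid.c
     */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx)) {
            puts("CPUID leaf 0x8000001f not available");
            return 1;
        }

        printf("SME supported : %s\n", (eax & 1) ? "yes" : "no");
        printf("SEV supported : %s\n", (eax & 2) ? "yes" : "no");
        printf("C-bit position: %u\n", ebx & 0x3f);  /* same mask as cpuid_ebx(0x8000001f) & 0x3f */
        return 0;
    }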
@@ -20,6 +20,7 @@
 #include <linux/bits.h>
 
 #include <asm/svm.h>
+#include <asm/sev-common.h>
 
 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
 

@@ -528,37 +529,6 @@ void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);
 #define GHCB_VERSION_MAX	1ULL
 #define GHCB_VERSION_MIN	1ULL
 
-#define GHCB_MSR_INFO_POS	0
-#define GHCB_MSR_INFO_MASK	(BIT_ULL(12) - 1)
-
-#define GHCB_MSR_SEV_INFO_RESP	0x001
-#define GHCB_MSR_SEV_INFO_REQ	0x002
-#define GHCB_MSR_VER_MAX_POS	48
-#define GHCB_MSR_VER_MAX_MASK	0xffff
-#define GHCB_MSR_VER_MIN_POS	32
-#define GHCB_MSR_VER_MIN_MASK	0xffff
-#define GHCB_MSR_CBIT_POS	24
-#define GHCB_MSR_CBIT_MASK	0xff
-#define GHCB_MSR_SEV_INFO(_max, _min, _cbit)				\
-	((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) |	\
-	 (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) |	\
-	 (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) |	\
-	 GHCB_MSR_SEV_INFO_RESP)
-
-#define GHCB_MSR_CPUID_REQ	0x004
-#define GHCB_MSR_CPUID_RESP	0x005
-#define GHCB_MSR_CPUID_FUNC_POS	32
-#define GHCB_MSR_CPUID_FUNC_MASK	0xffffffff
-#define GHCB_MSR_CPUID_VALUE_POS	32
-#define GHCB_MSR_CPUID_VALUE_MASK	0xffffffff
-#define GHCB_MSR_CPUID_REG_POS	30
-#define GHCB_MSR_CPUID_REG_MASK	0x3
-
-#define GHCB_MSR_TERM_REQ	0x100
-#define GHCB_MSR_TERM_REASON_SET_POS	12
-#define GHCB_MSR_TERM_REASON_SET_MASK	0xf
-#define GHCB_MSR_TERM_REASON_POS	16
-#define GHCB_MSR_TERM_REASON_MASK	0xff
-
 extern unsigned int max_sev_asid;
 
@@ -3468,7 +3468,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_LASTBRANCHTOIP:
 	case MSR_IA32_LASTINTFROMIP:
 	case MSR_IA32_LASTINTTOIP:
-	case MSR_K8_SYSCFG:
+	case MSR_AMD64_SYSCFG:
 	case MSR_K8_TSEG_ADDR:
 	case MSR_K8_TSEG_MASK:
 	case MSR_VM_HSAVE_PA:
@@ -5,7 +5,7 @@
 #include <xen/xen.h>
 
 #include <asm/fpu/internal.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
 #include <asm/traps.h>
 #include <asm/kdebug.h>
 
@@ -529,7 +529,7 @@ void __init sme_enable(struct boot_params *bp)
 		/*
 		 * No SME if Hypervisor bit is set. This check is here to
 		 * prevent a guest from trying to enable SME. For running as a
-		 * KVM guest the MSR_K8_SYSCFG will be sufficient, but there
+		 * KVM guest the MSR_AMD64_SYSCFG will be sufficient, but there
 		 * might be other hypervisors which emulate that MSR as non-zero
 		 * or even pass it through to the guest.
 		 * A malicious hypervisor can still trick a guest into this

@@ -542,8 +542,8 @@ void __init sme_enable(struct boot_params *bp)
 			return;
 
 		/* For SME, check the SYSCFG MSR */
-		msr = __rdmsr(MSR_K8_SYSCFG);
-		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+		msr = __rdmsr(MSR_AMD64_SYSCFG);
+		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
 			return;
 	} else {
 		/* SEV state cannot be controlled by a command line option */
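The comment in the first hunk above refers to the CPUID "Hypervisor" bit, which sme_enable() uses to refuse SME setup when running as a guest. For orientation only, and not part of this series: that bit is CPUID leaf 1, ECX bit 31, and can be inspected from user space like this:

    /* hv_bit.c - illustrative check of the Hypervisor bit (CPUID.1:ECX[31]).
     * Build: gcc -o hv_bit hv_bit.c
     */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;

        printf("Hypervisor bit (CPUID.1:ECX[31]): %s\n",
               (ecx & (1u << 31)) ? "set - running as a guest"
                                  : "clear - likely bare metal");
        return 0;
    }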
@@ -284,7 +284,7 @@ static int __init early_root_info_init(void)
 
 	/* need to take out [4G, TOM2) for RAM*/
 	/* SYS_CFG */
-	address = MSR_K8_SYSCFG;
+	address = MSR_AMD64_SYSCFG;
 	rdmsrl(address, val);
 	/* TOP_MEM2 is enabled? */
 	if (val & (1<<21)) {

@@ -47,7 +47,7 @@
 #include <asm/realmode.h>
 #include <asm/time.h>
 #include <asm/pgalloc.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
 
 /*
  * We allocate runtime services regions top-down, starting from -4G, i.e.

@@ -9,7 +9,7 @@
 #include <asm/realmode.h>
 #include <asm/tlbflush.h>
 #include <asm/crash.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
 
 struct real_mode_header *real_mode_header;
 u32 *trampoline_cr4_features;
@@ -123,9 +123,9 @@ SYM_CODE_START(startup_32)
 	 */
 	btl	$TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
 	jnc	.Ldone
-	movl	$MSR_K8_SYSCFG, %ecx
+	movl	$MSR_AMD64_SYSCFG, %ecx
 	rdmsr
-	bts	$MSR_K8_SYSCFG_MEM_ENCRYPT_BIT, %eax
+	bts	$MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT, %eax
 	jc	.Ldone
 
 	/*
@@ -3083,7 +3083,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
 	edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
 
 	/* Check first whether TOP_MEM2 is enabled: */
-	rdmsrl(MSR_K8_SYSCFG, msr_val);
+	rdmsrl(MSR_AMD64_SYSCFG, msr_val);
 	if (msr_val & BIT(21)) {
 		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
 		edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
@@ -537,9 +537,9 @@
 /* K8 MSRs */
 #define MSR_K8_TOP_MEM1			0xc001001a
 #define MSR_K8_TOP_MEM2			0xc001001d
-#define MSR_K8_SYSCFG			0xc0010010
-#define MSR_K8_SYSCFG_MEM_ENCRYPT_BIT	23
-#define MSR_K8_SYSCFG_MEM_ENCRYPT	BIT_ULL(MSR_K8_SYSCFG_MEM_ENCRYPT_BIT)
+#define MSR_AMD64_SYSCFG		0xc0010010
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT	23
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT	BIT_ULL(MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT)
 #define MSR_K8_INT_PENDING_MSG		0xc0010055
 /* C1E active bits in int pending message */
 #define K8_INTP_C1E_ACTIVE_MASK		0x18000000