Merge remote-tracking branch 'tip/x86/cc' into hyperv-next
This commit is contained in:
Commit e82f2069b5
@@ -1234,6 +1234,9 @@ config RELR
 config ARCH_HAS_MEM_ENCRYPT
 	bool
 
+config ARCH_HAS_CC_PLATFORM
+	bool
+
 config HAVE_SPARSE_SYSCALL_NR
 	bool
 	help
@@ -10,11 +10,6 @@
 
 #include <asm/svm.h>
 
-static inline bool mem_encrypt_active(void)
-{
-	return is_secure_guest();
-}
-
 static inline bool force_dma_unencrypted(struct device *dev)
 {
 	return is_secure_guest();
@@ -159,6 +159,7 @@ config PPC_SVM
 	select SWIOTLB
 	select ARCH_HAS_MEM_ENCRYPT
 	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
+	select ARCH_HAS_CC_PLATFORM
 	help
 	 There are certain POWER platforms which support secure guests using
 	 the Protected Execution Facility, with the help of an Ultravisor
@@ -31,3 +31,5 @@ obj-$(CONFIG_FA_DUMP) += rtas-fadump.o
 
 obj-$(CONFIG_SUSPEND)	+= suspend.o
 obj-$(CONFIG_PPC_VAS)	+= vas.o
+
+obj-$(CONFIG_ARCH_HAS_CC_PLATFORM)	+= cc_platform.o
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Confidential Computing Platform Capability checks
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ */
+
+#include <linux/export.h>
+#include <linux/cc_platform.h>
+
+#include <asm/machdep.h>
+#include <asm/svm.h>
+
+bool cc_platform_has(enum cc_attr attr)
+{
+	switch (attr) {
+	case CC_ATTR_MEM_ENCRYPT:
+		return is_secure_guest();
+
+	default:
+		return false;
+	}
+}
+EXPORT_SYMBOL_GPL(cc_platform_has);
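For context, a minimal sketch (not part of this diff) of how generic code can query the powerpc implementation added above; the calling function is hypothetical, and on pseries the check resolves to is_secure_guest():

#include <linux/cc_platform.h>

/* Hypothetical caller: true inside a Protected Execution Facility guest. */
static bool example_mem_encrypt_in_use(void)
{
	return cc_platform_has(CC_ATTR_MEM_ENCRYPT);
}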
@@ -8,6 +8,7 @@
 
 #include <linux/mm.h>
 #include <linux/memblock.h>
+#include <linux/cc_platform.h>
 #include <asm/machdep.h>
 #include <asm/svm.h>
 #include <asm/swiotlb.h>
@@ -63,7 +64,7 @@ void __init svm_swiotlb_init(void)
 
 int set_memory_encrypted(unsigned long addr, int numpages)
 {
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return 0;
 
 	if (!PAGE_ALIGNED(addr))
@@ -76,7 +77,7 @@ int set_memory_encrypted(unsigned long addr, int numpages)
 
 int set_memory_decrypted(unsigned long addr, int numpages)
 {
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return 0;
 
 	if (!PAGE_ALIGNED(addr))
@@ -4,8 +4,6 @@
 
 #ifndef __ASSEMBLY__
 
-static inline bool mem_encrypt_active(void) { return false; }
-
 int set_memory_encrypted(unsigned long addr, int numpages);
 int set_memory_decrypted(unsigned long addr, int numpages);
 
@@ -1518,6 +1518,7 @@ config AMD_MEM_ENCRYPT
 	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	select INSTRUCTION_DECODER
 	select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
+	select ARCH_HAS_CC_PLATFORM
 	help
 	  Say yes to enable support for the encryption of system memory.
 	  This requires an AMD processor that supports Secure Memory
@@ -391,6 +391,7 @@ extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
 #define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
 #endif
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
 extern bool arch_memremap_can_ram_remap(resource_size_t offset,
 					unsigned long size,
 					unsigned long flags);
@@ -398,6 +399,13 @@ extern bool arch_memremap_can_ram_remap(resource_size_t offset,
 
 extern bool phys_mem_access_encrypted(unsigned long phys_addr,
 				      unsigned long size);
+#else
+static inline bool phys_mem_access_encrypted(unsigned long phys_addr,
+					     unsigned long size)
+{
+	return true;
+}
+#endif
 
 /**
  * iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units
@@ -129,7 +129,7 @@ relocate_kernel(unsigned long indirection_page,
 		unsigned long page_list,
 		unsigned long start_address,
 		unsigned int preserve_context,
-		unsigned int sme_active);
+		unsigned int host_mem_enc_active);
 #endif
 
 #define ARCH_HAS_KIMAGE_ARCH
@@ -13,6 +13,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/init.h>
+#include <linux/cc_platform.h>
 
 #include <asm/bootparam.h>
 
@@ -50,9 +51,6 @@ void __init mem_encrypt_free_decrypted_mem(void);
 void __init mem_encrypt_init(void);
 
 void __init sev_es_init_vc_handling(void);
-bool sme_active(void);
-bool sev_active(void);
-bool sev_es_active(void);
 
 #define __bss_decrypted __section(".bss..decrypted")
 
@@ -75,9 +73,6 @@ static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
 static inline void __init sme_enable(struct boot_params *bp) { }
 
 static inline void sev_es_init_vc_handling(void) { }
-static inline bool sme_active(void) { return false; }
-static inline bool sev_active(void) { return false; }
-static inline bool sev_es_active(void) { return false; }
 
 static inline int __init
 early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; }
@@ -101,11 +96,6 @@ static inline void mem_encrypt_free_decrypted_mem(void) { }
 
 extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
 
-static inline bool mem_encrypt_active(void)
-{
-	return sme_me_mask;
-}
-
 static inline u64 sme_get_me_mask(void)
 {
 	return sme_me_mask;
@@ -21,6 +21,7 @@ CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
 CFLAGS_REMOVE_head64.o = -pg
 CFLAGS_REMOVE_sev.o = -pg
+CFLAGS_REMOVE_cc_platform.o = -pg
 endif
 
 KASAN_SANITIZE_head$(BITS).o := n
@@ -29,6 +30,7 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n
 KASAN_SANITIZE_stacktrace.o := n
 KASAN_SANITIZE_paravirt.o := n
 KASAN_SANITIZE_sev.o := n
+KASAN_SANITIZE_cc_platform.o := n
 
 # With some compiler versions the generated code results in boot hangs, caused
 # by several compilation units. To be safe, disable all instrumentation.
@@ -47,6 +49,7 @@ endif
 KCOV_INSTRUMENT		:= n
 
 CFLAGS_head$(BITS).o	+= -fno-stack-protector
+CFLAGS_cc_platform.o	+= -fno-stack-protector
 
 CFLAGS_irq.o := -I $(srctree)/$(src)/../include/asm/trace
 
@@ -147,6 +150,9 @@ obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o
 obj-$(CONFIG_UNWINDER_GUESS)		+= unwind_guess.o
 
 obj-$(CONFIG_AMD_MEM_ENCRYPT)		+= sev.o
+
+obj-$(CONFIG_ARCH_HAS_CC_PLATFORM)	+= cc_platform.o
+
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Confidential Computing Platform Capability checks
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ */
+
+#include <linux/export.h>
+#include <linux/cc_platform.h>
+#include <linux/mem_encrypt.h>
+
+#include <asm/processor.h>
+
+static bool __maybe_unused intel_cc_platform_has(enum cc_attr attr)
+{
+#ifdef CONFIG_INTEL_TDX_GUEST
+	return false;
+#else
+	return false;
+#endif
+}
+
+/*
+ * SME and SEV are very similar but they are not the same, so there are
+ * times that the kernel will need to distinguish between SME and SEV. The
+ * cc_platform_has() function is used for this.  When a distinction isn't
+ * needed, the CC_ATTR_MEM_ENCRYPT attribute can be used.
+ *
+ * The trampoline code is a good example for this requirement.  Before
+ * paging is activated, SME will access all memory as decrypted, but SEV
+ * will access all memory as encrypted.  So, when APs are being brought
+ * up under SME the trampoline area cannot be encrypted, whereas under SEV
+ * the trampoline area must be encrypted.
+ */
+static bool amd_cc_platform_has(enum cc_attr attr)
+{
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	switch (attr) {
+	case CC_ATTR_MEM_ENCRYPT:
+		return sme_me_mask;
+
+	case CC_ATTR_HOST_MEM_ENCRYPT:
+		return sme_me_mask && !(sev_status & MSR_AMD64_SEV_ENABLED);
+
+	case CC_ATTR_GUEST_MEM_ENCRYPT:
+		return sev_status & MSR_AMD64_SEV_ENABLED;
+
+	case CC_ATTR_GUEST_STATE_ENCRYPT:
+		return sev_status & MSR_AMD64_SEV_ES_ENABLED;
+
+	default:
+		return false;
+	}
+#else
+	return false;
+#endif
+}
+
+
+bool cc_platform_has(enum cc_attr attr)
+{
+	if (sme_me_mask)
+		return amd_cc_platform_has(attr);
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(cc_platform_has);
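For reference, a minimal usage sketch (not part of this diff) of the SME/SEV distinction described in the comment block above; the function and messages are hypothetical:

#include <linux/cc_platform.h>
#include <linux/printk.h>

/*
 * Hypothetical caller: under bare-metal SME the trampoline area is left
 * decrypted, while an SEV guest must keep it encrypted.
 */
static void __maybe_unused example_report_encryption(void)
{
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		pr_info("SME: trampoline area stays decrypted\n");
	else if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		pr_info("SEV: trampoline area stays encrypted\n");
}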
@@ -10,6 +10,7 @@
 #include <linux/crash_dump.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
+#include <linux/cc_platform.h>
 
 static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
 				  unsigned long offset, int userbuf,
@@ -73,5 +74,6 @@ ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
 
 ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
-	return read_from_oldmem(buf, count, ppos, 0, sev_active());
+	return read_from_oldmem(buf, count, ppos, 0,
+				cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT));
 }
@@ -19,7 +19,7 @@
 #include <linux/start_kernel.h>
 #include <linux/io.h>
 #include <linux/memblock.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/pgtable.h>
 
 #include <asm/processor.h>
@@ -284,8 +284,13 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	 * The bss section will be memset to zero later in the initialization so
 	 * there is no need to zero it after changing the memory encryption
 	 * attribute.
+	 *
+	 * This is early code, use an open coded check for SME instead of
+	 * using cc_platform_has(). This eliminates worries about removing
+	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
+	 * function.
 	 */
-	if (mem_encrypt_active()) {
+	if (sme_get_me_mask()) {
 		vaddr = (unsigned long)__start_bss_decrypted;
 		vaddr_end = (unsigned long)__end_bss_decrypted;
 		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
@@ -27,6 +27,7 @@
 #include <linux/nmi.h>
 #include <linux/swait.h>
 #include <linux/syscore_ops.h>
+#include <linux/cc_platform.h>
 #include <asm/timer.h>
 #include <asm/cpu.h>
 #include <asm/traps.h>
@@ -418,7 +419,7 @@ static void __init sev_map_percpu_data(void)
 {
 	int cpu;
 
-	if (!sev_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		return;
 
 	for_each_possible_cpu(cpu) {
@@ -16,9 +16,9 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/set_memory.h>
+#include <linux/cc_platform.h>
 
 #include <asm/hypervisor.h>
-#include <asm/mem_encrypt.h>
 #include <asm/x86_init.h>
 #include <asm/kvmclock.h>
 
@@ -223,7 +223,7 @@ static void __init kvmclock_init_mem(void)
 	 * hvclock is shared between the guest and the hypervisor, must
 	 * be mapped decrypted.
 	 */
-	if (sev_active()) {
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
 		r = set_memory_decrypted((unsigned long) hvclock_mem,
 					 1UL << order);
 		if (r) {
@@ -17,6 +17,7 @@
 #include <linux/suspend.h>
 #include <linux/vmalloc.h>
 #include <linux/efi.h>
+#include <linux/cc_platform.h>
 
 #include <asm/init.h>
 #include <asm/tlbflush.h>
@@ -166,7 +167,7 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
 	}
 	pte = pte_offset_kernel(pmd, vaddr);
 
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		prot = PAGE_KERNEL_EXEC;
 
 	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
@@ -206,7 +207,7 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 	level4p = (pgd_t *)__va(start_pgtable);
 	clear_page(level4p);
 
-	if (sev_active()) {
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
 		info.page_flag   |= _PAGE_ENC;
 		info.kernpg_flag |= _PAGE_ENC;
 	}
@@ -358,7 +359,7 @@ void machine_kexec(struct kimage *image)
 				       (unsigned long)page_list,
 				       image->start,
 				       image->preserve_context,
-				       sme_active());
+				       cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT));
 
 #ifdef CONFIG_KEXEC_JUMP
 	if (image->preserve_context)
@@ -569,12 +570,12 @@ void arch_kexec_unprotect_crashkres(void)
  */
 int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
 {
-	if (sev_active())
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
 		return 0;
 
 	/*
-	 * If SME is active we need to be sure that kexec pages are
-	 * not encrypted because when we boot to the new kernel the
+	 * If host memory encryption is active we need to be sure that kexec
+	 * pages are not encrypted because when we boot to the new kernel the
 	 * pages won't be accessed encrypted (initially).
 	 */
 	return set_memory_decrypted((unsigned long)vaddr, pages);
@@ -582,12 +583,12 @@ int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
 
 void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages)
 {
-	if (sev_active())
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
 		return;
 
 	/*
-	 * If SME is active we need to reset the pages back to being
-	 * an encrypted mapping before freeing them.
+	 * If host memory encryption is active we need to reset the pages back
+	 * to being an encrypted mapping before freeing them.
 	 */
 	set_memory_encrypted((unsigned long)vaddr, pages);
 }
@@ -6,7 +6,7 @@
 #include <linux/swiotlb.h>
 #include <linux/memblock.h>
 #include <linux/dma-direct.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 
 #include <asm/iommu.h>
 #include <asm/swiotlb.h>
@@ -45,11 +45,10 @@ int __init pci_swiotlb_detect_4gb(void)
 		swiotlb = 1;
 
 	/*
-	 * If SME is active then swiotlb will be set to 1 so that bounce
-	 * buffers are allocated and used for devices that do not support
-	 * the addressing range required for the encryption mask.
+	 * Set swiotlb to 1 so that bounce buffers are allocated and used for
+	 * devices that can't support DMA to encrypted memory.
 	 */
-	if (sme_active())
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
 		swiotlb = 1;
 
 	return swiotlb;
@@ -47,7 +47,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
 	 * %rsi page_list
 	 * %rdx start address
 	 * %rcx preserve_context
-	 * %r8  sme_active
+	 * %r8  host_mem_enc_active
 	 */
 
 	/* Save the CPU context, used for jumping back */
@@ -11,7 +11,7 @@
 
 #include <linux/sched/debug.h>	/* For show_regs() */
 #include <linux/percpu-defs.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/printk.h>
 #include <linux/mm_types.h>
 #include <linux/set_memory.h>
@@ -615,7 +615,7 @@ int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
 	int cpu;
 	u64 pfn;
 
-	if (!sev_es_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
 		return 0;
 
 	pflags = _PAGE_NX | _PAGE_RW;
@@ -775,7 +775,7 @@ void __init sev_es_init_vc_handling(void)
 
 	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);
 
-	if (!sev_es_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
 		return;
 
 	if (!sev_es_check_cpu_features())
@@ -25,6 +25,7 @@
 #include <linux/pagemap.h>
 #include <linux/swap.h>
 #include <linux/rwsem.h>
+#include <linux/cc_platform.h>
 
 #include <asm/apic.h>
 #include <asm/perf_event.h>
@@ -455,7 +456,7 @@ static int has_svm(void)
 		return 0;
 	}
 
-	if (sev_active()) {
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
 		pr_info("KVM is unsupported when running as an SEV guest\n");
 		return 0;
 	}
@@ -14,7 +14,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mmiotrace.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/efi.h>
 #include <linux/pgtable.h>
 
@@ -92,7 +92,7 @@ static unsigned int __ioremap_check_ram(struct resource *res)
  */
 static unsigned int __ioremap_check_encrypted(struct resource *res)
 {
-	if (!sev_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		return 0;
 
 	switch (res->desc) {
@@ -112,7 +112,7 @@ static unsigned int __ioremap_check_encrypted(struct resource *res)
  */
 static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
 {
-	if (!sev_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		return;
 
 	if (!IS_ENABLED(CONFIG_EFI))
@@ -508,6 +508,7 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
 	memunmap((void *)((unsigned long)addr & PAGE_MASK));
 }
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
 /*
  * Examine the physical address to determine if it is an area of memory
  * that should be mapped decrypted.  If the memory is not part of the
@@ -555,7 +556,7 @@ static bool memremap_should_map_decrypted(resource_size_t phys_addr,
 	case E820_TYPE_NVS:
 	case E820_TYPE_UNUSABLE:
 		/* For SEV, these areas are encrypted */
-		if (sev_active())
+		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 			break;
 		fallthrough;
 
@@ -693,7 +694,7 @@ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
 bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
 				 unsigned long flags)
 {
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return true;
 
 	if (flags & MEMREMAP_ENC)
@@ -702,7 +703,7 @@ bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
 	if (flags & MEMREMAP_DEC)
 		return false;
 
-	if (sme_active()) {
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
 		if (memremap_is_setup_data(phys_addr, size) ||
 		    memremap_is_efi_data(phys_addr, size))
 			return false;
@@ -723,12 +724,12 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
 {
 	bool encrypted_prot;
 
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return prot;
 
 	encrypted_prot = true;
 
-	if (sme_active()) {
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
 		if (early_memremap_is_setup_data(phys_addr, size) ||
 		    memremap_is_efi_data(phys_addr, size))
 			encrypted_prot = false;
@@ -746,7 +747,6 @@ bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
 	return arch_memremap_can_ram_remap(phys_addr, size, 0);
 }
 
-#ifdef CONFIG_AMD_MEM_ENCRYPT
 /* Remap memory with encryption */
 void __init *early_memremap_encrypted(resource_size_t phys_addr,
 				      unsigned long size)
@@ -20,6 +20,7 @@
 #include <linux/bitops.h>
 #include <linux/dma-mapping.h>
 #include <linux/virtio_config.h>
+#include <linux/cc_platform.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -143,7 +144,7 @@ void __init sme_unmap_bootdata(char *real_mode_data)
 	struct boot_params *boot_data;
 	unsigned long cmdline_paddr;
 
-	if (!sme_active())
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
 		return;
 
 	/* Get the command line address before unmapping the real_mode_data */
@@ -163,7 +164,7 @@ void __init sme_map_bootdata(char *real_mode_data)
 	struct boot_params *boot_data;
 	unsigned long cmdline_paddr;
 
-	if (!sme_active())
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
 		return;
 
 	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);
@@ -193,7 +194,7 @@ void __init sme_early_init(void)
 	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
 		protection_map[i] = pgprot_encrypted(protection_map[i]);
 
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		swiotlb_force = SWIOTLB_FORCE;
 }
 
@@ -202,7 +203,7 @@ void __init sev_setup_arch(void)
 	phys_addr_t total_mem = memblock_phys_mem_size();
 	unsigned long size;
 
-	if (!sev_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		return;
 
 	/*
@@ -360,42 +361,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
 	return early_set_memory_enc_dec(vaddr, size, true);
 }
 
-/*
- * SME and SEV are very similar but they are not the same, so there are
- * times that the kernel will need to distinguish between SME and SEV. The
- * sme_active() and sev_active() functions are used for this.  When a
- * distinction isn't needed, the mem_encrypt_active() function can be used.
- *
- * The trampoline code is a good example for this requirement.  Before
- * paging is activated, SME will access all memory as decrypted, but SEV
- * will access all memory as encrypted.  So, when APs are being brought
- * up under SME the trampoline area cannot be encrypted, whereas under SEV
- * the trampoline area must be encrypted.
- */
-bool sev_active(void)
-{
-	return sev_status & MSR_AMD64_SEV_ENABLED;
-}
-
-bool sme_active(void)
-{
-	return sme_me_mask && !sev_active();
-}
-EXPORT_SYMBOL_GPL(sev_active);
-
-/* Needs to be called from non-instrumentable code */
-bool noinstr sev_es_active(void)
-{
-	return sev_status & MSR_AMD64_SEV_ES_ENABLED;
-}
-
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
 {
 	/*
 	 * For SEV, all DMA must be to unencrypted addresses.
 	 */
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		return true;
 
 	/*
@@ -403,7 +375,7 @@ bool force_dma_unencrypted(struct device *dev)
 	 * device does not support DMA to addresses that include the
 	 * encryption mask.
 	 */
-	if (sme_active()) {
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
 		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
 		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
 						dev->bus_dma_limit);
@@ -428,7 +400,7 @@ void __init mem_encrypt_free_decrypted_mem(void)
 	 * The unused memory range was mapped decrypted, change the encryption
 	 * attribute from decrypted to encrypted before freeing it.
 	 */
-	if (mem_encrypt_active()) {
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 		r = set_memory_encrypted(vaddr, npages);
 		if (r) {
 			pr_warn("failed to free unused decrypted pages\n");
@@ -444,7 +416,7 @@ static void print_mem_encrypt_feature_info(void)
 	pr_info("AMD Memory Encryption Features active:");
 
 	/* Secure Memory Encryption */
-	if (sme_active()) {
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
 		/*
 		 * SME is mutually exclusive with any of the SEV
 		 * features below.
@@ -454,11 +426,11 @@
 	}
 
 	/* Secure Encrypted Virtualization */
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		pr_cont(" SEV");
 
 	/* Encrypted Register State */
-	if (sev_es_active())
+	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
 		pr_cont(" SEV-ES");
 
 	pr_cont("\n");
@@ -477,7 +449,8 @@ void __init mem_encrypt_init(void)
 	 * With SEV, we need to unroll the rep string I/O instructions,
 	 * but SEV-ES supports them through the #VC handler.
 	 */
-	if (sev_active() && !sev_es_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
+	    !cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
 		static_branch_enable(&sev_enable_key);
 
 	print_mem_encrypt_feature_info();
@@ -485,6 +458,6 @@ void __init mem_encrypt_init(void)
 
 int arch_has_restricted_virtio_memory_access(void)
 {
-	return sev_active();
+	return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
 }
 EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);
 
@@ -39,6 +39,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 
 #include <asm/setup.h>
 #include <asm/sections.h>
@@ -296,7 +297,13 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
 	unsigned long pgtable_area_len;
 	unsigned long decrypted_base;
 
-	if (!sme_active())
+	/*
+	 * This is early code, use an open coded check for SME instead of
+	 * using cc_platform_has(). This eliminates worries about removing
+	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
+	 * function.
+	 */
+	if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED)
 		return;
 
 	/*
@@ -18,6 +18,7 @@
 #include <linux/libnvdimm.h>
 #include <linux/vmstat.h>
 #include <linux/kernel.h>
+#include <linux/cc_platform.h>
 
 #include <asm/e820/api.h>
 #include <asm/processor.h>
@@ -1986,7 +1987,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	int ret;
 
 	/* Nothing to do if memory encryption is not active */
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return 0;
 
 	/* Should not be working on unaligned addresses */
@@ -33,7 +33,7 @@
 #include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/ucs2_string.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/sched/task.h>
 
 #include <asm/setup.h>
@@ -284,7 +284,8 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va)
 	if (!(md->attribute & EFI_MEMORY_WB))
 		flags |= _PAGE_PCD;
 
-	if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
+	    md->type != EFI_MEMORY_MAPPED_IO)
 		flags |= _PAGE_ENC;
 
 	pfn = md->phys_addr >> PAGE_SHIFT;
@@ -390,7 +391,7 @@ static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *m
 	if (!(md->attribute & EFI_MEMORY_RO))
 		pf |= _PAGE_RW;
 
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		pf |= _PAGE_ENC;
 
 	return efi_update_mappings(md, pf);
@@ -438,7 +439,7 @@ void __init efi_runtime_update_mappings(void)
 		    (md->type != EFI_RUNTIME_SERVICES_CODE))
 			pf |= _PAGE_RW;
 
-		if (sev_active())
+		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 			pf |= _PAGE_ENC;
 
 		efi_update_mappings(md, pf);
@@ -2,7 +2,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/memblock.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/pgtable.h>
 
 #include <asm/set_memory.h>
@@ -44,10 +44,10 @@ void __init reserve_real_mode(void)
 static void sme_sev_setup_real_mode(struct trampoline_header *th)
 {
 #ifdef CONFIG_AMD_MEM_ENCRYPT
-	if (sme_active())
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
 		th->flags |= TH_FLAGS_SME_ACTIVE;
 
-	if (sev_es_active()) {
+	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
 		/*
 		 * Skip the call to verify_cpu() in secondary_startup_64 as it
 		 * will cause #VC exceptions when the AP can't handle them yet.
@@ -81,7 +81,7 @@ static void __init setup_real_mode(void)
 	 * decrypted memory in order to bring up other processors
 	 * successfully. This is not needed for SEV.
 	 */
-	if (sme_active())
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
 		set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);
 
 	memcpy(base, real_mode_blob, size);
@@ -38,6 +38,7 @@
 #include <drm/drm_probe_helper.h>
 #include <linux/mmu_notifier.h>
 #include <linux/suspend.h>
+#include <linux/cc_platform.h>
 
 #include "amdgpu.h"
 #include "amdgpu_irq.h"
@@ -1269,7 +1270,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 	 * however, SME requires an indirect IOMMU mapping because the encryption
 	 * bit is beyond the DMA mask of the chip.
 	 */
-	if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
+	    ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
 		dev_info(&pdev->dev,
 			 "SME is not compatible with RAVEN\n");
 		return -ENOTSUPP;
@@ -31,7 +31,7 @@
 #include <linux/dma-buf-map.h>
 #include <linux/export.h>
 #include <linux/highmem.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <xen/xen.h>
 
 #include <drm/drm_cache.h>
@@ -204,7 +204,7 @@ bool drm_need_swiotlb(int dma_bits)
 	 * Enforce dma_alloc_coherent when memory encryption is active as well
 	 * for the same reasons as for Xen paravirtual hosts.
 	 */
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return true;
 
 	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
@@ -29,7 +29,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 
 #include <drm/drm_aperture.h>
 #include <drm/drm_drv.h>
@@ -666,7 +666,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
 
 	/* TTM currently doesn't fully support SEV encryption. */
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return -EINVAL;
 
 	if (vmw_force_coherent)
@@ -28,7 +28,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 
 #include <asm/hypervisor.h>
 #include <drm/drm_ioctl.h>
@@ -160,7 +160,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
 	unsigned long msg_len = strlen(msg);
 
 	/* HB port can't access encrypted memory. */
-	if (hb && !mem_encrypt_active()) {
+	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 		unsigned long bp = channel->cookie_high;
 		u32 channel_id = (channel->channel_id << 16);
 
@@ -216,7 +216,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
 	unsigned long si, di, eax, ebx, ecx, edx;
 
 	/* HB port can't access encrypted memory */
-	if (hb && !mem_encrypt_active()) {
+	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 		unsigned long bp = channel->cookie_low;
 		u32 channel_id = (channel->channel_id << 16);
 
@@ -20,7 +20,7 @@
 #include <linux/amd-iommu.h>
 #include <linux/export.h>
 #include <linux/kmemleak.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/apic.h>
@@ -964,7 +964,7 @@ static bool copy_device_table(void)
 		pr_err("The address of old device table is above 4G, not trustworthy!\n");
 		return false;
 	}
-	old_devtb = (sme_active() && is_kdump_kernel())
+	old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
 		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
 							dev_table_size)
 		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
@@ -3032,7 +3032,8 @@ static int __init amd_iommu_init(void)
 
 static bool amd_iommu_sme_check(void)
 {
-	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) ||
+	    (boot_cpu_data.x86 != 0x17))
 		return true;
 
 	/* For Fam17h, a specific level of support is required */
@@ -31,6 +31,7 @@
 #include <linux/irqdomain.h>
 #include <linux/percpu.h>
 #include <linux/io-pgtable.h>
+#include <linux/cc_platform.h>
 #include <asm/irq_remapping.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
@@ -2238,7 +2239,7 @@ static int amd_iommu_def_domain_type(struct device *dev)
 	 * active, because some of those devices (AMD GPUs) don't have the
 	 * encryption bit in their DMA-mask and require remapping.
 	 */
-	if (!mem_encrypt_active() && dev_data->iommu_v2)
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2)
 		return IOMMU_DOMAIN_IDENTITY;
 
 	return 0;
@@ -17,6 +17,7 @@
 #include <linux/wait.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
+#include <linux/cc_platform.h>
 
 #include "amd_iommu.h"
 
@@ -742,7 +743,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 	 * When memory encryption is active the device is likely not in a
 	 * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
 	 */
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return -ENODEV;
 
 	if (!amd_iommu_v2_supported())
@@ -25,6 +25,7 @@
 #include <linux/property.h>
 #include <linux/fsl/mc.h>
 #include <linux/module.h>
+#include <linux/cc_platform.h>
 #include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
@@ -130,7 +131,7 @@ static int __init iommu_subsys_init(void)
 	else
 		iommu_set_default_translated(false);
 
-	if (iommu_default_passthrough() && mem_encrypt_active()) {
+	if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 		pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
 		iommu_set_default_translated(false);
 	}
@@ -26,7 +26,7 @@
 #include <linux/vmalloc.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <asm/io.h>
 #include "internal.h"
 
@@ -177,7 +177,7 @@ ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
  */
 ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
 {
-	return read_from_oldmem(buf, count, ppos, 0, mem_encrypt_active());
+	return read_from_oldmem(buf, count, ppos, 0, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
 }
 
 /*
@@ -378,7 +378,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
 					    buflen);
 			start = m->paddr + *fpos - m->offset;
 			tmp = read_from_oldmem(buffer, tsz, &start,
-					       userbuf, mem_encrypt_active());
+					       userbuf, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
 			if (tmp < 0)
 				return tmp;
 			buflen -= tsz;
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Confidential Computing Platform Capability checks
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ */
+
+#ifndef _LINUX_CC_PLATFORM_H
+#define _LINUX_CC_PLATFORM_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+
+/**
+ * enum cc_attr - Confidential computing attributes
+ *
+ * These attributes represent confidential computing features that are
+ * currently active.
+ */
+enum cc_attr {
+	/**
+	 * @CC_ATTR_MEM_ENCRYPT: Memory encryption is active
+	 *
+	 * The platform/OS is running with active memory encryption. This
+	 * includes running either as a bare-metal system or a hypervisor
+	 * and actively using memory encryption or as a guest/virtual machine
+	 * and actively using memory encryption.
+	 *
+	 * Examples include SME, SEV and SEV-ES.
+	 */
+	CC_ATTR_MEM_ENCRYPT,
+
+	/**
+	 * @CC_ATTR_HOST_MEM_ENCRYPT: Host memory encryption is active
+	 *
+	 * The platform/OS is running as a bare-metal system or a hypervisor
+	 * and actively using memory encryption.
+	 *
+	 * Examples include SME.
+	 */
+	CC_ATTR_HOST_MEM_ENCRYPT,
+
+	/**
+	 * @CC_ATTR_GUEST_MEM_ENCRYPT: Guest memory encryption is active
+	 *
+	 * The platform/OS is running as a guest/virtual machine and actively
+	 * using memory encryption.
+	 *
+	 * Examples include SEV and SEV-ES.
+	 */
+	CC_ATTR_GUEST_MEM_ENCRYPT,
+
+	/**
+	 * @CC_ATTR_GUEST_STATE_ENCRYPT: Guest state encryption is active
+	 *
+	 * The platform/OS is running as a guest/virtual machine and actively
+	 * using memory encryption and register state encryption.
+	 *
+	 * Examples include SEV-ES.
+	 */
+	CC_ATTR_GUEST_STATE_ENCRYPT,
+};
+
+#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+
+/**
+ * cc_platform_has() - Checks if the specified cc_attr attribute is active
+ * @attr: Confidential computing attribute to check
+ *
+ * The cc_platform_has() function will return an indicator as to whether the
+ * specified Confidential Computing attribute is currently active.
+ *
+ * Context: Any context
+ * Return:
+ * * TRUE  - Specified Confidential Computing attribute is active
+ * * FALSE - Specified Confidential Computing attribute is not active
+ */
+bool cc_platform_has(enum cc_attr attr);
+
+#else	/* !CONFIG_ARCH_HAS_CC_PLATFORM */
+
+static inline bool cc_platform_has(enum cc_attr attr) { return false; }
+
+#endif	/* CONFIG_ARCH_HAS_CC_PLATFORM */
+
+#endif	/* _LINUX_CC_PLATFORM_H */
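A brief usage note (not from the diff): because the header above provides a stub that returns false when CONFIG_ARCH_HAS_CC_PLATFORM is not selected, callers can test attributes unconditionally; the function below is a hypothetical sketch:

#include <linux/cc_platform.h>
#include <linux/printk.h>

/* Hypothetical check: no #ifdef needed around the attribute queries. */
static int example_check_cc(void)
{
	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		pr_info("guest register state is encrypted (e.g. SEV-ES)\n");

	return 0;
}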
@@ -16,10 +16,6 @@
 
 #include <asm/mem_encrypt.h>
 
-#else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
-
-static inline bool mem_encrypt_active(void) { return false; }
-
 #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
@@ -34,7 +34,7 @@
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/scatterlist.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/set_memory.h>
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
@@ -552,7 +552,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 	if (!mem)
 		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
 
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
 
 	if (mapping_size > alloc_size) {