// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/virtio_config.h>
#include <linux/virtio_anchor.h>
#include <linux/cc_platform.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/mem_encrypt.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>
#include <asm/sev.h>

#include "mm_internal.h"

/*
 * Since SME related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(".data") = 0;
u64 sev_status __section(".data") = 0;
u64 sev_check_data __section(".data") = 0;
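/*
 * Exported non-GPL: sme_me_mask feeds into PAGE_KERNEL via _PAGE_ENC, so a
 * GPL-only export would break non-GPL modules that pass PAGE_KERNEL to
 * EXPORT_SYMBOL() interfaces such as vmap(), __vmalloc() and
 * remap_pfn_range().
 */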
EXPORT_SYMBOL(sme_me_mask);

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);

/*
 * SNP-specific routine which needs to additionally change the page state from
 * private to shared before copying the data from the source to destination and
 * restore after the copy.
 */
static inline void __init snp_memcpy(void *dst, void *src, size_t sz,
				     unsigned long paddr, bool decrypt)
{
	unsigned long npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;

	if (decrypt) {
		/*
		 * @paddr needs to be accessed decrypted, mark the page shared in
		 * the RMP table before copying it.
		 */
		early_snp_set_memory_shared((unsigned long)__va(paddr), paddr, npages);

		memcpy(dst, src, sz);

		/* Restore the page state after the memcpy. */
		early_snp_set_memory_private((unsigned long)__va(paddr), paddr, npages);
	} else {
		/*
		 * @paddr needs to be accessed encrypted, no need for the page
		 * state change.
		 */
		memcpy(dst, src, sz);
	}
}

/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	if (!sme_me_mask)
		return;

	wbinvd();

	/*
	 * There are a limited number of early mapping slots, so map (at most)
	 * one page at a time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);

		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
			snp_memcpy(sme_early_buffer, src, len, paddr, enc);
			snp_memcpy(dst, sme_early_buffer, len, paddr, !enc);
		} else {
			memcpy(sme_early_buffer, src, len);
			memcpy(dst, sme_early_buffer, len);
		}

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}

void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}

static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	flush_tlb_local();
}

void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}

void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

void __init sev_setup_arch(void)
{
	phys_addr_t total_mem = memblock_phys_mem_size();
	unsigned long size;

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	/*
	 * For SEV, all DMA has to occur via shared/unencrypted pages.
	 * SEV uses SWIOTLB to make this happen without changing device
	 * drivers. However, depending on the workload being run, the
	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
	 * run out of buffers for DMA, resulting in I/O errors and/or
	 * performance degradation especially with high I/O workloads.
	 *
	 * Adjust the default size of SWIOTLB for SEV guests using
	 * a percentage of guest memory for SWIOTLB buffers.
	 * Also, as the SWIOTLB bounce buffer memory is allocated
	 * from low memory, ensure that the adjusted size is within
	 * the limits of low available memory.
	 *
	 * The percentage of guest memory used here for SWIOTLB buffers
	 * is an approximation of the static adjustment, which would use
	 * 64MB for <1G and ~128M to 256M for 1G-to-4G of guest memory,
	 * i.e., roughly 6%.
	 */
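	/*
	 * For example (illustrative only): a 4GB guest would get
	 * 4GB * 6 / 100 ~= 245MB of SWIOTLB, clamped below to the
	 * [IO_TLB_DEFAULT_SIZE, SZ_1G] range.
	 */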
	size = total_mem * 6 / 100;
	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
	swiotlb_adjust_size(size);

	/* Set restricted memory access for virtio. */
	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
}
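
/*
 * Return the pfn of the mapping described by @kpte at @level and, when
 * @ret_prot is non-NULL, its page protections as well.
 */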
static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
{
	unsigned long pfn = 0;
	pgprot_t prot;

	switch (level) {
	case PG_LEVEL_4K:
		pfn = pte_pfn(*kpte);
		prot = pte_pgprot(*kpte);
		break;
	case PG_LEVEL_2M:
		pfn = pmd_pfn(*(pmd_t *)kpte);
		prot = pmd_pgprot(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		pfn = pud_pfn(*(pud_t *)kpte);
		prot = pud_pgprot(*(pud_t *)kpte);
		break;
	default:
		WARN_ONCE(1, "Invalid level for kpte\n");
		return 0;
	}

	if (ret_prot)
		*ret_prot = prot;

	return pfn;
}
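
/*
 * An encryption-attribute change always requires a TLB flush; the cache
 * flush can be skipped when the CPU advertises SME_COHERENT.
 */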
static bool amd_enc_tlb_flush_required(bool enc)
{
	return true;
}

static bool amd_enc_cache_flush_required(void)
{
	return !cpu_feature_enabled(X86_FEATURE_SME_COHERENT);
}
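
/*
 * Notify the hypervisor about every page in the range whose C-bit status
 * changed, walking the region one mapping at a time (paravirt builds only).
 */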
static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
{
#ifdef CONFIG_PARAVIRT
	unsigned long sz = npages << PAGE_SHIFT;
	unsigned long vaddr_end = vaddr + sz;

	while (vaddr < vaddr_end) {
		int psize, pmask, level;
		unsigned long pfn;
		pte_t *kpte;

		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			WARN_ONCE(1, "kpte lookup for vaddr\n");
			return;
		}

		pfn = pg_level_to_pfn(level, kpte, NULL);
		if (!pfn)
			continue;

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc);

		vaddr = (vaddr & pmask) + psize;
	}
#endif
}

static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
{
	/*
	 * To maintain the security guarantees of SEV-SNP guests, make sure
	 * to invalidate the memory before the encryption attribute is cleared.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
		snp_set_memory_shared(vaddr, npages);
}

/* Return true unconditionally: return value doesn't matter for the SEV side */
static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
{
	/*
	 * After memory is mapped encrypted in the page table, validate it
	 * so that it is consistent with the page table updates.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && enc)
		snp_set_memory_private(vaddr, npages);

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		enc_dec_hypercall(vaddr, npages, enc);

	return true;
}
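
/*
 * Set or clear the encryption attribute on the mapping described by @kpte
 * and convert the page contents in place so they remain readable afterwards.
 */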
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
	pgprot_t old_prot, new_prot;
	unsigned long pfn, pa, size;
	pte_t new_pte;

	pfn = pg_level_to_pfn(level, kpte, &old_prot);
	if (!pfn)
		return;

	new_prot = old_prot;
	if (enc)
		pgprot_val(new_prot) |= _PAGE_ENC;
	else
		pgprot_val(new_prot) &= ~_PAGE_ENC;

	/* If the protections are the same then do nothing. */
	if (pgprot_val(old_prot) == pgprot_val(new_prot))
		return;

	pa = pfn << PAGE_SHIFT;
	size = page_level_size(level);

	/*
	 * We are going to perform in-place en-/decryption and change the
	 * physical page attribute from C=1 to C=0 or vice versa. Flush the
	 * caches to ensure that data gets accessed with the correct C-bit.
	 */
	clflush_cache_range(__va(pa), size);

	/* Encrypt/decrypt the contents in-place */
	if (enc) {
		sme_early_encrypt(pa, size);
	} else {
		sme_early_decrypt(pa, size);

		/*
		 * On SNP, the page state change in the RMP table must happen
		 * before the page table updates.
		 */
		early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1);
	}

	/* Change the page encryption mask. */
	new_pte = pfn_pte(pfn, new_prot);
	set_pte_atomic(kpte, new_pte);

	/*
	 * If the page is set encrypted in the page table, then update the RMP
	 * table to add this page as private.
	 */
	if (enc)
		early_snp_set_memory_private((unsigned long)__va(pa), pa, 1);
}
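
/*
 * Early boot helper behind early_set_memory_encrypted() and
 * early_set_memory_decrypted(): walk the kernel page tables directly and
 * flip the encryption attribute, splitting large mappings when only part
 * of them changes state.
 */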
static int __init early_set_memory_enc_dec(unsigned long vaddr,
					   unsigned long size, bool enc)
{
	unsigned long vaddr_end, vaddr_next, start;
	unsigned long psize, pmask;
	int split_page_size_mask;
	int level, ret;
	pte_t *kpte;

	start = vaddr;
	vaddr_next = vaddr;
	vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			ret = 1;
			goto out;
		}

		if (level == PG_LEVEL_4K) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
			continue;
		}

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		/*
		 * Check whether we can change the large page in one go.
		 * We request a split when the address is not aligned and
		 * the number of pages to set/clear the encryption bit is
		 * smaller than the number of pages in the large page.
		 */
		if (vaddr == (vaddr & pmask) &&
		    ((vaddr_end - vaddr) >= psize)) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & pmask) + psize;
			continue;
		}

		/*
		 * The virtual address is part of a larger page, create the next
		 * level page table mapping (4K or 2M). If it is part of a 2M
		 * page then we request a split of the large page into 4K
		 * chunks; a 1GB large page is split into 2M pages.
		 */
		if (level == PG_LEVEL_2M)
			split_page_size_mask = 0;
		else
			split_page_size_mask = 1 << PG_LEVEL_2M;

		/*
		 * kernel_physical_mapping_change() does not flush the TLBs, so
		 * a TLB flush is required after we exit from the for loop.
		 */
		kernel_physical_mapping_change(__pa(vaddr & pmask),
					       __pa((vaddr_end & pmask) + psize),
					       split_page_size_mask);
	}

	ret = 0;

	early_set_mem_enc_dec_hypercall(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
out:
	__flush_tlb_all();
	return ret;
}

int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, false);
}

int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, true);
}

void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
{
	enc_dec_hypercall(vaddr, npages, enc);
}
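
/*
 * Fold the encryption mask into the early page-table flags and the supported
 * PTE mask, update the protection map, and wire up the x86_platform guest
 * encryption-status callbacks.
 */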
void __init sme_early_init(void)
{
	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with memory encryption mask */
	add_encrypt_protection_map();

	x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish;
	x86_platform.guest.enc_tlb_flush_required = amd_enc_tlb_flush_required;
	x86_platform.guest.enc_cache_flush_required = amd_enc_cache_flush_required;
}

void __init mem_encrypt_free_decrypted_mem(void)
{
	unsigned long vaddr, vaddr_end, npages;
	int r;

	vaddr = (unsigned long)__start_bss_decrypted_unused;
	vaddr_end = (unsigned long)__end_bss_decrypted;
	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

	/*
	 * The unused memory range was mapped decrypted, change the encryption
	 * attribute from decrypted to encrypted before freeing it.
	 */
	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		r = set_memory_encrypted(vaddr, npages);
		if (r) {
			pr_warn("failed to free unused decrypted pages\n");
			return;
		}
	}

	free_init_pages("unused decrypted", vaddr, vaddr_end);
}