mm: x86: Invoke hypercall when page encryption status is changed
Invoke a hypercall when a memory region is changed from encrypted ->
decrypted and vice versa. The hypervisor needs to know the page encryption
status during guest migration.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: x86@kernel.org
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Reviewed-by: Steve Rutherford <srutherford@google.com>
Reviewed-by: Venu Busireddy <venu.busireddy@oracle.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Message-Id: <0a237d5bb08793916c7790a3e653a2cbe7485761.1629726117.git.ashish.kalra@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent: 08c2336df7
Commit: 064ce6c550
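For orientation: the paravirt hook added by this patch defaults to a no-op (see the pv_ops hunk below); a SEV guest is expected to install its own implementation that forwards the status change to the hypervisor in a separate patch of the series. The sketch below is illustrative only -- kvm_hypercall3(), KVM_HC_MAP_GPA_RANGE and the KVM_MAP_GPA_RANGE_* flags are assumptions of this example, not something introduced by this commit:

/*
 * Illustrative guest-side implementation of the new hook.  The hypercall
 * number, its argument layout and the flag names are assumptions made for
 * this sketch; the page-size flag is omitted for brevity.
 */
static void kvm_notify_page_enc_status_changed(unsigned long pfn,
                                               int npages, bool enc)
{
        /* Report the guest physical range and its new encryption state. */
        kvm_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages,
                       enc ? KVM_MAP_GPA_RANGE_ENCRYPTED
                           : KVM_MAP_GPA_RANGE_DECRYPTED);
}

static void __init kvm_init_page_enc_notify(void)
{
        /* Replace the paravirt_nop default installed by this patch. */
        pv_ops.mmu.notify_page_enc_status_changed =
                kvm_notify_page_enc_status_changed;
}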
@@ -97,6 +97,12 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
         PVOP_VCALL1(mmu.exit_mmap, mm);
 }
 
+static inline void notify_page_enc_status_changed(unsigned long pfn,
+                                                  int npages, bool enc)
+{
+        PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
+}
+
 #ifdef CONFIG_PARAVIRT_XXL
 static inline void load_sp0(unsigned long sp0)
 {
@@ -168,6 +168,7 @@ struct pv_mmu_ops {
 
         /* Hook for intercepting the destruction of an mm_struct. */
         void (*exit_mmap)(struct mm_struct *mm);
+        void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);
 
 #ifdef CONFIG_PARAVIRT_XXL
         struct paravirt_callee_save read_cr2;
@@ -83,6 +83,7 @@ int set_pages_rw(struct page *page, int numpages);
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 bool kernel_page_present(struct page *page);
+void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc);
 
 extern int kernel_set_to_readonly;
 
@@ -296,6 +296,7 @@ struct paravirt_patch_template pv_ops = {
                         (void (*)(struct mmu_gather *, void *))tlb_remove_page,
 
         .mmu.exit_mmap = paravirt_nop,
+        .mmu.notify_page_enc_status_changed = paravirt_nop,
 
 #ifdef CONFIG_PARAVIRT_XXL
         .mmu.read_cr2 = __PV_IS_CALLEE_SAVE(native_read_cr2),
@@ -228,28 +228,75 @@ void __init sev_setup_arch(void)
         swiotlb_adjust_size(size);
 }
 
+static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
+{
+        unsigned long pfn = 0;
+        pgprot_t prot;
+
+        switch (level) {
+        case PG_LEVEL_4K:
+                pfn = pte_pfn(*kpte);
+                prot = pte_pgprot(*kpte);
+                break;
+        case PG_LEVEL_2M:
+                pfn = pmd_pfn(*(pmd_t *)kpte);
+                prot = pmd_pgprot(*(pmd_t *)kpte);
+                break;
+        case PG_LEVEL_1G:
+                pfn = pud_pfn(*(pud_t *)kpte);
+                prot = pud_pgprot(*(pud_t *)kpte);
+                break;
+        default:
+                WARN_ONCE(1, "Invalid level for kpte\n");
+                return 0;
+        }
+
+        if (ret_prot)
+                *ret_prot = prot;
+
+        return pfn;
+}
+
+void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
+{
+#ifdef CONFIG_PARAVIRT
+        unsigned long sz = npages << PAGE_SHIFT;
+        unsigned long vaddr_end = vaddr + sz;
+
+        while (vaddr < vaddr_end) {
+                int psize, pmask, level;
+                unsigned long pfn;
+                pte_t *kpte;
+
+                kpte = lookup_address(vaddr, &level);
+                if (!kpte || pte_none(*kpte)) {
+                        WARN_ONCE(1, "kpte lookup for vaddr\n");
+                        return;
+                }
+
+                pfn = pg_level_to_pfn(level, kpte, NULL);
+                if (!pfn)
+                        continue;
+
+                psize = page_level_size(level);
+                pmask = page_level_mask(level);
+
+                notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc);
+
+                vaddr = (vaddr & pmask) + psize;
+        }
+#endif
+}
+
 static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 {
         pgprot_t old_prot, new_prot;
         unsigned long pfn, pa, size;
         pte_t new_pte;
 
-        switch (level) {
-        case PG_LEVEL_4K:
-                pfn = pte_pfn(*kpte);
-                old_prot = pte_pgprot(*kpte);
-                break;
-        case PG_LEVEL_2M:
-                pfn = pmd_pfn(*(pmd_t *)kpte);
-                old_prot = pmd_pgprot(*(pmd_t *)kpte);
-                break;
-        case PG_LEVEL_1G:
-                pfn = pud_pfn(*(pud_t *)kpte);
-                old_prot = pud_pgprot(*(pud_t *)kpte);
-                break;
-        default:
-                return;
-        }
-
+        pfn = pg_level_to_pfn(level, kpte, &old_prot);
+        if (!pfn)
+                return;
+
         new_prot = old_prot;
         if (enc)
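To make the loop in notify_range_enc_status_changed() above concrete: each iteration looks up the mapping level of the current address, reports the whole mapping with a single hook call, then jumps to the next mapping boundary. A worked trace with made-up addresses and pfns:

/*
 * Worked example (addresses and pfns are invented for illustration):
 *
 *   vaddr = 0xffffc90000001000, mapped at PG_LEVEL_4K
 *       psize = 4K, pmask = ~(4K - 1)
 *       notify_page_enc_status_changed(pfn, 1, enc);
 *       vaddr = (vaddr & pmask) + psize;   ->  0xffffc90000002000
 *
 *   vaddr = 0xffffc90000200000, mapped at PG_LEVEL_2M
 *       psize = 2M, pmask = ~(2M - 1)
 *       notify_page_enc_status_changed(pfn, 512, enc);
 *       vaddr = (vaddr & pmask) + psize;   ->  0xffffc90000400000
 */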
@@ -285,12 +332,13 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 static int __init early_set_memory_enc_dec(unsigned long vaddr,
                                            unsigned long size, bool enc)
 {
-        unsigned long vaddr_end, vaddr_next;
+        unsigned long vaddr_end, vaddr_next, start;
         unsigned long psize, pmask;
         int split_page_size_mask;
         int level, ret;
         pte_t *kpte;
 
+        start = vaddr;
         vaddr_next = vaddr;
         vaddr_end = vaddr + size;
 
@@ -345,6 +393,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
 
         ret = 0;
 
+        notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
 out:
         __flush_tlb_all();
         return ret;
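For context, early_set_memory_enc_dec() is not called directly by its users; it sits behind the existing early_set_memory_{en,de}crypted() wrappers in the same file, roughly as sketched below (a simplified sketch of surrounding code, not part of this diff). With this change, both wrappers now also notify the hypervisor through notify_range_enc_status_changed():

/* Simplified sketch of the existing wrappers around the patched function. */
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
        return early_set_memory_enc_dec(vaddr, size, false);
}

int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
        return early_set_memory_enc_dec(vaddr, size, true);
}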
@@ -2020,6 +2020,12 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
          */
         cpa_flush(&cpa, 0);
 
+        /*
+         * Notify hypervisor that a given memory range is mapped encrypted
+         * or decrypted.
+         */
+        notify_range_enc_status_changed(addr, numpages, enc);
+
         return ret;
 }
 
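Similarly, __set_memory_enc_dec() is reached through the runtime set_memory_{en,de}crypted() helpers, so after this change any driver that flips a mapping's encryption attribute also reports it to the hypervisor. A minimal illustrative caller (the buffer and the helper name are made up for the example):

#include <linux/mm.h>
#include <linux/set_memory.h>

/*
 * Hypothetical driver path: mark a buffer shared (decrypted) with the host.
 * The encryption attribute is cleared in the kernel mapping and, via
 * notify_range_enc_status_changed(), the hypervisor learns that these
 * guest pages are now unencrypted.
 */
static int share_buffer_with_host(void *buf, size_t size)
{
        return set_memory_decrypted((unsigned long)buf,
                                    PAGE_ALIGN(size) >> PAGE_SHIFT);
}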