KVM: arm64: Back the hypervisor 'struct hyp_page' array for all memory
The EL2 'vmemmap' array in nVHE Protected mode is currently very sparse: only memory pages owned by the hypervisor itself have a matching 'struct hyp_page'. However, as the size of this struct has been reduced significantly since its introduction, it appears that we can now afford to back the vmemmap for all of memory.

Having an easily accessible 'struct hyp_page' for every physical page in memory provides the hypervisor with a simple mechanism to store metadata (e.g. a refcount) that wouldn't otherwise fit in the very limited number of software bits available in the host stage-2 page-table entries.

This will be used in subsequent patches when pinning host memory pages for use by the hypervisor at EL2.

Tested-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221110190259.26861-4-will@kernel.org
Parent: 72a5bc0f15
Commit: 8e6bcc3a45
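As background for the diff below, here is a rough, standalone userspace sketch of the vmemmap sizing arithmetic that the new hyp_vmemmap_memblock_size()/hyp_vmemmap_pages() helpers perform. The memory map, the 8-byte entry size and everything outside those two calculations are illustrative assumptions, not part of the commit.

/*
 * Illustrative only: a userspace model of the sizing arithmetic done by
 * the new hyp_vmemmap_pages() helper. The region list below and the
 * 8-byte vmemmap entry size are made-up example values (assumes LP64).
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))
#define ALIGN(x, a)             ALIGN_DOWN((x) + (a) - 1, (a))

struct region {
        unsigned long base;     /* physical base address of the memblock */
        unsigned long size;     /* size of the memblock in bytes */
};

/* Same idea as hyp_vmemmap_memblock_size(): bytes of vmemmap covering one region. */
static unsigned long vmemmap_region_size(const struct region *reg, size_t entry_size)
{
        unsigned long nr_pages = reg->size >> PAGE_SHIFT;
        unsigned long start, end;

        /* Byte offset of this region's entries within the whole vmemmap. */
        start = (reg->base >> PAGE_SHIFT) * entry_size;
        end = start + nr_pages * entry_size;
        start = ALIGN_DOWN(start, PAGE_SIZE);
        end = ALIGN(end, PAGE_SIZE);

        return end - start;
}

int main(void)
{
        /* Hypothetical memory map: 2GiB at 1GiB and 1GiB at 4GiB. */
        const struct region regions[] = {
                { .base = 0x40000000UL,  .size = 0x80000000UL },
                { .base = 0x100000000UL, .size = 0x40000000UL },
        };
        size_t entry_size = 8;  /* assumed sizeof(struct hyp_page) */
        unsigned long bytes = 0;
        size_t i;

        for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
                bytes += vmemmap_region_size(&regions[i], entry_size);

        /* Equivalent of hyp_vmemmap_pages(): pages needed to back the vmemmap. */
        printf("vmemmap pages to reserve: %lu\n", bytes >> PAGE_SHIFT);
        return 0;
}

With the example values above the program prints 1536, i.e. 6MiB of vmemmap for 3GiB of memory; the real figure depends entirely on the actual memblock layout and sizeof(struct hyp_page).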
@@ -14,6 +14,32 @@
 extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
 extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
 
+static inline unsigned long
+hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
+{
+	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
+	unsigned long start, end;
+
+	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
+	end = start + nr_pages * vmemmap_entry_size;
+	start = ALIGN_DOWN(start, PAGE_SIZE);
+	end = ALIGN(end, PAGE_SIZE);
+
+	return end - start;
+}
+
+static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
+{
+	unsigned long res = 0, i;
+
+	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
+		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
+						 vmemmap_entry_size);
+	}
+
+	return res >> PAGE_SHIFT;
+}
+
 static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
 {
 	unsigned long total = 0, i;
@@ -15,7 +15,7 @@ extern hyp_spinlock_t pkvm_pgd_lock;
 
 int hyp_create_idmap(u32 hyp_va_bits);
 int hyp_map_vectors(void);
-int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back);
+int hyp_back_vmemmap(phys_addr_t back);
 int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot);
 int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
 int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot);
@@ -24,16 +24,4 @@ int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
 				  unsigned long *haddr);
 int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr);
 
-static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size,
-				     unsigned long *start, unsigned long *end)
-{
-	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct hyp_page *p = hyp_phys_to_page(phys);
-
-	*start = (unsigned long)p;
-	*end = *start + nr_pages * sizeof(struct hyp_page);
-	*start = ALIGN_DOWN(*start, PAGE_SIZE);
-	*end = ALIGN(*end, PAGE_SIZE);
-}
-
 #endif /* __KVM_HYP_MM_H */
@@ -129,13 +129,36 @@ int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
 	return ret;
 }
 
-int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
+int hyp_back_vmemmap(phys_addr_t back)
 {
-	unsigned long start, end;
+	unsigned long i, start, size, end = 0;
+	int ret;
 
-	hyp_vmemmap_range(phys, size, &start, &end);
+	for (i = 0; i < hyp_memblock_nr; i++) {
+		start = hyp_memory[i].base;
+		start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
+		/*
+		 * The beginning of the hyp_vmemmap region for the current
+		 * memblock may already be backed by the page backing the end
+		 * of the previous region, so avoid mapping it twice.
+		 */
+		start = max(start, end);
 
-	return __pkvm_create_mappings(start, end - start, back, PAGE_HYP);
+		end = hyp_memory[i].base + hyp_memory[i].size;
+		end = PAGE_ALIGN((u64)hyp_phys_to_page(end));
+		if (start >= end)
+			continue;
+
+		size = end - start;
+		ret = __pkvm_create_mappings(start, size, back, PAGE_HYP);
+		if (ret)
+			return ret;
+
+		memset(hyp_phys_to_virt(back), 0, size);
+		back += size;
+	}
+
+	return 0;
 }
 
 static void *__hyp_bp_vect_base;
@@ -236,10 +236,8 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
 
 	/* Init the vmemmap portion */
 	p = hyp_phys_to_page(phys);
-	for (i = 0; i < nr_pages; i++) {
-		p[i].order = 0;
+	for (i = 0; i < nr_pages; i++)
 		hyp_set_page_refcounted(&p[i]);
-	}
 
 	/* Attach the unused pages to the buddy tree */
 	for (i = reserved_pages; i < nr_pages; i++)
@@ -31,12 +31,11 @@ static struct hyp_pool hpool;
 
 static int divide_memory_pool(void *virt, unsigned long size)
 {
-	unsigned long vstart, vend, nr_pages;
+	unsigned long nr_pages;
 
 	hyp_early_alloc_init(virt, size);
 
-	hyp_vmemmap_range(__hyp_pa(virt), size, &vstart, &vend);
-	nr_pages = (vend - vstart) >> PAGE_SHIFT;
+	nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
 	vmemmap_base = hyp_early_alloc_contig(nr_pages);
 	if (!vmemmap_base)
 		return -ENOMEM;
@@ -78,7 +77,7 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
 	if (ret)
 		return ret;
 
-	ret = hyp_back_vmemmap(phys, size, hyp_virt_to_phys(vmemmap_base));
+	ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
 	if (ret)
 		return ret;
 
@@ -53,7 +53,7 @@ static int __init register_memblock_regions(void)
 
 void __init kvm_hyp_reserve(void)
 {
-	u64 nr_pages, prev, hyp_mem_pages = 0;
+	u64 hyp_mem_pages = 0;
 	int ret;
 
 	if (!is_hyp_mode_available() || is_kernel_in_hyp_mode())
@@ -71,21 +71,7 @@ void __init kvm_hyp_reserve(void)
 
 	hyp_mem_pages += hyp_s1_pgtable_pages();
 	hyp_mem_pages += host_s2_pgtable_pages();
-
-	/*
-	 * The hyp_vmemmap needs to be backed by pages, but these pages
-	 * themselves need to be present in the vmemmap, so compute the number
-	 * of pages needed by looking for a fixed point.
-	 */
-	nr_pages = 0;
-	do {
-		prev = nr_pages;
-		nr_pages = hyp_mem_pages + prev;
-		nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE,
-					PAGE_SIZE);
-		nr_pages += __hyp_pgtable_max_pages(nr_pages);
-	} while (nr_pages != prev);
-	hyp_mem_pages += nr_pages;
+	hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
 
 	/*
 	 * Try to allocate a PMD-aligned region to reduce TLB pressure once