Merge commit 'kvm-pagedata-alloc-fixes' into HEAD
Commit a25c78d04c

@@ -24,6 +24,7 @@ struct hyp_pool {
 /* Allocation */
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void hyp_split_page(struct hyp_page *page);
 void hyp_get_page(struct hyp_pool *pool, void *addr);
 void hyp_put_page(struct hyp_pool *pool, void *addr);
 

@@ -35,7 +35,18 @@ const u8 pkvm_hyp_id = 1;
 
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
-	return hyp_alloc_pages(&host_s2_pool, get_order(size));
+	void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
+
+	hyp_split_page(hyp_virt_to_page(addr));
+
+	/*
+	 * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
+	 * so there should be no need to free any of the tail pages to make the
+	 * allocation exact.
+	 */
+	WARN_ON(size != (PAGE_SIZE << get_order(size)));
+
+	return addr;
 }
 
 static void *host_s2_zalloc_page(void *pool)

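Note: the WARN_ON above relies on get_order() rounding the requested size up to the next power-of-two number of pages, so the check only passes when the allocation is already exact (as it is for concatenated stage-2 PGDs). A minimal user-space sketch of that arithmetic, with MODEL_PAGE_SIZE and model_get_order() as illustrative stand-ins for the kernel's PAGE_SIZE and get_order():

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT 12
#define MODEL_PAGE_SIZE  (1UL << MODEL_PAGE_SHIFT)	/* assumes 4K pages */

/* Smallest order such that (MODEL_PAGE_SIZE << order) >= size. */
static unsigned int model_get_order(size_t size)
{
	unsigned int order = 0;

	while ((MODEL_PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	/* Power-of-two page counts are already exact... */
	assert(4 * MODEL_PAGE_SIZE ==
	       (MODEL_PAGE_SIZE << model_get_order(4 * MODEL_PAGE_SIZE)));

	/* ...while 3 pages would round up to order 2 and trip the check. */
	printf("3 pages -> order %u (%lu bytes)\n",
	       model_get_order(3 * MODEL_PAGE_SIZE),
	       MODEL_PAGE_SIZE << model_get_order(3 * MODEL_PAGE_SIZE));
	return 0;
}
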
@@ -152,6 +152,7 @@ static inline void hyp_page_ref_inc(struct hyp_page *p)
 
 static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
 {
+	BUG_ON(!p->refcount);
 	p->refcount--;
 	return (p->refcount == 0);
 }

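Note: the added BUG_ON turns a refcount underflow, which would otherwise silently wrap the unsigned short counter, into an immediate, reportable failure. A small sketch of the checked decrement-and-test pattern, using assert() in place of BUG_ON():

#include <assert.h>
#include <stdbool.h>

/* Checked decrement-and-test: trap on underflow instead of wrapping the
 * unsigned short refcount around to 65535. */
static bool ref_dec_and_test(unsigned short *refcount)
{
	assert(*refcount != 0);		/* stands in for BUG_ON(!p->refcount) */
	(*refcount)--;
	return *refcount == 0;
}

int main(void)
{
	unsigned short ref = 2;

	ref_dec_and_test(&ref);			/* 2 -> 1: still in use */
	return ref_dec_and_test(&ref) ? 0 : 1;	/* 1 -> 0: page can be freed */
}
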
@@ -193,6 +194,20 @@ void hyp_get_page(struct hyp_pool *pool, void *addr)
 	hyp_spin_unlock(&pool->lock);
 }
 
+void hyp_split_page(struct hyp_page *p)
+{
+	unsigned short order = p->order;
+	unsigned int i;
+
+	p->order = 0;
+	for (i = 1; i < (1 << order); i++) {
+		struct hyp_page *tail = p + i;
+
+		tail->order = 0;
+		hyp_set_page_refcounted(tail);
+	}
+}
+
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
 {
 	unsigned short i = order;

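Note: a rough user-space model of what hyp_split_page() does, with an illustrative model_page struct rather than the real EL2 struct hyp_page. The head page drops to order 0 and every tail page of the former 1 << order block becomes an independent, refcounted order-0 page, so hyp_put_page() can later release each page on its own:

#include <stdio.h>

/* Illustrative page descriptor; the real struct hyp_page lives in the
 * hypervisor vmemmap and its refcount is set through helpers. */
struct model_page {
	unsigned short refcount;
	unsigned short order;
};

/* Split one order-N block into 2^N independent order-0 pages. */
static void model_split_page(struct model_page *p)
{
	unsigned short order = p->order;
	unsigned int i;

	p->order = 0;
	for (i = 1; i < (1u << order); i++) {
		struct model_page *tail = p + i;

		tail->order = 0;
		tail->refcount = 1;	/* models hyp_set_page_refcounted() */
	}
}

int main(void)
{
	/* One order-3 block covering 8 contiguous pages. */
	struct model_page pages[8] = { { .refcount = 1, .order = 3 } };
	unsigned int i;

	model_split_page(&pages[0]);
	for (i = 0; i < 8; i++)
		printf("page %u: order=%u refcount=%u\n",
		       i, pages[i].order, pages[i].refcount);
	return 0;
}
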
@@ -1529,8 +1529,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		 * when updating the PG_mte_tagged page flag, see
 		 * sanitise_mte_tags for more details.
 		 */
-		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED)
-			return -EINVAL;
+		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
+			ret = -EINVAL;
+			break;
+		}
 
 		if (vma->vm_flags & VM_PFNMAP) {
 			/* IO region dirty page logging not allowed */

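Note: switching from return -EINVAL to ret = -EINVAL; break; appears intended to route the error through the function's common exit path (which releases the mmap lock taken before the loop) rather than returning with it still held. A self-contained sketch of that pattern, with a pthread mutex in place of the kernel lock and scan_items() as a hypothetical stand-in for the vma-walking loop:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Walk a list of items under the lock; on a bad item, record the error
 * and break so the single unlock below always runs. */
static int scan_items(const int *items, int n)
{
	int ret = 0;
	int i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < n; i++) {
		if (items[i] < 0) {	/* analogous to the MTE + VM_SHARED case */
			ret = -1;	/* "return -1" here would leak the lock */
			break;
		}
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	int good[] = { 1, 2, 3 };
	int bad[]  = { 1, -2, 3 };

	printf("good: %d, bad: %d\n", scan_items(good, 3), scan_items(bad, 3));
	return 0;
}
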
@@ -2583,7 +2583,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 		return -EINVAL;
 
 	return kvm_sev_es_string_io(&svm->vcpu, size, port,
-				    svm->ghcb_sa, svm->ghcb_sa_len, in);
+				    svm->ghcb_sa, svm->ghcb_sa_len / size, in);
 }
 
 void sev_es_init_vmcb(struct vcpu_svm *svm)

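Note: kvm_sev_es_string_io() takes a repetition count, while ghcb_sa_len is a byte length of the shared GHCB buffer, so the count is the byte length divided by the element size. A tiny sketch with model_string_io() as a hypothetical stand-in for the real helper:

#include <stdio.h>

/* Hypothetical stand-in for kvm_sev_es_string_io(): the count argument
 * is the number of I/O repetitions, not a byte length. */
static int model_string_io(int size, unsigned int port, unsigned int count, int in)
{
	printf("%s port 0x%x: %u element(s) of %d byte(s)\n",
	       in ? "IN" : "OUT", port, count, size);
	return 0;
}

int main(void)
{
	unsigned int ghcb_sa_len = 16;	/* bytes available in the shared buffer */
	int size = 4;			/* dword string I/O */

	/* Passing ghcb_sa_len directly would claim 16 repetitions;
	 * dividing by the element size gives the intended 4. */
	return model_string_io(size, 0x3f8, ghcb_sa_len / size, 1);
}
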
@@ -11370,7 +11370,8 @@ static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
 		int level = i + 1;
 		int lpages = __kvm_mmu_slot_lpages(slot, npages, level);
 
-		WARN_ON(slot->arch.rmap[i]);
+		if (slot->arch.rmap[i])
+			continue;
 
 		slot->arch.rmap[i] = kvcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
 		if (!slot->arch.rmap[i]) {

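Note: replacing WARN_ON(slot->arch.rmap[i]) with a skip makes the allocation idempotent: levels that already have an rmap are left alone, so the function can safely be invoked again for a partially initialized slot. A user-space sketch of the same pattern, with alloc_levels() as a hypothetical stand-in:

#include <stdio.h>
#include <stdlib.h>

#define NR_LEVELS 3

/* Idempotent per-level allocation: levels that are already populated are
 * skipped instead of warning, so a repeat call is harmless. */
static int alloc_levels(void *slots[NR_LEVELS], size_t bytes)
{
	int i;

	for (i = 0; i < NR_LEVELS; i++) {
		if (slots[i])		/* was WARN_ON(slots[i]) */
			continue;

		slots[i] = calloc(1, bytes);
		if (!slots[i])
			return -1;	/* caller may retry later */
	}
	return 0;
}

int main(void)
{
	void *slots[NR_LEVELS] = { NULL };
	int i;

	alloc_levels(slots, 64);
	alloc_levels(slots, 64);	/* no-op for already-populated levels */
	for (i = 0; i < NR_LEVELS; i++)
		free(slots[i]);
	return 0;
}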