KVM: selftests: Move per-VM/per-vCPU nr pages calculation to __vm_create()

Handle all memslot0 size adjustments in __vm_create().  Currently, the
adjustments reside in __vm_create_with_vcpus(), which means tests that
call vm_create() or __vm_create() directly are left to their own devices.
Some tests just pass DEFAULT_GUEST_PHY_PAGES and don't bother with any
adjustments, while others mimic the per-vCPU calculations.

For vm_create(), and thus __vm_create(), take the number of vCPUs that
will be runnable to calculate the number of per-vCPU pages needed for
memslot0.  To give readers a hint that neither vm_create() nor
__vm_create() create vCPUs, name the parameter @nr_runnable_vcpus instead
of @nr_vcpus.  That also gives readers a hint as to why tests that create
larger numbers of vCPUs but never actually run those vCPUs can skip
straight to the vm_create_barebones() variant.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Sean Christopherson authored on 2022-05-03 09:52:48 -07:00, committed by Paolo Bonzini
Parent: acaf50ad6d
Commit: 6e1d13bf38
10 changed files with 53 additions and 35 deletions
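
To see the shape of the change at a call site, here is a minimal before/after sketch (hypothetical test code; NR_VCPUS stands in for however many vCPUs the test will actually run):

	/* Before: each test sized memslot0 by hand, or simply didn't. */
	vm = vm_create(DEFAULT_GUEST_PHY_PAGES + DEFAULT_STACK_PGS * NR_VCPUS);

	/*
	 * After: pass the number of runnable vCPUs and let __vm_create()
	 * compute the per-vCPU stack and page table pages itself.
	 */
	vm = vm_create(NR_VCPUS);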

tools/testing/selftests/kvm/aarch64/psci_test.c

@@ -76,7 +76,7 @@ static struct kvm_vm *setup_vm(void *guest_code, struct kvm_vcpu **source,
 	struct kvm_vcpu_init init;
 	struct kvm_vm *vm;
 
-	vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
+	vm = vm_create(2);
 	ucall_init(vm, NULL);
 
 	vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);

tools/testing/selftests/kvm/aarch64/vgic_init.c

@@ -419,7 +419,7 @@ static void test_v3_typer_accesses(void)
 	uint64_t addr;
 	int ret, i;
 
-	v.vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
+	v.vm = vm_create(NR_VCPUS);
 	(void)vm_vcpu_add(v.vm, 0, guest_code);
 
 	v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3);
@@ -479,7 +479,7 @@ static struct vm_gic vm_gic_v3_create_with_vcpuids(int nr_vcpus,
 	struct vm_gic v;
 	int i;
 
-	v.vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
+	v.vm = vm_create(nr_vcpus);
 	for (i = 0; i < nr_vcpus; i++)
 		vm_vcpu_add(v.vm, vcpuids[i], guest_code);

tools/testing/selftests/kvm/dirty_log_test.c

@@ -669,11 +669,10 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
 			       uint64_t extra_mem_pages, void *guest_code)
 {
 	struct kvm_vm *vm;
-	uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
 
 	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
 
-	vm = __vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages);
+	vm = __vm_create(mode, 1, extra_mem_pages);
 
 	log_mode_create_vm_done(vm);
 	*vcpu = vm_vcpu_add(vm, 0, guest_code);

tools/testing/selftests/kvm/hardware_disable_test.c

@@ -98,7 +98,7 @@ static void run_test(uint32_t run)
 	for (i = 0; i < VCPU_NUM; i++)
 		CPU_SET(i, &cpu_set);
 
-	vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
+	vm = vm_create(VCPU_NUM);
 
 	pr_debug("%s: [%d] start vcpus\n", __func__, run);
 	for (i = 0; i < VCPU_NUM; ++i) {

tools/testing/selftests/kvm/include/kvm_util_base.h

@@ -547,18 +547,21 @@ vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
 /*
  * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
  * loads the test binary into guest memory and creates an IRQ chip (x86 only).
+ * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
+ * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
  */
 struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages);
-struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint64_t nr_pages);
+struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
+			   uint64_t nr_extra_pages);
 
 static inline struct kvm_vm *vm_create_barebones(void)
 {
 	return ____vm_create(VM_MODE_DEFAULT, 0);
 }
 
-static inline struct kvm_vm *vm_create(uint64_t nr_pages)
+static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
 {
-	return __vm_create(VM_MODE_DEFAULT, nr_pages);
+	return __vm_create(VM_MODE_DEFAULT, nr_runnable_vcpus, 0);
 }
 
 struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
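
A rough sketch of how the creation helpers divide up after this change (illustrative calls only; the vCPU count and nr_extra_pages value are assumptions, not from this commit):

	/* VM whose vCPUs, if any, are never run: no memslot0 sizing needed. */
	vm = vm_create_barebones();

	/* VM that will run three vCPUs, added later via vm_vcpu_add(). */
	vm = vm_create(3);

	/* As above, but reserving extra guest memory beyond the defaults. */
	vm = __vm_create(VM_MODE_DEFAULT, 3, nr_extra_pages);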

tools/testing/selftests/kvm/lib/kvm_util.c

@@ -258,11 +258,45 @@ struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages)
 	return vm;
 }
 
-struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint64_t nr_pages)
+static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
+				     uint32_t nr_runnable_vcpus,
+				     uint64_t extra_mem_pages)
 {
-	struct kvm_vm *vm;
+	uint64_t nr_pages;
 
-	nr_pages = vm_adjust_num_guest_pages(mode, nr_pages);
+	TEST_ASSERT(nr_runnable_vcpus,
+		    "Use vm_create_barebones() for VMs that _never_ have vCPUs\n");
+
+	TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
+		    "nr_vcpus = %d too large for host, max-vcpus = %d",
+		    nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
+
+	nr_pages = DEFAULT_GUEST_PHY_PAGES;
+	nr_pages += nr_runnable_vcpus * DEFAULT_STACK_PGS;
+
+	/*
+	 * Account for the number of pages needed for the page tables.  The
+	 * maximum page table size for a memory region will be when the
+	 * smallest page size is used. Considering each page contains x page
+	 * table descriptors, the total extra size for page tables (for extra
+	 * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
+	 * than N/x*2.
+	 */
+	nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2;
+
+	return vm_adjust_num_guest_pages(mode, nr_pages);
+}
+
+struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
+			   uint64_t nr_extra_pages)
+{
+	uint64_t nr_pages = vm_nr_pages_required(mode, nr_runnable_vcpus,
+						 nr_extra_pages);
+	struct kvm_vm *vm;
 
 	vm = ____vm_create(mode, nr_pages);
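
(Aside: the "N/x*2" bound cited in the comment above is the usual geometric series argument. With x == PTES_PER_MIN_PAGE descriptors per page-table page, mapping N pages takes at most N/x + N/x^2 + N/x^3 + ... = N/(x-1) page-table pages, and N/(x-1) <= 2*(N/x) for any x >= 2, so reserving N/x*2 pages always suffices.)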
@@ -297,27 +331,12 @@ struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
 				      uint64_t extra_mem_pages,
 				      void *guest_code, struct kvm_vcpu *vcpus[])
 {
-	uint64_t vcpu_pages, extra_pg_pages, pages;
 	struct kvm_vm *vm;
 	int i;
 
 	TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");
 
-	/* The maximum page table size for a memory region will be when the
-	 * smallest pages are used. Considering each page contains x page
-	 * table descriptors, the total extra size for page tables (for extra
-	 * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
-	 * than N/x*2.
-	 */
-	vcpu_pages = nr_vcpus * DEFAULT_STACK_PGS;
-	extra_pg_pages = (DEFAULT_GUEST_PHY_PAGES + extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
-	pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
-
-	TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
-		    "nr_vcpus = %d too large for host, max-vcpus = %d",
-		    nr_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
-
-	vm = __vm_create(mode, pages);
+	vm = __vm_create(mode, nr_vcpus, extra_mem_pages);
 
 	for (i = 0; i < nr_vcpus; ++i)
 		vcpus[i] = vm_vcpu_add(vm, i, guest_code);

tools/testing/selftests/kvm/s390x/resets.c

@@ -206,7 +206,7 @@ static struct kvm_vm *create_vm(struct kvm_vcpu **vcpu)
 {
 	struct kvm_vm *vm;
 
-	vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
+	vm = vm_create(1);
 
 	*vcpu = vm_vcpu_add(vm, ARBITRARY_NON_ZERO_VCPU_ID, guest_code_initial);

tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c

@@ -365,7 +365,7 @@ static void test_pmu_config_disable(void (*guest_code)(void))
 	if (!(r & KVM_PMU_CAP_DISABLE))
 		return;
 
-	vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
+	vm = vm_create(1);
 
 	vm_enable_cap(vm, KVM_CAP_PMU_CAPABILITY, KVM_PMU_CAP_DISABLE);

tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c

@@ -78,13 +78,10 @@ static void run_vcpu(struct kvm_vcpu *vcpu)
 static struct kvm_vm *create_vm(uint32_t nr_vcpus, uint32_t bsp_vcpu_id,
 				struct kvm_vcpu *vcpus[])
 {
-	uint64_t vcpu_pages = (DEFAULT_STACK_PGS) * nr_vcpus;
-	uint64_t extra_pg_pages = vcpu_pages / PTES_PER_MIN_PAGE * nr_vcpus;
-	uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
 	struct kvm_vm *vm;
 	uint32_t i;
 
-	vm = vm_create(pages);
+	vm = vm_create(nr_vcpus);
 
 	vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *)(unsigned long)bsp_vcpu_id);

tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c

@@ -98,7 +98,7 @@ int main(int argc, char *argv[])
 		exit(KSFT_SKIP);
 	}
 
-	vm = vm_create(DEFAULT_GUEST_PHY_PAGES + DEFAULT_STACK_PGS * NR_TEST_VCPUS);
+	vm = vm_create(NR_TEST_VCPUS);
 	vm_ioctl(vm, KVM_SET_TSC_KHZ, (void *) TEST_TSC_KHZ);
 
 	pthread_spin_init(&create_lock, PTHREAD_PROCESS_PRIVATE);