KVM: selftests: Make vm_create() a wrapper that specifies VM_MODE_DEFAULT

Add ____vm_create() to be the innermost helper, and turn vm_create() into
a wrapper that specifies VM_MODE_DEFAULT.  Most of the vm_create() callers
just want the default mode, or more accurately, don't care about the mode.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Sean Christopherson 2022-04-19 14:21:38 -07:00, committed by Paolo Bonzini
Parent: cfe122db3e
Commit: 3f44e7fdca
7 changed files, 26 insertions, 18 deletions
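
For context, here is a minimal sketch (not part of the patch; the wrapper function and its name are purely illustrative) of which creation layer a caller picks after this change, using only helpers that appear in the hunks below:

#include "kvm_util.h"

static void creation_layers_example(enum vm_guest_mode mode, uint64_t extra_pg_pages)
{
	/* Most callers don't care about the mode: vm_create() picks VM_MODE_DEFAULT. */
	struct kvm_vm *vm = vm_create(DEFAULT_GUEST_PHY_PAGES);

	/* Tests that iterate over guest modes pass one explicitly via __vm_create(). */
	struct kvm_vm *mode_vm = __vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages);

	/* ____vm_create() only does KVM_CREATE_VM; vm_create_barebones() wraps it. */
	struct kvm_vm *bare_vm = vm_create_barebones();

	kvm_vm_free(bare_vm);
	kvm_vm_free(mode_vm);
	kvm_vm_free(vm);
}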


@@ -78,7 +78,7 @@ static struct kvm_vm *setup_vm(void *guest_code)
 	struct kvm_vcpu_init init;
 	struct kvm_vm *vm;
 
-	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES);
+	vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
 	ucall_init(vm, NULL);
 
 	vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);


@@ -674,7 +674,7 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
 	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
 
-	vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages);
+	vm = __vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages);
 
 	log_mode_create_vm_done(vm);
 	vm_vcpu_add_default(vm, vcpuid, guest_code);


@@ -104,7 +104,7 @@ static void run_test(uint32_t run)
 	for (i = 0; i < VCPU_NUM; i++)
 		CPU_SET(i, &cpu_set);
 
-	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES);
+	vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
 
 	pr_debug("%s: [%d] start vcpus\n", __func__, run);
 	for (i = 0; i < VCPU_NUM; ++i) {


@@ -247,7 +247,6 @@ static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
 const char *vm_guest_mode_string(uint32_t i);
 
-struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint64_t phy_pages);
 void kvm_vm_free(struct kvm_vm *vmp);
 void kvm_vm_restart(struct kvm_vm *vmp);
 void kvm_vm_release(struct kvm_vm *vmp);
@@ -595,9 +594,21 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 			      vm_paddr_t paddr_min, uint32_t memslot);
 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
 
+/*
+ * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
+ * loads the test binary into guest memory and creates an IRQ chip (x86 only).
+ */
+struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages);
+struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint64_t nr_pages);
+
 static inline struct kvm_vm *vm_create_barebones(void)
 {
-	return __vm_create(VM_MODE_DEFAULT, 0);
+	return ____vm_create(VM_MODE_DEFAULT, 0);
+}
+
+static inline struct kvm_vm *vm_create(uint64_t nr_pages)
+{
+	return __vm_create(VM_MODE_DEFAULT, nr_pages);
 }
 
 /*
@@ -629,9 +640,6 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
 				    uint32_t num_percpu_pages, void *guest_code,
 				    uint32_t vcpuids[]);
 
-/* Create a default VM without any vcpus. */
-struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t pages);
-
 /*
  * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
  * additional pages of guest memory. Returns the VM and vCPU (via out param).


@@ -149,12 +149,12 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
 _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
 	       "Missing new mode params?");
 
-struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint64_t phy_pages)
+struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages)
 {
 	struct kvm_vm *vm;
 
 	pr_debug("%s: mode='%s' pages='%ld'\n", __func__,
-		 vm_guest_mode_string(mode), phy_pages);
+		 vm_guest_mode_string(mode), nr_pages);
 
 	vm = calloc(1, sizeof(*vm));
 	TEST_ASSERT(vm != NULL, "Insufficient Memory");
@@ -251,20 +251,20 @@ struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint64_t phy_pages)
 	/* Allocate and setup memory for guest. */
 	vm->vpages_mapped = sparsebit_alloc();
-	if (phy_pages != 0)
+	if (nr_pages != 0)
 		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
-					    0, 0, phy_pages, 0);
+					    0, 0, nr_pages, 0);
 
 	return vm;
 }
 
-struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t pages)
+struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint64_t nr_pages)
 {
 	struct kvm_vm *vm;
 
-	pages = vm_adjust_num_guest_pages(mode, pages);
+	nr_pages = vm_adjust_num_guest_pages(mode, nr_pages);
 
-	vm = __vm_create(mode, pages);
+	vm = ____vm_create(mode, nr_pages);
 
 	kvm_vm_elf_load(vm, program_invocation_name);
@@ -323,7 +323,7 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
 		    "nr_vcpus = %d too large for host, max-vcpus = %d",
 		    nr_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
 
-	vm = vm_create(mode, pages);
+	vm = __vm_create(mode, pages);
 
 	for (i = 0; i < nr_vcpus; ++i) {
 		uint32_t vcpuid = vcpuids ? vcpuids[i] : i;


@@ -365,7 +365,7 @@ static void test_pmu_config_disable(void (*guest_code)(void))
 	if (!(r & KVM_PMU_CAP_DISABLE))
 		return;
 
-	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES);
+	vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
 
 	vm_enable_cap(vm, KVM_CAP_PMU_CAPABILITY, KVM_PMU_CAP_DISABLE);


@@ -86,7 +86,7 @@ static struct kvm_vm *create_vm(void)
 	uint64_t extra_pg_pages = vcpu_pages / PTES_PER_MIN_PAGE * N_VCPU;
 	uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
 
-	return vm_create(VM_MODE_DEFAULT, pages);
+	return vm_create(pages);
 }
 
 static void add_x86_vcpu(struct kvm_vm *vm, uint32_t vcpuid, bool bsp_code)