selftests: kvm: Merge user_msr_test into userspace_msr_exit_test

Both user_msr_test and userspace_msr_exit_test test the functionality
of kvm_msr_filter.  Instead of testing this feature in two separate
tests, merge them so there is only one test for this feature.

Signed-off-by: Aaron Lewis <aaronlewis@google.com>
Message-Id: <20201204172530.2958493-1-aaronlewis@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Aaron Lewis 2020-12-04 09:25:31 -08:00, committed by Paolo Bonzini
Parent 3cea189174
Commit fb6360534e
4 changed files: 236 additions and 279 deletions
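For context before the diffs: the sketch below is not part of the patch. It is a minimal, untested illustration of the API the merged test exercises, assuming a uapi <linux/kvm.h> with MSR filtering support: enable KVM_CAP_X86_USER_SPACE_MSR on a VM so filtered/unknown/invalid MSR accesses exit to userspace, then install a deny-by-default filter with KVM_X86_SET_MSR_FILTER. A real VMM (and the selftest) would also create a vCPU, map guest memory, and service KVM_EXIT_X86_RDMSR/KVM_EXIT_X86_WRMSR from its KVM_RUN loop.

/* Sketch only: error handling trimmed, no vCPU or guest set up. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	static unsigned char allow_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);

	/* Bounce filtered, unknown, and invalid MSR accesses to userspace. */
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_X86_USER_SPACE_MSR,
		.args[0] = KVM_MSR_EXIT_REASON_INVAL |
			   KVM_MSR_EXIT_REASON_UNKNOWN |
			   KVM_MSR_EXIT_REASON_FILTER,
	};
	ioctl(vm, KVM_ENABLE_CAP, &cap);

	/* Deny everything except reads/writes in the 0xc0000000 MSR range. */
	memset(allow_c0000000, 0xff, sizeof(allow_c0000000));
	struct kvm_msr_filter filter = {
		.flags = KVM_MSR_FILTER_DEFAULT_DENY,
		.ranges = {{
			.flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
			.base = 0xc0000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * 8,
			.bitmap = allow_c0000000,
		}},
	};
	return ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter) ? 1 : 0;
}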

tools/testing/selftests/kvm/.gitignore

@@ -17,7 +17,6 @@
/x86_64/svm_vmcall_test
/x86_64/sync_regs_test
/x86_64/tsc_msrs_test
/x86_64/user_msr_test
/x86_64/userspace_msr_exit_test
/x86_64/vmx_apic_access_test
/x86_64/vmx_close_while_nested_test

tools/testing/selftests/kvm/Makefile

@@ -50,7 +50,6 @@ TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
TEST_GEN_PROGS_x86_64 += x86_64/user_msr_test
TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test

tools/testing/selftests/kvm/x86_64/user_msr_test.c

@@ -1,251 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* tests for KVM_CAP_X86_USER_SPACE_MSR and KVM_X86_SET_MSR_FILTER
*
* Copyright (C) 2020, Amazon Inc.
*
* This is a functional test to verify that we can deflect MSR events
* into user space.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#define VCPU_ID 5
static u32 msr_reads, msr_writes;
static u8 bitmap_00000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_00000000_write[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_40000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_deadbeef[1] = { 0x1 };
static void deny_msr(uint8_t *bitmap, u32 msr)
{
u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);
bitmap[idx / 8] &= ~(1 << (idx % 8));
}
static void prepare_bitmaps(void)
{
memset(bitmap_00000000, 0xff, sizeof(bitmap_00000000));
memset(bitmap_00000000_write, 0xff, sizeof(bitmap_00000000_write));
memset(bitmap_40000000, 0xff, sizeof(bitmap_40000000));
memset(bitmap_c0000000, 0xff, sizeof(bitmap_c0000000));
memset(bitmap_c0000000_read, 0xff, sizeof(bitmap_c0000000_read));
deny_msr(bitmap_00000000_write, MSR_IA32_POWER_CTL);
deny_msr(bitmap_c0000000_read, MSR_SYSCALL_MASK);
deny_msr(bitmap_c0000000_read, MSR_GS_BASE);
}
struct kvm_msr_filter filter = {
.flags = KVM_MSR_FILTER_DEFAULT_DENY,
.ranges = {
{
.flags = KVM_MSR_FILTER_READ,
.base = 0x00000000,
.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
.bitmap = bitmap_00000000,
}, {
.flags = KVM_MSR_FILTER_WRITE,
.base = 0x00000000,
.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
.bitmap = bitmap_00000000_write,
}, {
.flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
.base = 0x40000000,
.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
.bitmap = bitmap_40000000,
}, {
.flags = KVM_MSR_FILTER_READ,
.base = 0xc0000000,
.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
.bitmap = bitmap_c0000000_read,
}, {
.flags = KVM_MSR_FILTER_WRITE,
.base = 0xc0000000,
.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
.bitmap = bitmap_c0000000,
}, {
.flags = KVM_MSR_FILTER_WRITE | KVM_MSR_FILTER_READ,
.base = 0xdeadbeef,
.nmsrs = 1,
.bitmap = bitmap_deadbeef,
},
},
};
struct kvm_msr_filter no_filter = {
.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
};
static void guest_msr_calls(bool trapped)
{
/* This goes into the in-kernel emulation */
wrmsr(MSR_SYSCALL_MASK, 0);
if (trapped) {
/* This goes into user space emulation */
GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) == MSR_SYSCALL_MASK);
GUEST_ASSERT(rdmsr(MSR_GS_BASE) == MSR_GS_BASE);
} else {
GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) != MSR_SYSCALL_MASK);
GUEST_ASSERT(rdmsr(MSR_GS_BASE) != MSR_GS_BASE);
}
/* If trapped == true, this goes into user space emulation */
wrmsr(MSR_IA32_POWER_CTL, 0x1234);
/* This goes into the in-kernel emulation */
rdmsr(MSR_IA32_POWER_CTL);
/* Invalid MSR, should always be handled by user space exit */
GUEST_ASSERT(rdmsr(0xdeadbeef) == 0xdeadbeef);
wrmsr(0xdeadbeef, 0x1234);
}
static void guest_code(void)
{
guest_msr_calls(true);
/*
* Disable msr filtering, so that the kernel
* handles everything in the next round
*/
GUEST_SYNC(0);
guest_msr_calls(false);
GUEST_DONE();
}
static int handle_ucall(struct kvm_vm *vm)
{
struct ucall uc;
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_ABORT:
TEST_FAIL("Guest assertion not met");
break;
case UCALL_SYNC:
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &no_filter);
break;
case UCALL_DONE:
return 1;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
return 0;
}
static void handle_rdmsr(struct kvm_run *run)
{
run->msr.data = run->msr.index;
msr_reads++;
if (run->msr.index == MSR_SYSCALL_MASK ||
run->msr.index == MSR_GS_BASE) {
TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
"MSR read trap w/o access fault");
}
if (run->msr.index == 0xdeadbeef) {
TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
"MSR deadbeef read trap w/o inval fault");
}
}
static void handle_wrmsr(struct kvm_run *run)
{
/* ignore */
msr_writes++;
if (run->msr.index == MSR_IA32_POWER_CTL) {
TEST_ASSERT(run->msr.data == 0x1234,
"MSR data for MSR_IA32_POWER_CTL incorrect");
TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
"MSR_IA32_POWER_CTL trap w/o access fault");
}
if (run->msr.index == 0xdeadbeef) {
TEST_ASSERT(run->msr.data == 0x1234,
"MSR data for deadbeef incorrect");
TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
"deadbeef trap w/o inval fault");
}
}
int main(int argc, char *argv[])
{
struct kvm_enable_cap cap = {
.cap = KVM_CAP_X86_USER_SPACE_MSR,
.args[0] = KVM_MSR_EXIT_REASON_INVAL |
KVM_MSR_EXIT_REASON_UNKNOWN |
KVM_MSR_EXIT_REASON_FILTER,
};
struct kvm_vm *vm;
struct kvm_run *run;
int rc;
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code);
run = vcpu_state(vm, VCPU_ID);
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
if (!rc) {
print_skip("KVM_CAP_X86_USER_SPACE_MSR not supported");
exit(KSFT_SKIP);
}
vm_enable_cap(vm, &cap);
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
prepare_bitmaps();
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter);
while (1) {
rc = _vcpu_run(vm, VCPU_ID);
TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
switch (run->exit_reason) {
case KVM_EXIT_X86_RDMSR:
handle_rdmsr(run);
break;
case KVM_EXIT_X86_WRMSR:
handle_wrmsr(run);
break;
case KVM_EXIT_IO:
if (handle_ucall(vm))
goto done;
break;
}
}
done:
TEST_ASSERT(msr_reads == 4, "Handled 4 rdmsr in user space");
TEST_ASSERT(msr_writes == 3, "Handled 3 wrmsr in user space");
kvm_vm_free(vm);
return 0;
}
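One detail worth spelling out (an aside, not part of the diff): how the deny_msr() helper above (re-added in userspace_msr_exit_test.c below) picks the bit to clear. Bit n of a range's bitmap governs MSR (base + n), and because every base used by the test has its low bits clear, masking the MSR index with (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1) yields the same offset as (msr - base) for the MSRs involved. A tiny check, assuming the uapi value KVM_MSR_FILTER_MAX_BITMAP_SIZE == 0x600:

#include <assert.h>
#include <stdint.h>

#define KVM_MSR_FILTER_MAX_BITMAP_SIZE 0x600	/* bytes, i.e. 0x3000 bits */
#define MSR_GS_BASE 0xc0000101

int main(void)
{
	uint32_t base = 0xc0000000;
	uint32_t idx = MSR_GS_BASE & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);

	/* 0xc0000101 & 0x5ff == 0x101 == MSR_GS_BASE - base */
	assert(idx == MSR_GS_BASE - base);
	/* so deny_msr() clears bit 1 of byte 0x20 in bitmap_c0000000_read */
	assert(idx / 8 == 0x20 && idx % 8 == 1);
	return 0;
}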

tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c

@@ -20,8 +20,8 @@ static int fep_available = 1;
#define VCPU_ID 1
#define MSR_NON_EXISTENT 0x474f4f00
u64 deny_bits = 0;
struct kvm_msr_filter filter = {
static u64 deny_bits = 0;
struct kvm_msr_filter filter_allow = {
.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
.ranges = {
{
@@ -53,8 +53,7 @@ struct kvm_msr_filter filter_fs = {
.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
.ranges = {
{
.flags = KVM_MSR_FILTER_READ |
KVM_MSR_FILTER_WRITE,
.flags = KVM_MSR_FILTER_READ,
.nmsrs = 1,
.base = MSR_FS_BASE,
.bitmap = (uint8_t*)&deny_bits,
@@ -66,8 +65,7 @@ struct kvm_msr_filter filter_gs = {
.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
.ranges = {
{
.flags = KVM_MSR_FILTER_READ |
KVM_MSR_FILTER_WRITE,
.flags = KVM_MSR_FILTER_READ,
.nmsrs = 1,
.base = MSR_GS_BASE,
.bitmap = (uint8_t*)&deny_bits,
@@ -75,8 +73,77 @@ struct kvm_msr_filter filter_gs = {
},
};
uint64_t msr_non_existent_data;
int guest_exception_count;
static uint64_t msr_non_existent_data;
static int guest_exception_count;
static u32 msr_reads, msr_writes;
static u8 bitmap_00000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_00000000_write[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_40000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_deadbeef[1] = { 0x1 };
static void deny_msr(uint8_t *bitmap, u32 msr)
{
u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);
bitmap[idx / 8] &= ~(1 << (idx % 8));
}
static void prepare_bitmaps(void)
{
memset(bitmap_00000000, 0xff, sizeof(bitmap_00000000));
memset(bitmap_00000000_write, 0xff, sizeof(bitmap_00000000_write));
memset(bitmap_40000000, 0xff, sizeof(bitmap_40000000));
memset(bitmap_c0000000, 0xff, sizeof(bitmap_c0000000));
memset(bitmap_c0000000_read, 0xff, sizeof(bitmap_c0000000_read));
deny_msr(bitmap_00000000_write, MSR_IA32_POWER_CTL);
deny_msr(bitmap_c0000000_read, MSR_SYSCALL_MASK);
deny_msr(bitmap_c0000000_read, MSR_GS_BASE);
}
struct kvm_msr_filter filter_deny = {
.flags = KVM_MSR_FILTER_DEFAULT_DENY,
.ranges = {
{
.flags = KVM_MSR_FILTER_READ,
.base = 0x00000000,
.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
.bitmap = bitmap_00000000,
}, {
.flags = KVM_MSR_FILTER_WRITE,
.base = 0x00000000,
.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
.bitmap = bitmap_00000000_write,
}, {
.flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
.base = 0x40000000,
.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
.bitmap = bitmap_40000000,
}, {
.flags = KVM_MSR_FILTER_READ,
.base = 0xc0000000,
.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
.bitmap = bitmap_c0000000_read,
}, {
.flags = KVM_MSR_FILTER_WRITE,
.base = 0xc0000000,
.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
.bitmap = bitmap_c0000000,
}, {
.flags = KVM_MSR_FILTER_WRITE | KVM_MSR_FILTER_READ,
.base = 0xdeadbeef,
.nmsrs = 1,
.bitmap = bitmap_deadbeef,
},
},
};
struct kvm_msr_filter no_filter_deny = {
.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
};
/*
* Note: Force test_rdmsr() to not be inlined to prevent the labels,
@@ -146,7 +213,7 @@ static noinline void test_em_wrmsr(uint32_t msr, uint64_t value)
extern char em_rdmsr_start, em_rdmsr_end;
extern char em_wrmsr_start, em_wrmsr_end;
static void guest_code(void)
static void guest_code_filter_allow(void)
{
uint64_t data;
@@ -233,27 +300,60 @@ static void guest_code(void)
GUEST_DONE();
}
static void guest_msr_calls(bool trapped)
{
/* This goes into the in-kernel emulation */
wrmsr(MSR_SYSCALL_MASK, 0);
if (trapped) {
/* This goes into user space emulation */
GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) == MSR_SYSCALL_MASK);
GUEST_ASSERT(rdmsr(MSR_GS_BASE) == MSR_GS_BASE);
} else {
GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) != MSR_SYSCALL_MASK);
GUEST_ASSERT(rdmsr(MSR_GS_BASE) != MSR_GS_BASE);
}
/* If trapped == true, this goes into user space emulation */
wrmsr(MSR_IA32_POWER_CTL, 0x1234);
/* This goes into the in-kernel emulation */
rdmsr(MSR_IA32_POWER_CTL);
/* Invalid MSR, should always be handled by user space exit */
GUEST_ASSERT(rdmsr(0xdeadbeef) == 0xdeadbeef);
wrmsr(0xdeadbeef, 0x1234);
}
static void guest_code_filter_deny(void)
{
guest_msr_calls(true);
/*
* Disable msr filtering, so that the kernel
* handles everything in the next round
*/
GUEST_SYNC(0);
guest_msr_calls(false);
GUEST_DONE();
}
static void guest_code_permission_bitmap(void)
{
uint64_t data;
test_wrmsr(MSR_FS_BASE, 0);
data = test_rdmsr(MSR_FS_BASE);
GUEST_ASSERT(data == MSR_FS_BASE);
test_wrmsr(MSR_GS_BASE, 0);
data = test_rdmsr(MSR_GS_BASE);
GUEST_ASSERT(data == 0);
GUEST_ASSERT(data != MSR_GS_BASE);
/* Let userspace know to switch the filter */
GUEST_SYNC(0);
test_wrmsr(MSR_FS_BASE, 0);
data = test_rdmsr(MSR_FS_BASE);
GUEST_ASSERT(data == 0);
test_wrmsr(MSR_GS_BASE, 0);
GUEST_ASSERT(data != MSR_FS_BASE);
data = test_rdmsr(MSR_GS_BASE);
GUEST_ASSERT(data == MSR_GS_BASE);
@@ -376,9 +476,6 @@ static void process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
case MSR_NON_EXISTENT:
msr_non_existent_data = run->msr.data;
break;
case MSR_FS_BASE:
case MSR_GS_BASE:
break;
default:
TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
}
@@ -453,7 +550,7 @@ static void run_guest_then_process_ucall_done(struct kvm_vm *vm)
process_ucall_done(vm);
}
static void test_msr_filter(void) {
static void test_msr_filter_allow(void) {
struct kvm_enable_cap cap = {
.cap = KVM_CAP_X86_USER_SPACE_MSR,
.args[0] = KVM_MSR_EXIT_REASON_FILTER,
@@ -462,7 +559,7 @@ static void test_msr_filter(void) {
int rc;
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code);
vm = vm_create_default(VCPU_ID, 0, guest_code_filter_allow);
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
@@ -472,7 +569,7 @@ static void test_msr_filter(void) {
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter);
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, VCPU_ID);
@@ -519,6 +616,116 @@ static void test_msr_filter(void) {
kvm_vm_free(vm);
}
static int handle_ucall(struct kvm_vm *vm)
{
struct ucall uc;
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_ABORT:
TEST_FAIL("Guest assertion not met");
break;
case UCALL_SYNC:
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &no_filter_deny);
break;
case UCALL_DONE:
return 1;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
return 0;
}
static void handle_rdmsr(struct kvm_run *run)
{
run->msr.data = run->msr.index;
msr_reads++;
if (run->msr.index == MSR_SYSCALL_MASK ||
run->msr.index == MSR_GS_BASE) {
TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
"MSR read trap w/o access fault");
}
if (run->msr.index == 0xdeadbeef) {
TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
"MSR deadbeef read trap w/o inval fault");
}
}
static void handle_wrmsr(struct kvm_run *run)
{
/* ignore */
msr_writes++;
if (run->msr.index == MSR_IA32_POWER_CTL) {
TEST_ASSERT(run->msr.data == 0x1234,
"MSR data for MSR_IA32_POWER_CTL incorrect");
TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
"MSR_IA32_POWER_CTL trap w/o access fault");
}
if (run->msr.index == 0xdeadbeef) {
TEST_ASSERT(run->msr.data == 0x1234,
"MSR data for deadbeef incorrect");
TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
"deadbeef trap w/o inval fault");
}
}
static void test_msr_filter_deny(void) {
struct kvm_enable_cap cap = {
.cap = KVM_CAP_X86_USER_SPACE_MSR,
.args[0] = KVM_MSR_EXIT_REASON_INVAL |
KVM_MSR_EXIT_REASON_UNKNOWN |
KVM_MSR_EXIT_REASON_FILTER,
};
struct kvm_vm *vm;
struct kvm_run *run;
int rc;
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code_filter_deny);
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
run = vcpu_state(vm, VCPU_ID);
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
vm_enable_cap(vm, &cap);
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
prepare_bitmaps();
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_deny);
while (1) {
rc = _vcpu_run(vm, VCPU_ID);
TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
switch (run->exit_reason) {
case KVM_EXIT_X86_RDMSR:
handle_rdmsr(run);
break;
case KVM_EXIT_X86_WRMSR:
handle_wrmsr(run);
break;
case KVM_EXIT_IO:
if (handle_ucall(vm))
goto done;
break;
}
}
done:
TEST_ASSERT(msr_reads == 4, "Handled 4 rdmsr in user space");
TEST_ASSERT(msr_writes == 3, "Handled 3 wrmsr in user space");
kvm_vm_free(vm);
}
static void test_msr_permission_bitmap(void) {
struct kvm_enable_cap cap = {
.cap = KVM_CAP_X86_USER_SPACE_MSR,
@@ -539,11 +746,9 @@ static void test_msr_permission_bitmap(void) {
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_fs);
run_guest_then_process_wrmsr(vm, MSR_FS_BASE);
run_guest_then_process_rdmsr(vm, MSR_FS_BASE);
TEST_ASSERT(run_guest_then_process_ucall(vm) == UCALL_SYNC, "Expected ucall state to be UCALL_SYNC.");
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs);
run_guest_then_process_wrmsr(vm, MSR_GS_BASE);
run_guest_then_process_rdmsr(vm, MSR_GS_BASE);
run_guest_then_process_ucall_done(vm);
@@ -552,7 +757,12 @@ static void test_msr_permission_bitmap(void) {
int main(int argc, char *argv[])
{
test_msr_filter();
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
test_msr_filter_allow();
test_msr_filter_deny();
test_msr_permission_bitmap();