KVM: MMU: audit: allow auditing more guests at the same time

Currently only one guest in the system can be audited, because:
- 'audit_point' is a global variable
- mmu_audit_disable() is called in kvm_mmu_destroy(), so auditing is disabled
  as soon as any guest exits

This patch fixes both issues and allows multiple guests to be audited at the
same time.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Authored by Xiao Guangrong on 2010-12-23 16:08:35 +08:00; committed by Avi Kivity
Parent: aff48baa34
Commit: b034cf0105
3 changed files, 39 additions and 30 deletions
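Taken together, the patch turns the audit point into per-VM state and passes the VM to the reporting macro. A condensed sketch of the resulting layout, assembled from the hunks below (kernel excerpts, not standalone buildable code):

/* arch/x86/include/asm/kvm_host.h: each VM now carries its own audit point */
struct kvm_arch {
	/* ... existing fields ... */
#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
#endif
};

/* arch/x86/kvm/mmu_audit.c: the macro reports against the VM it is handed */
#define audit_printk(kvm, fmt, args...)		\
	printk(KERN_ERR "audit: (%s) error: "	\
		fmt, audit_point_name[kvm->arch.audit_point], ##args)

static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
{
	/* ... rate limiting elided ... */
	vcpu->kvm->arch.audit_point = point;	/* per-VM, so concurrent guests no longer clobber each other */
	audit_all_active_sps(vcpu->kvm);
	audit_vcpu_spte(vcpu);
}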

arch/x86/include/asm/kvm_host.h

@@ -461,6 +461,10 @@ struct kvm_arch {
 	/* fields used by HYPER-V emulation */
 	u64 hv_guest_os_id;
 	u64 hv_hypercall;
+
+#ifdef CONFIG_KVM_MMU_AUDIT
+	int audit_point;
+#endif
 };
 
 struct kvm_vm_stat {

arch/x86/kvm/mmu.c

@@ -3534,13 +3534,6 @@ static void mmu_destroy_caches(void)
 	kmem_cache_destroy(mmu_page_header_cache);
 }
 
-void kvm_mmu_module_exit(void)
-{
-	mmu_destroy_caches();
-	percpu_counter_destroy(&kvm_total_used_mmu_pages);
-	unregister_shrinker(&mmu_shrinker);
-}
-
 int kvm_mmu_module_init(void)
 {
 	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
@@ -3733,12 +3726,6 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
 
-#ifdef CONFIG_KVM_MMU_AUDIT
-#include "mmu_audit.c"
-#else
-static void mmu_audit_disable(void) { }
-#endif
-
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
@@ -3746,5 +3733,18 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 	destroy_kvm_mmu(vcpu);
 	free_mmu_pages(vcpu);
 	mmu_free_memory_caches(vcpu);
+}
+
+#ifdef CONFIG_KVM_MMU_AUDIT
+#include "mmu_audit.c"
+#else
+static void mmu_audit_disable(void) { }
+#endif
+
+void kvm_mmu_module_exit(void)
+{
+	mmu_destroy_caches();
+	percpu_counter_destroy(&kvm_total_used_mmu_pages);
+	unregister_shrinker(&mmu_shrinker);
 	mmu_audit_disable();
 }
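The second half of the fix is a lifetime change: mmu_audit_disable() tears auditing down globally, so calling it from kvm_mmu_destroy() (per-vCPU teardown) switched auditing off for every remaining guest as soon as one guest exited. After this patch it runs only on module unload, which is also why kvm_mmu_module_exit() moves below the #include "mmu_audit.c" / stub block, so the call compiles in both configurations. Condensed from the hunks above, with the intent spelled out in comments:

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	/* ... */
	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
	/* no mmu_audit_disable() here any more: other guests may still be audited */
}

void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	percpu_counter_destroy(&kvm_total_used_mmu_pages);
	unregister_shrinker(&mmu_shrinker);
	mmu_audit_disable();	/* global teardown now happens once, when the module is unloaded */
}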

arch/x86/kvm/mmu_audit.c

@@ -19,11 +19,9 @@
 
 #include <linux/ratelimit.h>
 
-static int audit_point;
-
-#define audit_printk(fmt, args...)		\
+#define audit_printk(kvm, fmt, args...)		\
 	printk(KERN_ERR "audit: (%s) error: "	\
-		fmt, audit_point_name[audit_point], ##args)
+		fmt, audit_point_name[kvm->arch.audit_point], ##args)
 
 typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);
 
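With the extra kvm parameter, a call such as the first one changed below, audit_printk(vcpu->kvm, "unsync sp: %p level = %d\n", sp, level), expands to roughly the following (a sketch; string concatenation and whitespace simplified):

printk(KERN_ERR "audit: (%s) error: " "unsync sp: %p level = %d\n",
       audit_point_name[vcpu->kvm->arch.audit_point], sp, level);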
@@ -97,18 +95,21 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 
 	if (sp->unsync) {
 		if (level != PT_PAGE_TABLE_LEVEL) {
-			audit_printk("unsync sp: %p level = %d\n", sp, level);
+			audit_printk(vcpu->kvm, "unsync sp: %p "
+				     "level = %d\n", sp, level);
 			return;
 		}
 
 		if (*sptep == shadow_notrap_nonpresent_pte) {
-			audit_printk("notrap spte in unsync sp: %p\n", sp);
+			audit_printk(vcpu->kvm, "notrap spte in unsync "
+				     "sp: %p\n", sp);
 			return;
 		}
 	}
 
 	if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
-		audit_printk("notrap spte in direct sp: %p\n", sp);
+		audit_printk(vcpu->kvm, "notrap spte in direct sp: %p\n",
+			     sp);
 		return;
 	}
 
@@ -125,8 +126,9 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 
 	hpa = pfn << PAGE_SHIFT;
 	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
-		audit_printk("levels %d pfn %llx hpa %llx ent %llxn",
-			     vcpu->arch.mmu.root_level, pfn, hpa, *sptep);
+		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
+			     "ent %llxn", vcpu->arch.mmu.root_level, pfn,
+			     hpa, *sptep);
 }
 
 static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
@@ -142,8 +144,8 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 	if (!gfn_to_memslot(kvm, gfn)) {
 		if (!printk_ratelimit())
 			return;
-		audit_printk("no memslot for gfn %llx\n", gfn);
-		audit_printk("index %ld of sp (gfn=%llx)\n",
+		audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
+		audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
 		       (long int)(sptep - rev_sp->spt), rev_sp->gfn);
 		dump_stack();
 		return;
@@ -153,7 +155,8 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 	if (!*rmapp) {
 		if (!printk_ratelimit())
 			return;
-		audit_printk("no rmap for writable spte %llx\n", *sptep);
+		audit_printk(kvm, "no rmap for writable spte %llx\n",
+			     *sptep);
 		dump_stack();
 	}
 }
@@ -168,8 +171,9 @@ static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 {
 	struct kvm_mmu_page *sp = page_header(__pa(sptep));
 
-	if (audit_point == AUDIT_POST_SYNC && sp->unsync)
-		audit_printk("meet unsync sp(%p) after sync root.\n", sp);
+	if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
+		audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
+			     "root.\n", sp);
 }
 
 static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -202,8 +206,9 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
 	spte = rmap_next(kvm, rmapp, NULL);
 	while (spte) {
 		if (is_writable_pte(*spte))
-			audit_printk("shadow page has writable mappings: gfn "
-				     "%llx role %x\n", sp->gfn, sp->role.word);
+			audit_printk(kvm, "shadow page has writable "
+				     "mappings: gfn %llx role %x\n",
+				     sp->gfn, sp->role.word);
 		spte = rmap_next(kvm, rmapp, spte);
 	}
 }
@@ -238,7 +243,7 @@ static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
 	if (!__ratelimit(&ratelimit_state))
 		return;
 
-	audit_point = point;
+	vcpu->kvm->arch.audit_point = point;
 	audit_all_active_sps(vcpu->kvm);
 	audit_vcpu_spte(vcpu);
 }