KVM: Portability: Split mmu-related static inline functions to mmu.h

Since these functions need to know the details of the kvm or kvm_vcpu structures,
they cannot be put in x86.h.  Create mmu.h to hold them.

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Zhang Xiantao 2007-12-14 09:35:10 +08:00, committed by Avi Kivity
Parent ad312c7c79
Commit 1d737c8a68
7 changed files with 55 additions and 44 deletions
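The rationale above rests on a basic C constraint: a static inline helper that dereferences vcpu->kvm or vcpu->arch fields can only live in a header where the full struct definitions are in scope, which is why the new mmu.h begins by including "kvm.h". Below is a minimal, self-contained user-space sketch of that constraint; the struct fields, the constant value, and the helper name are simplified stand-ins, not the real kvm.h definitions.

/*
 * Stand-alone sketch: a static inline helper that peeks into
 * vcpu->kvm->n_free_mmu_pages compiles only where the struct
 * definitions are visible.  The structs and the constant below are
 * simplified stand-ins for the real drivers/kvm/kvm.h definitions.
 */
#include <stdio.h>

#define KVM_MIN_FREE_MMU_PAGES 5        /* stand-in value, not the kernel's */

struct kvm {
        unsigned int n_free_mmu_pages;  /* stand-in for the real struct kvm */
};

struct kvm_vcpu {
        struct kvm *kvm;                /* stand-in for the real struct kvm_vcpu */
};

/* Same shape as kvm_mmu_free_some_pages(): it must see both structs. */
static inline int mmu_needs_free_pages(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES;
}

int main(void)
{
        struct kvm kvm = { .n_free_mmu_pages = 3 };
        struct kvm_vcpu vcpu = { .kvm = &kvm };

        printf("needs free: %d\n", mmu_needs_free_pages(&vcpu));  /* prints 1 */
        return 0;
}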


@@ -396,6 +396,7 @@ void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
void kvm_free_physmem(struct kvm *kvm);


@@ -20,6 +20,7 @@
#include "vmx.h"
#include "kvm.h"
#include "x86.h"
#include "mmu.h"
#include <linux/types.h>
#include <linux/string.h>

drivers/kvm/mmu.h (new file, 44 lines)

@@ -0,0 +1,44 @@
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include "kvm.h"

static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
        if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
                __kvm_mmu_free_some_pages(vcpu);
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
        if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
                return 0;

        return kvm_mmu_load(vcpu);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->arch.shadow_efer & EFER_LME;
#else
        return 0;
#endif
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.cr4 & X86_CR4_PAE;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.cr4 & X86_CR4_PSE;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.cr0 & X86_CR0_PG;
}

#endif
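For orientation, these predicates are the building blocks for choosing a shadow-MMU paging mode. The sketch below mirrors the shape of that mode-selection logic; the *_init_context callees are illustrative stand-ins for mmu.c's per-mode setup routines, not code taken from this commit.

/*
 * Illustrative only: how a caller might combine the mmu.h predicates
 * to pick a paging mode.  The *_init_context functions are hypothetical
 * stand-ins for mmu.c's file-local setup routines.
 */
static int init_mmu_context_sketch(struct kvm_vcpu *vcpu)
{
        if (!is_paging(vcpu))
                return nonpaging_init_context(vcpu);   /* guest paging disabled */
        else if (is_long_mode(vcpu))
                return paging64_init_context(vcpu);    /* 64-bit (long mode)    */
        else if (is_pae(vcpu))
                return paging32E_init_context(vcpu);   /* 32-bit with PAE       */
        else
                return paging32_init_context(vcpu);    /* classic 32-bit paging */
}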


@@ -17,6 +17,7 @@
#include "kvm_svm.h"
#include "x86_emulate.h"
#include "irq.h"
#include "mmu.h"
#include <linux/module.h>
#include <linux/kernel.h>


@@ -21,6 +21,7 @@
#include "irq.h"
#include "vmx.h"
#include "segment_descriptor.h"
#include "mmu.h"
#include <linux/module.h>
#include <linux/kernel.h>


@@ -19,6 +19,7 @@
#include "x86_emulate.h"
#include "segment_descriptor.h"
#include "irq.h"
#include "mmu.h"
#include <linux/kvm.h>
#include <linux/fs.h>
@@ -3139,3 +3140,9 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
        return 0;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
               || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
}
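With this hook exported, architecture-independent code can ask whether a blocked vcpu is able to run without reading the x86-specific mp_state values directly. A hedged sketch of the kind of wait loop that could call it follows; it is not the actual kvm_main.c implementation, and the function name is illustrative.

/*
 * Illustrative sketch only, not the real kvm_main.c code: generic code
 * polls the arch hook instead of inspecting x86 mp_state itself.
 * (Assumes <linux/sched.h> for signal_pending() and schedule().)
 */
static void vcpu_block_sketch(struct kvm_vcpu *vcpu)
{
        while (!kvm_arch_vcpu_runnable(vcpu)) {
                if (signal_pending(current))    /* let a pending signal end the wait */
                        break;
                schedule();                     /* placeholder for the real sleep/wake logic */
        }
}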


@@ -334,44 +334,6 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);

static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
        if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
                __kvm_mmu_free_some_pages(vcpu);
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
        if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
                return 0;

        return kvm_mmu_load(vcpu);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->arch.shadow_efer & EFER_LME;
#else
        return 0;
#endif
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.cr4 & X86_CR4_PAE;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.cr4 & X86_CR4_PSE;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.cr0 & X86_CR0_PG;
}
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int complete_pio(struct kvm_vcpu *vcpu);
@@ -490,10 +452,4 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
static inline int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
               || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
}
#endif