KVM: MMU: Rename kvm_mmu_free_some_pages() to make_mmu_pages_available()
The current name "kvm_mmu_free_some_pages" should be used for something that actually frees some shadow pages, as we expect from the name, but what the function is doing is to make a minimum number (KVM_MIN_FREE_MMU_PAGES) of shadow pages available: it does nothing when there are already enough. This patch changes the name to reflect this meaning better; while doing this renaming, the code in the wrapper function is inlined into the main body, since the whole function will now be inlined into its only caller. Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
This commit is contained in:
Родитель
7ddca7e43c
Коммит
81f4f76bbc
|
@ -1501,12 +1501,14 @@ static void drop_parent_pte(struct kvm_mmu_page *sp,
|
||||||
mmu_spte_clear_no_track(parent_pte);
|
mmu_spte_clear_no_track(parent_pte);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
|
||||||
|
|
||||||
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
|
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
|
||||||
u64 *parent_pte, int direct)
|
u64 *parent_pte, int direct)
|
||||||
{
|
{
|
||||||
struct kvm_mmu_page *sp;
|
struct kvm_mmu_page *sp;
|
||||||
|
|
||||||
kvm_mmu_free_some_pages(vcpu);
|
make_mmu_pages_available(vcpu);
|
||||||
|
|
||||||
sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
|
sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
|
||||||
sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
|
sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
|
||||||
|
@ -4010,10 +4012,13 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
|
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
|
||||||
|
|
||||||
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
|
static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
LIST_HEAD(invalid_list);
|
LIST_HEAD(invalid_list);
|
||||||
|
|
||||||
|
if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
|
||||||
|
return;
|
||||||
|
|
||||||
while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
|
while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
|
||||||
if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
|
if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
|
||||||
break;
|
break;
|
||||||
|
|
|
@ -64,12 +64,6 @@ static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
|
|
||||||
{
|
|
||||||
if (unlikely(kvm_mmu_available_pages(vcpu->kvm)< KVM_MIN_FREE_MMU_PAGES))
|
|
||||||
__kvm_mmu_free_some_pages(vcpu);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
|
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
|
if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
|
||||||
|
|
Загрузка…
Ссылка в новой задаче