KVM: export __gfn_to_pfn_memslot, drop gfn_to_pfn_async

gfn_to_pfn_async is used in just one place, and because of x86-specific
treatment that place will need to look at the memory slot.  Hence inline
it into try_async_pf and export __gfn_to_pfn_memslot.

The patch also switches the subsequent call to gfn_to_pfn_prot to use
__gfn_to_pfn_memslot.  This is a small optimization.  Finally, remove
the now-unused async argument of __gfn_to_pfn.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Paolo Bonzini 2015-04-02 11:20:48 +02:00
Parent 69a1220060
Commit 3520469d65
3 changed files with 15 additions and 24 deletions

View file

@@ -3511,10 +3511,12 @@ static bool can_do_async_pf(struct kvm_vcpu *vcpu)
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
gva_t gva, pfn_t *pfn, bool write, bool *writable) gva_t gva, pfn_t *pfn, bool write, bool *writable)
{ {
struct kvm_memory_slot *slot;
bool async; bool async;
*pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable); slot = gfn_to_memslot(vcpu->kvm, gfn);
async = false;
*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
if (!async) if (!async)
return false; /* *pfn has correct page already */ return false; /* *pfn has correct page already */
@@ -3528,8 +3530,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
return true; return true;
} }
*pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable); *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
return false; return false;
} }

View file

@@ -539,13 +539,13 @@ void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page); void kvm_set_page_accessed(struct page *page);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable); bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
bool *async, bool write_fault, bool *writable);
void kvm_release_pfn_clean(pfn_t pfn); void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn); void kvm_set_pfn_dirty(pfn_t pfn);

View file

@@ -1355,9 +1355,8 @@ exit:
return pfn; return pfn;
} }
static pfn_t pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
__gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool *async, bool write_fault, bool *writable)
bool *async, bool write_fault, bool *writable)
{ {
unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
@@ -1376,44 +1375,35 @@ __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
return hva_to_pfn(addr, atomic, async, write_fault, return hva_to_pfn(addr, atomic, async, write_fault,
writable); writable);
} }
EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async, static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic,
bool write_fault, bool *writable) bool write_fault, bool *writable)
{ {
struct kvm_memory_slot *slot; struct kvm_memory_slot *slot;
if (async)
*async = false;
slot = gfn_to_memslot(kvm, gfn); slot = gfn_to_memslot(kvm, gfn);
return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault, return __gfn_to_pfn_memslot(slot, gfn, atomic, NULL, write_fault,
writable); writable);
} }
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn) pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{ {
return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL); return __gfn_to_pfn(kvm, gfn, true, true, NULL);
} }
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic); EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
bool write_fault, bool *writable)
{
return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{ {
return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL); return __gfn_to_pfn(kvm, gfn, false, true, NULL);
} }
EXPORT_SYMBOL_GPL(gfn_to_pfn); EXPORT_SYMBOL_GPL(gfn_to_pfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable) bool *writable)
{ {
return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable); return __gfn_to_pfn(kvm, gfn, false, write_fault, writable);
} }
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);