KVM: Introduce hva_to_gfn_memslot() for kvm_handle_hva()
This restricts hva handling in mmu code and makes it easier to extend
kvm_handle_hva() so that it can treat a range of addresses later in this
patch series.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Cc: Alexander Graf <agraf@suse.de>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent: 9594a49861
Commit: d19a748b1c
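The helper itself is just address arithmetic: take the offset of the hva from the memslot's userspace_addr, shift it down by PAGE_SHIFT to get a page count, and add the memslot's base_gfn. Below is a minimal userspace sketch of that calculation; it is not kernel code, and the fake_memslot struct and the example numbers are made up for illustration, modeling only the two fields the helper reads.

/* Illustrative userspace sketch of the hva -> gfn calculation; not kernel
 * code. Only the two fields the helper reads are modeled.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12

typedef unsigned long long gfn_t;

struct fake_memslot {			/* stand-in for struct kvm_memory_slot */
	gfn_t base_gfn;			/* first guest frame backed by the slot */
	unsigned long userspace_addr;	/* hva where the slot's memory starts */
};

static gfn_t hva_to_gfn_memslot(unsigned long hva, struct fake_memslot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

int main(void)
{
	struct fake_memslot slot = {
		.base_gfn = 0x100,		/* slot starts at guest frame 0x100 */
		.userspace_addr = 0x70000000UL,	/* slot starts at this hva */
	};
	/* An hva that falls three pages into the slot maps to base_gfn + 3. */
	unsigned long hva = slot.userspace_addr + (3UL << PAGE_SHIFT) + 0x42;

	assert(hva_to_gfn_memslot(hva, &slot) == 0x103);
	printf("gfn = 0x%llx\n", hva_to_gfn_memslot(hva, &slot));
	return 0;
}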
@@ -772,10 +772,10 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 
 		end = start + (memslot->npages << PAGE_SHIFT);
 		if (hva >= start && hva < end) {
-			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+			gfn_t gfn = hva_to_gfn_memslot(hva, memslot);
+			gfn_t gfn_offset = gfn - memslot->base_gfn;
 
-			ret = handler(kvm, &memslot->rmap[gfn_offset],
-				      memslot->base_gfn + gfn_offset);
+			ret = handler(kvm, &memslot->rmap[gfn_offset], gfn);
 			retval |= ret;
 		}
 	}
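The change above is purely mechanical: the old code took the page offset from the start of the slot's hva range, while the new code asks the helper for a gfn and recovers the same offset by subtracting base_gfn, so both the rmap index and the gfn handed to the handler stay the same. A standalone check of that equivalence, reusing the same made-up slot layout as the sketch above:

/* Standalone check (illustrative, not kernel code) that the old and new
 * computations agree:
 *   (hva - start) >> PAGE_SHIFT == hva_to_gfn_memslot(hva, slot) - base_gfn
 */
#include <assert.h>

#define PAGE_SHIFT 12

typedef unsigned long long gfn_t;

struct fake_memslot {
	gfn_t base_gfn;
	unsigned long userspace_addr;
};

static gfn_t hva_to_gfn_memslot(unsigned long hva, struct fake_memslot *slot)
{
	return slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
	struct fake_memslot slot = { .base_gfn = 0x100,
				     .userspace_addr = 0x70000000UL };
	unsigned long start = slot.userspace_addr;
	unsigned long hva;

	for (hva = start; hva < start + (16UL << PAGE_SHIFT); hva += 0x333) {
		gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;	/* old code */
		gfn_t gfn = hva_to_gfn_memslot(hva, &slot);	/* new code */

		assert(gfn_offset == gfn - slot.base_gfn);
		assert(slot.base_gfn + gfn_offset == gfn);
	}
	return 0;
}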
@@ -1278,8 +1278,7 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 
 		end = start + (memslot->npages << PAGE_SHIFT);
 		if (hva >= start && hva < end) {
-			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
-			gfn_t gfn = memslot->base_gfn + gfn_offset;
+			gfn_t gfn = hva_to_gfn_memslot(hva, memslot);
 
 			ret = 0;
 
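In this second caller only gfn needs to survive the change: the rest of that loop (not shown in this hunk) derives the per-level rmap slot from gfn and base_gfn via gfn_to_index(), whose tail is visible as context in the header hunk below. A standalone sketch of that index arithmetic follows; the KVM_HPAGE_GFN_SHIFT() definition used here, dropping 9 more gfn bits per large-page level, is an assumption of the illustration rather than something shown in this patch.

/* Sketch of the per-level rmap index arithmetic (illustrative, not kernel
 * code). KVM_HPAGE_GFN_SHIFT() is assumed to drop 9 more gfn bits per
 * large-page level; only its use by gfn_to_index() matters here.
 */
#include <assert.h>

typedef unsigned long long gfn_t;

#define PT_PAGE_TABLE_LEVEL		1
#define KVM_HPAGE_GFN_SHIFT(level)	(((level) - PT_PAGE_TABLE_LEVEL) * 9)

static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

int main(void)
{
	gfn_t base_gfn = 0x100;
	gfn_t gfn = 0x103;	/* what hva_to_gfn_memslot() would return */

	/* 4K level: the index is simply the gfn offset within the slot. */
	assert(gfn_to_index(gfn, base_gfn, PT_PAGE_TABLE_LEVEL) == 3);
	/* Next level: both gfns fall in the same 512-page block, index 0. */
	assert(gfn_to_index(gfn, base_gfn, PT_PAGE_TABLE_LEVEL + 1) == 0);
	return 0;
}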
@@ -740,6 +740,14 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
 }
 
+static inline gfn_t
+hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
+{
+	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
+
+	return slot->base_gfn + gfn_offset;
+}
+
 static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
 					       gfn_t gfn)
 {
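The commit message points at the real payoff: later in the series kvm_handle_hva() is extended so it can treat a range of addresses, and a memslot-relative hva-to-gfn helper makes that conversion a one-liner at both ends of the range. The sketch below is a hypothetical illustration of that idea, not the actual follow-up patch: it clamps a requested hva range to one made-up slot and converts both endpoints with the helper.

/* Hypothetical illustration of range handling with hva_to_gfn_memslot();
 * this is not the actual follow-up patch and not kernel code.
 */
#include <assert.h>

#define PAGE_SHIFT 12

typedef unsigned long long gfn_t;

struct fake_memslot {
	gfn_t base_gfn;
	unsigned long userspace_addr;
	unsigned long npages;
};

static gfn_t hva_to_gfn_memslot(unsigned long hva, struct fake_memslot *slot)
{
	return slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
}

/* Invoke cb once for every gfn that [hva_start, hva_end) touches in slot. */
static void for_each_gfn_in_range(struct fake_memslot *slot,
				  unsigned long hva_start, unsigned long hva_end,
				  void (*cb)(gfn_t gfn))
{
	unsigned long start = slot->userspace_addr;
	unsigned long end = start + (slot->npages << PAGE_SHIFT);
	gfn_t gfn, gfn_end;

	/* Clamp the requested range to the part overlapping this slot. */
	if (hva_start < start)
		hva_start = start;
	if (hva_end > end)
		hva_end = end;
	if (hva_start >= hva_end)
		return;

	gfn = hva_to_gfn_memslot(hva_start, slot);
	gfn_end = hva_to_gfn_memslot(hva_end - 1, slot) + 1;	/* exclusive */

	for (; gfn < gfn_end; gfn++)
		cb(gfn);
}

static int hits;
static void count_gfn(gfn_t gfn) { (void)gfn; hits++; }

int main(void)
{
	struct fake_memslot slot = { .base_gfn = 0x100,
				     .userspace_addr = 0x70000000UL,
				     .npages = 16 };

	/* A range covering pages 2..5 of the slot touches exactly 4 gfns. */
	for_each_gfn_in_range(&slot,
			      slot.userspace_addr + (2UL << PAGE_SHIFT),
			      slot.userspace_addr + (6UL << PAGE_SHIFT),
			      count_gfn);
	assert(hits == 4);
	return 0;
}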