KVM: MMU: Fix aliased gfns treated as unaliased

Some areas of the kvm x86 mmu use a gfn's offset inside a memslot without
unaliasing the gfn first.  This patch makes sure the gfn is unaliased
before it is used, and adds gfn_to_memslot_unaliased() so that callers
which already hold an unaliased gfn can skip the redundant unaliasing.

Signed-off-by: Izik Eidus <ieidus@redhat.com>
Acked-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Izik Eidus authored 2008-10-03 17:40:32 +03:00, committed by Avi Kivity
Parent 6eb55818c0
Commit 2843099fee
3 changed files with 17 additions and 8 deletions
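For context: on x86 in this era, a gfn alias remapped one range of guest frame numbers onto another, so the same backing memory was visible at two guest physical addresses. gfn_to_memslot() resolved the alias internally before searching the slots, so code that then computed an in-slot offset from the raw gfn was mixing an aliased gfn with a slot found for the unaliased one. Below is a simplified sketch of the resolver, following the x86 sources of the time (illustrative, not verbatim):

/*
 * Sketch of unalias_gfn(), simplified from the 2008 arch/x86/kvm/x86.c.
 * Each alias entry remaps [base_gfn, base_gfn + npages) onto the range
 * starting at target_gfn; a gfn outside every alias maps to itself.
 */
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->arch.naliases; ++i) {
		alias = &kvm->arch.aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}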

View file

@@ -617,6 +617,8 @@ void kvm_disable_tdp(void);
 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 int complete_pio(struct kvm_vcpu *vcpu);
 
+struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
+
 static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 {
 	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

View file

@@ -386,7 +386,9 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 {
 	int *write_count;
 
-	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
+	gfn = unalias_gfn(kvm, gfn);
+	write_count = slot_largepage_idx(gfn,
+					 gfn_to_memslot_unaliased(kvm, gfn));
 	*write_count += 1;
 }
@@ -394,16 +396,20 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 {
 	int *write_count;
 
-	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
+	gfn = unalias_gfn(kvm, gfn);
+	write_count = slot_largepage_idx(gfn,
+					 gfn_to_memslot_unaliased(kvm, gfn));
 	*write_count -= 1;
 	WARN_ON(*write_count < 0);
 }
 
 static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
 {
-	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+	struct kvm_memory_slot *slot;
 	int *largepage_idx;
 
+	gfn = unalias_gfn(kvm, gfn);
+	slot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (slot) {
 		largepage_idx = slot_largepage_idx(gfn, slot);
 		return *largepage_idx;
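Both hunks above fix the same pattern: slot_largepage_idx() turns a gfn into an index relative to slot->base_gfn, so the gfn and the slot must live in the same (unaliased) namespace. A sketch of that helper as it looked around this time, assumed from the mmu.c of this era rather than quoted from it:

/*
 * Assumed shape of slot_largepage_idx() circa 2008: it returns the
 * large-page write_count entry for gfn inside slot.  If gfn is still
 * aliased while slot was found via the unaliased gfn, the subtraction
 * picks the wrong entry, or lands outside the lpage_info array.
 */
static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
{
	unsigned long idx;

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
	return &slot->lpage_info[idx].write_count;
}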
@@ -2973,8 +2979,8 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
 		if (sp->role.metaphysical)
 			continue;
 
-		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
 		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
+		slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
 		rmapp = &slot->rmap[gfn - slot->base_gfn];
 		if (*rmapp)
 			printk(KERN_ERR "%s: (%s) shadow page has writable"
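The rmap line above shows why the mismatch is dangerous: the slot's rmap array is indexed with gfn - slot->base_gfn. A small standalone example with hypothetical numbers (an alias mapping gfns [0xa0, 0xc0) onto [0x100, 0x120), backed by a slot starting at 0x100) shows the offset underflowing when the raw aliased gfn is used:

#include <assert.h>

int main(void)
{
	unsigned long alias_base = 0xa0, alias_target = 0x100;
	unsigned long slot_base = 0x100;
	unsigned long gfn = 0xa8;		/* raw, aliased gfn */
	unsigned long unaliased = alias_target + (gfn - alias_base);

	/* Slot found via the unaliased gfn, indexed with the same gfn: */
	assert(unaliased - slot_base == 0x8);	/* correct rmap entry */

	/* Indexing with the raw aliased gfn wraps far out of range: */
	assert((long)(gfn - slot_base) == -0x58);
	return 0;
}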

View file

@@ -923,7 +923,7 @@ int kvm_is_error_hva(unsigned long addr)
 }
 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 
-static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
@@ -936,11 +936,12 @@ static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 	}
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
 
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 {
 	gfn = unalias_gfn(kvm, gfn);
-	return __gfn_to_memslot(kvm, gfn);
+	return gfn_to_memslot_unaliased(kvm, gfn);
 }
 
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
@@ -964,7 +965,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 	struct kvm_memory_slot *slot;
 
 	gfn = unalias_gfn(kvm, gfn);
-	slot = __gfn_to_memslot(kvm, gfn);
+	slot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (!slot)
 		return bad_hva();
 	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
@@ -1215,7 +1216,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	struct kvm_memory_slot *memslot;
 
 	gfn = unalias_gfn(kvm, gfn);
-	memslot = __gfn_to_memslot(kvm, gfn);
+	memslot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
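Taken together, the patch establishes one convention: unalias a gfn exactly once, then stay in the unaliased namespace for both the slot lookup and any offset arithmetic inside the slot. A sketch of the resulting caller pattern, mirroring gfn_to_hva() above in hypothetical standalone form:

/*
 * Caller pattern after this patch (sketch): a single unalias_gfn()
 * call feeds both the slot lookup and the in-slot offset computation,
 * so the two can never disagree about which gfn namespace they use.
 */
static unsigned long example_gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);			/* once */
	slot = gfn_to_memslot_unaliased(kvm, gfn);	/* no hidden unalias */
	if (!slot)
		return bad_hva();
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}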