KVM: MMU: Fix host memory corruption on i386 with >= 4GB ram
PAGE_MASK is an unsigned long, so using it to mask physical addresses on
i386 (which are 64-bit wide) leads to truncation. This can result in
page->private of unrelated memory pages being modified, with disastrous
results.

Fix by not using PAGE_MASK for physical addresses; instead calculate the
correct value directly from PAGE_SIZE. Also fix a similar BUG_ON().

Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent: ac1b714e78
Commit: 27aba76615
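To see the truncation concretely, here is a minimal userspace sketch (not
from the patch; uint32_t stands in for i386's 32-bit unsigned long, and the
address value is made up):

/* trunc_demo.c - shows how a 32-bit PAGE_MASK truncates a 64-bit
 * physical address.  A userspace sketch, not kernel code;
 * uint32_t models i386's 32-bit unsigned long. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  ((uint32_t)1 << PAGE_SHIFT)
#define PAGE_MASK  ((uint32_t)~(PAGE_SIZE - 1))   /* 0xfffff000 on i386 */

int main(void)
{
	uint64_t phys = 0x123456000ULL;  /* a page just above 4GB (PAE) */

	/* PAGE_MASK zero-extends to 0x00000000fffff000 when promoted to
	 * 64 bits, so the upper address bits are silently cleared. */
	printf("phys & PAGE_MASK           = 0x%llx\n",
	       (unsigned long long)(phys & PAGE_MASK));
	printf("phys & ~(u64)(PAGE_SIZE-1) = 0x%llx\n",
	       (unsigned long long)(phys & ~(uint64_t)(PAGE_SIZE - 1)));
	return 0;
}

The first printf yields 0x23456000 (the high bits are gone); the second
preserves the full address, which is what the patch's new mask achieves.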
@@ -131,7 +131,7 @@ static int dbg = 1;
 	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
 
 
-#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & PAGE_MASK)
+#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
 #define PT64_DIR_BASE_ADDR_MASK \
 	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
 
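Bits 12 through 51 of the spte hold the physical page address, and all of
them must survive the AND. A quick standalone check of the two constants
(a sketch, not kernel code; assumes 4 KiB pages, so i386's 32-bit
PAGE_MASK is 0xfffff000):

/* mask_check.c - compare the old and new PT64_BASE_ADDR_MASK values. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t page_mask_i386 = 0xfffff000u;  /* ~(PAGE_SIZE-1) in 32 bits */

	/* Old definition: the 32-bit mask zero-extends, wiping bits 32-51. */
	uint64_t old_mask = ((1ULL << 52) - 1) & page_mask_i386;
	/* New definition: the complement is taken in 64 bits. */
	uint64_t new_mask = ((1ULL << 52) - 1) & ~(uint64_t)(4096 - 1);

	assert(old_mask == 0x00000000fffff000ULL);  /* high address bits lost */
	assert(new_mask == 0x000ffffffffff000ULL);  /* bits 12-51 preserved */
	return 0;
}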
@@ -406,8 +406,8 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 		spte = desc->shadow_ptes[0];
 	}
 	BUG_ON(!spte);
-	BUG_ON((*spte & PT64_BASE_ADDR_MASK) !=
-	       page_to_pfn(page) << PAGE_SHIFT);
+	BUG_ON((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT
+	       != page_to_pfn(page));
 	BUG_ON(!(*spte & PT_PRESENT_MASK));
 	BUG_ON(!(*spte & PT_WRITABLE_MASK));
 	rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
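This BUG_ON() rewrite is the "similar" fix the message mentions:
page_to_pfn() returns an unsigned long, so on i386 the left shift
page_to_pfn(page) << PAGE_SHIFT wraps for pages above 4GB and the
assertion would misfire on a perfectly good mapping. Shifting the 64-bit
spte value right instead keeps the comparison in pfn space, where both
sides fit. A userspace sketch with made-up values (uint32_t again models
i386's unsigned long):

/* bugon_demo.c - why the left shift in the old BUG_ON() overflows. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t spte_addr = 0x123456000ULL; /* *spte & PT64_BASE_ADDR_MASK */
	uint32_t pfn = 0x123456;             /* page_to_pfn() result on i386 */

	/* Old check: pfn << PAGE_SHIFT wraps in 32 bits (0x23456000),
	 * so a correct mapping triggers the BUG_ON(). */
	uint32_t rhs_old = pfn << PAGE_SHIFT;
	printf("old: 0x%llx != 0x%x -> %s\n",
	       (unsigned long long)spte_addr, rhs_old,
	       spte_addr != rhs_old ? "BUG (false positive)" : "ok");

	/* New check: shift the 64-bit side right; no overflow possible. */
	printf("new: 0x%llx == 0x%x -> %s\n",
	       (unsigned long long)(spte_addr >> PAGE_SHIFT), pfn,
	       (spte_addr >> PAGE_SHIFT) != pfn ? "BUG" : "ok");
	return 0;
}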