Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
  KVM: MMU: Fix memory leak on guest demand faults
  KVM: VMX: convert init_rmode_tss() to slots_lock
  KVM: MMU: handle page removal with shadow mapping
  KVM: MMU: Fix is_rmap_pte() with io ptes
  KVM: VMX: Restore tss even on x86_64
This commit is contained in:
Linus Torvalds 2008-03-25 09:06:19 -07:00
Parents: 7ed7fe5e82 e48bb497b9
Commit: e584152571
2 changed files with 16 additions and 9 deletions

View file: arch/x86/kvm/mmu.c

@@ -222,8 +222,7 @@ static int is_io_pte(unsigned long pte)

 static int is_rmap_pte(u64 pte)
 {
-	return pte != shadow_trap_nonpresent_pte
-		&& pte != shadow_notrap_nonpresent_pte;
+	return is_shadow_present_pte(pte);
 }

 static gfn_t pse36_gfn_delta(u32 gpte)
@@ -893,14 +892,25 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 			 int *ptwrite, gfn_t gfn, struct page *page)
 {
 	u64 spte;
-	int was_rmapped = is_rmap_pte(*shadow_pte);
+	int was_rmapped = 0;
 	int was_writeble = is_writeble_pte(*shadow_pte);
+	hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

 	pgprintk("%s: spte %llx access %x write_fault %d"
 		 " user_fault %d gfn %lx\n",
 		 __FUNCTION__, *shadow_pte, pt_access,
 		 write_fault, user_fault, gfn);

+	if (is_rmap_pte(*shadow_pte)) {
+		if (host_pfn != page_to_pfn(page)) {
+			pgprintk("hfn old %lx new %lx\n",
+				 host_pfn, page_to_pfn(page));
+			rmap_remove(vcpu->kvm, shadow_pte);
+		}
+		else
+			was_rmapped = 1;
+	}
+
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
 	 * whether the guest actually used the pte (in order to detect

@@ -1402,7 +1412,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	up_read(&current->mm->mmap_sem);

 	vcpu->arch.update_pte.gfn = gfn;
-	vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn);
+	vcpu->arch.update_pte.page = page;
 }

 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,

View file: arch/x86/kvm/vmx.c

@@ -349,8 +349,6 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)

 static void reload_tss(void)
 {
-#ifndef CONFIG_X86_64
-
 	/*
 	 * VT restores TR but not its size.  Useless.
 	 */

@@ -361,7 +359,6 @@ static void reload_tss(void)
 	descs = (void *)gdt.base;
 	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
 	load_TR_desc();
-#endif
 }

 static void load_transition_efer(struct vcpu_vmx *vmx)

@@ -1436,7 +1433,7 @@ static int init_rmode_tss(struct kvm *kvm)
 	int ret = 0;
 	int r;

-	down_read(&current->mm->mmap_sem);
+	down_read(&kvm->slots_lock);
 	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
 	if (r < 0)
 		goto out;

@@ -1459,7 +1456,7 @@ static int init_rmode_tss(struct kvm *kvm)
 	ret = 1;
 out:
-	up_read(&current->mm->mmap_sem);
+	up_read(&kvm->slots_lock);
 	return ret;
 }