Merge branch 'kvm-arm64/memslot-fixes' into kvmarm-master/next
Signed-off-by: Marc Zyngier <maz@kernel.org>
Commit 3d63ef4d52
arch/arm64/include/asm/kvm_host.h
@@ -644,7 +644,6 @@ void kvm_arm_resume_guest(struct kvm *kvm);
 #endif /* __KVM_NVHE_HYPERVISOR__ */
 
 void force_vm_exit(const cpumask_t *mask);
-void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
 int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
 void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
arch/arm64/kvm/mmu.c
@@ -504,10 +504,11 @@ static void stage2_unmap_memslot(struct kvm *kvm,
 	 * +--------------------------------------------+
 	 */
 	do {
-		struct vm_area_struct *vma = find_vma(current->mm, hva);
+		struct vm_area_struct *vma;
 		hva_t vm_start, vm_end;
 
-		if (!vma || vma->vm_start >= reg_end)
+		vma = find_vma_intersection(current->mm, hva, reg_end);
+		if (!vma)
 			break;
 
 		/*
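For context, the open-coded "!vma || vma->vm_start >= reg_end" check and the
find_vma_intersection() call are equivalent: at the time of this series,
find_vma_intersection() was a thin inline wrapper in include/linux/mm.h that
performs exactly this filtering on top of find_vma():

static inline struct vm_area_struct *
find_vma_intersection(struct mm_struct *mm,
		      unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *vma = find_vma(mm, start_addr);

	/* Discard a VMA that begins at or beyond the end of the range */
	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

The same conversion is applied again below in kvm_arch_prepare_memory_region().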
@@ -638,7 +639,7 @@ static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_
  * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
  * serializing operations for VM memory regions.
  */
-void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
+static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
 {
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
@@ -925,10 +926,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
 	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
 	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
+	 *
+	 * Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is
+	 * used to avoid unnecessary overhead introduced to locate the memory
+	 * slot because it's always fixed even @gfn is adjusted for huge pages.
 	 */
 	smp_rmb();
 
-	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
+	pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
+				   write_fault, &writable, NULL);
 	if (pfn == KVM_PFN_ERR_HWPOISON) {
 		kvm_send_hwpoison_signal(hva, vma_shift);
 		return 0;
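For context, gfn_to_pfn_prot() resolves the memslot from the gfn on every
call, a lookup user_mem_abort() has already performed. As of this series its
definition in virt/kvm/kvm_main.c is just that memslot lookup plus the call
used above, so passing the cached @memslot is equivalent but cheaper:

kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
			  bool *writable)
{
	/* gfn_to_memslot() is the lookup the fault handler can now skip */
	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
				    write_fault, writable, NULL);
}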
@@ -994,7 +1000,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	/* Mark the page dirty only if the fault is handled successfully */
 	if (writable && !ret) {
 		kvm_set_pfn_dirty(pfn);
-		mark_page_dirty(kvm, gfn);
+		mark_page_dirty_in_slot(kvm, memslot, gfn);
 	}
 
 out_unlock:
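The same reasoning applies here: mark_page_dirty() is a wrapper in
virt/kvm/kvm_main.c that redoes the gfn-to-memslot lookup before delegating,
so calling mark_page_dirty_in_slot() directly with the cached @memslot saves
that work:

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	mark_page_dirty_in_slot(kvm, memslot, gfn);
}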
@@ -1424,10 +1430,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	 * +--------------------------------------------+
 	 */
 	do {
-		struct vm_area_struct *vma = find_vma(current->mm, hva);
+		struct vm_area_struct *vma;
 		hva_t vm_start, vm_end;
 
-		if (!vma || vma->vm_start >= reg_end)
+		vma = find_vma_intersection(current->mm, hva, reg_end);
+		if (!vma)
 			break;
 
 		/*