Merge tag 'kvm-arm-fixes-for-v4.19-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm

Fixes for KVM/ARM for Linux v4.19 v2:

- Fix a VFP corruption in 32-bit guest
- Add missing cache invalidation for CoW pages
- Two small cleanups
This commit is contained in:
commit 564ad0aa85
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -223,7 +223,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 			      struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
 			unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -61,8 +61,7 @@ struct kvm_arch {
 	u64    vmid_gen;
 	u32    vmid;
 
-	/* 1-level 2nd stage table and lock */
-	spinlock_t pgd_lock;
+	/* 1-level 2nd stage table, protected by kvm->mmu_lock */
 	pgd_t *pgd;
 
 	/* VTTBR value associated with above pgd and vmid */
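The comment rewrite above is one of the two small cleanups: the stage-2 pgd was already serialized by the generic kvm->mmu_lock, so the arch-private pgd_lock never guarded anything the generic lock did not. A minimal sketch of the pattern the new comment points at; stage2_modify_example is a hypothetical name for illustration, not an upstream function:

	/* Illustrative sketch, not upstream code: writers of the stage-2
	 * tables already run under the generic kvm->mmu_lock, so a second,
	 * arch-private spinlock around kvm->arch.pgd is redundant. */
	static void stage2_modify_example(struct kvm *kvm)
	{
		spin_lock(&kvm->mmu_lock);
		/* ... walk and update the tables hanging off kvm->arch.pgd ... */
		spin_unlock(&kvm->mmu_lock);
	}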
@@ -357,7 +356,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 			      struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
 			unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
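This hunk and the matching MIPS/x86 hunks below are the other cleanup: kvm_unmap_hva() has no remaining callers, because the generic MMU-notifier glue only ever invalidates ranges. A single page is just a degenerate range, as the hedged sketch below shows; unmap_single_hva is a hypothetical helper, not upstream code:

	/* Hypothetical helper, for illustration only: a one-page unmap is a
	 * one-page range, so the dedicated single-page entry point adds
	 * nothing over the range API. */
	static int unmap_single_hva(struct kvm *kvm, unsigned long hva)
	{
		return kvm_unmap_hva_range(kvm, hva, hva + PAGE_SIZE);
	}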
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -98,8 +98,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
 	val = read_sysreg(cpacr_el1);
 	val |= CPACR_EL1_TTA;
 	val &= ~CPACR_EL1_ZEN;
-	if (!update_fp_enabled(vcpu))
+	if (!update_fp_enabled(vcpu)) {
 		val &= ~CPACR_EL1_FPEN;
+		__activate_traps_fpsimd32(vcpu);
+	}
 
 	write_sysreg(val, cpacr_el1);
 
@@ -114,8 +116,10 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 
 	val = CPTR_EL2_DEFAULT;
 	val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
-	if (!update_fp_enabled(vcpu))
+	if (!update_fp_enabled(vcpu)) {
 		val |= CPTR_EL2_TFP;
+		__activate_traps_fpsimd32(vcpu);
+	}
 
 	write_sysreg(val, cptr_el2);
 }
@@ -129,7 +133,6 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
 		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
 
-	__activate_traps_fpsimd32(vcpu);
 	if (has_vhe())
 		activate_traps_vhe(vcpu);
 	else
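The three switch.c hunks above are the 32-bit VFP corruption fix. __activate_traps_fpsimd32() forces FPEXC32_EL2.EN so that a 32-bit guest's FP access traps to EL2 instead of being blocked at EL1, but running it unconditionally on every guest entry clobbered the guest's own FPEXC setting even when FP was not trapped and the guest's VFP state was live. Moving the call into the !update_fp_enabled() branches restricts it to entries where FP really is trapped. A hedged sketch of the relocated helper, paraphrased from the v4.19-era switch.c; the predicates and bit position are from memory, so treat them as an approximation:

	/* Paraphrased sketch of the relocated helper. Per the ARM ARM, a trap
	 * is taken to EL2 only if the access would not already trap to EL1,
	 * so when CPTR_EL2.TFP / CPACR_EL1.FPEN traps are armed we must also
	 * set FPEXC32_EL2.EN for a 32-bit guest. Doing this while FP was
	 * untrapped, with the guest's VFP state live, was the corruption
	 * being fixed. */
	static void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
	{
		if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd())
			write_sysreg(1 << 30, fpexc32_el2);	/* FPEXC32_EL2.EN */
	}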
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -931,7 +931,6 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
 						   bool write);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
 			unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -512,16 +512,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
 	return 1;
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-	unsigned long end = hva + PAGE_SIZE;
-
-	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
-
-	kvm_mips_callbacks->flush_shadow_all(kvm);
-	return 0;
-}
-
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
 	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1443,7 +1443,6 @@ asmlinkage void kvm_spurious_fault(void);
 	____kvm_handle_fault_on_reboot(insn, "")
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1853,11 +1853,6 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-	return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
-}
-
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
 	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1817,18 +1817,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 	return 0;
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-	unsigned long end = hva + PAGE_SIZE;
-
-	if (!kvm->arch.pgd)
-		return 0;
-
-	trace_kvm_unmap_hva(hva);
-	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
-	return 0;
-}
-
 int kvm_unmap_hva_range(struct kvm *kvm,
 			unsigned long start, unsigned long end)
 {
@@ -1860,13 +1848,20 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
 	unsigned long end = hva + PAGE_SIZE;
+	kvm_pfn_t pfn = pte_pfn(pte);
 	pte_t stage2_pte;
 
 	if (!kvm->arch.pgd)
 		return;
 
 	trace_kvm_set_spte_hva(hva);
-	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
+
+	/*
+	 * We've moved a page around, probably through CoW, so let's treat it
+	 * just like a translation fault and clean the cache to the PoC.
+	 */
+	clean_dcache_guest_page(pfn, PAGE_SIZE);
+	stage2_pte = pfn_pte(pfn, PAGE_S2);
 	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
 }
 
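This hunk is the missing cache maintenance for CoW pages from the pull summary: when the host replaces a guest page, typically by breaking a copy-on-write mapping, the new copy can still sit dirty in the D-cache above the Point of Coherency, and a guest accessing memory with its caches disabled would read the stale underlying data through the fresh stage-2 PTE. Cleaning before installing the mapping mirrors what the translation-fault path already does. A hedged sketch of what the arm64 flavor of clean_dcache_guest_page() reduces to, paraphrased from the v4.19-era headers rather than verbatim:

	/* Paraphrased sketch: write the CoW copy's cache lines back to the
	 * Point of Coherency so non-cacheable and DMA accesses from the
	 * guest observe the new data. */
	static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
	{
		void *va = page_address(pfn_to_page(pfn));

		kvm_flush_dcache_to_poc(va, size);
	}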
--- a/virt/kvm/arm/trace.h
+++ b/virt/kvm/arm/trace.h
@@ -134,21 +134,6 @@ TRACE_EVENT(kvm_mmio_emulate,
 		  __entry->vcpu_pc, __entry->instr, __entry->cpsr)
 );
 
-TRACE_EVENT(kvm_unmap_hva,
-	TP_PROTO(unsigned long hva),
-	TP_ARGS(hva),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long,	hva		)
-	),
-
-	TP_fast_assign(
-		__entry->hva = hva;
-	),
-
-	TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
-);
-
 TRACE_EVENT(kvm_unmap_hva_range,
 	TP_PROTO(unsigned long start, unsigned long end),
 	TP_ARGS(start, end),