KVM/arm64 fixes for 5.18, take #2
- Take care of faults occurring between the PARange and IPA range by
  injecting an exception
- Fix S2 faults taken from a host EL0 in protected mode
- Work around an Oops caused by a PMU access from a 32-bit guest when no
  PMU has been created. This is a temporary bodge until we fix it for good.
-----BEGIN PGP SIGNATURE-----
iQJDBAABCgAtFiEEn9UcU+C1Yxj9lZw9I9DQutE9ekMFAmJsA9kPHG1hekBrZXJu
ZWwub3JnAAoJECPQ0LrRPXpD/FYP/1G0ctyOaZDWC90sZVtBmVfO9Sc1xiZ2Kael
3+79N96ncFHp8oKTDz4zcSvs2hDG+9tUhG54MVr1DnPuO5rQubEGr+inYnb1cQfr
j0qBxvVAyP9wnQmtPeTvbHJCumB3RCm5d1tDLhmr4vnh5vBOn36fpCEhKhuv/2ib
xsXE8rhL0PjRV+NV5KAlO5bJuUU0F8OaZ8612yyO0ExtV+6t1WQGsIBhf3aBRhAY
oaYG2CmSgsifW9fgXdz/CVzRdPGV4E/hhAcQbY3CmzMbzlO6tqbOIK1NoMivoNIg
smKMJNhnaVLSybprAcQvAG9H1vbGSQWLurd42EvZIzn59+ILFHW90fYmW4QM69P3
mqOW40RXlZ/8BwIVEftzyZ8J8XNTpaDeGZ4D1aHY6yvKTd7ApiiU3OZOrYgZ7H/a
d/2W3tQfV9MlifJjNFZDWj8fMqxLifcZksvMfwbl775B/9SO66UHx00qFhZxQ5nI
7A8RKyB2jRRM/ndb337XMWoHQVwjof4CUZG1XuLSyYtb1K9joROoLD0hiNYTJOek
QxT3T+g5FF3YpxrB270lKA4xJ6fUqUgAisThY5AZnK3Ezw8udx18ADT+X76/OqqQ
GTxG3v45YObjA2t4xSk8TszFg83JubsCGKYQLKEbdMsfBIkhvT+fyrW4fCxODGHH
r/rK7iuK
=Ejoh
-----END PGP SIGNATURE-----

Merge tag 'kvmarm-fixes-5.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
Commit 484c22df5a
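For orientation before the diffs: a guest's IPA space is fixed when the VM is created, and it can be narrower than the hardware PARange, which is what creates the gap the first fix deals with. Below is a minimal userspace sketch of where that IPA width is chosen; KVM_CAP_ARM_VM_IPA_SIZE and KVM_VM_TYPE_ARM_IPA_SIZE() are the real KVM ABI, but the program itself is illustrative and elides error handling.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0)
		return 1;

	/* Largest guest IPA size this host supports (0 means the 40-bit default). */
	int max_ipa = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
	printf("max guest IPA bits: %d\n", max_ipa);

	/* Create a VM with a deliberately small 36-bit IPA space; stage-2
	 * translation faults reported above 2^36 are what the fixes below
	 * now turn into injected guest aborts. */
	int vm = ioctl(kvm, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(36));
	return vm < 0 ? 1 : 0;
}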
@@ -40,6 +40,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
 
 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
 
@@ -198,15 +198,15 @@ SYM_CODE_START(__kvm_hyp_host_vector)
 	invalid_host_el2_vect			// FIQ EL2h
 	invalid_host_el2_vect			// Error EL2h
 
-	host_el1_sync_vect			// Synchronous 64-bit EL1
-	invalid_host_el1_vect			// IRQ 64-bit EL1
-	invalid_host_el1_vect			// FIQ 64-bit EL1
-	invalid_host_el1_vect			// Error 64-bit EL1
+	host_el1_sync_vect			// Synchronous 64-bit EL1/EL0
+	invalid_host_el1_vect			// IRQ 64-bit EL1/EL0
+	invalid_host_el1_vect			// FIQ 64-bit EL1/EL0
+	invalid_host_el1_vect			// Error 64-bit EL1/EL0
 
-	invalid_host_el1_vect			// Synchronous 32-bit EL1
-	invalid_host_el1_vect			// IRQ 32-bit EL1
-	invalid_host_el1_vect			// FIQ 32-bit EL1
-	invalid_host_el1_vect			// Error 32-bit EL1
+	host_el1_sync_vect			// Synchronous 32-bit EL1/EL0
+	invalid_host_el1_vect			// IRQ 32-bit EL1/EL0
+	invalid_host_el1_vect			// FIQ 32-bit EL1/EL0
+	invalid_host_el1_vect			// Error 32-bit EL1/EL0
 SYM_CODE_END(__kvm_hyp_host_vector)
 
 /*
@@ -145,6 +145,34 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 	inject_abt64(vcpu, true, addr);
 }
 
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
+{
+	unsigned long addr, esr;
+
+	addr = kvm_vcpu_get_fault_ipa(vcpu);
+	addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+	if (kvm_vcpu_trap_is_iabt(vcpu))
+		kvm_inject_pabt(vcpu, addr);
+	else
+		kvm_inject_dabt(vcpu, addr);
+
+	/*
+	 * If AArch64 or LPAE, set FSC to 0 to indicate an Address
+	 * Size Fault at level 0, as if exceeding PARange.
+	 *
+	 * Non-LPAE guests will only get the external abort, as there
+	 * is no way to describe the ASF.
+	 */
+	if (vcpu_el1_is_32bit(vcpu) &&
+	    !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
+		return;
+
+	esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
+	esr &= ~GENMASK_ULL(5, 0);
+	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+}
+
 /**
  * kvm_inject_undefined - inject an undefined instruction into the guest
  * @vcpu: The vCPU in which to inject the exception
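For readers following the bit arithmetic at the end of kvm_inject_size_fault(): GENMASK_ULL(5, 0) is 0x3f, i.e. ESR_ELx[5:0], the fault status code, and clearing it leaves FSC = 0b000000, which the architecture defines as an Address Size fault, level 0. A standalone sketch of just that mask operation, with a made-up example ESR value (plain userspace C, not kernel code):

#include <stdio.h>
#include <stdint.h>

#define ESR_FSC_MASK 0x3fULL /* ESR_ELx[5:0], the fault status code */

int main(void)
{
	uint64_t esr = 0x96000045; /* hypothetical data-abort ESR, FSC = 0b000101 */

	printf("FSC before: 0x%02llx\n", (unsigned long long)(esr & ESR_FSC_MASK));
	esr &= ~ESR_FSC_MASK; /* FSC = 0b000000: Address Size fault, level 0 */
	printf("FSC after:  0x%02llx\n", (unsigned long long)(esr & ESR_FSC_MASK));
	return 0;
}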
@@ -1337,6 +1337,25 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
+	if (fault_status == FSC_FAULT) {
+		/* Beyond sanitised PARange (which is the IPA limit) */
+		if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
+			kvm_inject_size_fault(vcpu);
+			return 1;
+		}
+
+		/* Falls between the IPA range and the PARange? */
+		if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
+			fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+			if (is_iabt)
+				kvm_inject_pabt(vcpu, fault_ipa);
+			else
+				kvm_inject_dabt(vcpu, fault_ipa);
+			return 1;
+		}
+	}
+
 	/* Synchronous External Abort? */
 	if (kvm_vcpu_abt_issea(vcpu)) {
 		/*
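To make the two range checks above concrete, here is a standalone sketch with assumed numbers: a host whose sanitised PARange (the KVM IPA limit) is 48 bits, and a VM configured with a 40-bit stage-2 input range. classify() is a hypothetical helper that only mirrors the branch structure of the hunk, not kernel code.

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

static const char *classify(uint64_t fault_ipa, unsigned ipa_limit, unsigned ia_bits)
{
	if (fault_ipa >= BIT_ULL(ipa_limit))
		return "size fault injected (beyond sanitised PARange)";
	if (fault_ipa >= BIT_ULL(ia_bits))
		return "external abort injected (between IPA range and PARange)";
	return "normal stage-2 fault handling";
}

int main(void)
{
	printf("%s\n", classify(BIT_ULL(48), 48, 40)); /* beyond limit: size fault */
	printf("%s\n", classify(BIT_ULL(44), 48, 40)); /* in the gap: abort */
	printf("%s\n", classify(BIT_ULL(39), 48, 40)); /* in range: normal path */
	return 0;
}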
@@ -177,6 +177,9 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return 0;
+
 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 
 	if (kvm_pmu_pmc_is_chained(pmc) &&
@@ -198,6 +201,9 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 {
 	u64 reg;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
 	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
 	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
@@ -322,6 +328,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
 		return;
 
@@ -357,7 +366,7 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;
 
-	if (!val)
+	if (!kvm_vcpu_has_pmu(vcpu) || !val)
 		return;
 
 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -527,6 +536,9 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	int i;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
 		return;
 
@@ -576,6 +588,9 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
 	int i;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	if (val & ARMV8_PMU_PMCR_E) {
 		kvm_pmu_enable_counter_mask(vcpu,
 		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
@@ -739,6 +754,9 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 {
 	u64 reg, mask;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	mask = ARMV8_PMU_EVTYPE_MASK;
 	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
 	mask |= kvm_pmu_event_mask(vcpu->kvm);
@@ -827,6 +845,9 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 	u64 val, mask = 0;
 	int base, i, nr_events;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return 0;
+
 	if (!pmceid1) {
 		val = read_sysreg(pmceid0_el0);
 		base = 0;
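Context for all the kvm_vcpu_has_pmu() guards above: a vCPU only has a PMU if userspace asked for one at init time, so every emulation accessor must tolerate being called without one, which is exactly what the Oops from a 32-bit guest hit. A userspace-side sketch of where that choice is made (vm_fd/vcpu_fd assumed to exist, error handling trimmed; the ioctls and the KVM_ARM_VCPU_PMU_V3 feature bit are the real ABI):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vcpu_init_with_pmu(int vm_fd, int vcpu_fd, int want_pmu)
{
	struct kvm_vcpu_init init;

	/* Start from the host's preferred vCPU target. */
	memset(&init, 0, sizeof(init));
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;

	/* Only with this bit set does the vCPU get a PMU; without it,
	 * the guards above make every kvm_pmu_* helper a no-op. */
	if (want_pmu)
		init.features[0] |= 1U << KVM_ARM_VCPU_PMU_V3;

	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}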