KVM: arm64: Move AArch32 exceptions over to AArch64 sysregs
The use of the AArch32-specific accessors has always been a bit
annoying on 64bit, and it is time for a change.

Let's move the AArch32 exception injection over to the AArch64
encoding, which requires us to split the two halves of FAR_EL1 into
DFAR and IFAR. This enables us to drop the preempt_disable() games on
VHE, and to kill the last user of the vcpu_cp15() macro.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Parent: ca4e514774
Commit: 4ff3fc316d
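Before the diff itself, a quick standalone sketch of the register layout this change relies on: when EL1 runs AArch32, the architecture maps DFAR onto FAR_EL1[31:0] and IFAR onto FAR_EL1[63:32], so both 32-bit fault-address registers live inside the single 64-bit sysreg. The helper names (set_dfar/set_ifar, GENMASK64) below are illustrative only, not kernel API:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK64(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Fold a 32-bit DFAR into the low half of a FAR_EL1 image. */
static uint64_t set_dfar(uint64_t far, uint32_t addr)
{
	far &= GENMASK64(63, 32);	/* preserve the IFAR half */
	return far | addr;
}

/* Fold a 32-bit IFAR into the high half of a FAR_EL1 image. */
static uint64_t set_ifar(uint64_t far, uint32_t addr)
{
	far &= GENMASK64(31, 0);	/* preserve the DFAR half */
	return far | ((uint64_t)addr << 32);
}

int main(void)
{
	uint64_t far = 0;

	far = set_dfar(far, 0xdead0000);	/* data-abort address */
	far = set_ifar(far, 0x00c0ffee);	/* prefetch-abort address */
	printf("FAR_EL1 = %#018" PRIx64 "\n", far);	/* 0x00c0ffeedead0000 */
	return 0;
}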
@@ -562,7 +562,6 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
 #define CPx_BIAS	IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
 
 #define vcpu_cp14(v,r)	((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
-#define vcpu_cp15(v,r)	((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
 
 struct kvm_vm_stat {
 	ulong remote_tlb_flush;
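The vcpu_cp14()/vcpu_cp15() macros index a u32 view of the 64-bit sysreg storage; XORing the index with CPx_BIAS compensates for big-endian hosts, where the two 32-bit halves of each u64 sit in swapped slots. A self-contained illustration of the trick in plain C (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	union {
		uint64_t sysreg;
		uint32_t copro[2];
	} r = { .sysreg = 0x1122334455667788ULL };
	/* 1 on a big-endian build, 0 on little-endian (mimics CPx_BIAS) */
	int cpx_bias = ((union { uint16_t u; uint8_t b; }){ .u = 1 }.b == 0);

	/* Index 0 must always name the architecturally "low" word. */
	printf("low word:  %#010x\n", r.copro[0 ^ cpx_bias]);	/* 0x55667788 */
	printf("high word: %#010x\n", r.copro[1 ^ cpx_bias]);	/* 0x11223344 */
	return 0;
}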
@@ -69,26 +69,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 #define DFSR_FSC_EXTABT_LPAE	0x10
 #define DFSR_FSC_EXTABT_nLPAE	0x08
 #define DFSR_LPAE		BIT(9)
-
-static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
-{
-	preempt_disable();
-	if (vcpu->arch.sysregs_loaded_on_cpu) {
-		kvm_arch_vcpu_put(vcpu);
-		return true;
-	}
-
-	preempt_enable();
-	return false;
-}
-
-static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
-{
-	if (loaded) {
-		kvm_arch_vcpu_load(vcpu, smp_processor_id());
-		preempt_enable();
-	}
-}
+#define TTBCR_EAE		BIT(31)
 
 static void inject_undef32(struct kvm_vcpu *vcpu)
 {
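The deleted pre_fault_synchronize()/post_fault_synchronize() pair existed only so the old code could poke the vcpu_cp15() copies behind the back of a vcpu whose sysregs were live on the CPU. The AArch64 accessors the new code uses make that dance unnecessary, because they already dispatch between the hardware register and the in-memory shadow. Roughly, as a simplified paraphrase of the existing arm64 accessor (not a verbatim copy):

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d;

	/* If the register currently lives in hardware, read it there... */
	if (vcpu->arch.sysregs_loaded_on_cpu &&
	    __vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	/* ...otherwise fall back to the memory-backed copy. */
	return __vcpu_sys_reg(vcpu, reg);
}

vcpu_write_sys_reg() mirrors this, which is why inject_abt32() below can run preemptibly.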
@@ -100,39 +81,36 @@ static void inject_undef32(struct kvm_vcpu *vcpu)
  * Modelled after TakeDataAbortException() and TakePrefetchAbortException
  * pseudocode.
  */
-static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
-			 unsigned long addr)
+static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
 {
-	u32 *far, *fsr;
-	bool is_lpae;
-	bool loaded;
+	u64 far;
+	u32 fsr;
 
-	loaded = pre_fault_synchronize(vcpu);
+	/* Give the guest an IMPLEMENTATION DEFINED exception */
+	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
+		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
+	} else {
+		/* no need to shuffle FS[4] into DFSR[10] as its 0 */
+		fsr = DFSR_FSC_EXTABT_nLPAE;
+	}
+
+	far = vcpu_read_sys_reg(vcpu, FAR_EL1);
 
 	if (is_pabt) {
 		vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_IABT |
 				     KVM_ARM64_PENDING_EXCEPTION);
-		far = &vcpu_cp15(vcpu, c6_IFAR);
-		fsr = &vcpu_cp15(vcpu, c5_IFSR);
+		far &= GENMASK(31, 0);
+		far |= (u64)addr << 32;
+		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
 	} else { /* !iabt */
 		vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_DABT |
 				     KVM_ARM64_PENDING_EXCEPTION);
-		far = &vcpu_cp15(vcpu, c6_DFAR);
-		fsr = &vcpu_cp15(vcpu, c5_DFSR);
+		far &= GENMASK(63, 32);
+		far |= addr;
+		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
 	}
 
-	*far = addr;
-
-	/* Give the guest an IMPLEMENTATION DEFINED exception */
-	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
-	if (is_lpae) {
-		*fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
-	} else {
-		/* no need to shuffle FS[4] into DFSR[10] as its 0 */
-		*fsr = DFSR_FSC_EXTABT_nLPAE;
-	}
-
-	post_fault_synchronize(vcpu, loaded);
+	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
 }
 
 /**
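To see what the rewritten inject_abt32() actually hands the guest, here is a small runnable rendering of the FSR computation: TTBCR.EAE (bit 31, read through the mapped TCR_EL1) selects the LPAE long-descriptor format, which sets DFSR[9] and uses the 0x10 external-abort code, while the short-descriptor format uses 0x08. The constants mirror the patch's #defines; extabt_fsr() is an illustrative name, not a kernel function:

#include <stdint.h>
#include <stdio.h>

#define DFSR_FSC_EXTABT_LPAE	0x10
#define DFSR_FSC_EXTABT_nLPAE	0x08
#define DFSR_LPAE		(1U << 9)
#define TTBCR_EAE		(1U << 31)

static uint32_t extabt_fsr(uint32_t ttbcr)
{
	if (ttbcr & TTBCR_EAE)
		return DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	/* no need to shuffle FS[4] into DFSR[10] as it is 0 */
	return DFSR_FSC_EXTABT_nLPAE;
}

int main(void)
{
	printf("LPAE:  %#x\n", extabt_fsr(TTBCR_EAE));	/* 0x210 */
	printf("short: %#x\n", extabt_fsr(0));		/* 0x8 */
	return 0;
}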