arm: Remove 32bit KVM host support

That's it. Remove all references to KVM itself, and document
that although it is no more, the ABI between SVC and HYP still
exists.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Vladimir Murzin <vladimir.murzin@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Marc Zyngier 2020-01-24 22:42:15 +00:00
Parent bb7c62bcb8
Commit 541ad0150c
37 changed files: 5 additions and 6046 deletions


@@ -11,6 +11,11 @@ hypervisor when running as a guest (under Xen, KVM or any other
hypervisor), or any hypervisor-specific interaction when the kernel is
used as a host.
Note: KVM/arm has been removed from the kernel. The API described
here is still valid though, as it allows the kernel to kexec when
booted at HYP. It can also be used by a hypervisor other than KVM
if necessary.
On arm and arm64 (without VHE), the kernel doesn't run in hypervisor
mode, but still needs to interact with it, allowing a built-in
hypervisor to be either installed or torn down.
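For illustration only (this is not part of the commit): a kernel booted at HYP reaches the resident stub from SVC with an HVC, conventionally passing a function identifier in r0 and any arguments in the registers that follow. The sketch below shows what such a call might look like in C under that assumption; HYP_STUB_FN_SOFT_RESTART and the helper name are hypothetical placeholders, not the kernel's real definitions.
/*
 * Minimal sketch of a call into the HYP stub from SVC mode, assuming the
 * convention above: function identifier in r0, arguments in r1 onwards,
 * trap with "hvc #0". HYP_STUB_FN_SOFT_RESTART is a placeholder value,
 * not an actual kernel constant. Building this needs an assembler that
 * accepts the virtualization extensions (e.g. -march=armv7ve).
 */
#define HYP_STUB_FN_SOFT_RESTART	2UL	/* placeholder function ID */

static inline void hyp_stub_soft_restart(unsigned long entry)
{
	register unsigned long fn  asm("r0") = HYP_STUB_FN_SOFT_RESTART;
	register unsigned long arg asm("r1") = entry;

	/* Trap from SVC into whatever code is installed at HYP. */
	asm volatile("hvc #0"
		     : "+r" (fn)
		     : "r" (arg)
		     : "memory");
}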


@@ -1,239 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#ifndef __ARM_KVM_ARM_H__
#define __ARM_KVM_ARM_H__
#include <linux/const.h>
#include <linux/types.h>
/* Hyp Configuration Register (HCR) bits */
#define HCR_TGE (1 << 27)
#define HCR_TVM (1 << 26)
#define HCR_TTLB (1 << 25)
#define HCR_TPU (1 << 24)
#define HCR_TPC (1 << 23)
#define HCR_TSW (1 << 22)
#define HCR_TAC (1 << 21)
#define HCR_TIDCP (1 << 20)
#define HCR_TSC (1 << 19)
#define HCR_TID3 (1 << 18)
#define HCR_TID2 (1 << 17)
#define HCR_TID1 (1 << 16)
#define HCR_TID0 (1 << 15)
#define HCR_TWE (1 << 14)
#define HCR_TWI (1 << 13)
#define HCR_DC (1 << 12)
#define HCR_BSU (3 << 10)
#define HCR_BSU_IS (1 << 10)
#define HCR_FB (1 << 9)
#define HCR_VA (1 << 8)
#define HCR_VI (1 << 7)
#define HCR_VF (1 << 6)
#define HCR_AMO (1 << 5)
#define HCR_IMO (1 << 4)
#define HCR_FMO (1 << 3)
#define HCR_PTW (1 << 2)
#define HCR_SWIO (1 << 1)
#define HCR_VM 1
/*
* The bits we set in HCR:
* TAC: Trap ACTLR
* TSC: Trap SMC
* TVM: Trap VM ops (until MMU and caches are on)
* TSW: Trap cache operations by set/way
* TWI: Trap WFI
* TWE: Trap WFE
* TIDCP: Trap L2CTLR/L2ECTLR
* BSU_IS: Upgrade barriers to the inner shareable domain
* FB: Force broadcast of all maintenance operations
* AMO: Override CPSR.A and enable signaling with VA
* IMO: Override CPSR.I and enable signaling with VI
* FMO: Override CPSR.F and enable signaling with VF
* SWIO: Turn set/way invalidates into set/way clean+invalidate
*/
#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
/* System Control Register (SCTLR) bits */
#define SCTLR_TE (1 << 30)
#define SCTLR_EE (1 << 25)
#define SCTLR_V (1 << 13)
/* Hyp System Control Register (HSCTLR) bits */
#define HSCTLR_TE (1 << 30)
#define HSCTLR_EE (1 << 25)
#define HSCTLR_FI (1 << 21)
#define HSCTLR_WXN (1 << 19)
#define HSCTLR_I (1 << 12)
#define HSCTLR_C (1 << 2)
#define HSCTLR_A (1 << 1)
#define HSCTLR_M 1
#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
/* TTBCR and HTCR Registers bits */
#define TTBCR_EAE (1 << 31)
#define TTBCR_IMP (1 << 30)
#define TTBCR_SH1 (3 << 28)
#define TTBCR_ORGN1 (3 << 26)
#define TTBCR_IRGN1 (3 << 24)
#define TTBCR_EPD1 (1 << 23)
#define TTBCR_A1 (1 << 22)
#define TTBCR_T1SZ (7 << 16)
#define TTBCR_SH0 (3 << 12)
#define TTBCR_ORGN0 (3 << 10)
#define TTBCR_IRGN0 (3 << 8)
#define TTBCR_EPD0 (1 << 7)
#define TTBCR_T0SZ (7 << 0)
#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
/* Hyp System Trap Register */
#define HSTR_T(x) (1 << x)
#define HSTR_TTEE (1 << 16)
#define HSTR_TJDBX (1 << 17)
/* Hyp Coprocessor Trap Register */
#define HCPTR_TCP(x) (1 << x)
#define HCPTR_TCP_MASK (0x3fff)
#define HCPTR_TASE (1 << 15)
#define HCPTR_TTA (1 << 20)
#define HCPTR_TCPAC (1 << 31)
/* Hyp Debug Configuration Register bits */
#define HDCR_TDRA (1 << 11)
#define HDCR_TDOSA (1 << 10)
#define HDCR_TDA (1 << 9)
#define HDCR_TDE (1 << 8)
#define HDCR_HPME (1 << 7)
#define HDCR_TPM (1 << 6)
#define HDCR_TPMCR (1 << 5)
#define HDCR_HPMN_MASK (0x1F)
/*
* The architecture supports 40-bit IPA as input to the 2nd stage translations
* and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
* space.
*/
#define KVM_PHYS_SHIFT (40)
#define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
/* Virtualization Translation Control Register (VTCR) bits */
#define VTCR_SH0 (3 << 12)
#define VTCR_ORGN0 (3 << 10)
#define VTCR_IRGN0 (3 << 8)
#define VTCR_SL0 (3 << 6)
#define VTCR_S (1 << 4)
#define VTCR_T0SZ (0xf)
#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
VTCR_S | VTCR_T0SZ)
#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */
#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */
#define KVM_VTCR_SL0 VTCR_SL_L1
/* stage-2 input address range defined as 2^(32-T0SZ) */
#define KVM_T0SZ (32 - KVM_PHYS_SHIFT)
#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ)
#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S)
/* Virtualization Translation Table Base Register (VTTBR) bits */
#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */
#define VTTBR_X (14 - KVM_T0SZ)
#else
#define VTTBR_X (5 - KVM_T0SZ)
#endif
#define VTTBR_CNP_BIT _AC(1, UL)
#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
#define VTTBR_VMID_SHIFT _AC(48, ULL)
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
/* Hyp Syndrome Register (HSR) bits */
#define HSR_EC_SHIFT (26)
#define HSR_EC (_AC(0x3f, UL) << HSR_EC_SHIFT)
#define HSR_IL (_AC(1, UL) << 25)
#define HSR_ISS (HSR_IL - 1)
#define HSR_ISV_SHIFT (24)
#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT)
#define HSR_SRT_SHIFT (16)
#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
#define HSR_CM (1 << 8)
#define HSR_FSC (0x3f)
#define HSR_FSC_TYPE (0x3c)
#define HSR_SSE (1 << 21)
#define HSR_WNR (1 << 6)
#define HSR_CV_SHIFT (24)
#define HSR_CV (_AC(1, UL) << HSR_CV_SHIFT)
#define HSR_COND_SHIFT (20)
#define HSR_COND (_AC(0xf, UL) << HSR_COND_SHIFT)
#define FSC_FAULT (0x04)
#define FSC_ACCESS (0x08)
#define FSC_PERM (0x0c)
#define FSC_SEA (0x10)
#define FSC_SEA_TTW0 (0x14)
#define FSC_SEA_TTW1 (0x15)
#define FSC_SEA_TTW2 (0x16)
#define FSC_SEA_TTW3 (0x17)
#define FSC_SECC (0x18)
#define FSC_SECC_TTW0 (0x1c)
#define FSC_SECC_TTW1 (0x1d)
#define FSC_SECC_TTW2 (0x1e)
#define FSC_SECC_TTW3 (0x1f)
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~0xf)
#define HSR_EC_UNKNOWN (0x00)
#define HSR_EC_WFI (0x01)
#define HSR_EC_CP15_32 (0x03)
#define HSR_EC_CP15_64 (0x04)
#define HSR_EC_CP14_MR (0x05)
#define HSR_EC_CP14_LS (0x06)
#define HSR_EC_CP_0_13 (0x07)
#define HSR_EC_CP10_ID (0x08)
#define HSR_EC_JAZELLE (0x09)
#define HSR_EC_BXJ (0x0A)
#define HSR_EC_CP14_64 (0x0C)
#define HSR_EC_SVC_HYP (0x11)
#define HSR_EC_HVC (0x12)
#define HSR_EC_SMC (0x13)
#define HSR_EC_IABT (0x20)
#define HSR_EC_IABT_HYP (0x21)
#define HSR_EC_DABT (0x24)
#define HSR_EC_DABT_HYP (0x25)
#define HSR_EC_MAX (0x3f)
#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
#define HSR_HVC_IMM_MASK ((_AC(1, UL) << 16) - 1)
#define HSR_DABT_S1PTW (_AC(1, UL) << 7)
#define HSR_DABT_CM (_AC(1, UL) << 8)
#define kvm_arm_exception_type \
{0, "RESET" }, \
{1, "UNDEFINED" }, \
{2, "SOFTWARE" }, \
{3, "PREF_ABORT" }, \
{4, "DATA_ABORT" }, \
{5, "IRQ" }, \
{6, "FIQ" }, \
{7, "HVC" }
#define HSRECN(x) { HSR_EC_##x, #x }
#define kvm_arm_exception_class \
HSRECN(UNKNOWN), HSRECN(WFI), HSRECN(CP15_32), HSRECN(CP15_64), \
HSRECN(CP14_MR), HSRECN(CP14_LS), HSRECN(CP_0_13), HSRECN(CP10_ID), \
HSRECN(JAZELLE), HSRECN(BXJ), HSRECN(CP14_64), HSRECN(SVC_HYP), \
HSRECN(HVC), HSRECN(SMC), HSRECN(IABT), HSRECN(IABT_HYP), \
HSRECN(DABT), HSRECN(DABT_HYP)
#endif /* __ARM_KVM_ARM_H__ */


@@ -1,77 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__
#include <asm/virt.h>
#define ARM_EXIT_WITH_ABORT_BIT 31
#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
#define ARM_EXCEPTION_IS_TRAP(x) \
(ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_PREF_ABORT || \
ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_DATA_ABORT || \
ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_HVC)
#define ARM_ABORT_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))
#define ARM_EXCEPTION_RESET 0
#define ARM_EXCEPTION_UNDEFINED 1
#define ARM_EXCEPTION_SOFTWARE 2
#define ARM_EXCEPTION_PREF_ABORT 3
#define ARM_EXCEPTION_DATA_ABORT 4
#define ARM_EXCEPTION_IRQ 5
#define ARM_EXCEPTION_FIQ 6
#define ARM_EXCEPTION_HVC 7
#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
/*
* The rr_lo_hi macro swaps a pair of registers depending on
* current endianness. It is used in conjunction with ldrd and strd
* instructions that load/store a 64-bit value from/to memory to/from
* a pair of registers which are used with the mrrc and mcrr instructions.
* If used with the ldrd/strd instructions, the a1 parameter is the first
* source/destination register and the a2 parameter is the second
* source/destination register. Note that the ldrd/strd instructions
* already swap the bytes within the words correctly according to the
* endianness setting, but the order of the registers needs to be effectively
* swapped when used with the mrrc/mcrr instructions.
*/
#ifdef CONFIG_CPU_ENDIAN_BE8
#define rr_lo_hi(a1, a2) a2, a1
#else
#define rr_lo_hi(a1, a2) a1, a2
#endif
#define kvm_ksym_ref(kva) (kva)
#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
/* no VHE on 32-bit :( */
static inline int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { BUG(); return 0; }
extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
extern void __init_stage2_translation(void);
extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);
#endif
#endif /* __ARM_KVM_ASM_H__ */


@@ -1,36 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Rusty Russell IBM Corporation
*/
#ifndef __ARM_KVM_COPROC_H__
#define __ARM_KVM_COPROC_H__
#include <linux/kvm_host.h>
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
struct kvm_coproc_target_table {
unsigned target;
const struct coproc_reg *table;
size_t num;
};
void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
void kvm_coproc_table_init(void);
struct kvm_one_reg;
int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
#endif /* __ARM_KVM_COPROC_H__ */


@@ -1,372 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#ifndef __ARM_KVM_EMULATE_H__
#define __ARM_KVM_EMULATE_H__
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/cputype.h>
/* arm64 compatibility macros */
#define PSR_AA32_MODE_FIQ FIQ_MODE
#define PSR_AA32_MODE_SVC SVC_MODE
#define PSR_AA32_MODE_ABT ABT_MODE
#define PSR_AA32_MODE_UND UND_MODE
#define PSR_AA32_T_BIT PSR_T_BIT
#define PSR_AA32_F_BIT PSR_F_BIT
#define PSR_AA32_I_BIT PSR_I_BIT
#define PSR_AA32_A_BIT PSR_A_BIT
#define PSR_AA32_E_BIT PSR_E_BIT
#define PSR_AA32_IT_MASK PSR_IT_MASK
#define PSR_AA32_GE_MASK 0x000f0000
#define PSR_AA32_DIT_BIT 0x00200000
#define PSR_AA32_PAN_BIT 0x00400000
#define PSR_AA32_SSBS_BIT 0x00800000
#define PSR_AA32_Q_BIT PSR_Q_BIT
#define PSR_AA32_V_BIT PSR_V_BIT
#define PSR_AA32_C_BIT PSR_C_BIT
#define PSR_AA32_Z_BIT PSR_Z_BIT
#define PSR_AA32_N_BIT PSR_N_BIT
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
{
return vcpu_reg(vcpu, reg_num);
}
unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);
static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu)
{
return *__vcpu_spsr(vcpu);
}
static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
*__vcpu_spsr(vcpu) = v;
}
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
return spsr;
}
static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
u8 reg_num)
{
return *vcpu_reg(vcpu, reg_num);
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
unsigned long val)
{
*vcpu_reg(vcpu, reg_num) = val;
}
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
kvm_inject_undef32(vcpu);
}
static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
kvm_inject_dabt32(vcpu, addr);
}
static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
kvm_inject_pabt32(vcpu, addr);
}
static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
return kvm_condition_valid32(vcpu);
}
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
kvm_skip_instr32(vcpu, is_wide_instr);
}
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr = HCR_GUEST_MASK;
}
static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu->arch.hcr;
}
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr &= ~HCR_TWE;
}
static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr |= HCR_TWE;
}
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
return true;
}
static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
}
static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
}
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
*vcpu_cpsr(vcpu) |= PSR_T_BIT;
}
static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
{
unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
}
static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
{
unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
return cpsr_mode > USR_MODE;
}
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.hsr;
}
static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
u32 hsr = kvm_vcpu_get_hsr(vcpu);
if (hsr & HSR_CV)
return (hsr & HSR_COND) >> HSR_COND_SHIFT;
return -1;
}
static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.hxfar;
}
static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
{
return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
}
static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
}
static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & (HSR_CM | HSR_WNR | HSR_FSC);
}
static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
}
static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
}
static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
return false;
}
static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
{
return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
}
static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
}
static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
}
/* Get Access Size from a data abort */
static inline unsigned int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
{
switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
case 0:
return 1;
case 1:
return 2;
case 2:
return 4;
default:
kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
return 4;
}
}
/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
}
static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
}
static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
}
static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
}
static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
}
static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
{
switch (kvm_vcpu_trap_get_fault(vcpu)) {
case FSC_SEA:
case FSC_SEA_TTW0:
case FSC_SEA_TTW1:
case FSC_SEA_TTW2:
case FSC_SEA_TTW3:
case FSC_SECC:
case FSC_SECC_TTW0:
case FSC_SECC_TTW1:
case FSC_SECC_TTW2:
case FSC_SECC_TTW3:
return true;
default:
return false;
}
}
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
if (kvm_vcpu_trap_is_iabt(vcpu))
return false;
return kvm_vcpu_dabt_iswrite(vcpu);
}
static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
}
static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
{
return false;
}
static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
bool flag)
{
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
*vcpu_cpsr(vcpu) |= PSR_E_BIT;
}
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
}
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
unsigned long data,
unsigned int len)
{
if (kvm_vcpu_is_be(vcpu)) {
switch (len) {
case 1:
return data & 0xff;
case 2:
return be16_to_cpu(data & 0xffff);
default:
return be32_to_cpu(data);
}
} else {
switch (len) {
case 1:
return data & 0xff;
case 2:
return le16_to_cpu(data & 0xffff);
default:
return le32_to_cpu(data);
}
}
}
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
unsigned long data,
unsigned int len)
{
if (kvm_vcpu_is_be(vcpu)) {
switch (len) {
case 1:
return data & 0xff;
case 2:
return cpu_to_be16(data & 0xffff);
default:
return cpu_to_be32(data);
}
} else {
switch (len) {
case 1:
return data & 0xff;
case 2:
return cpu_to_le16(data & 0xffff);
default:
return cpu_to_le32(data);
}
}
}
static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}
#endif /* __ARM_KVM_EMULATE_H__ */


@@ -1,456 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__
#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cputype.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/fpstate.h>
#include <kvm/arm_arch_timer.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
#define KVM_USER_MEM_SLOTS 32
#define KVM_HAVE_ONE_REG
#define KVM_HALT_POLL_NS_DEFAULT 500000
#define KVM_VCPU_MAX_FEATURES 2
#include <kvm/arm_vgic.h>
#ifdef CONFIG_ARM_GIC_V3
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
#else
#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
#endif
#define KVM_REQ_SLEEP \
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
static inline int kvm_arm_init_sve(void) { return 0; }
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
struct kvm_vmid {
/* The VMID generation used for the virt. memory system */
u64 vmid_gen;
u32 vmid;
};
struct kvm_arch {
/* The last vcpu id that ran on each physical CPU */
int __percpu *last_vcpu_ran;
/*
* Anything that is not used directly from assembly code goes
* here.
*/
/* The VMID generation used for the virt. memory system */
struct kvm_vmid vmid;
/* Stage-2 page table */
pgd_t *pgd;
phys_addr_t pgd_phys;
/* Interrupt controller */
struct vgic_dist vgic;
int max_vcpus;
/* Mandated version of PSCI */
u32 psci_version;
/*
* If we encounter a data abort without valid instruction syndrome
* information, report this to user space. User space can (and
* should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
* supported.
*/
bool return_nisv_io_abort_to_user;
};
#define KVM_NR_MEM_OBJS 40
/*
* We don't want allocation failures within the mmu code, so we preallocate
* enough memory for a single page fault in a cache.
*/
struct kvm_mmu_memory_cache {
int nobjs;
void *objects[KVM_NR_MEM_OBJS];
};
struct kvm_vcpu_fault_info {
u32 hsr; /* Hyp Syndrome Register */
u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
u32 hpfar; /* Hyp IPA Fault Address Register */
};
/*
* 0 is reserved as an invalid value.
* Order should be kept in sync with the save/restore code.
*/
enum vcpu_sysreg {
__INVALID_SYSREG__,
c0_MPIDR, /* MultiProcessor ID Register */
c0_CSSELR, /* Cache Size Selection Register */
c1_SCTLR, /* System Control Register */
c1_ACTLR, /* Auxiliary Control Register */
c1_CPACR, /* Coprocessor Access Control */
c2_TTBR0, /* Translation Table Base Register 0 */
c2_TTBR0_high, /* TTBR0 top 32 bits */
c2_TTBR1, /* Translation Table Base Register 1 */
c2_TTBR1_high, /* TTBR1 top 32 bits */
c2_TTBCR, /* Translation Table Base Control R. */
c3_DACR, /* Domain Access Control Register */
c5_DFSR, /* Data Fault Status Register */
c5_IFSR, /* Instruction Fault Status Register */
c5_ADFSR, /* Auxiliary Data Fault Status R */
c5_AIFSR, /* Auxiliary Instruction Fault Status R */
c6_DFAR, /* Data Fault Address Register */
c6_IFAR, /* Instruction Fault Address Register */
c7_PAR, /* Physical Address Register */
c7_PAR_high, /* PAR top 32 bits */
c9_L2CTLR, /* Cortex A15/A7 L2 Control Register */
c10_PRRR, /* Primary Region Remap Register */
c10_NMRR, /* Normal Memory Remap Register */
c12_VBAR, /* Vector Base Address Register */
c13_CID, /* Context ID Register */
c13_TID_URW, /* Thread ID, User R/W */
c13_TID_URO, /* Thread ID, User R/O */
c13_TID_PRIV, /* Thread ID, Privileged */
c14_CNTKCTL, /* Timer Control Register (PL1) */
c10_AMAIR0, /* Auxiliary Memory Attribute Indirection Reg0 */
c10_AMAIR1, /* Auxiliary Memory Attribute Indirection Reg1 */
NR_CP15_REGS /* Number of regs (incl. invalid) */
};
struct kvm_cpu_context {
struct kvm_regs gp_regs;
struct vfp_hard_struct vfp;
u32 cp15[NR_CP15_REGS];
};
struct kvm_host_data {
struct kvm_cpu_context host_ctxt;
};
typedef struct kvm_host_data kvm_host_data_t;
static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
/* The host's MPIDR is immutable, so let's set it up at boot time */
cpu_ctxt->cp15[c0_MPIDR] = read_cpuid_mpidr();
}
struct vcpu_reset_state {
unsigned long pc;
unsigned long r0;
bool be;
bool reset;
};
struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt;
int target; /* Processor target */
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
/* The CPU type we expose to the VM */
u32 midr;
/* HYP trapping configuration */
u32 hcr;
/* Exception Information */
struct kvm_vcpu_fault_info fault;
/* Host FP context */
struct kvm_cpu_context *host_cpu_context;
/* VGIC state */
struct vgic_cpu vgic_cpu;
struct arch_timer_cpu timer_cpu;
/*
* Anything that is not used directly from assembly code goes
* here.
*/
/* vcpu power-off state */
bool power_off;
/* Don't run the guest (internal implementation need) */
bool pause;
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
struct vcpu_reset_state reset_state;
/* Detect first run of a vcpu */
bool has_run_once;
};
struct kvm_vm_stat {
ulong remote_tlb_flush;
};
struct kvm_vcpu_stat {
u64 halt_successful_poll;
u64 halt_attempted_poll;
u64 halt_poll_invalid;
u64 halt_wakeup;
u64 hvc_exit_stat;
u64 wfe_exit_stat;
u64 wfi_exit_stat;
u64 mmio_exit_user;
u64 mmio_exit_kernel;
u64 exits;
};
#define vcpu_cp15(v,r) (v)->arch.ctxt.cp15[r]
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
unsigned long __kvm_call_hyp(void *hypfn, ...);
/*
* The has_vhe() part doesn't get emitted, but is used for type-checking.
*/
#define kvm_call_hyp(f, ...) \
do { \
if (has_vhe()) { \
f(__VA_ARGS__); \
} else { \
__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
} \
} while(0)
#define kvm_call_hyp_ret(f, ...) \
({ \
typeof(f(__VA_ARGS__)) ret; \
\
if (has_vhe()) { \
ret = f(__VA_ARGS__); \
} else { \
ret = __kvm_call_hyp(kvm_ksym_ref(f), \
##__VA_ARGS__); \
} \
\
ret; \
})
void force_vm_exit(const cpumask_t *mask);
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events);
int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events);
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index) {}
/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
unsigned long vector_ptr)
{
/*
* Call initialization code, and switch to the full blown HYP
* code. The init code doesn't need to preserve these
* registers, as r0-r3 are caller-saved scratch registers according to
* the AAPCS.
* Note that we slightly misuse the prototype by casting the
* stack pointer to a void *.
* The PGDs are always passed as the third argument, in order
* to be passed into r2-r3 to the init code (yes, this is
* compliant with the PCS!).
*/
__kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
}
static inline void __cpu_init_stage2(void)
{
kvm_call_hyp(__init_stage2_translation);
}
static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
return 0;
}
int kvm_perf_init(void);
int kvm_perf_teardown(void);
static inline long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
{
return SMCCC_RET_NOT_SUPPORTED;
}
static inline gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
{
return GPA_INVALID;
}
static inline void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
}
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
}
static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
return false;
}
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
static inline bool kvm_arch_requires_vhe(void) { return false; }
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_init_debug(void) {}
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
/*
* VFP/NEON switching is all done by the hyp switch code, so no need to
* coordinate with host context handling for this state:
*/
static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
#define KVM_BP_HARDEN_UNKNOWN -1
#define KVM_BP_HARDEN_WA_NEEDED 0
#define KVM_BP_HARDEN_NOT_REQUIRED 1
static inline int kvm_arm_harden_branch_predictor(void)
{
switch(read_cpuid_part()) {
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
case ARM_CPU_PART_BRAHMA_B15:
case ARM_CPU_PART_CORTEX_A12:
case ARM_CPU_PART_CORTEX_A15:
case ARM_CPU_PART_CORTEX_A17:
return KVM_BP_HARDEN_WA_NEEDED;
#endif
case ARM_CPU_PART_CORTEX_A7:
return KVM_BP_HARDEN_NOT_REQUIRED;
default:
return KVM_BP_HARDEN_UNKNOWN;
}
}
#define KVM_SSBD_UNKNOWN -1
#define KVM_SSBD_FORCE_DISABLE 0
#define KVM_SSBD_KERNEL 1
#define KVM_SSBD_FORCE_ENABLE 2
#define KVM_SSBD_MITIGATED 3
static inline int kvm_arm_have_ssbd(void)
{
/* No way to detect it yet, pretend it is not there. */
return KVM_SSBD_UNKNOWN;
}
static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);
static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
/*
* On 32bit ARM, VMs get a static 40bit IPA stage2 setup,
* so any non-zero value used as type is illegal.
*/
if (type)
return -EINVAL;
return 0;
}
static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
return -EINVAL;
}
static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
return true;
}
#endif /* __ARM_KVM_HOST_H__ */


@@ -1,127 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#ifndef __ARM_KVM_HYP_H__
#define __ARM_KVM_HYP_H__
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/cp15.h>
#include <asm/kvm_arm.h>
#include <asm/vfp.h>
#define __hyp_text __section(.hyp.text) notrace
#define __ACCESS_VFP(CRn) \
"mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
#define write_special(v, r) \
asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
#define read_special(r) ({ \
u32 __val; \
asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
__val; \
})
#define TTBR0 __ACCESS_CP15_64(0, c2)
#define TTBR1 __ACCESS_CP15_64(1, c2)
#define VTTBR __ACCESS_CP15_64(6, c2)
#define PAR __ACCESS_CP15_64(0, c7)
#define CNTP_CVAL __ACCESS_CP15_64(2, c14)
#define CNTV_CVAL __ACCESS_CP15_64(3, c14)
#define CNTVOFF __ACCESS_CP15_64(4, c14)
#define MIDR __ACCESS_CP15(c0, 0, c0, 0)
#define CSSELR __ACCESS_CP15(c0, 2, c0, 0)
#define VPIDR __ACCESS_CP15(c0, 4, c0, 0)
#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5)
#define SCTLR __ACCESS_CP15(c1, 0, c0, 0)
#define CPACR __ACCESS_CP15(c1, 0, c0, 2)
#define HCR __ACCESS_CP15(c1, 4, c1, 0)
#define HDCR __ACCESS_CP15(c1, 4, c1, 1)
#define HCPTR __ACCESS_CP15(c1, 4, c1, 2)
#define HSTR __ACCESS_CP15(c1, 4, c1, 3)
#define TTBCR __ACCESS_CP15(c2, 0, c0, 2)
#define HTCR __ACCESS_CP15(c2, 4, c0, 2)
#define VTCR __ACCESS_CP15(c2, 4, c1, 2)
#define DACR __ACCESS_CP15(c3, 0, c0, 0)
#define DFSR __ACCESS_CP15(c5, 0, c0, 0)
#define IFSR __ACCESS_CP15(c5, 0, c0, 1)
#define ADFSR __ACCESS_CP15(c5, 0, c1, 0)
#define AIFSR __ACCESS_CP15(c5, 0, c1, 1)
#define HSR __ACCESS_CP15(c5, 4, c2, 0)
#define DFAR __ACCESS_CP15(c6, 0, c0, 0)
#define IFAR __ACCESS_CP15(c6, 0, c0, 2)
#define HDFAR __ACCESS_CP15(c6, 4, c0, 0)
#define HIFAR __ACCESS_CP15(c6, 4, c0, 2)
#define HPFAR __ACCESS_CP15(c6, 4, c0, 4)
#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
#define BPIALLIS __ACCESS_CP15(c7, 0, c1, 6)
#define ICIMVAU __ACCESS_CP15(c7, 0, c5, 1)
#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
#define NMRR __ACCESS_CP15(c10, 0, c2, 1)
#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0)
#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1)
#define VBAR __ACCESS_CP15(c12, 0, c0, 0)
#define CID __ACCESS_CP15(c13, 0, c0, 1)
#define TID_URW __ACCESS_CP15(c13, 0, c0, 2)
#define TID_URO __ACCESS_CP15(c13, 0, c0, 3)
#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4)
#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2)
#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0)
#define CNTP_CTL __ACCESS_CP15(c14, 0, c2, 1)
#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1)
#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0)
#define VFP_FPEXC __ACCESS_VFP(FPEXC)
/* AArch64 compatibility macros, only for the timer so far */
#define read_sysreg_el0(r) read_sysreg(r##_EL0)
#define write_sysreg_el0(v, r) write_sysreg(v, r##_EL0)
#define SYS_CNTP_CTL_EL0 CNTP_CTL
#define SYS_CNTP_CVAL_EL0 CNTP_CVAL
#define SYS_CNTV_CTL_EL0 CNTV_CTL
#define SYS_CNTV_CVAL_EL0 CNTV_CVAL
#define cntvoff_el2 CNTVOFF
#define cnthctl_el2 CNTHCTL
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);
void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
void __sysreg_save_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
asmlinkage void __vfp_save_state(struct vfp_hard_struct *vfp);
asmlinkage void __vfp_restore_state(struct vfp_hard_struct *vfp);
static inline bool __vfp_enabled(void)
{
return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10)));
}
void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);
asmlinkage int __guest_enter(struct kvm_vcpu *vcpu,
struct kvm_cpu_context *host);
asmlinkage int __hyp_do_panic(const char *, int, u32);
#endif /* __ARM_KVM_HYP_H__ */


@@ -1,435 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__
#include <asm/memory.h>
#include <asm/page.h>
/*
* We directly use the kernel VA for the HYP, as we can directly share
* the mapping (HTTBR "covers" TTBR1).
*/
#define kern_hyp_va(kva) (kva)
/* Contrary to arm64, there is no need to generate a PC-relative address */
#define hyp_symbol_addr(s) \
({ \
typeof(s) *addr = &(s); \
addr; \
})
#ifndef __ASSEMBLY__
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/pgalloc.h>
#include <asm/stage2_pgtable.h>
/* Ensure compatibility with arm64 */
#define VA_BITS 32
#define kvm_phys_shift(kvm) KVM_PHYS_SHIFT
#define kvm_phys_size(kvm) (1ULL << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - 1ULL)
#define kvm_vttbr_baddr_mask(kvm) VTTBR_BADDR_MASK
#define stage2_pgd_size(kvm) (PTRS_PER_S2_PGD * sizeof(pgd_t))
int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
void __iomem **kaddr,
void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
void **haddr);
void free_hyp_pgds(void);
void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
phys_addr_t pa, unsigned long size, bool writable);
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
#define kvm_mk_pmd(ptep) __pmd(__pa(ptep) | PMD_TYPE_TABLE)
#define kvm_mk_pud(pmdp) __pud(__pa(pmdp) | PMD_TYPE_TABLE)
#define kvm_mk_pgd(pudp) ({ BUILD_BUG(); 0; })
#define kvm_pfn_pte(pfn, prot) pfn_pte(pfn, prot)
#define kvm_pfn_pmd(pfn, prot) pfn_pmd(pfn, prot)
#define kvm_pfn_pud(pfn, prot) (__pud(0))
#define kvm_pud_pfn(pud) ({ WARN_ON(1); 0; })
#define kvm_pmd_mkhuge(pmd) pmd_mkhuge(pmd)
/* No support for pud hugepages */
#define kvm_pud_mkhuge(pud) ( {WARN_ON(1); pud; })
/*
* The following kvm_*pud*() functions are provided strictly to allow
* sharing code with arm64. They should never be called in practice.
*/
static inline void kvm_set_s2pud_readonly(pud_t *pud)
{
WARN_ON(1);
}
static inline bool kvm_s2pud_readonly(pud_t *pud)
{
WARN_ON(1);
return false;
}
static inline void kvm_set_pud(pud_t *pud, pud_t new_pud)
{
WARN_ON(1);
}
static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
{
WARN_ON(1);
return pud;
}
static inline pud_t kvm_s2pud_mkexec(pud_t pud)
{
WARN_ON(1);
return pud;
}
static inline bool kvm_s2pud_exec(pud_t *pud)
{
WARN_ON(1);
return false;
}
static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
{
BUG();
return pud;
}
static inline bool kvm_s2pud_young(pud_t pud)
{
WARN_ON(1);
return false;
}
static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
pte_val(pte) |= L_PTE_S2_RDWR;
return pte;
}
static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
pmd_val(pmd) |= L_PMD_S2_RDWR;
return pmd;
}
static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
pte_val(pte) &= ~L_PTE_XN;
return pte;
}
static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
pmd_val(pmd) &= ~PMD_SECT_XN;
return pmd;
}
static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
}
static inline bool kvm_s2pte_readonly(pte_t *pte)
{
return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
}
static inline bool kvm_s2pte_exec(pte_t *pte)
{
return !(pte_val(*pte) & L_PTE_XN);
}
static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
}
static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
}
static inline bool kvm_s2pmd_exec(pmd_t *pmd)
{
return !(pmd_val(*pmd) & PMD_SECT_XN);
}
static inline bool kvm_page_empty(void *ptr)
{
struct page *ptr_page = virt_to_page(ptr);
return page_count(ptr_page) == 1;
}
#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) false
#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#define hyp_pud_table_empty(pudp) false
struct kvm;
#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
}
static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
/*
* Clean the dcache to the Point of Coherency.
*
* We need to do this through a kernel mapping (using the
* user-space mapping has proved to be the wrong
* solution). For that, we need to kmap one page at a time,
* and iterate over the range.
*/
VM_BUG_ON(size & ~PAGE_MASK);
while (size) {
void *va = kmap_atomic_pfn(pfn);
kvm_flush_dcache_to_poc(va, PAGE_SIZE);
size -= PAGE_SIZE;
pfn++;
kunmap_atomic(va);
}
}
static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
unsigned long size)
{
u32 iclsz;
/*
* If we are going to insert an instruction page and the icache is
* either VIPT or PIPT, there is a potential problem where the host
* (or another VM) may have used the same page as this guest, and we
* read incorrect data from the icache. If we're using a PIPT cache,
* we can invalidate just that page, but if we are using a VIPT cache
* we need to invalidate the entire icache - damn shame - as written
* in the ARM ARM (DDI 0406C.b - Page B3-1393).
*
* VIVT caches are tagged using both the ASID and the VMID and don't
* need any kind of flushing (DDI 0406C.b - Page B3-1392).
*/
VM_BUG_ON(size & ~PAGE_MASK);
if (icache_is_vivt_asid_tagged())
return;
if (!icache_is_pipt()) {
/* any kind of VIPT cache */
__flush_icache_all();
return;
}
/*
* CTR IminLine contains Log2 of the number of words in the
* cache line, so we can get the number of words as
* 2 << (IminLine - 1). To get the number of bytes, we
* multiply by 4 (the number of bytes in a 32-bit word), and
* get 4 << (IminLine).
*/
iclsz = 4 << (read_cpuid(CPUID_CACHETYPE) & 0xf);
while (size) {
void *va = kmap_atomic_pfn(pfn);
void *end = va + PAGE_SIZE;
void *addr = va;
do {
write_sysreg(addr, ICIMVAU);
addr += iclsz;
} while (addr < end);
dsb(ishst);
isb();
size -= PAGE_SIZE;
pfn++;
kunmap_atomic(va);
}
/* Check if we need to invalidate the BTB */
if ((read_cpuid_ext(CPUID_EXT_MMFR1) >> 28) != 4) {
write_sysreg(0, BPIALLIS);
dsb(ishst);
isb();
}
}
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
void *va = kmap_atomic(pte_page(pte));
kvm_flush_dcache_to_poc(va, PAGE_SIZE);
kunmap_atomic(va);
}
static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
unsigned long size = PMD_SIZE;
kvm_pfn_t pfn = pmd_pfn(pmd);
while (size) {
void *va = kmap_atomic_pfn(pfn);
kvm_flush_dcache_to_poc(va, PAGE_SIZE);
pfn++;
size -= PAGE_SIZE;
kunmap_atomic(va);
}
}
static inline void __kvm_flush_dcache_pud(pud_t pud)
{
}
#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
static inline bool __kvm_cpu_uses_extended_idmap(void)
{
return false;
}
static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
{
return PTRS_PER_PGD;
}
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
pgd_t *hyp_pgd,
pgd_t *merged_hyp_pgd,
unsigned long hyp_idmap_start) { }
static inline unsigned int kvm_get_vmid_bits(void)
{
return 8;
}
/*
* We are not in the kvm->srcu critical section most of the time, so we take
* the SRCU read lock here. Since we copy the data from the user page, we
* can immediately drop the lock again.
*/
static inline int kvm_read_guest_lock(struct kvm *kvm,
gpa_t gpa, void *data, unsigned long len)
{
int srcu_idx = srcu_read_lock(&kvm->srcu);
int ret = kvm_read_guest(kvm, gpa, data, len);
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
}
static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
const void *data, unsigned long len)
{
int srcu_idx = srcu_read_lock(&kvm->srcu);
int ret = kvm_write_guest(kvm, gpa, data, len);
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
}
static inline void *kvm_get_hyp_vector(void)
{
switch(read_cpuid_part()) {
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
case ARM_CPU_PART_CORTEX_A12:
case ARM_CPU_PART_CORTEX_A17:
{
extern char __kvm_hyp_vector_bp_inv[];
return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
}
case ARM_CPU_PART_BRAHMA_B15:
case ARM_CPU_PART_CORTEX_A15:
{
extern char __kvm_hyp_vector_ic_inv[];
return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
}
#endif
default:
{
extern char __kvm_hyp_vector[];
return kvm_ksym_ref(__kvm_hyp_vector);
}
}
}
static inline int kvm_map_vectors(void)
{
return 0;
}
static inline int hyp_map_aux_data(void)
{
return 0;
}
#define kvm_phys_to_vttbr(addr) (addr)
static inline void kvm_set_ipa_limit(void) {}
static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
{
struct kvm_vmid *vmid = &kvm->arch.vmid;
u64 vmid_field, baddr;
baddr = kvm->arch.pgd_phys;
vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
return kvm_phys_to_vttbr(baddr) | vmid_field;
}
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */


@@ -1,14 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018 - Arm Ltd */
#ifndef __ARM_KVM_RAS_H__
#define __ARM_KVM_RAS_H__
#include <linux/types.h>
static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr)
{
return -1;
}
#endif /* __ARM_KVM_RAS_H__ */


@@ -1,75 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 - ARM Ltd
*
* stage2 page table helpers
*/
#ifndef __ARM_S2_PGTABLE_H_
#define __ARM_S2_PGTABLE_H_
/*
* kvm_mmu_cache_min_pages() is the number of pages required
* to install a stage-2 translation. We pre-allocate the entry
* level table at VM creation. Since we have a 3 level page-table,
* we need only two pages to add a new mapping.
*/
#define kvm_mmu_cache_min_pages(kvm) 2
#define stage2_pgd_none(kvm, pgd) pgd_none(pgd)
#define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd)
#define stage2_pgd_present(kvm, pgd) pgd_present(pgd)
#define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud)
#define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address)
#define stage2_pud_free(kvm, pud) do { } while (0)
#define stage2_pud_none(kvm, pud) pud_none(pud)
#define stage2_pud_clear(kvm, pud) pud_clear(pud)
#define stage2_pud_present(kvm, pud) pud_present(pud)
#define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd)
#define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address)
#define stage2_pmd_free(kvm, pmd) free_page((unsigned long)pmd)
#define stage2_pud_huge(kvm, pud) pud_huge(pud)
/* Open coded p*d_addr_end that can deal with 64bit addresses */
static inline phys_addr_t
stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
phys_addr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
return (boundary - 1 < end - 1) ? boundary : end;
}
#define stage2_pud_addr_end(kvm, addr, end) (end)
static inline phys_addr_t
stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK;
return (boundary - 1 < end - 1) ? boundary : end;
}
#define stage2_pgd_index(kvm, addr) pgd_index(addr)
#define stage2_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define stage2_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define stage2_pud_table_empty(kvm, pudp) false
static inline bool kvm_stage2_has_pud(struct kvm *kvm)
{
return false;
}
#define S2_PMD_MASK PMD_MASK
#define S2_PMD_SIZE PMD_SIZE
#define S2_PUD_MASK PUD_MASK
#define S2_PUD_SIZE PUD_SIZE
static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
{
return true;
}
#endif /* __ARM_S2_PGTABLE_H_ */


@@ -67,11 +67,6 @@ static inline bool is_kernel_in_hyp_mode(void)
return false;
}
static inline bool has_vhe(void)
{
return false;
}
/* The section containing the hypervisor idmap text */
extern char __hyp_idmap_text_start[];
extern char __hyp_idmap_text_end[];


@@ -1,314 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __ARM_KVM_H__
#define __ARM_KVM_H__
#include <linux/types.h>
#include <linux/psci.h>
#include <asm/ptrace.h>
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */
#define KVM_ARM_SVC_sp svc_regs[0]
#define KVM_ARM_SVC_lr svc_regs[1]
#define KVM_ARM_SVC_spsr svc_regs[2]
#define KVM_ARM_ABT_sp abt_regs[0]
#define KVM_ARM_ABT_lr abt_regs[1]
#define KVM_ARM_ABT_spsr abt_regs[2]
#define KVM_ARM_UND_sp und_regs[0]
#define KVM_ARM_UND_lr und_regs[1]
#define KVM_ARM_UND_spsr und_regs[2]
#define KVM_ARM_IRQ_sp irq_regs[0]
#define KVM_ARM_IRQ_lr irq_regs[1]
#define KVM_ARM_IRQ_spsr irq_regs[2]
/* Valid only for fiq_regs in struct kvm_regs */
#define KVM_ARM_FIQ_r8 fiq_regs[0]
#define KVM_ARM_FIQ_r9 fiq_regs[1]
#define KVM_ARM_FIQ_r10 fiq_regs[2]
#define KVM_ARM_FIQ_fp fiq_regs[3]
#define KVM_ARM_FIQ_ip fiq_regs[4]
#define KVM_ARM_FIQ_sp fiq_regs[5]
#define KVM_ARM_FIQ_lr fiq_regs[6]
#define KVM_ARM_FIQ_spsr fiq_regs[7]
struct kvm_regs {
struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */
unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */
unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
};
/* Supported Processor Types */
#define KVM_ARM_TARGET_CORTEX_A15 0
#define KVM_ARM_TARGET_CORTEX_A7 1
#define KVM_ARM_NUM_TARGETS 2
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_ID_SHIFT 16
#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
/* Supported device IDs */
#define KVM_ARM_DEVICE_VGIC_V2 0
/* Supported VGIC address types */
#define KVM_VGIC_V2_ADDR_TYPE_DIST 0
#define KVM_VGIC_V2_ADDR_TYPE_CPU 1
#define KVM_VGIC_V2_DIST_SIZE 0x1000
#define KVM_VGIC_V2_CPU_SIZE 0x2000
/* Supported VGICv3 address types */
#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
#define KVM_VGIC_ITS_ADDR_TYPE 4
#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5
#define KVM_VGIC_V3_DIST_SIZE SZ_64K
#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K)
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */
struct kvm_vcpu_init {
__u32 target;
__u32 features[7];
};
struct kvm_sregs {
};
struct kvm_fpu {
};
struct kvm_guest_debug_arch {
};
struct kvm_debug_exit_arch {
};
struct kvm_sync_regs {
/* Used with KVM_CAP_ARM_USER_IRQ */
__u64 device_irq_level;
};
struct kvm_arch_memory_slot {
};
/* for KVM_GET/SET_VCPU_EVENTS */
struct kvm_vcpu_events {
struct {
__u8 serror_pending;
__u8 serror_has_esr;
__u8 ext_dabt_pending;
/* Align it to 8 bytes */
__u8 pad[5];
__u64 serror_esr;
} exception;
__u32 reserved[12];
};
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007
#define KVM_REG_ARM_32_OPC2_SHIFT 0
#define KVM_REG_ARM_OPC1_MASK 0x0000000000000078
#define KVM_REG_ARM_OPC1_SHIFT 3
#define KVM_REG_ARM_CRM_MASK 0x0000000000000780
#define KVM_REG_ARM_CRM_SHIFT 7
#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800
#define KVM_REG_ARM_32_CRN_SHIFT 11
/*
* For KVM currently all guest registers are nonsecure, but we reserve a bit
* in the encoding to distinguish secure from nonsecure for AArch32 system
* registers that are banked by security. This is 1 for the secure banked
* register, and 0 for the nonsecure banked register or if the register is
* not banked by security.
*/
#define KVM_REG_ARM_SECURE_MASK 0x0000000010000000
#define KVM_REG_ARM_SECURE_SHIFT 28
#define ARM_CP15_REG_SHIFT_MASK(x,n) \
(((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
#define __ARM_CP15_REG(op1,crn,crm,op2) \
(KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \
ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \
ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \
ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \
ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2))
#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32)
#define __ARM_CP15_REG64(op1,crm) \
(__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
/* PL1 Physical Timer Registers */
#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1)
#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14)
#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14)
/* Virtual Timer Registers */
#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1)
#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14)
#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14)
/* Normal registers are mapped as coprocessor 16. */
#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4)
/* Some registers need more space to represent values. */
#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00
#define KVM_REG_ARM_DEMUX_ID_SHIFT 8
#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF
#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0
/* VFP registers: we could overload CP10 like ARM does, but that's ugly. */
#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF
#define KVM_REG_ARM_VFP_BASE_REG 0x0
#define KVM_REG_ARM_VFP_FPSID 0x1000
#define KVM_REG_ARM_VFP_FPSCR 0x1001
#define KVM_REG_ARM_VFP_MVFR1 0x1006
#define KVM_REG_ARM_VFP_MVFR0 0x1007
#define KVM_REG_ARM_VFP_FPEXC 0x1008
#define KVM_REG_ARM_VFP_FPINST 0x1009
#define KVM_REG_ARM_VFP_FPINST2 0x100A
/* KVM-as-firmware specific pseudo-registers */
#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
KVM_REG_ARM_FW | ((r) & 0xffff))
#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1)
/* Higher values mean better protection. */
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
/* Higher values mean better protection. */
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
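A hedged sketch of how a VMM might interpret the WORKAROUND_1 value once it has been read back (for instance with KVM_GET_ONE_REG, as in the timer example above); the helper name is made up for illustration.

#include <stdbool.h>
#include <stdint.h>

/* True when the vcpu either needs no Spectre-v2 workaround or has one
 * available; false when it is known to be unprotected. */
static bool bp_hardening_ok(uint64_t wa1)
{
	switch (wa1) {
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED:
		return true;
	default:
		return false;
	}
}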
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
#define KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT 32
#define KVM_DEV_ARM_VGIC_V3_MPIDR_MASK \
(0xffffffffULL << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT)
#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
#define KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK (0xffff)
#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
(0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
#define VGIC_LEVEL_INFO_LINE_LEVEL 0
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
#define KVM_ARM_VCPU_PMU_V3_IRQ 0
#define KVM_ARM_VCPU_PMU_V3_INIT 1
#define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
#define KVM_DEV_ARM_ITS_CTRL_RESET 4
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_VCPU2_SHIFT 28
#define KVM_ARM_IRQ_VCPU2_MASK 0xf
#define KVM_ARM_IRQ_TYPE_SHIFT 24
#define KVM_ARM_IRQ_TYPE_MASK 0xf
#define KVM_ARM_IRQ_VCPU_SHIFT 16
#define KVM_ARM_IRQ_VCPU_MASK 0xff
#define KVM_ARM_IRQ_NUM_SHIFT 0
#define KVM_ARM_IRQ_NUM_MASK 0xffff
/* irq_type field */
#define KVM_ARM_IRQ_TYPE_CPU 0
#define KVM_ARM_IRQ_TYPE_SPI 1
#define KVM_ARM_IRQ_TYPE_PPI 2
/* out-of-kernel GIC cpu interrupt injection irq_number field */
#define KVM_ARM_IRQ_CPU_IRQ 0
#define KVM_ARM_IRQ_CPU_FIQ 1
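As a hedged example of composing these fields (vm_fd and the helper name are assumptions, not part of this patch), raising an SPI through KVM_IRQ_LINE looks roughly like this; spi is the GIC interrupt ID, i.e. 32 or above:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_spi(int vm_fd, unsigned int spi, int level)
{
	struct kvm_irq_level irq = {
		.irq   = (KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT) |
			 (spi << KVM_ARM_IRQ_NUM_SHIFT),
		.level = level,
	};

	return ioctl(vm_fd, KVM_IRQ_LINE, &irq);
}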
/*
* This used to hold the highest supported SPI, but it is now obsolete
* and only here to provide source code level compatibility with older
* userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
*/
#ifndef __KERNEL__
#define KVM_ARM_IRQ_GIC_MAX 127
#endif
/* One single KVM irqchip, ie. the VGIC */
#define KVM_NR_IRQCHIPS 1
/* PSCI interface */
#define KVM_PSCI_FN_BASE 0x95c1ba5e
#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0)
#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1)
#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2)
#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3)
#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS
#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED
#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS
#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED
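For illustration only, the guest side of this legacy PSCI v0.1 interface passed the function ID in r0 and arguments in r1-r3 over an HVC, with the result coming back in r0. A minimal sketch (a real guest discovers the conduit from the device tree and may have to use SMC instead of HVC):

static inline long kvm_psci_0_1_cpu_on(unsigned long target_cpu,
				       unsigned long entry_point)
{
	register unsigned long r0 asm("r0") = KVM_PSCI_FN_CPU_ON;
	register unsigned long r1 asm("r1") = target_cpu;
	register unsigned long r2 asm("r2") = entry_point;
	register unsigned long r3 asm("r3") = 0;	/* context_id, unused here */

	asm volatile(".arch_extension virt\n\thvc #0"
		     : "+r" (r0)
		     : "r" (r1), "r" (r2), "r" (r3)
		     : "memory");
	return (long)r0;
}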
#endif /* __ARM_KVM_H__ */


@ -11,9 +11,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_KVM_ARM_HOST
#include <linux/kvm_host.h>
#endif
#include <asm/cacheflush.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
@ -167,14 +164,6 @@ int main(void)
DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
BLANK();
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp));
DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs));
#endif
BLANK();
#ifdef CONFIG_VDSO
DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store));
#endif


@ -1,59 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# KVM configuration
#
source "virt/kvm/Kconfig"
source "virt/lib/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
---help---
Say Y here to get to see options for using your Linux host to run
other operating systems inside virtual machines (guests).
This option alone does not add any kernel code.
If you say N, all options in this submenu will be skipped and
disabled.
if VIRTUALIZATION
config KVM
bool "Kernel-based Virtual Machine (KVM) support"
depends on MMU && OF
select PREEMPT_NOTIFIERS
select ARM_GIC
select ARM_GIC_V3
select ARM_GIC_V3_ITS
select HAVE_KVM_CPU_RELAX_INTERCEPT
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
select KVM_ARM_HOST
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select SRCU
select MMU_NOTIFIER
select KVM_VFIO
select HAVE_KVM_EVENTFD
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_MSI
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
---help---
Support hosting virtualized guest machines.
This module provides access to the hardware capabilities through
a character device node named /dev/kvm.
If unsure, say N.
config KVM_ARM_HOST
bool
---help---
Provides host support for ARM processors.
source "drivers/vhost/Kconfig"
endif # VIRTUALIZATION


@ -1,43 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Kernel-based Virtual Machine module
#
plus_virt := $(call as-instr,.arch_extension virt,+virt)
ifeq ($(plus_virt),+virt)
plus_virt_def := -DREQUIRES_VIRT=1
endif
KVM := ../../../virt/kvm
ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic
CFLAGS_$(KVM)/arm/arm.o := $(plus_virt_def)
AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp/
obj-y += kvm-arm.o init.o interrupts.o
obj-y += handle_exit.o guest.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o vgic-v3-coproc.o
obj-y += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o $(KVM)/arm/hypercalls.o
obj-y += $(KVM)/arm/aarch32.o
obj-y += $(KVM)/arm/vgic/vgic.o
obj-y += $(KVM)/arm/vgic/vgic-init.o
obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
obj-y += $(KVM)/arm/vgic/vgic-v2.o
obj-y += $(KVM)/arm/vgic/vgic-v3.o
obj-y += $(KVM)/arm/vgic/vgic-v4.o
obj-y += $(KVM)/arm/vgic/vgic-mmio.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
obj-y += $(KVM)/arm/vgic/vgic-its.o
obj-y += $(KVM)/arm/vgic/vgic-debug.o
obj-y += $(KVM)/irqchip.o
obj-y += $(KVM)/arm/arch_timer.o

Diff for this file is not shown because of its size.


@ -1,130 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Authors: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#ifndef __ARM_KVM_COPROC_LOCAL_H__
#define __ARM_KVM_COPROC_LOCAL_H__
struct coproc_params {
unsigned long CRn;
unsigned long CRm;
unsigned long Op1;
unsigned long Op2;
unsigned long Rt1;
unsigned long Rt2;
bool is_64bit;
bool is_write;
};
struct coproc_reg {
/* MRC/MCR/MRRC/MCRR instruction which accesses it. */
unsigned long CRn;
unsigned long CRm;
unsigned long Op1;
unsigned long Op2;
bool is_64bit;
/* Trapped access from guest, if non-NULL. */
bool (*access)(struct kvm_vcpu *,
const struct coproc_params *,
const struct coproc_reg *);
/* Initialization for vcpu. */
void (*reset)(struct kvm_vcpu *, const struct coproc_reg *);
/* Index into vcpu_cp15(vcpu, ...), or 0 if we don't need to save it. */
unsigned long reg;
/* Value (usually reset value) */
u64 val;
};
static inline void print_cp_instr(const struct coproc_params *p)
{
/* Look, we even formatted it for you to paste into the table! */
if (p->is_64bit) {
kvm_pr_unimpl(" { CRm64(%2lu), Op1(%2lu), is64, func_%s },\n",
p->CRn, p->Op1, p->is_write ? "write" : "read");
} else {
kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
" func_%s },\n",
p->CRn, p->CRm, p->Op1, p->Op2,
p->is_write ? "write" : "read");
}
}
static inline bool ignore_write(struct kvm_vcpu *vcpu,
const struct coproc_params *p)
{
return true;
}
static inline bool read_zero(struct kvm_vcpu *vcpu,
const struct coproc_params *p)
{
*vcpu_reg(vcpu, p->Rt1) = 0;
return true;
}
/* Reset functions */
static inline void reset_unknown(struct kvm_vcpu *vcpu,
const struct coproc_reg *r)
{
BUG_ON(!r->reg);
BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
}
static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
BUG_ON(!r->reg);
BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
vcpu_cp15(vcpu, r->reg) = r->val;
}
static inline void reset_unknown64(struct kvm_vcpu *vcpu,
const struct coproc_reg *r)
{
BUG_ON(!r->reg);
BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
vcpu_cp15(vcpu, r->reg+1) = 0xd0c0ffee;
}
static inline int cmp_reg(const struct coproc_reg *i1,
const struct coproc_reg *i2)
{
BUG_ON(i1 == i2);
if (!i1)
return 1;
else if (!i2)
return -1;
if (i1->CRn != i2->CRn)
return i1->CRn - i2->CRn;
if (i1->CRm != i2->CRm)
return i1->CRm - i2->CRm;
if (i1->Op1 != i2->Op1)
return i1->Op1 - i2->Op1;
if (i1->Op2 != i2->Op2)
return i1->Op2 - i2->Op2;
return i2->is_64bit - i1->is_64bit;
}
#define CRn(_x) .CRn = _x
#define CRm(_x) .CRm = _x
#define CRm64(_x) .CRn = _x, .CRm = 0
#define Op1(_x) .Op1 = _x
#define Op2(_x) .Op2 = _x
#define is64 .is_64bit = true
#define is32 .is_64bit = false
bool access_vm_reg(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r);
#endif /* __ARM_KVM_COPROC_LOCAL_H__ */


@ -1,39 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Authors: Rusty Russell <rusty@rustcorp.au>
* Christoffer Dall <c.dall@virtualopensystems.com>
*/
#include <linux/kvm_host.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <linux/init.h>
#include "coproc.h"
/*
* A15-specific CP15 registers.
* CRn denotes the primary register number, but is copied to the CRm in the
* user space API for 64-bit register access in line with the terminology used
* in the ARM ARM.
* Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
* registers preceding 32-bit ones.
*/
static const struct coproc_reg a15_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
};
static struct kvm_coproc_target_table a15_target_table = {
.target = KVM_ARM_TARGET_CORTEX_A15,
.table = a15_regs,
.num = ARRAY_SIZE(a15_regs),
};
static int __init coproc_a15_init(void)
{
kvm_register_target_coproc_table(&a15_target_table);
return 0;
}
late_initcall(coproc_a15_init);


@ -1,42 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Copyright (C) 2013 - ARM Ltd
*
* Authors: Rusty Russell <rusty@rustcorp.au>
* Christoffer Dall <c.dall@virtualopensystems.com>
* Jonathan Austin <jonathan.austin@arm.com>
*/
#include <linux/kvm_host.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <linux/init.h>
#include "coproc.h"
/*
* Cortex-A7 specific CP15 registers.
* CRn denotes the primary register number, but is copied to the CRm in the
* user space API for 64-bit register access in line with the terminology used
* in the ARM ARM.
* Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
* registers preceding 32-bit ones.
*/
static const struct coproc_reg a7_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
};
static struct kvm_coproc_target_table a7_target_table = {
.target = KVM_ARM_TARGET_CORTEX_A7,
.table = a7_regs,
.num = ARRAY_SIZE(a7_regs),
};
static int __init coproc_a7_init(void)
{
kvm_register_target_coproc_table(&a7_target_table);
return 0;
}
late_initcall(coproc_a7_init);


@ -1,166 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/opcodes.h>
#include <trace/events/kvm.h>
#include "trace.h"
#define VCPU_NR_MODES 6
#define VCPU_REG_OFFSET_USR 0
#define VCPU_REG_OFFSET_FIQ 1
#define VCPU_REG_OFFSET_IRQ 2
#define VCPU_REG_OFFSET_SVC 3
#define VCPU_REG_OFFSET_ABT 4
#define VCPU_REG_OFFSET_UND 5
#define REG_OFFSET(_reg) \
(offsetof(struct kvm_regs, _reg) / sizeof(u32))
#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
/* USR/SYS Registers */
[VCPU_REG_OFFSET_USR] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
},
/* FIQ Registers */
[VCPU_REG_OFFSET_FIQ] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7),
REG_OFFSET(fiq_regs[0]), /* r8 */
REG_OFFSET(fiq_regs[1]), /* r9 */
REG_OFFSET(fiq_regs[2]), /* r10 */
REG_OFFSET(fiq_regs[3]), /* r11 */
REG_OFFSET(fiq_regs[4]), /* r12 */
REG_OFFSET(fiq_regs[5]), /* r13 */
REG_OFFSET(fiq_regs[6]), /* r14 */
},
/* IRQ Registers */
[VCPU_REG_OFFSET_IRQ] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(irq_regs[0]), /* r13 */
REG_OFFSET(irq_regs[1]), /* r14 */
},
/* SVC Registers */
[VCPU_REG_OFFSET_SVC] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(svc_regs[0]), /* r13 */
REG_OFFSET(svc_regs[1]), /* r14 */
},
/* ABT Registers */
[VCPU_REG_OFFSET_ABT] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(abt_regs[0]), /* r13 */
REG_OFFSET(abt_regs[1]), /* r14 */
},
/* UND Registers */
[VCPU_REG_OFFSET_UND] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(und_regs[0]), /* r13 */
REG_OFFSET(und_regs[1]), /* r14 */
},
};
/*
* Return a pointer to the register number valid in the current mode of
* the virtual CPU.
*/
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
switch (mode) {
case USR_MODE...SVC_MODE:
mode &= ~MODE32_BIT; /* 0 ... 3 */
break;
case ABT_MODE:
mode = VCPU_REG_OFFSET_ABT;
break;
case UND_MODE:
mode = VCPU_REG_OFFSET_UND;
break;
case SYSTEM_MODE:
mode = VCPU_REG_OFFSET_USR;
break;
default:
BUG();
}
return reg_array + vcpu_reg_offsets[mode][reg_num];
}
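Illustrative sketch, not part of the original file: emulation code is expected to go through this banked lookup rather than indexing gp_regs directly. A hypothetical trap handler for a scratch register, modelled on the coproc helpers elsewhere in this patch, would look roughly like this:

static bool access_scratch_reg(struct kvm_vcpu *vcpu,
			       const struct coproc_params *p,
			       const struct coproc_reg *r)
{
	if (p->is_write)
		vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
	else
		*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, r->reg);

	return true;	/* access handled, guest may continue */
}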
/*
* Return the SPSR for the current mode of the virtual CPU.
*/
unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu)
{
unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
switch (mode) {
case SVC_MODE:
return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
case ABT_MODE:
return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
case UND_MODE:
return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
case IRQ_MODE:
return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
case FIQ_MODE:
return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
default:
BUG();
}
}
/******************************************************************************
* Inject exceptions into the guest
*/
/**
* kvm_inject_vabt - inject an async abort / SError into the guest
* @vcpu: The VCPU to receive the exception
*
* It is assumed that this code is called from the VCPU thread and that the
* VCPU therefore is not currently executing guest code.
*/
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
*vcpu_hcr(vcpu) |= HCR_VA;
}


@ -1,387 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
struct kvm_stats_debugfs_item debugfs_entries[] = {
VCPU_STAT(halt_successful_poll),
VCPU_STAT(halt_attempted_poll),
VCPU_STAT(halt_poll_invalid),
VCPU_STAT(halt_wakeup),
VCPU_STAT(hvc_exit_stat),
VCPU_STAT(wfe_exit_stat),
VCPU_STAT(wfi_exit_stat),
VCPU_STAT(mmio_exit_user),
VCPU_STAT(mmio_exit_kernel),
VCPU_STAT(exits),
{ NULL }
};
static u64 core_reg_offset_from_id(u64 id)
{
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
u32 __user *uaddr = (u32 __user *)(long)reg->addr;
struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
u64 off;
if (KVM_REG_SIZE(reg->id) != 4)
return -ENOENT;
/* Our ID is an index into the kvm_regs struct. */
off = core_reg_offset_from_id(reg->id);
if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
return -ENOENT;
return put_user(((u32 *)regs)[off], uaddr);
}
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
u32 __user *uaddr = (u32 __user *)(long)reg->addr;
struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
u64 off, val;
if (KVM_REG_SIZE(reg->id) != 4)
return -ENOENT;
/* Our ID is an index into the kvm_regs struct. */
off = core_reg_offset_from_id(reg->id);
if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
return -ENOENT;
if (get_user(val, uaddr) != 0)
return -EFAULT;
if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
unsigned long mode = val & MODE_MASK;
switch (mode) {
case USR_MODE:
case FIQ_MODE:
case IRQ_MODE:
case SVC_MODE:
case ABT_MODE:
case UND_MODE:
break;
default:
return -EINVAL;
}
}
((u32 *)regs)[off] = val;
return 0;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
return -EINVAL;
}
#define NUM_TIMER_REGS 3
static bool is_timer_reg(u64 index)
{
switch (index) {
case KVM_REG_ARM_TIMER_CTL:
case KVM_REG_ARM_TIMER_CNT:
case KVM_REG_ARM_TIMER_CVAL:
return true;
}
return false;
}
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
return -EFAULT;
uindices++;
if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
return -EFAULT;
uindices++;
if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
return -EFAULT;
return 0;
}
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(long)reg->addr;
u64 val;
int ret;
ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
if (ret != 0)
return -EFAULT;
return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}
static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(long)reg->addr;
u64 val;
val = kvm_arm_timer_get_reg(vcpu, reg->id);
return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
static unsigned long num_core_regs(void)
{
return sizeof(struct kvm_regs) / sizeof(u32);
}
/**
* kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
*
* This is for all registers.
*/
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
+ kvm_arm_get_fw_num_regs(vcpu)
+ NUM_TIMER_REGS;
}
/**
* kvm_arm_copy_reg_indices - get indices of all registers.
*
* We do core registers right here, then we append coproc regs.
*/
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
unsigned int i;
const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
int ret;
for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
if (put_user(core_reg | i, uindices))
return -EFAULT;
uindices++;
}
ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
if (ret)
return ret;
uindices += kvm_arm_get_fw_num_regs(vcpu);
ret = copy_timer_indices(vcpu, uindices);
if (ret)
return ret;
uindices += NUM_TIMER_REGS;
return kvm_arm_copy_coproc_indices(vcpu, uindices);
}
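A hedged sketch of the userspace counterpart (vcpu_fd and the helper name are assumptions): KVM_GET_REG_LIST is first probed with n = 0, which fails with E2BIG but reports how many indices the two functions above will produce, and is then called again with a buffer of that size.

#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;

	/* First call reports the required number of entries. */
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0 || errno != E2BIG)
		return NULL;

	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
	if (!list)
		return NULL;

	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		free(list);
		return NULL;
	}
	return list;
}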
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/* We currently use nothing arch-specific in upper 32 bits */
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
return -EINVAL;
/* Register group 16 means we want a core register. */
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
return kvm_arm_get_fw_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
return kvm_arm_coproc_get_reg(vcpu, reg);
}
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/* We currently use nothing arch-specific in upper 32 bits */
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
return -EINVAL;
/* Register group 16 means we set a core register. */
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
return kvm_arm_set_fw_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);
return kvm_arm_coproc_set_reg(vcpu, reg);
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
return -EINVAL;
}
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
events->exception.serror_pending = !!(*vcpu_hcr(vcpu) & HCR_VA);
/*
* We never return a pending ext_dabt here because we deliver it to
* the virtual CPU directly when setting the event and it's no longer
* 'pending' at this point.
*/
return 0;
}
int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
bool serror_pending = events->exception.serror_pending;
bool has_esr = events->exception.serror_has_esr;
bool ext_dabt_pending = events->exception.ext_dabt_pending;
if (serror_pending && has_esr)
return -EINVAL;
else if (serror_pending)
kvm_inject_vabt(vcpu);
if (ext_dabt_pending)
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
return 0;
}
int __attribute_const__ kvm_target_cpu(void)
{
switch (read_cpuid_part()) {
case ARM_CPU_PART_CORTEX_A7:
return KVM_ARM_TARGET_CORTEX_A7;
case ARM_CPU_PART_CORTEX_A15:
return KVM_ARM_TARGET_CORTEX_A15;
default:
return -EINVAL;
}
}
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
int target = kvm_target_cpu();
if (target < 0)
return -ENODEV;
memset(init, 0, sizeof(*init));
/*
* For now, we don't return any features.
* In future, we might use features to return target
* specific features available for the preferred
* target type.
*/
init->target = (__u32)target;
return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
return -EINVAL;
}
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
int ret;
switch (attr->group) {
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_set_attr(vcpu, attr);
break;
default:
ret = -ENXIO;
break;
}
return ret;
}
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
int ret;
switch (attr->group) {
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_get_attr(vcpu, attr);
break;
default:
ret = -ENXIO;
break;
}
return ret;
}
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
int ret;
switch (attr->group) {
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_has_attr(vcpu, attr);
break;
default:
ret = -ENXIO;
break;
}
return ret;
}
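These three entry points back the per-vcpu device-attribute ioctls. A hedged userspace sketch (vcpu_fd and the PPI number are assumptions; 27 is merely the PPI conventionally used for the virtual timer):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_vtimer_irq(int vcpu_fd, uint32_t ppi)
{
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_TIMER_CTRL,
		.attr  = KVM_ARM_VCPU_TIMER_IRQ_VTIMER,
		.addr  = (uint64_t)(unsigned long)&ppi,
	};

	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}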


@ -1,175 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <kvm/arm_hypercalls.h>
#include <trace/events/kvm.h>
#include "trace.h"
typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
kvm_vcpu_hvc_get_imm(vcpu));
vcpu->stat.hvc_exit_stat++;
ret = kvm_hvc_call_handler(vcpu);
if (ret < 0) {
vcpu_set_reg(vcpu, 0, ~0UL);
return 1;
}
return ret;
}
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
/*
* "If an SMC instruction executed at Non-secure EL1 is
* trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
* Trap exception, not a Secure Monitor Call exception [...]"
*
* We need to advance the PC after the trap, as it would
* otherwise return to the same address...
*/
vcpu_set_reg(vcpu, 0, ~0UL);
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
/**
* kvm_handle_wfx - handle WFI or WFE instructions trapped in guests
* @vcpu: the vcpu pointer
* @run: the kvm_run structure pointer
*
* WFE: Yield the CPU and come back to this vcpu when the scheduler
* decides to.
* WFI: Simply call kvm_vcpu_block(), which will halt execution of
* world-switches and schedule other host processes until there is an
* incoming IRQ or FIQ to the VM.
*/
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
trace_kvm_wfx(*vcpu_pc(vcpu), true);
vcpu->stat.wfe_exit_stat++;
kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
} else {
trace_kvm_wfx(*vcpu_pc(vcpu), false);
vcpu->stat.wfi_exit_stat++;
kvm_vcpu_block(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
}
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
u32 hsr = kvm_vcpu_get_hsr(vcpu);
kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
hsr);
kvm_inject_undefined(vcpu);
return 1;
}
static exit_handle_fn arm_exit_handlers[] = {
[0 ... HSR_EC_MAX] = kvm_handle_unknown_ec,
[HSR_EC_WFI] = kvm_handle_wfx,
[HSR_EC_CP15_32] = kvm_handle_cp15_32,
[HSR_EC_CP15_64] = kvm_handle_cp15_64,
[HSR_EC_CP14_MR] = kvm_handle_cp14_32,
[HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
[HSR_EC_CP14_64] = kvm_handle_cp14_64,
[HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
[HSR_EC_CP10_ID] = kvm_handle_cp10_id,
[HSR_EC_HVC] = handle_hvc,
[HSR_EC_SMC] = handle_smc,
[HSR_EC_IABT] = kvm_handle_guest_abort,
[HSR_EC_DABT] = kvm_handle_guest_abort,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
return arm_exit_handlers[hsr_ec];
}
/*
* Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
* proper exit to userspace.
*/
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index)
{
exit_handle_fn exit_handler;
if (ARM_ABORT_PENDING(exception_index)) {
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
/*
* HVC/SMC already have an adjusted PC, which we need
* to correct in order to return to after having
* injected the abort.
*/
if (hsr_ec == HSR_EC_HVC || hsr_ec == HSR_EC_SMC) {
u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
*vcpu_pc(vcpu) -= adj;
}
kvm_inject_vabt(vcpu);
return 1;
}
exception_index = ARM_EXCEPTION_CODE(exception_index);
switch (exception_index) {
case ARM_EXCEPTION_IRQ:
return 1;
case ARM_EXCEPTION_HVC:
/*
* See ARM ARM B1.14.1: "Hyp traps on instructions
* that fail their condition code check"
*/
if (!kvm_condition_valid(vcpu)) {
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
exit_handler = kvm_get_exit_handler(vcpu);
return exit_handler(vcpu, run);
case ARM_EXCEPTION_DATA_ABORT:
kvm_inject_vabt(vcpu);
return 1;
case ARM_EXCEPTION_HYP_GONE:
/*
* HYP has been reset to the hyp-stub. This happens
* when a guest is pre-empted by kvm_reboot()'s
* shutdown call.
*/
run->exit_reason = KVM_EXIT_FAIL_ENTRY;
return 0;
default:
kvm_pr_unimpl("Unsupported exception type: %d",
exception_index);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return 0;
}
}


@ -1,34 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Kernel-based Virtual Machine module, HYP part
#
ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
KVM=../../../../virt/kvm
CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o
obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE)
obj-$(CONFIG_KVM_ARM_HOST) += entry.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
obj-$(CONFIG_KVM_ARM_HOST) += switch.o
CFLAGS_switch.o += $(CFLAGS_ARMV7VE)
obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
# KVM code runs in a different exception mode with a different memory map, so
# compiler instrumentation that inserts callbacks or checks into the code may
# cause crashes. Just disable it.
GCOV_PROFILE := n
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n


@ -1,70 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Original code:
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
*/
#include <asm/kvm_hyp.h>
/*
* gcc before 4.9 doesn't understand -march=armv7ve, so we have to
* trick the assembler.
*/
__asm__(".arch_extension virt");
void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
{
ctxt->gp_regs.usr_regs.ARM_sp = read_special(SP_usr);
ctxt->gp_regs.usr_regs.ARM_pc = read_special(ELR_hyp);
ctxt->gp_regs.usr_regs.ARM_cpsr = read_special(SPSR);
ctxt->gp_regs.KVM_ARM_SVC_sp = read_special(SP_svc);
ctxt->gp_regs.KVM_ARM_SVC_lr = read_special(LR_svc);
ctxt->gp_regs.KVM_ARM_SVC_spsr = read_special(SPSR_svc);
ctxt->gp_regs.KVM_ARM_ABT_sp = read_special(SP_abt);
ctxt->gp_regs.KVM_ARM_ABT_lr = read_special(LR_abt);
ctxt->gp_regs.KVM_ARM_ABT_spsr = read_special(SPSR_abt);
ctxt->gp_regs.KVM_ARM_UND_sp = read_special(SP_und);
ctxt->gp_regs.KVM_ARM_UND_lr = read_special(LR_und);
ctxt->gp_regs.KVM_ARM_UND_spsr = read_special(SPSR_und);
ctxt->gp_regs.KVM_ARM_IRQ_sp = read_special(SP_irq);
ctxt->gp_regs.KVM_ARM_IRQ_lr = read_special(LR_irq);
ctxt->gp_regs.KVM_ARM_IRQ_spsr = read_special(SPSR_irq);
ctxt->gp_regs.KVM_ARM_FIQ_r8 = read_special(R8_fiq);
ctxt->gp_regs.KVM_ARM_FIQ_r9 = read_special(R9_fiq);
ctxt->gp_regs.KVM_ARM_FIQ_r10 = read_special(R10_fiq);
ctxt->gp_regs.KVM_ARM_FIQ_fp = read_special(R11_fiq);
ctxt->gp_regs.KVM_ARM_FIQ_ip = read_special(R12_fiq);
ctxt->gp_regs.KVM_ARM_FIQ_sp = read_special(SP_fiq);
ctxt->gp_regs.KVM_ARM_FIQ_lr = read_special(LR_fiq);
ctxt->gp_regs.KVM_ARM_FIQ_spsr = read_special(SPSR_fiq);
}
void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt)
{
write_special(ctxt->gp_regs.usr_regs.ARM_sp, SP_usr);
write_special(ctxt->gp_regs.usr_regs.ARM_pc, ELR_hyp);
write_special(ctxt->gp_regs.usr_regs.ARM_cpsr, SPSR_cxsf);
write_special(ctxt->gp_regs.KVM_ARM_SVC_sp, SP_svc);
write_special(ctxt->gp_regs.KVM_ARM_SVC_lr, LR_svc);
write_special(ctxt->gp_regs.KVM_ARM_SVC_spsr, SPSR_svc);
write_special(ctxt->gp_regs.KVM_ARM_ABT_sp, SP_abt);
write_special(ctxt->gp_regs.KVM_ARM_ABT_lr, LR_abt);
write_special(ctxt->gp_regs.KVM_ARM_ABT_spsr, SPSR_abt);
write_special(ctxt->gp_regs.KVM_ARM_UND_sp, SP_und);
write_special(ctxt->gp_regs.KVM_ARM_UND_lr, LR_und);
write_special(ctxt->gp_regs.KVM_ARM_UND_spsr, SPSR_und);
write_special(ctxt->gp_regs.KVM_ARM_IRQ_sp, SP_irq);
write_special(ctxt->gp_regs.KVM_ARM_IRQ_lr, LR_irq);
write_special(ctxt->gp_regs.KVM_ARM_IRQ_spsr, SPSR_irq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_r8, R8_fiq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_r9, R9_fiq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_r10, R10_fiq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_fp, R11_fiq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_ip, R12_fiq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_sp, SP_fiq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_lr, LR_fiq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_spsr, SPSR_fiq);
}


@ -1,72 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Original code:
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
*/
#include <asm/kvm_hyp.h>
static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx)
{
return (u64 *)(ctxt->cp15 + idx);
}
void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
{
ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR);
ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR);
ctxt->cp15[c1_CPACR] = read_sysreg(CPACR);
*cp15_64(ctxt, c2_TTBR0) = read_sysreg(TTBR0);
*cp15_64(ctxt, c2_TTBR1) = read_sysreg(TTBR1);
ctxt->cp15[c2_TTBCR] = read_sysreg(TTBCR);
ctxt->cp15[c3_DACR] = read_sysreg(DACR);
ctxt->cp15[c5_DFSR] = read_sysreg(DFSR);
ctxt->cp15[c5_IFSR] = read_sysreg(IFSR);
ctxt->cp15[c5_ADFSR] = read_sysreg(ADFSR);
ctxt->cp15[c5_AIFSR] = read_sysreg(AIFSR);
ctxt->cp15[c6_DFAR] = read_sysreg(DFAR);
ctxt->cp15[c6_IFAR] = read_sysreg(IFAR);
*cp15_64(ctxt, c7_PAR) = read_sysreg(PAR);
ctxt->cp15[c10_PRRR] = read_sysreg(PRRR);
ctxt->cp15[c10_NMRR] = read_sysreg(NMRR);
ctxt->cp15[c10_AMAIR0] = read_sysreg(AMAIR0);
ctxt->cp15[c10_AMAIR1] = read_sysreg(AMAIR1);
ctxt->cp15[c12_VBAR] = read_sysreg(VBAR);
ctxt->cp15[c13_CID] = read_sysreg(CID);
ctxt->cp15[c13_TID_URW] = read_sysreg(TID_URW);
ctxt->cp15[c13_TID_URO] = read_sysreg(TID_URO);
ctxt->cp15[c13_TID_PRIV] = read_sysreg(TID_PRIV);
ctxt->cp15[c14_CNTKCTL] = read_sysreg(CNTKCTL);
}
void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
{
write_sysreg(ctxt->cp15[c0_MPIDR], VMPIDR);
write_sysreg(ctxt->cp15[c0_CSSELR], CSSELR);
write_sysreg(ctxt->cp15[c1_SCTLR], SCTLR);
write_sysreg(ctxt->cp15[c1_CPACR], CPACR);
write_sysreg(*cp15_64(ctxt, c2_TTBR0), TTBR0);
write_sysreg(*cp15_64(ctxt, c2_TTBR1), TTBR1);
write_sysreg(ctxt->cp15[c2_TTBCR], TTBCR);
write_sysreg(ctxt->cp15[c3_DACR], DACR);
write_sysreg(ctxt->cp15[c5_DFSR], DFSR);
write_sysreg(ctxt->cp15[c5_IFSR], IFSR);
write_sysreg(ctxt->cp15[c5_ADFSR], ADFSR);
write_sysreg(ctxt->cp15[c5_AIFSR], AIFSR);
write_sysreg(ctxt->cp15[c6_DFAR], DFAR);
write_sysreg(ctxt->cp15[c6_IFAR], IFAR);
write_sysreg(*cp15_64(ctxt, c7_PAR), PAR);
write_sysreg(ctxt->cp15[c10_PRRR], PRRR);
write_sysreg(ctxt->cp15[c10_NMRR], NMRR);
write_sysreg(ctxt->cp15[c10_AMAIR0], AMAIR0);
write_sysreg(ctxt->cp15[c10_AMAIR1], AMAIR1);
write_sysreg(ctxt->cp15[c12_VBAR], VBAR);
write_sysreg(ctxt->cp15[c13_CID], CID);
write_sysreg(ctxt->cp15[c13_TID_URW], TID_URW);
write_sysreg(ctxt->cp15[c13_TID_URO], TID_URO);
write_sysreg(ctxt->cp15[c13_TID_PRIV], TID_PRIV);
write_sysreg(ctxt->cp15[c14_CNTKCTL], CNTKCTL);
}


@ -1,121 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
.arch_extension virt
.text
.pushsection .hyp.text, "ax"
#define USR_REGS_OFFSET (CPU_CTXT_GP_REGS + GP_REGS_USR)
/* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */
ENTRY(__guest_enter)
@ Save host registers
add r1, r1, #(USR_REGS_OFFSET + S_R4)
stm r1!, {r4-r12}
str lr, [r1, #4] @ Skip SP_usr (already saved)
@ Restore guest registers
add r0, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
ldr lr, [r0, #S_LR]
ldm r0, {r0-r12}
clrex
eret
ENDPROC(__guest_enter)
ENTRY(__guest_exit)
/*
* return convention:
* guest r0, r1, r2 saved on the stack
* r0: vcpu pointer
* r1: exception code
*/
add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3)
stm r2!, {r3-r12}
str lr, [r2, #4]
add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
pop {r3, r4, r5} @ r0, r1, r2
stm r2, {r3-r5}
ldr r0, [r0, #VCPU_HOST_CTXT]
add r0, r0, #(USR_REGS_OFFSET + S_R4)
ldm r0!, {r4-r12}
ldr lr, [r0, #4]
mov r0, r1
mrs r1, SPSR
mrs r2, ELR_hyp
mrc p15, 4, r3, c5, c2, 0 @ HSR
/*
* Force loads and stores to complete before unmasking aborts
* and forcing the delivery of the exception. This gives us a
* single instruction window, which the handler will try to
* match.
*/
dsb sy
cpsie a
.global abort_guest_exit_start
abort_guest_exit_start:
isb
.global abort_guest_exit_end
abort_guest_exit_end:
/*
* If we took an abort, r0[31] will be set, and cmp will set
* the N bit in PSTATE.
*/
cmp r0, #0
msrmi SPSR_cxsf, r1
msrmi ELR_hyp, r2
mcrmi p15, 4, r3, c5, c2, 0 @ HSR
bx lr
ENDPROC(__guest_exit)
/*
* If VFPv3 support is not available, then we will not switch the VFP
* registers; however, cp10 and cp11 accesses will still trap and fall back
* to the regular coprocessor emulation code, which currently will
* inject an undefined exception into the guest.
*/
#ifdef CONFIG_VFPv3
ENTRY(__vfp_guest_restore)
push {r3, r4, lr}
@ NEON/VFP used. Turn on VFP access.
mrc p15, 4, r1, c1, c1, 2 @ HCPTR
bic r1, r1, #(HCPTR_TCP(10) | HCPTR_TCP(11))
mcr p15, 4, r1, c1, c1, 2 @ HCPTR
isb
@ Switch VFP/NEON hardware state to the guest's
mov r4, r0
ldr r0, [r0, #VCPU_HOST_CTXT]
add r0, r0, #CPU_CTXT_VFP
bl __vfp_save_state
add r0, r4, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP)
bl __vfp_restore_state
pop {r3, r4, lr}
pop {r0, r1, r2}
clrex
eret
ENDPROC(__vfp_guest_restore)
#endif
.popsection


@ -1,295 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#include <linux/arm-smccc.h>
#include <linux/linkage.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
.arch_extension virt
.text
.pushsection .hyp.text, "ax"
.macro load_vcpu reg
mrc p15, 4, \reg, c13, c0, 2 @ HTPIDR
.endm
/********************************************************************
* Hypervisor exception vector and handlers
*
*
* The KVM/ARM Hypervisor ABI is defined as follows:
*
* Entry to Hyp mode from the host kernel will happen _only_ when an HVC
* instruction is issued since all traps are disabled when running the host
* kernel as per the Hyp-mode initialization at boot time.
*
* HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
* below) when the HVC instruction is called from SVC mode (i.e. a guest or the
* host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
* instructions are called from within Hyp-mode.
*
* Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
* Switching to Hyp mode is done through a simple HVC #0 instruction. The
* exception vector code will check that the HVC comes from VMID==0.
* - r0 contains a pointer to a HYP function
* - r1, r2, and r3 contain arguments to the above function.
* - The HYP function will be called with its arguments in r0, r1 and r2.
* On HYP function return, we return directly to SVC.
*
* Note that the above is used to execute code in Hyp-mode from a host-kernel
* point of view, and is a different concept from performing a world-switch and
* executing guest code in SVC mode (with a VMID != 0).
*/
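Purely as an illustration of the ABI described above (this helper is not part of the file, and the return value in r0 is an assumption carried over from kvm_call_hyp), the SVC-side call boils down to loading the HYP function pointer and its arguments and issuing HVC #0:

static inline unsigned long example_hyp_call(void *hypfn, unsigned long a1,
					     unsigned long a2, unsigned long a3)
{
	register unsigned long r0 asm("r0") = (unsigned long)hypfn;
	register unsigned long r1 asm("r1") = a1;
	register unsigned long r2 asm("r2") = a2;
	register unsigned long r3 asm("r3") = a3;

	asm volatile(".arch_extension virt\n\thvc #0"
		     : "+r" (r0)
		     : "r" (r1), "r" (r2), "r" (r3)
		     : "memory");
	return r0;	/* assumed: HYP function result comes back in r0 */
}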
.align 5
__kvm_hyp_vector:
.global __kvm_hyp_vector
@ Hyp-mode exception vector
W(b) hyp_reset
W(b) hyp_undef
W(b) hyp_svc
W(b) hyp_pabt
W(b) hyp_dabt
W(b) hyp_hvc
W(b) hyp_irq
W(b) hyp_fiq
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
.align 5
__kvm_hyp_vector_ic_inv:
.global __kvm_hyp_vector_ic_inv
/*
* We encode the exception entry in the bottom 3 bits of
* SP, and we have to guarantee to be 8 bytes aligned.
*/
W(add) sp, sp, #1 /* Reset 7 */
W(add) sp, sp, #1 /* Undef 6 */
W(add) sp, sp, #1 /* Syscall 5 */
W(add) sp, sp, #1 /* Prefetch abort 4 */
W(add) sp, sp, #1 /* Data abort 3 */
W(add) sp, sp, #1 /* HVC 2 */
W(add) sp, sp, #1 /* IRQ 1 */
W(nop) /* FIQ 0 */
mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */
isb
b decode_vectors
.align 5
__kvm_hyp_vector_bp_inv:
.global __kvm_hyp_vector_bp_inv
/*
* We encode the exception entry in the bottom 3 bits of
* SP, and we have to guarantee that it is 8-byte aligned.
*/
W(add) sp, sp, #1 /* Reset 7 */
W(add) sp, sp, #1 /* Undef 6 */
W(add) sp, sp, #1 /* Syscall 5 */
W(add) sp, sp, #1 /* Prefetch abort 4 */
W(add) sp, sp, #1 /* Data abort 3 */
W(add) sp, sp, #1 /* HVC 2 */
W(add) sp, sp, #1 /* IRQ 1 */
W(nop) /* FIQ 0 */
mcr p15, 0, r0, c7, c5, 6 /* BPIALL */
isb
decode_vectors:
#ifdef CONFIG_THUMB2_KERNEL
/*
* Yet another silly hack: Use VPIDR as a temp register.
* Thumb2 is really a pain, as SP cannot be used with most
* of the bitwise instructions. The vect_br macro ensures
* things get cleaned up.
*/
mcr p15, 4, r0, c0, c0, 0 /* VPIDR */
mov r0, sp
and r0, r0, #7
sub sp, sp, r0
push {r1, r2}
mov r1, r0
mrc p15, 4, r0, c0, c0, 0 /* VPIDR */
mrc p15, 0, r2, c0, c0, 0 /* MIDR */
mcr p15, 4, r2, c0, c0, 0 /* VPIDR */
#endif
.macro vect_br val, targ
ARM( eor sp, sp, #\val )
ARM( tst sp, #7 )
ARM( eorne sp, sp, #\val )
THUMB( cmp r1, #\val )
THUMB( popeq {r1, r2} )
beq \targ
.endm
vect_br 0, hyp_fiq
vect_br 1, hyp_irq
vect_br 2, hyp_hvc
vect_br 3, hyp_dabt
vect_br 4, hyp_pabt
vect_br 5, hyp_svc
vect_br 6, hyp_undef
vect_br 7, hyp_reset
#endif
.macro invalid_vector label, cause
.align
\label: mov r0, #\cause
b __hyp_panic
.endm
invalid_vector hyp_reset ARM_EXCEPTION_RESET
invalid_vector hyp_undef ARM_EXCEPTION_UNDEFINED
invalid_vector hyp_svc ARM_EXCEPTION_SOFTWARE
invalid_vector hyp_pabt ARM_EXCEPTION_PREF_ABORT
invalid_vector hyp_fiq ARM_EXCEPTION_FIQ
ENTRY(__hyp_do_panic)
mrs lr, cpsr
bic lr, lr, #MODE_MASK
orr lr, lr, #SVC_MODE
THUMB( orr lr, lr, #PSR_T_BIT )
msr spsr_cxsf, lr
ldr lr, =panic
msr ELR_hyp, lr
ldr lr, =__kvm_call_hyp
clrex
eret
ENDPROC(__hyp_do_panic)
hyp_hvc:
/*
* Getting here is either because of a trap from a guest,
* or from executing HVC from the host kernel, which means
* "do something in Hyp mode".
*/
push {r0, r1, r2}
@ Check syndrome register
mrc p15, 4, r1, c5, c2, 0 @ HSR
lsr r0, r1, #HSR_EC_SHIFT
cmp r0, #HSR_EC_HVC
bne guest_trap @ Not HVC instr.
/*
* Let's check if the HVC came from VMID 0 and allow simple
* switch to Hyp mode
*/
mrrc p15, 6, r0, r2, c2
lsr r2, r2, #16
and r2, r2, #0xff
cmp r2, #0
bne guest_hvc_trap @ Guest called HVC
/*
* Getting here means host called HVC, we shift parameters and branch
* to Hyp function.
*/
pop {r0, r1, r2}
/*
* Check if we have a kernel function pointer, which is guaranteed to be
* bigger than the highest hyp stub hypercall number
*/
cmp r0, #HVC_STUB_HCALL_NR
bhs 1f
/*
* Not a kernel function, treat it as a stub hypercall.
* Compute the physical address for __kvm_handle_stub_hvc
* (as the code lives in the idmapped page) and branch there.
* We hijack ip (r12) as a tmp register.
*/
push {r1}
ldr r1, =kimage_voffset
ldr r1, [r1]
ldr ip, =__kvm_handle_stub_hvc
sub ip, ip, r1
pop {r1}
bx ip
1:
/*
* Pushing r2 here is just a way of keeping the stack aligned to
* 8 bytes on any path that can trigger a HYP exception. Here,
* we may well be about to jump into the guest, and the guest
* exit would otherwise be badly decoded by our fancy
* "decode-exception-without-a-branch" code...
*/
push {r2, lr}
mov lr, r0
mov r0, r1
mov r1, r2
mov r2, r3
THUMB( orr lr, #1)
blx lr @ Call the HYP function
pop {r2, lr}
eret
guest_hvc_trap:
movw r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
movt r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
ldr r0, [sp] @ Guest's r0
teq r0, r2
bne guest_trap
add sp, sp, #12
@ Returns:
@ r0 = 0
@ r1 = HSR value (perfectly predictable)
@ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
mov r0, #0
eret
guest_trap:
load_vcpu r0 @ Load VCPU pointer to r0
#ifdef CONFIG_VFPv3
@ Check for a VFP access
lsr r1, r1, #HSR_EC_SHIFT
cmp r1, #HSR_EC_CP_0_13
beq __vfp_guest_restore
#endif
mov r1, #ARM_EXCEPTION_HVC
b __guest_exit
hyp_irq:
push {r0, r1, r2}
mov r1, #ARM_EXCEPTION_IRQ
load_vcpu r0 @ Load VCPU pointer to r0
b __guest_exit
hyp_dabt:
push {r0, r1}
mrs r0, ELR_hyp
ldr r1, =abort_guest_exit_start
THUMB( add r1, r1, #1)
cmp r0, r1
ldrne r1, =abort_guest_exit_end
THUMB( addne r1, r1, #1)
cmpne r0, r1
pop {r0, r1}
bne __hyp_panic
orr r0, r0, #(1 << ARM_EXIT_WITH_ABORT_BIT)
eret
.ltorg
.popsection


@ -1,22 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <linux/types.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
void __hyp_text __init_stage2_translation(void)
{
u64 val;
val = read_sysreg(VTCR) & ~VTCR_MASK;
val |= read_sysreg(HTCR) & VTCR_HTCR_SH;
val |= KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S;
write_sysreg(val, VTCR);
}


@ -1,242 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <linux/jump_label.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
__asm__(".arch_extension virt");
/*
* Activate the traps, saving the host's fpexc register before
* overwriting it. We'll restore it on VM exit.
*/
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
{
u32 val;
/*
* We are about to set HCPTR.TCP10/11 to trap all floating point
* register accesses to HYP; however, the ARM ARM clearly states that
* traps are only taken to HYP if the operation would not otherwise
* trap to SVC. Therefore, always make sure that for 32-bit guests,
* we set FPEXC.EN to prevent traps to SVC, when setting the TCP bits.
*/
val = read_sysreg(VFP_FPEXC);
*fpexc_host = val;
if (!(val & FPEXC_EN)) {
write_sysreg(val | FPEXC_EN, VFP_FPEXC);
isb();
}
write_sysreg(vcpu->arch.hcr, HCR);
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
write_sysreg(HSTR_T(15), HSTR);
write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
val = read_sysreg(HDCR);
val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */
val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
write_sysreg(val, HDCR);
}
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
u32 val;
/*
* If we pended a virtual abort, preserve it until it gets
* cleared. See B1.9.9 (Virtual Abort exception) for details,
* but the crucial bit is the zeroing of HCR.VA in the
* pseudocode.
*/
if (vcpu->arch.hcr & HCR_VA)
vcpu->arch.hcr = read_sysreg(HCR);
write_sysreg(0, HCR);
write_sysreg(0, HSTR);
val = read_sysreg(HDCR);
write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR);
write_sysreg(0, HCPTR);
}
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
write_sysreg(kvm_get_vttbr(kvm), VTTBR);
write_sysreg(vcpu->arch.midr, VPIDR);
}
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
write_sysreg(0, VTTBR);
write_sysreg(read_sysreg(MIDR), VPIDR);
}
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
__vgic_v3_save_state(vcpu);
__vgic_v3_deactivate_traps(vcpu);
}
}
static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
__vgic_v3_activate_traps(vcpu);
__vgic_v3_restore_state(vcpu);
}
}
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
u32 hsr = read_sysreg(HSR);
u8 ec = hsr >> HSR_EC_SHIFT;
u32 hpfar, far;
vcpu->arch.fault.hsr = hsr;
if (ec == HSR_EC_IABT)
far = read_sysreg(HIFAR);
else if (ec == HSR_EC_DABT)
far = read_sysreg(HDFAR);
else
return true;
/*
* B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
*
* Abort on the stage 2 translation for a memory access from a
* Non-secure PL1 or PL0 mode:
*
* For any Access flag fault or Translation fault, and also for any
* Permission fault on the stage 2 translation of a memory access
* made as part of a translation table walk for a stage 1 translation,
* the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
* is UNKNOWN.
*/
if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) {
u64 par, tmp;
par = read_sysreg(PAR);
write_sysreg(far, ATS1CPR);
isb();
tmp = read_sysreg(PAR);
write_sysreg(par, PAR);
if (unlikely(tmp & 1))
return false; /* Translation failed, back to guest */
hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4;
} else {
hpfar = read_sysreg(HPFAR);
}
vcpu->arch.fault.hxfar = far;
vcpu->arch.fault.hpfar = hpfar;
return true;
}
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
bool fp_enabled;
u64 exit_code;
u32 fpexc;
vcpu = kern_hyp_va(vcpu);
write_sysreg(vcpu, HTPIDR);
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
guest_ctxt = &vcpu->arch.ctxt;
__sysreg_save_state(host_ctxt);
__banked_save_state(host_ctxt);
__activate_traps(vcpu, &fpexc);
__activate_vm(vcpu);
__vgic_restore_state(vcpu);
__timer_enable_traps(vcpu);
__sysreg_restore_state(guest_ctxt);
__banked_restore_state(guest_ctxt);
/* Jump in the fire! */
again:
exit_code = __guest_enter(vcpu, host_ctxt);
/* And we're baaack! */
if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu))
goto again;
fp_enabled = __vfp_enabled();
__banked_save_state(guest_ctxt);
__sysreg_save_state(guest_ctxt);
__timer_disable_traps(vcpu);
__vgic_save_state(vcpu);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
__banked_restore_state(host_ctxt);
__sysreg_restore_state(host_ctxt);
if (fp_enabled) {
__vfp_save_state(&guest_ctxt->vfp);
__vfp_restore_state(&host_ctxt->vfp);
}
write_sysreg(fpexc, VFP_FPEXC);
return exit_code;
}
static const char * const __hyp_panic_string[] = {
[ARM_EXCEPTION_RESET] = "\nHYP panic: RST PC:%08x CPSR:%08x",
[ARM_EXCEPTION_UNDEFINED] = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
[ARM_EXCEPTION_SOFTWARE] = "\nHYP panic: SVC PC:%08x CPSR:%08x",
[ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x",
[ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x",
[ARM_EXCEPTION_IRQ] = "\nHYP panic: IRQ PC:%08x CPSR:%08x",
[ARM_EXCEPTION_FIQ] = "\nHYP panic: FIQ PC:%08x CPSR:%08x",
[ARM_EXCEPTION_HVC] = "\nHYP panic: HVC PC:%08x CPSR:%08x",
};
void __hyp_text __noreturn __hyp_panic(int cause)
{
u32 elr = read_special(ELR_hyp);
u32 val;
if (cause == ARM_EXCEPTION_DATA_ABORT)
val = read_sysreg(HDFAR);
else
val = read_special(SPSR);
if (read_sysreg(VTTBR)) {
struct kvm_vcpu *vcpu;
struct kvm_cpu_context *host_ctxt;
vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
__timer_disable_traps(vcpu);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
__banked_restore_state(host_ctxt);
__sysreg_restore_state(host_ctxt);
}
/* Call panic for real */
__hyp_do_panic(__hyp_panic_string[cause], elr, val);
unreachable();
}


@ -1,68 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Original code:
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
*/
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
/**
* Flush per-VMID TLBs
*
* __kvm_tlb_flush_vmid(struct kvm *kvm);
*
* We rely on the hardware to broadcast the TLB invalidation to all CPUs
* inside the inner-shareable domain (which is the case for all v7
* implementations). If we come across a non-IS SMP implementation, we'll
* have to use an IPI based mechanism. Until then, we stick to the simple
* hardware assisted version.
*
* As v7 does not support flushing per IPA, just nuke the whole TLB
* instead, ignoring the ipa value.
*/
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
write_sysreg(kvm_get_vttbr(kvm), VTTBR);
isb();
write_sysreg(0, TLBIALLIS);
dsb(ish);
isb();
write_sysreg(0, VTTBR);
}
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
__kvm_tlb_flush_vmid(kvm);
}
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
/* Switch to requested VMID */
write_sysreg(kvm_get_vttbr(kvm), VTTBR);
isb();
write_sysreg(0, TLBIALL);
dsb(nsh);
isb();
write_sysreg(0, VTTBR);
}
void __hyp_text __kvm_flush_vm_context(void)
{
write_sysreg(0, TLBIALLNSNHIS);
write_sysreg(0, ICIALLUIS);
dsb(ish);
}


@ -1,57 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#include <linux/linkage.h>
#include <asm/vfpmacros.h>
.text
.pushsection .hyp.text, "ax"
/* void __vfp_save_state(struct vfp_hard_struct *vfp); */
ENTRY(__vfp_save_state)
push {r4, r5}
VFPFMRX r1, FPEXC
@ Make sure VFP is *really* enabled so we can touch the registers.
orr r5, r1, #FPEXC_EN
tst r5, #FPEXC_EX @ Check for VFP Subarchitecture
bic r5, r5, #FPEXC_EX @ FPEXC_EX disable
VFPFMXR FPEXC, r5
isb
VFPFMRX r2, FPSCR
beq 1f
@ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
@ we only need to save them if FPEXC_EX is set.
VFPFMRX r3, FPINST
tst r5, #FPEXC_FP2V
VFPFMRX r4, FPINST2, ne @ vmrsne
1:
VFPFSTMIA r0, r5 @ Save VFP registers
stm r0, {r1-r4} @ Save FPEXC, FPSCR, FPINST, FPINST2
pop {r4, r5}
bx lr
ENDPROC(__vfp_save_state)
/* void __vfp_restore_state(struct vfp_hard_struct *vfp);
* Assume FPEXC_EN is on and FPEXC_EX is off */
ENTRY(__vfp_restore_state)
VFPFLDMIA r0, r1 @ Load VFP registers
ldm r0, {r0-r3} @ Load FPEXC, FPSCR, FPINST, FPINST2
VFPFMXR FPSCR, r1
tst r0, #FPEXC_EX @ Check for VFP Subarchitecture
beq 1f
VFPFMXR FPINST, r2
tst r0, #FPEXC_FP2V
VFPFMXR FPINST2, r3, ne
1:
VFPFMXR FPEXC, r0 @ FPEXC (last, in case !EN)
bx lr
ENDPROC(__vfp_restore_state)
.popsection


@ -1,157 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unified.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/virt.h>
/********************************************************************
* Hypervisor initialization
* - should be called with:
* r0 = top of Hyp stack (kernel VA)
* r1 = pointer to hyp vectors
* r2,r3 = Hypervisor pgd pointer
*
* The init scenario is:
* - We jump in HYP with 3 parameters: runtime HYP pgd, runtime stack,
* runtime vectors
* - Invalidate TLBs
* - Set stack and vectors
* - Setup the page tables
* - Enable the MMU
* - Profit! (or eret, if you only care about the code).
*
* Another possibility is to get a HYP stub hypercall.
* We discriminate between the two by checking if r0 contains a value
* that is less than HVC_STUB_HCALL_NR.
*/
.text
.pushsection .hyp.idmap.text,"ax"
.align 5
__kvm_hyp_init:
.globl __kvm_hyp_init
@ Hyp-mode exception vector
W(b) .
W(b) .
W(b) .
W(b) .
W(b) .
W(b) __do_hyp_init
W(b) .
W(b) .
__do_hyp_init:
@ Check for a stub hypercall
cmp r0, #HVC_STUB_HCALL_NR
blo __kvm_handle_stub_hvc
@ Set stack pointer
mov sp, r0
@ Set HVBAR to point to the HYP vectors
mcr p15, 4, r1, c12, c0, 0 @ HVBAR
@ Set the HTTBR to point to the hypervisor PGD pointer passed
mcrr p15, 4, rr_lo_hi(r2, r3), c2
@ Set the HTCR and VTCR to the same shareability and cacheability
@ settings as the non-secure TTBCR and with T0SZ == 0.
mrc p15, 4, r0, c2, c0, 2 @ HTCR
ldr r2, =HTCR_MASK
bic r0, r0, r2
mrc p15, 0, r1, c2, c0, 2 @ TTBCR
and r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ)
orr r0, r0, r1
mcr p15, 4, r0, c2, c0, 2 @ HTCR
@ Use the same memory attributes for hyp. accesses as the kernel
@ (copy MAIRx to HMAIRx).
mrc p15, 0, r0, c10, c2, 0
mcr p15, 4, r0, c10, c2, 0
mrc p15, 0, r0, c10, c2, 1
mcr p15, 4, r0, c10, c2, 1
@ Invalidate the stale TLBs from the bootloader
mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
dsb ish
@ Set the HSCTLR to:
@ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
@ - Endianness: Kernel config
@ - Fast Interrupt Features: Kernel config
@ - Write permission implies XN: disabled
@ - Instruction cache: enabled
@ - Data/Unified cache: enabled
@ - MMU: enabled (this code must be run from an identity mapping)
mrc p15, 4, r0, c1, c0, 0 @ HSCTLR
ldr r2, =HSCTLR_MASK
bic r0, r0, r2
mrc p15, 0, r1, c1, c0, 0 @ SCTLR
ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
and r1, r1, r2
ARM( ldr r2, =(HSCTLR_M) )
THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) )
orr r1, r1, r2
orr r0, r0, r1
mcr p15, 4, r0, c1, c0, 0 @ HSCTLR
isb
eret
ENTRY(__kvm_handle_stub_hvc)
cmp r0, #HVC_SOFT_RESTART
bne 1f
/* The target is expected in r1 */
msr ELR_hyp, r1
mrs r0, cpsr
bic r0, r0, #MODE_MASK
orr r0, r0, #HYP_MODE
THUMB( orr r0, r0, #PSR_T_BIT )
msr spsr_cxsf, r0
b reset
1: cmp r0, #HVC_RESET_VECTORS
bne 1f
reset:
/* We're now in idmap, disable MMU */
mrc p15, 4, r1, c1, c0, 0 @ HSCTLR
ldr r0, =(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I)
bic r1, r1, r0
mcr p15, 4, r1, c1, c0, 0 @ HSCTLR
/*
* Install stub vectors, using ardb's VA->PA trick.
*/
0: adr r0, 0b @ PA(0)
movw r1, #:lower16:__hyp_stub_vectors - 0b @ VA(stub) - VA(0)
movt r1, #:upper16:__hyp_stub_vectors - 0b
add r1, r1, r0 @ PA(stub)
mcr p15, 4, r1, c12, c0, 0 @ HVBAR
b exit
1: ldr r0, =HVC_STUB_ERR
eret
exit:
mov r0, #0
eret
ENDPROC(__kvm_handle_stub_hvc)
.ltorg
.globl __kvm_hyp_init_end
__kvm_hyp_init_end:
.popsection
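
Tying the two entry paths together: the SVC-side caller either passed the three init parameters listed in the header comment, or issued one of the stub hypercalls handled by __kvm_handle_stub_hvc. A hedged sketch of both call sites; the wrapper names are illustrative, and the real marshalling lived in the removed kvm headers:

/* Sketch: install the runtime HYP environment (r0 = stack, r1 = vectors,
 * r2/r3 = pgd, exactly as documented at the top of this file). */
static void sketch_cpu_init_hyp_mode(phys_addr_t pgd_ptr,
				     unsigned long hyp_stack_ptr,
				     unsigned long vector_ptr)
{
	/*
	 * kvm_call_hyp() issues an HVC; since hyp_stack_ptr is far above
	 * HVC_STUB_HCALL_NR, __do_hyp_init treats this as an init request.
	 */
	kvm_call_hyp((void *)hyp_stack_ptr, vector_ptr, pgd_ptr);
}

/* Sketch: ask HYP to drop back to the stub vectors (e.g. before a kexec). */
static void sketch_reset_hyp_vectors(void)
{
	kvm_call_hyp((void *)HVC_RESET_VECTORS);
}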


@ -1,36 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#include <linux/linkage.h>
.text
/********************************************************************
* Call function in Hyp mode
*
* unsigned long kvm_call_hyp(void *hypfn, ...);
*
* This is not really a variadic function in the classic C sense, and care must
* be taken when calling it to ensure that parameters are passed in registers
* only, since the stack will change between the caller and the callee.
*
* Call the function with the first argument containing a pointer to the
* function you wish to call in Hyp mode, and subsequent arguments will be
* passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
* function pointer can be passed). The function being called must be mapped
* in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
* passed in r0 (strictly 32bit).
*
* The calling convention follows the standard AAPCS:
* r0 - r3: caller save
* r12: caller save
* rest: callee save
*/
ENTRY(__kvm_call_hyp)
hvc #0
bx lr
ENDPROC(__kvm_call_hyp)
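
A typical caller, matching the convention described above and assuming the declarations from the removed asm/kvm_asm.h are in scope:

/* Sketch: flush a guest's stage-2 TLB entries from SVC mode via an HVC. */
static void sketch_flush_guest_tlbs(struct kvm *kvm)
{
	/*
	 * The function pointer travels in r0 and 'kvm' in r1; the HYP trap
	 * handler then invokes __kvm_tlb_flush_vmid with 'kvm' as its first
	 * argument, and any return value comes back in r0 (strictly 32bit).
	 */
	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}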


@ -1,16 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* irq.h: in kernel interrupt controller related definitions
* Copyright (c) 2016 Red Hat, Inc.
*
* This header is included by irqchip.c. However, on ARM, interrupt
* controller declarations are located in include/kvm/arm_vgic.h since
* they are mostly shared between arm and arm64.
*/
#ifndef __IRQ_H
#define __IRQ_H
#include <kvm/arm_vgic.h>
#endif


@ -1,86 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <asm/unified.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_arch_timer.h>
/******************************************************************************
* Cortex-A15 and Cortex-A7 Reset Values
*/
static struct kvm_regs cortexa_regs_reset = {
.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
};
/*******************************************************************************
* Exported reset function
*/
/**
* kvm_reset_vcpu - sets core registers and cp15 registers to their reset values
* @vcpu: The VCPU pointer
*
* This function finds the right table above and sets the registers on the
* virtual CPU struct to their architecturally defined reset values.
*/
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
struct kvm_regs *reset_regs;
switch (vcpu->arch.target) {
case KVM_ARM_TARGET_CORTEX_A7:
case KVM_ARM_TARGET_CORTEX_A15:
reset_regs = &cortexa_regs_reset;
vcpu->arch.midr = read_cpuid_id();
break;
default:
return -ENODEV;
}
/* Reset core registers */
memcpy(&vcpu->arch.ctxt.gp_regs, reset_regs, sizeof(vcpu->arch.ctxt.gp_regs));
/* Reset CP15 registers */
kvm_reset_coprocs(vcpu);
/*
* Additional reset state handling that PSCI may have imposed on us.
* Must be done after all the sys_reg reset.
*/
if (READ_ONCE(vcpu->arch.reset_state.reset)) {
unsigned long target_pc = vcpu->arch.reset_state.pc;
/* Gracefully handle Thumb2 entry point */
if (target_pc & 1) {
target_pc &= ~1UL;
vcpu_set_thumb(vcpu);
}
/* Propagate caller endianness */
if (vcpu->arch.reset_state.be)
kvm_vcpu_set_be(vcpu);
*vcpu_pc(vcpu) = target_pc;
vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
vcpu->arch.reset_state.reset = false;
}
/* Reset arch_timer context */
return kvm_timer_vcpu_reset(vcpu);
}
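
The reset_state block consumed above is filled in by the (removed) PSCI CPU_ON handler: bit 0 of the requested entry point selects Thumb, and the caller's endianness is propagated. A hedged sketch of the producer side, with illustrative function and type names:

/* Sketch: how a PSCI CPU_ON request feeds vcpu->arch.reset_state. */
static void sketch_psci_prepare_reset(struct kvm_vcpu *target,
				      struct kvm_vcpu *source,
				      unsigned long entry_pc,
				      unsigned long context_id)
{
	struct vcpu_reset_state *st = &target->arch.reset_state;

	st->pc = entry_pc;		 /* bit 0 set => enter in Thumb state   */
	st->r0 = context_id;		 /* delivered in r0 after the reset     */
	st->be = kvm_vcpu_is_be(source); /* propagate the caller's endianness   */
	st->reset = true;		 /* picked up by kvm_reset_vcpu() above */
}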


@ -1,86 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_TRACE_ARM_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_ARM_KVM_H
#include <linux/tracepoint.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
/* Architecturally implementation defined CP15 register access */
TRACE_EVENT(kvm_emulate_cp15_imp,
TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn,
unsigned long CRm, unsigned long Op2, bool is_write),
TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write),
TP_STRUCT__entry(
__field( unsigned int, Op1 )
__field( unsigned int, Rt1 )
__field( unsigned int, CRn )
__field( unsigned int, CRm )
__field( unsigned int, Op2 )
__field( bool, is_write )
),
TP_fast_assign(
__entry->is_write = is_write;
__entry->Op1 = Op1;
__entry->Rt1 = Rt1;
__entry->CRn = CRn;
__entry->CRm = CRm;
__entry->Op2 = Op2;
),
TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u",
(__entry->is_write) ? "mcr" : "mrc",
__entry->Op1, __entry->Rt1, __entry->CRn,
__entry->CRm, __entry->Op2)
);
TRACE_EVENT(kvm_wfx,
TP_PROTO(unsigned long vcpu_pc, bool is_wfe),
TP_ARGS(vcpu_pc, is_wfe),
TP_STRUCT__entry(
__field( unsigned long, vcpu_pc )
__field( bool, is_wfe )
),
TP_fast_assign(
__entry->vcpu_pc = vcpu_pc;
__entry->is_wfe = is_wfe;
),
TP_printk("guest executed wf%c at: 0x%08lx",
__entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
);
TRACE_EVENT(kvm_hvc,
TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
TP_ARGS(vcpu_pc, r0, imm),
TP_STRUCT__entry(
__field( unsigned long, vcpu_pc )
__field( unsigned long, r0 )
__field( unsigned long, imm )
),
TP_fast_assign(
__entry->vcpu_pc = vcpu_pc;
__entry->r0 = r0;
__entry->imm = imm;
),
TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx",
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
#endif /* _TRACE_ARM_KVM_H */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
/* This part must be outside protection */
#include <trace/define_trace.h>
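
Each TRACE_EVENT above expands to a trace_<name>() helper that the (removed) exit-handling code called at the point of interest; a hedged example for the HVC event, with illustrative register accessors:

/* Sketch: firing the kvm_hvc tracepoint from an HVC exit handler. */
static int sketch_handle_hvc(struct kvm_vcpu *vcpu)
{
	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
		      kvm_vcpu_hvc_get_imm(vcpu));

	return 1;	/* 1 == keep running the guest */
}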


@ -1,27 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* VGIC system register handling functions for AArch32 mode
*/
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include "vgic.h"
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
u64 *reg)
{
/*
* TODO: Implement for AArch32
*/
return -ENXIO;
}
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id,
u64 *reg)
{
/*
* TODO: Implement for AArch32
*/
return -ENXIO;
}