KVM/ARM Changes for v4.14
Two minor cleanups and improvements, a fix for decoding external abort types from guests, and added support for migrating the active priority of interrupts when running a GICv2 guest on a GICv3 host.

Merge tag 'kvm-arm-for-v4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm
commit 082d3900a4
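For context, the headline feature here is APR migration: a GICv2 guest's active priority registers (GICC_APR<n>) can now be saved and restored through the KVM_DEV_ARM_VGIC_GRP_CPU_REGS device group instead of reading as zero. A minimal userspace sketch of such an access follows; the vgic_cpu_reg() helper is hypothetical, and GIC_CPU_ACTIVEPRIO is assumed to be the GICC_APR0 offset (0xd0), with APR1..3 at the following words.

#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define GIC_CPU_ACTIVEPRIO	0xd0	/* GICC_APR0 (assumed offset) */

/* Read or write one 32-bit GICv2 CPU interface register of a vCPU via
 * the vGIC device fd; illustrative only, no error handling. */
static int vgic_cpu_reg(int vgic_fd, uint64_t vcpu_idx, uint32_t offset,
			uint32_t *val, bool write)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
		.attr  = (vcpu_idx << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
			 (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK),
		.addr  = (uint64_t)(uintptr_t)val,
	};

	return ioctl(vgic_fd, write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
		     &attr);
}

Before this series these offsets were RAZ/WI on the KVM side; the vgic_mmio_read_apr()/vgic_mmio_write_apr() handlers added below back them with real state.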
Documentation/virtual/kvm/devices/arm-vgic.txt:
@@ -83,6 +83,11 @@ Groups:
   Bits for undefined preemption levels are RAZ/WI.
 
+  Note that this differs from a CPU's view of the APRs on hardware in which
+  a GIC without the security extensions expose group 0 and group 1 active
+  priorities in separate register groups, whereas we show a combined view
+  similar to GICv2's GICH_APR.
+
   For historical reasons and to provide ABI compatibility with userspace we
   export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
   field in the lower 5 bits of a word, meaning that userspace must always
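Per the documentation above, the exported GICC_PMR uses the GICH_VMCR.VMPriMask encoding: priority bits [7:3] live in the low 5 bits of the exchanged word. A sketch of the conversion userspace has to apply (helper names are illustrative):

/* 8-bit GICv2 priority <-> 5-bit VMPriMask-style encoding */
static inline uint32_t pmr_to_attr(uint32_t pmr)  { return (pmr >> 3) & 0x1f; }
static inline uint32_t attr_to_pmr(uint32_t attr) { return (attr & 0x1f) << 3; }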
arch/arm/include/asm/kvm_arm.h:
@@ -227,7 +227,6 @@
 
 #define HSR_DABT_S1PTW		(_AC(1, UL) << 7)
 #define HSR_DABT_CM		(_AC(1, UL) << 8)
-#define HSR_DABT_EA		(_AC(1, UL) << 9)
 
 #define kvm_arm_exception_type	\
 	{0, "RESET" },		\
arch/arm/include/asm/kvm_emulate.h:
@@ -149,11 +149,6 @@ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
 	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
 }
 
-static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
-{
-	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
-}
-
 static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
@@ -206,6 +201,25 @@ static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
 }
 
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+	switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
+	case FSC_SEA:
+	case FSC_SEA_TTW0:
+	case FSC_SEA_TTW1:
+	case FSC_SEA_TTW2:
+	case FSC_SEA_TTW3:
+	case FSC_SECC:
+	case FSC_SECC_TTW0:
+	case FSC_SECC_TTW1:
+	case FSC_SECC_TTW2:
+	case FSC_SECC_TTW3:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
arch/arm64/include/asm/kvm_emulate.h:
@@ -188,11 +188,6 @@ static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
-static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
-{
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
-}
-
 static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
@@ -240,6 +235,25 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
+static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+{
+	switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
+	case FSC_SEA:
+	case FSC_SEA_TTW0:
+	case FSC_SEA_TTW1:
+	case FSC_SEA_TTW2:
+	case FSC_SEA_TTW3:
+	case FSC_SECC:
+	case FSC_SECC_TTW0:
+	case FSC_SECC_TTW1:
+	case FSC_SECC_TTW2:
+	case FSC_SECC_TTW3:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
 	u32 esr = kvm_vcpu_get_hsr(vcpu);
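Both the 32-bit and 64-bit variants above now classify an external abort purely by its fault status code rather than by the HSR/ESR EA bit, which only provides an implementation-defined classification of external aborts. For reference, the FSC encodings being matched (as defined in the kernel's kvm_arm.h headers; values per the ARM ARM):

#define FSC_SEA		(0x10)	/* Synchronous External Abort */
#define FSC_SEA_TTW0	(0x14)	/* SEA on translation table walk, level 0 */
#define FSC_SEA_TTW1	(0x15)
#define FSC_SEA_TTW2	(0x16)
#define FSC_SEA_TTW3	(0x17)
#define FSC_SECC	(0x18)	/* Synchronous parity/ECC error */
#define FSC_SECC_TTW0	(0x1c)	/* ...on translation table walk, level 0 */
#define FSC_SECC_TTW1	(0x1d)
#define FSC_SECC_TTW2	(0x1e)
#define FSC_SECC_TTW3	(0x1f)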
virt/kvm/arm/vgic/vgic-sys-reg-v3.c:
@@ -208,29 +208,12 @@ static void vgic_v3_access_apr_reg(struct kvm_vcpu *vcpu,
 static bool access_gic_aprn(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			    const struct sys_reg_desc *r, u8 apr)
 {
-	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
 	u8 idx = r->Op2 & 3;
 
-	/*
-	 * num_pri_bits are initialized with HW supported values.
-	 * We can rely safely on num_pri_bits even if VM has not
-	 * restored ICC_CTLR_EL1 before restoring APnR registers.
-	 */
-	switch (vgic_v3_cpu->num_pri_bits) {
-	case 7:
-		vgic_v3_access_apr_reg(vcpu, p, apr, idx);
-		break;
-	case 6:
-		if (idx > 1)
-			goto err;
-		vgic_v3_access_apr_reg(vcpu, p, apr, idx);
-		break;
-	default:
-		if (idx > 0)
-			goto err;
-		vgic_v3_access_apr_reg(vcpu, p, apr, idx);
-	}
+	if (idx > vgic_v3_max_apr_idx(vcpu))
+		goto err;
 
+	vgic_v3_access_apr_reg(vcpu, p, apr, idx);
 	return true;
 err:
 	if (!p->is_write)
virt/kvm/arm/mmu.c:
@@ -1454,25 +1454,6 @@ out:
 	kvm_set_pfn_accessed(pfn);
 }
 
-static bool is_abort_sea(unsigned long fault_status)
-{
-	switch (fault_status) {
-	case FSC_SEA:
-	case FSC_SEA_TTW0:
-	case FSC_SEA_TTW1:
-	case FSC_SEA_TTW2:
-	case FSC_SEA_TTW3:
-	case FSC_SECC:
-	case FSC_SECC_TTW0:
-	case FSC_SECC_TTW1:
-	case FSC_SECC_TTW2:
-	case FSC_SECC_TTW3:
-		return true;
-	default:
-		return false;
-	}
-}
-
 /**
  * kvm_handle_guest_abort - handles all 2nd stage aborts
  * @vcpu:	the VCPU pointer
@@ -1498,20 +1479,21 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
 
 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
-
-	/*
-	 * The host kernel will handle the synchronous external abort. There
-	 * is no need to pass the error into the guest.
-	 */
-	if (is_abort_sea(fault_status)) {
+	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
+
+	/* Synchronous External Abort? */
+	if (kvm_vcpu_dabt_isextabt(vcpu)) {
+		/*
+		 * For RAS the host kernel may handle this abort.
+		 * There is no need to pass the error into the guest.
+		 */
 		if (!handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
 			return 1;
-	}
 
-	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
-	if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) {
-		kvm_inject_vabt(vcpu);
-		return 1;
+		if (unlikely(!is_iabt)) {
+			kvm_inject_vabt(vcpu);
+			return 1;
+		}
 	}
 
 	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
virt/kvm/arm/vgic/vgic-debug.c:
@@ -234,7 +234,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
 	return 0;
 }
 
-static struct seq_operations vgic_debug_seq_ops = {
+static const struct seq_operations vgic_debug_seq_ops = {
 	.start = vgic_debug_start,
 	.next  = vgic_debug_next,
 	.stop  = vgic_debug_stop,
@@ -255,7 +255,7 @@ static int debug_open(struct inode *inode, struct file *file)
 	return ret;
 };
 
-static struct file_operations vgic_debug_fops = {
+static const struct file_operations vgic_debug_fops = {
 	.owner   = THIS_MODULE,
 	.open    = debug_open,
 	.read    = seq_read,
virt/kvm/arm/vgic/vgic-its.c:
@@ -144,7 +144,6 @@ struct its_ite {
 
 	struct vgic_irq *irq;
 	struct its_collection *collection;
-	u32 lpi;
 	u32 event_id;
 };
 
@@ -813,7 +812,7 @@ static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
 /* Must be called with its_lock mutex held */
 static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
 					  struct its_collection *collection,
-					  u32 lpi_id, u32 event_id)
+					  u32 event_id)
 {
 	struct its_ite *ite;
 
@@ -823,7 +822,6 @@ static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
 
 	ite->event_id	= event_id;
 	ite->collection = collection;
-	ite->lpi = lpi_id;
 
 	list_add_tail(&ite->ite_list, &device->itt_head);
 	return ite;
@@ -873,7 +871,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
 		new_coll = collection;
 	}
 
-	ite = vgic_its_alloc_ite(device, collection, lpi_nr, event_id);
+	ite = vgic_its_alloc_ite(device, collection, event_id);
 	if (IS_ERR(ite)) {
 		if (new_coll)
 			vgic_its_free_collection(its, coll_id);
@@ -1848,7 +1846,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
 
 	next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
 	val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
-	       ((u64)ite->lpi << KVM_ITS_ITE_PINTID_SHIFT) |
+	       ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
 		ite->collection->collection_id;
 	val = cpu_to_le64(val);
 	return kvm_write_guest(kvm, gpa, &val, ite_esz);
@@ -1895,7 +1893,7 @@ static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
 	if (!collection)
 		return -EINVAL;
 
-	ite = vgic_its_alloc_ite(dev, collection, lpi_id, event_id);
+	ite = vgic_its_alloc_ite(dev, collection, event_id);
 	if (IS_ERR(ite))
 		return PTR_ERR(ite);
 
virt/kvm/arm/vgic/vgic-mmio.c:
@@ -303,6 +303,51 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
 	vgic_set_vmcr(vcpu, &vmcr);
 }
 
+static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
+					gpa_t addr, unsigned int len)
+{
+	int n; /* which APRn is this */
+
+	n = (addr >> 2) & 0x3;
+
+	if (kvm_vgic_global_state.type == VGIC_V2) {
+		/* GICv2 hardware systems support max. 32 groups */
+		if (n != 0)
+			return 0;
+		return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
+	} else {
+		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+		if (n > vgic_v3_max_apr_idx(vcpu))
+			return 0;
+		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
+		return vgicv3->vgic_ap1r[n];
+	}
+}
+
+static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
+				gpa_t addr, unsigned int len,
+				unsigned long val)
+{
+	int n; /* which APRn is this */
+
+	n = (addr >> 2) & 0x3;
+
+	if (kvm_vgic_global_state.type == VGIC_V2) {
+		/* GICv2 hardware systems support max. 32 groups */
+		if (n != 0)
+			return;
+		vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
+	} else {
+		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+		if (n > vgic_v3_max_apr_idx(vcpu))
+			return;
+		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
+		vgicv3->vgic_ap1r[n] = val;
+	}
+}
+
 static const struct vgic_register_region vgic_v2_dist_registers[] = {
 	REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
 		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
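The (addr >> 2) & 0x3 word select in both handlers works because the four GICC_APR<n> registers are consecutive 32-bit words and the frame starts at the 16-byte-aligned offset 0xd0, so the same expression yields the APR index whether addr is region-relative or absolute. A throwaway check of the math:

/* Illustrative check only, not part of the patch. */
#include <assert.h>

int main(void)
{
	for (unsigned int n = 0; n < 4; n++)
		assert((((0xd0u + 4 * n) >> 2) & 0x3) == n);
	return 0;
}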
@@ -364,7 +409,7 @@ static const struct vgic_register_region vgic_v2_cpu_registers[] = {
 		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
-		vgic_mmio_read_raz, vgic_mmio_write_wi, 16,
+		vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
 		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
virt/kvm/arm/vgic/vgic.h:
@@ -220,4 +220,20 @@ int vgic_debug_destroy(struct kvm *kvm);
 bool lock_all_vcpus(struct kvm *kvm);
 void unlock_all_vcpus(struct kvm *kvm);
 
+static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;
+
+	/*
+	 * num_pri_bits are initialized with HW supported values.
+	 * We can rely safely on num_pri_bits even if VM has not
+	 * restored ICC_CTLR_EL1 before restoring APnR registers.
+	 */
+	switch (cpu_if->num_pri_bits) {
+	case 7: return 3;
+	case 6: return 1;
+	default: return 0;
+	}
+}
+
 #endif
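The mapping in vgic_v3_max_apr_idx() follows the GICv3 architecture: an implementation with N priority bits provides 2^(N-5) active priority registers per group, so the highest valid index is 2^(N-5) - 1. An equivalent computation, shown only as a cross-check:

/* Cross-check (illustrative): 7 bits -> 3, 6 -> 1, 5 -> 0 */
static inline int max_apr_idx(int num_pri_bits)
{
	return (1 << (num_pri_bits - 5)) - 1;
}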