/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */
#ifndef __KVM_ARM_VGIC_H
#define __KVM_ARM_VGIC_H

#include <linux/bits.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/static_key.h>
#include <linux/types.h>
#include <kvm/iodev.h>
#include <linux/list.h>
#include <linux/jump_label.h>

#include <linux/irqchip/arm-gic-v4.h>

#define VGIC_V3_MAX_CPUS	512
#define VGIC_V2_MAX_CPUS	8
#define VGIC_NR_IRQS_LEGACY	256
#define VGIC_NR_SGIS		16
#define VGIC_NR_PPIS		16
#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)
#define VGIC_MAX_PRIVATE	(VGIC_NR_PRIVATE_IRQS - 1)
#define VGIC_MAX_SPI		1019
#define VGIC_MAX_RESERVED	1023
#define VGIC_MIN_LPI		8192
#define KVM_IRQCHIP_NUM_PINS	(1020 - 32)

#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)
#define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \
			 (irq) <= VGIC_MAX_SPI)
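
/*
 * Illustrative sketch, not part of the upstream API: how the constants and
 * predicates above partition the guest-visible INTID space. The helper name
 * is hypothetical.
 */
static inline const char *example_intid_class(u32 intid)
{
	if (intid < VGIC_NR_SGIS)
		return "SGI";		/* 0..15: software-generated */
	if (irq_is_ppi(intid))
		return "PPI";		/* 16..31: private peripheral */
	if (irq_is_spi(intid))
		return "SPI";		/* 32..1019: shared peripheral */
	if (intid <= VGIC_MAX_RESERVED)
		return "reserved";	/* 1020..1023 */
	if (intid >= VGIC_MIN_LPI)
		return "LPI";		/* 8192 and up: message-based */
	return "invalid";		/* 1024..8191 */
}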

enum vgic_type {
	VGIC_V2,		/* Good ol' GICv2 */
	VGIC_V3,		/* New fancy GICv3 */
};

/* same for all guests, as it depends only on the _host's_ GIC model */
struct vgic_global {
	/* type of the host GIC */
	enum vgic_type		type;

	/* Physical address of vgic virtual cpu interface */
	phys_addr_t		vcpu_base;

	/* GICV mapping, kernel VA */
	void __iomem		*vcpu_base_va;
	/* GICV mapping, HYP VA */
	void __iomem		*vcpu_hyp_va;

	/* virtual control interface mapping, kernel VA */
	void __iomem		*vctrl_base;
	/* virtual control interface mapping, HYP VA */
	void __iomem		*vctrl_hyp;

	/* Number of implemented list registers */
	int			nr_lr;

	/* Maintenance IRQ number */
	unsigned int		maint_irq;

	/* maximum number of VCPUs allowed (GICv2 limits us to 8) */
	int			max_gic_vcpus;

	/* Only needed for the legacy KVM_CREATE_IRQCHIP */
	bool			can_emulate_gicv2;

	/* Hardware has GICv4? */
	bool			has_gicv4;
	bool			has_gicv4_1;

	/* Pseudo GICv3 from outer space */
	bool			no_hw_deactivation;

	/* GIC system register CPU interface */
	struct static_key_false gicv3_cpuif;

	u32			ich_vtr_el2;
};

extern struct vgic_global kvm_vgic_global_state;

#define VGIC_V2_MAX_LRS		(1 << 6)
#define VGIC_V3_MAX_LRS		16
#define VGIC_V3_LR_INDEX(lr)	(VGIC_V3_MAX_LRS - 1 - (lr))
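
/*
 * Worked example (informative comment only): the macro maps list register n
 * to array slot VGIC_V3_MAX_LRS - 1 - n, i.e. the vgic_lr[] array of
 * struct vgic_v3_cpu_if is indexed in reverse: with VGIC_V3_MAX_LRS == 16,
 * VGIC_V3_LR_INDEX(0) == 15 and VGIC_V3_LR_INDEX(15) == 0.
 */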

enum vgic_irq_config {
	VGIC_CONFIG_EDGE = 0,
	VGIC_CONFIG_LEVEL
};

/*
 * Per-irq ops overriding some common behaviours.
 *
 * Always called in non-preemptible section and the functions can use
 * kvm_arm_get_running_vcpu() to get the vcpu pointer for private IRQs.
 */
struct irq_ops {
	/* Per interrupt flags for special-cased interrupts */
	unsigned long flags;

#define VGIC_IRQ_SW_RESAMPLE	BIT(0)	/* Clear the active state for resampling */

	/*
	 * Callback function pointer to in-kernel devices that can tell us the
	 * state of the input level of a mapped level-triggered IRQ faster than
	 * peeking into the physical GIC.
	 */
	bool (*get_input_level)(int vintid);
};
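
/*
 * Illustrative sketch, not part of the upstream API: what per-irq ops might
 * look like for an in-kernel device driving a forwarded level-triggered
 * interrupt. All "example_*" names are hypothetical.
 *
 *	static bool example_line_level;		// device-side line state
 *
 *	static bool example_get_input_level(int vintid)
 *	{
 *		// Re-sample the line without touching the physical GIC
 *		return READ_ONCE(example_line_level);
 *	}
 *
 *	static struct irq_ops example_irq_ops = {
 *		.flags		 = VGIC_IRQ_SW_RESAMPLE,
 *		.get_input_level = example_get_input_level,
 *	};
 *
 * An IRQ carrying these ops makes vgic_irq_needs_resampling() (below) return
 * true, so the vgic clears the active state itself and re-samples the line
 * via get_input_level().
 */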

struct vgic_irq {
	raw_spinlock_t irq_lock;	/* Protects the content of the struct */
	struct list_head lpi_list;	/* Used to link all LPIs together */
	struct list_head ap_list;

	struct kvm_vcpu *vcpu;		/* SGIs and PPIs: the owning VCPU.
					 * SPIs and LPIs: the VCPU whose
					 * ap_list this is queued on.
					 */

	struct kvm_vcpu *target_vcpu;	/* The VCPU that this interrupt should
					 * be sent to, as a result of the
					 * targets reg (v2) or the
					 * affinity reg (v3).
					 */

	u32 intid;			/* Guest visible INTID */
	bool line_level;		/* Level only */
	bool pending_latch;		/* The pending latch state used to calculate
					 * the pending state for both level
					 * and edge triggered IRQs. */
	bool active;			/* not used for LPIs */
	bool enabled;
	bool hw;			/* Tied to HW IRQ */
	struct kref refcount;		/* Used for LPIs */
	u32 hwintid;			/* HW INTID number */
	unsigned int host_irq;		/* linux irq corresponding to hwintid */
	union {
		u8 targets;		/* GICv2 target VCPUs mask */
		u32 mpidr;		/* GICv3 target VCPU */
	};
	u8 source;			/* GICv2 SGIs only */
	u8 active_source;		/* GICv2 SGIs only */
	u8 priority;
	u8 group;			/* 0 == group 0, 1 == group 1 */
	enum vgic_irq_config config;	/* Level or edge */

	struct irq_ops *ops;

	void *owner;			/* Opaque pointer to reserve an interrupt
					   for in-kernel devices. */
};

static inline bool vgic_irq_needs_resampling(struct vgic_irq *irq)
{
	return irq->ops && (irq->ops->flags & VGIC_IRQ_SW_RESAMPLE);
}

struct vgic_register_region;
struct vgic_its;

enum iodev_type {
	IODEV_CPUIF,
	IODEV_DIST,
	IODEV_REDIST,
	IODEV_ITS
};

struct vgic_io_device {
	gpa_t base_addr;
	union {
		struct kvm_vcpu *redist_vcpu;
		struct vgic_its *its;
	};
	const struct vgic_register_region *regions;
	enum iodev_type iodev_type;
	int nr_regions;
	struct kvm_io_device dev;
};

struct vgic_its {
	/* The base address of the ITS control register frame */
	gpa_t			vgic_its_base;

	bool			enabled;
	struct vgic_io_device	iodev;
	struct kvm_device	*dev;

	/* These registers correspond to GITS_BASER{0,1} */
	u64			baser_device_table;
	u64			baser_coll_table;

	/* Protects the command queue */
	struct mutex		cmd_lock;
	u64			cbaser;
	u32			creadr;
	u32			cwriter;

	/* migration ABI revision in use */
	u32			abi_rev;

	/* Protects the device and collection lists */
	struct mutex		its_lock;
	struct list_head	device_list;
	struct list_head	collection_list;
};

struct vgic_state_iter;

struct vgic_redist_region {
	u32 index;
	gpa_t base;
	u32 count;	/* number of redistributors or 0 if single region */
	u32 free_index;	/* index of the next free redistributor */
	struct list_head list;
};

struct vgic_dist {
	bool			in_kernel;
	bool			ready;
	bool			initialized;

	/* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
	u32			vgic_model;

	/* Implementation revision as reported in the GICD_IIDR */
	u32			implementation_rev;
#define KVM_VGIC_IMP_REV_2	2 /* GICv2 restorable groups */
#define KVM_VGIC_IMP_REV_3	3 /* GICv3 GICR_CTLR.{IW,CES,RWP} */
#define KVM_VGIC_IMP_REV_LATEST	KVM_VGIC_IMP_REV_3

	/* Userspace can write to GICv2 IGROUPR */
	bool			v2_groups_user_writable;

	/* Do injected MSIs require an additional device ID? */
	bool			msis_require_devid;

	int			nr_spis;

	/* base addresses in guest physical address space: */
	gpa_t			vgic_dist_base;		/* distributor */
	union {
		/* either a GICv2 CPU interface */
		gpa_t		vgic_cpu_base;
		/* or a number of GICv3 redistributor regions */
		struct list_head rd_regions;
	};

	/* distributor enabled */
	bool			enabled;

	/* Wants SGIs without active state */
	bool			nassgireq;

	struct vgic_irq		*spis;

	struct vgic_io_device	dist_iodev;

	bool			has_its;

	/*
	 * Contains the attributes and gpa of the LPI configuration table.
	 * Since we report GICR_TYPER.CommonLPIAff as 0b00, we can share
	 * one address across all redistributors.
	 * GICv3 spec: IHI 0069E 6.1.1 "LPI Configuration tables"
	 */
	u64			propbaser;

	/* Protects the lpi_list and the count value below. */
	raw_spinlock_t		lpi_list_lock;
	struct list_head	lpi_list_head;
	int			lpi_list_count;

	/* LPI translation cache */
	struct list_head	lpi_translation_cache;

	/* used by vgic-debug */
	struct vgic_state_iter *iter;

	/*
	 * GICv4 ITS per-VM data, containing the IRQ domain, the VPE
	 * array, the property table pointer as well as allocation
	 * data. This essentially ties the Linux IRQ core and ITS
	 * together, and avoids leaking KVM's data structures anywhere
	 * else.
	 */
	struct its_vm		its_vm;
};

struct vgic_v2_cpu_if {
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_apr;
	u32		vgic_lr[VGIC_V2_MAX_LRS];

	unsigned int	used_lrs;
};

struct vgic_v3_cpu_if {
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_sre;	/* Restored only, change ignored */
	u32		vgic_ap0r[4];
	u32		vgic_ap1r[4];
	u64		vgic_lr[VGIC_V3_MAX_LRS];

	/*
	 * GICv4 ITS per-VPE data, containing the doorbell IRQ, the
	 * pending table pointer, the its_vm pointer and a few other
	 * HW specific things. As for the its_vm structure, this is
	 * linking the Linux IRQ subsystem and the ITS together.
	 */
	struct its_vpe	its_vpe;

	unsigned int	used_lrs;
};

struct vgic_cpu {
	/* CPU vif control registers for world switch */
	union {
		struct vgic_v2_cpu_if	vgic_v2;
		struct vgic_v3_cpu_if	vgic_v3;
	};

	struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];

	raw_spinlock_t ap_list_lock;	/* Protects the ap_list */

	/*
	 * List of IRQs that this VCPU should consider because they are either
	 * Active or Pending (hence the name; AP list), or because they recently
	 * were one of the two and need to be migrated off this list to another
	 * VCPU.
	 */
	struct list_head ap_list_head;

	/*
	 * Members below are used with GICv3 emulation only and represent
	 * parts of the redistributor.
	 */
	struct vgic_io_device	rd_iodev;
	struct vgic_redist_region *rdreg;
	u32 rdreg_index;
	atomic_t syncr_busy;

	/* Contains the attributes and gpa of the LPI pending tables. */
	u64 pendbaser;
	/* GICR_CTLR.{ENABLE_LPIS,RWP} */
	atomic_t ctlr;

	/* Cache guest priority bits */
	u32 num_pri_bits;

	/* Cache guest interrupt ID bits */
	u32 num_id_bits;
};

extern struct static_key_false vgic_v2_cpuif_trap;
extern struct static_key_false vgic_v3_cpuif_trap;

int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr);
void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_hyp_init(void);
void kvm_vgic_init_cpu_hardware(void);

int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level, void *owner);
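
/*
 * Usage sketch (hypothetical INTID and owner, assuming an initialized vgic):
 * raising and lowering a level-triggered SPI from an in-kernel device.
 * @cpuid is only relevant for private interrupts (SGIs/PPIs); @owner must
 * match a previous kvm_vgic_set_owner() claim (declared below), or be NULL
 * if the interrupt is unclaimed.
 *
 *	kvm_vgic_inject_irq(kvm, 0, 40, true, my_owner);	// assert SPI 40
 *	...
 *	kvm_vgic_inject_irq(kvm, 0, 40, false, my_owner);	// deassert it
 */
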
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
			  u32 vintid, struct irq_ops *ops);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
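
/*
 * Usage sketch (hypothetical, loosely modelled on what a timer-like device
 * could do): forward a physical interrupt to a guest PPI, query its active
 * state, and tear the mapping down again. host_irq is the Linux IRQ number;
 * the INTID 27 and example_irq_ops are placeholder values.
 *
 *	ret = kvm_vgic_map_phys_irq(vcpu, host_irq, 27, &example_irq_ops);
 *	...
 *	if (kvm_vgic_map_is_active(vcpu, 27))
 *		;	// guest has not yet deactivated the mapped IRQ
 *	...
 *	ret = kvm_vgic_unmap_phys_irq(vcpu, 27);
 */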

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);

void kvm_vgic_load(struct kvm_vcpu *vcpu);
void kvm_vgic_put(struct kvm_vcpu *vcpu);
void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);

#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k)	((k)->arch.vgic.initialized)
#define vgic_ready(k)		((k)->arch.vgic.ready)
#define vgic_valid_spi(k, i)	(((i) >= VGIC_NR_PRIVATE_IRQS) && \
				 ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))

bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);

void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1);

/**
 * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
 *
 * The host's GIC naturally limits the maximum number of VCPUs a guest
 * can use.
 */
static inline int kvm_vgic_get_max_vcpus(void)
{
	return kvm_vgic_global_state.max_gic_vcpus;
}

/**
 * kvm_vgic_setup_default_irq_routing:
 * Set up a default flat GSI routing table mapping all SPIs
 */
int kvm_vgic_setup_default_irq_routing(struct kvm *kvm);

int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner);

struct kvm_kernel_irq_routing_entry;

int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
			       struct kvm_kernel_irq_routing_entry *irq_entry);

int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
				 struct kvm_kernel_irq_routing_entry *irq_entry);

int vgic_v4_load(struct kvm_vcpu *vcpu);
void vgic_v4_commit(struct kvm_vcpu *vcpu);
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);

#endif /* __KVM_ARM_VGIC_H */