Merge branch irq/gic-v3-nmi-fixes-5.19 into irq/irqchip-next

* irq/gic-v3-nmi-fixes-5.19:
  : .
  : GICv3 pseudo-NMI fixes from Mark Rutland:
  :
  : "These patches fix a couple of issues with the way GICv3 pseudo-NMIs are
  : handled:
  :
  : * The first patch adds a barrier we missed from NMI handling due to an
  :   oversight.
  :
  : * The second patch refactors some logic around reads from ICC_IAR1_EL1
  :   and adds commentary to explain what's going on.
  :
  : * The third patch descends into madness, reworking gic_handle_irq() to
  :   consistently manage ICC_PMR_EL1 + DAIF and avoid cases where these can
  :   be left in an inconsistent state while softirqs are processed."
  : .
  irqchip/gic-v3: Fix priority mask handling
  irqchip/gic-v3: Refactor ISB + EOIR at ack time
  irqchip/gic-v3: Ensure pseudo-NMIs have an ISB between ack and handling

Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in:
Marc Zyngier 2022-05-17 10:37:06 +01:00
Parents: 61299e1838 614ab80c96
Commit: 492449ae4f
3 changed files: 134 additions and 88 deletions

View file

@ -48,6 +48,7 @@ static inline u32 read_ ## a64(void) \
return read_sysreg(a32); \ return read_sysreg(a32); \
} \ } \
CPUIF_MAP(ICC_EOIR1, ICC_EOIR1_EL1)
CPUIF_MAP(ICC_PMR, ICC_PMR_EL1) CPUIF_MAP(ICC_PMR, ICC_PMR_EL1)
CPUIF_MAP(ICC_AP0R0, ICC_AP0R0_EL1) CPUIF_MAP(ICC_AP0R0, ICC_AP0R0_EL1)
CPUIF_MAP(ICC_AP0R1, ICC_AP0R1_EL1) CPUIF_MAP(ICC_AP0R1, ICC_AP0R1_EL1)
@ -63,12 +64,6 @@ CPUIF_MAP(ICC_AP1R3, ICC_AP1R3_EL1)
/* Low-level accessors */ /* Low-level accessors */
/*
 * Signal End Of Interrupt for INTID @irq on the CPU interface (ICC_EOIR1).
 * The ISB ensures the EOI write takes effect before any subsequent
 * instructions execute.
 */
static inline void gic_write_eoir(u32 irq)
{
write_sysreg(irq, ICC_EOIR1);
isb();
}
static inline void gic_write_dir(u32 val) static inline void gic_write_dir(u32 val)
{ {
write_sysreg(val, ICC_DIR); write_sysreg(val, ICC_DIR);

View file

@ -26,12 +26,6 @@
* sets the GP register's most significant bits to 0 with an explicit cast. * sets the GP register's most significant bits to 0 with an explicit cast.
*/ */
/*
 * Signal End Of Interrupt for INTID @irq via SYS_ICC_EOIR1_EL1.
 * The ISB ensures the EOI write takes effect before any subsequent
 * instructions execute.
 */
static inline void gic_write_eoir(u32 irq)
{
write_sysreg_s(irq, SYS_ICC_EOIR1_EL1);
isb();
}
static __always_inline void gic_write_dir(u32 irq) static __always_inline void gic_write_dir(u32 irq)
{ {
write_sysreg_s(irq, SYS_ICC_DIR_EL1); write_sysreg_s(irq, SYS_ICC_DIR_EL1);

View file

@ -559,7 +559,8 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
static void gic_eoi_irq(struct irq_data *d) static void gic_eoi_irq(struct irq_data *d)
{ {
gic_write_eoir(gic_irq(d)); write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
isb();
} }
static void gic_eoimode1_eoi_irq(struct irq_data *d) static void gic_eoimode1_eoi_irq(struct irq_data *d)
@ -639,40 +640,125 @@ static void gic_deactivate_unhandled(u32 irqnr)
if (irqnr < 8192) if (irqnr < 8192)
gic_write_dir(irqnr); gic_write_dir(irqnr);
} else { } else {
gic_write_eoir(irqnr); write_gicreg(irqnr, ICC_EOIR1_EL1);
isb();
} }
} }
/*
 * Follow a read of the IAR with any HW maintenance that needs to happen prior
 * to invoking the relevant IRQ handler. We must do two things:
 *
 * (1) Ensure instruction ordering between a read of IAR and subsequent
 *     instructions in the IRQ handler using an ISB.
 *
 *     It is possible for the IAR to report an IRQ which was signalled *after*
 *     the CPU took an IRQ exception as multiple interrupts can race to be
 *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
 *     later interrupts could be prioritized by the GIC.
 *
 *     For devices which are tightly coupled to the CPU, such as PMUs, a
 *     context synchronization event is necessary to ensure that system
 *     register state is not stale, as these may have been indirectly written
 *     *after* exception entry.
 *
 * (2) Deactivate the interrupt when EOI mode 1 is in use.
 */
static inline void gic_complete_ack(u32 irqnr)
{
	/* EOImode 1: this write only priority-drops; deactivation is separate. */
	if (static_branch_likely(&supports_deactivate_key))
		write_gicreg(irqnr, ICC_EOIR1_EL1);

	isb();
}
static u32 do_read_iar(struct pt_regs *regs) static bool gic_rpr_is_nmi_prio(void)
{ {
u32 iar; if (!gic_supports_nmi())
return false;
if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) { return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
}
static bool gic_irqnr_is_special(u32 irqnr)
{
return irqnr >= 1020 && irqnr <= 1023;
}
/*
 * Handle an acknowledged regular IRQ: perform the post-ack maintenance,
 * then dispatch into the IRQ domain.  Unhandled interrupts are warned
 * about once and deactivated so they cannot wedge the CPU interface.
 */
static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
{
	if (gic_irqnr_is_special(irqnr))
		return;

	gic_complete_ack(irqnr);

	if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
		WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
		gic_deactivate_unhandled(irqnr);
	}
}
/*
 * Handle an acknowledged pseudo-NMI: perform the post-ack maintenance,
 * then dispatch via the NMI flow of the IRQ domain.  Unhandled NMIs are
 * warned about once and deactivated.
 */
static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
	if (gic_irqnr_is_special(irqnr))
		return;

	gic_complete_ack(irqnr);

	if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
		WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
		gic_deactivate_unhandled(irqnr);
	}
}
/*
 * An exception has been taken from a context with IRQs enabled, and this could
 * be an IRQ or an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
 * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
 * after handling any NMI but before handling any IRQ.
 *
 * The entry code has performed IRQ entry, and if an NMI is detected we must
 * perform NMI entry/exit around invoking the handler.
 */
static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
{
	bool is_nmi;
	u32 irqnr;

	irqnr = gic_read_iar();

	is_nmi = gic_rpr_is_nmi_prio();

	if (is_nmi) {
		nmi_enter();
		__gic_handle_nmi(irqnr, regs);
		nmi_exit();
	}

	/* Mask IRQs at PMR and unmask DAIF.IF before any IRQ handling. */
	if (gic_prio_masking_enabled()) {
		gic_pmr_mask_irqs();
		gic_arch_enable_irqs();
	}

	if (!is_nmi)
		__gic_handle_irq(irqnr, regs);
}
/*
 * An exception has been taken from a context with IRQs disabled, which can only
 * be an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
 * DAIF.IF (and ICC_PMR_EL1) unchanged.
 *
 * The entry code has performed NMI entry.
 */
static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
{
	u64 pmr;
	u32 irqnr;

	/*
	 * We were in a context with IRQs disabled. However, the current PMR
	 * may not mask regular IRQs, so drop PMR to a level that only lets
	 * NMIs through before acknowledging (the ISB orders the PMR write
	 * against the IAR read), then restore the saved PMR value.
	 */
	pmr = gic_read_pmr();
	gic_pmr_mask_irqs();
	isb();
	irqnr = gic_read_iar();
	gic_write_pmr(pmr);

	__gic_handle_nmi(irqnr, regs);
}
/*
 * Top-level GICv3 exception entry: route to the IRQs-off flow (which can
 * only be a pseudo-NMI) or the IRQs-on flow based on the interrupted
 * context's IRQ state.
 */
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
		__gic_handle_irq_from_irqsoff(regs);
	else
		__gic_handle_irq_from_irqson(regs);
}
static u32 gic_get_pribits(void) static u32 gic_get_pribits(void)