PCI/MSI: Move non-mask check back into low level accessors

commit 9c8e9c9681 upstream.

The recent rework of PCI/MSI[X] masking moved the non-mask checks from the
low level accessors into the higher level mask/unmask functions.

This missed the fact that these accessors can be invoked from other places
as well. The missing checks break XEN-PV, which sets pci_msi_ignore_mask,
and also violate the virtual MSI-X and the msi_attrib.maskbit protections.

Instead of sprinkling checks all over the place, lift them back into the
low level accessor functions. To avoid checking three different conditions,
combine them into one property of msi_desc::msi_attrib.

[ josef: Fixed the missed conversion in the core code ]

Fixes: fcacdfbef5 ("PCI/MSI: Provide a new set of mask and unmask functions")
Reported-by: Josef Johansson <josef@oderland.se>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Josef Johansson <josef@oderland.se>
Cc: Bjorn Helgaas <helgaas@kernel.org>
Cc: stable@vger.kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent: affa136164
Commit: ab40a2e5e2
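For context before the hunks: the fix folds the three run-time checks
(pci_msi_ignore_mask, msi_attrib.is_virtual and msi_attrib.maskbit) into a
single msi_attrib.can_mask bit that is computed once when the descriptor is
set up. The following is a minimal userspace sketch of that setup-time
consolidation, not kernel code; struct msi_attrib, msi_setup_attrib() and
msix_setup_attrib() are simplified illustrative names, not the kernel's
definitions.

/* Sketch: compute a single can_mask bit once, at descriptor setup time. */
#include <stdbool.h>
#include <stdio.h>

/* Mirrors only the msi_desc::msi_attrib bits relevant to this change */
struct msi_attrib {
	unsigned int is_msix	: 1;
	unsigned int can_mask	: 1;	/* replaces the scattered checks */
	unsigned int is_virtual	: 1;
};

/* Global switch; in the kernel this is pci_msi_ignore_mask (set by XEN-PV) */
static bool pci_msi_ignore_mask;

/* MSI: masking is possible only if the device advertises a per-vector mask bit */
static void msi_setup_attrib(struct msi_attrib *a, bool hw_maskbit)
{
	a->is_msix  = 0;
	a->can_mask = !pci_msi_ignore_mask && hw_maskbit;
}

/* MSI-X: virtual vectors have no hardware entry and therefore cannot be masked */
static void msix_setup_attrib(struct msi_attrib *a, bool is_virtual)
{
	a->is_msix    = 1;
	a->is_virtual = is_virtual;
	a->can_mask   = !pci_msi_ignore_mask && !is_virtual;
}

int main(void)
{
	struct msi_attrib msi = {0}, msix = {0};

	pci_msi_ignore_mask = true;	/* e.g. running as a XEN-PV guest */
	msi_setup_attrib(&msi, true);
	msix_setup_attrib(&msix, false);

	/* Both descriptors end up with one bit the accessors can test later */
	printf("msi.can_mask=%u msix.can_mask=%u\n", msi.can_mask, msix.can_mask);
	return 0;
}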
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -148,6 +148,9 @@ static noinline void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
 	raw_spinlock_t *lock = &desc->dev->msi_lock;
 	unsigned long flags;
 
+	if (!desc->msi_attrib.can_mask)
+		return;
+
 	raw_spin_lock_irqsave(lock, flags);
 	desc->msi_mask &= ~clear;
 	desc->msi_mask |= set;
@@ -181,7 +184,8 @@ static void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
 {
 	void __iomem *desc_addr = pci_msix_desc_addr(desc);
 
-	writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+	if (desc->msi_attrib.can_mask)
+		writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
 }
 
 static inline void pci_msix_mask(struct msi_desc *desc)
@@ -200,23 +204,17 @@ static inline void pci_msix_unmask(struct msi_desc *desc)
 
 static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
 {
-	if (pci_msi_ignore_mask || desc->msi_attrib.is_virtual)
-		return;
-
 	if (desc->msi_attrib.is_msix)
 		pci_msix_mask(desc);
-	else if (desc->msi_attrib.maskbit)
+	else
 		pci_msi_mask(desc, mask);
 }
 
 static void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
 {
-	if (pci_msi_ignore_mask || desc->msi_attrib.is_virtual)
-		return;
-
 	if (desc->msi_attrib.is_msix)
 		pci_msix_unmask(desc);
-	else if (desc->msi_attrib.maskbit)
+	else
 		pci_msi_unmask(desc, mask);
 }
 
@@ -484,7 +482,8 @@ msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
 	entry->msi_attrib.is_64		= !!(control & PCI_MSI_FLAGS_64BIT);
 	entry->msi_attrib.is_virtual	= 0;
 	entry->msi_attrib.entry_nr	= 0;
-	entry->msi_attrib.maskbit	= !!(control & PCI_MSI_FLAGS_MASKBIT);
+	entry->msi_attrib.can_mask	= !pci_msi_ignore_mask &&
+					  !!(control & PCI_MSI_FLAGS_MASKBIT);
 	entry->msi_attrib.default_irq	= dev->irq;	/* Save IOAPIC IRQ */
 	entry->msi_attrib.multi_cap	= (control & PCI_MSI_FLAGS_QMASK) >> 1;
 	entry->msi_attrib.multiple	= ilog2(__roundup_pow_of_two(nvec));
@@ -495,7 +494,7 @@ msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
 		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
 
 	/* Save the initial mask status */
-	if (entry->msi_attrib.maskbit)
+	if (entry->msi_attrib.can_mask)
 		pci_read_config_dword(dev, entry->mask_pos, &entry->msi_mask);
 
 out:
@@ -638,10 +637,13 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 		entry->msi_attrib.is_virtual =
 			entry->msi_attrib.entry_nr >= vec_count;
 
+		entry->msi_attrib.can_mask	= !pci_msi_ignore_mask &&
+						  !entry->msi_attrib.is_virtual;
+
 		entry->msi_attrib.default_irq	= dev->irq;
 		entry->mask_base		= base;
 
-		if (!entry->msi_attrib.is_virtual) {
+		if (entry->msi_attrib.can_mask) {
 			addr = pci_msix_desc_addr(entry);
 			entry->msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
 		}
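The hunks above put the guard into the low level accessors themselves
(pci_msi_update_mask() and pci_msix_write_vector_ctrl()), so every caller
gets the protection, not only the high level mask/unmask helpers. A small
standalone sketch of that behaviour follows; fake_desc and update_mask()
are made-up names rather than the kernel's, and the config-space write is
only indicated by a comment.

/* Sketch: a low level accessor that silently becomes a no-op when masking
 * is not possible, mirroring the guard added to pci_msi_update_mask(). */
#include <stdint.h>
#include <stdio.h>

struct fake_desc {
	unsigned int can_mask : 1;
	uint32_t msi_mask;		/* cached mask register contents */
};

static void update_mask(struct fake_desc *desc, uint32_t clear, uint32_t set)
{
	if (!desc->can_mask)
		return;			/* no-op, e.g. under XEN-PV */

	desc->msi_mask &= ~clear;
	desc->msi_mask |= set;
	/* the real accessor would now write desc->msi_mask to config space */
}

int main(void)
{
	struct fake_desc d = { .can_mask = 0, .msi_mask = 0 };

	update_mask(&d, 0, 0x1);	/* silently ignored */
	printf("mask with can_mask=0: %#x\n", (unsigned)d.msi_mask);

	d.can_mask = 1;
	update_mask(&d, 0, 0x1);	/* now takes effect */
	printf("mask with can_mask=1: %#x\n", (unsigned)d.msi_mask);
	return 0;
}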
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -148,7 +148,7 @@ struct msi_desc {
 				u8	is_msix		: 1;
 				u8	multiple	: 3;
 				u8	multi_cap	: 3;
-				u8	maskbit		: 1;
+				u8	can_mask	: 1;
 				u8	is_64		: 1;
 				u8	is_virtual	: 1;
 				u16	entry_nr;
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -529,10 +529,10 @@ static bool msi_check_reservation_mode(struct irq_domain *domain,
 
 	/*
 	 * Checking the first MSI descriptor is sufficient. MSIX supports
-	 * masking and MSI does so when the maskbit is set.
+	 * masking and MSI does so when the can_mask attribute is set.
 	 */
 	desc = first_msi_entry(dev);
-	return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
+	return desc->msi_attrib.is_msix || desc->msi_attrib.can_mask;
 }
 
 int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,