NTB/msi: Convert to msi_on_each_desc()

Replace the about-to-vanish iterators, make use of the filtering, and take
the descriptor lock around the iteration.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20211206210748.683004012@linutronix.de
Thomas Gleixner 2021-12-06 23:51:34 +01:00
Parent dc2b453290
Commit 68e3183580
1 changed file with 13 additions and 6 deletions


@@ -108,8 +108,10 @@ int ntb_msi_setup_mws(struct ntb_dev *ntb)
 	if (!ntb->msi)
 		return -EINVAL;
 
-	desc = first_msi_entry(&ntb->pdev->dev);
+	msi_lock_descs(&ntb->pdev->dev);
+	desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
 	addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
+	msi_unlock_descs(&ntb->pdev->dev);
 
 	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
 		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
@@ -281,13 +283,15 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
 				  const char *name, void *dev_id,
 				  struct ntb_msi_desc *msi_desc)
 {
+	struct device *dev = &ntb->pdev->dev;
 	struct msi_desc *entry;
 	int ret;
 
 	if (!ntb->msi)
 		return -EINVAL;
 
-	for_each_pci_msi_entry(entry, ntb->pdev) {
+	msi_lock_descs(dev);
+	msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
 		if (irq_has_action(entry->irq))
 			continue;
@@ -304,14 +308,17 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
 		ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
 		if (ret) {
 			devm_free_irq(&ntb->dev, entry->irq, dev_id);
-			return ret;
+			goto unlock;
 		}
 
-		return entry->irq;
+		ret = entry->irq;
+		goto unlock;
 	}
+	ret = -ENODEV;
 
-	return -ENODEV;
+unlock:
+	msi_unlock_descs(dev);
+	return ret;
 }
 EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);
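
For context, a minimal, self-contained sketch of the locked, filtered descriptor walk this conversion adopts. It is not part of the commit; ntb_count_associated_irqs() is a hypothetical helper used only to illustrate the msi_lock_descs() / msi_for_each_desc() / MSI_DESC_ASSOCIATED pattern seen in the hunks above.

#include <linux/device.h>
#include <linux/msi.h>

/*
 * Hypothetical example: count the MSI descriptors of a device that already
 * have a Linux interrupt associated with them. The descriptor lock is held
 * around the whole iteration so descriptors cannot be allocated or freed
 * while the list is walked; MSI_DESC_ASSOCIATED restricts the walk to
 * descriptors whose desc->irq is valid.
 */
static unsigned int ntb_count_associated_irqs(struct device *dev)
{
	struct msi_desc *desc;
	unsigned int count = 0;

	msi_lock_descs(dev);
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
		count++;
	msi_unlock_descs(dev);

	return count;
}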