iommu/amd: Improve amd_iommu_v2_exit()

During module exit, the current logic loops over the entire 16-bit
device ID space to find existing devices and clean up their device
state structures. This can be simplified by walking the device state
list instead.
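
As an aside, below is a minimal user-space sketch of that list walk.
All names are illustrative stand-ins (the driver keeps device_state
structures on a struct list_head and walks them with
list_for_each_entry_safe(), as the diff shows); the point is that the
loop caches the next node before freeing the current one, so only
states that actually exist are visited instead of probing all 0x10000
possible device IDs.

#include <stdio.h>
#include <stdlib.h>

struct device_state {
        int devid;
        struct device_state *next;
};

/* head of the remaining device states (analog of state_list) */
static struct device_state *state_list;

static void free_device_state(struct device_state *dev_state)
{
        printf("freeing state for devid 0x%04x\n", dev_state->devid);
        free(dev_state);
}

static void v2_exit_teardown(void)
{
        struct device_state *dev_state = state_list, *next;

        while (dev_state) {
                next = dev_state->next;         /* grab before freeing */
                free_device_state(dev_state);
                dev_state = next;
        }
        state_list = NULL;
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct device_state *s = malloc(sizeof(*s));

                if (!s)
                        return 1;
                s->devid = 0x100 + i;
                s->next = state_list;
                state_list = s;
        }
        v2_exit_teardown();
        return 0;
}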

Also, refactor the various cleanup logic into free_device_state()
for better reusability.
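
The helper likewise absorbs the "wait until the last reference is
dropped, then free" step. Below is a user-space sketch of that pattern;
the pthread mutex/condvar pair stands in for the kernel's
wait_event()/wake_up() on dev_state->wq, and every name here is
illustrative rather than the driver's actual API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct device_state {
        atomic_int count;               /* outstanding references */
        pthread_mutex_t lock;
        pthread_cond_t wq;              /* analog of dev_state->wq */
};

static void put_device_state(struct device_state *dev_state)
{
        /* dropping the last reference wakes up the freer */
        if (atomic_fetch_sub(&dev_state->count, 1) == 1) {
                pthread_mutex_lock(&dev_state->lock);
                pthread_cond_broadcast(&dev_state->wq);
                pthread_mutex_unlock(&dev_state->lock);
        }
}

static void free_device_state(struct device_state *dev_state)
{
        /* ~ wait_event(dev_state->wq, !atomic_read(&dev_state->count)) */
        pthread_mutex_lock(&dev_state->lock);
        while (atomic_load(&dev_state->count))
                pthread_cond_wait(&dev_state->wq, &dev_state->lock);
        pthread_mutex_unlock(&dev_state->lock);

        free(dev_state);                /* safe: no references remain */
}

int main(void)
{
        struct device_state *s = calloc(1, sizeof(*s));

        if (!s)
                return 1;
        atomic_init(&s->count, 1);
        pthread_mutex_init(&s->lock, NULL);
        pthread_cond_init(&s->wq, NULL);

        put_device_state(s);            /* drop the initial reference */
        free_device_state(s);           /* count is 0, so no waiting */
        return 0;
}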

Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20220301085626.87680-6-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>

Author: Suravee Suthikulpanit, 2022-03-01 14:26:26 +05:30
Committed by: Joerg Roedel
Parent: c1d5b57a1e
Commit: 9f968fc70d
1 changed file, 17 insertions(+), 17 deletions(-)

@@ -24,7 +24,6 @@
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
 
-#define MAX_DEVICES             0x10000
 #define PRI_QUEUE_SIZE          512
 
 struct pri_queue {
@@ -124,6 +123,15 @@ static void free_device_state(struct device_state *dev_state)
 {
        struct iommu_group *group;
 
+       /* Get rid of any remaining pasid states */
+       free_pasid_states(dev_state);
+
+       /*
+        * Wait until the last reference is dropped before freeing
+        * the device state.
+        */
+       wait_event(dev_state->wq, !atomic_read(&dev_state->count));
+
        /*
         * First detach device from domain - No more PRI requests will arrive
         * from that device after it is unbound from the IOMMUv2 domain.
@@ -849,15 +857,7 @@ void amd_iommu_free_device(struct pci_dev *pdev)
 
        spin_unlock_irqrestore(&state_lock, flags);
 
-       /* Get rid of any remaining pasid states */
-       free_pasid_states(dev_state);
-
        put_device_state(dev_state);
-       /*
-        * Wait until the last reference is dropped before freeing
-        * the device state.
-        */
-       wait_event(dev_state->wq, !atomic_read(&dev_state->count));
        free_device_state(dev_state);
 }
 EXPORT_SYMBOL(amd_iommu_free_device);
@@ -954,8 +954,8 @@ out:
 
 static void __exit amd_iommu_v2_exit(void)
 {
-       struct device_state *dev_state;
-       int i;
+       struct device_state *dev_state, *next;
+       unsigned long flags;
 
        if (!amd_iommu_v2_supported())
                return;
@@ -968,18 +968,18 @@ static void __exit amd_iommu_v2_exit(void)
         * The loop below might call flush_workqueue(), so call
         * destroy_workqueue() after it
         */
-       for (i = 0; i < MAX_DEVICES; ++i) {
-               dev_state = get_device_state(i);
-
-               if (dev_state == NULL)
-                       continue;
+       spin_lock_irqsave(&state_lock, flags);
 
+       list_for_each_entry_safe(dev_state, next, &state_list, list) {
                WARN_ON_ONCE(1);
 
                put_device_state(dev_state);
-               amd_iommu_free_device(dev_state->pdev);
+               list_del(&dev_state->list);
+               free_device_state(dev_state);
        }
 
+       spin_unlock_irqrestore(&state_lock, flags);
+
        destroy_workqueue(iommu_wq);
 }