
Merge tag 'vfio-v4.14-rc1' of git://github.com/awilliam/linux-vfio

Pull VFIO updates from Alex Williamson:

 - Base MSI remapping on either IOMMU domain or IRQ domain support
   (Robin Murphy)

 - Prioritize hardware MSI regions over software defined regions (Robin
   Murphy)

 - Fix no-iommu reference counting (Eric Auger)

 - Stall removing last device from group for container cleanup (Alex
   Williamson)

 - Constify amba_id (Arvind Yadav)

* tag 'vfio-v4.14-rc1' of git://github.com/awilliam/linux-vfio:
  vfio: platform: constify amba_id
  vfio: Stall vfio_del_group_dev() for container group detach
  vfio: fix noiommu vfio_iommu_group_get reference count
  vfio/type1: Give hardware MSI regions precedence
  vfio/type1: Cope with hardware MSI reserved regions
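As background for the first item in the summary above: with this series, VFIO accepts MSI isolation from either the IRQ domain (for example an ARM GIC ITS that remaps MSIs) or the IOMMU itself (x86-style interrupt remapping); the allow_unsafe_interrupts module parameter remains the only escape hatch when neither is available. Below is a minimal, self-contained sketch of that decision, not kernel code; the function and parameter names are hypothetical stand-ins for irq_domain_check_msi_remap(), iommu_capable(bus, IOMMU_CAP_INTR_REMAP) and the module parameter.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: mirrors the attach-time decision after this series. */
static bool attach_allowed(bool irq_domain_msi_remap, bool iommu_intr_remap,
                           bool allow_unsafe_interrupts)
{
        /* Either source of MSI isolation is now sufficient. */
        bool msi_remap = irq_domain_msi_remap || iommu_intr_remap;

        if (!allow_unsafe_interrupts && !msi_remap)
                return false;   /* attach refused: no interrupt remapping */

        return true;
}

int main(void)
{
        /* e.g. an ARM box with an MSI-remapping ITS but no IOMMU_CAP_INTR_REMAP */
        printf("%d\n", attach_allowed(true, false, false));    /* 1: allowed */
        /* no remapping anywhere and no administrator override */
        printf("%d\n", attach_allowed(false, false, false));   /* 0: refused */
        return 0;
}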
Linus Torvalds 2017-09-09 14:28:45 -07:00
Parents: d2d8f51e28 417fb50d55
Commit: 8c1d70b2de
3 changed files with 36 additions and 7 deletions

View file

@@ -93,7 +93,7 @@ static int vfio_amba_remove(struct amba_device *adev)
         return -EINVAL;
 }

-static struct amba_id pl330_ids[] = {
+static const struct amba_id pl330_ids[] = {
         { 0, 0 },
 };
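The constify change is possible because the AMBA bus core only reads the ID table through a pointer to const (struct amba_driver::id_table), so the table can be placed in read-only data. The following is a hypothetical 4.14-era driver skeleton showing the pattern; the driver name, functions and example ID are made up and are not taken from the vfio driver.

#include <linux/amba/bus.h>
#include <linux/module.h>

/* Hypothetical example: the table may be const because
 * amba_driver::id_table points to const struct amba_id. */
static const struct amba_id example_ids[] = {
        { .id = 0x00041330, .mask = 0x000fffff },       /* made-up PrimeCell ID */
        { 0, 0 },
};
MODULE_DEVICE_TABLE(amba, example_ids);

static int example_probe(struct amba_device *adev, const struct amba_id *id)
{
        return 0;
}

static int example_remove(struct amba_device *adev)
{
        return 0;
}

static struct amba_driver example_driver = {
        .drv = {
                .name = "example-amba",
                .owner = THIS_MODULE,
        },
        .probe = example_probe,
        .remove = example_remove,
        .id_table = example_ids,
};
module_amba_driver(example_driver);

MODULE_LICENSE("GPL");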

View file

@@ -85,6 +85,7 @@ struct vfio_group {
         struct list_head                unbound_list;
         struct mutex                    unbound_lock;
         atomic_t                        opened;
+        wait_queue_head_t               container_q;
         bool                            noiommu;
         struct kvm                      *kvm;
         struct blocking_notifier_head   notifier;
@@ -138,9 +139,10 @@ struct iommu_group *vfio_iommu_group_get(struct device *dev)
         iommu_group_set_name(group, "vfio-noiommu");
         iommu_group_set_iommudata(group, &noiommu, NULL);
         ret = iommu_group_add_device(group, dev);
-        iommu_group_put(group);
-        if (ret)
+        if (ret) {
+                iommu_group_put(group);
                 return NULL;
+        }

         /*
          * Where to taint? At this point we've added an IOMMU group for a
@@ -337,6 +339,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
         mutex_init(&group->unbound_lock);
         atomic_set(&group->container_users, 0);
         atomic_set(&group->opened, 0);
+        init_waitqueue_head(&group->container_q);
         group->iommu_group = iommu_group;
 #ifdef CONFIG_VFIO_NOIOMMU
         group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
@@ -993,6 +996,23 @@ void *vfio_del_group_dev(struct device *dev)
                 }
         } while (ret <= 0);

+        /*
+         * In order to support multiple devices per group, devices can be
+         * plucked from the group while other devices in the group are still
+         * in use. The container persists with this group and those remaining
+         * devices still attached. If the user creates an isolation violation
+         * by binding this device to another driver while the group is still in
+         * use, that's their fault. However, in the case of removing the last,
+         * or potentially the only, device in the group there can be no other
+         * in-use devices in the group. The user has done their due diligence
+         * and we should lay no claims to those devices. In order to do that,
+         * we need to make sure the group is detached from the container.
+         * Without this stall, we're potentially racing with a user process
+         * that may attempt to immediately bind this device to another driver.
+         */
+        if (list_empty(&group->device_list))
+                wait_event(group->container_q, !group->container);
+
         vfio_group_put(group);

         return device_data;
@@ -1298,6 +1318,7 @@ static void __vfio_group_unset_container(struct vfio_group *group)
                                           group->iommu_group);

         group->container = NULL;
+        wake_up(&group->container_q);
         list_del(&group->container_next);

         /* Detaching the last group deprivileges a container, remove iommu */
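The stall added in this file is a plain waitqueue handshake: vfio_del_group_dev() sleeps on group->container_q until __vfio_group_unset_container() clears group->container and calls wake_up(). Below is a self-contained userspace analogue of that handshake, using a pthread condition variable in place of the kernel waitqueue; it is illustrative only, and none of these names are kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t container_q = PTHREAD_COND_INITIALIZER;
static bool container_attached = true;  /* stands in for group->container */

/* Plays the role of __vfio_group_unset_container(). */
static void *unset_container(void *arg)
{
        sleep(1);                               /* userspace still tearing down */
        pthread_mutex_lock(&lock);
        container_attached = false;             /* group->container = NULL; */
        pthread_cond_broadcast(&container_q);   /* wake_up(&group->container_q); */
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* Plays the role of vfio_del_group_dev() removing the last device. */
int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, unset_container, NULL);

        /* wait_event(group->container_q, !group->container); */
        pthread_mutex_lock(&lock);
        while (container_attached)
                pthread_cond_wait(&container_q, &lock);
        pthread_mutex_unlock(&lock);

        printf("group detached from container; safe to release the device\n");
        pthread_join(t, NULL);
        return 0;
}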

View file

@@ -1169,13 +1169,21 @@ static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
         INIT_LIST_HEAD(&group_resv_regions);
         iommu_get_group_resv_regions(group, &group_resv_regions);
         list_for_each_entry(region, &group_resv_regions, list) {
+                /*
+                 * The presence of any 'real' MSI regions should take
+                 * precedence over the software-managed one if the
+                 * IOMMU driver happens to advertise both types.
+                 */
+                if (region->type == IOMMU_RESV_MSI) {
+                        ret = false;
+                        break;
+                }
+
                 if (region->type == IOMMU_RESV_SW_MSI) {
                         *base = region->start;
                         ret = true;
-                        goto out;
                 }
         }
-out:
         list_for_each_entry_safe(region, next, &group_resv_regions, list)
                 kfree(region);
         return ret;
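The loop above encodes a simple precedence rule: if the IOMMU driver advertises any hardware-managed MSI region, no software MSI cookie is needed; otherwise a software-managed region, if present, supplies the cookie base address. Here is a standalone sketch of that scan, with made-up types and addresses standing in for struct iommu_resv_region.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the reserved-region types used above. */
enum resv_type { RESV_OTHER, RESV_MSI, RESV_SW_MSI };

struct resv_region {
        enum resv_type type;
        unsigned long long start;
};

/* Same shape as the loop above: a hardware MSI region anywhere in the
 * list wins; a software MSI region records the cookie base address. */
static bool has_sw_msi(const struct resv_region *r, int n,
                       unsigned long long *base)
{
        bool ret = false;

        for (int i = 0; i < n; i++) {
                if (r[i].type == RESV_MSI)
                        return false;   /* hardware region takes precedence */
                if (r[i].type == RESV_SW_MSI) {
                        *base = r[i].start;
                        ret = true;
                }
        }
        return ret;
}

int main(void)
{
        unsigned long long base = 0;
        const struct resv_region regions[] = {
                { RESV_SW_MSI, 0x08000000ULL },
                { RESV_MSI,    0xfee00000ULL }, /* hardware region wins */
        };

        printf("software MSI cookie needed: %d\n",
               has_sw_msi(regions, 2, &base));  /* prints 0 */
        return 0;
}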
@@ -1265,8 +1273,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
         INIT_LIST_HEAD(&domain->group_list);
         list_add(&group->next, &domain->group_list);

-        msi_remap = resv_msi ? irq_domain_check_msi_remap() :
-                                iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
+        msi_remap = irq_domain_check_msi_remap() ||
+                        iommu_capable(bus, IOMMU_CAP_INTR_REMAP);

         if (!allow_unsafe_interrupts && !msi_remap) {
                 pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",