intel-iommu: Unify hardware and software passthrough support
This makes the hardware passthrough mode work a lot more like the software
version, so that the behaviour of a kernel with 'iommu=pt' is the same
whether the hardware supports passthrough or not. In particular:

- We use a single si_domain for the pass-through devices.
- 32-bit devices can be taken out of the pass-through domain so that they
  don't have to use swiotlb.
- Devices will work again after being removed from a KVM guest.
- A potential oops on OOM (in init_context_pass_through()) is fixed.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Parent: 0815565adf
Commit: 19943b0e30
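Conceptually, the unified flow in this patch asks one question per device: is hardware pass-through available? If it is, the context entry is programmed as CONTEXT_TT_PASS_THROUGH and si_domain needs no page tables; if not, the same si_domain is used, populated with a 1:1 mapping, and programmed as CONTEXT_TT_MULTI_LEVEL. The snippet below is a minimal userspace model of that decision, written against this commit's hunks rather than the kernel tree; populate_si_domain() and choose_context_type() are illustrative stand-ins, not kernel functions.

/*
 * Minimal userspace model of the unified pass-through flow in this patch:
 * one si_domain is always used; hardware pass-through only changes which
 * context translation type is programmed and whether identity page tables
 * need to be built in software. Illustrative only -- not kernel code.
 */
#include <stdio.h>
#include <stdbool.h>

enum context_tt {
	CONTEXT_TT_MULTI_LEVEL,		/* normal page-table translation */
	CONTEXT_TT_PASS_THROUGH,	/* hardware pass-through, no page walk */
};

/* Models si_domain_init(int hw): with hardware pass-through there is
 * nothing to map, so building the identity page tables is skipped. */
static bool populate_si_domain(bool hw_pass_through)
{
	return !hw_pass_through;
}

/* Models the context-mapping calls: the same si_domain is used either way,
 * only the translation type programmed into the context entry differs. */
static enum context_tt choose_context_type(bool hw_pass_through)
{
	return hw_pass_through ? CONTEXT_TT_PASS_THROUGH
			       : CONTEXT_TT_MULTI_LEVEL;
}

int main(void)
{
	/* In the kernel, hw_pass_through starts at 1 and is cleared if any
	 * IOMMU lacks ecap_pass_through(); here we just show both cases. */
	int hw;

	for (hw = 0; hw <= 1; hw++)
		printf("hw_pass_through=%d: %s identity mapping, %s, %s\n",
		       hw, hw ? "hardware" : "software",
		       choose_context_type(hw) == CONTEXT_TT_PASS_THROUGH ?
				"CONTEXT_TT_PASS_THROUGH" : "CONTEXT_TT_MULTI_LEVEL",
		       populate_si_domain(hw) ? "si_domain page tables populated"
					      : "si_domain page tables skipped");
	return 0;
}

Because both modes share si_domain, devices can be moved in and out of it at runtime -- for example a 32-bit device dropping out so it does not need swiotlb, or a device returning from a KVM guest -- regardless of whether pass-through is done in hardware or software.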
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -71,9 +71,8 @@ void __init pci_swiotlb_init(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
 #ifdef CONFIG_X86_64
-	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
-	    iommu_pass_through)
-		swiotlb = 1;
+	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN))
+		swiotlb = 1;
 #endif
 	if (swiotlb_force)
 		swiotlb = 1;
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -251,7 +251,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
  * 2. It maps to each iommu if successful.
  * 3. Each iommu mapps to this domain if successful.
  */
-struct dmar_domain *si_domain;
+static struct dmar_domain *si_domain;
+static int hw_pass_through = 1;
 
 /* devices under the same p2p bridge are owned in one domain */
 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
@@ -1948,14 +1949,24 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
 	struct dmar_domain *domain;
 	int ret;
 
-	printk(KERN_INFO
-	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
-	       pci_name(pdev), start, end);
-
 	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
 	if (!domain)
 		return -ENOMEM;
 
+	/* For _hardware_ passthrough, don't bother. But for software
+	   passthrough, we do it anyway -- it may indicate a memory
+	   range which is reserved in E820, so which didn't get set
+	   up to start with in si_domain */
+	if (domain == si_domain && hw_pass_through) {
+		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+		       pci_name(pdev), start, end);
+		return 0;
+	}
+
+	printk(KERN_INFO
+	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+	       pci_name(pdev), start, end);
+
 	ret = iommu_domain_identity_map(domain, start, end);
 	if (ret)
 		goto error;
@@ -2006,23 +2017,6 @@ static inline void iommu_prepare_isa(void)
 }
 #endif /* !CONFIG_DMAR_FLPY_WA */
 
-/* Initialize each context entry as pass through.*/
-static int __init init_context_pass_through(void)
-{
-	struct pci_dev *pdev = NULL;
-	struct dmar_domain *domain;
-	int ret;
-
-	for_each_pci_dev(pdev) {
-		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-		ret = domain_context_mapping(domain, pdev,
-					     CONTEXT_TT_PASS_THROUGH);
-		if (ret)
-			return ret;
-	}
-	return 0;
-}
-
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
 static int __init si_domain_work_fn(unsigned long start_pfn,
@@ -2037,7 +2031,7 @@ static int __init si_domain_work_fn(unsigned long start_pfn,
 
 }
 
-static int si_domain_init(void)
+static int si_domain_init(int hw)
 {
 	struct dmar_drhd_unit *drhd;
 	struct intel_iommu *iommu;
@@ -2064,6 +2058,9 @@ static int si_domain_init(void)
 
 	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
 
+	if (hw)
+		return 0;
+
 	for_each_online_node(nid) {
 		work_with_active_regions(nid, si_domain_work_fn, &ret);
 		if (ret)
@@ -2155,24 +2152,26 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
 	return 1;
 }
 
-static int iommu_prepare_static_identity_mapping(void)
+static int iommu_prepare_static_identity_mapping(int hw)
 {
 	struct pci_dev *pdev = NULL;
 	int ret;
 
-	ret = si_domain_init();
+	ret = si_domain_init(hw);
 	if (ret)
 		return -EFAULT;
 
 	for_each_pci_dev(pdev) {
 		if (iommu_should_identity_map(pdev, 1)) {
-			printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-			       pci_name(pdev));
+			printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
+			       hw ? "hardware" : "software", pci_name(pdev));
 
 			ret = domain_context_mapping(si_domain, pdev,
+						     hw ? CONTEXT_TT_PASS_THROUGH :
 						     CONTEXT_TT_MULTI_LEVEL);
 			if (ret)
 				return ret;
 			ret = domain_add_dev_info(si_domain, pdev);
 			if (ret)
 				return ret;
@@ -2189,14 +2188,6 @@ int __init init_dmars(void)
 	struct pci_dev *pdev;
 	struct intel_iommu *iommu;
 	int i, ret;
-	int pass_through = 1;
-
-	/*
-	 * In case pass through can not be enabled, iommu tries to use identity
-	 * mapping.
-	 */
-	if (iommu_pass_through)
-		iommu_identity_mapping = 1;
 
 	/*
 	 * for each drhd
@@ -2250,14 +2241,8 @@ int __init init_dmars(void)
 			goto error;
 		}
 		if (!ecap_pass_through(iommu->ecap))
-			pass_through = 0;
+			hw_pass_through = 0;
 	}
-	if (iommu_pass_through)
-		if (!pass_through) {
-			printk(KERN_INFO
-			       "Pass Through is not supported by hardware.\n");
-			iommu_pass_through = 0;
-		}
 
 	/*
 	 * Start from the sane iommu hardware state.
@@ -2312,63 +2297,56 @@ int __init init_dmars(void)
 		}
 	}
 
-	/*
-	 * If pass through is set and enabled, context entries of all pci
-	 * devices are intialized by pass through translation type.
-	 */
-	if (iommu_pass_through) {
-		ret = init_context_pass_through();
-		if (ret) {
-			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
-			iommu_pass_through = 0;
-		}
-	}
-
+	if (iommu_pass_through)
+		iommu_identity_mapping = 1;
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+	else
+		iommu_identity_mapping = 2;
+#endif
 	/*
 	 * If pass through is not set or not enabled, setup context entries for
 	 * identity mappings for rmrr, gfx, and isa and may fall back to static
 	 * identity mapping if iommu_identity_mapping is set.
 	 */
-	if (!iommu_pass_through) {
-#ifdef CONFIG_DMAR_BROKEN_GFX_WA
-		if (!iommu_identity_mapping)
-			iommu_identity_mapping = 2;
-#endif
-		if (iommu_identity_mapping)
-			iommu_prepare_static_identity_mapping();
-		/*
-		 * For each rmrr
-		 *   for each dev attached to rmrr
-		 *   do
-		 *     locate drhd for dev, alloc domain for dev
-		 *     allocate free domain
-		 *     allocate page table entries for rmrr
-		 *     if context not allocated for bus
-		 *           allocate and init context
-		 *           set present in root table for this bus
-		 *     init context with domain, translation etc
-		 *    endfor
-		 * endfor
-		 */
-		printk(KERN_INFO "IOMMU: Setting RMRR:\n");
-		for_each_rmrr_units(rmrr) {
-			for (i = 0; i < rmrr->devices_cnt; i++) {
-				pdev = rmrr->devices[i];
-				/*
-				 * some BIOS lists non-exist devices in DMAR
-				 * table.
-				 */
-				if (!pdev)
-					continue;
-				ret = iommu_prepare_rmrr_dev(rmrr, pdev);
-				if (ret)
-					printk(KERN_ERR
-				 "IOMMU: mapping reserved region failed\n");
-			}
-		}
-
-		iommu_prepare_isa();
-	}
-
+	if (iommu_identity_mapping) {
+		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
+		if (ret) {
+			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+			goto error;
+		}
+	}
+	/*
+	 * For each rmrr
+	 *   for each dev attached to rmrr
+	 *   do
+	 *     locate drhd for dev, alloc domain for dev
+	 *     allocate free domain
+	 *     allocate page table entries for rmrr
+	 *     if context not allocated for bus
+	 *           allocate and init context
+	 *           set present in root table for this bus
+	 *     init context with domain, translation etc
+	 *    endfor
+	 * endfor
+	 */
+	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
+	for_each_rmrr_units(rmrr) {
+		for (i = 0; i < rmrr->devices_cnt; i++) {
+			pdev = rmrr->devices[i];
+			/*
+			 * some BIOS lists non-exist devices in DMAR
+			 * table.
+			 */
+			if (!pdev)
+				continue;
+			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+			if (ret)
+				printk(KERN_ERR
+				       "IOMMU: mapping reserved region failed\n");
+		}
+	}
+
+	iommu_prepare_isa();
+
 	/*
 	 * for each drhd
@@ -2536,7 +2514,10 @@ static int iommu_no_mapping(struct device *dev)
 		ret = domain_add_dev_info(si_domain, pdev);
 		if (ret)
 			return 0;
-		ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+		ret = domain_context_mapping(si_domain, pdev,
+					     hw_pass_through ?
+					     CONTEXT_TT_PASS_THROUGH :
+					     CONTEXT_TT_MULTI_LEVEL);
 		if (!ret) {
 			printk(KERN_INFO "64bit %s uses identity mapping\n",
 			       pci_name(pdev));
@@ -3202,7 +3183,7 @@ int __init intel_iommu_init(void)
 	 * Check the need for DMA-remapping initialization now.
 	 * Above initialization will also be used by Interrupt-remapping.
 	 */
-	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
+	if (no_iommu || swiotlb || dmar_disabled)
 		return -ENODEV;
 
 	iommu_init_mempool();
@@ -3222,14 +3203,7 @@ int __init intel_iommu_init(void)
 
 	init_timer(&unmap_timer);
 	force_iommu = 1;
-
-	if (!iommu_pass_through) {
-		printk(KERN_INFO
-		       "Multi-level page-table translation for DMAR.\n");
-		dma_ops = &intel_dma_ops;
-	} else
-		printk(KERN_INFO
-		       "DMAR: Pass through translation for DMAR.\n");
+	dma_ops = &intel_dma_ops;
 
 	init_iommu_sysfs();
 