Merge git://git.infradead.org/iommu-2.6

* git://git.infradead.org/iommu-2.6:
  intel-iommu: Don't use identity mapping for PCI devices behind bridges
  intel-iommu: Use iommu_should_identity_map() at startup time too.
  intel-iommu: No mapping for non-PCI devices
  intel-iommu: Restore DMAR_BROKEN_GFX_WA option for broken graphics drivers
  intel-iommu: Add iommu_should_identity_map() function
  intel-iommu: Fix reattaching of devices to identity mapping domain
  intel-iommu: Don't set identity mapping for bypassed graphics devices
  intel-iommu: Fix dma vs. mm page confusion with aligned_nrpages()
commit 085ff82c9c
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1913,6 +1913,18 @@ config DMAR_DEFAULT_ON
 	  recommended you say N here while the DMAR code remains
 	  experimental.
 
+config DMAR_BROKEN_GFX_WA
+	def_bool n
+	prompt "Workaround broken graphics drivers (going away soon)"
+	depends on DMAR
+	---help---
+	  Current Graphics drivers tend to use physical address
+	  for DMA and avoid using DMA APIs. Setting this config
+	  option permits the IOMMU driver to set a unity map for
+	  all the OS-visible memory. Hence the driver can continue
+	  to use physical addresses for DMA, at least until this
+	  option is removed in the 2.6.32 kernel.
+
 config DMAR_FLOPPY_WA
 	def_bool y
 	depends on DMAR
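The help text above is about graphics drivers that program raw physical addresses into their hardware instead of going through the DMA API. With a unity (1:1) map, bus address equals physical address, so that habit keeps working. A hedged sketch of the two patterns, for illustration only (not part of this commit; get_bus_addr() and get_bus_addr_broken() are hypothetical names):

#include <linux/dma-mapping.h>
#include <linux/io.h>

/* Well-behaved: let the DMA API pick the bus address and program the IOMMU. */
static dma_addr_t get_bus_addr(struct device *dev, void *buf, size_t len)
{
	return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
}

/*
 * Broken pattern: assumes bus address == physical address. This only
 * stays correct when DMAR_BROKEN_GFX_WA puts the device behind a unity
 * map of all OS-visible memory.
 */
static dma_addr_t get_bus_addr_broken(void *buf)
{
	return (dma_addr_t)virt_to_phys(buf);
}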
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2117,6 +2117,47 @@ static int domain_add_dev_info(struct dmar_domain *domain,
 	return 0;
 }
 
+static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+{
+	if (iommu_identity_mapping == 2)
+		return IS_GFX_DEVICE(pdev);
+
+	/*
+	 * We want to start off with all devices in the 1:1 domain, and
+	 * take them out later if we find they can't access all of memory.
+	 *
+	 * However, we can't do this for PCI devices behind bridges,
+	 * because all PCI devices behind the same bridge will end up
+	 * with the same source-id on their transactions.
+	 *
+	 * Practically speaking, we can't change things around for these
+	 * devices at run-time, because we can't be sure there'll be no
+	 * DMA transactions in flight for any of their siblings.
+	 *
+	 * So PCI devices (unless they're on the root bus) as well as
+	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
+	 * the 1:1 domain, just in _case_ one of their siblings turns out
+	 * not to be able to map all of memory.
+	 */
+	if (!pdev->is_pcie) {
+		if (!pci_is_root_bus(pdev->bus))
+			return 0;
+		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+			return 0;
+	} else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+		return 0;
+
+	/*
+	 * At boot time, we don't yet know if devices will be 64-bit capable.
+	 * Assume that they will -- if they turn out not to be, then we can
+	 * take them out of the 1:1 domain later.
+	 */
+	if (!startup)
+		return pdev->dma_mask > DMA_BIT_MASK(32);
+
+	return 1;
+}
+
 static int iommu_prepare_static_identity_mapping(void)
 {
 	struct pci_dev *pdev = NULL;
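The comment block in iommu_should_identity_map() is the heart of this merge: conventional PCI devices behind a bridge all share the bridge's source-id on their transactions, so they cannot be moved between domains individually and are kept out of the 1:1 domain, along with the bridges themselves. A standalone toy model of just those topology checks (user-space C with a hypothetical struct; the startup/dma_mask branch is left out):

#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
	const char *name;
	bool is_pcie;
	bool on_root_bus;
	bool is_pci_bridge;	/* class == PCI_CLASS_BRIDGE_PCI */
	bool is_pcie_to_pci;	/* pcie_type == PCI_EXP_TYPE_PCI_BRIDGE */
};

static int should_identity_map(const struct toy_dev *d)
{
	if (!d->is_pcie) {
		if (!d->on_root_bus)
			return 0;	/* legacy PCI behind a bridge */
		if (d->is_pci_bridge)
			return 0;	/* the PCI-PCI bridge itself */
	} else if (d->is_pcie_to_pci) {
		return 0;		/* PCIe-to-PCI bridge */
	}
	return 1;			/* root-bus PCI or ordinary PCIe device */
}

int main(void)
{
	const struct toy_dev devs[] = {
		{ "PCI NIC on root bus",   false, true,  false, false },	/* -> 1 */
		{ "PCI NIC behind bridge", false, false, false, false },	/* -> 0 */
		{ "PCIe-to-PCI bridge",    true,  false, false, true  },	/* -> 0 */
		{ "PCIe graphics device",  true,  false, false, false },	/* -> 1 */
	};
	for (unsigned int i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
		printf("%-24s -> %d\n", devs[i].name,
		       should_identity_map(&devs[i]));
	return 0;
}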
@@ -2127,16 +2168,18 @@ static int iommu_prepare_static_identity_mapping(void)
 		return -EFAULT;
 
 	for_each_pci_dev(pdev) {
-		printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-		       pci_name(pdev));
+		if (iommu_should_identity_map(pdev, 1)) {
+			printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
+			       pci_name(pdev));
 
-		ret = domain_context_mapping(si_domain, pdev,
-					     CONTEXT_TT_MULTI_LEVEL);
-		if (ret)
-			return ret;
-		ret = domain_add_dev_info(si_domain, pdev);
-		if (ret)
-			return ret;
+			ret = domain_context_mapping(si_domain, pdev,
+						     CONTEXT_TT_MULTI_LEVEL);
+			if (ret)
+				return ret;
+			ret = domain_add_dev_info(si_domain, pdev);
+			if (ret)
+				return ret;
+		}
 	}
 
 	return 0;
@@ -2291,6 +2334,10 @@ int __init init_dmars(void)
 	 * identity mapping if iommu_identity_mapping is set.
 	 */
 	if (!iommu_pass_through) {
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+		if (!iommu_identity_mapping)
+			iommu_identity_mapping = 2;
+#endif
 		if (iommu_identity_mapping)
 			iommu_prepare_static_identity_mapping();
 		/*
@@ -2368,15 +2415,15 @@ error:
 	return ret;
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
 static inline unsigned long aligned_nrpages(unsigned long host_addr,
 					    size_t size)
 {
 	host_addr &= ~PAGE_MASK;
-	host_addr += size + PAGE_SIZE - 1;
-
-	return host_addr >> VTD_PAGE_SHIFT;
+	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
 }
 
+/* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
 				     unsigned long nrpages, uint64_t dma_mask)
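Why the aligned_nrpages() rewrite matters: VT-d always uses 4K pages (VTD_PAGE_SHIFT is 12), while the CPU's MM page size can be larger (16K or 64K on some ia64 configurations). The old code mixed the units: it rounded with the MM PAGE_SIZE but could return a VTD-page count that was not a multiple of the MM/VTD ratio, even though the IOVA allocator works in MM pages (see the two new comments and the dma_to_mm_pfn() conversions in the later hunks). A standalone before/after sketch (user-space C; 16K MM pages assumed purely for illustration):

#include <stdio.h>

#define VTD_PAGE_SHIFT	12			/* VT-d pages: always 4K */
#define PAGE_SHIFT	14			/* assumed 16K MM pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

/* Old version: rounds up, but the result keeps sub-MM-page residue. */
static unsigned long old_nrpages(unsigned long host_addr, unsigned long size)
{
	host_addr &= ~PAGE_MASK;
	host_addr += size + PAGE_SIZE - 1;
	return host_addr >> VTD_PAGE_SHIFT;
}

/*
 * New version: aligns to an MM page first, so the VTD-page count is a
 * multiple of PAGE_SIZE / VTD_PAGE_SIZE (here, a multiple of 4).
 */
static unsigned long new_nrpages(unsigned long host_addr, unsigned long size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

int main(void)
{
	/* 1-byte buffer starting 4K into a 16K MM page */
	printf("old: %lu\n", old_nrpages(0x5000, 1));	/* 5: not MM-aligned */
	printf("new: %lu\n", new_nrpages(0x5000, 1));	/* 4: one full MM page */
	return 0;
}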
@@ -2443,16 +2490,24 @@ static int iommu_dummy(struct pci_dev *pdev)
 }
 
 /* Check if the pdev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct pci_dev *pdev)
+static int iommu_no_mapping(struct device *dev)
 {
+	struct pci_dev *pdev;
 	int found;
 
+	if (unlikely(dev->bus != &pci_bus_type))
+		return 1;
+
+	pdev = to_pci_dev(dev);
+	if (iommu_dummy(pdev))
+		return 1;
+
 	if (!iommu_identity_mapping)
-		return iommu_dummy(pdev);
+		return 0;
 
 	found = identity_mapping(pdev);
 	if (found) {
-		if (pdev->dma_mask > DMA_BIT_MASK(32))
+		if (iommu_should_identity_map(pdev, 0))
 			return 1;
 		else {
 			/*
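A note on the rewritten iommu_no_mapping(): a return value of 1 means "bypass IOMMU translation entirely", which the DMA callers below use to fall back to raw physical addressing. Taking a struct device * instead of a struct pci_dev * lets the function reject non-PCI devices (dev->bus != &pci_bus_type) before it ever casts with to_pci_dev(); that is the "No mapping for non-PCI devices" change from the merged branch.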
@@ -2469,9 +2524,12 @@ static int iommu_no_mapping(struct pci_dev *pdev)
 		 * In case of a detached 64 bit DMA device from vm, the device
 		 * is put into si_domain for identity mapping.
 		 */
-		if (pdev->dma_mask > DMA_BIT_MASK(32)) {
+		if (iommu_should_identity_map(pdev, 0)) {
 			int ret;
 			ret = domain_add_dev_info(si_domain, pdev);
+			if (ret)
+				return 0;
+			ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
 			if (!ret) {
 				printk(KERN_INFO "64bit %s uses identity mapping\n",
 				       pci_name(pdev));
@@ -2480,7 +2538,7 @@ static int iommu_no_mapping(struct pci_dev *pdev)
 		}
 	}
 
-	return iommu_dummy(pdev);
+	return 0;
 }
 
 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
@@ -2496,7 +2554,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(hwdev))
 		return paddr;
 
 	domain = get_valid_domain_for_dev(pdev);
@@ -2506,7 +2564,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
 
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova)
 		goto error;
 
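Here intel_alloc_iova() now expects MM pages, so the VTD-page count from aligned_nrpages() is converted first. A minimal sketch of what dma_to_mm_pfn() presumably does, assuming it shifts by the ratio of the two page sizes (inferred from its name and usage here, not copied from the file):

#define VTD_PAGE_SHIFT	12	/* VT-d pages are always 4K */

/* Assumed shape: convert a pfn/count in VTD pages to MM pages. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
/* With 4K MM pages the shift is zero (a no-op); with 16K MM pages, 8 VTD pages -> 2 MM pages. */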
@@ -2635,7 +2694,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	struct iova *iova;
 	struct intel_iommu *iommu;
 
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(dev))
 		return;
 
 	domain = find_domain(pdev);
@@ -2726,7 +2785,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	struct iova *iova;
 	struct intel_iommu *iommu;
 
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(hwdev))
 		return;
 
 	domain = find_domain(pdev);
@@ -2785,7 +2844,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(hwdev))
 		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
 
 	domain = get_valid_domain_for_dev(pdev);
@@ -2797,7 +2856,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	for_each_sg(sglist, sg, nelems, i)
 		size += aligned_nrpages(sg->offset, sg->length);
 
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;