Merge branches 'arm/io-pgtable', 'arm/qcom', 'arm/tegra', 'x86/vt-d', 'x86/amd' and 'core' into next
Commit 1f56835711
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
@@ -23,7 +23,7 @@ config IOMMU_IO_PGTABLE
 config IOMMU_IO_PGTABLE_LPAE
	bool "ARMv7/v8 Long Descriptor Format"
	select IOMMU_IO_PGTABLE
-	depends on HAS_DMA && (ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64))
+	depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
	help
	  Enable support for the ARM long descriptor pagetable format.
	  This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -42,7 +42,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST
 config IOMMU_IO_PGTABLE_ARMV7S
	bool "ARMv7/v8 Short Descriptor Format"
	select IOMMU_IO_PGTABLE
-	depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST)
+	depends on ARM || ARM64 || COMPILE_TEST
	help
	  Enable support for the ARM Short-descriptor pagetable format.
	  This supports 32-bit virtual and physical addresses mapped using
@@ -376,7 +376,6 @@ config QCOM_IOMMU
	# Note: iommu drivers cannot (yet?) be built as modules
	bool "Qualcomm IOMMU Support"
	depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
-	depends on HAS_DMA
	select IOMMU_API
	select IOMMU_IO_PGTABLE_LPAE
	select ARM_DMA_USE_IOMMU

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
@@ -544,7 +544,7 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
 static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 {
	struct device *dev = iommu->iommu.dev;
-	int type, devid, domid, flags;
+	int type, devid, pasid, flags, tag;
	volatile u32 *event = __evt;
	int count = 0;
	u64 address;
@@ -552,7 +552,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 retry:
	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
-	domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+	pasid   = PPR_PASID(*(u64 *)&event[0]);
	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	address = (u64)(((u64)event[3]) << 32) | event[2];
 
@@ -567,7 +567,7 @@ retry:
	}
 
	if (type == EVENT_TYPE_IO_FAULT) {
-		amd_iommu_report_page_fault(devid, domid, address, flags);
+		amd_iommu_report_page_fault(devid, pasid, address, flags);
		return;
	} else {
		dev_err(dev, "AMD-Vi: Event logged [");
@@ -575,10 +575,9 @@ retry:
 
	switch (type) {
	case EVENT_TYPE_ILL_DEV:
-		dev_err(dev, "ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
-			"address=0x%016llx flags=0x%04x]\n",
+		dev_err(dev, "ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n",
			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-			address, flags);
+			pasid, address, flags);
		dump_dte_entry(devid);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
@@ -588,34 +587,38 @@ retry:
			address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
-		dev_err(dev, "PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
-			"domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+		dev_err(dev, "PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%016llx flags=0x%04x]\n",
			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-			domid, address, flags);
+			pasid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		dev_err(dev, "ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
		dump_command(address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
-		dev_err(dev, "COMMAND_HARDWARE_ERROR address=0x%016llx "
-			"flags=0x%04x]\n", address, flags);
+		dev_err(dev, "COMMAND_HARDWARE_ERROR address=0x%016llx flags=0x%04x]\n",
+			address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
-		dev_err(dev, "IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
-			"address=0x%016llx]\n",
+		dev_err(dev, "IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%016llx]\n",
			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
-		dev_err(dev, "INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
-			"address=0x%016llx flags=0x%04x]\n",
+		dev_err(dev, "INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n",
			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-			address, flags);
+			pasid, address, flags);
		break;
+	case EVENT_TYPE_INV_PPR_REQ:
+		pasid = ((event[0] >> 16) & 0xFFFF)
+			| ((event[1] << 6) & 0xF0000);
+		tag = event[1] & 0x03FF;
+		dev_err(dev, "INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n",
+			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+			pasid, address, flags);
+		break;
	default:
-		dev_err(dev, KERN_ERR "UNKNOWN event[0]=0x%08x event[1]=0x%08x "
-			"event[2]=0x%08x event[3]=0x%08x\n",
+		dev_err(dev, "UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
			event[0], event[1], event[2], event[3]);
	}
 
@@ -1911,15 +1914,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
	struct amd_iommu *iommu;
	u16 alias;
 
-	/*
-	 * First check if the device is still attached. It might already
-	 * be detached from its domain because the generic
-	 * iommu_detach_group code detached it and we try again here in
-	 * our alias handling.
-	 */
-	if (!dev_data->domain)
-		return;
-
	iommu = amd_iommu_rlookup_table[dev_data->devid];
	alias = dev_data->alias;
 
@@ -1939,8 +1933,8 @@ static void do_detach(struct iommu_dev_data *dev_data)
 }
 
 /*
- * If a device is not yet associated with a domain, this function does
- * assigns it visible for the hardware
+ * If a device is not yet associated with a domain, this function makes the
+ * device visible in the domain
 */
 static int __attach_device(struct iommu_dev_data *dev_data,
			   struct protection_domain *domain)
@@ -2061,8 +2055,8 @@ static bool pci_pri_tlp_required(struct pci_dev *pdev)
 }
 
 /*
- * If a device is not yet associated with a domain, this function
- * assigns it visible for the hardware
+ * If a device is not yet associated with a domain, this function makes the
+ * device visible in the domain
 */
 static int attach_device(struct device *dev,
			 struct protection_domain *domain)
@@ -2124,9 +2118,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
	 */
	WARN_ON(!irqs_disabled());
 
-	if (WARN_ON(!dev_data->domain))
-		return;
-
	domain = dev_data->domain;
 
	spin_lock(&domain->lock);
@@ -2148,6 +2139,15 @@ static void detach_device(struct device *dev)
	dev_data = get_dev_data(dev);
	domain   = dev_data->domain;
 
+	/*
+	 * First check if the device is still attached. It might already
+	 * be detached from its domain because the generic
+	 * iommu_detach_group code detached it and we try again here in
+	 * our alias handling.
+	 */
+	if (WARN_ON(!dev_data->domain))
+		return;
+
	/* lock device table */
	spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
	__detach_device(dev_data);
@@ -2793,6 +2793,7 @@ static void cleanup_domain(struct protection_domain *domain)
	while (!list_empty(&domain->dev_list)) {
		entry = list_first_entry(&domain->dev_list,
					 struct iommu_dev_data, list);
+		BUG_ON(!entry->domain);
		__detach_device(entry);
	}

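The dev_err() reflows above follow the kernel convention that user-visible format strings stay on one source line, even past the usual column limit, so the full message can be found with grep. A tiny userspace illustration (the message text is made up for the demo):

    #include <stdio.h>

    int main(void)
    {
        /* Split string: grep for "logged for device" misses this call site. */
        printf("AMD-Vi style demo: event "
               "logged for device %02x\n", 0x10);

        /* Single line: greppable, which is why checkpatch prefers it. */
        printf("AMD-Vi style demo: event logged for device %02x\n", 0x10);
        return 0;
    }
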
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
@@ -133,6 +133,7 @@
 #define EVENT_TYPE_CMD_HARD_ERR	0x6
 #define EVENT_TYPE_IOTLB_INV_TO	0x7
 #define EVENT_TYPE_INV_DEV_REQ	0x8
+#define EVENT_TYPE_INV_PPR_REQ	0x9
 #define EVENT_DEVID_MASK	0xffff
 #define EVENT_DEVID_SHIFT	0
 #define EVENT_DOMID_MASK	0xffff

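The new EVENT_TYPE_INV_PPR_REQ (0x9) log entry carries a 20-bit PASID split across the two raw event words, which the amd_iommu.c hunk above splices back together. A runnable userspace model of that decoding; the sample event words are made up:

    #include <stdint.h>
    #include <stdio.h>

    #define EVENT_TYPE_INV_PPR_REQ 0x9

    int main(void)
    {
        uint32_t event[2] = { 0xABCD0000u, 0x00002E55u }; /* hypothetical log entry */

        uint32_t pasid = ((event[0] >> 16) & 0xFFFF)   /* PASID[15:0] from event[0] bits 31:16 */
                       | ((event[1] << 6) & 0xF0000);  /* PASID[19:16] from event[1] bits 13:10 */
        uint32_t tag = event[1] & 0x03FF;              /* PPR tag, event[1] bits 9:0 */

        printf("pasid=0x%05x tag=0x%03x\n", (unsigned)pasid, (unsigned)tag);
        return 0;
    }
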
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
@@ -1618,17 +1618,13 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;
-	bool ratelimited;
	static DEFINE_RATELIMIT_STATE(rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
 
-	/* Disable printing, simply clear the fault when ratelimited */
-	ratelimited = !__ratelimit(&rs);
-
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-	if (fault_status && !ratelimited)
+	if (fault_status && __ratelimit(&rs))
		pr_err("DRHD: handling fault status reg %x\n", fault_status);
 
	/* TBD: ignore advanced fault log currently */
@@ -1638,6 +1634,8 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
+		/* Disable printing, simply clear the fault when ratelimited */
+		bool ratelimited = !__ratelimit(&rs);
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;

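The point of this rework is the granularity of rate limiting: the old code made one __ratelimit() decision per interrupt, so one decision could suppress a whole batch of fault records, while the new code decides per fault record and clears every fault either way. A runnable userspace model of the pattern; the interval and burst values are illustrative, not the kernel defaults:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    struct ratelimit_state {
        time_t interval;  /* window length in seconds */
        int    burst;     /* messages allowed per window */
        time_t begin;
        int    printed;
    };

    static bool my_ratelimit(struct ratelimit_state *rs)
    {
        time_t now = time(NULL);

        if (now - rs->begin >= rs->interval) {  /* window expired: reset */
            rs->begin = now;
            rs->printed = 0;
        }
        if (rs->printed < rs->burst) {
            rs->printed++;
            return true;                        /* allowed to print */
        }
        return false;                           /* suppressed */
    }

    int main(void)
    {
        struct ratelimit_state rs = { .interval = 5, .burst = 10 };

        for (int fault = 0; fault < 100; fault++) {
            /* One decision per fault record, as in the reworked dmar_fault() */
            bool ratelimited = !my_ratelimit(&rs);

            if (!ratelimited)
                printf("DMAR: handling fault %d\n", fault);
            /* the fault is cleared either way; only the printing is suppressed */
        }
        return 0;
    }
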
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
@@ -485,37 +485,14 @@ static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 static int intel_iommu_ecs = 1;
-static int intel_iommu_pasid28;
 static int iommu_identity_mapping;
 
 #define IDENTMAP_ALL		1
 #define IDENTMAP_GFX		2
 #define IDENTMAP_AZALIA		4
 
-/* Broadwell and Skylake have broken ECS support — normal so-called "second
- * level" translation of DMA requests-without-PASID doesn't actually happen
- * unless you also set the NESTE bit in an extended context-entry. Which of
- * course means that SVM doesn't work because it's trying to do nested
- * translation of the physical addresses it finds in the process page tables,
- * through the IOVA->phys mapping found in the "second level" page tables.
- *
- * The VT-d specification was retroactively changed to change the definition
- * of the capability bits and pretend that Broadwell/Skylake never happened...
- * but unfortunately the wrong bit was changed. It's ECS which is broken, but
- * for some reason it was the PASID capability bit which was redefined (from
- * bit 28 on BDW/SKL to bit 40 in future).
- *
- * So our test for ECS needs to eschew those implementations which set the old
- * PASID capabiity bit 28, since those are the ones on which ECS is broken.
- * Unless we are working around the 'pasid28' limitations, that is, by putting
- * the device into passthrough mode for normal DMA and thus masking the bug.
- */
-#define ecs_enabled(iommu)	(intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
-				 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
-/* PASID support is thus enabled if ECS is enabled and *either* of the old
- * or new capability bits are set. */
-#define pasid_enabled(iommu)	(ecs_enabled(iommu) && \
-				 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
+#define ecs_enabled(iommu)	(intel_iommu_ecs && ecap_ecs(iommu->ecap))
+#define pasid_enabled(iommu)	(ecs_enabled(iommu) && ecap_pasid(iommu->ecap))
 
 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -578,11 +555,6 @@ static int __init intel_iommu_setup(char *str)
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
-		} else if (!strncmp(str, "pasid28", 7)) {
-			printk(KERN_INFO
-				"Intel-IOMMU: enable pre-production PASID support\n");
-			intel_iommu_pasid28 = 1;
-			iommu_identity_mapping |= IDENTMAP_GFX;
		} else if (!strncmp(str, "tboot_noforce", 13)) {
			printk(KERN_INFO
				"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");

@@ -1606,6 +1578,18 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
	iommu_flush_dev_iotlb(domain, addr, mask);
 }
 
+/* Notification for newly created mappings */
+static inline void __mapping_notify_one(struct intel_iommu *iommu,
+					struct dmar_domain *domain,
+					unsigned long pfn, unsigned int pages)
+{
+	/* It's a non-present to present mapping. Only flush if caching mode */
+	if (cap_caching_mode(iommu->cap))
+		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
+	else
+		iommu_flush_write_buffer(iommu);
+}
+
 static void iommu_flush_iova(struct iova_domain *iovad)
 {
	struct dmar_domain *domain;
@@ -2340,18 +2324,47 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
	return 0;
 }
 
+static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+			  struct scatterlist *sg, unsigned long phys_pfn,
+			  unsigned long nr_pages, int prot)
+{
+	int ret;
+	struct intel_iommu *iommu;
+
+	/* Do the real mapping first */
+	ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
+	if (ret)
+		return ret;
+
+	/* Notify about the new mapping */
+	if (domain_type_is_vm(domain)) {
+		/* VM typed domains can have more than one IOMMUs */
+		int iommu_id;
+		for_each_domain_iommu(iommu_id, domain) {
+			iommu = g_iommus[iommu_id];
+			__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+		}
+	} else {
+		/* General domains only have one IOMMU */
+		iommu = domain_get_iommu(domain);
+		__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+	}
+
+	return 0;
+}
+
 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
 {
-	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
+	return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
 }
 
 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
 {
-	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
+	return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
 }
 
 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)

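The refactor above separates concerns: __domain_mapping() only updates page tables, and the new domain_mapping() wrapper performs the post-map notification for every IOMMU the domain spans, using the caching-mode rule from __mapping_notify_one(). A toy userspace model of that flush decision; the types and names below are stand-ins, not the driver's:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_iommu { const char *name; bool caching_mode; };

    static void flush_iotlb_psi(struct toy_iommu *iommu)
    {
        printf("%s: IOTLB PSI flush (caching mode)\n", iommu->name);
    }

    static void flush_write_buffer(struct toy_iommu *iommu)
    {
        printf("%s: write buffer flush\n", iommu->name);
    }

    static void mapping_notify_one(struct toy_iommu *iommu)
    {
        /* Non-present to present mapping: IOTLB flush needed only in caching mode */
        if (iommu->caching_mode)
            flush_iotlb_psi(iommu);
        else
            flush_write_buffer(iommu);
    }

    int main(void)
    {
        /* A VM domain may span several IOMMUs; notify each one after mapping */
        struct toy_iommu iommus[] = { { "dmar0", true }, { "dmar1", false } };

        /* ... the __domain_mapping() equivalent would update page tables here ... */
        for (unsigned i = 0; i < 2; i++)
            mapping_notify_one(&iommus[i]);
        return 0;
    }
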
@@ -2533,7 +2546,7 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
	struct device_domain_info *info = NULL;
	struct dmar_domain *domain = NULL;
	struct intel_iommu *iommu;
-	u16 req_id, dma_alias;
+	u16 dma_alias;
	unsigned long flags;
	u8 bus, devfn;
 
@@ -2541,8 +2554,6 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
	if (!iommu)
		return NULL;
 
-	req_id = ((u16)bus << 8) | devfn;
-
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
 
@@ -2656,9 +2667,9 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);
 
-	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
-				  last_vpfn - first_vpfn + 1,
-				  DMA_PTE_READ|DMA_PTE_WRITE);
+	return __domain_mapping(domain, first_vpfn, NULL,
+				first_vpfn, last_vpfn - first_vpfn + 1,
+				DMA_PTE_READ|DMA_PTE_WRITE);
 }
 
 static int domain_prepare_identity_map(struct device *dev,
@@ -3625,14 +3636,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
	if (ret)
		goto error;
 
-	/* it's a non-present to present mapping. Only flush if caching mode */
-	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain,
-				      mm_to_dma_pfn(iova_pfn),
-				      size, 0, 1);
-	else
-		iommu_flush_write_buffer(iommu);
-
	start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;
@@ -3819,12 +3822,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
		return 0;
	}
 
-	/* it's a non-present to present mapping. Only flush if caching mode */
-	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
-	else
-		iommu_flush_write_buffer(iommu);
-
	return nelems;
 }

diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
@@ -319,7 +319,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
	} else
		pasid_max = 1 << 20;
 
-	if ((flags & SVM_FLAG_SUPERVISOR_MODE)) {
+	if (flags & SVM_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap))
			return -EINVAL;
	} else if (pasid) {

diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -898,8 +898,7 @@ static int __init arm_v7s_do_selftests(void)
 
	/* Full unmap */
	iova = 0;
-	i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG);
-	while (i != BITS_PER_LONG) {
+	for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
		size = 1UL << i;
 
		if (ops->unmap(ops, iova, size) != size)
@@ -916,8 +915,6 @@ static int __init arm_v7s_do_selftests(void)
			return __FAIL(ops);
 
		iova += SZ_16M;
-		i++;
-		i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i);
	}
 
	free_io_pgtable_ops(ops);

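Both selftest hunks (here and in io-pgtable-arm.c below) collapse an open-coded find_first_bit()/find_next_bit() loop into for_each_set_bit(), which expands to exactly that iteration. A runnable model with simplified stand-ins; the real kernel macro in <linux/bitops.h> takes a bitmap pointer plus a length, not a plain word:

    #include <stdio.h>

    #define NBITS (8 * (int)sizeof(unsigned long))

    static int next_bit(unsigned long word, int start)
    {
        for (int i = start; i < NBITS; i++)
            if (word & (1UL << i))
                return i;
        return NBITS;  /* no further set bit */
    }

    #define for_each_set_bit(bit, word) \
        for ((bit) = next_bit((word), 0); \
             (bit) < NBITS; \
             (bit) = next_bit((word), (bit) + 1))

    int main(void)
    {
        unsigned long pgsize_bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);
        int i;

        /* Old style: manual find-first plus find-next at the loop tail */
        i = next_bit(pgsize_bitmap, 0);
        while (i != NBITS) {
            printf("old: page size 2^%d\n", i);
            i = next_bit(pgsize_bitmap, i + 1);
        }

        /* New style: the iteration bookkeeping is hidden in the macro */
        for_each_set_bit(i, pgsize_bitmap)
            printf("new: page size 2^%d\n", i);

        return 0;
    }
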
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
@@ -231,12 +231,17 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
 {
	struct device *dev = cfg->iommu_dev;
+	int order = get_order(size);
+	struct page *p;
	dma_addr_t dma;
-	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);
+	void *pages;
 
-	if (!pages)
+	VM_BUG_ON((gfp & __GFP_HIGHMEM));
+	p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
+	if (!p)
		return NULL;
 
+	pages = page_address(p);
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
@@ -256,7 +261,7 @@ out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
 out_free:
-	free_pages_exact(pages, size);
+	__free_pages(p, order);
	return NULL;
 }
 
@@ -266,7 +271,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
-	free_pages_exact(pages, size);
+	free_pages((unsigned long)pages, get_order(size));
 }
 
 static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,

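Two things change in __arm_lpae_alloc_pages(): table memory is now allocated on the NUMA node of the IOMMU device (alloc_pages_node() with dev_to_node(dev)), and the exact-size allocation becomes an order-based one, which rounds up to a power-of-two number of pages. A runnable model of get_order()'s rounding, assuming a 4 KiB page (PAGE_SHIFT of 12):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Smallest order such that (PAGE_SIZE << order) >= size, as in the kernel */
    static int get_order(unsigned long size)
    {
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
            order++;
            size >>= 1;
        }
        return order;
    }

    int main(void)
    {
        unsigned long sizes[] = { 4096, 8192, 12288, 65536 };

        for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("size=%lu -> order=%d (%lu bytes allocated)\n",
                   sizes[i], get_order(sizes[i]),
                   PAGE_SIZE << get_order(sizes[i]));
        return 0;
    }
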
@@ -1120,8 +1125,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 
	/* Full unmap */
	iova = 0;
-	j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
-	while (j != BITS_PER_LONG) {
+	for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
		size = 1UL << j;
 
		if (ops->unmap(ops, iova, size) != size)
@@ -1138,8 +1142,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
			return __FAIL(ops, i);
 
		iova += SZ_1G;
-		j++;
-		j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
	}
 
	free_io_pgtable_ops(ops);

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
@@ -116,9 +116,11 @@ static void __iommu_detach_group(struct iommu_domain *domain,
 static int __init iommu_set_def_domain_type(char *str)
 {
	bool pt;
+	int ret;
 
-	if (!str || strtobool(str, &pt))
-		return -EINVAL;
+	ret = kstrtobool(str, &pt);
+	if (ret)
+		return ret;
 
	iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
	return 0;
@@ -322,7 +324,6 @@ static struct kobj_type iommu_group_ktype = {
 
 /**
  * iommu_group_alloc - Allocate a new group
- * @name: Optional name to associate with group, visible in sysfs
  *
  * This function is called by an iommu driver to allocate a new iommu
  * group. The iommu group represents the minimum granularity of the iommu.

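strtobool() is the older name for kstrtobool(), and the rewritten parser above also propagates the parser's real error code instead of flattening it to -EINVAL. A userspace sketch of the accepted spellings; the authoritative parser lives in lib/kstrtox.c and also accepts strings such as "on"/"off":

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel's kstrtobool() */
    static int kstrtobool(const char *s, bool *res)
    {
        if (!s)
            return -EINVAL;
        switch (s[0]) {
        case 'y': case 'Y': case '1':
            *res = true;
            return 0;
        case 'n': case 'N': case '0':
            *res = false;
            return 0;
        default:
            return -EINVAL;
        }
    }

    int main(void)
    {
        const char *args[] = { "1", "N", "maybe" };

        for (unsigned i = 0; i < 3; i++) {
            bool pt;
            int ret = kstrtobool(args[i], &pt);

            if (ret)
                printf("'%s' -> error %d\n", args[i], ret);
            else
                printf("'%s' -> %s\n", args[i], pt ? "identity" : "dma");
        }
        return 0;
    }
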
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
@@ -885,16 +885,14 @@ static int qcom_iommu_device_remove(struct platform_device *pdev)
 
 static int __maybe_unused qcom_iommu_resume(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
+	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
 
	return qcom_iommu_enable_clocks(qcom_iommu);
 }
 
 static int __maybe_unused qcom_iommu_suspend(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
+	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
 
	qcom_iommu_disable_clocks(qcom_iommu);
 

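The suspend/resume hunks work because platform_get_drvdata() is just dev_get_drvdata() on the embedded struct device, so converting dev to a platform_device and back is a no-op. A self-contained model of that equivalence, with simplified struct layouts:

    #include <assert.h>

    struct device { void *driver_data; };
    struct platform_device { struct device dev; };

    static void *dev_get_drvdata(const struct device *dev)
    {
        return dev->driver_data;
    }

    static void *platform_get_drvdata(const struct platform_device *pdev)
    {
        /* the real helper is defined exactly like this round-trip */
        return dev_get_drvdata(&pdev->dev);
    }

    int main(void)
    {
        struct platform_device pdev = {
            .dev = { .driver_data = (void *)0x1234 },
        };

        /* Both paths yield the same pointer; the shorter one needs no cast
         * back to the containing platform_device. */
        assert(platform_get_drvdata(&pdev) == dev_get_drvdata(&pdev.dev));
        return 0;
    }
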
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
@@ -72,6 +72,8 @@ struct gart_domain {
 
 static struct gart_device *gart_handle; /* unique for a system */
 
+static bool gart_debug;
+
 #define GART_PTE(_pfn) \
	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
 
@@ -271,6 +273,7 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;
	unsigned long pfn;
+	unsigned long pte;
 
	if (!gart_iova_range_valid(gart, iova, bytes))
		return -EINVAL;
@@ -282,6 +285,14 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
		spin_unlock_irqrestore(&gart->pte_lock, flags);
		return -EINVAL;
	}
+	if (gart_debug) {
+		pte = gart_read_pte(gart, iova);
+		if (pte & GART_ENTRY_PHYS_ADDR_VALID) {
+			spin_unlock_irqrestore(&gart->pte_lock, flags);
+			dev_err(gart->dev, "Page entry is in-use\n");
+			return -EBUSY;
+		}
+	}
	gart_set_pte(gart, iova, GART_PTE(pfn));
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
@@ -302,7 +313,7 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
	gart_set_pte(gart, iova, 0);
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
-	return 0;
+	return bytes;
 }
 
 static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -515,7 +526,9 @@ static void __exit tegra_gart_exit(void)
 
 subsys_initcall(tegra_gart_init);
 module_exit(tegra_gart_exit);
+module_param(gart_debug, bool, 0644);
 
+MODULE_PARM_DESC(gart_debug, "Enable GART debugging");
 MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
 MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
 MODULE_ALIAS("platform:tegra-gart");

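Returning bytes from gart_iommu_unmap() matters because the iommu core treats the return value of ops->unmap() as the number of bytes actually unmapped, with 0 signalling failure; returning 0 on success made every GART unmap look like it failed. A minimal model of that contract, with illustrative names:

    #include <stddef.h>
    #include <stdio.h>

    /* Toy stand-in for the driver's unmap callback */
    static size_t gart_iommu_unmap(unsigned long iova, size_t bytes)
    {
        (void)iova;       /* PTE clearing elided in this model */
        return bytes;     /* was 'return 0', which the core reads as failure */
    }

    int main(void)
    {
        size_t unmapped = gart_iommu_unmap(0x1000, 4096);

        if (unmapped != 4096)
            printf("unmap failed (%zu)\n", unmapped);
        else
            printf("unmapped %zu bytes\n", unmapped);
        return 0;
    }
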
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
@@ -121,7 +121,6 @@
 #define ecap_srs(e)		((e >> 31) & 0x1)
 #define ecap_ers(e)		((e >> 30) & 0x1)
 #define ecap_prs(e)		((e >> 29) & 0x1)
-#define ecap_broken_pasid(e)	((e >> 28) & 0x1)
 #define ecap_dis(e)		((e >> 27) & 0x1)
 #define ecap_nest(e)		((e >> 26) & 0x1)
 #define ecap_mts(e)		((e >> 25) & 0x1)

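ecap_broken_pasid() read bit 28 of the extended capability register, the pre-production PASID bit that the removed pasid28 logic in intel-iommu.c keyed on. A runnable model of the single-bit extraction these ecap_*() macros share; the register value below is made up:

    #include <stdint.h>
    #include <stdio.h>

    #define ecap_srs(e)          (((e) >> 31) & 0x1)  /* context line above */
    #define ecap_broken_pasid(e) (((e) >> 28) & 0x1)  /* removed by this merge */

    int main(void)
    {
        uint64_t ecap = (1ULL << 31) | (1ULL << 28);  /* made-up register value */

        printf("srs=%u broken_pasid=%u\n",
               (unsigned)ecap_srs(ecap), (unsigned)ecap_broken_pasid(ecap));
        return 0;
    }
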