Merge branches 'arm/exynos', 'arm/omap', 'arm/smmu', 'x86/vt-d', 'x86/amd' and 'core' into next
Conflicts: drivers/iommu/arm-smmu.c
This commit is contained in:
Commit
09b5269a1b
@@ -14,6 +14,7 @@ conditions.
                         "arm,smmu-v1"
                         "arm,smmu-v2"
                         "arm,mmu-400"
+                        "arm,mmu-401"
                         "arm,mmu-500"
 
                   depending on the particular implementation and/or the
@@ -1211,6 +1211,9 @@ void device_del(struct device *dev)
      */
     if (platform_notify_remove)
         platform_notify_remove(dev);
+    if (dev->bus)
+        blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+                         BUS_NOTIFY_REMOVED_DEVICE, dev);
     kobject_uevent(&dev->kobj, KOBJ_REMOVE);
     cleanup_device_parent(dev);
     kobject_del(&dev->kobj);
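
The device_del() hunk above introduces a BUS_NOTIFY_REMOVED_DEVICE event that fires once the device is truly gone from its bus; the VT-d notifier change further down in this merge depends on it. A minimal sketch of a subscriber, with my_bus_type as a made-up placeholder:

#include <linux/device.h>
#include <linux/notifier.h>

/* Hypothetical listener: BUS_NOTIFY_REMOVED_DEVICE arrives after the
 * device has been removed from the bus, unlike BUS_NOTIFY_DEL_DEVICE,
 * which fires before the removal. */
static int my_bus_notifier(struct notifier_block *nb,
                           unsigned long action, void *data)
{
    struct device *dev = data;

    if (action == BUS_NOTIFY_REMOVED_DEVICE)
        pr_info("%s fully removed\n", dev_name(dev));

    return NOTIFY_OK;
}

static struct notifier_block my_nb = {
    .notifier_call = my_bus_notifier,
};

/* Registered from init code, e.g.:
 *    bus_register_notifier(&my_bus_type, &my_nb);
 */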
@@ -507,7 +507,7 @@ int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
     if (err)
         goto out_free_dev;
 
-    if (!iommu_domain_has_cap(pd->domain, IOMMU_CAP_CACHE_COHERENCY)) {
+    if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
         usnic_err("IOMMU of %s does not support cache coherency\n",
                 dev_name(dev));
         err = -EINVAL;
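
This usnic hunk is a caller-side conversion to the bus-level iommu_capable() interface introduced by the iommu core hunks later in this merge; the old per-domain iommu_domain_has_cap() is removed. A small sketch of the new calling convention, with check_coherency() as a hypothetical helper:

#include <linux/device.h>
#include <linux/iommu.h>

/* Sketch: the capability is now a property of the bus's IOMMU driver,
 * so it can be queried before any domain exists or a device attaches. */
static int check_coherency(struct device *dev)
{
    if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY))
        return -EINVAL; /* IOMMU cannot enforce coherent DMA */

    return 0;
}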
@@ -87,6 +87,27 @@ int amd_iommu_max_glx_val = -1;
 
 static struct dma_map_ops amd_iommu_dma_ops;
 
+/*
+ * This struct contains device specific data for the IOMMU
+ */
+struct iommu_dev_data {
+    struct list_head list;            /* For domain->dev_list */
+    struct list_head dev_data_list;   /* For global dev_data_list */
+    struct list_head alias_list;      /* Link alias-groups together */
+    struct iommu_dev_data *alias_data;/* The alias dev_data */
+    struct protection_domain *domain; /* Domain the device is bound to */
+    u16 devid;                        /* PCI Device ID */
+    bool iommu_v2;                    /* Device can make use of IOMMUv2 */
+    bool passthrough;                 /* Default for device is pt_domain */
+    struct {
+        bool enabled;
+        int qdep;
+    } ats;                            /* ATS state */
+    bool pri_tlp;                     /* PASID TLB required for
+                                         PPR completions */
+    u32 errata;                       /* Bitmap for errata to apply */
+};
+
 /*
  * general struct to manage commands send to an IOMMU
  */
@@ -114,8 +135,9 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid)
     if (!dev_data)
         return NULL;
 
+    INIT_LIST_HEAD(&dev_data->alias_list);
+
     dev_data->devid = devid;
-    atomic_set(&dev_data->bind, 0);
 
     spin_lock_irqsave(&dev_data_list_lock, flags);
     list_add_tail(&dev_data->dev_data_list, &dev_data_list);
@@ -260,17 +282,13 @@ static bool check_device(struct device *dev)
     return true;
 }
 
-static int init_iommu_group(struct device *dev)
+static void init_iommu_group(struct device *dev)
 {
     struct iommu_group *group;
 
     group = iommu_group_get_for_dev(dev);
 
-    if (IS_ERR(group))
-        return PTR_ERR(group);
-
-    iommu_group_put(group);
-    return 0;
+    if (!IS_ERR(group))
+        iommu_group_put(group);
 }
 
 static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
@@ -340,7 +358,6 @@ static int iommu_init_device(struct device *dev)
     struct pci_dev *pdev = to_pci_dev(dev);
     struct iommu_dev_data *dev_data;
     u16 alias;
-    int ret;
 
     if (dev->archdata.iommu)
         return 0;
@@ -362,12 +379,9 @@ static int iommu_init_device(struct device *dev)
             return -ENOTSUPP;
         }
         dev_data->alias_data = alias_data;
-    }
 
-    ret = init_iommu_group(dev);
-    if (ret) {
-        free_dev_data(dev_data);
-        return ret;
+        /* Add device to the alias_list */
+        list_add(&dev_data->alias_list, &alias_data->alias_list);
     }
 
     if (pci_iommuv2_capable(pdev)) {
@@ -455,6 +469,15 @@ int __init amd_iommu_init_devices(void)
             goto out_free;
     }
 
+    /*
+     * Initialize IOMMU groups only after iommu_init_device() has
+     * had a chance to populate any IVRS defined aliases.
+     */
+    for_each_pci_dev(pdev) {
+        if (check_device(&pdev->dev))
+            init_iommu_group(&pdev->dev);
+    }
+
     return 0;
 
 out_free:
@@ -1368,6 +1391,9 @@ static int iommu_map_page(struct protection_domain *dom,
     count = PAGE_SIZE_PTE_COUNT(page_size);
     pte   = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
 
+    if (!pte)
+        return -ENOMEM;
+
     for (i = 0; i < count; ++i)
         if (IOMMU_PTE_PRESENT(pte[i]))
             return -EBUSY;
@@ -2122,35 +2148,29 @@ static void do_detach(struct iommu_dev_data *dev_data)
 static int __attach_device(struct iommu_dev_data *dev_data,
                struct protection_domain *domain)
 {
+    struct iommu_dev_data *head, *entry;
     int ret;
 
     /* lock domain */
     spin_lock(&domain->lock);
 
-    if (dev_data->alias_data != NULL) {
-        struct iommu_dev_data *alias_data = dev_data->alias_data;
+    head = dev_data;
 
-        /* Some sanity checks */
-        ret = -EBUSY;
-        if (alias_data->domain != NULL &&
-            alias_data->domain != domain)
-            goto out_unlock;
+    if (head->alias_data != NULL)
+        head = head->alias_data;
 
-        if (dev_data->domain != NULL &&
-            dev_data->domain != domain)
-            goto out_unlock;
+    /* Now we have the root of the alias group, if any */
 
-        /* Do real assignment */
-        if (alias_data->domain == NULL)
-            do_attach(alias_data, domain);
+    ret = -EBUSY;
+    if (head->domain != NULL)
+        goto out_unlock;
 
-        atomic_inc(&alias_data->bind);
-    }
+    /* Attach alias group root */
+    do_attach(head, domain);
 
-    if (dev_data->domain == NULL)
-        do_attach(dev_data, domain);
+    /* Attach other devices in the alias group */
+    list_for_each_entry(entry, &head->alias_list, alias_list)
+        do_attach(entry, domain);
 
-    atomic_inc(&dev_data->bind);
-
     ret = 0;
 
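
The rewritten __attach_device() drops the per-device bind refcount and instead resolves the alias-group root via alias_data, attaches the root, then walks alias_list to attach every member. A reduced sketch of the pattern with toy types (do_attach() here is an assumed stub, not the driver's function):

#include <linux/list.h>

struct toy_dev_data {
    struct list_head alias_list;     /* links alias-group members */
    struct toy_dev_data *alias_data; /* non-NULL: points at group root */
};

static void do_attach(struct toy_dev_data *d) { /* assumed stub */ }

static void toy_attach_group(struct toy_dev_data *dev_data)
{
    struct toy_dev_data *head = dev_data, *entry;

    if (head->alias_data)    /* hop to the alias-group root */
        head = head->alias_data;

    do_attach(head);         /* root first */
    list_for_each_entry(entry, &head->alias_list, alias_list)
        do_attach(entry);    /* then every member of the group */
}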
@@ -2298,6 +2318,7 @@ static int attach_device(struct device *dev,
  */
 static void __detach_device(struct iommu_dev_data *dev_data)
 {
+    struct iommu_dev_data *head, *entry;
     struct protection_domain *domain;
     unsigned long flags;
 
@@ -2307,15 +2328,14 @@ static void __detach_device(struct iommu_dev_data *dev_data)
 
     spin_lock_irqsave(&domain->lock, flags);
 
-    if (dev_data->alias_data != NULL) {
-        struct iommu_dev_data *alias_data = dev_data->alias_data;
+    head = dev_data;
+    if (head->alias_data != NULL)
+        head = head->alias_data;
 
-        if (atomic_dec_and_test(&alias_data->bind))
-            do_detach(alias_data);
-    }
+    list_for_each_entry(entry, &head->alias_list, alias_list)
+        do_detach(entry);
 
-    if (atomic_dec_and_test(&dev_data->bind))
-        do_detach(dev_data);
+    do_detach(head);
 
     spin_unlock_irqrestore(&domain->lock, flags);
 
@@ -2415,6 +2435,7 @@ static int device_change_notifier(struct notifier_block *nb,
     case BUS_NOTIFY_ADD_DEVICE:
 
         iommu_init_device(dev);
+        init_iommu_group(dev);
 
         /*
          * dev_data is still NULL and
@@ -3158,7 +3179,6 @@ static void cleanup_domain(struct protection_domain *domain)
         entry = list_first_entry(&domain->dev_list,
                      struct iommu_dev_data, list);
         __detach_device(entry);
-        atomic_set(&entry->bind, 0);
     }
 
     write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
@@ -3384,20 +3404,20 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
     return paddr;
 }
 
-static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
-                    unsigned long cap)
+static bool amd_iommu_capable(enum iommu_cap cap)
 {
     switch (cap) {
     case IOMMU_CAP_CACHE_COHERENCY:
-        return 1;
+        return true;
     case IOMMU_CAP_INTR_REMAP:
-        return irq_remapping_enabled;
+        return (irq_remapping_enabled == 1);
     }
 
-    return 0;
+    return false;
 }
 
 static const struct iommu_ops amd_iommu_ops = {
+    .capable = amd_iommu_capable,
     .domain_init = amd_iommu_domain_init,
     .domain_destroy = amd_iommu_domain_destroy,
     .attach_dev = amd_iommu_attach_device,
@@ -3405,7 +3425,6 @@ static const struct iommu_ops amd_iommu_ops = {
     .map = amd_iommu_map,
     .unmap = amd_iommu_unmap,
     .iova_to_phys = amd_iommu_iova_to_phys,
-    .domain_has_cap = amd_iommu_domain_has_cap,
     .pgsize_bitmap = AMD_IOMMU_PGSIZES,
 };
 
@@ -4235,7 +4254,7 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
     return 0;
 }
 
-static int setup_hpet_msi(unsigned int irq, unsigned int id)
+static int alloc_hpet_msi(unsigned int irq, unsigned int id)
 {
     struct irq_2_irte *irte_info;
     struct irq_cfg *cfg;
@@ -4274,6 +4293,6 @@ struct irq_remap_ops amd_iommu_irq_ops = {
     .compose_msi_msg = compose_msi_msg,
     .msi_alloc_irq   = msi_alloc_irq,
     .msi_setup_irq   = msi_setup_irq,
-    .setup_hpet_msi  = setup_hpet_msi,
+    .alloc_hpet_msi  = alloc_hpet_msi,
 };
 #endif
@@ -712,7 +712,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
     set_iommu_for_device(iommu, devid);
 }
 
-static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line)
+static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
 {
     struct devid_map *entry;
     struct list_head *list;
@@ -731,6 +731,8 @@ static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
         pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
             type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
 
+        *devid = entry->devid;
+
         return 0;
     }
 
@@ -739,7 +741,7 @@ static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
         return -ENOMEM;
 
     entry->id       = id;
-    entry->devid    = devid;
+    entry->devid    = *devid;
     entry->cmd_line = cmd_line;
 
     list_add_tail(&entry->list, list);
@@ -754,7 +756,7 @@ static int __init add_early_maps(void)
     for (i = 0; i < early_ioapic_map_size; ++i) {
         ret = add_special_device(IVHD_SPECIAL_IOAPIC,
                      early_ioapic_map[i].id,
-                     early_ioapic_map[i].devid,
+                     &early_ioapic_map[i].devid,
                      early_ioapic_map[i].cmd_line);
         if (ret)
             return ret;
@@ -763,7 +765,7 @@ static int __init add_early_maps(void)
     for (i = 0; i < early_hpet_map_size; ++i) {
         ret = add_special_device(IVHD_SPECIAL_HPET,
                      early_hpet_map[i].id,
-                     early_hpet_map[i].devid,
+                     &early_hpet_map[i].devid,
                      early_hpet_map[i].cmd_line);
         if (ret)
             return ret;
@@ -978,10 +980,17 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                 PCI_SLOT(devid),
                 PCI_FUNC(devid));
 
-            set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
-            ret = add_special_device(type, handle, devid, false);
+            ret = add_special_device(type, handle, &devid, false);
             if (ret)
                 return ret;
+
+            /*
+             * add_special_device might update the devid in case a
+             * command-line override is present. So call
+             * set_dev_entry_from_acpi after add_special_device.
+             */
+            set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+
             break;
         }
         default:
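
add_special_device() now takes u16 *devid so that an IVRS command-line override can rewrite the device ID, and the hunk above moves set_dev_entry_from_acpi() after that call so it programs the overridden value. A condensed, self-contained model of the ordering (both helpers are stand-ins, not the driver's code):

/* Stand-in for add_special_device(): may rewrite *devid. */
static int toy_override_devid(u16 *devid)
{
    *devid = 0x00f8; /* pretend a cmdline mapping matched */
    return 0;
}

/* Stand-in for set_dev_entry_from_acpi(). */
static void toy_program_entry(u16 devid) { }

static int toy_setup_special_device(u16 devid_from_ivrs)
{
    u16 devid = devid_from_ivrs;
    int ret = toy_override_devid(&devid);

    if (ret)
        return ret;

    toy_program_entry(devid); /* sees the possibly overridden devid */
    return 0;
}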
@@ -417,27 +417,6 @@ struct protection_domain {
 
 };
 
-/*
- * This struct contains device specific data for the IOMMU
- */
-struct iommu_dev_data {
-    struct list_head list;            /* For domain->dev_list */
-    struct list_head dev_data_list;   /* For global dev_data_list */
-    struct iommu_dev_data *alias_data;/* The alias dev_data */
-    struct protection_domain *domain; /* Domain the device is bound to */
-    atomic_t bind;                    /* Domain attach reference count */
-    u16 devid;                        /* PCI Device ID */
-    bool iommu_v2;                    /* Device can make use of IOMMUv2 */
-    bool passthrough;                 /* Default for device is pt_domain */
-    struct {
-        bool enabled;
-        int qdep;
-    } ats;                            /* ATS state */
-    bool pri_tlp;                     /* PASID TLB required for
-                                         PPR completions */
-    u32 errata;                       /* Bitmap for errata to apply */
-};
-
 /*
  * For dynamic growth the aperture size is split into ranges of 128MB of
  * DMA address space each. This struct represents one such range.
@@ -24,7 +24,7 @@
  *    - v7/v8 long-descriptor format
  *    - Non-secure access to the SMMU
  *    - 4k and 64k pages, with contiguous pte hints.
- *    - Up to 42-bit addressing (dependent on VA_BITS)
+ *    - Up to 48-bit addressing (dependent on VA_BITS)
  *    - Context fault reporting
  */
 
@@ -59,7 +59,7 @@
 
 /* SMMU global address space */
 #define ARM_SMMU_GR0(smmu)        ((smmu)->base)
-#define ARM_SMMU_GR1(smmu)        ((smmu)->base + (smmu)->pagesize)
+#define ARM_SMMU_GR1(smmu)        ((smmu)->base + (1 << (smmu)->pgshift))
 
 /*
  * SMMU global address space with conditional offset to access secure
@@ -224,7 +224,7 @@
 
 /* Translation context bank */
 #define ARM_SMMU_CB_BASE(smmu)    ((smmu)->base + ((smmu)->size >> 1))
-#define ARM_SMMU_CB(smmu, n)      ((n) * (smmu)->pagesize)
+#define ARM_SMMU_CB(smmu, n)      ((n) * (1 << (smmu)->pgshift))
 
 #define ARM_SMMU_CB_SCTLR         0x0
 #define ARM_SMMU_CB_RESUME        0x8
@@ -326,6 +326,16 @@
 
 #define FSYNR0_WNR            (1 << 4)
 
+static int force_stage;
+module_param_named(force_stage, force_stage, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(force_stage,
+    "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
+
+enum arm_smmu_arch_version {
+    ARM_SMMU_V1 = 1,
+    ARM_SMMU_V2,
+};
+
 struct arm_smmu_smr {
     u8                idx;
     u16               mask;
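
Two additions here: a force_stage module parameter (consumed by a later hunk that masks the ID0 stage bits) and an explicit architecture-version enum. Making ARM_SMMU_V1 = 1 keeps the driver's ordinal comparisons such as version > ARM_SMMU_V1 meaningful; a compile-time check of that assumption might look like the following (kernel code would more idiomatically use BUILD_BUG_ON):

/* Sketch: the driver's version checks rely on the enum staying ordered. */
_Static_assert(ARM_SMMU_V2 > ARM_SMMU_V1,
               "arm_smmu_arch_version must stay ordinal");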
@@ -349,7 +359,7 @@ struct arm_smmu_device {
 
     void __iomem            *base;
     unsigned long           size;
-    unsigned long           pagesize;
+    unsigned long           pgshift;
 
 #define ARM_SMMU_FEAT_COHERENT_WALK    (1 << 0)
 #define ARM_SMMU_FEAT_STREAM_MATCH     (1 << 1)
@@ -360,7 +370,7 @@ struct arm_smmu_device {
 
 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
     u32                     options;
-    int                     version;
+    enum arm_smmu_arch_version version;
 
     u32                     num_context_banks;
     u32                     num_s2_context_banks;
@@ -370,8 +380,9 @@ struct arm_smmu_device {
     u32                     num_mapping_groups;
     DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
 
-    unsigned long           input_size;
+    unsigned long           s1_input_size;
     unsigned long           s1_output_size;
+    unsigned long           s2_input_size;
     unsigned long           s2_output_size;
 
     u32                     num_global_irqs;
@@ -426,17 +437,17 @@ static void parse_driver_options(struct arm_smmu_device *smmu)
     } while (arm_smmu_options[++i].opt);
 }
 
-static struct device *dev_get_master_dev(struct device *dev)
+static struct device_node *dev_get_dev_node(struct device *dev)
 {
     if (dev_is_pci(dev)) {
         struct pci_bus *bus = to_pci_dev(dev)->bus;
 
         while (!pci_is_root_bus(bus))
             bus = bus->parent;
-        return bus->bridge->parent;
+        return bus->bridge->parent->of_node;
     }
 
-    return dev;
+    return dev->of_node;
 }
 
 static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
@@ -461,15 +472,17 @@ static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
 }
 
 static struct arm_smmu_master_cfg *
-find_smmu_master_cfg(struct arm_smmu_device *smmu, struct device *dev)
+find_smmu_master_cfg(struct device *dev)
 {
-    struct arm_smmu_master *master;
+    struct arm_smmu_master_cfg *cfg = NULL;
+    struct iommu_group *group = iommu_group_get(dev);
 
-    if (dev_is_pci(dev))
-        return dev->archdata.iommu;
+    if (group) {
+        cfg = iommu_group_get_iommudata(group);
+        iommu_group_put(group);
+    }
 
-    master = find_smmu_master(smmu, dev->of_node);
-    return master ? &master->cfg : NULL;
+    return cfg;
 }
 
 static int insert_smmu_master(struct arm_smmu_device *smmu,
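
find_smmu_master_cfg() now reads the master configuration from the device's IOMMU group data rather than from dev->archdata.iommu, giving PCI and platform masters one lookup path. A hedged sketch of the store/fetch pairing (stash_cfg() mirrors what arm_smmu_add_device() does later in this diff):

#include <linux/iommu.h>

/* Store once per group; the release callback, if any, frees the data
 * when the group goes away. */
static void stash_cfg(struct iommu_group *group, void *cfg,
                      void (*releasefn)(void *))
{
    iommu_group_set_iommudata(group, cfg, releasefn);
}

/* Any device in the group then sees the same cfg. */
static void *fetch_cfg(struct device *dev)
{
    struct iommu_group *group = iommu_group_get(dev);
    void *cfg = NULL;

    if (group) {
        cfg = iommu_group_get_iommudata(group);
        iommu_group_put(group); /* drop ref taken by iommu_group_get() */
    }

    return cfg;
}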
@@ -545,7 +558,7 @@ static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
 {
     struct arm_smmu_device *smmu;
     struct arm_smmu_master *master = NULL;
-    struct device_node *dev_node = dev_get_master_dev(dev)->of_node;
+    struct device_node *dev_node = dev_get_dev_node(dev);
 
     spin_lock(&arm_smmu_devices_lock);
     list_for_each_entry(smmu, &arm_smmu_devices, list) {
@@ -729,7 +742,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 
     /* CBAR */
     reg = cfg->cbar;
-    if (smmu->version == 1)
+    if (smmu->version == ARM_SMMU_V1)
         reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
 
     /*
@@ -744,7 +757,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
     }
     writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
 
-    if (smmu->version > 1) {
+    if (smmu->version > ARM_SMMU_V1) {
         /* CBA2R */
 #ifdef CONFIG_64BIT
         reg = CBA2R_RW64_64BIT;
@@ -755,7 +768,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
                    gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
 
         /* TTBCR2 */
-        switch (smmu->input_size) {
+        switch (smmu->s1_input_size) {
         case 32:
             reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
             break;
@@ -817,14 +830,14 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
      * TTBCR
      * We use long descriptor, with inner-shareable WBWA tables in TTBR0.
      */
-    if (smmu->version > 1) {
+    if (smmu->version > ARM_SMMU_V1) {
         if (PAGE_SIZE == SZ_4K)
             reg = TTBCR_TG0_4K;
         else
             reg = TTBCR_TG0_64K;
 
         if (!stage1) {
-            reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT;
+            reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT;
 
             switch (smmu->s2_output_size) {
             case 32:
@@ -847,7 +860,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
                 break;
             }
         } else {
-            reg |= (64 - smmu->input_size) << TTBCR_T0SZ_SHIFT;
+            reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT;
         }
     } else {
         reg = 0;
@@ -914,7 +927,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
         goto out_unlock;
 
     cfg->cbndx = ret;
-    if (smmu->version == 1) {
+    if (smmu->version == ARM_SMMU_V1) {
         cfg->irptndx = atomic_inc_return(&smmu->irptndx);
         cfg->irptndx %= smmu->num_context_irqs;
     } else {
@@ -1151,9 +1164,10 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
     struct arm_smmu_device *smmu = smmu_domain->smmu;
     void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
+    /* Devices in an IOMMU group may already be configured */
     ret = arm_smmu_master_configure_smrs(smmu, cfg);
     if (ret)
-        return ret;
+        return ret == -EEXIST ? 0 : ret;
 
     for (i = 0; i < cfg->num_streamids; ++i) {
         u32 idx, s2cr;
@@ -1174,6 +1188,10 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
     struct arm_smmu_device *smmu = smmu_domain->smmu;
     void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
+    /* An IOMMU group is torn down by the first device to be removed */
+    if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
+        return;
+
     /*
      * We *must* clear the S2CR first, because freeing the SMR means
      * that it can be re-allocated immediately.
@@ -1195,12 +1213,17 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
     struct arm_smmu_device *smmu, *dom_smmu;
     struct arm_smmu_master_cfg *cfg;
 
-    smmu = dev_get_master_dev(dev)->archdata.iommu;
+    smmu = find_smmu_for_device(dev);
     if (!smmu) {
         dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
         return -ENXIO;
     }
 
+    if (dev->archdata.iommu) {
+        dev_err(dev, "already attached to IOMMU domain\n");
+        return -EEXIST;
+    }
+
     /*
      * Sanity check the domain. We don't support domains across
      * different SMMUs.
@@ -1223,11 +1246,14 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
     }
 
     /* Looks ok, so add the device to the domain */
-    cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
+    cfg = find_smmu_master_cfg(dev);
     if (!cfg)
         return -ENODEV;
 
-    return arm_smmu_domain_add_master(smmu_domain, cfg);
+    ret = arm_smmu_domain_add_master(smmu_domain, cfg);
+    if (!ret)
+        dev->archdata.iommu = domain;
+    return ret;
 }
 
 static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1235,9 +1261,12 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
     struct arm_smmu_domain *smmu_domain = domain->priv;
     struct arm_smmu_master_cfg *cfg;
 
-    cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
-    if (cfg)
-        arm_smmu_domain_remove_master(smmu_domain, cfg);
+    cfg = find_smmu_master_cfg(dev);
+    if (!cfg)
+        return;
+
+    dev->archdata.iommu = NULL;
+    arm_smmu_domain_remove_master(smmu_domain, cfg);
 }
 
 static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
@@ -1379,6 +1408,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
         ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
                           prot, stage);
         phys += next - addr;
+        pfn = __phys_to_pfn(phys);
     } while (pmd++, addr = next, addr < end);
 
     return ret;
@@ -1431,9 +1461,11 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 
     if (cfg->cbar == CBAR_TYPE_S2_TRANS) {
         stage = 2;
+        input_mask = (1ULL << smmu->s2_input_size) - 1;
         output_mask = (1ULL << smmu->s2_output_size) - 1;
     } else {
         stage = 1;
+        input_mask = (1ULL << smmu->s1_input_size) - 1;
         output_mask = (1ULL << smmu->s1_output_size) - 1;
     }
 
@@ -1443,7 +1475,6 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
     if (size & ~PAGE_MASK)
         return -EINVAL;
 
-    input_mask = (1ULL << smmu->input_size) - 1;
     if ((phys_addr_t)iova & ~input_mask)
         return -ERANGE;
 
@@ -1526,20 +1557,19 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
     return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
 }
 
-static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
-                   unsigned long cap)
+static bool arm_smmu_capable(enum iommu_cap cap)
 {
-    struct arm_smmu_domain *smmu_domain = domain->priv;
-    struct arm_smmu_device *smmu = smmu_domain->smmu;
-    u32 features = smmu ? smmu->features : 0;
-
     switch (cap) {
     case IOMMU_CAP_CACHE_COHERENCY:
-        return features & ARM_SMMU_FEAT_COHERENT_WALK;
+        /*
+         * Return true here as the SMMU can always send out coherent
+         * requests.
+         */
+        return true;
     case IOMMU_CAP_INTR_REMAP:
-        return 1; /* MSIs are just memory writes */
+        return true; /* MSIs are just memory writes */
     default:
-        return 0;
+        return false;
     }
 }
 
@@ -1549,17 +1579,19 @@ static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
     return 0; /* Continue walking */
 }
 
+static void __arm_smmu_release_pci_iommudata(void *data)
+{
+    kfree(data);
+}
+
 static int arm_smmu_add_device(struct device *dev)
 {
     struct arm_smmu_device *smmu;
+    struct arm_smmu_master_cfg *cfg;
     struct iommu_group *group;
+    void (*releasefn)(void *) = NULL;
     int ret;
 
-    if (dev->archdata.iommu) {
-        dev_warn(dev, "IOMMU driver already assigned to device\n");
-        return -EINVAL;
-    }
-
     smmu = find_smmu_for_device(dev);
     if (!smmu)
         return -ENODEV;
@@ -1571,7 +1603,6 @@ static int arm_smmu_add_device(struct device *dev)
     }
 
     if (dev_is_pci(dev)) {
-        struct arm_smmu_master_cfg *cfg;
         struct pci_dev *pdev = to_pci_dev(dev);
 
         cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
@@ -1587,11 +1618,20 @@ static int arm_smmu_add_device(struct device *dev)
          */
         pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
                        &cfg->streamids[0]);
-        dev->archdata.iommu = cfg;
+        releasefn = __arm_smmu_release_pci_iommudata;
     } else {
-        dev->archdata.iommu = smmu;
+        struct arm_smmu_master *master;
+
+        master = find_smmu_master(smmu, dev->of_node);
+        if (!master) {
+            ret = -ENODEV;
+            goto out_put_group;
+        }
+
+        cfg = &master->cfg;
     }
 
+    iommu_group_set_iommudata(group, cfg, releasefn);
     ret = iommu_group_add_device(group, dev);
 
 out_put_group:
@@ -1601,14 +1641,11 @@ out_put_group:
 
 static void arm_smmu_remove_device(struct device *dev)
 {
-    if (dev_is_pci(dev))
-        kfree(dev->archdata.iommu);
-
-    dev->archdata.iommu = NULL;
     iommu_group_remove_device(dev);
 }
 
 static const struct iommu_ops arm_smmu_ops = {
+    .capable        = arm_smmu_capable,
     .domain_init    = arm_smmu_domain_init,
     .domain_destroy = arm_smmu_domain_destroy,
     .attach_dev     = arm_smmu_attach_dev,
@@ -1616,7 +1653,6 @@ static const struct iommu_ops arm_smmu_ops = {
     .map            = arm_smmu_map,
     .unmap          = arm_smmu_unmap,
     .iova_to_phys   = arm_smmu_iova_to_phys,
-    .domain_has_cap = arm_smmu_domain_has_cap,
     .add_device     = arm_smmu_add_device,
     .remove_device  = arm_smmu_remove_device,
     .pgsize_bitmap  = (SECTION_SIZE |
@@ -1702,10 +1738,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
     u32 id;
 
     dev_notice(smmu->dev, "probing hardware configuration...\n");
-
-    /* Primecell ID */
-    id = readl_relaxed(gr0_base + ARM_SMMU_GR0_PIDR2);
-    smmu->version = ((id >> PIDR2_ARCH_SHIFT) & PIDR2_ARCH_MASK) + 1;
     dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
 
     /* ID0 */
@@ -1716,6 +1748,13 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
         return -ENODEV;
     }
 #endif
+
+    /* Restrict available stages based on module parameter */
+    if (force_stage == 1)
+        id &= ~(ID0_S2TS | ID0_NTS);
+    else if (force_stage == 2)
+        id &= ~(ID0_S1TS | ID0_NTS);
+
     if (id & ID0_S1TS) {
         smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
         dev_notice(smmu->dev, "\tstage 1 translation\n");
@@ -1732,8 +1771,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
     }
 
     if (!(smmu->features &
-        (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 |
-         ARM_SMMU_FEAT_TRANS_NESTED))) {
+        (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
         dev_err(smmu->dev, "\tno translation support!\n");
         return -ENODEV;
     }
@@ -1779,12 +1817,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 
     /* ID1 */
     id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
-    smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;
+    smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
 
     /* Check for size mismatch of SMMU address space from mapped region */
     size = 1 <<
         (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
-    size *= (smmu->pagesize << 1);
+    size *= 2 << smmu->pgshift;
     if (smmu->size != size)
         dev_warn(smmu->dev,
             "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
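
Storing the page shift (12 or 16) instead of the page size simplifies the arithmetic above: 1 << pgshift recovers the old pagesize, and 2 << pgshift equals the old (pagesize << 1) used to size the register space. A quick illustration of the equivalence:

/* pgshift == 12: 1 << 12 == 4K,  2 << 12 == 8K   == (4K << 1)
 * pgshift == 16: 1 << 16 == 64K, 2 << 16 == 128K == (64K << 1) */
static unsigned long smmu_page_size(unsigned long pgshift)
{
    return 1UL << pgshift;
}

static unsigned long smmu_two_pages(unsigned long pgshift)
{
    return 2UL << pgshift; /* == smmu_page_size(pgshift) << 1 */
}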
@@ -1803,28 +1841,21 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
     /* ID2 */
     id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
     size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
+    smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);
 
-    /*
-     * Stage-1 output limited by stage-2 input size due to pgd
-     * allocation (PTRS_PER_PGD).
-     */
-    if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
+    /* Stage-2 input size limited due to pgd allocation (PTRS_PER_PGD) */
 #ifdef CONFIG_64BIT
-        smmu->s1_output_size = min_t(unsigned long, VA_BITS, size);
+    smmu->s2_input_size = min_t(unsigned long, VA_BITS, size);
 #else
-        smmu->s1_output_size = min(32UL, size);
+    smmu->s2_input_size = min(32UL, size);
 #endif
-    } else {
-        smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT,
-                         size);
-    }
 
     /* The stage-2 output mask is also applied for bypass */
     size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
     smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);
 
-    if (smmu->version == 1) {
-        smmu->input_size = 32;
+    if (smmu->version == ARM_SMMU_V1) {
+        smmu->s1_input_size = 32;
     } else {
 #ifdef CONFIG_64BIT
         size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
@@ -1832,7 +1863,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 #else
         size = 32;
 #endif
-        smmu->input_size = size;
+        smmu->s1_input_size = size;
 
         if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
             (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
@@ -1843,15 +1874,30 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
         }
     }
 
-    dev_notice(smmu->dev,
-           "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n",
-           smmu->input_size, smmu->s1_output_size,
-           smmu->s2_output_size);
+    if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
+        dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
+               smmu->s1_input_size, smmu->s1_output_size);
+
+    if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
+        dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
+               smmu->s2_input_size, smmu->s2_output_size);
+
     return 0;
 }
 
+static const struct of_device_id arm_smmu_of_match[] = {
+    { .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
+    { .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
+    { .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
+    { .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
+    { .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
+    { },
+};
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
+
 static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 {
+    const struct of_device_id *of_id;
     struct resource *res;
     struct arm_smmu_device *smmu;
     struct device *dev = &pdev->dev;
@@ -1866,6 +1912,9 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
     }
     smmu->dev = dev;
 
+    of_id = of_match_node(arm_smmu_of_match, dev->of_node);
+    smmu->version = (enum arm_smmu_arch_version)of_id->data;
+
     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
     smmu->base = devm_ioremap_resource(dev, res);
     if (IS_ERR(smmu->base))
@@ -1930,7 +1979,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 
     parse_driver_options(smmu);
 
-    if (smmu->version > 1 &&
+    if (smmu->version > ARM_SMMU_V1 &&
         smmu->num_context_banks != smmu->num_context_irqs) {
         dev_err(dev,
             "found only %d context interrupt(s) but %d required\n",
@@ -2011,17 +2060,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
     return 0;
 }
 
-#ifdef CONFIG_OF
-static struct of_device_id arm_smmu_of_match[] = {
-    { .compatible = "arm,smmu-v1", },
-    { .compatible = "arm,smmu-v2", },
-    { .compatible = "arm,mmu-400", },
-    { .compatible = "arm,mmu-500", },
-    { },
-};
-MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
-#endif
-
 static struct platform_driver arm_smmu_driver = {
     .driver = {
         .owner        = THIS_MODULE,
@@ -155,6 +155,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
     if (event == BUS_NOTIFY_ADD_DEVICE) {
         for (tmp = dev; tmp; tmp = tmp->bus->self) {
             level--;
+            info->path[level].bus = tmp->bus->number;
             info->path[level].device = PCI_SLOT(tmp->devfn);
             info->path[level].function = PCI_FUNC(tmp->devfn);
             if (pci_is_root_bus(tmp->bus))
@@ -177,17 +178,33 @@ static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
     int i;
 
     if (info->bus != bus)
-        return false;
+        goto fallback;
     if (info->level != count)
-        return false;
+        goto fallback;
 
     for (i = 0; i < count; i++) {
         if (path[i].device != info->path[i].device ||
             path[i].function != info->path[i].function)
-            return false;
+            goto fallback;
     }
 
     return true;
+
+fallback:
+
+    if (count != 1)
+        return false;
+
+    i = info->level - 1;
+    if (bus              == info->path[i].bus &&
+        path[0].device   == info->path[i].device &&
+        path[0].function == info->path[i].function) {
+        pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
+            bus, path[0].device, path[0].function);
+        return true;
+    }
+
+    return false;
 }
 
 /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
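
The new fallback accepts firmware that records an RMRR device scope as a single bogus path entry, as long as that entry matches the last hop of the path the kernel actually walked; it logs FW_BUG and applies the workaround rather than dropping the entry. A toy model of just the comparison, with simplified types:

struct toy_path { int bus, device, function; };

/* Stand-in for the fallback branch of dmar_match_pci_path(). */
static int toy_rmrr_fallback(const struct toy_path *last_hop, int bus,
                             const struct toy_path *acpi_path, int count)
{
    if (count != 1)
        return 0;

    return bus == last_hop->bus &&
           acpi_path[0].device == last_hop->device &&
           acpi_path[0].function == last_hop->function;
}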
@@ -247,7 +264,7 @@ int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
 
     for_each_active_dev_scope(devices, count, index, tmp)
         if (tmp == &info->dev->dev) {
-            rcu_assign_pointer(devices[index].dev, NULL);
+            RCU_INIT_POINTER(devices[index].dev, NULL);
             synchronize_rcu();
             put_device(tmp);
             return 1;
@@ -32,7 +32,7 @@
 typedef u32 sysmmu_iova_t;
 typedef u32 sysmmu_pte_t;
 
-/* We does not consider super section mapping (16MB) */
+/* We do not consider super section mapping (16MB) */
 #define SECT_ORDER 20
 #define LPAGE_ORDER 16
 #define SPAGE_ORDER 12
@@ -307,7 +307,7 @@ static void show_fault_information(const char *name,
 
 static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
 {
-    /* SYSMMU is in blocked when interrupt occurred. */
+    /* SYSMMU is in blocked state when interrupt occurred. */
     struct sysmmu_drvdata *data = dev_id;
     enum exynos_sysmmu_inttype itype;
     sysmmu_iova_t addr = -1;
@@ -567,8 +567,8 @@ static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
         /*
          * L2TLB invalidation required
          * 4KB page: 1 invalidation
-         * 64KB page: 16 invalidation
-         * 1MB page: 64 invalidation
+         * 64KB page: 16 invalidations
+         * 1MB page: 64 invalidations
          * because it is set-associative TLB
          * with 8-way and 64 sets.
          * 1MB page can be cached in one of all sets.
@@ -714,7 +714,7 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain)
     if (!priv->lv2entcnt)
         goto err_counter;
 
-    /* w/a of System MMU v3.3 to prevent caching 1MiB mapping */
+    /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
     for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
         priv->pgtable[i + 0] = ZERO_LV2LINK;
         priv->pgtable[i + 1] = ZERO_LV2LINK;
@@ -861,14 +861,14 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
         pgtable_flush(sent, sent + 1);
 
         /*
-         * If pretched SLPD is a fault SLPD in zero_l2_table, FLPD cache
-         * may caches the address of zero_l2_table. This function
-         * replaces the zero_l2_table with new L2 page table to write
-         * valid mappings.
+         * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
+         * FLPD cache may cache the address of zero_l2_table. This
+         * function replaces the zero_l2_table with new L2 page table
+         * to write valid mappings.
          * Accessing the valid area may cause page fault since FLPD
-         * cache may still caches zero_l2_table for the valid area
-         * instead of new L2 page table that have the mapping
-         * information of the valid area
+         * cache may still cache zero_l2_table for the valid area
+         * instead of new L2 page table that has the mapping
+         * information of the valid area.
          * Thus any replacement of zero_l2_table with other valid L2
         * page table must involve FLPD cache invalidation for System
         * MMU v3.3.
@@ -963,27 +963,27 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
 /*
  * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
  *
- * System MMU v3.x have an advanced logic to improve address translation
+ * System MMU v3.x has advanced logic to improve address translation
  * performance with caching more page table entries by a page table walk.
- * However, the logic has a bug that caching fault page table entries and System
- * MMU reports page fault if the cached fault entry is hit even though the fault
- * entry is updated to a valid entry after the entry is cached.
- * To prevent caching fault page table entries which may be updated to valid
- * entries later, the virtual memory manager should care about the w/a about the
- * problem. The followings describe w/a.
+ * However, the logic has a bug that while caching faulty page table entries,
+ * System MMU reports page fault if the cached fault entry is hit even though
+ * the fault entry is updated to a valid entry after the entry is cached.
+ * To prevent caching faulty page table entries which may be updated to valid
+ * entries later, the virtual memory manager should care about the workaround
+ * for the problem. The following describes the workaround.
  *
  * Any two consecutive I/O virtual address regions must have a hole of 128KiB
- * in maximum to prevent misbehavior of System MMU 3.x. (w/a of h/w bug)
+ * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
- * Precisely, any start address of I/O virtual region must be aligned by
+ * Precisely, any start address of I/O virtual region must be aligned with
  * the following sizes for System MMU v3.1 and v3.2.
  * System MMU v3.1: 128KiB
  * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
- * more w/a.
- * - Any two consecutive I/O virtual regions must be have a hole of larger size
- *   than or equal size to 128KiB.
+ * more workarounds.
+ * - Any two consecutive I/O virtual regions must have a hole of size larger
+ *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
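
For IOVA allocators sitting above exynos-iommu, the rewritten comment reduces to two rules on System MMU v3.3: start each I/O virtual region on a 128KiB boundary and keep at least 128KiB of hole before it. A hypothetical helper honoring both (not driver code; ALIGN and SZ_128K come from kernel headers):

#include <linux/kernel.h>
#include <linux/sizes.h>

#define EXYNOS_IOVA_ALIGN SZ_128K /* System MMU v3.3 workaround */

/* Pick the next region base: >= 128KiB past the previous region's end
 * and 128KiB-aligned. */
static unsigned long pick_iova_base(unsigned long prev_end)
{
    return ALIGN(prev_end + EXYNOS_IOVA_ALIGN, EXYNOS_IOVA_ALIGN);
}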
@@ -1061,7 +1061,8 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
             goto err;
         }
 
-        *ent = ZERO_LV2LINK; /* w/a for h/w bug in Sysmem MMU v3.3 */
+        /* workaround for h/w bug in System MMU v3.3 */
+        *ent = ZERO_LV2LINK;
         pgtable_flush(ent, ent + 1);
         size = SECT_SIZE;
         goto done;
@@ -411,8 +411,7 @@ static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
     return get_phys_addr(dma_domain, iova);
 }
 
-static int fsl_pamu_domain_has_cap(struct iommu_domain *domain,
-                   unsigned long cap)
+static bool fsl_pamu_capable(enum iommu_cap cap)
 {
     return cap == IOMMU_CAP_CACHE_COHERENCY;
 }
@@ -1080,6 +1079,7 @@ static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
 }
 
 static const struct iommu_ops fsl_pamu_ops = {
+    .capable        = fsl_pamu_capable,
     .domain_init    = fsl_pamu_domain_init,
     .domain_destroy = fsl_pamu_domain_destroy,
     .attach_dev     = fsl_pamu_attach_device,
@@ -1089,7 +1089,6 @@ static const struct iommu_ops fsl_pamu_ops = {
     .domain_get_windows = fsl_pamu_get_windows,
     .domain_set_windows = fsl_pamu_set_windows,
     .iova_to_phys   = fsl_pamu_iova_to_phys,
-    .domain_has_cap = fsl_pamu_domain_has_cap,
     .domain_set_attr = fsl_pamu_set_domain_attr,
     .domain_get_attr = fsl_pamu_get_domain_attr,
     .add_device     = fsl_pamu_add_device,
@@ -3865,8 +3865,7 @@ static int device_notifier(struct notifier_block *nb,
     if (iommu_dummy(dev))
         return 0;
 
-    if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
-        action != BUS_NOTIFY_DEL_DEVICE)
+    if (action != BUS_NOTIFY_REMOVED_DEVICE)
         return 0;
 
     /*
@@ -4415,17 +4414,14 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
     return phys;
 }
 
-static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
-                      unsigned long cap)
+static bool intel_iommu_capable(enum iommu_cap cap)
 {
-    struct dmar_domain *dmar_domain = domain->priv;
-
     if (cap == IOMMU_CAP_CACHE_COHERENCY)
-        return dmar_domain->iommu_snooping;
+        return domain_update_iommu_snooping(NULL) == 1;
     if (cap == IOMMU_CAP_INTR_REMAP)
-        return irq_remapping_enabled;
+        return irq_remapping_enabled == 1;
 
-    return 0;
+    return false;
 }
 
 static int intel_iommu_add_device(struct device *dev)
@@ -4464,6 +4460,7 @@ static void intel_iommu_remove_device(struct device *dev)
 }
 
 static const struct iommu_ops intel_iommu_ops = {
+    .capable        = intel_iommu_capable,
     .domain_init    = intel_iommu_domain_init,
     .domain_destroy = intel_iommu_domain_destroy,
     .attach_dev     = intel_iommu_attach_device,
@@ -4471,7 +4468,6 @@ static const struct iommu_ops intel_iommu_ops = {
     .map            = intel_iommu_map,
     .unmap          = intel_iommu_unmap,
     .iova_to_phys   = intel_iommu_iova_to_phys,
-    .domain_has_cap = intel_iommu_domain_has_cap,
     .add_device     = intel_iommu_add_device,
     .remove_device  = intel_iommu_remove_device,
     .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
@@ -438,8 +438,7 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
               (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
 
     /* Set interrupt-remapping table pointer */
-    iommu->gcmd |= DMA_GCMD_SIRTP;
-    writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+    writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);
 
     IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
               readl, (sts & DMA_GSTS_IRTPS), sts);
@@ -1139,7 +1138,7 @@ static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
     return ret;
 }
 
-static int intel_setup_hpet_msi(unsigned int irq, unsigned int id)
+static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
 {
     int ret = -1;
     struct intel_iommu *iommu;
@@ -1170,5 +1169,5 @@ struct irq_remap_ops intel_irq_remap_ops = {
     .compose_msi_msg = intel_compose_msi_msg,
     .msi_alloc_irq   = intel_msi_alloc_irq,
     .msi_setup_irq   = intel_msi_setup_irq,
-    .setup_hpet_msi  = intel_setup_hpet_msi,
+    .alloc_hpet_msi  = intel_alloc_hpet_msi,
 };
@@ -30,6 +30,7 @@
 #include <linux/notifier.h>
 #include <linux/err.h>
 #include <linux/pci.h>
+#include <linux/bitops.h>
 #include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
@@ -519,6 +520,9 @@ int iommu_group_id(struct iommu_group *group)
 }
 EXPORT_SYMBOL_GPL(iommu_group_id);
 
+static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
+                           unsigned long *devfns);
+
 /*
  * To consider a PCI device isolated, we require ACS to support Source
  * Validation, Request Redirection, Completer Redirection, and Upstream
@@ -529,6 +533,86 @@ EXPORT_SYMBOL_GPL(iommu_group_id);
  */
 #define REQ_ACS_FLAGS    (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
 
+/*
+ * For multifunction devices which are not isolated from each other, find
+ * all the other non-isolated functions and look for existing groups.  For
+ * each function, we also need to look for aliases to or from other devices
+ * that may already have a group.
+ */
+static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
+                            unsigned long *devfns)
+{
+    struct pci_dev *tmp = NULL;
+    struct iommu_group *group;
+
+    if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
+        return NULL;
+
+    for_each_pci_dev(tmp) {
+        if (tmp == pdev || tmp->bus != pdev->bus ||
+            PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
+            pci_acs_enabled(tmp, REQ_ACS_FLAGS))
+            continue;
+
+        group = get_pci_alias_group(tmp, devfns);
+        if (group) {
+            pci_dev_put(tmp);
+            return group;
+        }
+    }
+
+    return NULL;
+}
+
+/*
+ * Look for aliases to or from the given device for exisiting groups.  The
+ * dma_alias_devfn only supports aliases on the same bus, therefore the search
+ * space is quite small (especially since we're really only looking at pcie
+ * device, and therefore only expect multiple slots on the root complex or
+ * downstream switch ports).  It's conceivable though that a pair of
+ * multifunction devices could have aliases between them that would cause a
+ * loop.  To prevent this, we use a bitmap to track where we've been.
+ */
+static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
+                           unsigned long *devfns)
+{
+    struct pci_dev *tmp = NULL;
+    struct iommu_group *group;
+
+    if (test_and_set_bit(pdev->devfn & 0xff, devfns))
+        return NULL;
+
+    group = iommu_group_get(&pdev->dev);
+    if (group)
+        return group;
+
+    for_each_pci_dev(tmp) {
+        if (tmp == pdev || tmp->bus != pdev->bus)
+            continue;
+
+        /* We alias them or they alias us */
+        if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
+             pdev->dma_alias_devfn == tmp->devfn) ||
+            ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
+             tmp->dma_alias_devfn == pdev->devfn)) {
+
+            group = get_pci_alias_group(tmp, devfns);
+            if (group) {
+                pci_dev_put(tmp);
+                return group;
+            }
+
+            group = get_pci_function_alias_group(tmp, devfns);
+            if (group) {
+                pci_dev_put(tmp);
+                return group;
+            }
+        }
+    }
+
+    return NULL;
+}
+
 struct group_for_pci_data {
     struct pci_dev *pdev;
     struct iommu_group *group;
@@ -557,7 +641,7 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
     struct group_for_pci_data data;
     struct pci_bus *bus;
     struct iommu_group *group = NULL;
-    struct pci_dev *tmp;
+    u64 devfns[4] = { 0 };
 
     /*
      * Find the upstream DMA alias for the device.  A device must not
@@ -591,76 +675,21 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
     }
 
     /*
-     * Next we need to consider DMA alias quirks.  If one device aliases
-     * to another, they should be grouped together.  It's theoretically
-     * possible that aliases could create chains of devices where each
-     * device aliases another device.  If we then factor in multifunction
-     * ACS grouping requirements, each alias could incorporate a new slot
-     * with multiple functions, each with aliases.  This is all extremely
-     * unlikely as DMA alias quirks are typically only used for PCIe
-     * devices where we usually have a single slot per bus.  Furthermore,
-     * the alias quirk is usually to another function within the slot
-     * (and ACS multifunction is not supported) or to a different slot
-     * that doesn't physically exist.  The likely scenario is therefore
-     * that everything on the bus gets grouped together.  To reduce the
-     * problem space, share the IOMMU group for all devices on the bus
-     * if a DMA alias quirk is present on the bus.
+     * Look for existing groups on device aliases.  If we alias another
+     * device or another device aliases us, use the same group.
      */
-    tmp = NULL;
-    for_each_pci_dev(tmp) {
-        if (tmp->bus != pdev->bus ||
-            !(tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN))
-            continue;
-
-        pci_dev_put(tmp);
-        tmp = NULL;
-
-        /* We have an alias quirk, search for an existing group */
-        for_each_pci_dev(tmp) {
-            struct iommu_group *group_tmp;
-
-            if (tmp->bus != pdev->bus)
-                continue;
-
-            group_tmp = iommu_group_get(&tmp->dev);
-            if (!group) {
-                group = group_tmp;
-                continue;
-            }
-
-            if (group_tmp) {
-                WARN_ON(group != group_tmp);
-                iommu_group_put(group_tmp);
-            }
-        }
-
-        return group ? group : iommu_group_alloc();
-    }
+    group = get_pci_alias_group(pdev, (unsigned long *)devfns);
+    if (group)
+        return group;
 
     /*
-     * Non-multifunction devices or multifunction devices supporting
-     * ACS get their own group.
+     * Look for existing groups on non-isolated functions on the same
+     * slot and aliases of those funcions, if any.  No need to clear
+     * the search bitmap, the tested devfns are still valid.
      */
-    if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
-        return iommu_group_alloc();
-
-    /*
-     * Multifunction devices not supporting ACS share a group with other
-     * similar devices in the same slot.
-     */
-    tmp = NULL;
-    for_each_pci_dev(tmp) {
-        if (tmp == pdev || tmp->bus != pdev->bus ||
-            PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
-            pci_acs_enabled(tmp, REQ_ACS_FLAGS))
-            continue;
-
-        group = iommu_group_get(&tmp->dev);
-        if (group) {
-            pci_dev_put(tmp);
-            return group;
-        }
-    }
+    group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
+    if (group)
+        return group;
 
+    /* No shared group found, allocate new */
     return iommu_group_alloc();
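
u64 devfns[4] above is a 256-bit visited set, one bit per possible devfn on a single bus (32 slots x 8 functions), which is why get_pci_alias_group() can mask with devfn & 0xff and why the mutual recursion between the two helpers must terminate. A small sketch of the idiom:

#include <linux/bitops.h>

/* 4 x 64 = 256 bits: every (slot, function) a PCI bus can carry. */
static int toy_visit_devfn(unsigned long *devfns, unsigned int devfn)
{
    if (test_and_set_bit(devfn & 0xff, devfns))
        return 0; /* already visited: recursion stops here */

    /* ...search this devfn's aliases, possibly recursing... */
    return 1;
}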
@@ -770,18 +799,26 @@ static int iommu_bus_notifier(struct notifier_block *nb,
     return 0;
 }
 
-static struct notifier_block iommu_bus_nb = {
-    .notifier_call = iommu_bus_notifier,
-};
-
-static void iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
+static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
 {
+    int err;
+    struct notifier_block *nb;
     struct iommu_callback_data cb = {
         .ops = ops,
     };
 
-    bus_register_notifier(bus, &iommu_bus_nb);
-    bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
+    nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
+    if (!nb)
+        return -ENOMEM;
+
+    nb->notifier_call = iommu_bus_notifier;
+
+    err = bus_register_notifier(bus, nb);
+    if (err) {
+        kfree(nb);
+        return err;
+    }
+    return bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
 }
 
 /**
@@ -805,9 +842,7 @@ int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
     bus->iommu_ops = ops;
 
     /* Do IOMMU specific setup for this bus-type */
-    iommu_bus_init(bus, ops);
-
-    return 0;
+    return iommu_bus_init(bus, ops);
 }
 EXPORT_SYMBOL_GPL(bus_set_iommu);
 
@@ -817,6 +852,15 @@ bool iommu_present(struct bus_type *bus)
 }
 EXPORT_SYMBOL_GPL(iommu_present);
 
+bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
+{
+    if (!bus->iommu_ops || !bus->iommu_ops->capable)
+        return false;
+
+    return bus->iommu_ops->capable(cap);
+}
+EXPORT_SYMBOL_GPL(iommu_capable);
+
 /**
  * iommu_set_fault_handler() - set a fault handler for an iommu domain
  * @domain: iommu domain
@@ -947,16 +991,6 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
 }
 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
 
-int iommu_domain_has_cap(struct iommu_domain *domain,
-             unsigned long cap)
-{
-    if (unlikely(domain->ops->domain_has_cap == NULL))
-        return 0;
-
-    return domain->ops->domain_has_cap(domain, cap);
-}
-EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
-
 static size_t iommu_pgsize(struct iommu_domain *domain,
                unsigned long addr_merge, size_t size)
 {
@@ -12,6 +12,7 @@
 #include <asm/processor.h>
 #include <asm/x86_init.h>
 #include <asm/apic.h>
+#include <asm/hpet.h>
 
 #include "irq_remapping.h"
 
@@ -345,10 +346,16 @@ static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
 
 int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
 {
-    if (!remap_ops || !remap_ops->setup_hpet_msi)
+    int ret;
+
+    if (!remap_ops || !remap_ops->alloc_hpet_msi)
         return -ENODEV;
 
-    return remap_ops->setup_hpet_msi(irq, id);
+    ret = remap_ops->alloc_hpet_msi(irq, id);
+    if (ret)
+        return -EINVAL;
+
+    return default_setup_hpet_msi(irq, id);
 }
 
 void panic_if_irq_remap(const char *msg)
@@ -80,7 +80,7 @@ struct irq_remap_ops {
     int (*msi_setup_irq)(struct pci_dev *, unsigned int, int, int);
 
     /* Setup interrupt remapping for an HPET MSI */
-    int (*setup_hpet_msi)(unsigned int, unsigned int);
+    int (*alloc_hpet_msi)(unsigned int, unsigned int);
 };
 
 extern struct irq_remap_ops intel_irq_remap_ops;
@@ -603,10 +603,9 @@ fail:
     return ret;
 }
 
-static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
-                    unsigned long cap)
+static bool msm_iommu_capable(enum iommu_cap cap)
 {
-    return 0;
+    return false;
 }
 
 static void print_ctx_regs(void __iomem *base, int ctx)
@@ -675,6 +674,7 @@ fail:
 }
 
 static const struct iommu_ops msm_iommu_ops = {
+    .capable = msm_iommu_capable,
     .domain_init = msm_iommu_domain_init,
     .domain_destroy = msm_iommu_domain_destroy,
     .attach_dev = msm_iommu_attach_dev,
@@ -682,7 +682,6 @@ static const struct iommu_ops msm_iommu_ops = {
 	.map = msm_iommu_map,
 	.unmap = msm_iommu_unmap,
 	.iova_to_phys = msm_iommu_iova_to_phys,
-	.domain_has_cap = msm_iommu_domain_has_cap,
 	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 };
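The same mechanical conversion, replacing a domain-bound domain_has_cap callback with a domain-less ->capable() hook, repeats below for the Tegra GART and SMMU drivers. For a driver that does support a capability, the pattern would look like this sketch (illustrative, not from this commit):

static bool example_iommu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;	/* assumption: hardware enforces coherent DMA */
	default:
		return false;
	}
}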
@@ -26,6 +26,7 @@
 #include <linux/of.h>
 #include <linux/of_iommu.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>

 #include <asm/cacheflush.h>
@@ -892,19 +893,11 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
 		goto err_enable;
 	flush_iotlb_all(obj);

-	if (!try_module_get(obj->owner)) {
-		err = -ENODEV;
-		goto err_module;
-	}
-
 	spin_unlock(&obj->iommu_lock);

 	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
 	return obj;

-err_module:
-	if (obj->refcount == 1)
-		iommu_disable(obj);
 err_enable:
 	obj->refcount--;
 	spin_unlock(&obj->iommu_lock);
@@ -925,8 +918,6 @@ static void omap_iommu_detach(struct omap_iommu *obj)
 	if (--obj->refcount == 0)
 		iommu_disable(obj);

-	module_put(obj->owner);
-
 	obj->iopgd = NULL;

 	spin_unlock(&obj->iommu_lock);
@@ -1006,7 +997,7 @@ static int omap_iommu_remove(struct platform_device *pdev)
 	return 0;
 }

-static struct of_device_id omap_iommu_of_match[] = {
+static const struct of_device_id omap_iommu_of_match[] = {
 	{ .compatible = "ti,omap2-iommu" },
 	{ .compatible = "ti,omap4-iommu" },
 	{ .compatible = "ti,dra7-iommu" },
@@ -1091,6 +1082,11 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
 	int ret = 0;

+	if (!arch_data || !arch_data->name) {
+		dev_err(dev, "device doesn't have an associated iommu\n");
+		return -EINVAL;
+	}
+
 	spin_lock(&omap_domain->lock);

 	/* only a single device is supported per domain for now */
@@ -1239,6 +1235,7 @@ static int omap_iommu_add_device(struct device *dev)
 {
 	struct omap_iommu_arch_data *arch_data;
 	struct device_node *np;
+	struct platform_device *pdev;

 	/*
 	 * Allocate the archdata iommu structure for DT-based devices.
@@ -1253,13 +1250,19 @@ static int omap_iommu_add_device(struct device *dev)
 	if (!np)
 		return 0;

+	pdev = of_find_device_by_node(np);
+	if (WARN_ON(!pdev)) {
+		of_node_put(np);
+		return -EINVAL;
+	}
+
 	arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
 	if (!arch_data) {
 		of_node_put(np);
 		return -ENOMEM;
 	}

-	arch_data->name = kstrdup(dev_name(dev), GFP_KERNEL);
+	arch_data->name = kstrdup(dev_name(&pdev->dev), GFP_KERNEL);
 	dev->archdata.iommu = arch_data;

 	of_node_put(np);
@@ -28,7 +28,6 @@ struct iotlb_entry {

 struct omap_iommu {
 	const char *name;
-	struct module *owner;
 	void __iomem *regbase;
 	struct device *dev;
 	void *isr_priv;
@@ -303,13 +303,13 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
 	return pa;
 }

-static int gart_iommu_domain_has_cap(struct iommu_domain *domain,
-				     unsigned long cap)
+static bool gart_iommu_capable(enum iommu_cap cap)
 {
-	return 0;
+	return false;
 }

 static const struct iommu_ops gart_iommu_ops = {
+	.capable = gart_iommu_capable,
 	.domain_init = gart_iommu_domain_init,
 	.domain_destroy = gart_iommu_domain_destroy,
 	.attach_dev = gart_iommu_attach_dev,
@@ -317,7 +317,6 @@ static const struct iommu_ops gart_iommu_ops = {
 	.map = gart_iommu_map,
 	.unmap = gart_iommu_unmap,
 	.iova_to_phys = gart_iommu_iova_to_phys,
-	.domain_has_cap = gart_iommu_domain_has_cap,
 	.pgsize_bitmap = GART_IOMMU_PGSIZES,
 };
@@ -416,7 +415,7 @@ static const struct dev_pm_ops tegra_gart_pm_ops = {
 	.resume = tegra_gart_resume,
 };

-static struct of_device_id tegra_gart_of_match[] = {
+static const struct of_device_id tegra_gart_of_match[] = {
 	{ .compatible = "nvidia,tegra20-gart", },
 	{ },
 };
@@ -780,10 +780,9 @@ static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
 	return PFN_PHYS(pfn);
 }

-static int smmu_iommu_domain_has_cap(struct iommu_domain *domain,
-				     unsigned long cap)
+static bool smmu_iommu_capable(enum iommu_cap cap)
 {
-	return 0;
+	return false;
 }

 static int smmu_iommu_attach_dev(struct iommu_domain *domain,
@@ -949,6 +948,7 @@ static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
 }

 static const struct iommu_ops smmu_iommu_ops = {
+	.capable = smmu_iommu_capable,
 	.domain_init = smmu_iommu_domain_init,
 	.domain_destroy = smmu_iommu_domain_destroy,
 	.attach_dev = smmu_iommu_attach_dev,
@@ -956,7 +956,6 @@ static const struct iommu_ops smmu_iommu_ops = {
 	.map = smmu_iommu_map,
 	.unmap = smmu_iommu_unmap,
 	.iova_to_phys = smmu_iommu_iova_to_phys,
-	.domain_has_cap = smmu_iommu_domain_has_cap,
 	.pgsize_bitmap = SMMU_IOMMU_PGSIZES,
 };
@@ -1260,7 +1259,7 @@ static const struct dev_pm_ops tegra_smmu_pm_ops = {
 	.resume = tegra_smmu_resume,
 };

-static struct of_device_id tegra_smmu_of_match[] = {
+static const struct of_device_id tegra_smmu_of_match[] = {
 	{ .compatible = "nvidia,tegra30-smmu", },
 	{ },
 };
@@ -713,14 +713,14 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	list_add(&group->next, &domain->group_list);

 	if (!allow_unsafe_interrupts &&
-	    !iommu_domain_has_cap(domain->domain, IOMMU_CAP_INTR_REMAP)) {
+	    !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
 		pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
 		       __func__);
 		ret = -EPERM;
 		goto out_detach;
 	}

-	if (iommu_domain_has_cap(domain->domain, IOMMU_CAP_CACHE_COHERENCY))
+	if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
 		domain->prot |= IOMMU_CACHE;

 	/*
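Note that the converted VFIO code passes a bus pointer to iommu_capable(); the hunk shown here does not include how that bus is obtained, so it must come from the devices in the group elsewhere in the function. One hedged way to recover it with the existing group iterator (example_find_bus is hypothetical):

static int example_find_bus(struct device *dev, void *data)
{
	struct bus_type **bus = data;

	*bus = dev->bus;	/* devices in one group share a bus here */
	return 0;
}

/* usage: iommu_group_for_each_dev(iommu_group, &bus, example_find_bus); */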
@@ -181,13 +181,14 @@ extern int bus_unregister_notifier(struct bus_type *bus,
  * with the device lock held in the core, so be careful.
  */
 #define BUS_NOTIFY_ADD_DEVICE		0x00000001 /* device added */
-#define BUS_NOTIFY_DEL_DEVICE		0x00000002 /* device removed */
-#define BUS_NOTIFY_BIND_DRIVER		0x00000003 /* driver about to be
+#define BUS_NOTIFY_DEL_DEVICE		0x00000002 /* device to be removed */
+#define BUS_NOTIFY_REMOVED_DEVICE	0x00000003 /* device removed */
+#define BUS_NOTIFY_BIND_DRIVER		0x00000004 /* driver about to be
 						      bound */
-#define BUS_NOTIFY_BOUND_DRIVER		0x00000004 /* driver bound to device */
-#define BUS_NOTIFY_UNBIND_DRIVER	0x00000005 /* driver about to be
+#define BUS_NOTIFY_BOUND_DRIVER		0x00000005 /* driver bound to device */
+#define BUS_NOTIFY_UNBIND_DRIVER	0x00000006 /* driver about to be
 						      unbound */
-#define BUS_NOTIFY_UNBOUND_DRIVER	0x00000006 /* driver is unbound
+#define BUS_NOTIFY_UNBOUND_DRIVER	0x00000007 /* driver is unbound
 						      from the device */

 extern struct kset *bus_get_kset(struct bus_type *bus);
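BUS_NOTIFY_DEL_DEVICE now fires before removal and the new BUS_NOTIFY_REMOVED_DEVICE afterwards, which is why every later event value shifts up by one. A sketch of a bus notifier distinguishing the two phases (illustrative only):

#include <linux/device.h>
#include <linux/notifier.h>

static int example_bus_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_DEL_DEVICE:
		/* device is about to go away but is still registered */
		dev_dbg(dev, "about to be removed\n");
		break;
	case BUS_NOTIFY_REMOVED_DEVICE:
		/* device is gone; tear down per-device bookkeeping */
		dev_dbg(dev, "removed\n");
		break;
	}

	return NOTIFY_OK;
}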
@@ -56,13 +56,19 @@ struct dmar_drhd_unit {
 	struct intel_iommu *iommu;
 };

+struct dmar_pci_path {
+	u8 bus;
+	u8 device;
+	u8 function;
+};
+
 struct dmar_pci_notify_info {
 	struct pci_dev *dev;
 	unsigned long event;
 	int bus;
 	u16 seg;
 	u16 level;
-	struct acpi_dmar_pci_path path[];
+	struct dmar_pci_path path[];
 } __attribute__((packed));

 extern struct rw_semaphore dmar_global_lock;
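The new struct dmar_pci_path decouples the notify-info path array from the ACPI-defined acpi_dmar_pci_path layout and, as defined above, records a bus number alongside device and function for each hop. A minimal sketch of a consumer walking the path (illustrative, not from this commit):

static void example_dump_path(const struct dmar_pci_notify_info *info)
{
	int i;

	for (i = 0; i < info->level; i++)
		pr_info("hop %d: %02x:%02x.%x\n", i,
			info->path[i].bus, info->path[i].device,
			info->path[i].function);
}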
@@ -57,8 +57,11 @@ struct iommu_domain {
 	struct iommu_domain_geometry geometry;
 };

-#define IOMMU_CAP_CACHE_COHERENCY	0x1
-#define IOMMU_CAP_INTR_REMAP		0x2	/* isolates device intrs */
+enum iommu_cap {
+	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
+					   transactions */
+	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
+};

 /*
  * Following constraints are specifc to FSL_PAMUV1:
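Semantically, the capabilities change from bit-mask #defines (0x1, 0x2) to plain enum values (0, 1, ...), so they must be tested one at a time through iommu_capable() rather than OR'd or masked together. A small grounded sketch:

static bool example_dma_is_coherent(struct bus_type *bus)
{
	/* enum values are compared individually, not combined as flags */
	return iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY);
}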
@@ -94,7 +97,6 @@ enum iommu_attr {
  * @map: map a physically contiguous memory region to an iommu domain
  * @unmap: unmap a physically contiguous memory region from an iommu domain
  * @iova_to_phys: translate iova to physical address
- * @domain_has_cap: domain capabilities query
  * @add_device: add device to iommu grouping
  * @remove_device: remove device from iommu grouping
  * @domain_get_attr: Query domain attributes
@@ -102,6 +104,7 @@ enum iommu_attr {
  * @pgsize_bitmap: bitmap of supported page sizes
  */
 struct iommu_ops {
+	bool (*capable)(enum iommu_cap);
 	int (*domain_init)(struct iommu_domain *domain);
 	void (*domain_destroy)(struct iommu_domain *domain);
 	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
@@ -111,8 +114,6 @@ struct iommu_ops {
 	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
 		     size_t size);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
-	int (*domain_has_cap)(struct iommu_domain *domain,
-			      unsigned long cap);
 	int (*add_device)(struct device *dev);
 	void (*remove_device)(struct device *dev);
 	int (*device_group)(struct device *dev, unsigned int *groupid);
@@ -142,6 +143,7 @@ struct iommu_ops {

 extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
 extern bool iommu_present(struct bus_type *bus);
+extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
 extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
 extern struct iommu_group *iommu_group_get_by_id(int id);
 extern void iommu_domain_free(struct iommu_domain *domain);
@@ -154,8 +156,6 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 		       size_t size);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
-extern int iommu_domain_has_cap(struct iommu_domain *domain,
-				unsigned long cap);
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
 			iommu_fault_handler_t handler, void *token);

@@ -250,6 +250,11 @@ static inline bool iommu_present(struct bus_type *bus)
 	return false;
 }

+static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
+{
+	return false;
+}
+
 static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
 {
 	return NULL;
@@ -304,12 +309,6 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
 	return 0;
 }

-static inline int iommu_domain_has_cap(struct iommu_domain *domain,
-				       unsigned long cap)
-{
-	return 0;
-}
-
 static inline void iommu_set_fault_handler(struct iommu_domain *domain,
 				iommu_fault_handler_t handler, void *token)
 {
@@ -191,8 +191,7 @@ int kvm_assign_device(struct kvm *kvm,
 		return r;
 	}

-	noncoherent = !iommu_domain_has_cap(kvm->arch.iommu_domain,
-					    IOMMU_CAP_CACHE_COHERENCY);
+	noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);

 	/* Check if need to update IOMMU page table for guest memory */
 	if (noncoherent != kvm->arch.iommu_noncoherent) {
@@ -254,8 +253,7 @@ int kvm_iommu_map_guest(struct kvm *kvm)
 	}

 	if (!allow_unsafe_assigned_interrupts &&
-	    !iommu_domain_has_cap(kvm->arch.iommu_domain,
-				  IOMMU_CAP_INTR_REMAP)) {
+	    !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
 		printk(KERN_WARNING "%s: No interrupt remapping support,"
 		       " disallowing device assignment."
 		       " Re-enble with \"allow_unsafe_assigned_interrupts=1\""