Merge branch 'for-joerg/arm-smmu/updates' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/smmu
Commit acf7f76b64
@@ -1644,6 +1644,12 @@
		nobypass	[PPC/POWERNV]
			Disable IOMMU bypass, using IOMMU for PCI devices.

	iommu.passthrough=
			[ARM64] Configure DMA to bypass the IOMMU by default.
			Format: { "0" | "1" }
			0 - Use IOMMU translation for DMA.
			1 - Bypass the IOMMU for DMA.
			unset - Use IOMMU translation for DMA.

	io7=		[HW] IO7 for Marvel based alpha systems
			See comment before marvel_specify_io7 in
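As an illustration of the new option (everything other than iommu.passthrough below is a placeholder, not taken from this patch), an arm64 kernel booted with

	console=ttyAMA0 root=/dev/vda1 rw iommu.passthrough=1

gets identity (bypass) default domains for newly probed devices, while leaving the option unset keeps translated DMA domains, matching the table above.
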
@@ -60,6 +60,17 @@ conditions.
                  aliases of secure registers have to be used during
                  SMMU configuration.

- stream-match-mask : For SMMUs supporting stream matching and using
                  #iommu-cells = <1>, specifies a mask of bits to ignore
                  when matching stream IDs (e.g. this may be programmed
                  into the SMRn.MASK field of every stream match register
                  used), for cases where it is desirable to ignore some
                  portion of every Stream ID (e.g. for certain MMU-500
                  configurations given globally unique input IDs). This
                  property is not valid for SMMUs using stream indexing,
                  or using stream matching with #iommu-cells = <2>, and
                  may be ignored if present in such cases.

** Deprecated properties:

- mmu-masters (deprecated in favour of the generic "iommus" binding) :
@@ -109,3 +120,20 @@ conditions.
        master3 {
                iommus = <&smmu2 1 0x30>;
        };


        /* ARM MMU-500 with 10-bit stream ID input configuration */
        smmu3: iommu {
                compatible = "arm,mmu-500", "arm,smmu-v2";
                ...
                #iommu-cells = <1>;
                /* always ignore appended 5-bit TBU number */
                stream-match-mask = <0x7c00>;
        };

        bus {
                /* bus whose child devices emit one unique 10-bit stream
                   ID each, but may master through multiple SMMU TBUs */
                iommu-map = <0 &smmu3 0 0x400>;
                ...
        };
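The mask follows the semantics of the SMMU stream match registers: a bit set in SMR.MASK is ignored when comparing the incoming Stream ID against the programmed SMR.ID, so 0x7c00 hides the appended 5-bit TBU number in bits [14:10]. A minimal standalone sketch of that comparison (illustrative only, not driver code; the helper name is made up):

        #include <stdbool.h>
        #include <stdint.h>

        /* An incoming StreamID matches when every bit not covered by the
         * mask equals the programmed ID; masked bits are "don't care". */
        static bool smr_matches(uint16_t sid, uint16_t smr_id, uint16_t smr_mask)
        {
                return ((sid ^ smr_id) & ~smr_mask) == 0;
        }
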
@@ -554,9 +554,14 @@ struct arm_smmu_s2_cfg {
};

struct arm_smmu_strtab_ent {
        bool                            valid;

        bool                            bypass; /* Overrides s1/s2 config */
        /*
         * An STE is "assigned" if the master emitting the corresponding SID
         * is attached to a domain. The behaviour of an unassigned STE is
         * determined by the disable_bypass parameter, whereas an assigned
         * STE behaves according to s1_cfg/s2_cfg, which themselves are
         * configured according to the domain type.
         */
        bool                            assigned;
        struct arm_smmu_s1_cfg          *s1_cfg;
        struct arm_smmu_s2_cfg          *s2_cfg;
};

@@ -632,6 +637,7 @@ enum arm_smmu_domain_stage {
        ARM_SMMU_DOMAIN_S1 = 0,
        ARM_SMMU_DOMAIN_S2,
        ARM_SMMU_DOMAIN_NESTED,
        ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
@@ -1005,9 +1011,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
         * This is hideously complicated, but we only really care about
         * three cases at the moment:
         *
         * 1. Invalid (all zero) -> bypass (init)
         * 2. Bypass -> translation (attach)
         * 3. Translation -> bypass (detach)
         * 1. Invalid (all zero) -> bypass/fault (init)
         * 2. Bypass/fault -> translation/bypass (attach)
         * 3. Translation/bypass -> bypass/fault (detach)
         *
         * Given that we can't update the STE atomically and the SMMU
         * doesn't read the thing in a defined order, that leaves us

@@ -1046,11 +1052,15 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
        }

        /* Nuke the existing STE_0 value, as we're going to rewrite it */
        val = ste->valid ? STRTAB_STE_0_V : 0;
        val = STRTAB_STE_0_V;

        /* Bypass/fault */
        if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
                if (!ste->assigned && disable_bypass)
                        val |= STRTAB_STE_0_CFG_ABORT;
                else
                        val |= STRTAB_STE_0_CFG_BYPASS;

        if (ste->bypass) {
                val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
                                      : STRTAB_STE_0_CFG_BYPASS;
                dst[0] = cpu_to_le64(val);
                dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
                         << STRTAB_STE_1_SHCFG_SHIFT);

@@ -1111,10 +1121,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
        unsigned int i;
        struct arm_smmu_strtab_ent ste = {
                .valid  = true,
                .bypass = true,
        };
        struct arm_smmu_strtab_ent ste = { .assigned = false };

        for (i = 0; i < nent; ++i) {
                arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
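The comment and code above boil down to a three-way choice per STE. A simplified standalone sketch of that decision (the enum and helper below are illustrative and not part of the patch):

        enum ste_cfg { STE_ABORT, STE_BYPASS, STE_TRANSLATE };

        /* Unassigned STEs follow the disable_bypass policy; assigned STEs
         * translate when a stage-1 or stage-2 config is present, otherwise
         * they belong to an identity domain and bypass. */
        static enum ste_cfg pick_ste_cfg(bool assigned, bool have_cfg,
                                         bool disable_bypass)
        {
                if (!assigned)
                        return disable_bypass ? STE_ABORT : STE_BYPASS;
                return have_cfg ? STE_TRANSLATE : STE_BYPASS;
        }
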
@@ -1378,7 +1385,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
        struct arm_smmu_domain *smmu_domain;

        if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
        if (type != IOMMU_DOMAIN_UNMANAGED &&
            type != IOMMU_DOMAIN_DMA &&
            type != IOMMU_DOMAIN_IDENTITY)
                return NULL;

        /*

@@ -1509,6 +1518,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;

        if (domain->type == IOMMU_DOMAIN_IDENTITY) {
                smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
                return 0;
        }

        /* Restrict the stage to what we can actually support */
        if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
                smmu_domain->stage = ARM_SMMU_DOMAIN_S2;

@@ -1579,7 +1593,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
        return step;
}

static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
        int i;
        struct arm_smmu_master_data *master = fwspec->iommu_priv;
@@ -1591,17 +1605,14 @@ static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)

                arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
        }

        return 0;
}

static void arm_smmu_detach_dev(struct device *dev)
{
        struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;

        master->ste.bypass = true;
        if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
                dev_warn(dev, "failed to install bypass STE\n");
        master->ste.assigned = false;
        arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)

@@ -1620,7 +1631,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
        ste = &master->ste;

        /* Already attached to a different domain? */
        if (!ste->bypass)
        if (ste->assigned)
                arm_smmu_detach_dev(dev);

        mutex_lock(&smmu_domain->init_mutex);

@@ -1641,10 +1652,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
                goto out_unlock;
        }

        ste->bypass = false;
        ste->valid = true;
        ste->assigned = true;

        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
                ste->s1_cfg = NULL;
                ste->s2_cfg = NULL;
        } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                ste->s1_cfg = &smmu_domain->s1_cfg;
                ste->s2_cfg = NULL;
                arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);

@@ -1653,10 +1666,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
                ste->s2_cfg = &smmu_domain->s2_cfg;
        }

        ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
        if (ret < 0)
                ste->valid = false;

        arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
out_unlock:
        mutex_unlock(&smmu_domain->init_mutex);
        return ret;
@@ -1807,7 +1817,7 @@ static void arm_smmu_remove_device(struct device *dev)

        master = fwspec->iommu_priv;
        smmu = master->smmu;
        if (master && master->ste.valid)
        if (master && master->ste.assigned)
                arm_smmu_detach_dev(dev);
        iommu_group_remove_device(dev);
        iommu_device_unlink(&smmu->iommu, dev);

@@ -1837,6 +1847,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        switch (attr) {
        case DOMAIN_ATTR_NESTING:
                *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);

@@ -1852,6 +1865,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
        int ret = 0;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        mutex_lock(&smmu_domain->init_mutex);

        switch (attr) {
@@ -162,6 +162,7 @@
#define ARM_SMMU_GR0_sTLBGSTATUS        0x74
#define sTLBGSTATUS_GSACTIVE            (1 << 0)
#define TLB_LOOP_TIMEOUT                1000000 /* 1s! */
#define TLB_SPIN_COUNT                  10

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)             (0x800 + ((n) << 2))

@@ -216,8 +217,7 @@ enum arm_smmu_s2cr_privcfg {
#define CBA2R_VMID_MASK                 0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)          ((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)            ((n) * (1 << (smmu)->pgshift))
#define ARM_SMMU_CB(smmu, n)            ((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR               0x0
#define ARM_SMMU_CB_ACTLR               0x4

@@ -238,6 +238,8 @@ enum arm_smmu_s2cr_privcfg {
#define ARM_SMMU_CB_S1_TLBIVAL          0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2        0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L       0x638
#define ARM_SMMU_CB_TLBSYNC             0x7f0
#define ARM_SMMU_CB_TLBSTATUS           0x7f4
#define ARM_SMMU_CB_ATS1PR              0x800
#define ARM_SMMU_CB_ATSR                0x8f0

@@ -344,7 +346,7 @@ struct arm_smmu_device {
        struct device                   *dev;

        void __iomem                    *base;
        unsigned long                   size;
        void __iomem                    *cb_base;
        unsigned long                   pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK     (1 << 0)

@@ -404,18 +406,20 @@ enum arm_smmu_context_fmt {
struct arm_smmu_cfg {
        u8                              cbndx;
        u8                              irptndx;
        union {
                u16                     asid;
                u16                     vmid;
        };
        u32                             cbar;
        enum arm_smmu_context_fmt       fmt;
};
#define INVALID_IRPTNDX                 0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
        ARM_SMMU_DOMAIN_S1 = 0,
        ARM_SMMU_DOMAIN_S2,
        ARM_SMMU_DOMAIN_NESTED,
        ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
@@ -569,49 +573,67 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
                                void __iomem *sync, void __iomem *status)
{
        int count = 0;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        unsigned int spin_cnt, delay;

        writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
        while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
               & sTLBGSTATUS_GSACTIVE) {
                cpu_relax();
                if (++count == TLB_LOOP_TIMEOUT) {
                        dev_err_ratelimited(smmu->dev,
                        "TLB sync timed out -- SMMU may be deadlocked\n");
                        return;
        writel_relaxed(0, sync);
        for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
                for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
                        if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
                                return;
                        cpu_relax();
                }
                udelay(1);
                udelay(delay);
        }
        dev_err_ratelimited(smmu->dev,
                            "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync(void *cookie)
static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
        void __iomem *base = ARM_SMMU_GR0(smmu);

        __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
                            base + ARM_SMMU_GR0_sTLBGSTATUS);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        __arm_smmu_tlb_sync(smmu_domain->smmu);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);

        __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
                            base + ARM_SMMU_CB_TLBSTATUS);
}

static void arm_smmu_tlb_inv_context(void *cookie)
static void arm_smmu_tlb_sync_vmid(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;

        arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

        writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
        arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
        void __iomem *base;
        void __iomem *base = ARM_SMMU_GR0(smmu);

        if (stage1) {
                base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
                writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
                               base + ARM_SMMU_CB_S1_TLBIASID);
        } else {
                base = ARM_SMMU_GR0(smmu);
                writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
                               base + ARM_SMMU_GR0_TLBIVMID);
        }

        __arm_smmu_tlb_sync(smmu);
        writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
        arm_smmu_tlb_sync_global(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
@@ -619,31 +641,28 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
        void __iomem *reg;
        void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

        if (stage1) {
                reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
                reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

                if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
                        iova &= ~12UL;
                        iova |= ARM_SMMU_CB_ASID(smmu, cfg);
                        iova |= cfg->asid;
                        do {
                                writel_relaxed(iova, reg);
                                iova += granule;
                        } while (size -= granule);
                } else {
                        iova >>= 12;
                        iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
                        iova |= (u64)cfg->asid << 48;
                        do {
                                writeq_relaxed(iova, reg);
                                iova += granule >> 12;
                        } while (size -= granule);
                }
        } else if (smmu->version == ARM_SMMU_V2) {
                reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
        } else {
                reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
                              ARM_SMMU_CB_S2_TLBIIPAS2;
                iova >>= 12;

@@ -651,16 +670,40 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
                        smmu_write_atomic_lq(iova, reg);
                        iova += granule >> 12;
                } while (size -= granule);
        } else {
                reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
                writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
        }
}

static const struct iommu_gather_ops arm_smmu_gather_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context,
/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
                                         size_t granule, bool leaf, void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

        writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context_s1,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync       = arm_smmu_tlb_sync,
        .tlb_sync       = arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync       = arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
        .tlb_add_flush  = arm_smmu_tlb_inv_vmid_nosync,
        .tlb_sync       = arm_smmu_tlb_sync_vmid,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -673,7 +716,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *cb_base;

        cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
        cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
        fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

        if (!(fsr & FSR_FAULT))

@@ -726,7 +769,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,

        gr1_base = ARM_SMMU_GR1(smmu);
        stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
        cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
        cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);

        if (smmu->version > ARM_SMMU_V1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)

@@ -735,7 +778,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
                        reg = CBA2R_RW64_32BIT;
                /* 16-bit VMIDs live in CBA2R */
                if (smmu->features & ARM_SMMU_FEAT_VMID16)
                        reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
                        reg |= cfg->vmid << CBA2R_VMID_SHIFT;

                writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
        }

@@ -754,34 +797,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
                        (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
        } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
                /* 8-bit VMIDs live in CBAR */
                reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
                reg |= cfg->vmid << CBAR_VMID_SHIFT;
        }
        writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

        /* TTBRs */
        if (stage1) {
                u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);

                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
                        reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
                        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
                        reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
                        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
                        writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
                } else {
                        reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
                        reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
                        writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
                        reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
                        reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
                        writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
                }
        } else {
                reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
                writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
        }

        /* TTBCR */
        /*
         * TTBCR
         * We must write this before the TTBRs, since it determines the
         * access behaviour of some fields (in particular, ASID[15:8]).
         */
        if (stage1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
                        reg = pgtbl_cfg->arm_v7s_cfg.tcr;

@@ -800,6 +824,27 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
        }
        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

        /* TTBRs */
        if (stage1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
                        reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
                        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
                        reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
                        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
                        writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
                } else {
                        reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
                        reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
                        writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
                        reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
                        reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
                        writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
                }
        } else {
                reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
                writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
        }

        /* MAIRs (stage-1 only) */
        if (stage1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
@@ -833,11 +878,18 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        enum io_pgtable_fmt fmt;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        const struct iommu_gather_ops *tlb_ops;

        mutex_lock(&smmu_domain->init_mutex);
        if (smmu_domain->smmu)
                goto out_unlock;

        if (domain->type == IOMMU_DOMAIN_IDENTITY) {
                smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
                smmu_domain->smmu = smmu;
                goto out_unlock;
        }

        /*
         * Mapping the requested stage onto what we support is surprisingly
         * complicated, mainly because the spec allows S1+S2 SMMUs without

@@ -904,6 +956,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                        ias = min(ias, 32UL);
                        oas = min(oas, 32UL);
                }
                tlb_ops = &arm_smmu_s1_tlb_ops;
                break;
        case ARM_SMMU_DOMAIN_NESTED:
                /*

@@ -922,12 +975,15 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                        ias = min(ias, 40UL);
                        oas = min(oas, 40UL);
                }
                if (smmu->version == ARM_SMMU_V2)
                        tlb_ops = &arm_smmu_s2_tlb_ops_v2;
                else
                        tlb_ops = &arm_smmu_s2_tlb_ops_v1;
                break;
        default:
                ret = -EINVAL;
                goto out_unlock;
        }

        ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
                                      smmu->num_context_banks);
        if (ret < 0)

@@ -941,11 +997,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                cfg->irptndx = cfg->cbndx;
        }

        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
                cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
        else
                cfg->asid = cfg->cbndx + smmu->cavium_id_base;

        pgtbl_cfg = (struct io_pgtable_cfg) {
                .pgsize_bitmap  = smmu->pgsize_bitmap,
                .ias            = ias,
                .oas            = oas,
                .tlb            = &arm_smmu_gather_ops,
                .tlb            = tlb_ops,
                .iommu_dev      = smmu->dev,
        };
@@ -998,14 +1059,14 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
        void __iomem *cb_base;
        int irq;

        if (!smmu)
        if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
                return;

        /*
         * Disable the context bank and free the page tables before freeing
         * it.
         */
        cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
        cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
        writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

        if (cfg->irptndx != INVALID_IRPTNDX) {

@@ -1021,7 +1082,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
        struct arm_smmu_domain *smmu_domain;

        if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
        if (type != IOMMU_DOMAIN_UNMANAGED &&
            type != IOMMU_DOMAIN_DMA &&
            type != IOMMU_DOMAIN_IDENTITY)
                return NULL;
        /*
         * Allocate the domain and initialise some of its data structures.

@@ -1250,10 +1313,15 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
{
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_s2cr *s2cr = smmu->s2crs;
        enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
        u8 cbndx = smmu_domain->cfg.cbndx;
        enum arm_smmu_s2cr_type type;
        int i, idx;

        if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
                type = S2CR_TYPE_BYPASS;
        else
                type = S2CR_TYPE_TRANS;

        for_each_cfg_sme(fwspec, i, idx) {
                if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
                        continue;
@@ -1356,7 +1424,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
        u64 phys;
        unsigned long va;

        cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
        cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);

        /* ATS1 registers can only be written atomically */
        va = iova & ~0xfffUL;

@@ -1549,6 +1617,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        switch (attr) {
        case DOMAIN_ATTR_NESTING:
                *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);

@@ -1564,6 +1635,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
        int ret = 0;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        mutex_lock(&smmu_domain->init_mutex);

        switch (attr) {

@@ -1590,13 +1664,15 @@ out_unlock:

static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
        u32 fwid = 0;
        u32 mask, fwid = 0;

        if (args->args_count > 0)
                fwid |= (u16)args->args[0];

        if (args->args_count > 1)
                fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
        else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
                fwid |= (u16)mask << SMR_MASK_SHIFT;

        return iommu_fwspec_add_ids(dev, &fwid, 1);
}
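For reference, the 16-bit Stream ID and 16-bit mask end up packed into a single 32-bit firmware ID (assuming SMR_MASK_SHIFT is 16, as in this driver's header); a standalone illustration, with a made-up helper name:

        #include <stdint.h>

        /* Low half carries the StreamID; high half carries the SMR mask taken
         * either from the second "iommus" cell or from stream-match-mask. */
        static uint32_t pack_fwid(uint16_t sid, uint16_t mask)
        {
                return (uint32_t)sid | ((uint32_t)mask << 16);
        }
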
@@ -1683,7 +1759,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)

        /* Make sure all context banks are disabled and clear CB_FSR */
        for (i = 0; i < smmu->num_context_banks; ++i) {
                cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
                cb_base = ARM_SMMU_CB(smmu, i);
                writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
                writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
                /*

@@ -1729,7 +1805,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
                reg |= sCR0_EXIDENABLE;

        /* Push the button */
        __arm_smmu_tlb_sync(smmu);
        arm_smmu_tlb_sync_global(smmu);
        writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

@@ -1863,11 +1939,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)

        /* Check for size mismatch of SMMU address space from mapped region */
        size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
        size *= 2 << smmu->pgshift;
        if (smmu->size != size)
        size <<= smmu->pgshift;
        if (smmu->cb_base != gr0_base + size)
                dev_warn(smmu->dev,
                        "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
                        size, smmu->size);
                        "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
                        size * 2, (smmu->cb_base - gr0_base) * 2);

        smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
        smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;

@@ -1887,6 +1963,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                        atomic_add_return(smmu->num_context_banks,
                                          &cavium_smmu_context_count);
                smmu->cavium_id_base -= smmu->num_context_banks;
                dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
        }

        /* ID2 */
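A worked illustration of the resulting numbering (instance sizes assumed for the example, not taken from the patch): with two Cavium SMMUs of 128 context banks each, the first instance gets cavium_id_base = 0 and the second 128, so context bank 5 of the second SMMU ends up with ASID 133 (or VMID 134 at stage 2) and never aliases an ID allocated by the first SMMU, which is what the erratum 27704 workaround requires.
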
@@ -2103,7 +2180,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
        smmu->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(smmu->base))
                return PTR_ERR(smmu->base);
        smmu->size = resource_size(res);
        smmu->cb_base = smmu->base + resource_size(res) / 2;

        num_irqs = 0;
        while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
@@ -74,7 +74,7 @@

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))

/* Page table bits */
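Why the 1ULL matters, as a worked example (granule chosen for illustration): with 8-byte PTEs, ilog2(sizeof(arm_lpae_iopte)) is 3, and a 4K granule gives bits_per_level = 9, so with ARM_LPAE_MAX_LEVELS = 4 the level-0 shift is 3 + 4 * 9 = 39. Evaluating 1 << 39 overflows a 32-bit int, whereas 1ULL << 39 yields the intended 2^39 bytes.
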
@@ -36,6 +36,7 @@

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;

struct iommu_callback_data {
        const struct iommu_ops *ops;

@@ -112,6 +113,18 @@ static int __iommu_attach_group(struct iommu_domain *domain,
static void __iommu_detach_group(struct iommu_domain *domain,
                                 struct iommu_group *group);

static int __init iommu_set_def_domain_type(char *str)
{
        bool pt;

        if (!str || strtobool(str, &pt))
                return -EINVAL;

        iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
        return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
                                     struct attribute *__attr, char *buf)
{
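Tying this back to the documentation hunk at the top, the handler above resolves the boot option as follows (an illustration of the code, not additional behaviour):

	iommu.passthrough=1  ->  iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY
	iommu.passthrough=0  ->  iommu_def_domain_type = IOMMU_DOMAIN_DMA
	(unset)              ->  IOMMU_DOMAIN_DMA (the static initialiser)

Any value strtobool() rejects makes the handler return -EINVAL and leaves the default untouched.
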
@@ -1015,10 +1028,19 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
         * IOMMU driver.
         */
        if (!group->default_domain) {
                group->default_domain = __iommu_domain_alloc(dev->bus,
                                                             IOMMU_DOMAIN_DMA);
                struct iommu_domain *dom;

                dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
                if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
                        dev_warn(dev,
                                 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
                                 iommu_def_domain_type);
                        dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
                }

                group->default_domain = dom;
                if (!group->domain)
                        group->domain = group->default_domain;
                        group->domain = dom;
        }

        ret = iommu_group_add_device(group, dev);
@@ -32,10 +32,13 @@
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * This is to make the IOMMU API setup privileged
 * mappings accessible by the master only at higher
 * privileged execution level and inaccessible at
 * less privileged levels.
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
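A hypothetical caller-side sketch of the flag (the helper, domain, iova, paddr and the SZ_4K size are made-up values for illustration; iommu_map() and the permission flags are the kernel IOMMU API shown above):

        #include <linux/iommu.h>
        #include <linux/sizes.h>

        /* Request a 4K mapping that only privileged ("supervisor") transactions
         * from the master may read or write; unprivileged accesses get as
         * little access as the page-table format allows. */
        static int map_priv_page(struct iommu_domain *domain, unsigned long iova,
                                 phys_addr_t paddr)
        {
                return iommu_map(domain, iova, paddr, SZ_4K,
                                 IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
        }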