iommu/arm-smmu: Fix ATS1* register writes
The ATS1* address translation registers only support being written atomically - in SMMUv2 where they are 64 bits wide, 32-bit writes to the lower half are automatically zero-extended, whilst 32-bit writes to the upper half are ignored. Thus, the current logic of performing 64-bit writes as two 32-bit accesses is wrong.

Since we already limit IOVAs to 32 bits on 32-bit ARM, the lack of a suitable writeq() implementation there is not an issue, and we only need a little preprocessor ugliness to safely hide the 64-bit case.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Parent: ba155e2d21
Commit: 661d962f19
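To make the failure mode described above concrete before reading the diff, here is a minimal user-space sketch (not driver code) that models a 64-bit ATS1PR-style register with the semantics given in the commit message: a 32-bit store to the lower half is zero-extended, and a 32-bit store to the upper half is discarded. The register variable and helper names are invented for illustration only.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical model of an SMMUv2 ATS1PR-style register: a 32-bit
 * write to the low half is zero-extended to 64 bits, a 32-bit write
 * to the high half is ignored (per the commit message).
 */
static uint64_t ats1pr;

static void reg_write32_lo(uint32_t val)
{
	ats1pr = (uint64_t)val;	/* zero-extended: clobbers the high half */
}

static void reg_write32_hi(uint32_t val)
{
	(void)val;		/* writes to the upper half are ignored */
}

static void reg_write64(uint64_t val)
{
	ats1pr = val;		/* only a 64-bit store sets all the bits */
}

int main(void)
{
	uint64_t iova = 0x12345678A000ULL & ~0xfffULL;

	/* Old approach: two 32-bit writes - the high bits never land */
	reg_write32_lo(iova & 0xffffffff);
	reg_write32_hi(iova >> 32);
	printf("split writes: 0x%llx\n", (unsigned long long)ats1pr);

	/* Fixed approach: one 64-bit write */
	reg_write64(iova);
	printf("64-bit write:  0x%llx\n", (unsigned long long)ats1pr);

	return 0;
}

Running this shows the split-write path ending up with only the low 32 bits of the page-aligned address, while the single 64-bit store preserves the whole value - which is why the patch below switches to writeq_relaxed() where it is available.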
@@ -202,8 +202,7 @@
 #define ARM_SMMU_CB_S1_TLBIVAL		0x620
 #define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
 #define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
-#define ARM_SMMU_CB_ATS1PR_LO		0x800
-#define ARM_SMMU_CB_ATS1PR_HI		0x804
+#define ARM_SMMU_CB_ATS1PR		0x800
 #define ARM_SMMU_CB_ATSR		0x8f0
 
 #define SCTLR_S1_ASIDPNE		(1 << 12)

@@ -1229,18 +1228,18 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 	void __iomem *cb_base;
 	u32 tmp;
 	u64 phys;
+	unsigned long va;
 
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 
-	if (smmu->version == 1) {
-		u32 reg = iova & ~0xfff;
-		writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
-	} else {
-		u32 reg = iova & ~0xfff;
-		writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
-		reg = ((u64)iova & ~0xfff) >> 32;
-		writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_HI);
-	}
+	/* ATS1 registers can only be written atomically */
+	va = iova & ~0xfffUL;
+#ifdef CONFIG_64BIT
+	if (smmu->version == ARM_SMMU_V2)
+		writeq_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
+	else
+#endif
+		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
 
 	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
 				      !(tmp & ATSR_ACTIVE), 5, 50)) {
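The poll at the end of the hunk uses readl_poll_timeout_atomic() from <linux/iopoll.h>: it re-reads ATSR until ATSR_ACTIVE clears, delaying about 5 microseconds between reads and giving up after roughly 50 microseconds. A rough open-coded equivalent is sketched below; it is an illustration of the pattern, not the macro's actual expansion, and the helper name wait_for_ats1() is invented here.

/* Sketch only; assumes <linux/io.h>, <linux/delay.h>, <linux/errno.h>. */
static int wait_for_ats1(void __iomem *cb_base)
{
	int spin = 50 / 5;	/* ~50us total, re-read every ~5us */
	u32 tmp;

	while (spin--) {
		tmp = readl(cb_base + ARM_SMMU_CB_ATSR);
		if (!(tmp & ATSR_ACTIVE))
			return 0;	/* translation has completed */
		udelay(5);
	}
	return -ETIMEDOUT;		/* ATSR.ACTIVE never deasserted */
}

A non-zero return corresponds to the timeout case handled by the if-block that opens at the end of the hunk (the error path itself is outside the context shown).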