iommu/arm-smmu: Ensure that page-table updates are visible before TLBI
The IO-pgtable code relies on the driver TLB invalidation callbacks to
ensure that all page-table updates are visible to the IOMMU page-table
walker.
When the page-table walker is cache-coherent, we cannot rely on an
implicit DSB from the DMA-mapping code, so we must execute a DSB in our
tlb_add_flush() callback before triggering the invalidation.
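As a rough illustration of the ordering problem (a minimal sketch with
made-up names, not the driver's actual code), the page-table update is a
store to Normal memory while the TLBI is a relaxed MMIO write to Device
memory; without an intervening barrier the SMMU may observe the TLBI
before the updated descriptor and walk stale data:

	/* Sketch only: unmap_then_invalidate(), ptep, tlbi_reg and iova
	 * are illustrative, not part of the driver. */
	static void unmap_then_invalidate(u64 *ptep, void __iomem *tlbi_reg,
					  unsigned long iova)
	{
		WRITE_ONCE(*ptep, 0);		/* clear descriptor in Normal memory */
		wmb();				/* make the update visible to the walker... */
		writel_relaxed(iova, tlbi_reg);	/* ...before the TLBI is issued over MMIO */
	}

On arm/arm64, wmb() is implemented with a DSB, which is sufficient to
order the Normal-memory store against the subsequent writel_relaxed()
to the SMMU registers.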
Cc: <stable@vger.kernel.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Fixes: 2df7a25ce4 ("iommu/arm-smmu: Clean up DMA API usage")
Signed-off-by: Will Deacon <will.deacon@arm.com>
Parent: 07fdef34d2
Commit: 7d321bd354
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -469,6 +469,9 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
 	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
 
+	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+		wmb();
+
 	if (stage1) {
 		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
@@ -510,6 +513,9 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
 	struct arm_smmu_domain *smmu_domain = cookie;
 	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
 
+	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+		wmb();
+
 	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
 }