iommu/arm-smmu: Remove .tlb_inv_range indirection
Fill in 'native' iommu_flush_ops callbacks for all the
arm_smmu_flush_ops variants, and clear up the remains of the previous
.tlb_inv_range abstraction.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
Parent: 54ecb8f702
Commit: 3f3b8d0c9c
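Before the diff, a quick orientation: previously the shared .tlb_flush_walk/.tlb_flush_leaf/.tlb_add_page callbacks looked up smmu_domain->flush_ops->tlb_inv_range at runtime and forwarded to it; with this patch each arm_smmu_flush_ops variant gets its own callbacks that call the stage-specific range invalidation directly. The following is a minimal, self-contained user-space sketch of that pattern only, with stubbed types and hypothetical names (tlb_inv_range_s1, s1_tlb_ops, struct flush_ops), not the actual driver code:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the driver's stage-1 helpers (illustration only). */
static void tlb_inv_range_s1(unsigned long iova, size_t size,
                             size_t granule, void *cookie, bool leaf)
{
	(void)cookie;
	printf("S1 inval: iova=%#lx size=%zu granule=%zu leaf=%d\n",
	       iova, size, granule, leaf);
}

static void tlb_sync_context(void *cookie)
{
	(void)cookie;
	printf("sync\n");
}

/* "Native" callbacks: one thin wrapper per stage, no runtime indirection. */
static void tlb_inv_walk_s1(unsigned long iova, size_t size,
                            size_t granule, void *cookie)
{
	tlb_inv_range_s1(iova, size, granule, cookie, false);
	tlb_sync_context(cookie);
}

static void tlb_add_page_s1(unsigned long iova, size_t granule, void *cookie)
{
	tlb_inv_range_s1(iova, granule, granule, cookie, true);
}

/* The per-variant ops table points straight at the native callbacks. */
struct flush_ops {
	void (*tlb_flush_walk)(unsigned long iova, size_t size,
	                       size_t granule, void *cookie);
	void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie);
};

static const struct flush_ops s1_tlb_ops = {
	.tlb_flush_walk = tlb_inv_walk_s1,
	.tlb_add_page   = tlb_add_page_s1,
};

int main(void)
{
	s1_tlb_ops.tlb_flush_walk(0x1000, 0x2000, 0x1000, NULL);
	s1_tlb_ops.tlb_add_page(0x3000, 0x1000, NULL);
	return 0;
}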
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
@@ -312,7 +312,7 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
 }
 
 static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
-                                      size_t granule, bool leaf, void *cookie)
+                                      size_t granule, void *cookie, bool leaf)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -342,7 +342,7 @@ static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
 }
 
 static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
-                                      size_t granule, bool leaf, void *cookie)
+                                      size_t granule, void *cookie, bool leaf)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -362,14 +362,63 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
 	} while (size -= granule);
 }
 
+static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
+                                     size_t granule, void *cookie)
+{
+	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, false);
+	arm_smmu_tlb_sync_context(cookie);
+}
+
+static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
+                                     size_t granule, void *cookie)
+{
+	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, true);
+	arm_smmu_tlb_sync_context(cookie);
+}
+
+static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
+                                     unsigned long iova, size_t granule,
+                                     void *cookie)
+{
+	arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie, true);
+}
+
+static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
+                                     size_t granule, void *cookie)
+{
+	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, false);
+	arm_smmu_tlb_sync_context(cookie);
+}
+
+static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
+                                     size_t granule, void *cookie)
+{
+	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, true);
+	arm_smmu_tlb_sync_context(cookie);
+}
+
+static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
+                                     unsigned long iova, size_t granule,
+                                     void *cookie)
+{
+	arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie, true);
+}
+
+static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
+                                       size_t granule, void *cookie)
+{
+	arm_smmu_tlb_inv_context_s2(cookie);
+}
 /*
  * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
  * almost negligible, but the benefit of getting the first one in as far ahead
  * of the sync as possible is significant, hence we don't just make this a
- * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
+ * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
+ * think.
  */
-static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
-                                         size_t granule, bool leaf, void *cookie)
+static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
+                                        unsigned long iova, size_t granule,
+                                        void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -380,66 +429,33 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
 }
 
-static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
-                                  size_t granule, void *cookie)
-{
-	struct arm_smmu_domain *smmu_domain = cookie;
-	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
-
-	ops->tlb_inv_range(iova, size, granule, false, cookie);
-	ops->tlb_sync(cookie);
-}
-
-static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
-                                  size_t granule, void *cookie)
-{
-	struct arm_smmu_domain *smmu_domain = cookie;
-	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
-
-	ops->tlb_inv_range(iova, size, granule, true, cookie);
-	ops->tlb_sync(cookie);
-}
-
-static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
-                                  unsigned long iova, size_t granule,
-                                  void *cookie)
-{
-	struct arm_smmu_domain *smmu_domain = cookie;
-	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
-
-	ops->tlb_inv_range(iova, granule, granule, true, cookie);
-}
-
 static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
 	.tlb = {
 		.tlb_flush_all  = arm_smmu_tlb_inv_context_s1,
-		.tlb_flush_walk = arm_smmu_tlb_inv_walk,
-		.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
-		.tlb_add_page   = arm_smmu_tlb_add_page,
+		.tlb_flush_walk = arm_smmu_tlb_inv_walk_s1,
+		.tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1,
+		.tlb_add_page   = arm_smmu_tlb_add_page_s1,
 	},
-	.tlb_inv_range  = arm_smmu_tlb_inv_range_s1,
 	.tlb_sync       = arm_smmu_tlb_sync_context,
 };
 
 static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
 	.tlb = {
 		.tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
-		.tlb_flush_walk = arm_smmu_tlb_inv_walk,
-		.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
-		.tlb_add_page   = arm_smmu_tlb_add_page,
+		.tlb_flush_walk = arm_smmu_tlb_inv_walk_s2,
+		.tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2,
+		.tlb_add_page   = arm_smmu_tlb_add_page_s2,
 	},
-	.tlb_inv_range  = arm_smmu_tlb_inv_range_s2,
 	.tlb_sync       = arm_smmu_tlb_sync_context,
 };
 
 static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
 	.tlb = {
 		.tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
-		.tlb_flush_walk = arm_smmu_tlb_inv_walk,
-		.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
-		.tlb_add_page   = arm_smmu_tlb_add_page,
+		.tlb_flush_walk = arm_smmu_tlb_inv_any_s2_v1,
+		.tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1,
+		.tlb_add_page   = arm_smmu_tlb_add_page_s2_v1,
 	},
-	.tlb_inv_range  = arm_smmu_tlb_inv_vmid_nosync,
 	.tlb_sync       = arm_smmu_tlb_sync_vmid,
 };
 
diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h
@@ -306,8 +306,6 @@ enum arm_smmu_domain_stage {
 
 struct arm_smmu_flush_ops {
 	struct iommu_flush_ops  tlb;
-	void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
-	                      bool leaf, void *cookie);
 	void (*tlb_sync)(void *cookie);
 };
 