iommu/io-pgtable-arm: Call ->tlb_flush_walk() and ->tlb_flush_leaf()
Now that all IOMMU drivers using the io-pgtable API implement the ->tlb_flush_walk() and ->tlb_flush_leaf() callbacks, we can use them in the io-pgtable code instead of ->tlb_add_flush() immediately followed by ->tlb_sync().

Signed-off-by: Will Deacon <will@kernel.org>
Parent: 05aed9412b
Commit: 10b7a7d912
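For context, the change collapses the old two-step pattern (a deferred invalidation via ->tlb_add_flush() immediately followed by ->tlb_sync()) into a single synchronous callback, with the leaf/walk distinction expressed by which helper is called rather than by a bool argument. A minimal sketch of the before/after calling convention as seen from the page-table code; the surrounding variables (iop, iova, size, granule) stand in for whatever the caller has at hand and are illustrative only:

	/* Before: queue an invalidation, then wait for it to complete. */
	io_pgtable_tlb_add_flush(iop, iova, size, granule, false);
	io_pgtable_tlb_sync(iop);

	/*
	 * After: one synchronous call. The _walk variant replaces calls that
	 * passed leaf == false (invalidating freed table walks) ...
	 */
	io_pgtable_tlb_flush_walk(iop, iova, size, granule);

	/* ... and the _leaf variant replaces calls that passed leaf == true. */
	io_pgtable_tlb_flush_leaf(iop, iova, size, granule);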
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -493,9 +493,8 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
 	 * a chance for anything to kick off a table walk for the new iova.
 	 */
 	if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
-		io_pgtable_tlb_add_flush(iop, iova, size,
-					 ARM_V7S_BLOCK_SIZE(2), false);
-		io_pgtable_tlb_sync(iop);
+		io_pgtable_tlb_flush_walk(iop, iova, size,
+					  ARM_V7S_BLOCK_SIZE(2));
 	} else {
 		wmb();
 	}
@@ -541,8 +540,7 @@ static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
 	__arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);
 
 	size *= ARM_V7S_CONT_PAGES;
-	io_pgtable_tlb_add_flush(iop, iova, size, size, true);
-	io_pgtable_tlb_sync(iop);
+	io_pgtable_tlb_flush_leaf(iop, iova, size, size);
 	return pte;
 }
 
@@ -637,9 +635,8 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
 	for (i = 0; i < num_entries; i++) {
 		if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) {
 			/* Also flush any partial walks */
-			io_pgtable_tlb_add_flush(iop, iova, blk_size,
-				ARM_V7S_BLOCK_SIZE(lvl + 1), false);
-			io_pgtable_tlb_sync(iop);
+			io_pgtable_tlb_flush_walk(iop, iova, blk_size,
+						  ARM_V7S_BLOCK_SIZE(lvl + 1));
 			ptep = iopte_deref(pte[i], lvl);
 			__arm_v7s_free_table(ptep, lvl + 1, data);
 		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
@@ -805,13 +802,19 @@ static void dummy_tlb_flush_all(void *cookie)
 	WARN_ON(cookie != cfg_cookie);
 }
 
-static void dummy_tlb_add_flush(unsigned long iova, size_t size,
-				size_t granule, bool leaf, void *cookie)
+static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
+			    void *cookie)
 {
 	WARN_ON(cookie != cfg_cookie);
 	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
 }
 
+static void dummy_tlb_add_flush(unsigned long iova, size_t size,
+				size_t granule, bool leaf, void *cookie)
+{
+	dummy_tlb_flush(iova, size, granule, cookie);
+}
+
 static void dummy_tlb_sync(void *cookie)
 {
 	WARN_ON(cookie != cfg_cookie);
@@ -819,6 +822,8 @@ static void dummy_tlb_sync(void *cookie)
 
 static const struct iommu_flush_ops dummy_tlb_ops = {
 	.tlb_flush_all	= dummy_tlb_flush_all,
+	.tlb_flush_walk	= dummy_tlb_flush,
+	.tlb_flush_leaf	= dummy_tlb_flush,
 	.tlb_add_flush	= dummy_tlb_add_flush,
 	.tlb_sync	= dummy_tlb_sync,
 };
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -611,9 +611,8 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 
 		if (!iopte_leaf(pte, lvl, iop->fmt)) {
 			/* Also flush any partial walks */
-			io_pgtable_tlb_add_flush(iop, iova, size,
-						 ARM_LPAE_GRANULE(data), false);
-			io_pgtable_tlb_sync(iop);
+			io_pgtable_tlb_flush_walk(iop, iova, size,
+						  ARM_LPAE_GRANULE(data));
 			ptep = iopte_deref(pte, data);
 			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
 		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
@@ -1069,13 +1068,19 @@ static void dummy_tlb_flush_all(void *cookie)
 	WARN_ON(cookie != cfg_cookie);
 }
 
-static void dummy_tlb_add_flush(unsigned long iova, size_t size,
-				size_t granule, bool leaf, void *cookie)
+static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
+			    void *cookie)
 {
 	WARN_ON(cookie != cfg_cookie);
 	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
 }
 
+static void dummy_tlb_add_flush(unsigned long iova, size_t size,
+				size_t granule, bool leaf, void *cookie)
+{
+	dummy_tlb_flush(iova, size, granule, cookie);
+}
+
 static void dummy_tlb_sync(void *cookie)
 {
 	WARN_ON(cookie != cfg_cookie);
@@ -1083,6 +1088,8 @@ static void dummy_tlb_sync(void *cookie)
 
 static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
 	.tlb_flush_all	= dummy_tlb_flush_all,
+	.tlb_flush_walk	= dummy_tlb_flush,
+	.tlb_flush_leaf	= dummy_tlb_flush,
 	.tlb_add_flush	= dummy_tlb_add_flush,
 	.tlb_sync	= dummy_tlb_sync,
 };
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -198,6 +198,20 @@ static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
 	iop->cfg.tlb->tlb_flush_all(iop->cookie);
 }
 
+static inline void
+io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
+			  size_t size, size_t granule)
+{
+	iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
+}
+
+static inline void
+io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
+			  size_t size, size_t granule)
+{
+	iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);
+}
+
 static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
 		unsigned long iova, size_t size, size_t granule, bool leaf)
 {
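On the driver side, the selftest dummy_tlb_ops above already shows the expected shape: when the hardware has no reason to treat freed table walks differently from leaf entries, both new callbacks can point at a single range-invalidation routine. A minimal sketch along those lines; the my_* names are hypothetical and not part of this patch:

	#include <linux/io-pgtable.h>

	/* Hypothetical driver-side wiring; names are illustrative only. */
	static void my_tlb_inv_all(void *cookie)
	{
		/* Invalidate every TLB entry for the domain behind 'cookie'. */
	}

	static void my_tlb_inv_range(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
	{
		/*
		 * Invalidate [iova, iova + size) in granule-sized strides and
		 * wait for completion before returning: unlike the old
		 * tlb_add_flush()/tlb_sync() pair, these callbacks are
		 * synchronous.
		 */
	}

	static const struct iommu_flush_ops my_tlb_ops = {
		.tlb_flush_all	= my_tlb_inv_all,
		.tlb_flush_walk	= my_tlb_inv_range,
		.tlb_flush_leaf	= my_tlb_inv_range,
		/*
		 * .tlb_add_flush and .tlb_sync still exist at this point in
		 * the series and would be wired up as before.
		 */
	};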