RDMA/odp: Iterate over the whole rbtree directly

Instead of intersecting a full interval, just iterate over every element
directly. This is faster and clearer.

Link: https://lore.kernel.org/r/20190819111710.18440-3-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Jason Gunthorpe 2019-08-19 14:17:00 +03:00
Parent: 7cc2e18f21
Commit: f993de88a5
2 changed files, 41 additions and 42 deletions
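For context, the patch replaces the interval-tree range helper (called over the full range 0..ULLONG_MAX with a callback and cookie) with a plain in-order walk of the cached rbtree. Below is a minimal sketch of that walk against the kernel's <linux/rbtree.h> API; struct my_node and visit_all() are hypothetical stand-ins for struct ib_umem_odp and the driver code, not the patch itself.

	#include <linux/rbtree.h>
	#include <linux/types.h>

	/* Hypothetical element type; in the patch this role is played by
	 * struct ib_umem_odp via its interval_tree.rb node. */
	struct my_node {
		struct rb_node rb;
		u64 start;
		u64 last;
	};

	/* Visit every element with a plain in-order walk of the cached
	 * rbtree; no per-node interval intersection is performed. */
	static void visit_all(struct rb_root_cached *root)
	{
		struct rb_node *node;

		for (node = rb_first_cached(root); node; node = rb_next(node)) {
			struct my_node *e = rb_entry(node, struct my_node, rb);

			/* ... act on e ... */
		}
	}

Compared with rbt_ib_umem_for_each_in_range(&tree, 0, ULLONG_MAX, cb, ...), this drops the intersection tests and the callback/cookie indirection, which is what the commit message means by faster and clearer.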

drivers/infiniband/core/umem_odp.c

@@ -72,31 +72,34 @@ static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
 	mutex_unlock(&umem_odp->umem_mutex);
 }

-static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
-					       u64 start, u64 end, void *cookie)
-{
-	/*
-	 * Increase the number of notifiers running, to
-	 * prevent any further fault handling on this MR.
-	 */
-	ib_umem_notifier_start_account(umem_odp);
-	complete_all(&umem_odp->notifier_completion);
-	umem_odp->umem.context->invalidate_range(
-		umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));
-	return 0;
-}
-
 static void ib_umem_notifier_release(struct mmu_notifier *mn,
 				     struct mm_struct *mm)
 {
 	struct ib_ucontext_per_mm *per_mm =
 		container_of(mn, struct ib_ucontext_per_mm, mn);
+	struct rb_node *node;

 	down_read(&per_mm->umem_rwsem);
-	if (per_mm->active)
-		rbt_ib_umem_for_each_in_range(
-			&per_mm->umem_tree, 0, ULLONG_MAX,
-			ib_umem_notifier_release_trampoline, true, NULL);
+	if (!per_mm->active)
+		goto out;
+
+	for (node = rb_first_cached(&per_mm->umem_tree); node;
+	     node = rb_next(node)) {
+		struct ib_umem_odp *umem_odp =
+			rb_entry(node, struct ib_umem_odp, interval_tree.rb);
+
+		/*
+		 * Increase the number of notifiers running, to prevent any
+		 * further fault handling on this MR.
+		 */
+		ib_umem_notifier_start_account(umem_odp);
+		complete_all(&umem_odp->notifier_completion);
+		umem_odp->umem.context->invalidate_range(
+			umem_odp, ib_umem_start(umem_odp),
+			ib_umem_end(umem_odp));
+	}
+
+out:
 	up_read(&per_mm->umem_rwsem);
 }
@@ -756,4 +759,3 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
 	return ret_val;
 }
-EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);

drivers/infiniband/hw/mlx5/odp.c

@@ -539,34 +539,31 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
 	return imr;
 }

-static int mr_leaf_free(struct ib_umem_odp *umem_odp, u64 start, u64 end,
-			void *cookie)
-{
-	struct mlx5_ib_mr *mr = umem_odp->private, *imr = cookie;
-
-	if (mr->parent != imr)
-		return 0;
-
-	ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
-				    ib_umem_end(umem_odp));
-
-	if (umem_odp->dying)
-		return 0;
-
-	WRITE_ONCE(umem_odp->dying, 1);
-	atomic_inc(&imr->num_leaf_free);
-	schedule_work(&umem_odp->work);
-
-	return 0;
-}
-
 void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 {
 	struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
+	struct rb_node *node;

 	down_read(&per_mm->umem_rwsem);
-	rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, 0, ULLONG_MAX,
-				      mr_leaf_free, true, imr);
+	for (node = rb_first_cached(&per_mm->umem_tree); node;
+	     node = rb_next(node)) {
+		struct ib_umem_odp *umem_odp =
+			rb_entry(node, struct ib_umem_odp, interval_tree.rb);
+		struct mlx5_ib_mr *mr = umem_odp->private;
+
+		if (mr->parent != imr)
+			continue;
+
+		ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+					    ib_umem_end(umem_odp));
+
+		if (umem_odp->dying)
+			continue;
+
+		WRITE_ONCE(umem_odp->dying, 1);
+		atomic_inc(&imr->num_leaf_free);
+		schedule_work(&umem_odp->work);
+	}
 	up_read(&per_mm->umem_rwsem);

 	wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));