dma-buf: add dma_resv_replace_fences v2
This function allows replacing fences in the shared fence list when we can guarantee that the operation represented by the original fence has either finished, or no longer accesses the resources protected by the dma_resv object, by the time the new fence finishes.

Then use this function in the amdkfd code when BOs are unmapped from the process.

v2: add an example of when this is useful.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220321135856.1331-1-christian.koenig@amd.com
Parent: f30bceab16
Commit: 548e7432dc
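To make the intended call pattern concrete before the diff itself: a driver that has unmapped a buffer and holds the page table update fence can replace the fences of the old context with it, as the commit message describes. The sketch below is a minimal, hypothetical illustration only; the function name, the preemption-fence context and the surrounding driver code are assumptions, not part of this patch.

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Hypothetical example: "pt_fence" is the page table update fence that makes
 * the buffer inaccessible; once it signals, the operation behind the fences
 * of "preempt_ctx" can no longer touch the resource, so they may be replaced.
 */
static void example_replace_after_unmap(struct dma_resv *resv,
					uint64_t preempt_ctx,
					struct dma_fence *pt_fence)
{
	if (dma_resv_lock(resv, NULL))
		return;

	/* Replaces every fence of preempt_ctx, exclusive and shared */
	dma_resv_replace_fences(resv, preempt_ctx, pt_fence);
	dma_resv_unlock(resv);
}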
@@ -289,6 +289,51 @@ replace:
 }
 EXPORT_SYMBOL(dma_resv_add_shared_fence);
 
+/**
+ * dma_resv_replace_fences - replace fences in the dma_resv obj
+ * @obj: the reservation object
+ * @context: the context of the fences to replace
+ * @replacement: the new fence to use instead
+ *
+ * Replace fences with a specified context with a new fence. Only valid if the
+ * operation represented by the original fence has no longer access to the
+ * resources represented by the dma_resv object when the new fence completes.
+ *
+ * And example for using this is replacing a preemption fence with a page table
+ * update fence which makes the resource inaccessible.
+ */
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+			     struct dma_fence *replacement)
+{
+	struct dma_resv_list *list;
+	struct dma_fence *old;
+	unsigned int i;
+
+	dma_resv_assert_held(obj);
+
+	write_seqcount_begin(&obj->seq);
+
+	old = dma_resv_excl_fence(obj);
+	if (old->context == context) {
+		RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
+		dma_fence_put(old);
+	}
+
+	list = dma_resv_shared_list(obj);
+	for (i = 0; list && i < list->shared_count; ++i) {
+		old = rcu_dereference_protected(list->shared[i],
+						dma_resv_held(obj));
+		if (old->context != context)
+			continue;
+
+		rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
+		dma_fence_put(old);
+	}
+
+	write_seqcount_end(&obj->seq);
+}
+EXPORT_SYMBOL(dma_resv_replace_fences);
+
 /**
  * dma_resv_add_excl_fence - Add an exclusive fence.
  * @obj: the reservation object
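The amdkfd hunk that follows applies exactly the pattern documented in the kerneldoc above, with one twist: since the BO is being unmapped from the process there is no replacement operation at all, so the already-signaled stub fence is used. A minimal, hypothetical sketch of that variant (the helper name is an assumption, not from this patch):

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Hypothetical helper: drop all fences of "context" from "resv" by replacing
 * them with the always-signaled stub fence. Only valid when the operation
 * behind those fences can no longer access the protected resource.
 */
static void example_drop_context_fences(struct dma_resv *resv, uint64_t context)
{
	struct dma_fence *stub = dma_fence_get_stub();

	dma_resv_assert_held(resv);	/* reservation lock must already be held */
	dma_resv_replace_fences(resv, context, stub);
	dma_fence_put(stub);		/* replace_fences takes its own references */
}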
@@ -253,53 +253,18 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 					struct amdgpu_amdkfd_fence *ef)
 {
-	struct dma_resv *resv = bo->tbo.base.resv;
-	struct dma_resv_list *old, *new;
-	unsigned int i, j, k;
+	struct dma_fence *replacement;
 
 	if (!ef)
 		return -EINVAL;
 
-	old = dma_resv_shared_list(resv);
-	if (!old)
-		return 0;
-
-	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
-	if (!new)
-		return -ENOMEM;
-
-	/* Go through all the shared fences in the resevation object and sort
-	 * the interesting ones to the end of the list.
+	/* TODO: Instead of block before we should use the fence of the page
+	 * table update and TLB flush here directly.
 	 */
-	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
-		struct dma_fence *f;
-
-		f = rcu_dereference_protected(old->shared[i],
-					      dma_resv_held(resv));
-
-		if (f->context == ef->base.context)
-			RCU_INIT_POINTER(new->shared[--j], f);
-		else
-			RCU_INIT_POINTER(new->shared[k++], f);
-	}
-	new->shared_max = old->shared_max;
-	new->shared_count = k;
-
-	/* Install the new fence list, seqcount provides the barriers */
-	write_seqcount_begin(&resv->seq);
-	RCU_INIT_POINTER(resv->fence, new);
-	write_seqcount_end(&resv->seq);
-
-	/* Drop the references to the removed fences or move them to ef_list */
-	for (i = j; i < old->shared_count; ++i) {
-		struct dma_fence *f;
-
-		f = rcu_dereference_protected(new->shared[i],
-					      dma_resv_held(resv));
-		dma_fence_put(f);
-	}
-	kfree_rcu(old, rcu);
-
+	replacement = dma_fence_get_stub();
+	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
+				replacement);
+	dma_fence_put(replacement);
 	return 0;
 }
 
@@ -468,6 +468,8 @@ void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
 void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+			     struct dma_fence *fence);
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
 int dma_resv_get_fences(struct dma_resv *obj, bool write,
 			unsigned int *num_fences, struct dma_fence ***fences);