drm/nouveau/gem: tie deferred unmapping of buffers to VMA fence completion

As VMAs are per-client, unlike buffers, this allows us to avoid referencing
foreign fences (those that belong to another client/driver) from the client
deferred work handler, and prevent some not-fun race conditions that can be
triggered when a fence stalls.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Ben Skeggs 2018-05-08 20:39:47 +10:00
Parent 0db912af8f
Commit 470db8b781
1 changed file with 2 additions and 15 deletions


@@ -115,25 +115,12 @@ nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
 static void
 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 {
-	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
-	struct reservation_object *resv = nvbo->bo.resv;
-	struct reservation_object_list *fobj;
+	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
 	struct nouveau_gem_object_unmap *work;
-	struct dma_fence *fence = NULL;
-
-	fobj = reservation_object_get_list(resv);
 
 	list_del_init(&vma->head);
 
-	if (fobj && fobj->shared_count > 1)
-		ttm_bo_wait(&nvbo->bo, false, false);
-	else if (fobj && fobj->shared_count == 1)
-		fence = rcu_dereference_protected(fobj->shared[0],
-						reservation_object_held(resv));
-	else
-		fence = reservation_object_get_excl(nvbo->bo.resv);
-
-	if (!fence || !mapped) {
+	if (!fence) {
 		nouveau_gem_object_delete(vma);
 		return;
 	}
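
For context, the fence consumed above has to be attached to the VMA on the submission side. Below is a minimal sketch of what that producer could look like; the helper name and call site are assumptions for illustration (the real wiring sits in nouveau's pushbuf validation path and is not part of this hunk), while nouveau_fence_unref() and dma_fence_get() are existing APIs and vma->fence matches the field used in the diff.

#include "nouveau_vmm.h"   /* struct nouveau_vma (assumed to carry a ->fence after this series) */
#include "nouveau_fence.h" /* struct nouveau_fence, nouveau_fence_unref() */
#include <linux/dma-fence.h>

/* Sketch only: attach the submitting client's fence to its VMA so that
 * nouveau_gem_object_unmap() can defer the unmap until it signals. */
static void
sketch_vma_attach_fence(struct nouveau_vma *vma, struct nouveau_fence *fence)
{
	/* Drop the fence from the previous submission touching this VMA. */
	nouveau_fence_unref(&vma->fence);

	/* Hold a reference for the deferred-unmap work queued later. */
	dma_fence_get(&fence->base);
	vma->fence = fence;
}

Because the fence stored in vma->fence was emitted by the VMA's own client, the deferred-unmap work keyed off it never has to reference a fence owned by another client or driver, which is what avoids the stall-related races described in the commit message.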