drm/i915: vma/ppgtt lifetime rules

VMAs should take a reference on the address space they use.

Now, when the fd is closed, it will release the ref that the context was
holding, but the ppgtt will still be referenced by any vmas that are
still active.

ppgtt_release() should then be called only when the last holder of a
reference drops it, at which point it can just call the base cleanup
and free the ppgtt.

Note that with this we will extend the lifetime of ppgtts which
contain shared objects. But all the non-shared objects will get
removed as soon as they drop off the active list, and for the shared
ones the shrinker can eventually reap them. Since we currently can't
evict ppgtt pagetables either, I don't think that temporary leak is
important.
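
For illustration, a minimal sketch of the rule being adopted, using the
kernel's kref API. The struct and helpers here are simplified stand-ins,
not the driver's actual structures:

	#include <linux/kref.h>
	#include <linux/slab.h>

	/* Simplified stand-in for i915_hw_ppgtt: one kref, shared by the
	 * context and by every vma bound into this address space.
	 */
	struct sketch_ppgtt {
		struct kref ref;
	};

	/* Runs only when the last reference is dropped; per the rule
	 * above it just does the base cleanup and frees the ppgtt.
	 */
	static void sketch_ppgtt_release(struct kref *kref)
	{
		struct sketch_ppgtt *ppgtt =
			container_of(kref, struct sketch_ppgtt, ref);

		/* base cleanup would happen here */
		kfree(ppgtt);
	}

	/* vma creation takes a ref on the address space... */
	static void sketch_vma_create(struct sketch_ppgtt *ppgtt)
	{
		kref_get(&ppgtt->ref);
	}

	/* ...and vma destruction (or fd close, for the context's ref)
	 * drops one; the release callback fires on the final put.
	 */
	static void sketch_vma_destroy(struct sketch_ppgtt *ppgtt)
	{
		kref_put(&ppgtt->ref, sketch_ppgtt_release);
	}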

Signed-off-by: Michel Thierry <michel.thierry@intel.com>
[danvet: Add note about potential ppgtt leak with this approach.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Michel Thierry, 2014-08-06 15:04:44 +02:00, committed by Daniel Vetter
Parent 14bf993e83
Commit b9d06dd9d1
4 changed files with 18 additions and 20 deletions


@@ -2547,7 +2547,9 @@ void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
 
 /* i915_gem_context.c */
 #define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
+#define vm_to_ppgtt(vm) container_of(vm, struct i915_hw_ppgtt, base)
 int __must_check i915_gem_context_init(struct drm_device *dev);
+void ppgtt_release(struct kref *kref);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
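
Declaring ppgtt_release() here pairs with the third hunk below, which
drops the function's static qualifier so that i915_gem_vma_destroy()
(second hunk) can pass it to kref_put() as the release callback.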


@@ -4476,12 +4476,20 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
+	struct i915_address_space *vm = NULL;
+	struct i915_hw_ppgtt *ppgtt = NULL;
 	WARN_ON(vma->node.allocated);
 
 	/* Keep the vma as a placeholder in the execbuffer reservation lists */
 	if (!list_empty(&vma->exec_list))
 		return;
 
+	vm = vma->vm;
+	ppgtt = vm_to_ppgtt(vm);
+
+	if (ppgtt)
+		kref_put(&ppgtt->ref, ppgtt_release);
+
 	list_del(&vma->vma_link);
 
 	kfree(vma);
 }
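
Note the ordering here: a vma still on an execbuffer reservation list
survives as a placeholder and keeps its ppgtt reference, so the put
happens only when the vma is finally torn down. This is what lets a
ppgtt outlive its fd, as described in the commit message, for as long
as active vmas still point at it.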


@@ -108,30 +108,13 @@ static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
 		return;
 	}
 
-	/*
-	 * Make sure vmas are unbound before we take down the drm_mm
-	 *
-	 * FIXME: Proper refcounting should take care of this, this shouldn't be
-	 * needed at all.
-	 */
-	if (!list_empty(&vm->active_list)) {
-		struct i915_vma *vma;
-
-		list_for_each_entry(vma, &vm->active_list, mm_list)
-			if (WARN_ON(list_empty(&vma->vma_link) ||
-				    list_is_singular(&vma->vma_link)))
-				break;
-
-		i915_gem_evict_vm(&ppgtt->base, true);
-	} else {
-		i915_gem_retire_requests(dev);
-		i915_gem_evict_vm(&ppgtt->base, false);
-	}
+	/* vmas should already be unbound */
+	WARN_ON(!list_empty(&vm->active_list));
 
 	ppgtt->base.cleanup(&ppgtt->base);
 }
 
-static void ppgtt_release(struct kref *kref)
+void ppgtt_release(struct kref *kref)
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(kref, struct i915_hw_ppgtt, ref);

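The hunk is truncated above. Going by the commit message ("just call
the base cleanup and free the ppgtt"), the remainder of ppgtt_release()
presumably reduces to something like the following sketch; the
do_ppgtt_cleanup() call and the kfree() are assumptions, not shown in
this excerpt:

	void ppgtt_release(struct kref *kref)
	{
		struct i915_hw_ppgtt *ppgtt =
			container_of(kref, struct i915_hw_ppgtt, ref);

		/* Assumed body: base cleanup via the existing helper,
		 * then free the ppgtt itself.
		 */
		do_ppgtt_cleanup(ppgtt);
		kfree(ppgtt);
	}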

@@ -2148,10 +2148,15 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
 				  struct i915_address_space *vm)
 {
 	struct i915_vma *vma;
+	struct i915_hw_ppgtt *ppgtt = NULL;
 
 	vma = i915_gem_obj_to_vma(obj, vm);
 	if (!vma)
 		vma = __i915_gem_vma_create(obj, vm);
 
+	ppgtt = vm_to_ppgtt(vm);
+	if (ppgtt)
+		kref_get(&ppgtt->ref);
+
 	return vma;
 }
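
Taken together, this hunk and the i915_gem_vma_destroy() hunk make vma
creation and destruction symmetric with respect to the ppgtt refcount:
every vma handed out by i915_gem_obj_lookup_or_create_vma() pins the
address space with kref_get(), and the matching kref_put() on
destruction means ppgtt_release() can only run once the context's ref
and every per-vma ref are gone.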