drm/i915: Fix pages pin counting around swizzle quirk
commit bc0629a767 ("drm/i915: Track pages pinned due to swizzling quirk") fixed
one problem, but revealed a whole lot more. The root cause of the pin count
mismatch for the swizzle quirk (for L-shaped memory on gen3/4) was that we were
incrementing the pages_pin_count upon getting the backing pages, but then
overwriting the pages_pin_count to set it to 1 afterwards. With a little bit of
adjustment to satisfy the GEM_BUG_ON sanity checks, the fix is to replace the
explicit atomic_set with an atomic_inc.

v2: Consistently use atomics (not mix atomics and helpers) within the
low-level get_pages routines. This makes the atomic operations much clearer.

Fixes: 1233e2db19 ("drm/i915: Move object backing storage manipulation")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161104103001.27643-1-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Parent: a44342acde
Commit: 2c3a3f44dc
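To make the pin-count bug easier to see outside the driver, here is a minimal userspace sketch built on C11 atomics. It is not i915 code: pages_pin_count, get_pages() and pin_pages() are illustrative stand-ins for the names in the message above. On quirked platforms the low-level get-pages path takes a pin of its own; overwriting the shared counter with 1 afterwards (the old atomic_set) discards that pin, while incrementing it (the new atomic_inc) preserves it.

/* Minimal sketch (not i915 code): why the caller must increment the shared
 * pin count rather than store an absolute value into it.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pages_pin_count;

static void pin_pages(void)
{
        atomic_fetch_add(&pages_pin_count, 1);
}

/* Stand-in for the low-level get-pages routine: on quirked platforms it
 * takes an extra pin of its own while the backing pages exist.
 */
static void get_pages(bool quirk)
{
        if (quirk)
                pin_pages();
}

int main(void)
{
        bool quirk = true;

        /* Broken: an absolute store discards the pin taken by get_pages(). */
        get_pages(quirk);
        atomic_store(&pages_pin_count, 1);
        printf("after set: %d (quirk pin lost)\n", atomic_load(&pages_pin_count));

        /* Fixed: the caller adds its own pin on top of whatever is held. */
        atomic_store(&pages_pin_count, 0);
        get_pages(quirk);
        atomic_fetch_add(&pages_pin_count, 1);
        printf("after inc: %d (quirk pin kept)\n", atomic_load(&pages_pin_count));

        return 0;
}

The sketch builds with any C11 compiler (e.g. gcc -std=c11) and prints 1 for the broken path and 2 for the fixed one, mirroring the reference the quirk path loses when the counter is overwritten.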
drivers/gpu/drm/i915/i915_gem.c

@@ -2324,12 +2324,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj, st);
 
-	if (i915_gem_object_is_tiled(obj) &&
-	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
-		__i915_gem_object_pin_pages(obj);
-		obj->mm.quirked = true;
-	}
-
 	return st;
 
 err_pages:
@@ -2362,12 +2356,21 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	obj->mm.get_page.sg_idx = 0;
 
 	obj->mm.pages = pages;
+
+	if (i915_gem_object_is_tiled(obj) &&
+	    to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+		GEM_BUG_ON(obj->mm.quirked);
+		__i915_gem_object_pin_pages(obj);
+		obj->mm.quirked = true;
+	}
 }
 
 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
 	struct sg_table *pages;
 
+	GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
 	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
 		DRM_DEBUG("Attempting to obtain a purgeable object\n");
 		return -EFAULT;
@@ -2396,16 +2399,14 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	if (err)
 		return err;
 
-	if (likely(obj->mm.pages)) {
-		__i915_gem_object_pin_pages(obj);
-		goto unlock;
-	}
-
-	GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
-
+	if (unlikely(!obj->mm.pages)) {
 		err = ____i915_gem_object_get_pages(obj);
-	if (!err)
-		atomic_set_release(&obj->mm.pages_pin_count, 1);
+		if (err)
+			goto unlock;
+
+		smp_mb__before_atomic();
+	}
+	atomic_inc(&obj->mm.pages_pin_count);
 
 unlock:
 	mutex_unlock(&obj->mm.lock);
@@ -2476,12 +2477,14 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 
 	pinned = true;
 	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
+		if (unlikely(!obj->mm.pages)) {
 			ret = ____i915_gem_object_get_pages(obj);
 			if (ret)
 				goto err_unlock;
 
-		GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count));
-		atomic_set_release(&obj->mm.pages_pin_count, 1);
+			smp_mb__before_atomic();
+		}
+		atomic_inc(&obj->mm.pages_pin_count);
 		pinned = false;
 	}
 	GEM_BUG_ON(!obj->mm.pages);
@@ -2933,7 +2936,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		goto destroy;
 
 	GEM_BUG_ON(obj->bind_count == 0);
-	GEM_BUG_ON(!obj->mm.pages);
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 
 	if (i915_vma_is_map_and_fenceable(vma)) {
 		/* release the fence reg _after_ flushing */
@@ -3167,6 +3170,7 @@ search_free:
 	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 	obj->bind_count++;
+	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
 
 	return 0;
 
@@ -4100,6 +4104,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 			obj->mm.quirked = false;
 		}
 		if (args->madv == I915_MADV_WILLNEED) {
+			GEM_BUG_ON(obj->mm.quirked);
 			__i915_gem_object_pin_pages(obj);
 			obj->mm.quirked = true;
 		}
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -3711,6 +3711,13 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 {
 	int ret = 0;
 
+	/* The vma->pages are only valid within the lifespan of the borrowed
+	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
+	 * must be the vma->pages. A simple rule is that vma->pages must only
+	 * be accessed when the obj->mm.pages are pinned.
+	 */
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
+
 	if (vma->pages)
 		return 0;
 
drivers/gpu/drm/i915/i915_gem_tiling.c

@@ -269,6 +269,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 					obj->mm.quirked = false;
 				}
 				if (!i915_gem_object_is_tiled(obj)) {
+					GEM_BUG_ON(!obj->mm.quirked);
 					__i915_gem_object_pin_pages(obj);
 					obj->mm.quirked = true;
 				}