drm/i915: Recreate vmapping even when the object is pinned
Sometimes we know we are the only user of the bo, but since we take a
protective pin_pages early on, an attempt to change the vmap on the
object is denied because it is busy. i915_gem_object_pin_map() cannot
tell from our single pin_count if the operation is safe. Instead we must
pass that information down from the caller in the manner of
I915_MAP_OVERRIDE.
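To make that contract concrete, a small stand-alone C sketch of the flag-in-the-type pattern this patch adds: the enum values and the two masking lines are lifted from the hunks below, while the pin_map() stub, main() and the printf output are purely illustrative and not part of the driver.

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Enum as patched in i915_drv.h: FORCE_* alias WB/WC plus a high flag bit. */
enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/* Illustrative stand-in for the decision i915_gem_object_pin_map() makes. */
static void pin_map(enum i915_map_type type)
{
	/*
	 * A FORCE_* caller asserts that any outstanding pin is its own
	 * protective one, so recreating the vmap is safe...
	 */
	bool pinned = !(type & I915_MAP_OVERRIDE);
	/* ...and the flag is stripped before the cache mode is used. */
	type &= ~I915_MAP_OVERRIDE;

	printf("map as %s, may replace a conflicting vmap: %s\n",
	       type == I915_MAP_WB ? "WB" : "WC",
	       pinned ? "no (busy)" : "yes");
}

int main(void)
{
	pin_map(I915_MAP_WB);       /* ordinary caller */
	pin_map(I915_MAP_FORCE_WC); /* e.g. the !llc GPU reloc path */
	return 0;
}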
This issue has existed since the introduction of the mapping, but was
never noticed as the only place where this conflict might happen is for
cached kernel buffers (such as those allocated by i915_gem_batch_pool_get()).
Until recently there was only a single user (the cmdparser) so no
conflicts ever occurred. However, we now use it to allocate batches for
different operations (using MAP_WC on !llc for writes) in addition to the
existing shadow batch (using MAP_WB for reads).
We could either keep both mappings cached, or use a different write
mechanism if we detect a MAP_WB already exists (i.e. clflush
afterwards), but as we haven't seen this issue in the wild (it requires
hitting the GPU reloc path in addition to the cmdparser), for simplicity
just allow the mappings to be recreated.
v2: Include the I915_MAP_OVERRIDE bit in the enum so the compiler knows
about all the valid values.
Fixes: 7dd4f6729f ("drm/i915: Async GPU relocation processing")
Testcase: igt/gem_lut_handle # byt, completely by accident
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170828104631.8606-1-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Parent: 942d5d0dc4
Commit: a575c67617
drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1073,7 +1073,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 		goto unpin_src;
 	}
 
-	dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
+	dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
 	if (IS_ERR(dst))
 		goto unpin_dst;
 
drivers/gpu/drm/i915/i915_drv.h
@@ -3485,6 +3485,9 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
 enum i915_map_type {
 	I915_MAP_WB = 0,
 	I915_MAP_WC,
+#define I915_MAP_OVERRIDE BIT(31)
+	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
+	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
 };
 
 /**
drivers/gpu/drm/i915/i915_gem.c
@@ -2553,6 +2553,9 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
 	GEM_BUG_ON(i != n_pages);
 
 	switch (type) {
+	default:
+		MISSING_CASE(type);
+		/* fallthrough to use PAGE_KERNEL anyway */
 	case I915_MAP_WB:
 		pgprot = PAGE_KERNEL;
 		break;
@@ -2583,7 +2586,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ERR_PTR(ret);
 
-	pinned = true;
+	pinned = !(type & I915_MAP_OVERRIDE);
+	type &= ~I915_MAP_OVERRIDE;
+
 	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
 		if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
 			ret = ____i915_gem_object_get_pages(obj);
drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1071,7 +1071,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		return PTR_ERR(obj);
 
 	cmd = i915_gem_object_pin_map(obj,
-				      cache->has_llc ? I915_MAP_WB : I915_MAP_WC);
+				      cache->has_llc ?
+				      I915_MAP_FORCE_WB :
+				      I915_MAP_FORCE_WC);
 	i915_gem_object_unpin_pages(obj);
 	if (IS_ERR(cmd))
 		return PTR_ERR(cmd);