drm/i915: Unconditionally flush any chipset buffers before execbuf
If userspace is asynchronously streaming into the batch or other
execobjects, we may not flush those writes along with a change in cache
domain (as there is no change). Therefore those writes may end up in
internal chipset buffers and not visible to the GPU upon execution. We
must issue a flush command or otherwise we encounter incoherency in the
batchbuffers and the GPU executing invalid commands (i.e. hanging) quite
regularly.
v2: Throw a paranoid wmb() into the general flush so that we remain
consistent with before.
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=90841
Fixes: 1816f92363 ("drm/i915: Support creation of unbound wc user...")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Akash Goel <akash.goel@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Tested-by: Matti Hämäläinen <ccr@tnsp.org>
Cc: stable@vger.kernel.org
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160818161718.27187-1-chris@chris-wilson.co.uk
This commit is contained in:
Parent
43aa7e8750
Commit
600f436801
|
@ -3420,6 +3420,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
|
||||||
/* belongs in i915_gem_gtt.h */
|
/* belongs in i915_gem_gtt.h */
|
||||||
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
|
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
|
||||||
{
|
{
|
||||||
|
wmb();
|
||||||
if (INTEL_GEN(dev_priv) < 6)
|
if (INTEL_GEN(dev_priv) < 6)
|
||||||
intel_gtt_chipset_flush();
|
intel_gtt_chipset_flush();
|
||||||
}
|
}
|
||||||
|
|
|
@ -1015,8 +1015,6 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
|
||||||
{
|
{
|
||||||
const unsigned int other_rings = eb_other_engines(req);
|
const unsigned int other_rings = eb_other_engines(req);
|
||||||
struct i915_vma *vma;
|
struct i915_vma *vma;
|
||||||
uint32_t flush_domains = 0;
|
|
||||||
bool flush_chipset = false;
|
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
list_for_each_entry(vma, vmas, exec_list) {
|
list_for_each_entry(vma, vmas, exec_list) {
|
||||||
|
@ -1029,17 +1027,12 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
|
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
|
||||||
flush_chipset |= i915_gem_clflush_object(obj, false);
|
i915_gem_clflush_object(obj, false);
|
||||||
|
|
||||||
flush_domains |= obj->base.write_domain;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (flush_chipset)
|
/* Unconditionally flush any chipset caches (for streaming writes). */
|
||||||
i915_gem_chipset_flush(req->engine->i915);
|
i915_gem_chipset_flush(req->engine->i915);
|
||||||
|
|
||||||
if (flush_domains & I915_GEM_DOMAIN_GTT)
|
|
||||||
wmb();
|
|
||||||
|
|
||||||
/* Unconditionally invalidate GPU caches and TLBs. */
|
/* Unconditionally invalidate GPU caches and TLBs. */
|
||||||
return req->engine->emit_flush(req, EMIT_INVALIDATE);
|
return req->engine->emit_flush(req, EMIT_INVALIDATE);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in new issue