drm/i915: abolish separate per-ring default_context pointers
Now that we've eliminated a lot of uses of ring->default_context,
we can eliminate the pointer itself.
All the engines share the same default intel_context, so we can just
keep a single reference to it in the dev_priv structure rather than one
in each of the engine[] elements. This makes refcounting more sensible
too, as we now have a refcount of one for the one pointer, rather than
a refcount of one shared across multiple pointers.
From an idea by Chris Wilson.
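As a concrete illustration of the refcounting point above, here is a
minimal standalone C sketch (not driver code; the *_model struct names
are simplified stand-ins for the real i915 types) of the before/after
ownership:

#include <assert.h>
#include <stdio.h>

#define I915_NUM_RINGS 5

struct intel_context_model {
	int refcount;			/* one reference held on the context */
};

struct engine_model {
	struct intel_context_model *default_context;	/* old scheme: per-ring copy */
};

struct dev_priv_model {
	struct engine_model ring[I915_NUM_RINGS];
	struct intel_context_model *kernel_context;	/* new scheme: single pointer */
};

int main(void)
{
	struct intel_context_model ctx = { .refcount = 1 };
	struct dev_priv_model dev_priv = { 0 };

	/* Old scheme: I915_NUM_RINGS copies of the pointer, one refcount. */
	for (int i = 0; i < I915_NUM_RINGS; i++)
		dev_priv.ring[i].default_context = &ctx;

	/* New scheme: exactly one pointer for the one reference. */
	dev_priv.kernel_context = &ctx;

	assert(ctx.refcount == 1);
	printf("refcount %d now matches one pointer (was %d pointer copies)\n",
	       ctx.refcount, I915_NUM_RINGS);
	return 0;
}

The invariant the patch restores is simply that the number of stored
pointers matches the number of references held.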
v2: transform an extra instance of ring->default_context introduced by
commit 42f1cae8c ("drm/i915: Restore inhibiting the load of the default
context")
That patch's commentary includes:
    v2: Mark the global default context as uninitialized on GPU reset so
        that the context-local workarounds are reloaded upon re-enabling
The code implementing that now also benefits from the replacement of
the multiple (per-ring) pointers to the default context with a single
pointer to the unique kernel context.
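Concretely (excerpted from the i915_gem_context_reset() hunk below),
the per-ring statement

	/* Force the GPU state to be reinitialised on enabling */
	if (ring->default_context)
		ring->default_context->legacy_hw_ctx.initialized = false;

executed once per engine collapses to a single assignment after the
loop:

	/* Force the GPU state to be reinitialised on enabling */
	dev_priv->kernel_context->legacy_hw_ctx.initialized = false;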
v4: Rebased, removed now-unused local (Nick Hoath)
Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Reviewed-by: Nick Hoath <nicholas.hoath@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1453230175-19330-3-git-send-email-david.s.gordon@intel.com
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent: 2682708839
Commit: ed54c1a1d1
drivers/gpu/drm/i915/i915_debugfs.c

@@ -1961,7 +1961,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		seq_puts(m, "HW context ");
 		describe_ctx(m, ctx);
 		for_each_ring(ring, dev_priv, i) {
-			if (ring->default_context == ctx)
+			if (dev_priv->kernel_context == ctx)
 				seq_printf(m, "(default context %s) ",
 					   ring->name);
 		}
@@ -2058,7 +2058,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
 		for_each_ring(ring, dev_priv, i) {
-			if (ring->default_context != ctx)
+			if (dev_priv->kernel_context != ctx)
 				i915_dump_lrc_obj(m, ctx, ring);
 		}
 	}
drivers/gpu/drm/i915/i915_drv.h

@@ -1948,6 +1948,8 @@ struct drm_i915_private {
 		void (*stop_ring)(struct intel_engine_cs *ring);
 	} gt;
 
+	struct intel_context *kernel_context;
+
 	bool edp_low_vswing;
 
 	/* perform PHY state sanity checks? */
drivers/gpu/drm/i915/i915_gem.c

@@ -2680,7 +2680,7 @@ void i915_gem_request_free(struct kref *req_ref)
 
 	if (ctx) {
 		if (i915.enable_execlists) {
-			if (ctx != req->ring->default_context)
+			if (ctx != req->i915->kernel_context)
 				intel_lr_context_unpin(req);
 		}
 
@@ -2776,7 +2776,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	int err;
 
 	if (ctx == NULL)
-		ctx = engine->default_context;
+		ctx = to_i915(engine->dev)->kernel_context;
 	err = __i915_gem_request_alloc(engine, ctx, &req);
 	return err ? ERR_PTR(err) : req;
 }
@@ -4864,7 +4864,7 @@ i915_gem_init_hw(struct drm_device *dev)
 	 */
 	init_unused_rings(dev);
 
-	BUG_ON(!dev_priv->ring[RCS].default_context);
+	BUG_ON(!dev_priv->kernel_context);
 
 	ret = i915_ppgtt_init_hw(dev);
 	if (ret) {
drivers/gpu/drm/i915/i915_gem_context.c

@@ -347,22 +347,20 @@ void i915_gem_context_reset(struct drm_device *dev)
 			i915_gem_context_unreference(lctx);
 			ring->last_context = NULL;
 		}
-
-		/* Force the GPU state to be reinitialised on enabling */
-		if (ring->default_context)
-			ring->default_context->legacy_hw_ctx.initialized = false;
 	}
+
+	/* Force the GPU state to be reinitialised on enabling */
+	dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
 }
 
 int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_context *ctx;
-	int i;
 
 	/* Init should only be called once per module load. Eventually the
 	 * restriction on the context_disabled check can be loosened. */
-	if (WARN_ON(dev_priv->ring[RCS].default_context))
+	if (WARN_ON(dev_priv->kernel_context))
 		return 0;
 
 	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
@@ -392,12 +390,7 @@ int i915_gem_context_init(struct drm_device *dev)
 		return PTR_ERR(ctx);
 	}
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
-
-		/* NB: RCS will hold a ref for all rings */
-		ring->default_context = ctx;
-	}
+	dev_priv->kernel_context = ctx;
 
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
 			i915.enable_execlists ? "LR" :
@@ -408,7 +401,7 @@ int i915_gem_context_init(struct drm_device *dev)
 void i915_gem_context_fini(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
+	struct intel_context *dctx = dev_priv->kernel_context;
 	int i;
 
 	if (dctx->legacy_hw_ctx.rcs_state) {
@@ -435,17 +428,17 @@ void i915_gem_context_fini(struct drm_device *dev)
 		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 	}
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
+	for (i = I915_NUM_RINGS; --i >= 0;) {
 		struct intel_engine_cs *ring = &dev_priv->ring[i];
 
-		if (ring->last_context)
+		if (ring->last_context) {
 			i915_gem_context_unreference(ring->last_context);
-
-		ring->default_context = NULL;
-		ring->last_context = NULL;
+			ring->last_context = NULL;
+		}
 	}
 
 	i915_gem_context_unreference(dctx);
+	dev_priv->kernel_context = NULL;
 }
 
 int i915_gem_context_enable(struct drm_i915_gem_request *req)
drivers/gpu/drm/i915/i915_gpu_error.c

@@ -1050,7 +1050,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 			if (request)
 				rbuf = request->ctx->engine[ring->id].ringbuf;
 			else
-				rbuf = ring->default_context->engine[ring->id].ringbuf;
+				rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
 		} else
 			rbuf = ring->buffer;
 
drivers/gpu/drm/i915/i915_guc_submission.c

@@ -964,7 +964,7 @@ int i915_guc_submission_enable(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;
-	struct intel_context *ctx = dev_priv->ring[RCS].default_context;
+	struct intel_context *ctx = dev_priv->kernel_context;
 	struct i915_guc_client *client;
 
 	/* client for execbuf submission */
@@ -1021,7 +1021,7 @@ int intel_guc_suspend(struct drm_device *dev)
 	if (!i915.enable_guc_submission)
 		return 0;
 
-	ctx = dev_priv->ring[RCS].default_context;
+	ctx = dev_priv->kernel_context;
 
 	data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
 	/* any value greater than GUC_POWER_D0 */
@@ -1047,7 +1047,7 @@ int intel_guc_resume(struct drm_device *dev)
 	if (!i915.enable_guc_submission)
 		return 0;
 
-	ctx = dev_priv->ring[RCS].default_context;
+	ctx = dev_priv->kernel_context;
 
 	data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
 	data[1] = GUC_POWER_D0;
drivers/gpu/drm/i915/intel_lrc.c

@@ -598,7 +598,7 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
 	struct drm_i915_gem_request *cursor;
 	int num_elements = 0;
 
-	if (request->ctx != ring->default_context)
+	if (request->ctx != request->i915->kernel_context)
 		intel_lr_context_pin(request);
 
 	i915_gem_request_reference(request);
@@ -690,7 +690,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 
 	request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
 
-	if (request->ctx != request->ring->default_context) {
+	if (request->ctx != request->i915->kernel_context) {
 		ret = intel_lr_context_pin(request);
 		if (ret)
 			return ret;
@@ -1006,7 +1006,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
 		struct drm_i915_gem_object *ctx_obj =
 				ctx->engine[ring->id].state;
 
-		if (ctx_obj && (ctx != ring->default_context))
+		if (ctx_obj && (ctx != req->i915->kernel_context))
 			intel_lr_context_unpin(req);
 		list_del(&req->execlist_link);
 		i915_gem_request_unreference(req);
@@ -1529,7 +1529,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 	u8 next_context_status_buffer_hw;
 
 	lrc_setup_hardware_status_page(ring,
-			ring->default_context->engine[ring->id].state);
+			dev_priv->kernel_context->engine[ring->id].state);
 
 	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
 	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -2005,6 +2005,7 @@ logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
 static int
 logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 {
+	struct intel_context *dctx = to_i915(dev)->kernel_context;
 	int ret;
 
 	/* Intentionally left blank. */
@@ -2027,12 +2028,12 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 	if (ret)
 		goto error;
 
-	ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
+	ret = intel_lr_context_deferred_alloc(dctx, ring);
 	if (ret)
 		goto error;
 
 	/* As this is the default context, always pin it */
-	ret = intel_lr_context_do_pin(ring, ring->default_context);
+	ret = intel_lr_context_do_pin(ring, dctx);
 	if (ret) {
 		DRM_ERROR(
 			"Failed to pin and map ringbuffer %s: %d\n",
@@ -2398,7 +2399,7 @@ void intel_lr_context_free(struct intel_context *ctx)
 					ctx->engine[i].ringbuf;
 			struct intel_engine_cs *ring = ringbuf->ring;
 
-			if (ctx == ring->default_context) {
+			if (ctx == ctx->i915->kernel_context) {
 				intel_unpin_ringbuffer_obj(ringbuf);
 				i915_gem_object_ggtt_unpin(ctx_obj);
 			}
@@ -2517,7 +2518,7 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 	ctx->engine[ring->id].ringbuf = ringbuf;
 	ctx->engine[ring->id].state = ctx_obj;
 
-	if (ctx != ring->default_context && ring->init_context) {
+	if (ctx != ctx->i915->kernel_context && ring->init_context) {
 		struct drm_i915_gem_request *req;
 
 		req = i915_gem_request_alloc(ring, ctx);
drivers/gpu/drm/i915/intel_ringbuffer.h

@@ -309,7 +309,6 @@ struct intel_engine_cs {
 
 	wait_queue_head_t irq_queue;
 
-	struct intel_context *default_context;
 	struct intel_context *last_context;
 
 	struct intel_ring_hangcheck hangcheck;