drm/i915: Only disable execlist preemption for the duration of the request
We need to prevent resubmission of the context immediately following an initial resubmit (which does a lite-restore preemption). Currently we do this by disabling all submission whilst the context is still active, but we can improve this by limiting the restriction to only until we receive notification from the context-switch interrupt that the lite-restore preemption is complete.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170124110009.28947-2-chris@chris-wilson.co.uk
This commit is contained in:
Parent
c816e605ff
Commit
816ee798ec
|
@ -3320,15 +3320,21 @@ static int i915_engine_info(struct seq_file *m, void *unused)
|
||||||
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
rq = READ_ONCE(engine->execlist_port[0].request);
|
rq = READ_ONCE(engine->execlist_port[0].request);
|
||||||
if (rq)
|
if (rq) {
|
||||||
print_request(m, rq, "\t\tELSP[0] ");
|
seq_printf(m, "\t\tELSP[0] count=%d, ",
|
||||||
else
|
engine->execlist_port[0].count);
|
||||||
|
print_request(m, rq, "rq: ");
|
||||||
|
} else {
|
||||||
seq_printf(m, "\t\tELSP[0] idle\n");
|
seq_printf(m, "\t\tELSP[0] idle\n");
|
||||||
|
}
|
||||||
rq = READ_ONCE(engine->execlist_port[1].request);
|
rq = READ_ONCE(engine->execlist_port[1].request);
|
||||||
if (rq)
|
if (rq) {
|
||||||
print_request(m, rq, "\t\tELSP[1] ");
|
seq_printf(m, "\t\tELSP[1] count=%d, ",
|
||||||
else
|
engine->execlist_port[1].count);
|
||||||
|
print_request(m, rq, "rq: ");
|
||||||
|
} else {
|
||||||
seq_printf(m, "\t\tELSP[1] idle\n");
|
seq_printf(m, "\t\tELSP[1] idle\n");
|
||||||
|
}
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
|
|
||||||
spin_lock_irq(&engine->timeline->lock);
|
spin_lock_irq(&engine->timeline->lock);
|
||||||
|
|
|
@ -380,7 +380,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
|
||||||
execlists_context_status_change(port[0].request,
|
execlists_context_status_change(port[0].request,
|
||||||
INTEL_CONTEXT_SCHEDULE_IN);
|
INTEL_CONTEXT_SCHEDULE_IN);
|
||||||
desc[0] = execlists_update_context(port[0].request);
|
desc[0] = execlists_update_context(port[0].request);
|
||||||
engine->preempt_wa = port[0].count++; /* bdw only? fixed on skl? */
|
port[0].count++;
|
||||||
|
|
||||||
if (port[1].request) {
|
if (port[1].request) {
|
||||||
GEM_BUG_ON(port[1].count);
|
GEM_BUG_ON(port[1].count);
|
||||||
|
@ -545,15 +545,11 @@ bool intel_execlists_idle(struct drm_i915_private *dev_priv)
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool execlists_elsp_ready(struct intel_engine_cs *engine)
|
static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
|
||||||
{
|
{
|
||||||
int port;
|
const struct execlist_port *port = engine->execlist_port;
|
||||||
|
|
||||||
port = 1; /* wait for a free slot */
|
return port[0].count + port[1].count < 2;
|
||||||
if (engine->preempt_wa)
|
|
||||||
port = 0; /* wait for GPU to be idle before continuing */
|
|
||||||
|
|
||||||
return !engine->execlist_port[port].request;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -601,8 +597,6 @@ static void intel_lrc_irq_handler(unsigned long data)
|
||||||
i915_gem_request_put(port[0].request);
|
i915_gem_request_put(port[0].request);
|
||||||
port[0] = port[1];
|
port[0] = port[1];
|
||||||
memset(&port[1], 0, sizeof(port[1]));
|
memset(&port[1], 0, sizeof(port[1]));
|
||||||
|
|
||||||
engine->preempt_wa = false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
GEM_BUG_ON(port[0].count == 0 &&
|
GEM_BUG_ON(port[0].count == 0 &&
|
||||||
|
|
|
@ -380,7 +380,6 @@ struct intel_engine_cs {
|
||||||
struct rb_root execlist_queue;
|
struct rb_root execlist_queue;
|
||||||
struct rb_node *execlist_first;
|
struct rb_node *execlist_first;
|
||||||
unsigned int fw_domains;
|
unsigned int fw_domains;
|
||||||
bool preempt_wa;
|
|
||||||
u32 ctx_desc_template;
|
u32 ctx_desc_template;
|
||||||
|
|
||||||
/* Contexts are pinned whilst they are active on the GPU. The last
|
/* Contexts are pinned whilst they are active on the GPU. The last
|
||||||
|
|
Loading…
Link in new issue