drm/i915/guc: Add bypass tasklet submission path to GuC
Add a bypass tasklet submission path to the GuC. The tasklet is only
used if the H2G channel has backpressure.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210721215101.139794-6-matthew.brost@intel.com
Parent: 925dc1cf58
Commit: 2330923e92
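The patch keeps ordering correct while skipping the tasklet in the common case:
if nothing is stalled and the scheduler queue is empty, the request is handed to
the GuC directly, and the tasklet is only scheduled when that direct attempt
returns -EBUSY. Below is a minimal userspace sketch of that decision, using
made-up stand-in types (fake_guc, fake_sched_engine, h2g_full); it is not the
driver code, which appears in the diff that follows.

/*
 * Minimal userspace sketch (not driver code) of the decision added to
 * guc_submit_request(): submit directly to the GuC unless a request is
 * already stalled or the priority queue already has work, and only fall
 * back to the tasklet when the direct path reports backpressure (-EBUSY).
 * All names below are illustrative stand-ins for the i915 structures.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_guc {
        bool stalled_request;   /* a previous request hit backpressure */
        bool h2g_full;          /* H2G channel has no space left */
};

struct fake_sched_engine {
        bool queue_empty;       /* nothing waiting in the priority queue */
        bool tasklet_scheduled;
};

/* Models guc_bypass_tasklet_submit(): try the direct submission path. */
static int bypass_submit(struct fake_guc *guc)
{
        if (guc->h2g_full) {
                guc->stalled_request = true;    /* remember the stall */
                return -EBUSY;
        }
        return 0;       /* request went straight out, no tasklet needed */
}

/* Models the new guc_submit_request() flow. */
static void submit_request(struct fake_guc *guc, struct fake_sched_engine *se)
{
        if (guc->stalled_request || !se->queue_empty)
                se->queue_empty = false;                /* queue_request() */
        else if (bypass_submit(guc) == -EBUSY)
                se->tasklet_scheduled = true;           /* retry via tasklet */
}

int main(void)
{
        struct fake_guc guc = { .h2g_full = true };
        struct fake_sched_engine se = { .queue_empty = true };

        submit_request(&guc, &se);
        printf("stalled=%d tasklet_scheduled=%d\n",
               guc.stalled_request, se.tasklet_scheduled);
        return 0;
}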
@@ -172,6 +172,12 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
         return err;
 }
 
+static inline void guc_set_lrc_tail(struct i915_request *rq)
+{
+        rq->context->lrc_reg_state[CTX_RING_TAIL] =
+                intel_ring_set_tail(rq->ring, rq->tail);
+}
+
 static inline int rq_prio(const struct i915_request *rq)
 {
         return rq->sched.attr.priority;
@@ -215,8 +221,7 @@ static int guc_dequeue_one_context(struct intel_guc *guc)
         }
 done:
         if (submit) {
-                last->context->lrc_reg_state[CTX_RING_TAIL] =
-                        intel_ring_set_tail(last->ring, last->tail);
+                guc_set_lrc_tail(last);
 resubmit:
                 /*
                  * We only check for -EBUSY here even though it is possible for
@@ -496,20 +501,36 @@ static inline void queue_request(struct i915_sched_engine *sched_engine,
         set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 }
 
+static int guc_bypass_tasklet_submit(struct intel_guc *guc,
+                                     struct i915_request *rq)
+{
+        int ret;
+
+        __i915_request_submit(rq);
+
+        trace_i915_request_in(rq, 0);
+
+        guc_set_lrc_tail(rq);
+        ret = guc_add_request(guc, rq);
+        if (ret == -EBUSY)
+                guc->stalled_request = rq;
+
+        return ret;
+}
+
 static void guc_submit_request(struct i915_request *rq)
 {
         struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
+        struct intel_guc *guc = &rq->engine->gt->uc.guc;
         unsigned long flags;
 
         /* Will be called from irq-context when using foreign fences. */
         spin_lock_irqsave(&sched_engine->lock, flags);
 
-        queue_request(sched_engine, rq, rq_prio(rq));
-
-        GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
-        GEM_BUG_ON(list_empty(&rq->sched.link));
-
-        tasklet_hi_schedule(&sched_engine->tasklet);
+        if (guc->stalled_request || !i915_sched_engine_is_empty(sched_engine))
+                queue_request(sched_engine, rq, rq_prio(rq));
+        else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY)
+                tasklet_hi_schedule(&sched_engine->tasklet);
 
         spin_unlock_irqrestore(&sched_engine->lock, flags);
 }