drm/i915: Pass i915_sched_node around internally
To simplify the next patch, update bump_priority and schedule to accept
the internal i915_sched_node directly and not expect a request pointer.
add/remove: 0/0 grow/shrink: 2/1 up/down: 8/-15 (-7)
Function old new delta
i915_schedule_bump_priority 109 113 +4
i915_schedule 50 54 +4
__i915_schedule 922 907 -15
v2: Adopt 'node' for the old 'rq' local, since it is no longer a request
but the origin node.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190513120102.29660-2-chris@chris-wilson.co.uk
(cherry picked from commit 52c76fb18a)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
This commit is contained in:
Родитель
06b2b1a40e
Коммит
f312c23ff9
|
@ -175,7 +175,7 @@ static bool inflight(const struct i915_request *rq,
|
||||||
return active->hw_context == rq->hw_context;
|
return active->hw_context == rq->hw_context;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __i915_schedule(struct i915_request *rq,
|
static void __i915_schedule(struct i915_sched_node *node,
|
||||||
const struct i915_sched_attr *attr)
|
const struct i915_sched_attr *attr)
|
||||||
{
|
{
|
||||||
struct intel_engine_cs *engine;
|
struct intel_engine_cs *engine;
|
||||||
|
@ -189,13 +189,13 @@ static void __i915_schedule(struct i915_request *rq,
|
||||||
lockdep_assert_held(&schedule_lock);
|
lockdep_assert_held(&schedule_lock);
|
||||||
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
|
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
|
||||||
|
|
||||||
if (i915_request_completed(rq))
|
if (node_signaled(node))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (prio <= READ_ONCE(rq->sched.attr.priority))
|
if (prio <= READ_ONCE(node->attr.priority))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
stack.signaler = &rq->sched;
|
stack.signaler = node;
|
||||||
list_add(&stack.dfs_link, &dfs);
|
list_add(&stack.dfs_link, &dfs);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -246,9 +246,9 @@ static void __i915_schedule(struct i915_request *rq,
|
||||||
* execlists_submit_request()), we can set our own priority and skip
|
* execlists_submit_request()), we can set our own priority and skip
|
||||||
* acquiring the engine locks.
|
* acquiring the engine locks.
|
||||||
*/
|
*/
|
||||||
if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
|
if (node->attr.priority == I915_PRIORITY_INVALID) {
|
||||||
GEM_BUG_ON(!list_empty(&rq->sched.link));
|
GEM_BUG_ON(!list_empty(&node->link));
|
||||||
rq->sched.attr = *attr;
|
node->attr = *attr;
|
||||||
|
|
||||||
if (stack.dfs_link.next == stack.dfs_link.prev)
|
if (stack.dfs_link.next == stack.dfs_link.prev)
|
||||||
return;
|
return;
|
||||||
|
@ -257,15 +257,14 @@ static void __i915_schedule(struct i915_request *rq,
|
||||||
}
|
}
|
||||||
|
|
||||||
memset(&cache, 0, sizeof(cache));
|
memset(&cache, 0, sizeof(cache));
|
||||||
engine = rq->engine;
|
engine = node_to_request(node)->engine;
|
||||||
spin_lock(&engine->timeline.lock);
|
spin_lock(&engine->timeline.lock);
|
||||||
|
|
||||||
/* Fifo and depth-first replacement ensure our deps execute before us */
|
/* Fifo and depth-first replacement ensure our deps execute before us */
|
||||||
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
|
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
|
||||||
struct i915_sched_node *node = dep->signaler;
|
|
||||||
|
|
||||||
INIT_LIST_HEAD(&dep->dfs_link);
|
INIT_LIST_HEAD(&dep->dfs_link);
|
||||||
|
|
||||||
|
node = dep->signaler;
|
||||||
engine = sched_lock_engine(node, engine, &cache);
|
engine = sched_lock_engine(node, engine, &cache);
|
||||||
lockdep_assert_held(&engine->timeline.lock);
|
lockdep_assert_held(&engine->timeline.lock);
|
||||||
|
|
||||||
|
@ -315,13 +314,20 @@ static void __i915_schedule(struct i915_request *rq,
|
||||||
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
|
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
|
||||||
{
|
{
|
||||||
spin_lock_irq(&schedule_lock);
|
spin_lock_irq(&schedule_lock);
|
||||||
__i915_schedule(rq, attr);
|
__i915_schedule(&rq->sched, attr);
|
||||||
spin_unlock_irq(&schedule_lock);
|
spin_unlock_irq(&schedule_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
|
||||||
|
{
|
||||||
|
struct i915_sched_attr attr = node->attr;
|
||||||
|
|
||||||
|
attr.priority |= bump;
|
||||||
|
__i915_schedule(node, &attr);
|
||||||
|
}
|
||||||
|
|
||||||
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
|
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
|
||||||
{
|
{
|
||||||
struct i915_sched_attr attr;
|
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
|
GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
|
||||||
|
@ -330,11 +336,7 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
spin_lock_irqsave(&schedule_lock, flags);
|
spin_lock_irqsave(&schedule_lock, flags);
|
||||||
|
__bump_priority(&rq->sched, bump);
|
||||||
attr = rq->sched.attr;
|
|
||||||
attr.priority |= bump;
|
|
||||||
__i915_schedule(rq, &attr);
|
|
||||||
|
|
||||||
spin_unlock_irqrestore(&schedule_lock, flags);
|
spin_unlock_irqrestore(&schedule_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Загрузка…
Ссылка в новой задаче