drm/i915: Split i915_active.mutex into an irq-safe spinlock for the rbtree
As we want to be able to run inside atomic context for retiring the
i915_active, and we are no longer allowed to abuse mutex_trylock, split
the tree management portion of i915_active.mutex into an irq-safe
spinlock.

References: a0855d24fc ("locking/mutex: Complain upon mutex API misuse in IRQ contexts")
References: https://bugs.freedesktop.org/show_bug.cgi?id=111626
Fixes: 274cbf20fd ("drm/i915: Push the i915_active.retire into a worker")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191114172535.1116-1-chris@chris-wilson.co.uk
(cherry picked from commit c9ad602fea)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Parent: fa039b936c
Commit: 093b922873
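The core of the conversion is visible in __active_retire() below: instead of taking ref->mutex up front and tracking a `retire` flag, the final reference drop and the lock acquisition are fused with atomic_dec_and_lock_irqsave(), which takes the tree_lock only on the 1 -> 0 transition, with interrupts disabled, so the path is safe from fence-signaling (irq) context. A minimal, self-contained sketch of that pattern follows; the `tracker` type and names are illustrative, not the driver's:

#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical refcounted tracker; stands in for struct i915_active. */
struct tracker {
	atomic_t count;
	spinlock_t lock;	/* guards the state torn down on final put */
	void *payload;
};

/* May be called from hardirq/softirq context (e.g. fence signaling). */
static void tracker_put(struct tracker *t)
{
	unsigned long flags;
	void *payload;

	/*
	 * Fast path: returns false without touching the lock unless this
	 * decrement is the 1 -> 0 transition; on that transition it
	 * returns true with t->lock held and interrupts disabled.
	 */
	if (!atomic_dec_and_lock_irqsave(&t->count, &t->lock, flags))
		return;

	/* Detach state under the lock; free it only after unlocking. */
	payload = t->payload;
	t->payload = NULL;
	spin_unlock_irqrestore(&t->lock, flags);

	kfree(payload);
}

The patch applies the same shape: the rbtree is detached under tree_lock and the nodes are freed after the unlock, keeping the critical section short.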
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -91,14 +91,15 @@ static void debug_active_init(struct i915_active *ref)
 
 static void debug_active_activate(struct i915_active *ref)
 {
-	lockdep_assert_held(&ref->mutex);
+	spin_lock_irq(&ref->tree_lock);
 	if (!atomic_read(&ref->count)) /* before the first inc */
 		debug_object_activate(ref, &active_debug_desc);
+	spin_unlock_irq(&ref->tree_lock);
 }
 
 static void debug_active_deactivate(struct i915_active *ref)
 {
-	lockdep_assert_held(&ref->mutex);
+	lockdep_assert_held(&ref->tree_lock);
 	if (!atomic_read(&ref->count)) /* after the last dec */
 		debug_object_deactivate(ref, &active_debug_desc);
 }
@@ -128,29 +129,22 @@ __active_retire(struct i915_active *ref)
 {
 	struct active_node *it, *n;
 	struct rb_root root;
-	bool retire = false;
+	unsigned long flags;
 
-	lockdep_assert_held(&ref->mutex);
 	GEM_BUG_ON(i915_active_is_idle(ref));
 
 	/* return the unused nodes to our slabcache -- flushing the allocator */
-	if (atomic_dec_and_test(&ref->count)) {
-		debug_active_deactivate(ref);
-		root = ref->tree;
-		ref->tree = RB_ROOT;
-		ref->cache = NULL;
-		retire = true;
-	}
-
-	mutex_unlock(&ref->mutex);
-	if (!retire)
+	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
 		return;
 
 	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
-	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
-		GEM_BUG_ON(i915_active_fence_isset(&it->base));
-		kmem_cache_free(global.slab_cache, it);
-	}
+	debug_active_deactivate(ref);
+
+	root = ref->tree;
+	ref->tree = RB_ROOT;
+	ref->cache = NULL;
+
+	spin_unlock_irqrestore(&ref->tree_lock, flags);
 
 	/* After the final retire, the entire struct may be freed */
 	if (ref->retire)
@@ -158,6 +152,11 @@ __active_retire(struct i915_active *ref)
 
 	/* ... except if you wait on it, you must manage your own references! */
 	wake_up_var(ref);
+
+	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
+		GEM_BUG_ON(i915_active_fence_isset(&it->base));
+		kmem_cache_free(global.slab_cache, it);
+	}
 }
 
 static void
@@ -169,7 +168,6 @@ active_work(struct work_struct *wrk)
 	if (atomic_add_unless(&ref->count, -1, 1))
 		return;
 
-	mutex_lock(&ref->mutex);
 	__active_retire(ref);
 }
 
@@ -180,9 +178,7 @@ active_retire(struct i915_active *ref)
 	if (atomic_add_unless(&ref->count, -1, 1))
 		return;
 
-	/* If we are inside interrupt context (fence signaling), defer */
-	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS ||
-	    !mutex_trylock(&ref->mutex)) {
+	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
 		queue_work(system_unbound_wq, &ref->work);
 		return;
 	}
@@ -227,7 +223,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
 	if (!prealloc)
 		return NULL;
 
-	mutex_lock(&ref->mutex);
+	spin_lock_irq(&ref->tree_lock);
 	GEM_BUG_ON(i915_active_is_idle(ref));
 
 	parent = NULL;
@@ -257,7 +253,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
 
 out:
 	ref->cache = node;
-	mutex_unlock(&ref->mutex);
+	spin_unlock_irq(&ref->tree_lock);
 
 	BUILD_BUG_ON(offsetof(typeof(*node), base));
 	return &node->base;
@@ -278,8 +274,10 @@ void __i915_active_init(struct i915_active *ref,
 	if (bits & I915_ACTIVE_MAY_SLEEP)
 		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
 
+	spin_lock_init(&ref->tree_lock);
 	ref->tree = RB_ROOT;
 	ref->cache = NULL;
+
 	init_llist_head(&ref->preallocated_barriers);
 	atomic_set(&ref->count, 0);
 	__mutex_init(&ref->mutex, "i915_active", key);
@@ -510,7 +508,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 	if (RB_EMPTY_ROOT(&ref->tree))
 		return NULL;
 
-	mutex_lock(&ref->mutex);
+	spin_lock_irq(&ref->tree_lock);
 	GEM_BUG_ON(i915_active_is_idle(ref));
 
 	/*
@@ -575,7 +573,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 			goto match;
 	}
 
-	mutex_unlock(&ref->mutex);
+	spin_unlock_irq(&ref->tree_lock);
 
 	return NULL;
 
@@ -583,7 +581,7 @@ match:
 	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
 	if (p == &ref->cache->node)
 		ref->cache = NULL;
-	mutex_unlock(&ref->mutex);
+	spin_unlock_irq(&ref->tree_lock);
 
 	return rb_entry(p, struct active_node, node);
 }
@@ -664,6 +662,7 @@ unwind:
 void i915_active_acquire_barrier(struct i915_active *ref)
 {
 	struct llist_node *pos, *next;
+	unsigned long flags;
 
 	GEM_BUG_ON(i915_active_is_idle(ref));
 
@@ -673,7 +672,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 	 * populated by i915_request_add_active_barriers() to point to the
 	 * request that will eventually release them.
 	 */
-	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+	spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING);
 	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
 		struct active_node *node = barrier_from_ll(pos);
 		struct intel_engine_cs *engine = barrier_to_engine(node);
@@ -699,7 +698,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
 		intel_engine_pm_put(engine);
 	}
-	mutex_unlock(&ref->mutex);
+	spin_unlock_irqrestore(&ref->tree_lock, flags);
 }
 
 void i915_request_add_active_barriers(struct i915_request *rq)
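Note that the i915_active_acquire_barrier() hunk carries the SINGLE_DEPTH_NESTING annotation over from the old mutex_lock_nested() call. As a generic illustration of why such an annotation exists (the `node` type below is hypothetical, not the driver's code): when two locks of the same lockdep class are legitimately held at once, the inner acquisition must declare its nesting level, or lockdep reports a false recursive-locking deadlock.

#include <linux/lockdep.h>
#include <linux/spinlock.h>

/* Hypothetical linked structure; every node's lock shares one class. */
struct node {
	spinlock_t lock;
	struct node *parent;
};

static void node_update_pair(struct node *n)
{
	unsigned long flags;

	spin_lock_irqsave(&n->lock, flags);
	/*
	 * n->parent->lock belongs to the same lockdep class as n->lock,
	 * so a plain spin_lock() here would look like recursive locking.
	 * The _nested annotation declares a deliberate, ordered nesting
	 * one level deep.
	 */
	spin_lock_nested(&n->parent->lock, SINGLE_DEPTH_NESTING);

	/* ... update both nodes ... */

	spin_unlock(&n->parent->lock);
	spin_unlock_irqrestore(&n->lock, flags);
}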
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -48,6 +48,7 @@ struct i915_active {
 	atomic_t count;
 	struct mutex mutex;
 
+	spinlock_t tree_lock;
 	struct active_node *cache;
 	struct rb_root tree;
 
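The patch also uses both irq-disabling lock variants deliberately: spin_lock_irq() on paths known to run in process context with interrupts enabled (active_instance(), reuse_idle_barrier()), and the irqsave forms where the caller's interrupt state is unknown, such as retiring from a fence callback. A small sketch of the distinction, with hypothetical function names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Process context only: interrupts are known to be enabled, so the
 * cheaper unconditional disable/enable pair is safe. */
static void from_process_context(void)
{
	spin_lock_irq(&demo_lock);
	/* ... */
	spin_unlock_irq(&demo_lock);
}

/* Callable from any context: save and restore the caller's interrupt
 * state instead of assuming it was enabled. */
static void from_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}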