drm/i915: drop the __i915_active_call pointer packing
We use some of the lower bits of the retire function pointer for
potential flags, which is quite thorny, since the caller needs to
remember to give the function the correct alignment with
__i915_active_call, otherwise we might incorrectly unpack the pointer
and jump to some garbage address later. Instead of all this, let's just
pass the flags along as a separate parameter.

Suggested-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Suggested-by: Daniel Vetter <daniel@ffwll.ch>
References: ca419f407b ("drm/i915: Fix crash in auto_retire")
References: d8e44e4dd2 ("drm/i915/overlay: Fix active retire callback alignment")
References: fd5f262db1 ("drm/i915/selftests: Fix active retire callback alignment")
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210504164136.96456-1-matthew.auld@intel.com
Parent: 0a46be95c2
Commit: c3b147604f
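The hazard the commit message describes is easiest to see in isolation. Below is a minimal, self-contained C sketch of packing a flag into the low bits of a function pointer and why it only works when the callee is sufficiently aligned. The names (MAY_SLEEP, pack(), unpack(), retire()) are made up for illustration and are not the i915 API, which used ptr_pack_bits()/ptr_unpack_bits(); the function-pointer/integer casts are implementation-defined but fine for a demo.

/*
 * Minimal sketch of low-bit pointer packing (illustrative only; the names
 * below are hypothetical and this is not the i915 implementation).
 */
#include <stdint.h>
#include <stdio.h>

#define MAY_SLEEP 0x1UL         /* hypothetical flag stored in bit 0 */

typedef void (*retire_fn)(void);

/* Pack a flag into the low bits; only safe if fn is at least 4-byte aligned. */
static retire_fn pack(retire_fn fn, unsigned long flag)
{
        return (retire_fn)((uintptr_t)fn | flag);
}

/* Unpack: recover the flag from the low two bits, mask them off the pointer. */
static retire_fn unpack(retire_fn packed, unsigned long *flag)
{
        *flag = (uintptr_t)packed & 0x3UL;
        return (retire_fn)((uintptr_t)packed & ~0x3UL);
}

static void retire(void)
{
        puts("retired");
}

int main(void)
{
        unsigned long flag;
        retire_fn fn = unpack(pack(retire, MAY_SLEEP), &flag);

        /*
         * If retire() happened to be only 1- or 2-byte aligned, pack() would
         * clobber a real address bit and fn would point at garbage -- the
         * failure mode that __i915_active_call (__aligned(4)) existed to
         * prevent, and that a separate flags parameter removes entirely.
         */
        fn();
        printf("flag=%#lx\n", flag);
        return 0;
}

With the flags moved into a separate __i915_active_init() parameter, none of this low-bit bookkeeping is needed and callers can no longer get the retire callback's alignment wrong.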
@@ -206,7 +206,6 @@ static int frontbuffer_active(struct i915_active *ref)
         return 0;
 }
 
-__i915_active_call
 static void frontbuffer_retire(struct i915_active *ref)
 {
         struct intel_frontbuffer *front =
@@ -261,7 +260,8 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
         atomic_set(&front->bits, 0);
         i915_active_init(&front->write,
                          frontbuffer_active,
-                         i915_active_may_sleep(frontbuffer_retire));
+                         frontbuffer_retire,
+                         I915_ACTIVE_RETIRE_SLEEPS);
 
         spin_lock(&i915->fb_tracking.lock);
         if (rcu_access_pointer(obj->frontbuffer)) {
@@ -383,8 +383,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
         i830_overlay_clock_gating(dev_priv, true);
 }
 
-__i915_active_call static void
-intel_overlay_last_flip_retire(struct i915_active *active)
+static void intel_overlay_last_flip_retire(struct i915_active *active)
 {
         struct intel_overlay *overlay =
                 container_of(active, typeof(*overlay), last_flip);
@@ -1399,7 +1398,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
         overlay->saturation = 146;
 
         i915_active_init(&overlay->last_flip,
-                         NULL, intel_overlay_last_flip_retire);
+                         NULL, intel_overlay_last_flip_retire, 0);
 
         ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
         if (ret)
@@ -1046,7 +1046,6 @@ struct context_barrier_task {
         void *data;
 };
 
-__i915_active_call
 static void cb_retire(struct i915_active *base)
 {
         struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
@@ -1080,7 +1079,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
         if (!cb)
                 return -ENOMEM;
 
-        i915_active_init(&cb->base, NULL, cb_retire);
+        i915_active_init(&cb->base, NULL, cb_retire, 0);
         err = i915_active_acquire(&cb->base);
         if (err) {
                 kfree(cb);
@@ -343,7 +343,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
         if (!vma)
                 return ERR_PTR(-ENOMEM);
 
-        i915_active_init(&vma->active, NULL, NULL);
+        i915_active_init(&vma->active, NULL, NULL, 0);
 
         kref_init(&vma->ref);
         mutex_init(&vma->pages_mutex);
@@ -326,7 +326,6 @@ void intel_context_unpin(struct intel_context *ce)
         intel_context_put(ce);
 }
 
-__i915_active_call
 static void __intel_context_retire(struct i915_active *active)
 {
         struct intel_context *ce = container_of(active, typeof(*ce), active);
@@ -385,7 +384,7 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
         mutex_init(&ce->pin_mutex);
 
         i915_active_init(&ce->active,
-                         __intel_context_active, __intel_context_retire);
+                         __intel_context_active, __intel_context_retire, 0);
 }
 
 void intel_context_fini(struct intel_context *ce)
@@ -867,7 +867,7 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
         for (i = 0; i < num_fences; i++) {
                 struct i915_fence_reg *fence = &ggtt->fence_regs[i];
 
-                i915_active_init(&fence->active, NULL, NULL);
+                i915_active_init(&fence->active, NULL, NULL, 0);
                 fence->ggtt = ggtt;
                 fence->id = i;
                 list_add_tail(&fence->link, &ggtt->fence_list);
@@ -98,7 +98,6 @@ static void pool_free_work(struct work_struct *wrk)
                            round_jiffies_up_relative(HZ));
 }
 
-__i915_active_call
 static void pool_retire(struct i915_active *ref)
 {
         struct intel_gt_buffer_pool_node *node =
@@ -154,7 +153,7 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz,
         node->age = 0;
         node->pool = pool;
         node->pinned = false;
-        i915_active_init(&node->active, NULL, pool_retire);
+        i915_active_init(&node->active, NULL, pool_retire, 0);
 
         obj = i915_gem_object_create_internal(gt->i915, sz);
         if (IS_ERR(obj)) {
@@ -32,7 +32,6 @@ static struct i915_vma *hwsp_alloc(struct intel_gt *gt)
         return vma;
 }
 
-__i915_active_call
 static void __timeline_retire(struct i915_active *active)
 {
         struct intel_timeline *tl =
@@ -104,7 +103,8 @@ static int intel_timeline_init(struct intel_timeline *timeline,
         INIT_LIST_HEAD(&timeline->requests);
 
         i915_syncmap_init(&timeline->sync);
-        i915_active_init(&timeline->active, __timeline_active, __timeline_retire);
+        i915_active_init(&timeline->active, __timeline_active,
+                         __timeline_retire, 0);
 
         return 0;
 }
@@ -55,7 +55,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
                 kfree(ring);
                 return NULL;
         }
-        i915_active_init(&ring->vma->active, NULL, NULL);
+        i915_active_init(&ring->vma->active, NULL, NULL, 0);
         __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma));
         __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags);
         ring->vma->node.size = sz;
@@ -63,7 +63,7 @@ static void pulse_put(struct pulse *p)
         kref_put(&p->kref, pulse_free);
 }
 
-__i915_active_call static void pulse_retire(struct i915_active *active)
+static void pulse_retire(struct i915_active *active)
 {
         pulse_put(container_of(active, struct pulse, active));
 }
@@ -77,7 +77,7 @@ static struct pulse *pulse_create(void)
                 return p;
 
         kref_init(&p->kref);
-        i915_active_init(&p->active, pulse_active, pulse_retire);
+        i915_active_init(&p->active, pulse_active, pulse_retire, 0);
 
         return p;
 }
@@ -343,18 +343,15 @@ out:
 void __i915_active_init(struct i915_active *ref,
                         int (*active)(struct i915_active *ref),
                         void (*retire)(struct i915_active *ref),
+                        unsigned long flags,
                         struct lock_class_key *mkey,
                         struct lock_class_key *wkey)
 {
-        unsigned long bits;
-
         debug_active_init(ref);
 
-        ref->flags = 0;
+        ref->flags = flags;
         ref->active = active;
-        ref->retire = ptr_unpack_bits(retire, &bits, 2);
-        if (bits & I915_ACTIVE_MAY_SLEEP)
-                ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
+        ref->retire = retire;
 
         spin_lock_init(&ref->tree_lock);
         ref->tree = RB_ROOT;
@@ -1156,8 +1153,7 @@ static int auto_active(struct i915_active *ref)
         return 0;
 }
 
-__i915_active_call static void
-auto_retire(struct i915_active *ref)
+static void auto_retire(struct i915_active *ref)
 {
         i915_active_put(ref);
 }
@@ -1171,7 +1167,7 @@ struct i915_active *i915_active_create(void)
                 return NULL;
 
         kref_init(&aa->ref);
-        i915_active_init(&aa->base, auto_active, auto_retire);
+        i915_active_init(&aa->base, auto_active, auto_retire, 0);
 
         return &aa->base;
 }
@@ -152,15 +152,16 @@ i915_active_fence_isset(const struct i915_active_fence *active)
 void __i915_active_init(struct i915_active *ref,
                         int (*active)(struct i915_active *ref),
                         void (*retire)(struct i915_active *ref),
+                        unsigned long flags,
                         struct lock_class_key *mkey,
                         struct lock_class_key *wkey);
 
 /* Specialise each class of i915_active to avoid impossible lockdep cycles. */
-#define i915_active_init(ref, active, retire) do {                     \
-        static struct lock_class_key __mkey;                            \
-        static struct lock_class_key __wkey;                            \
-                                                                        \
-        __i915_active_init(ref, active, retire, &__mkey, &__wkey);      \
+#define i915_active_init(ref, active, retire, flags) do {              \
+        static struct lock_class_key __mkey;                            \
+        static struct lock_class_key __wkey;                            \
+                                                                        \
+        __i915_active_init(ref, active, retire, flags, &__mkey, &__wkey); \
 } while (0)
 
 struct dma_fence *
@@ -24,11 +24,6 @@ struct i915_active_fence {
 
 struct active_node;
 
-#define I915_ACTIVE_MAY_SLEEP BIT(0)
-
-#define __i915_active_call __aligned(4)
-#define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2)
-
 struct i915_active {
         atomic_t count;
         struct mutex mutex;
@@ -94,7 +94,6 @@ static int __i915_vma_active(struct i915_active *ref)
         return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
 }
 
-__i915_active_call
 static void __i915_vma_retire(struct i915_active *ref)
 {
         i915_vma_put(active_to_vma(ref));
@@ -125,7 +124,7 @@ vma_create(struct drm_i915_gem_object *obj,
         vma->size = obj->base.size;
         vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 
-        i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
+        i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);
 
         /* Declare ourselves safe for use inside shrinkers */
         if (IS_ENABLED(CONFIG_LOCKDEP)) {
@@ -51,7 +51,7 @@ static int __live_active(struct i915_active *base)
         return 0;
 }
 
-__i915_active_call static void __live_retire(struct i915_active *base)
+static void __live_retire(struct i915_active *base)
 {
         struct live_active *active = container_of(base, typeof(*active), base);
 
@@ -68,7 +68,7 @@ static struct live_active *__live_alloc(struct drm_i915_private *i915)
                 return NULL;
 
         kref_init(&active->ref);
-        i915_active_init(&active->base, __live_active, __live_retire);
+        i915_active_init(&active->base, __live_active, __live_retire, 0);
 
         return active;
 }