drm/i915/selftests: Pass intel_context to igt_spinner

Teach igt_spinner to only use our internal structs, decoupling the
interface from the GEM contexts. This makes it easier to avoid requiring
ce->gem_context back-references for the kernel_context that may not have
them in future.

v2: Lift engine lock to verify_wa() caller.
v3: Less than v2, but more so.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190731081126.9139-1-chris@chris-wilson.co.uk
Parent: cb0c43f30c
Commit: f277bc0c98
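
In short, the spinner helpers now take the intel_context (and its intel_gt)
rather than a (GEM context, engine) pair. A minimal before/after sketch of a
caller, with error handling trimmed; the get/put pattern mirrors the
spinner_create_request() helper added to selftest_lrc.c below:

    /* Before: the spinner was handed the GEM context and engine. */
    err = igt_spinner_init(&spin, i915);
    rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);

    /* After: callers resolve the intel_context and pass it directly. */
    ce = i915_gem_context_get_engine(ctx, engine->id);
    err = igt_spinner_init(&spin, ce->engine->gt);
    rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
    intel_context_put(ce);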
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -821,8 +821,7 @@ err_vma:
 #define TEST_RESET BIT(2)
 
 static int
-__sseu_prepare(struct drm_i915_private *i915,
-	       const char *name,
+__sseu_prepare(const char *name,
 	       unsigned int flags,
 	       struct intel_context *ce,
 	       struct igt_spinner **spin)
@@ -838,14 +837,11 @@ __sseu_prepare(struct drm_i915_private *i915,
 	if (!*spin)
 		return -ENOMEM;
 
-	ret = igt_spinner_init(*spin, i915);
+	ret = igt_spinner_init(*spin, ce->engine->gt);
 	if (ret)
 		goto err_free;
 
-	rq = igt_spinner_create_request(*spin,
-					ce->gem_context,
-					ce->engine,
-					MI_NOOP);
+	rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
 	if (IS_ERR(rq)) {
 		ret = PTR_ERR(rq);
 		goto err_fini;
@@ -871,8 +867,7 @@ err_free:
 }
 
 static int
-__read_slice_count(struct drm_i915_private *i915,
-		   struct intel_context *ce,
+__read_slice_count(struct intel_context *ce,
 		   struct drm_i915_gem_object *obj,
 		   struct igt_spinner *spin,
 		   u32 *rpcs)
@@ -901,7 +896,7 @@ __read_slice_count(struct drm_i915_private *i915,
 		return ret;
 	}
 
-	if (INTEL_GEN(i915) >= 11) {
+	if (INTEL_GEN(ce->engine->i915) >= 11) {
 		s_mask = GEN11_RPCS_S_CNT_MASK;
 		s_shift = GEN11_RPCS_S_CNT_SHIFT;
 	} else {
@@ -944,8 +939,7 @@ __check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
 }
 
 static int
-__sseu_finish(struct drm_i915_private *i915,
-	      const char *name,
+__sseu_finish(const char *name,
 	      unsigned int flags,
 	      struct intel_context *ce,
 	      struct drm_i915_gem_object *obj,
@@ -962,14 +956,13 @@ __sseu_finish(struct drm_i915_private *i915,
 		goto out;
 	}
 
-	ret = __read_slice_count(i915, ce, obj,
+	ret = __read_slice_count(ce, obj,
 				 flags & TEST_RESET ? NULL : spin, &rpcs);
 	ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
 	if (ret)
 		goto out;
 
-	ret = __read_slice_count(i915, ce->engine->kernel_context, obj,
-				 NULL, &rpcs);
+	ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs);
 	ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");
 
 out:
@@ -977,11 +970,12 @@ out:
 	igt_spinner_end(spin);
 
 	if ((flags & TEST_IDLE) && ret == 0) {
-		ret = i915_gem_wait_for_idle(i915, 0, MAX_SCHEDULE_TIMEOUT);
+		ret = i915_gem_wait_for_idle(ce->engine->i915,
+					     0, MAX_SCHEDULE_TIMEOUT);
 		if (ret)
 			return ret;
 
-		ret = __read_slice_count(i915, ce, obj, NULL, &rpcs);
+		ret = __read_slice_count(ce, obj, NULL, &rpcs);
 		ret = __check_rpcs(name, rpcs, ret, expected,
 				   "Context", " after idle!");
 	}
@@ -990,8 +984,7 @@ out:
 }
 
 static int
-__sseu_test(struct drm_i915_private *i915,
-	    const char *name,
+__sseu_test(const char *name,
 	    unsigned int flags,
 	    struct intel_context *ce,
 	    struct drm_i915_gem_object *obj,
@@ -1000,7 +993,7 @@ __sseu_test(struct drm_i915_private *i915,
 	struct igt_spinner *spin = NULL;
 	int ret;
 
-	ret = __sseu_prepare(i915, name, flags, ce, &spin);
+	ret = __sseu_prepare(name, flags, ce, &spin);
 	if (ret)
 		return ret;
 
@@ -1008,7 +1001,7 @@ __sseu_test(struct drm_i915_private *i915,
 	if (ret)
 		goto out_spin;
 
-	ret = __sseu_finish(i915, name, flags, ce, obj,
+	ret = __sseu_finish(name, flags, ce, obj,
 			    hweight32(sseu.slice_mask), spin);
 
 out_spin:
@@ -1088,22 +1081,22 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 		goto out_context;
 
 	/* First set the default mask. */
-	ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu);
+	ret = __sseu_test(name, flags, ce, obj, engine->sseu);
 	if (ret)
 		goto out_fail;
 
 	/* Then set a power-gated configuration. */
-	ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
+	ret = __sseu_test(name, flags, ce, obj, pg_sseu);
 	if (ret)
 		goto out_fail;
 
 	/* Back to defaults. */
-	ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu);
+	ret = __sseu_test(name, flags, ce, obj, engine->sseu);
 	if (ret)
 		goto out_fail;
 
 	/* One last power-gated configuration for the road. */
-	ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
+	ret = __sseu_test(name, flags, ce, obj, pg_sseu);
 	if (ret)
 		goto out_fail;
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -22,9 +22,9 @@
 static int live_sanitycheck(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct intel_engine_cs *engine;
+	struct i915_gem_engines_iter it;
 	struct i915_gem_context *ctx;
-	enum intel_engine_id id;
+	struct intel_context *ce;
 	struct igt_spinner spin;
 	intel_wakeref_t wakeref;
 	int err = -ENOMEM;
@@ -35,17 +35,17 @@ static int live_sanitycheck(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
-	if (igt_spinner_init(&spin, i915))
+	if (igt_spinner_init(&spin, &i915->gt))
 		goto err_unlock;
 
 	ctx = kernel_context(i915);
 	if (!ctx)
 		goto err_spin;
 
-	for_each_engine(engine, i915, id) {
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 		struct i915_request *rq;
 
-		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx;
@@ -69,6 +69,7 @@ static int live_sanitycheck(void *arg)
 
 	err = 0;
 err_ctx:
+	i915_gem_context_unlock_engines(ctx);
 	kernel_context_close(ctx);
 err_spin:
 	igt_spinner_fini(&spin);
@@ -480,6 +481,24 @@ err_unlock:
 	return err;
 }
 
+static struct i915_request *
+spinner_create_request(struct igt_spinner *spin,
+		       struct i915_gem_context *ctx,
+		       struct intel_engine_cs *engine,
+		       u32 arb)
+{
+	struct intel_context *ce;
+	struct i915_request *rq;
+
+	ce = i915_gem_context_get_engine(ctx, engine->id);
+	if (IS_ERR(ce))
+		return ERR_CAST(ce);
+
+	rq = igt_spinner_create_request(spin, ce, arb);
+	intel_context_put(ce);
+	return rq;
+}
+
 static int live_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -499,10 +518,10 @@ static int live_preempt(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
-	if (igt_spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, &i915->gt))
 		goto err_unlock;
 
-	if (igt_spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, &i915->gt))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -529,8 +548,8 @@ static int live_preempt(void *arg)
 		goto err_ctx_lo;
 	}
 
-	rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
-					MI_ARB_CHECK);
+	rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+				    MI_ARB_CHECK);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_ctx_lo;
@@ -545,8 +564,8 @@ static int live_preempt(void *arg)
 		goto err_ctx_lo;
 	}
 
-	rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
-					MI_ARB_CHECK);
+	rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+				    MI_ARB_CHECK);
 	if (IS_ERR(rq)) {
 		igt_spinner_end(&spin_lo);
 		err = PTR_ERR(rq);
@@ -603,10 +622,10 @@ static int live_late_preempt(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
-	if (igt_spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, &i915->gt))
 		goto err_unlock;
 
-	if (igt_spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, &i915->gt))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -632,8 +651,8 @@ static int live_late_preempt(void *arg)
 		goto err_ctx_lo;
 	}
 
-	rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
-					MI_ARB_CHECK);
+	rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+				    MI_ARB_CHECK);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_ctx_lo;
@@ -645,8 +664,8 @@ static int live_late_preempt(void *arg)
 		goto err_wedged;
 	}
 
-	rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
-					MI_NOOP);
+	rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+				    MI_NOOP);
 	if (IS_ERR(rq)) {
 		igt_spinner_end(&spin_lo);
 		err = PTR_ERR(rq);
@@ -711,7 +730,7 @@ static int preempt_client_init(struct drm_i915_private *i915,
 	if (!c->ctx)
 		return -ENOMEM;
 
-	if (igt_spinner_init(&c->spin, i915))
+	if (igt_spinner_init(&c->spin, &i915->gt))
 		goto err_ctx;
 
 	return 0;
@@ -761,9 +780,9 @@ static int live_nopreempt(void *arg)
 
 		engine->execlists.preempt_hang.count = 0;
 
-		rq_a = igt_spinner_create_request(&a.spin,
-						  a.ctx, engine,
-						  MI_ARB_CHECK);
+		rq_a = spinner_create_request(&a.spin,
+					      a.ctx, engine,
+					      MI_ARB_CHECK);
 		if (IS_ERR(rq_a)) {
 			err = PTR_ERR(rq_a);
 			goto err_client_b;
@@ -778,9 +797,9 @@ static int live_nopreempt(void *arg)
 			goto err_wedged;
 		}
 
-		rq_b = igt_spinner_create_request(&b.spin,
-						  b.ctx, engine,
-						  MI_ARB_CHECK);
+		rq_b = spinner_create_request(&b.spin,
+					      b.ctx, engine,
+					      MI_ARB_CHECK);
 		if (IS_ERR(rq_b)) {
 			err = PTR_ERR(rq_b);
 			goto err_client_b;
@@ -880,9 +899,9 @@ static int live_suppress_self_preempt(void *arg)
 
 		engine->execlists.preempt_hang.count = 0;
 
-		rq_a = igt_spinner_create_request(&a.spin,
-						  a.ctx, engine,
-						  MI_NOOP);
+		rq_a = spinner_create_request(&a.spin,
+					      a.ctx, engine,
+					      MI_NOOP);
 		if (IS_ERR(rq_a)) {
 			err = PTR_ERR(rq_a);
 			goto err_client_b;
@@ -895,9 +914,9 @@ static int live_suppress_self_preempt(void *arg)
 		}
 
 		for (depth = 0; depth < 8; depth++) {
-			rq_b = igt_spinner_create_request(&b.spin,
-							  b.ctx, engine,
-							  MI_NOOP);
+			rq_b = spinner_create_request(&b.spin,
+						      b.ctx, engine,
+						      MI_NOOP);
 			if (IS_ERR(rq_b)) {
 				err = PTR_ERR(rq_b);
 				goto err_client_b;
@@ -1048,9 +1067,9 @@ static int live_suppress_wait_preempt(void *arg)
 			goto err_client_3;
 
 		for (i = 0; i < ARRAY_SIZE(client); i++) {
-			rq[i] = igt_spinner_create_request(&client[i].spin,
-							   client[i].ctx, engine,
-							   MI_NOOP);
+			rq[i] = spinner_create_request(&client[i].spin,
+						       client[i].ctx, engine,
+						       MI_NOOP);
 			if (IS_ERR(rq[i])) {
 				err = PTR_ERR(rq[i]);
 				goto err_wedged;
@@ -1157,9 +1176,9 @@ static int live_chain_preempt(void *arg)
 		if (!intel_engine_has_preemption(engine))
 			continue;
 
-		rq = igt_spinner_create_request(&lo.spin,
-						lo.ctx, engine,
-						MI_ARB_CHECK);
+		rq = spinner_create_request(&lo.spin,
+					    lo.ctx, engine,
+					    MI_ARB_CHECK);
 		if (IS_ERR(rq))
 			goto err_wedged;
 		i915_request_add(rq);
@@ -1183,18 +1202,18 @@ static int live_chain_preempt(void *arg)
 		}
 
 		for_each_prime_number_from(count, 1, ring_size) {
-			rq = igt_spinner_create_request(&hi.spin,
-							hi.ctx, engine,
-							MI_ARB_CHECK);
+			rq = spinner_create_request(&hi.spin,
+						    hi.ctx, engine,
+						    MI_ARB_CHECK);
 			if (IS_ERR(rq))
 				goto err_wedged;
 			i915_request_add(rq);
 			if (!igt_wait_for_spinner(&hi.spin, rq))
 				goto err_wedged;
 
-			rq = igt_spinner_create_request(&lo.spin,
-							lo.ctx, engine,
-							MI_ARB_CHECK);
+			rq = spinner_create_request(&lo.spin,
+						    lo.ctx, engine,
+						    MI_ARB_CHECK);
 			if (IS_ERR(rq))
 				goto err_wedged;
 			i915_request_add(rq);
@@ -1284,10 +1303,10 @@ static int live_preempt_hang(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
-	if (igt_spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, &i915->gt))
 		goto err_unlock;
 
-	if (igt_spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, &i915->gt))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -1308,8 +1327,8 @@ static int live_preempt_hang(void *arg)
 		if (!intel_engine_has_preemption(engine))
 			continue;
 
-		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
-						MI_ARB_CHECK);
+		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+					    MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
@@ -1324,8 +1343,8 @@ static int live_preempt_hang(void *arg)
 		goto err_ctx_lo;
 	}
 
-	rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
-					MI_ARB_CHECK);
+	rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+				    MI_ARB_CHECK);
 	if (IS_ERR(rq)) {
 		igt_spinner_end(&spin_lo);
 		err = PTR_ERR(rq);
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -238,6 +238,7 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
 			  struct igt_spinner *spin)
 {
 	struct i915_gem_context *ctx;
+	struct intel_context *ce;
 	struct i915_request *rq;
 	intel_wakeref_t wakeref;
 	int err = 0;
@@ -248,10 +249,14 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
 
 	GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
 
+	ce = i915_gem_context_get_engine(ctx, engine->id);
+	GEM_BUG_ON(IS_ERR(ce));
+
 	rq = ERR_PTR(-ENODEV);
 	with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
-		rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
+		rq = igt_spinner_create_request(spin, ce, MI_NOOP);
 
+	intel_context_put(ce);
 	kernel_context_close(ctx);
 
 	if (IS_ERR(rq)) {
@@ -291,7 +296,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	err = igt_spinner_init(&spin, i915);
+	err = igt_spinner_init(&spin, engine->gt);
 	if (err)
 		goto out_ctx;
 
@@ -1083,7 +1088,7 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
 
 	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
 
-	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+	for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
 		enum intel_engine_id id = ce->engine->id;
 
 		ok &= engine_wa_list_verify(ce,
@@ -1094,7 +1099,6 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
 					    &lists->engine[id].ctx_wa_list,
 					    str) == 0;
 	}
-	i915_gem_context_unlock_engines(ctx);
 
 	return ok;
 }
@@ -1115,6 +1119,8 @@ live_gpu_reset_workarounds(void *arg)
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	i915_gem_context_lock_engines(ctx);
+
 	pr_info("Verifying after GPU reset...\n");
 
 	igt_global_reset_lock(&i915->gt);
@@ -1131,6 +1137,7 @@ live_gpu_reset_workarounds(void *arg)
 	ok = verify_wa_lists(ctx, &lists, "after reset");
 
 out:
+	i915_gem_context_unlock_engines(ctx);
 	kernel_context_close(ctx);
 	reference_lists_fini(i915, &lists);
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -1143,10 +1150,10 @@ static int
 live_engine_reset_workarounds(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct intel_engine_cs *engine;
+	struct i915_gem_engines_iter it;
 	struct i915_gem_context *ctx;
+	struct intel_context *ce;
 	struct igt_spinner spin;
-	enum intel_engine_id id;
 	struct i915_request *rq;
 	intel_wakeref_t wakeref;
 	struct wa_lists lists;
@@ -1164,7 +1171,8 @@ live_engine_reset_workarounds(void *arg)
 
 	reference_lists_init(i915, &lists);
 
-	for_each_engine(engine, i915, id) {
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		struct intel_engine_cs *engine = ce->engine;
 		bool ok;
 
 		pr_info("Verifying after %s reset...\n", engine->name);
@@ -1183,11 +1191,11 @@ live_engine_reset_workarounds(void *arg)
 			goto err;
 		}
 
-		ret = igt_spinner_init(&spin, i915);
+		ret = igt_spinner_init(&spin, engine->gt);
 		if (ret)
 			goto err;
 
-		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
 		if (IS_ERR(rq)) {
 			ret = PTR_ERR(rq);
 			igt_spinner_fini(&spin);
@@ -1214,8 +1222,8 @@ live_engine_reset_workarounds(void *arg)
 			goto err;
 		}
 	}
-
 err:
+	i915_gem_context_unlock_engines(ctx);
 	reference_lists_fini(i915, &lists);
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	igt_global_reset_unlock(&i915->gt);
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -9,25 +9,24 @@
 
 #include "igt_spinner.h"
 
-int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
+int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
 {
 	unsigned int mode;
 	void *vaddr;
 	int err;
 
-	GEM_BUG_ON(INTEL_GEN(i915) < 8);
+	GEM_BUG_ON(INTEL_GEN(gt->i915) < 8);
 
 	memset(spin, 0, sizeof(*spin));
-	spin->i915 = i915;
-	spin->gt = &i915->gt;
+	spin->gt = gt;
 
-	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
 	if (IS_ERR(spin->hws)) {
 		err = PTR_ERR(spin->hws);
 		goto err;
 	}
 
-	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
 	if (IS_ERR(spin->obj)) {
 		err = PTR_ERR(spin->obj);
 		goto err_hws;
@@ -41,7 +40,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
 	}
 	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
 
-	mode = i915_coherent_map_type(i915);
+	mode = i915_coherent_map_type(gt->i915);
 	vaddr = i915_gem_object_pin_map(spin->obj, mode);
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
@@ -87,22 +86,22 @@ static int move_to_active(struct i915_vma *vma,
 
 struct i915_request *
 igt_spinner_create_request(struct igt_spinner *spin,
-			   struct i915_gem_context *ctx,
-			   struct intel_engine_cs *engine,
+			   struct intel_context *ce,
 			   u32 arbitration_command)
 {
+	struct intel_engine_cs *engine = ce->engine;
 	struct i915_request *rq = NULL;
 	struct i915_vma *hws, *vma;
 	u32 *batch;
 	int err;
 
-	spin->gt = engine->gt;
+	GEM_BUG_ON(spin->gt != ce->vm->gt);
 
-	vma = i915_vma_instance(spin->obj, ctx->vm, NULL);
+	vma = i915_vma_instance(spin->obj, ce->vm, NULL);
 	if (IS_ERR(vma))
 		return ERR_CAST(vma);
 
-	hws = i915_vma_instance(spin->hws, ctx->vm, NULL);
+	hws = i915_vma_instance(spin->hws, ce->vm, NULL);
 	if (IS_ERR(hws))
 		return ERR_CAST(hws);
 
@@ -114,7 +113,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
 	if (err)
 		goto unpin_vma;
 
-	rq = igt_request_alloc(ctx, engine);
+	rq = intel_context_create_request(ce);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto unpin_hws;
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -17,7 +17,6 @@
 struct intel_gt;
 
 struct igt_spinner {
-	struct drm_i915_private *i915;
 	struct intel_gt *gt;
 	struct drm_i915_gem_object *hws;
 	struct drm_i915_gem_object *obj;
@@ -25,13 +24,12 @@ struct igt_spinner {
 	void *seqno;
 };
 
-int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915);
+int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt);
 void igt_spinner_fini(struct igt_spinner *spin);
 
 struct i915_request *
 igt_spinner_create_request(struct igt_spinner *spin,
-			   struct i915_gem_context *ctx,
-			   struct intel_engine_cs *engine,
+			   struct intel_context *ce,
 			   u32 arbitration_command);
 void igt_spinner_end(struct igt_spinner *spin);
 