drm/i915/guc: Implement banned contexts for GuC submission

When using GuC submission, if a context gets banned, disable scheduling
and mark all of its in-flight requests as complete.

Cc: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210727002348.97202-25-matthew.brost@intel.com
Matthew Brost 2021-07-26 17:23:39 -07:00, committed by John Harrison
Parent 481d458cae
Commit ae8ac10dfd
8 changed files with 195 additions and 37 deletions
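In brief: the patch funnels all bans through a single helper, intel_context_ban(), which sets the CONTEXT_BANNED flag and then defers to a new backend-specific ->ban() hook (the ring backend skips the context's still-active requests; the GuC backend disables scheduling, drops the preemption timeout to 1 us, and cancels outstanding requests). The sketch below is a minimal standalone model of that dispatch shape only, with stand-in types and a plain flag update in place of the kernel's atomic test_and_set_bit(); it is illustrative, not the kernel code.

/* Simplified model of the ban plumbing introduced below; stand-in types,
 * no locking, no tracing. Compiles standalone with any C compiler.
 */
#include <stdbool.h>
#include <stdio.h>

struct intel_context;
struct i915_request;

/* Each submission backend supplies its own ban handler via this hook. */
struct intel_context_ops {
	void (*ban)(struct intel_context *ce, struct i915_request *rq);
};

struct intel_context {
	const struct intel_context_ops *ops;
	unsigned long flags;
};

#define CONTEXT_BANNED 0

/* Stand-in for test_and_set_bit(CONTEXT_BANNED, &ce->flags). */
static bool intel_context_set_banned(struct intel_context *ce)
{
	bool old = ce->flags & (1UL << CONTEXT_BANNED);

	ce->flags |= 1UL << CONTEXT_BANNED;
	return old;
}

/*
 * Mirror of the new intel_context_ban(): mark the context banned, then
 * let the backend clean up (ring: skip queued requests; GuC: disable
 * scheduling and kick the context off the hardware).
 */
static bool intel_context_ban(struct intel_context *ce,
			      struct i915_request *rq)
{
	bool ret = intel_context_set_banned(ce);

	if (ce->ops->ban)
		ce->ops->ban(ce, rq);
	return ret;
}

static void example_ban_hook(struct intel_context *ce,
			     struct i915_request *rq)
{
	(void)ce;
	(void)rq;
	printf("backend ban hook invoked\n");
}

int main(void)
{
	const struct intel_context_ops ops = { .ban = example_ban_hook };
	struct intel_context ce = { .ops = &ops, .flags = 0 };

	intel_context_ban(&ce, NULL);	/* first ban: returns false */
	return 0;
}

The two ops tables in the diff below (ring_context_ops and guc_context_ops / virtual_guc_context_ops) are where the real hook is wired up.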

drivers/gpu/drm/i915/gem/i915_gem_context.c

@@ -1084,7 +1084,7 @@ static void kill_engines(struct i915_gem_engines *engines, bool ban)
 	for_each_gem_engine(ce, engines, it) {
 		struct intel_engine_cs *engine;
 
-		if (ban && intel_context_set_banned(ce))
+		if (ban && intel_context_ban(ce, NULL))
 			continue;
 
 		/*

drivers/gpu/drm/i915/gt/intel_context.h

@@ -16,6 +16,7 @@
 #include "intel_engine_types.h"
 #include "intel_ring_types.h"
 #include "intel_timeline_types.h"
+#include "i915_trace.h"
 
 #define CE_TRACE(ce, fmt, ...) do {					\
 	const struct intel_context *ce__ = (ce);			\
@@ -243,6 +244,18 @@ static inline bool intel_context_set_banned(struct intel_context *ce)
 	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
 }
 
+static inline bool intel_context_ban(struct intel_context *ce,
+				     struct i915_request *rq)
+{
+	bool ret = intel_context_set_banned(ce);
+
+	trace_intel_context_ban(ce);
+	if (ce->ops->ban)
+		ce->ops->ban(ce, rq);
+
+	return ret;
+}
+
 static inline bool
 intel_context_force_single_submission(const struct intel_context *ce)
 {

drivers/gpu/drm/i915/gt/intel_context_types.h

@@ -35,6 +35,8 @@ struct intel_context_ops {
 	int (*alloc)(struct intel_context *ce);
 
+	void (*ban)(struct intel_context *ce, struct i915_request *rq);
+
 	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
 	int (*pin)(struct intel_context *ce, void *vaddr);
 	void (*unpin)(struct intel_context *ce);

drivers/gpu/drm/i915/gt/intel_reset.c

@@ -22,7 +22,6 @@
 #include "intel_reset.h"
 
 #include "uc/intel_guc.h"
-#include "uc/intel_guc_submission.h"
 
 #define RESET_MAX_RETRIES 3
 
@@ -39,21 +38,6 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
 	intel_uncore_rmw_fw(uncore, reg, clr, 0);
 }
 
-static void skip_context(struct i915_request *rq)
-{
-	struct intel_context *hung_ctx = rq->context;
-
-	list_for_each_entry_from_rcu(rq, &hung_ctx->timeline->requests, link) {
-		if (!i915_request_is_active(rq))
-			return;
-
-		if (rq->context == hung_ctx) {
-			i915_request_set_error_once(rq, -EIO);
-			__i915_request_skip(rq);
-		}
-	}
-}
-
 static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
 {
 	struct drm_i915_file_private *file_priv = ctx->file_priv;
@@ -88,10 +72,8 @@ static bool mark_guilty(struct i915_request *rq)
 	bool banned;
 	int i;
 
-	if (intel_context_is_closed(rq->context)) {
-		intel_context_set_banned(rq->context);
+	if (intel_context_is_closed(rq->context))
 		return true;
-	}
 
 	rcu_read_lock();
 	ctx = rcu_dereference(rq->context->gem_context);
@@ -123,11 +105,9 @@ static bool mark_guilty(struct i915_request *rq)
 	banned = !i915_gem_context_is_recoverable(ctx);
 	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
 		banned = true;
-	if (banned) {
+	if (banned)
 		drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
 			ctx->name, atomic_read(&ctx->guilty_count));
-		intel_context_set_banned(rq->context);
-	}
 
 	client_mark_guilty(ctx, banned);
 
@@ -149,6 +129,8 @@ static void mark_innocent(struct i915_request *rq)
 
 void __i915_request_reset(struct i915_request *rq, bool guilty)
 {
+	bool banned = false;
+
 	RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
 	GEM_BUG_ON(__i915_request_is_complete(rq));
 
@@ -156,13 +138,15 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
 	if (guilty) {
 		i915_request_set_error_once(rq, -EIO);
 		__i915_request_skip(rq);
-		if (mark_guilty(rq) && !intel_engine_uses_guc(rq->engine))
-			skip_context(rq);
+		banned = mark_guilty(rq);
 	} else {
 		i915_request_set_error_once(rq, -EAGAIN);
 		mark_innocent(rq);
 	}
 	rcu_read_unlock();
+
+	if (banned)
+		intel_context_ban(rq->context, rq);
 }
 
 static bool i915_in_reset(struct pci_dev *pdev)

drivers/gpu/drm/i915/gt/intel_ring_submission.c

@@ -586,9 +586,29 @@ static void ring_context_reset(struct intel_context *ce)
 	clear_bit(CONTEXT_VALID_BIT, &ce->flags);
 }
 
+static void ring_context_ban(struct intel_context *ce,
+			     struct i915_request *rq)
+{
+	struct intel_engine_cs *engine;
+
+	if (!rq || !i915_request_is_active(rq))
+		return;
+
+	engine = rq->engine;
+	lockdep_assert_held(&engine->sched_engine->lock);
+	list_for_each_entry_continue(rq, &engine->sched_engine->requests,
+				     sched.link)
+		if (rq->context == ce) {
+			i915_request_set_error_once(rq, -EIO);
+			__i915_request_skip(rq);
+		}
+}
+
 static const struct intel_context_ops ring_context_ops = {
 	.alloc = ring_context_alloc,
 
+	.ban = ring_context_ban,
+
 	.pre_pin = ring_context_pre_pin,
 	.pin = ring_context_pin,
 	.unpin = ring_context_unpin,

drivers/gpu/drm/i915/gt/uc/intel_guc.h

@@ -281,6 +281,8 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine);
 
 int intel_guc_global_policies_update(struct intel_guc *guc);
 
+void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);
+
 void intel_guc_submission_reset_prepare(struct intel_guc *guc);
 void intel_guc_submission_reset(struct intel_guc *guc, bool stalled);
 void intel_guc_submission_reset_finish(struct intel_guc *guc);

drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c

@@ -125,6 +125,7 @@ static inline void clr_context_pending_enable(struct intel_context *ce)
 #define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER	BIT(0)
 #define SCHED_STATE_DESTROYED				BIT(1)
 #define SCHED_STATE_PENDING_DISABLE			BIT(2)
+#define SCHED_STATE_BANNED				BIT(3)
 static inline void init_sched_state(struct intel_context *ce)
 {
 	/* Only should be called from guc_lrc_desc_pin() */
@@ -185,6 +186,23 @@ static inline void clr_context_pending_disable(struct intel_context *ce)
 	ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE;
 }
 
+static inline bool context_banned(struct intel_context *ce)
+{
+	return ce->guc_state.sched_state & SCHED_STATE_BANNED;
+}
+
+static inline void set_context_banned(struct intel_context *ce)
+{
+	lockdep_assert_held(&ce->guc_state.lock);
+	ce->guc_state.sched_state |= SCHED_STATE_BANNED;
+}
+
+static inline void clr_context_banned(struct intel_context *ce)
+{
+	lockdep_assert_held(&ce->guc_state.lock);
+	ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
+}
+
 static inline bool context_guc_id_invalid(struct intel_context *ce)
 {
 	return ce->guc_id == GUC_INVALID_LRC_ID;
@@ -357,13 +375,23 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop);
 
 static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
-	int err;
+	int err = 0;
 	struct intel_context *ce = rq->context;
 	u32 action[3];
 	int len = 0;
 	u32 g2h_len_dw = 0;
 	bool enabled;
 
+	/*
+	 * Corner case where requests were sitting in the priority list or a
+	 * request resubmitted after the context was banned.
+	 */
+	if (unlikely(intel_context_is_banned(ce))) {
+		i915_request_put(i915_request_mark_eio(rq));
+		intel_engine_signal_breadcrumbs(ce->engine);
+		goto out;
+	}
+
 	GEM_BUG_ON(!atomic_read(&ce->guc_id_ref));
 	GEM_BUG_ON(context_guc_id_invalid(ce));
 
@@ -399,6 +427,8 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 		clr_context_pending_enable(ce);
 		intel_context_put(ce);
 	}
+	if (likely(!err))
+		trace_i915_request_guc_submit(rq);
 
 out:
 	return err;
@@ -463,7 +493,6 @@ resubmit:
 				guc->stalled_request = last;
 				return false;
 			}
-			trace_i915_request_guc_submit(last);
 		}
 
 	guc->stalled_request = NULL;
@@ -502,12 +531,13 @@ static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
 static void __guc_context_destroy(struct intel_context *ce);
 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
 static void guc_signal_context_fence(struct intel_context *ce);
+static void guc_cancel_context_requests(struct intel_context *ce);
 
 static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
 {
 	struct intel_context *ce;
 	unsigned long index, flags;
-	bool pending_disable, pending_enable, deregister, destroyed;
+	bool pending_disable, pending_enable, deregister, destroyed, banned;
 
 	xa_for_each(&guc->context_lookup, index, ce) {
 		/* Flush context */
@@ -525,6 +555,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
 		pending_enable = context_pending_enable(ce);
 		pending_disable = context_pending_disable(ce);
 		deregister = context_wait_for_deregister_to_register(ce);
+		banned = context_banned(ce);
 		init_sched_state(ce);
 
 		if (pending_enable || destroyed || deregister) {
@@ -542,6 +573,10 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
 		/* Not mutualy exclusive with above if statement. */
 		if (pending_disable) {
 			guc_signal_context_fence(ce);
+			if (banned) {
+				guc_cancel_context_requests(ce);
+				intel_engine_signal_breadcrumbs(ce->engine);
+			}
 			intel_context_sched_disable_unpin(ce);
 			atomic_dec(&guc->outstanding_submission_g2h);
 			intel_context_put(ce);
@@ -661,6 +696,9 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
 {
 	struct intel_engine_cs *engine = __context_to_physical_engine(ce);
 
+	if (intel_context_is_banned(ce))
+		return;
+
 	GEM_BUG_ON(!intel_context_is_pinned(ce));
 
 	/*
@@ -731,6 +769,8 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
 	struct i915_request *rq;
 	u32 head;
 
+	intel_context_get(ce);
+
 	/*
 	 * GuC will implicitly mark the context as non-schedulable
 	 * when it sends the reset notification. Make sure our state
@@ -756,6 +796,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
 out_replay:
 	guc_reset_state(ce, head, stalled);
 	__unwind_incomplete_requests(ce);
+	intel_context_put(ce);
 }
 
 void intel_guc_submission_reset(struct intel_guc *guc, bool stalled)
@@ -940,8 +981,6 @@ static int guc_bypass_tasklet_submit(struct intel_guc *guc,
 	ret = guc_add_request(guc, rq);
 	if (ret == -EBUSY)
 		guc->stalled_request = rq;
-	else
-		trace_i915_request_guc_submit(rq);
 
 	if (unlikely(ret == -EPIPE))
 		disable_submission(guc);
@@ -1344,13 +1383,77 @@ static u16 prep_context_pending_disable(struct intel_context *ce)
 	return ce->guc_id;
 }
 
+static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
+						 u16 guc_id,
+						 u32 preemption_timeout)
+{
+	u32 action[] = {
+		INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
+		guc_id,
+		preemption_timeout
+	};
+
+	intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+}
+
+static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
+{
+	struct intel_guc *guc = ce_to_guc(ce);
+	struct intel_runtime_pm *runtime_pm =
+		&ce->engine->gt->i915->runtime_pm;
+	intel_wakeref_t wakeref;
+	unsigned long flags;
+
+	guc_flush_submissions(guc);
+
+	spin_lock_irqsave(&ce->guc_state.lock, flags);
+	set_context_banned(ce);
+
+	if (submission_disabled(guc) ||
+	    (!context_enabled(ce) && !context_pending_disable(ce))) {
+		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+		guc_cancel_context_requests(ce);
+		intel_engine_signal_breadcrumbs(ce->engine);
+	} else if (!context_pending_disable(ce)) {
+		u16 guc_id;
+
+		/*
+		 * We add +2 here as the schedule disable complete CTB handler
+		 * calls intel_context_sched_disable_unpin (-2 to pin_count).
+		 */
+		atomic_add(2, &ce->pin_count);
+
+		guc_id = prep_context_pending_disable(ce);
+		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+		/*
+		 * In addition to disabling scheduling, set the preemption
+		 * timeout to the minimum value (1 us) so the banned context
+		 * gets kicked off the HW ASAP.
+		 */
+		with_intel_runtime_pm(runtime_pm, wakeref) {
+			__guc_context_set_preemption_timeout(guc, guc_id, 1);
+			__guc_context_sched_disable(guc, ce, guc_id);
+		}
+	} else {
+		if (!context_guc_id_invalid(ce))
+			with_intel_runtime_pm(runtime_pm, wakeref)
+				__guc_context_set_preemption_timeout(guc,
+								     ce->guc_id,
+								     1);
+		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+	}
+}
+
 static void guc_context_sched_disable(struct intel_context *ce)
 {
 	struct intel_guc *guc = ce_to_guc(ce);
-	struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
 	unsigned long flags;
-	u16 guc_id;
+	struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
 	intel_wakeref_t wakeref;
+	u16 guc_id;
+	bool enabled;
 
 	if (submission_disabled(guc) || context_guc_id_invalid(ce) ||
 	    !lrc_desc_registered(guc, ce->guc_id)) {
@@ -1364,14 +1467,22 @@ static void guc_context_sched_disable(struct intel_context *ce)
 	spin_lock_irqsave(&ce->guc_state.lock, flags);
 
 	/*
-	 * We have to check if the context has been pinned again as another pin
-	 * operation is allowed to pass this function. Checking the pin count,
-	 * within ce->guc_state.lock, synchronizes this function with
+	 * We have to check if the context has been disabled by another thread.
+	 * We also have to check if the context has been pinned again as another
+	 * pin operation is allowed to pass this function. Checking the pin
+	 * count, within ce->guc_state.lock, synchronizes this function with
	 * guc_request_alloc ensuring a request doesn't slip through the
	 * 'context_pending_disable' fence. Checking within the spin lock (can't
	 * sleep) ensures another process doesn't pin this context and generate
	 * a request before we set the 'context_pending_disable' flag here.
	 */
+	enabled = context_enabled(ce);
+	if (unlikely(!enabled || submission_disabled(guc))) {
+		if (enabled)
+			clr_context_enabled(ce);
+		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+		goto unpin;
+	}
 	if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 		return;
@@ -1529,6 +1640,8 @@ static const struct intel_context_ops guc_context_ops = {
 	.unpin = guc_context_unpin,
 	.post_unpin = guc_context_post_unpin,
 
+	.ban = guc_context_ban,
+
 	.enter = intel_context_enter_engine,
 	.exit = intel_context_exit_engine,
 
@@ -1722,6 +1835,8 @@ static const struct intel_context_ops virtual_guc_context_ops = {
 	.unpin = guc_context_unpin,
 	.post_unpin = guc_context_post_unpin,
 
+	.ban = guc_context_ban,
+
 	.enter = guc_virtual_context_enter,
 	.exit = guc_virtual_context_exit,
 
@@ -2164,6 +2279,8 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
 	if (context_pending_enable(ce)) {
 		clr_context_pending_enable(ce);
 	} else if (context_pending_disable(ce)) {
+		bool banned;
+
 		/*
 		 * Unpin must be done before __guc_signal_context_fence,
 		 * otherwise a race exists between the requests getting
@@ -2174,9 +2291,16 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
 		intel_context_sched_disable_unpin(ce);
 
 		spin_lock_irqsave(&ce->guc_state.lock, flags);
+		banned = context_banned(ce);
+		clr_context_banned(ce);
 		clr_context_pending_disable(ce);
 		__guc_signal_context_fence(ce);
 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+		if (banned) {
+			guc_cancel_context_requests(ce);
+			intel_engine_signal_breadcrumbs(ce->engine);
+		}
 	}
 
 	decr_outstanding_submission_g2h(guc);
@@ -2211,9 +2335,12 @@ static void guc_handle_context_reset(struct intel_guc *guc,
 				     struct intel_context *ce)
 {
 	trace_intel_context_reset(ce);
-	capture_error_state(guc, ce);
-	guc_context_replay(ce);
+
+	if (likely(!intel_context_is_banned(ce))) {
+		capture_error_state(guc, ce);
+		guc_context_replay(ce);
+	}
 }
 
 int intel_guc_context_reset_process_msg(struct intel_guc *guc,
 					const u32 *msg, u32 len)

drivers/gpu/drm/i915/i915_trace.h

@@ -925,6 +925,11 @@ DEFINE_EVENT(intel_context, intel_context_reset,
 	     TP_ARGS(ce)
 );
 
+DEFINE_EVENT(intel_context, intel_context_ban,
+	     TP_PROTO(struct intel_context *ce),
+	     TP_ARGS(ce)
+);
+
 DEFINE_EVENT(intel_context, intel_context_register,
 	     TP_PROTO(struct intel_context *ce),
 	     TP_ARGS(ce)
@@ -1017,6 +1022,11 @@ trace_intel_context_reset(struct intel_context *ce)
 {
 }
 
+static inline void
+trace_intel_context_ban(struct intel_context *ce)
+{
+}
+
 static inline void
 trace_intel_context_register(struct intel_context *ce)
 {