drm/i915/guc: Update intel_gt_wait_for_idle to work with GuC
When running the GuC the GPU can't be considered idle if the GuC still has
contexts pinned. As such, a call has been added in intel_gt_wait_for_idle
to idle the UC and in turn the GuC by waiting for the number of unpinned
contexts to go to zero.

v2: rtimeout -> remaining_timeout
v3: Drop unnecessary includes, guc_submission_busy_loop ->
    guc_submission_send_busy_loop, drop negative timeout trick, move a
    refactor of guc_context_unpin to earlier patch (John H)
v4: Add stddef.h back into intel_gt_requests.h, short circuit idle
    function if not in GuC submission mode

Cc: John Harrison <john.c.harrison@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210721215101.139794-16-matthew.brost@intel.com
Parent: f4eb1f3fe9
Commit: b97060a99b
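At a high level, the patch pairs an atomic counter with a wait queue:
every H2G request that expects a G2H reply bumps
guc->outstanding_submission_g2h, every G2H completion handler drops it,
and the idle path sleeps until the counter drains to zero. A condensed
sketch of that flow, with error handling and locking trimmed (the real
implementations are in the hunks below):

	/* send side: count each H2G that will produce a G2H reply */
	err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
	if (!err && g2h_len_dw)
		atomic_inc(&guc->outstanding_submission_g2h);

	/* G2H handlers: uncount, waking waiters on the transition to zero */
	if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
		wake_up_all(&guc->ct.wq);

	/* idle path: after retiring requests, sleep on guc->ct.wq until the
	 * counter reads zero or the remaining timeout expires */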
@@ -645,7 +645,8 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
 		goto insert;
 
 	/* Attempt to reap some mmap space from dead objects */
-	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
+	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT,
+					       NULL);
 	if (err)
 		goto err;
 
@@ -585,6 +585,25 @@ static void __intel_gt_disable(struct intel_gt *gt)
 	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
 }
 
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
+{
+	long remaining_timeout;
+
+	/* If the device is asleep, we have no requests outstanding */
+	if (!intel_gt_pm_is_awake(gt))
+		return 0;
+
+	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
+							   &remaining_timeout)) > 0) {
+		cond_resched();
+		if (signal_pending(current))
+			return -EINTR;
+	}
+
+	return timeout ? timeout : intel_uc_wait_for_idle(&gt->uc,
+							  remaining_timeout);
+}
+
 int intel_gt_init(struct intel_gt *gt)
 {
 	int err;
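The rewritten intel_gt_wait_for_idle() keeps the existing contract: 0 means
the GT (and now the GuC) went idle, -EINTR means a pending signal aborted
the wait, and other negative values are errors propagated from retiring or
from the uC wait. A hypothetical caller, illustrative only and not part of
this patch:

	/* Give the GT up to one second to go fully idle. */
	err = intel_gt_wait_for_idle(gt, HZ);
	if (err)
		return err; /* -EINTR on signal, -ETIME if the GuC stayed busy */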
@@ -48,6 +48,8 @@ void intel_gt_driver_release(struct intel_gt *gt);
 
 void intel_gt_driver_late_release(struct intel_gt *gt);
 
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
+
 void intel_gt_check_and_clear_faults(struct intel_gt *gt);
 void intel_gt_clear_error_registers(struct intel_gt *gt,
 				    intel_engine_mask_t engine_mask);
@@ -130,7 +130,8 @@ void intel_engine_fini_retire(struct intel_engine_cs *engine)
 	GEM_BUG_ON(engine->retire);
 }
 
-long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout,
+				      long *remaining_timeout)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
 	struct intel_timeline *tl, *tn;
@@ -195,24 +196,12 @@ out_active:	spin_lock(&timelines->lock);
 	if (flush_submission(gt, timeout)) /* Wait, there's more! */
 		active_count++;
 
+	if (remaining_timeout)
+		*remaining_timeout = timeout;
+
 	return active_count ? timeout : 0;
 }
 
-int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
-{
-	/* If the device is asleep, we have no requests outstanding */
-	if (!intel_gt_pm_is_awake(gt))
-		return 0;
-
-	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
-		cond_resched();
-		if (signal_pending(current))
-			return -EINTR;
-	}
-
-	return timeout;
-}
-
 static void retire_work_handler(struct work_struct *work)
 {
 	struct intel_gt *gt =
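Note the plumbing: rather than giving the GuC wait a budget of its own,
intel_gt_retire_requests_timeout() now reports whatever is left of the
caller's timeout through the optional remaining_timeout out-parameter, and
intel_gt_wait_for_idle() hands exactly that leftover to
intel_uc_wait_for_idle(). Callers that don't need the leftover pass NULL,
as the mmap_offset_attach() and mock_device_flush() hunks do.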
@@ -6,14 +6,17 @@
 #ifndef INTEL_GT_REQUESTS_H
 #define INTEL_GT_REQUESTS_H
 
+#include <stddef.h>
+
 struct intel_engine_cs;
 struct intel_gt;
 struct intel_timeline;
 
-long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout);
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout,
+				      long *remaining_timeout);
 static inline void intel_gt_retire_requests(struct intel_gt *gt)
 {
-	intel_gt_retire_requests_timeout(gt, 0);
+	intel_gt_retire_requests_timeout(gt, 0, NULL);
 }
 
 void intel_engine_init_retire(struct intel_engine_cs *engine);
@@ -21,8 +24,6 @@ void intel_engine_add_retire(struct intel_engine_cs *engine,
 			     struct intel_timeline *tl);
 void intel_engine_fini_retire(struct intel_engine_cs *engine);
 
-int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
-
 void intel_gt_init_requests(struct intel_gt *gt);
 void intel_gt_park_requests(struct intel_gt *gt);
 void intel_gt_unpark_requests(struct intel_gt *gt);
@@ -39,6 +39,8 @@ struct intel_guc {
 	spinlock_t irq_lock;
 	unsigned int msg_enabled_mask;
 
+	atomic_t outstanding_submission_g2h;
+
 	struct {
 		void (*reset)(struct intel_guc *guc);
 		void (*enable)(struct intel_guc *guc);
@@ -245,6 +247,8 @@ static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
 	spin_unlock_irq(&guc->irq_lock);
 }
 
+int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout);
+
 int intel_guc_reset_engine(struct intel_guc *guc,
 			   struct intel_engine_cs *engine);
 
@@ -109,6 +109,7 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct)
 	INIT_LIST_HEAD(&ct->requests.incoming);
 	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
 	tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
+	init_waitqueue_head(&ct->wq);
 }
 
 static inline const char *guc_ct_buffer_type_to_str(u32 type)
@@ -10,6 +10,7 @@
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 #include <linux/ktime.h>
+#include <linux/wait.h>
 
 #include "intel_guc_fwif.h"
 
@@ -68,6 +69,9 @@ struct intel_guc_ct {
 
 	struct tasklet_struct receive_tasklet;
 
+	/** @wq: wait queue for g2h channel */
+	wait_queue_head_t wq;
+
 	struct {
 		u16 last_fence; /* last fence used to send request */
 
@@ -252,6 +252,72 @@ static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
 	xa_store_irq(&guc->context_lookup, id, ce, GFP_ATOMIC);
 }
 
+static int guc_submission_send_busy_loop(struct intel_guc *guc,
+					 const u32 *action,
+					 u32 len,
+					 u32 g2h_len_dw,
+					 bool loop)
+{
+	int err;
+
+	err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
+
+	if (!err && g2h_len_dw)
+		atomic_inc(&guc->outstanding_submission_g2h);
+
+	return err;
+}
+
+static int guc_wait_for_pending_msg(struct intel_guc *guc,
+				    atomic_t *wait_var,
+				    bool interruptible,
+				    long timeout)
+{
+	const int state = interruptible ?
+		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+	DEFINE_WAIT(wait);
+
+	might_sleep();
+	GEM_BUG_ON(timeout < 0);
+
+	if (!atomic_read(wait_var))
+		return 0;
+
+	if (!timeout)
+		return -ETIME;
+
+	for (;;) {
+		prepare_to_wait(&guc->ct.wq, &wait, state);
+
+		if (!atomic_read(wait_var))
+			break;
+
+		if (signal_pending_state(state, current)) {
+			timeout = -EINTR;
+			break;
+		}
+
+		if (!timeout) {
+			timeout = -ETIME;
+			break;
+		}
+
+		timeout = io_schedule_timeout(timeout);
+	}
+	finish_wait(&guc->ct.wq, &wait);
+
+	return (timeout < 0) ? timeout : 0;
+}
+
+int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
+{
+	if (!intel_uc_uses_guc_submission(&guc_to_gt(guc)->uc))
+		return 0;
+
+	return guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h,
+					true, timeout);
+}
+
 static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
 	int err;
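guc_wait_for_pending_msg() open-codes its wait so that it can choose
interruptible versus uninterruptible sleep at run time and account the
sleep as I/O wait via io_schedule_timeout(). For the interruptible case it
behaves much like the standard helper, shown here for comparison only:

	/* Roughly equivalent, illustrative only: */
	long ret = wait_event_interruptible_timeout(guc->ct.wq,
						    !atomic_read(wait_var),
						    timeout);
	/* ret > 0: counter drained; ret == 0: timed out (-ETIME above);
	 * ret == -ERESTARTSYS: signal (-EINTR above). */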
@@ -278,6 +344,7 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 
 	err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
 	if (!enabled && !err) {
+		atomic_inc(&guc->outstanding_submission_g2h);
 		set_context_enabled(ce);
 	} else if (!enabled) {
 		clr_context_pending_enable(ce);
@@ -735,7 +802,8 @@ static int __guc_action_register_context(struct intel_guc *guc,
 		offset,
 	};
 
-	return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+	return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+					     0, true);
 }
 
 static int register_context(struct intel_context *ce)
@@ -755,8 +823,9 @@ static int __guc_action_deregister_context(struct intel_guc *guc,
 		guc_id,
 	};
 
-	return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action),
-					G2H_LEN_DW_DEREGISTER_CONTEXT, true);
+	return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+					     G2H_LEN_DW_DEREGISTER_CONTEXT,
+					     true);
 }
 
 static int deregister_context(struct intel_context *ce, u32 guc_id)
@@ -901,8 +970,8 @@ static void __guc_context_sched_disable(struct intel_guc *guc,
 
 	intel_context_get(ce);
 
-	intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action),
-				 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
+	guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+				      G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
 }
 
 static u16 prep_context_pending_disable(struct intel_context *ce)
@@ -1444,6 +1513,12 @@ g2h_context_lookup(struct intel_guc *guc, u32 desc_idx)
 	return ce;
 }
 
+static void decr_outstanding_submission_g2h(struct intel_guc *guc)
+{
+	if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
+		wake_up_all(&guc->ct.wq);
+}
+
 int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
 					  const u32 *msg,
 					  u32 len)
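decr_outstanding_submission_g2h() is the single decrement point pairing
with the increments in guc_submission_send_busy_loop() and
guc_add_request(). Because atomic_dec_and_test() returns true only when
the new value is zero, wake_up_all() fires once per drain to idle rather
than on every G2H message.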
@@ -1479,6 +1554,8 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
 		lrc_destroy(&ce->ref);
 	}
 
+	decr_outstanding_submission_g2h(guc);
+
 	return 0;
 }
 
@@ -1527,6 +1604,7 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 	}
 
+	decr_outstanding_submission_g2h(guc);
 	intel_context_put(ce);
 
 	return 0;
@@ -81,6 +81,11 @@ uc_state_checkers(guc, guc_submission);
 #undef uc_state_checkers
 #undef __uc_state_checker
 
+static inline int intel_uc_wait_for_idle(struct intel_uc *uc, long timeout)
+{
+	return intel_guc_wait_for_idle(&uc->guc, timeout);
+}
+
 #define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
 static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
 { \
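intel_uc_wait_for_idle() forwards to the GuC unconditionally; the v4
"short circuit" for platforms not using GuC submission lives inside
intel_guc_wait_for_idle() itself (the intel_uc_uses_guc_submission()
check in the intel_guc_submission.c hunk above), so execlists
configurations return 0 immediately.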
@@ -27,6 +27,7 @@
  */
 
 #include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
 #include "gt/intel_gt_requests.h"
 
 #include "i915_drv.h"
@@ -5,7 +5,7 @@
  */
 
 #include "i915_drv.h"
-#include "gt/intel_gt_requests.h"
+#include "gt/intel_gt.h"
 
 #include "../i915_selftest.h"
 #include "igt_flush_test.h"
@@ -52,7 +52,8 @@ void mock_device_flush(struct drm_i915_private *i915)
 	do {
 		for_each_engine(engine, gt, id)
 			mock_engine_flush(engine);
-	} while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT));
+	} while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT,
+						  NULL));
 }
 
 static void mock_device_release(struct drm_device *dev)