drm/msm: add basic hangcheck/recovery mechanism

A basic, no-frills recovery mechanism in case the gpu gets wedged.  We
could try to be a bit more fancy and restart the next submit after the
one that got wedged, but for now keep it simple.  This is enough to
recover things if, for example, the gpu hangs midway through a piglit
run.

Signed-off-by: Rob Clark <robdclark@gmail.com>
Author: Rob Clark <robdclark@gmail.com>
Date:   2013-08-24 14:20:38 -04:00
Parent: 7198e6b031
Commit: bd6f82d828
5 changed files with 87 additions and 5 deletions


@@ -371,6 +371,7 @@ static const struct adreno_gpu_funcs funcs = {
 		.hw_init = a3xx_hw_init,
 		.pm_suspend = msm_gpu_pm_suspend,
 		.pm_resume = msm_gpu_pm_resume,
+		.recover = adreno_recover,
 		.last_fence = adreno_last_fence,
 		.submit = adreno_submit,
 		.flush = adreno_flush,


@@ -111,6 +111,28 @@ uint32_t adreno_last_fence(struct msm_gpu *gpu)
 	return adreno_gpu->memptrs->fence;
 }
 
+void adreno_recover(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct drm_device *dev = gpu->dev;
+	int ret;
+
+	gpu->funcs->pm_suspend(gpu);
+
+	/* reset ringbuffer: */
+	gpu->rb->cur = gpu->rb->start;
+
+	/* reset completed fence seqno, just discard anything pending: */
+	adreno_gpu->memptrs->fence = gpu->submitted_fence;
+
+	gpu->funcs->pm_resume(gpu);
+	ret = gpu->funcs->hw_init(gpu);
+	if (ret) {
+		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+		/* hmm, oh well? */
+	}
+}
+
 int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		struct msm_file_private *ctx)
 {
@@ -119,8 +141,6 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	struct msm_ringbuffer *ring = gpu->rb;
 	unsigned i, ibs = 0;
 
-	adreno_gpu->last_fence = submit->fence;
-
 	for (i = 0; i < submit->nr_cmds; i++) {
 		switch (submit->cmd[i].type) {
 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
@@ -225,7 +245,7 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
 			adreno_gpu->rev.patchid);
 
 	seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
-			adreno_gpu->last_fence);
+			gpu->submitted_fence);
 	seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
 	seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
 	seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
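
Aside: the key move in adreno_recover() above is the fence fast-forward.
Setting memptrs->fence to gpu->submitted_fence marks everything that was
in flight when the GPU wedged as retired, so fence waiters are released
instead of blocking forever. A minimal user-space sketch of that
bookkeeping (hypothetical names, not driver API):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's fence bookkeeping. */
struct fence_state {
	uint32_t completed;  /* like memptrs->fence: last seqno the GPU retired */
	uint32_t submitted;  /* like gpu->submitted_fence: last seqno queued to the GPU */
};

/* A fence is "done" once the completed seqno has caught up to it. */
static int fence_done(const struct fence_state *f, uint32_t fence)
{
	return f->completed >= fence;
}

/* Recovery discards pending work by fast-forwarding the completed seqno. */
static void recover(struct fence_state *f)
{
	f->completed = f->submitted;
}

int main(void)
{
	struct fence_state f = { .completed = 10, .submitted = 13 };

	printf("fence 12 done before recovery? %d\n", fence_done(&f, 12)); /* 0: stuck at 10 */
	recover(&f);
	printf("fence 12 done after recovery?  %d\n", fence_done(&f, 12)); /* 1: released */
	return 0;
}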


@@ -54,8 +54,6 @@ struct adreno_gpu {
 	uint32_t revn; /* numeric revision name */
 	const struct adreno_gpu_funcs *funcs;
 
-	uint32_t last_fence;
-
 	/* firmware: */
 	const struct firmware *pm4, *pfp;
 
@@ -99,6 +97,7 @@ static inline bool adreno_is_a330(struct adreno_gpu *gpu)
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
 int adreno_hw_init(struct msm_gpu *gpu);
 uint32_t adreno_last_fence(struct msm_gpu *gpu);
+void adreno_recover(struct msm_gpu *gpu);
 int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		struct msm_file_private *ctx);
 void adreno_flush(struct msm_gpu *gpu);


@@ -202,6 +202,51 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
 	return 0;
 }
 
+/*
+ * Hangcheck detection for locked gpu:
+ */
+
+static void recover_worker(struct work_struct *work)
+{
+	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
+	struct drm_device *dev = gpu->dev;
+
+	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+
+	mutex_lock(&dev->struct_mutex);
+	gpu->funcs->recover(gpu);
+	mutex_unlock(&dev->struct_mutex);
+
+	msm_gpu_retire(gpu);
+}
+
+static void hangcheck_timer_reset(struct msm_gpu *gpu)
+{
+	DBG("%s", gpu->name);
+	mod_timer(&gpu->hangcheck_timer,
+			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
+}
+
+static void hangcheck_handler(unsigned long data)
+{
+	struct msm_gpu *gpu = (struct msm_gpu *)data;
+	uint32_t fence = gpu->funcs->last_fence(gpu);
+
+	if (fence != gpu->hangcheck_fence) {
+		/* some progress has been made.. ya! */
+		gpu->hangcheck_fence = fence;
+	} else if (fence < gpu->submitted_fence) {
+		/* no progress and not done.. hung! */
+		struct msm_drm_private *priv = gpu->dev->dev_private;
+		gpu->hangcheck_fence = fence;
+		queue_work(priv->wq, &gpu->recover_work);
+	}
+
+	/* if still more pending work, reset the hangcheck timer: */
+	if (gpu->submitted_fence > gpu->hangcheck_fence)
+		hangcheck_timer_reset(gpu);
+}
+
 /*
  * Cmdstream submission/retirement:
  */
@@ -254,6 +299,8 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
 	submit->fence = ++priv->next_fence;
 
+	gpu->submitted_fence = submit->fence;
+
 	ret = gpu->funcs->submit(gpu, submit, ctx);
 	priv->lastctx = ctx;
 
@@ -276,6 +323,7 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence);
 	}
 
+	hangcheck_timer_reset(gpu);
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
@@ -307,6 +355,10 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
 	INIT_LIST_HEAD(&gpu->active_list);
 	INIT_WORK(&gpu->retire_work, retire_worker);
+	INIT_WORK(&gpu->recover_work, recover_worker);
+
+	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
+			(unsigned long)gpu);
 
 	BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
 
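
Aside: hangcheck_handler() boils down to a three-way decision each tick:
progress was made since the last check (just remember the new fence),
no progress but work is still outstanding (schedule recover_work), or
the GPU is idle (let the timer lapse). A stand-alone sketch of that
decision, with the kernel timer and workqueue replaced by plain function
calls (hypothetical names, for illustration only):

#include <stdint.h>
#include <stdio.h>

enum hang_action { HANG_NONE, HANG_RECOVER };

/*
 * Mirrors the per-tick decision in hangcheck_handler(): compare the fence
 * the GPU last retired against what we saw on the previous tick and
 * against the last submitted fence.
 */
static enum hang_action hangcheck_tick(uint32_t last_fence,
		uint32_t *hangcheck_fence, uint32_t submitted_fence,
		int *rearm_timer)
{
	enum hang_action action = HANG_NONE;

	if (last_fence != *hangcheck_fence) {
		/* some progress has been made.. remember it */
		*hangcheck_fence = last_fence;
	} else if (last_fence < submitted_fence) {
		/* no progress and not done.. hung */
		action = HANG_RECOVER;
	}

	/* keep ticking only while work is still pending: */
	*rearm_timer = (submitted_fence > *hangcheck_fence);

	return action;
}

int main(void)
{
	uint32_t hangcheck_fence = 0;
	int rearm;

	/* tick 1: GPU has retired fence 5 of 8 submitted -> progress */
	printf("%d\n", hangcheck_tick(5, &hangcheck_fence, 8, &rearm)); /* 0 */
	/* tick 2: still at 5 with 8 submitted -> hung, recover */
	printf("%d\n", hangcheck_tick(5, &hangcheck_fence, 8, &rearm)); /* 1 */
	return 0;
}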


@@ -51,6 +51,7 @@ struct msm_gpu_funcs {
 	void (*idle)(struct msm_gpu *gpu);
 	irqreturn_t (*irq)(struct msm_gpu *irq);
 	uint32_t (*last_fence)(struct msm_gpu *gpu);
+	void (*recover)(struct msm_gpu *gpu);
 	void (*destroy)(struct msm_gpu *gpu);
 #ifdef CONFIG_DEBUG_FS
 	/* show GPU status in debugfs: */
@@ -69,6 +70,8 @@ struct msm_gpu {
 
 	/* list of GEM active objects: */
 	struct list_head active_list;
 
+	uint32_t submitted_fence;
+
 	/* worker for handling active-list retiring: */
 	struct work_struct retire_work;
@@ -83,6 +86,13 @@ struct msm_gpu {
 	struct clk *ebi1_clk, *grp_clks[5];
 	uint32_t fast_rate, slow_rate, bus_freq;
 	uint32_t bsc;
+
+	/* Hang Detection: */
+#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
+#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
+	struct timer_list hangcheck_timer;
+	uint32_t hangcheck_fence;
+	struct work_struct recover_work;
 };
 
 static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
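
Aside: the 500ms hangcheck period defined above is converted to jiffies
once, and the timer is re-armed relative to "now" on each tick. A small
kernel-style sketch of that arming pattern (my_gpu and the MY_* names are
hypothetical, not part of the driver):

#include <linux/jiffies.h>
#include <linux/timer.h>

#define MY_HANGCHECK_PERIOD	500 /* ms */
#define MY_HANGCHECK_JIFFIES	msecs_to_jiffies(MY_HANGCHECK_PERIOD)

/* Hypothetical driver state; mirrors the timer_list added to struct msm_gpu. */
struct my_gpu {
	struct timer_list hangcheck_timer;
};

/*
 * Push the next expiry ~500ms out.  round_jiffies_up() rounds the deadline
 * up to a whole second so this timer can be coalesced with others and the
 * CPU takes fewer wakeups; hangcheck does not need exact timing.
 */
static void my_hangcheck_timer_reset(struct my_gpu *gpu)
{
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + MY_HANGCHECK_JIFFIES));
}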