Merge tag 'drm-intel-fixes-2017-10-18-1' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes
Fix for stable:
- Fix DDI translation tables for BDW (Chris).

Critical fix:
- Fix GPU Hang on GVT (Changbin).

Other fixes:
- Fix eviction when GGTT is idle (Chris).
- CNL PLL fixes (Rodrigo).
- Fix pwrite into shmemfs (Chris).
- Mask bits for BXT and CHV L3 Workaround WaProgramL3SqcReg1Default (Oscar).

* tag 'drm-intel-fixes-2017-10-18-1' of git://anongit.freedesktop.org/drm/drm-intel:
  drm/i915: Use a mask when applying WaProgramL3SqcReg1Default
  drm/i915: Report -EFAULT before pwrite fast path into shmemfs
  drm/i915/cnl: Fix PLL initialization for HDMI.
  drm/i915/cnl: Fix PLL mapping.
  drm/i915: Use bdw_ddi_translations_fdi for Broadwell
  drm/i915: Fix eviction when the GGTT is idle but full
  drm/i915/gvt: Fix GPU hang after reusing vGPU instance across different guest OS
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 
 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
-	struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
-	int ring_id;
-
 	kfree(vgpu->sched_data);
 	vgpu->sched_data = NULL;
-
-	spin_lock_bh(&scheduler->mmio_context_lock);
-	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
-		if (scheduler->engine_owner[ring_id] == vgpu) {
-			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
-			scheduler->engine_owner[ring_id] = NULL;
-		}
-	}
-	spin_unlock_bh(&scheduler->mmio_context_lock);
 }
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt_workload_scheduler *scheduler =
 		&vgpu->gvt->scheduler;
+	int ring_id;
 
 	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		scheduler->need_reschedule = true;
 		scheduler->current_vgpu = NULL;
 	}
+
+	spin_lock_bh(&scheduler->mmio_context_lock);
+	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+		if (scheduler->engine_owner[ring_id] == vgpu) {
+			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+			scheduler->engine_owner[ring_id] = NULL;
+		}
+	}
+	spin_unlock_bh(&scheduler->mmio_context_lock);
 }
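The two GVT scheduler hunks above move the per-engine MMIO context switch out of tbs_sched_clean_vgpu() and into intel_vgpu_stop_schedule(): an engine still owned by a vGPU must be handed back to the host while that vGPU is being stopped, before a new guest can reuse the instance and inherit stale engine state. A minimal standalone sketch of the pattern, using pthreads and hypothetical names (struct vgpu, struct scheduler, switch_mmio_to_host) in place of the real GVT types:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define NUM_ENGINES 5

struct vgpu { int id; };

struct scheduler {
	pthread_mutex_t lock;	/* plays the role of mmio_context_lock */
	struct vgpu *engine_owner[NUM_ENGINES];
};

/* Hypothetical stand-in for intel_gvt_switch_mmio(vgpu, NULL, ring_id). */
static void switch_mmio_to_host(int ring_id)
{
	printf("engine %d: mmio state switched back to host\n", ring_id);
}

/*
 * Runs while the vGPU is being stopped, not at final cleanup: by
 * cleanup time a new guest may already have reused the instance,
 * so any engine this vGPU still owns is released here, under the lock.
 */
static void stop_schedule(struct scheduler *s, struct vgpu *v)
{
	pthread_mutex_lock(&s->lock);
	for (int ring_id = 0; ring_id < NUM_ENGINES; ring_id++) {
		if (s->engine_owner[ring_id] == v) {
			switch_mmio_to_host(ring_id);
			s->engine_owner[ring_id] = NULL;
		}
	}
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct vgpu v = { .id = 1 };
	struct scheduler s = { .lock = PTHREAD_MUTEX_INITIALIZER };

	s.engine_owner[0] = &v;	/* vGPU 1 owns engine 0 */
	stop_schedule(&s, &v);
	return 0;
}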
@@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
 	if (READ_ONCE(obj->mm.pages))
 		return -ENODEV;
 
+	if (obj->mm.madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
 	/* Before the pages are instantiated the object is treated as being
 	 * in the CPU domain. The pages will be clflushed as required before
 	 * use, and we can freely write into the pages directly. If userspace
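The hunk above makes the shmemfs fast path report the same error the slow path would: an object marked DONTNEED must fail with -EFAULT before any optimistic write is attempted. A compilable sketch of that check ordering, with hypothetical stand-in types (struct object, enum madv) rather than the real GEM structures:

#include <errno.h>
#include <stdio.h>

enum madv { MADV_WILLNEED, MADV_DONTNEED };

struct object {
	void *pages;	/* non-NULL once backing pages are instantiated */
	enum madv madv;
};

/* Mirrors the patched fast path: bail to the general path if pages
 * already exist, and report -EFAULT for a purged object *before*
 * writing, matching what the slow path would return later. */
static int pwrite_fast(struct object *obj)
{
	if (obj->pages)
		return -ENODEV;	/* let the caller take the slow path */

	if (obj->madv != MADV_WILLNEED)
		return -EFAULT;	/* consistent error, fast or slow */

	/* ... safe to instantiate pages and write directly ... */
	return 0;
}

int main(void)
{
	struct object purged = { .pages = NULL, .madv = MADV_DONTNEED };

	printf("purged object: %d\n", pwrite_fast(&purged));	/* -14 (EFAULT) */
	return 0;
}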
@@ -33,21 +33,20 @@
 #include "intel_drv.h"
 #include "i915_trace.h"
 
-static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
-	for_each_engine(engine, dev_priv, id) {
-		struct intel_timeline *tl;
+	if (i915->gt.active_requests)
+		return false;
 
-		tl = &ggtt->base.timeline.engine[engine->id];
-		if (i915_gem_active_isset(&tl->last_request))
-			return false;
-	}
+	for_each_engine(engine, i915, id) {
+		if (engine->last_retired_context != i915->kernel_context)
+			return false;
+	}
 
-	return true;
+	return true;
 }
 
 static int ggtt_flush(struct drm_i915_private *i915)
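The rewritten ggtt_is_idle() above replaces the per-timeline last-request scan with a two-part test: no requests in flight, and every engine's last retired context is the perma-pinned kernel context. A self-contained sketch of that predicate, with hypothetical types (struct gpu, struct engine, NUM_ENGINES) standing in for the driver's:

#include <stdbool.h>
#include <stdio.h>

#define NUM_ENGINES 5

struct engine { const void *last_retired_context; };

struct gpu {
	int active_requests;
	const void *kernel_context;
	struct engine engine[NUM_ENGINES];
};

/* Idle means nothing is in flight AND each engine last ran the
 * kernel context; a user context still resident on an engine keeps
 * its GGTT pinning and therefore counts as not-idle. */
static bool ggtt_is_idle(const struct gpu *gpu)
{
	if (gpu->active_requests)
		return false;

	for (int id = 0; id < NUM_ENGINES; id++)
		if (gpu->engine[id].last_retired_context != gpu->kernel_context)
			return false;

	return true;
}

int main(void)
{
	static const int kctx;	/* dummy kernel context */
	struct gpu gpu = { .active_requests = 0, .kernel_context = &kctx };

	for (int id = 0; id < NUM_ENGINES; id++)
		gpu.engine[id].last_retired_context = &kctx;

	printf("idle: %d\n", ggtt_is_idle(&gpu));	/* 1 */
	return 0;
}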
@@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
 				    min_size, alignment, cache_level,
 				    start, end, mode);
 
-	/* Retire before we search the active list. Although we have
+	/*
+	 * Retire before we search the active list. Although we have
 	 * reasonable accuracy in our retirement lists, we may have
 	 * a stray pin (preventing eviction) that can only be resolved by
 	 * retiring.
@@ -182,7 +182,8 @@ search_again:
 		BUG_ON(ret);
 	}
 
-	/* Can we unpin some objects such as idle hw contents,
+	/*
+	 * Can we unpin some objects such as idle hw contents,
 	 * or pending flips? But since only the GGTT has global entries
 	 * such as scanouts, rinbuffers and contexts, we can skip the
 	 * purge when inspecting per-process local address spaces.
@@ -190,19 +191,33 @@ search_again:
 	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
 		return -ENOSPC;
 
-	if (ggtt_is_idle(dev_priv)) {
-		/* If we still have pending pageflip completions, drop
-		 * back to userspace to give our workqueues time to
-		 * acquire our locks and unpin the old scanouts.
-		 */
-		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
-	}
-
-	ret = ggtt_flush(dev_priv);
-	if (ret)
-		return ret;
-
-	goto search_again;
+	/*
+	 * Not everything in the GGTT is tracked via VMA using
+	 * i915_vma_move_to_active(), otherwise we could evict as required
+	 * with minimal stalling. Instead we are forced to idle the GPU and
+	 * explicitly retire outstanding requests which will then remove
+	 * the pinning for active objects such as contexts and ring,
+	 * enabling us to evict them on the next iteration.
+	 *
+	 * To ensure that all user contexts are evictable, we perform
+	 * a switch to the perma-pinned kernel context. This all also gives
+	 * us a termination condition, when the last retired context is
+	 * the kernel's there is no more we can evict.
+	 */
+	if (!ggtt_is_idle(dev_priv)) {
+		ret = ggtt_flush(dev_priv);
+		if (ret)
+			return ret;
+
+		goto search_again;
+	}
+
+	/*
+	 * If we still have pending pageflip completions, drop
+	 * back to userspace to give our workqueues time to
+	 * acquire our locks and unpin the old scanouts.
+	 */
+	return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
 
 found:
 	/* drm_mm doesn't allow any other other operations while
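The reordering above gives eviction a real termination condition: flush and retire (switching user contexts out for the kernel context) and retry until ggtt_is_idle() holds; only once idle does the pending-pageflip check decide between -EAGAIN and -ENOSPC. An outline of that control flow, where search_inactive_list and has_pending_fb_unpin are hypothetical stubs for the driver's search loop and intel_has_pending_fb_unpin():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stubs standing in for the driver helpers above. */
static bool search_inactive_list(void) { return false; }
static bool ggtt_is_idle(void)         { return true; }
static int  ggtt_flush(void)           { return 0; }
static bool has_pending_fb_unpin(void) { return false; }

static int evict_something(bool is_ggtt, bool nonblock)
{
	for (;;) {
		if (search_inactive_list())
			return 0;	/* found room */

		if (!is_ggtt || nonblock)
			return -ENOSPC;	/* nothing global left to purge */

		if (!ggtt_is_idle()) {
			int ret = ggtt_flush();	/* idle the GPU, retire requests */
			if (ret)
				return ret;
			continue;	/* contexts/rings are now evictable */
		}

		/* Idle but still full: let the workqueues unpin old
		 * scanouts before we report out-of-space. */
		return has_pending_fb_unpin() ? -EAGAIN : -ENOSPC;
	}
}

int main(void)
{
	printf("evict: %d\n", evict_something(true, false));	/* -28 (ENOSPC) */
	return 0;
}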
@ -6998,6 +6998,7 @@ enum {
|
|||
*/
|
||||
#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
|
||||
#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
|
||||
#define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14))
|
||||
|
||||
#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
|
||||
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
|
||||
|
|
|
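For reference, the new L3_PRIO_CREDITS_MASK covers exactly the two fields the existing macros encode: general-priority credits in bits 23:19 and high-priority credits in bits 18:14 (each macro halves its argument before shifting). A small worked example using the macro definitions from the hunk above; the register value is an arbitrary illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define L3_GENERAL_PRIO_CREDITS(x)	(((x) >> 1) << 19)
#define L3_HIGH_PRIO_CREDITS(x)		(((x) >> 1) << 14)
#define L3_PRIO_CREDITS_MASK		((0x1f << 19) | (0x1f << 14))

int main(void)
{
	/* The workaround value used below: 62 general + 2 high credits. */
	uint32_t credits = L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);

	/* Both fields lie entirely inside the mask... */
	assert((credits & ~L3_PRIO_CREDITS_MASK) == 0);

	/* ...so a read-modify-write leaves every other bit untouched. */
	uint32_t reg = 0xA5000123;	/* arbitrary example register value */
	reg = (reg & ~L3_PRIO_CREDITS_MASK) | credits;

	printf("credits = 0x%08x, reg = 0x%08x\n", credits, reg);
	return 0;
}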
@@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
 		       int *n_entries)
 {
 	if (IS_BROADWELL(dev_priv)) {
-		*n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
-		return hsw_ddi_translations_fdi;
+		*n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
+		return bdw_ddi_translations_fdi;
 	} else if (IS_HASWELL(dev_priv)) {
 		*n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
 		return hsw_ddi_translations_fdi;
@@ -2102,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
 		 * register writes.
 		 */
 		val = I915_READ(DPCLKA_CFGCR0);
-		val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) |
-			 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port));
+		val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
 		I915_WRITE(DPCLKA_CFGCR0, val);
 	} else if (IS_GEN9_BC(dev_priv)) {
 		/* DDI -> PLL mapping */
@@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 
 	/* 3. Configure DPLL_CFGCR0 */
 	/* Avoid touch CFGCR1 if HDMI mode is not enabled */
-	if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) {
+	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
 		val = pll->state.hw_state.cfgcr1;
 		I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
 		/* 4. Reab back to ensure writes completed */
@@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 	}
 
 	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
-	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
-		I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
-					   L3_HIGH_PRIO_CREDITS(2));
+	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+		u32 val = I915_READ(GEN8_L3SQCREG1);
+		val &= ~L3_PRIO_CREDITS_MASK;
+		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+		I915_WRITE(GEN8_L3SQCREG1, val);
+	}
 
 	/* WaToEnableHwFixForPushConstHWBug:bxt */
 	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
|
@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
|
|||
int high_prio_credits)
|
||||
{
|
||||
u32 misccpctl;
|
||||
u32 val;
|
||||
|
||||
/* WaTempDisableDOPClkGating:bdw */
|
||||
misccpctl = I915_READ(GEN7_MISCCPCTL);
|
||||
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
|
||||
|
||||
I915_WRITE(GEN8_L3SQCREG1,
|
||||
L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
|
||||
L3_HIGH_PRIO_CREDITS(high_prio_credits));
|
||||
val = I915_READ(GEN8_L3SQCREG1);
|
||||
val &= ~L3_PRIO_CREDITS_MASK;
|
||||
val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
|
||||
val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
|
||||
I915_WRITE(GEN8_L3SQCREG1, val);
|
||||
|
||||
/*
|
||||
* Wait at least 100 clocks before re-enabling clock gating.
|
||||
|
|
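Both L3SQCREG1 hunks apply the same correction: the old code wrote the whole register, implicitly zeroing every field outside the credit bits, while the fix reads, clears only the masked fields, and writes back. A generic sketch of that read-modify-write pattern, where mmio_read/mmio_write are hypothetical stand-ins for I915_READ/I915_WRITE:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical MMIO accessors; the real driver uses I915_READ/I915_WRITE. */
static uint32_t mmio_read(const volatile uint32_t *reg)    { return *reg; }
static void mmio_write(volatile uint32_t *reg, uint32_t v) { *reg = v; }

/* Update only the bits under `mask`, preserving the rest of the
 * register; a blind full-register write would clobber unrelated fields. */
static void mmio_rmw(volatile uint32_t *reg, uint32_t mask, uint32_t bits)
{
	uint32_t val = mmio_read(reg);

	val &= ~mask;
	val |= bits & mask;
	mmio_write(reg, val);
}

int main(void)
{
	uint32_t fake_reg = 0xA5A5A5A5;	/* stand-in register backing store */

	mmio_rmw(&fake_reg, 0x00FFC000, 0x00F84000);
	printf("reg = 0x%08x\n", fake_reg);	/* bits outside the mask preserved */
	return 0;
}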