Merge tag 'drm-intel-next-2019-03-20' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

UAPI Changes:
- Report an error early instead of a SIGBUS later when mmapping beyond BO size

Core Changes:
- This includes a backmerge of drm-next and two merges of Maarten's
  topic/hdr-formats branch

Driver Changes:
- Add Comet Lake (Gen9) PCI IDs to Coffee Lake ID list (Anusha)
- Add missing ICL PCI ID (Jose)
- Fix legacy gamma mode for ICL (Ville)
- Assume eDP is present on port A when there is no VBT (Thomas)
- Corrections to eDP training patterns (Jose)
- Fix PSR2 selective update corruption after PSR1 setup (Jose)
- Fix CRC mismatch error for DP link layer compliance (Aditya)
- Fix CNL DPLL readout and clean up code (Ville)
- Turn off the CUS when turning off a HDR plane (Ville)
- Avoid a race with the execlist tasklet during reset (Chris)
- Add missing CSC readout and clean up code (Ville)
- Avoid unnecessary wakeref during debugfs/drop_caches/set (Chris, Caz)
- Hold references to ring/HW context/context explicitly when used (Chris)

- Assume next platforms inherit old platform (Rodrigo)
- Use HWS indices rather than addresses for breadcrumbs (Chris)
- Add REG_BIT/REG_GENMASK and REG_FIELD_PREP macros (Jani; a short sketch follows this list)
- Convert C99 types that crept in to kernel fixed-size types (Jani)
- Avoid passing full dev_priv in forcewake functions (Daniele)
- Reset GuC on GPU reset (Sujaritha)
- Rework MG and Combo PLLs to vfuncs (Lucas)
- Explicitly track ppGTT size (Chris, Bob)
- Coding style improvements and code modularization (Ville)
- Selftest and debugging improvements (Chris)
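
For the REG_BIT/REG_GENMASK/REG_FIELD_PREP item, a minimal sketch of the
helpers, assuming they wrap the kernel's BIT()/GENMASK()/FIELD_PREP()
primitives (the in-tree versions in i915_reg.h add compile-time argument
checking); the EXAMPLE_* register layout is hypothetical:

#include <linux/bits.h>
#include <linux/bitfield.h>

#define REG_BIT(n)                ((u32)BIT(n))
#define REG_GENMASK(high, low)    ((u32)GENMASK((high), (low)))
#define REG_FIELD_PREP(mask, val) ((u32)FIELD_PREP((mask), (val)))

/* Hypothetical register layout, for illustration only: */
#define EXAMPLE_CTL_ENABLE    REG_BIT(31)
#define EXAMPLE_CTL_MODE_MASK REG_GENMASK(7, 4)
#define EXAMPLE_CTL_MODE(x)   REG_FIELD_PREP(EXAMPLE_CTL_MODE_MASK, (x))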

Signed-off-by: Dave Airlie <airlied@redhat.com>

# Conflicts:
#	drivers/gpu/drm/i915/intel_hdmi.c
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190325124925.GA12726@jlahtine-desk.ger.corp.intel.com
Committed by: Dave Airlie, 2019-03-26 06:15:27 +10:00
Parents: 0bec6219e5 1284ec9855
Commit: f144e67b0e
151 changed files with 10735 additions and 5303 deletions


@ -56,6 +56,15 @@ i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# Test the headers are compilable as standalone units
i915-$(CONFIG_DRM_I915_WERROR) += \
test_i915_active_types_standalone.o \
test_i915_gem_context_types_standalone.o \
test_i915_timeline_types_standalone.o \
test_intel_context_types_standalone.o \
test_intel_engine_types_standalone.o \
test_intel_workarounds_types_standalone.o
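
Each test_*_standalone.o above is presumably compiled from a one-line C
file that does nothing but include the header under test, so a header
that fails to pull in its own dependencies breaks the build whenever
CONFIG_DRM_I915_WERROR is enabled. Assumed contents of one such file:

/* test_i915_active_types_standalone.c (assumed) */
#include "i915_active_types.h"
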
# GEM code
i915-y += \
i915_active.o \
@ -77,6 +86,7 @@ i915-y += \
i915_gem_tiling.o \
i915_gem_userptr.o \
i915_gemfs.o \
i915_globals.o \
i915_query.o \
i915_request.o \
i915_scheduler.o \
@ -84,6 +94,7 @@ i915-y += \
i915_trace_points.o \
i915_vma.o \
intel_breadcrumbs.o \
intel_context.o \
intel_engine_cs.o \
intel_hangcheck.o \
intel_lrc.o \


@ -391,12 +391,12 @@ struct cmd_info {
#define F_POST_HANDLE (1<<2)
u32 flag;
#define R_RCS (1 << RCS)
#define R_VCS1 (1 << VCS)
#define R_VCS2 (1 << VCS2)
#define R_RCS BIT(RCS0)
#define R_VCS1 BIT(VCS0)
#define R_VCS2 BIT(VCS1)
#define R_VCS (R_VCS1 | R_VCS2)
#define R_BCS (1 << BCS)
#define R_VECS (1 << VECS)
#define R_BCS BIT(BCS0)
#define R_VECS BIT(VECS0)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
/* rings that support this cmd: BLT/RCS/VCS/VECS */
u16 rings;
@ -558,7 +558,7 @@ static const struct decode_info decode_info_vebox = {
};
static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
[RCS] = {
[RCS0] = {
&decode_info_mi,
NULL,
NULL,
@ -569,7 +569,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
[VCS] = {
[VCS0] = {
&decode_info_mi,
NULL,
NULL,
@ -580,7 +580,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
[BCS] = {
[BCS0] = {
&decode_info_mi,
NULL,
&decode_info_2d,
@ -591,7 +591,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
[VECS] = {
[VECS0] = {
&decode_info_mi,
NULL,
NULL,
@ -602,7 +602,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
[VCS2] = {
[VCS1] = {
&decode_info_mi,
NULL,
NULL,
@ -631,8 +631,7 @@ static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
struct cmd_entry *e;
hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
if ((opcode == e->info->opcode) &&
(e->info->rings & (1 << ring_id)))
if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
return e->info;
}
return NULL;
@ -943,15 +942,12 @@ static int cmd_handler_lri(struct parser_exec_state *s)
struct intel_gvt *gvt = s->vgpu->gvt;
for (i = 1; i < cmd_len; i += 2) {
if (IS_BROADWELL(gvt->dev_priv) &&
(s->ring_id != RCS)) {
if (s->ring_id == BCS &&
cmd_reg(s, i) ==
i915_mmio_reg_offset(DERRMR))
if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
if (s->ring_id == BCS0 &&
cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
ret |= (cmd_reg_inhibit(s, i)) ?
-EBADRQC : 0;
ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0;
}
if (ret)
break;
@ -1047,27 +1043,27 @@ struct cmd_interrupt_event {
};
static struct cmd_interrupt_event cmd_interrupt_events[] = {
[RCS] = {
[RCS0] = {
.pipe_control_notify = RCS_PIPE_CONTROL,
.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
},
[BCS] = {
[BCS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = BCS_MI_FLUSH_DW,
.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
},
[VCS] = {
[VCS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS_MI_FLUSH_DW,
.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
},
[VCS2] = {
[VCS1] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS2_MI_FLUSH_DW,
.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
},
[VECS] = {
[VECS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VECS_MI_FLUSH_DW,
.mi_user_interrupt = VECS_MI_USER_INTERRUPT,

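Most of the GVT churn in this merge follows one mechanical pattern: engine
enum names gain an instance suffix (RCS becomes RCS0, and the second BSD
ring VCS2 becomes VCS1) and open-coded shifts become BIT(). A
self-contained illustration, with the enum values assumed to mirror the
new naming:

#include <assert.h>

#define BIT(n) (1UL << (n)) /* same definition as the kernel's <linux/bits.h> */

enum engine_id { RCS0, BCS0, VCS0, VCS1, VECS0 }; /* assumed order */

int main(void)
{
	unsigned long engine_mask = 0;

	engine_mask |= BIT(VCS1); /* new spelling of the old (1 << VCS2) */
	assert(engine_mask == 1UL << VCS1);
	return 0;
}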

@ -153,7 +153,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
obj = i915_gem_object_alloc(dev_priv);
obj = i915_gem_object_alloc();
if (obj == NULL)
return NULL;


@ -47,17 +47,16 @@
((a)->lrca == (b)->lrca))
static int context_switch_events[] = {
[RCS] = RCS_AS_CONTEXT_SWITCH,
[BCS] = BCS_AS_CONTEXT_SWITCH,
[VCS] = VCS_AS_CONTEXT_SWITCH,
[VCS2] = VCS2_AS_CONTEXT_SWITCH,
[VECS] = VECS_AS_CONTEXT_SWITCH,
[RCS0] = RCS_AS_CONTEXT_SWITCH,
[BCS0] = BCS_AS_CONTEXT_SWITCH,
[VCS0] = VCS_AS_CONTEXT_SWITCH,
[VCS1] = VCS2_AS_CONTEXT_SWITCH,
[VECS0] = VECS_AS_CONTEXT_SWITCH,
};
static int ring_id_to_context_switch_event(int ring_id)
static int ring_id_to_context_switch_event(unsigned int ring_id)
{
if (WARN_ON(ring_id < RCS ||
ring_id >= ARRAY_SIZE(context_switch_events)))
if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
return -EINVAL;
return context_switch_events[ring_id];
@ -411,7 +410,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
goto out;
if (!list_empty(workload_q_head(vgpu, ring_id))) {


@ -323,25 +323,25 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
} else {
if (data & GEN6_GRDOM_RENDER) {
gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
engine_mask |= (1 << RCS);
engine_mask |= BIT(RCS0);
}
if (data & GEN6_GRDOM_MEDIA) {
gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
engine_mask |= (1 << VCS);
engine_mask |= BIT(VCS0);
}
if (data & GEN6_GRDOM_BLT) {
gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
engine_mask |= (1 << BCS);
engine_mask |= BIT(BCS0);
}
if (data & GEN6_GRDOM_VECS) {
gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
engine_mask |= (1 << VECS);
engine_mask |= BIT(VECS0);
}
if (data & GEN8_GRDOM_MEDIA2) {
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
if (HAS_BSD2(vgpu->gvt->dev_priv))
engine_mask |= (1 << VCS2);
engine_mask |= BIT(VCS1);
}
engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
}
/* vgpu_lock already hold by emulate mmio r/w */
@ -1704,7 +1704,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
ret = intel_vgpu_select_submission_ops(vgpu,
ENGINE_MASK(ring_id),
BIT(ring_id),
INTEL_VGPU_EXECLIST_SUBMISSION);
if (ret)
return ret;
@ -1724,19 +1724,19 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
switch (offset) {
case 0x4260:
id = RCS;
id = RCS0;
break;
case 0x4264:
id = VCS;
id = VCS0;
break;
case 0x4268:
id = VCS2;
id = VCS1;
break;
case 0x426c:
id = BCS;
id = BCS0;
break;
case 0x4270:
id = VECS;
id = VECS0;
break;
default:
return -EINVAL;
@ -1793,7 +1793,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
if (HAS_BSD2(dev_priv)) \
if (HAS_ENGINE(dev_priv, VCS1)) \
MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)
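
/*
 * Sketch only: HAS_BSD2() gives way to the more general
 * HAS_ENGINE(dev_priv, id), which is assumed to reduce to a bit test
 * against the device's engine mask, roughly:
 */
#define HAS_ENGINE(dev_priv, id) \
	(INTEL_INFO(dev_priv)->engine_mask & BIT(id))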


@ -536,7 +536,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
if (HAS_BSD2(gvt->dev_priv)) {
if (HAS_ENGINE(gvt->dev_priv, VCS1)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,


@ -41,102 +41,102 @@
/* Raw offset is appended to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
{RCS, HWSTAM, 0x0, false}, /* 0x2098 */
{RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
{RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
{RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
{RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
{RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
{RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
{RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
{RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
{BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
{RCS, HWSTAM, 0x0, false}, /* 0x2098 */
{RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
{RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
{RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
{RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
{RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
{RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
{RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
{RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
{RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
{RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
{RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
{RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
{RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
{RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
{RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
{RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
{RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
{RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
{RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
{RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
{RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
{RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
{RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
{RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
{RCS, TRVADR, 0, false}, /* 0x4df0 */
{RCS, TRTTE, 0, false}, /* 0x4df4 */
{RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
{RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
{RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
{RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
{RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
{RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
{RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
{RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
{RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
{RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
{RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
{RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
{RCS0, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
{RCS0, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
{RCS0, TRNULLDETCT, 0, false}, /* 0x4de8 */
{RCS0, TRINVTILEDETCT, 0, false}, /* 0x4dec */
{RCS0, TRVADR, 0, false}, /* 0x4df0 */
{RCS0, TRTTE, 0, false}, /* 0x4df4 */
{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
{BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
{VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
{VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
{VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
{VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
{RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
{RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
{RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
{RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
{RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
{RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
{RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
{RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
{RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
{RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
{RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
{RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
{RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
static struct {
@ -149,11 +149,11 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
{
i915_reg_t offset;
u32 regs[] = {
[RCS] = 0xc800,
[VCS] = 0xc900,
[VCS2] = 0xca00,
[BCS] = 0xcc00,
[VECS] = 0xcb00,
[RCS0] = 0xc800,
[VCS0] = 0xc900,
[VCS1] = 0xca00,
[BCS0] = 0xcc00,
[VECS0] = 0xcb00,
};
int ring_id, i;
@ -301,7 +301,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
goto out;
/* no MOCS register in context except render engine */
if (req->engine->id != RCS)
if (req->engine->id != RCS0)
goto out;
ret = restore_render_mocs_control_for_inhibit(vgpu, req);
@ -331,11 +331,11 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
enum forcewake_domains fw;
i915_reg_t reg;
u32 regs[] = {
[RCS] = 0x4260,
[VCS] = 0x4264,
[VCS2] = 0x4268,
[BCS] = 0x426c,
[VECS] = 0x4270,
[RCS0] = 0x4260,
[VCS0] = 0x4264,
[VCS1] = 0x4268,
[BCS0] = 0x426c,
[VECS0] = 0x4270,
};
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
@ -353,7 +353,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
*/
fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ | FW_REG_WRITE);
if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9))
if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(dev_priv, fw);
@ -378,11 +378,11 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
u32 old_v, new_v;
u32 regs[] = {
[RCS] = 0xc800,
[VCS] = 0xc900,
[VCS2] = 0xca00,
[BCS] = 0xcc00,
[VECS] = 0xcb00,
[RCS0] = 0xc800,
[VCS0] = 0xc900,
[VCS1] = 0xca00,
[BCS0] = 0xcc00,
[VECS0] = 0xcb00,
};
int i;
@ -390,8 +390,10 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
|| IS_COFFEELAKE(dev_priv)) && ring_id == RCS)
if (ring_id == RCS0 &&
(IS_KABYLAKE(dev_priv) ||
IS_BROXTON(dev_priv) ||
IS_COFFEELAKE(dev_priv)))
return;
if (!pre && !gen9_render_mocs.initialized)
@ -414,7 +416,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
offset.reg += 4;
}
if (ring_id == RCS) {
if (ring_id == RCS0) {
l3_offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
if (pre)
@ -492,7 +494,8 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
!is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
!is_inhibit_context(intel_context_lookup(s->shadow_ctx,
dev_priv->engine[ring_id])))
continue;
if (mmio->mask)


@ -93,7 +93,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
i915_mmio_reg_offset(EU_PERF_CNTL6),
};
if (workload->ring_id != RCS)
if (workload->ring_id != RCS0)
return;
if (save) {
@ -149,7 +149,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG_MASKED(ctx_ctrl);
COPY_REG(ctx_timestamp);
if (ring_id == RCS) {
if (ring_id == RCS0) {
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
@ -177,7 +177,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
context_page_num = context_page_num >> PAGE_SHIFT;
if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
context_page_num = 19;
i = 2;
@ -440,8 +440,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (ret)
goto err_unpin;
if ((workload->ring_id == RCS) &&
(workload->wa_ctx.indirect_ctx.size != 0)) {
if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto err_shadow;
@ -791,7 +790,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
context_page_num = 19;
i = 2;
@ -891,8 +890,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->status = 0;
}
if (!workload->status && !(vgpu->resetting_eng &
ENGINE_MASK(ring_id))) {
if (!workload->status &&
!(vgpu->resetting_eng & BIT(ring_id))) {
update_guest_context(workload);
for_each_set_bit(event, workload->pending_events,
@ -915,7 +914,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
list_del_init(&workload->list);
if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
/* A non-zero workload->status means the GPU hung or something went
* wrong between i915 and GVT, so GVT won't inject a context switch
* interrupt into the guest.
@ -929,7 +928,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
* cleaned up during the resetting process later, so doing
* the workload clean up here doesn't have any impact.
**/
intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
}
workload->complete(workload);
@ -1102,9 +1101,9 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
if (i915_vm_is_48bit(&i915_ppgtt->vm))
if (i915_vm_is_4lvl(&i915_ppgtt->vm)) {
px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
else {
} else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)
px_dma(i915_ppgtt->pdp.page_directory[i]) =
s->i915_context_pdps[i];
@ -1155,7 +1154,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
if (i915_vm_is_48bit(&i915_ppgtt->vm))
if (i915_vm_is_4lvl(&i915_ppgtt->vm))
s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)
@ -1438,7 +1437,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->rb_start = start;
workload->rb_ctl = ctl;
if (ring_id == RCS) {
if (ring_id == RCS0) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +


@ -44,7 +44,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;


@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"
#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
@ -17,6 +18,7 @@
* nodes from a local slab cache to hopefully reduce the fragmentation.
*/
static struct i915_global_active {
struct i915_global base;
struct kmem_cache *slab_cache;
} global;
@ -285,16 +287,27 @@ void i915_active_retire_noop(struct i915_active_request *active,
#include "selftests/i915_active.c"
#endif
static void i915_global_active_shrink(void)
{
kmem_cache_shrink(global.slab_cache);
}
static void i915_global_active_exit(void)
{
kmem_cache_destroy(global.slab_cache);
}
static struct i915_global_active global = { {
.shrink = i915_global_active_shrink,
.exit = i915_global_active_exit,
} };
int __init i915_global_active_init(void)
{
global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
if (!global.slab_cache)
return -ENOMEM;
i915_global_register(&global.base);
return 0;
}
void __exit i915_global_active_exit(void)
{
kmem_cache_destroy(global.slab_cache);
}
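
The shape of the new i915_globals scheme, as far as this hunk shows it:
each subsystem embeds a struct i915_global carrying shrink/exit callbacks
and registers it once, so i915_globals.c can drive slab shrinking and
teardown centrally. A minimal userspace model of the assumed registry
(the real implementation lives in i915_globals.c and differs in detail):

struct i915_global {
	struct i915_global *next; /* the real struct presumably uses a list_head */
	void (*shrink)(void);
	void (*exit)(void);
};

static struct i915_global *globals;

static void i915_global_register(struct i915_global *global)
{
	global->next = globals;
	globals = global;
}

static void i915_globals_exit_all(void) /* hypothetical name */
{
	struct i915_global *global;

	for (global = globals; global; global = global->next)
		global->exit();
}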


@ -108,19 +108,6 @@ i915_active_request_set_retire_fn(struct i915_active_request *active,
active->retire = fn ?: i915_active_retire_noop;
}
static inline struct i915_request *
__i915_active_request_peek(const struct i915_active_request *active)
{
/*
* Inside the error capture (running with the driver in an unknown
* state), we want to bend the rules slightly (a lot).
*
* Work is in progress to make it safer, in the meantime this keeps
* the known issue from spamming the logs.
*/
return rcu_dereference_protected(active->request, 1);
}
/**
* i915_active_request_raw - return the active request
* @active - the active tracker
@ -419,7 +406,4 @@ void i915_active_fini(struct i915_active *ref);
static inline void i915_active_fini(struct i915_active *ref) { }
#endif
int i915_global_active_init(void);
void i915_global_active_exit(void);
#endif /* _I915_ACTIVE_H_ */


@ -868,8 +868,8 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
if (!IS_GEN(engine->i915, 7))
return;
switch (engine->id) {
case RCS:
switch (engine->class) {
case RENDER_CLASS:
if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_render_ring_cmds;
cmd_table_count =
@ -889,12 +889,12 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
case VCS:
case VIDEO_DECODE_CLASS:
cmd_tables = gen7_video_cmds;
cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case BCS:
case COPY_ENGINE_CLASS:
if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_blt_ring_cmds;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
@ -913,14 +913,14 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
case VECS:
case VIDEO_ENHANCEMENT_CLASS:
cmd_tables = hsw_vebox_cmds;
cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
/* VECS can use the same length_mask function as VCS */
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
default:
MISSING_CASE(engine->id);
MISSING_CASE(engine->class);
return;
}
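/*
 * Why the switch key changed: engine->id names a single instance
 * (RCS0, VCS0, VCS1, ...), while engine->class groups every instance
 * of a type (both BSD engines are VIDEO_DECODE_CLASS), so one command
 * table per class now covers all instances, including any second BSD
 * ring.
 */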


@ -388,12 +388,9 @@ static void print_context_stats(struct seq_file *m,
struct i915_gem_context *ctx;
list_for_each_entry(ctx, &i915->contexts.list, link) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
struct intel_context *ce = to_intel_context(ctx, engine);
struct intel_context *ce;
list_for_each_entry(ce, &ctx->active_engines, active_link) {
if (ce->state)
per_file_stats(0, ce->state->obj, &kstats);
if (ce->ring)
@ -1281,14 +1278,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_wakeref_t wakeref;
enum intel_engine_id id;
seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
seq_puts(m, "Wedged\n");
seq_puts(m, "\tWedged\n");
if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
seq_puts(m, "Reset in progress: struct_mutex backoff\n");
if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
seq_puts(m, "Waiter holding struct mutex\n");
if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
seq_puts(m, "struct_mutex blocked for reset\n");
seq_puts(m, "\tDevice (global) reset in progress\n");
if (!i915_modparams.enable_hangcheck) {
seq_puts(m, "Hangcheck disabled\n");
@ -1298,10 +1292,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
with_intel_runtime_pm(dev_priv, wakeref) {
for_each_engine(engine, dev_priv, id) {
acthd[id] = intel_engine_get_active_head(engine);
seqno[id] = intel_engine_get_seqno(engine);
seqno[id] = intel_engine_get_hangcheck_seqno(engine);
}
intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
}
if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
@ -1318,8 +1312,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
engine->hangcheck.seqno, seqno[id],
intel_engine_last_submit(engine),
engine->hangcheck.last_seqno,
seqno[id],
engine->hangcheck.next_seqno,
jiffies_to_msecs(jiffies -
engine->hangcheck.action_timestamp));
@ -1327,7 +1322,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
if (engine->id == RCS) {
if (engine->id == RCS0) {
seq_puts(m, "\tinstdone read =\n");
i915_instdone_info(dev_priv, m, &instdone);
@ -1882,9 +1877,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@ -1892,6 +1885,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
struct intel_context *ce;
seq_puts(m, "HW context ");
if (!list_empty(&ctx->hw_id_link))
seq_printf(m, "%x [pin %u]", ctx->hw_id,
@ -1914,11 +1909,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, '\n');
for_each_engine(engine, dev_priv, id) {
struct intel_context *ce =
to_intel_context(ctx, engine);
seq_printf(m, "%s: ", engine->name);
list_for_each_entry(ce, &ctx->active_engines, active_link) {
seq_printf(m, "%s: ", ce->engine->name);
if (ce->state)
describe_obj(m, ce->state->obj);
if (ce->ring)
@ -2023,11 +2015,9 @@ static const char *rps_power_to_str(unsigned int power)
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 act_freq = rps->cur_freq;
intel_wakeref_t wakeref;
struct drm_file *file;
with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@ -2061,22 +2051,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_gpu_freq(dev_priv, rps->efficient_freq),
intel_gpu_freq(dev_priv, rps->boost_freq));
mutex_lock(&dev->filelist_mutex);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv;
struct task_struct *task;
rcu_read_lock();
task = pid_task(file->pid, PIDTYPE_PID);
seq_printf(m, "%s [%d]: %d boosts\n",
task ? task->comm : "<unknown>",
task ? task->pid : -1,
atomic_read(&file_priv->rps_client.boosts));
rcu_read_unlock();
}
seq_printf(m, "Kernel (anonymous) boosts: %d\n",
atomic_read(&rps->boosts));
mutex_unlock(&dev->filelist_mutex);
seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
if (INTEL_GEN(dev_priv) >= 6 &&
rps->enabled &&
@ -2607,7 +2582,6 @@ static int
i915_edp_psr_debug_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
struct drm_modeset_acquire_ctx ctx;
intel_wakeref_t wakeref;
int ret;
@ -2618,18 +2592,7 @@ i915_edp_psr_debug_set(void *data, u64 val)
wakeref = intel_runtime_pm_get(dev_priv);
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
if (ret == -EDEADLK) {
ret = drm_modeset_backoff(&ctx);
if (!ret)
goto retry;
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
ret = intel_psr_debug_set(dev_priv, val);
intel_runtime_pm_put(dev_priv, wakeref);
@ -2686,8 +2649,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_printf(m, "Runtime power status: %s\n",
enableddisabled(!dev_priv->power_domains.wakeref));
seq_printf(m, "GPU idle: %s (epoch %u)\n",
yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
seq_printf(m, "IRQs disabled: %s\n",
yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
@ -3123,8 +3085,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(dev_priv);
seq_printf(m, "GT awake? %s (epoch %u)\n",
yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
seq_printf(m, "GT awake? %s\n", yesno(dev_priv->gt.awake));
seq_printf(m, "Global active requests: %d\n",
dev_priv->gt.active_requests);
seq_printf(m, "CS timestamp frequency: %u kHz\n",
@ -3211,7 +3172,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
struct i915_wa *wa;
unsigned int i;
@ -3865,11 +3826,18 @@ static const struct file_operations i915_cur_wm_latency_fops = {
static int
i915_wedged_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
int ret = i915_terminally_wedged(data);
*val = i915_terminally_wedged(&dev_priv->gpu_error);
return 0;
switch (ret) {
case -EIO:
*val = 1;
return 0;
case 0:
*val = 0;
return 0;
default:
return ret;
}
}
static int
@ -3877,16 +3845,9 @@ i915_wedged_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
/*
* There is no safeguard against this debugfs entry colliding
* with the hangcheck calling same i915_handle_error() in
* parallel, causing an explosion. For now we assume that the
* test harness is responsible enough not to inject gpu hangs
* while it is writing to 'i915_wedged'
*/
if (i915_reset_backoff(&i915->gpu_error))
return -EAGAIN;
/* Flush any previous reset before applying for a new one */
wait_event(i915->gpu_error.reset_queue,
!test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));
i915_handle_error(i915, val, I915_ERROR_CAPTURE,
"Manually set wedged engine mask = %llx", val);
@ -3927,12 +3888,9 @@ static int
i915_drop_caches_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
intel_wakeref_t wakeref;
int ret = 0;
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
wakeref = intel_runtime_pm_get(i915);
if (val & DROP_RESET_ACTIVE &&
wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
@ -3941,9 +3899,11 @@ i915_drop_caches_set(void *data, u64 val)
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
int ret;
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
goto out;
return ret;
if (val & DROP_ACTIVE)
ret = i915_gem_wait_for_idle(i915,
@ -3957,7 +3917,7 @@ i915_drop_caches_set(void *data, u64 val)
mutex_unlock(&i915->drm.struct_mutex);
}
if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error))
if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
i915_handle_error(i915, ALL_ENGINES, 0, NULL);
fs_reclaim_acquire(GFP_KERNEL);
@ -3982,10 +3942,7 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_FREED)
i915_gem_drain_freed_objects(i915);
out:
intel_runtime_pm_put(i915, wakeref);
return ret;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,


@ -188,6 +188,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CMP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
WARN_ON(!IS_COFFEELAKE(dev_priv));
/* CometPoint is CNP Compatible */
return PCH_CNP;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Ice Lake PCH\n");
WARN_ON(!IS_ICELAKE(dev_priv));
@ -219,20 +224,20 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
* make an educated guess as to which PCH is really there.
*/
if (IS_GEN(dev_priv, 5))
id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
if (IS_ICELAKE(dev_priv))
id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
else if (IS_ICELAKE(dev_priv))
id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
else if (IS_GEN(dev_priv, 5))
id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
if (id)
DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
@ -330,16 +335,16 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = dev_priv->overlay ? 1 : 0;
break;
case I915_PARAM_HAS_BSD:
value = !!dev_priv->engine[VCS];
value = !!dev_priv->engine[VCS0];
break;
case I915_PARAM_HAS_BLT:
value = !!dev_priv->engine[BCS];
value = !!dev_priv->engine[BCS0];
break;
case I915_PARAM_HAS_VEBOX:
value = !!dev_priv->engine[VECS];
value = !!dev_priv->engine[VECS0];
break;
case I915_PARAM_HAS_BSD2:
value = !!dev_priv->engine[VCS2];
value = !!dev_priv->engine[VCS1];
break;
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev_priv);
@ -348,10 +353,10 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = HAS_WT(dev_priv);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
value = INTEL_PPGTT(dev_priv);
break;
case I915_PARAM_HAS_SEMAPHORES:
value = 0;
value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
@ -714,8 +719,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
cleanup_gem:
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
i915_gem_suspend(dev_priv);
i915_gem_fini(dev_priv);
cleanup_modeset:
intel_modeset_cleanup(dev);
@ -873,6 +877,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
mutex_init(&dev_priv->hdcp_comp_mutex);
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(dev_priv);
@ -1034,110 +1039,180 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
intel_gvt_sanitize_options(dev_priv);
}
static enum dram_rank skl_get_dimm_rank(u8 size, u32 rank)
{
if (size == 0)
return I915_DRAM_RANK_INVALID;
if (rank == SKL_DRAM_RANK_SINGLE)
return I915_DRAM_RANK_SINGLE;
else if (rank == SKL_DRAM_RANK_DUAL)
return I915_DRAM_RANK_DUAL;
#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
return I915_DRAM_RANK_INVALID;
static const char *intel_dram_type_str(enum intel_dram_type type)
{
static const char * const str[] = {
DRAM_TYPE_STR(UNKNOWN),
DRAM_TYPE_STR(DDR3),
DRAM_TYPE_STR(DDR4),
DRAM_TYPE_STR(LPDDR3),
DRAM_TYPE_STR(LPDDR4),
};
if (type >= ARRAY_SIZE(str))
type = INTEL_DRAM_UNKNOWN;
return str[type];
}
#undef DRAM_TYPE_STR
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
return dimm->ranks * 64 / (dimm->width ?: 1);
}
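/*
 * Worked example (arithmetic only): a rank is 64 bits wide, so a
 * single-rank DIMM built from x8 devices has 1 * 64 / 8 = 8 devices;
 * if that DIMM totals 16 GB, each device holds 8 * 16 / 8 = 16 Gb,
 * which is exactly what skl_is_16gb_dimm() below tests for.
 */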
/* Returns total GB for the whole DIMM */
static int skl_get_dimm_size(u16 val)
{
return val & SKL_DRAM_SIZE_MASK;
}
static int skl_get_dimm_width(u16 val)
{
if (skl_get_dimm_size(val) == 0)
return 0;
switch (val & SKL_DRAM_WIDTH_MASK) {
case SKL_DRAM_WIDTH_X8:
case SKL_DRAM_WIDTH_X16:
case SKL_DRAM_WIDTH_X32:
val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
return 8 << val;
default:
MISSING_CASE(val);
return 0;
}
}
static int skl_get_dimm_ranks(u16 val)
{
if (skl_get_dimm_size(val) == 0)
return 0;
val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
return val + 1;
}
/* Returns total GB for the whole DIMM */
static int cnl_get_dimm_size(u16 val)
{
return (val & CNL_DRAM_SIZE_MASK) / 2;
}
static int cnl_get_dimm_width(u16 val)
{
if (cnl_get_dimm_size(val) == 0)
return 0;
switch (val & CNL_DRAM_WIDTH_MASK) {
case CNL_DRAM_WIDTH_X8:
case CNL_DRAM_WIDTH_X16:
case CNL_DRAM_WIDTH_X32:
val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
return 8 << val;
default:
MISSING_CASE(val);
return 0;
}
}
static int cnl_get_dimm_ranks(u16 val)
{
if (cnl_get_dimm_size(val) == 0)
return 0;
val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
return val + 1;
}
static bool
skl_is_16gb_dimm(enum dram_rank rank, u8 size, u8 width)
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
if (rank == I915_DRAM_RANK_SINGLE && width == 8 && size == 16)
return true;
else if (rank == I915_DRAM_RANK_DUAL && width == 8 && size == 32)
return true;
else if (rank == SKL_DRAM_RANK_SINGLE && width == 16 && size == 8)
return true;
else if (rank == SKL_DRAM_RANK_DUAL && width == 16 && size == 16)
return true;
/* Convert total GB to Gb per DRAM device */
return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
}
return false;
static void
skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
struct dram_dimm_info *dimm,
int channel, char dimm_name, u16 val)
{
if (INTEL_GEN(dev_priv) >= 10) {
dimm->size = cnl_get_dimm_size(val);
dimm->width = cnl_get_dimm_width(val);
dimm->ranks = cnl_get_dimm_ranks(val);
} else {
dimm->size = skl_get_dimm_size(val);
dimm->width = skl_get_dimm_width(val);
dimm->ranks = skl_get_dimm_ranks(val);
}
DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
yesno(skl_is_16gb_dimm(dimm)));
}
static int
skl_dram_get_channel_info(struct dram_channel_info *ch, u32 val)
skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
struct dram_channel_info *ch,
int channel, u32 val)
{
u32 tmp_l, tmp_s;
u32 s_val = val >> SKL_DRAM_S_SHIFT;
skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
channel, 'L', val & 0xffff);
skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
channel, 'S', val >> 16);
if (!val)
if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
DRM_DEBUG_KMS("CH%u not populated\n", channel);
return -EINVAL;
}
tmp_l = val & SKL_DRAM_SIZE_MASK;
tmp_s = s_val & SKL_DRAM_SIZE_MASK;
if (tmp_l == 0 && tmp_s == 0)
return -EINVAL;
ch->l_info.size = tmp_l;
ch->s_info.size = tmp_s;
tmp_l = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
tmp_s = (s_val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
ch->l_info.width = (1 << tmp_l) * 8;
ch->s_info.width = (1 << tmp_s) * 8;
tmp_l = val & SKL_DRAM_RANK_MASK;
tmp_s = s_val & SKL_DRAM_RANK_MASK;
ch->l_info.rank = skl_get_dimm_rank(ch->l_info.size, tmp_l);
ch->s_info.rank = skl_get_dimm_rank(ch->s_info.size, tmp_s);
if (ch->l_info.rank == I915_DRAM_RANK_DUAL ||
ch->s_info.rank == I915_DRAM_RANK_DUAL)
ch->rank = I915_DRAM_RANK_DUAL;
else if (ch->l_info.rank == I915_DRAM_RANK_SINGLE &&
ch->s_info.rank == I915_DRAM_RANK_SINGLE)
ch->rank = I915_DRAM_RANK_DUAL;
if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
ch->ranks = 2;
else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
ch->ranks = 2;
else
ch->rank = I915_DRAM_RANK_SINGLE;
ch->ranks = 1;
ch->is_16gb_dimm = skl_is_16gb_dimm(ch->l_info.rank, ch->l_info.size,
ch->l_info.width) ||
skl_is_16gb_dimm(ch->s_info.rank, ch->s_info.size,
ch->s_info.width);
ch->is_16gb_dimm =
skl_is_16gb_dimm(&ch->dimm_l) ||
skl_is_16gb_dimm(&ch->dimm_s);
DRM_DEBUG_KMS("(size:width:rank) L(%dGB:X%d:%s) S(%dGB:X%d:%s)\n",
ch->l_info.size, ch->l_info.width,
ch->l_info.rank ? "dual" : "single",
ch->s_info.size, ch->s_info.width,
ch->s_info.rank ? "dual" : "single");
DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
channel, ch->ranks, yesno(ch->is_16gb_dimm));
return 0;
}
static bool
intel_is_dram_symmetric(u32 val_ch0, u32 val_ch1,
struct dram_channel_info *ch0)
intel_is_dram_symmetric(const struct dram_channel_info *ch0,
const struct dram_channel_info *ch1)
{
return (val_ch0 == val_ch1 &&
(ch0->s_info.size == 0 ||
(ch0->l_info.size == ch0->s_info.size &&
ch0->l_info.width == ch0->s_info.width &&
ch0->l_info.rank == ch0->s_info.rank)));
return !memcmp(ch0, ch1, sizeof(*ch0)) &&
(ch0->dimm_s.size == 0 ||
!memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}
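/*
 * memcmp() over whole structs is only reliable when padding bytes are
 * deterministic, which is presumably why the callers below now
 * zero-initialize ch0 and ch1 with "= {}".
 */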
static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
struct dram_info *dram_info = &dev_priv->dram_info;
struct dram_channel_info ch0, ch1;
u32 val_ch0, val_ch1;
struct dram_channel_info ch0 = {}, ch1 = {};
u32 val;
int ret;
val_ch0 = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
ret = skl_dram_get_channel_info(&ch0, val_ch0);
val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
if (ret == 0)
dram_info->num_channels++;
val_ch1 = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
ret = skl_dram_get_channel_info(&ch1, val_ch1);
val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
if (ret == 0)
dram_info->num_channels++;
@ -1151,28 +1226,47 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
* will be same as if single rank memory, so consider single rank
* memory.
*/
if (ch0.rank == I915_DRAM_RANK_SINGLE ||
ch1.rank == I915_DRAM_RANK_SINGLE)
dram_info->rank = I915_DRAM_RANK_SINGLE;
if (ch0.ranks == 1 || ch1.ranks == 1)
dram_info->ranks = 1;
else
dram_info->rank = max(ch0.rank, ch1.rank);
dram_info->ranks = max(ch0.ranks, ch1.ranks);
if (dram_info->rank == I915_DRAM_RANK_INVALID) {
if (dram_info->ranks == 0) {
DRM_INFO("couldn't get memory rank information\n");
return -EINVAL;
}
dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
val_ch1,
&ch0);
dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
DRM_DEBUG_KMS("memory configuration is %sSymmetric memory\n",
dev_priv->dram_info.symmetric_memory ? "" : "not ");
DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
yesno(dram_info->symmetric_memory));
return 0;
}
static enum intel_dram_type
skl_get_dram_type(struct drm_i915_private *dev_priv)
{
u32 val;
val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
switch (val & SKL_DRAM_DDR_TYPE_MASK) {
case SKL_DRAM_DDR_TYPE_DDR3:
return INTEL_DRAM_DDR3;
case SKL_DRAM_DDR_TYPE_DDR4:
return INTEL_DRAM_DDR4;
case SKL_DRAM_DDR_TYPE_LPDDR3:
return INTEL_DRAM_LPDDR3;
case SKL_DRAM_DDR_TYPE_LPDDR4:
return INTEL_DRAM_LPDDR4;
default:
MISSING_CASE(val);
return INTEL_DRAM_UNKNOWN;
}
}
static int
skl_get_dram_info(struct drm_i915_private *dev_priv)
{
@ -1180,6 +1274,9 @@ skl_get_dram_info(struct drm_i915_private *dev_priv)
u32 mem_freq_khz, val;
int ret;
dram_info->type = skl_get_dram_type(dev_priv);
DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));
ret = skl_dram_get_channels_info(dev_priv);
if (ret)
return ret;
@ -1200,6 +1297,85 @@ skl_get_dram_info(struct drm_i915_private *dev_priv)
return 0;
}
/* Returns Gb per DRAM device */
static int bxt_get_dimm_size(u32 val)
{
switch (val & BXT_DRAM_SIZE_MASK) {
case BXT_DRAM_SIZE_4GBIT:
return 4;
case BXT_DRAM_SIZE_6GBIT:
return 6;
case BXT_DRAM_SIZE_8GBIT:
return 8;
case BXT_DRAM_SIZE_12GBIT:
return 12;
case BXT_DRAM_SIZE_16GBIT:
return 16;
default:
MISSING_CASE(val);
return 0;
}
}
static int bxt_get_dimm_width(u32 val)
{
if (!bxt_get_dimm_size(val))
return 0;
val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
return 8 << val;
}
static int bxt_get_dimm_ranks(u32 val)
{
if (!bxt_get_dimm_size(val))
return 0;
switch (val & BXT_DRAM_RANK_MASK) {
case BXT_DRAM_RANK_SINGLE:
return 1;
case BXT_DRAM_RANK_DUAL:
return 2;
default:
MISSING_CASE(val);
return 0;
}
}
static enum intel_dram_type bxt_get_dimm_type(u32 val)
{
if (!bxt_get_dimm_size(val))
return INTEL_DRAM_UNKNOWN;
switch (val & BXT_DRAM_TYPE_MASK) {
case BXT_DRAM_TYPE_DDR3:
return INTEL_DRAM_DDR3;
case BXT_DRAM_TYPE_LPDDR3:
return INTEL_DRAM_LPDDR3;
case BXT_DRAM_TYPE_DDR4:
return INTEL_DRAM_DDR4;
case BXT_DRAM_TYPE_LPDDR4:
return INTEL_DRAM_LPDDR4;
default:
MISSING_CASE(val);
return INTEL_DRAM_UNKNOWN;
}
}
static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
u32 val)
{
dimm->width = bxt_get_dimm_width(val);
dimm->ranks = bxt_get_dimm_ranks(val);
/*
* Size in register is Gb per DRAM device. Convert to total
* GB to match the way we report this for non-LP platforms.
*/
dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
}
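/*
 * Example (arithmetic only): 4 Gb devices on a single-rank x16 DIMM
 * give 1 * 64 / 16 = 4 devices, so dimm->size = 4 * 4 / 8 = 2 GB.
 */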
static int
bxt_get_dram_info(struct drm_i915_private *dev_priv)
{
@ -1228,57 +1404,44 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv)
* Now read each DUNIT8/9/10/11 to check the rank of each dimms.
*/
for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
u8 size, width;
enum dram_rank rank;
u32 tmp;
struct dram_dimm_info dimm;
enum intel_dram_type type;
val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
if (val == 0xFFFFFFFF)
continue;
dram_info->num_channels++;
tmp = val & BXT_DRAM_RANK_MASK;
if (tmp == BXT_DRAM_RANK_SINGLE)
rank = I915_DRAM_RANK_SINGLE;
else if (tmp == BXT_DRAM_RANK_DUAL)
rank = I915_DRAM_RANK_DUAL;
else
rank = I915_DRAM_RANK_INVALID;
bxt_get_dimm_info(&dimm, val);
type = bxt_get_dimm_type(val);
tmp = val & BXT_DRAM_SIZE_MASK;
if (tmp == BXT_DRAM_SIZE_4GB)
size = 4;
else if (tmp == BXT_DRAM_SIZE_6GB)
size = 6;
else if (tmp == BXT_DRAM_SIZE_8GB)
size = 8;
else if (tmp == BXT_DRAM_SIZE_12GB)
size = 12;
else if (tmp == BXT_DRAM_SIZE_16GB)
size = 16;
else
size = 0;
WARN_ON(type != INTEL_DRAM_UNKNOWN &&
dram_info->type != INTEL_DRAM_UNKNOWN &&
dram_info->type != type);
tmp = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
width = (1 << tmp) * 8;
DRM_DEBUG_KMS("dram size:%dGB width:X%d rank:%s\n", size,
width, rank == I915_DRAM_RANK_SINGLE ? "single" :
rank == I915_DRAM_RANK_DUAL ? "dual" : "unknown");
DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
i - BXT_D_CR_DRP0_DUNIT_START,
dimm.size, dimm.width, dimm.ranks,
intel_dram_type_str(type));
/*
* If any channel is single rank, the worst-case output will be
* the same as for single-rank memory, so treat the configuration
* as single rank.
*/
if (dram_info->rank == I915_DRAM_RANK_INVALID)
dram_info->rank = rank;
else if (rank == I915_DRAM_RANK_SINGLE)
dram_info->rank = I915_DRAM_RANK_SINGLE;
if (dram_info->ranks == 0)
dram_info->ranks = dimm.ranks;
else if (dimm.ranks == 1)
dram_info->ranks = 1;
if (type != INTEL_DRAM_UNKNOWN)
dram_info->type = type;
}
if (dram_info->rank == I915_DRAM_RANK_INVALID) {
DRM_INFO("couldn't get memory rank information\n");
if (dram_info->type == INTEL_DRAM_UNKNOWN ||
dram_info->ranks == 0) {
DRM_INFO("couldn't get memory information\n");
return -EINVAL;
}
@ -1290,14 +1453,8 @@ static void
intel_get_dram_info(struct drm_i915_private *dev_priv)
{
struct dram_info *dram_info = &dev_priv->dram_info;
char bandwidth_str[32];
int ret;
dram_info->valid = false;
dram_info->rank = I915_DRAM_RANK_INVALID;
dram_info->bandwidth_kbps = 0;
dram_info->num_channels = 0;
/*
* Assume 16Gb DIMMs are present until proven otherwise.
* This is only used for the level 0 watermark latency
@ -1305,28 +1462,22 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
*/
dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
if (INTEL_GEN(dev_priv) < 9)
return;
/* Need to calculate bandwidth only for Gen9 */
if (IS_BROXTON(dev_priv))
if (IS_GEN9_LP(dev_priv))
ret = bxt_get_dram_info(dev_priv);
else if (IS_GEN(dev_priv, 9))
ret = skl_get_dram_info(dev_priv);
else
ret = skl_dram_get_channels_info(dev_priv);
ret = skl_get_dram_info(dev_priv);
if (ret)
return;
if (dram_info->bandwidth_kbps)
sprintf(bandwidth_str, "%d KBps", dram_info->bandwidth_kbps);
else
sprintf(bandwidth_str, "unknown");
DRM_DEBUG_KMS("DRAM bandwidth:%s, total-channels: %u\n",
bandwidth_str, dram_info->num_channels);
DRM_DEBUG_KMS("DRAM rank: %s rank 16GB-dimm:%s\n",
(dram_info->rank == I915_DRAM_RANK_DUAL) ?
"dual" : "single", yesno(dram_info->is_16gb_dimm));
DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
dram_info->bandwidth_kbps,
dram_info->num_channels);
DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
dram_info->ranks, yesno(dram_info->is_16gb_dimm));
}
/**
@ -1348,7 +1499,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (HAS_PPGTT(dev_priv)) {
if (intel_vgpu_active(dev_priv) &&
!intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
!intel_vgpu_has_full_ppgtt(dev_priv)) {
i915_report_error(dev_priv,
"incompatible vGPU found, support for isolated ppGTT required\n");
return -ENXIO;
@ -1753,8 +1904,7 @@ void i915_driver_unload(struct drm_device *dev)
/* Flush any external code that still may be under the RCU lock */
synchronize_rcu();
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
i915_gem_suspend(dev_priv);
drm_atomic_helper_shutdown(dev);
@ -1862,7 +2012,6 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
static int i915_drm_prepare(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
int err;
/*
* NB intel_display_suspend() may issue new requests after we've
@ -1870,12 +2019,9 @@ static int i915_drm_prepare(struct drm_device *dev)
* split out that work and pull it forward so that after point,
* the GPU is not woken again.
*/
err = i915_gem_suspend(i915);
if (err)
dev_err(&i915->drm.pdev->dev,
"GEM idle failed, suspend/resume might fail\n");
i915_gem_suspend(i915);
return err;
return 0;
}
static int i915_drm_suspend(struct drm_device *dev)

View file

@ -55,6 +55,7 @@
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>
#include "i915_fixed.h"
#include "i915_params.h"
@ -91,8 +92,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20190207"
#define DRIVER_TIMESTAMP 1549572331
#define DRIVER_DATE "20190320"
#define DRIVER_TIMESTAMP 1553069028
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@ -217,10 +218,6 @@ struct drm_i915_file_private {
} mm;
struct idr context_idr;
struct intel_rps_client {
atomic_t boosts;
} rps_client;
unsigned int bsd_engine;
/*
@ -508,7 +505,7 @@ struct i915_psr {
u32 debug;
bool sink_support;
bool prepared, enabled;
bool enabled;
struct intel_dp *dp;
enum pipe pipe;
bool active;
@ -526,16 +523,22 @@ struct i915_psr {
u16 su_x_granularity;
};
/*
* Sorted by south display engine compatibility.
* If the new PCH comes with a south display engine that is not
* inherited from the latest item, please do not add it to the
* end. Instead, add it right after its "parent" PCH.
*/
enum intel_pch {
PCH_NOP = -1, /* PCH without south display */
PCH_NONE = 0, /* No PCH present */
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
PCH_KBP, /* Kaby Lake PCH */
PCH_CNP, /* Cannon Lake PCH */
PCH_CNP, /* Cannon/Comet Lake PCH */
PCH_ICP, /* Ice Lake PCH */
PCH_NOP, /* PCH without south display */
};
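
Keeping the enum sorted by south display engine compatibility is what makes ordered comparisons against it meaningful: a check written once against the oldest compatible PCH keeps working when a newer PCH is inserted right after its "parent". A hedged standalone sketch of the idiom (the driver expresses this through INTEL_PCH_TYPE() comparisons; the helper below is illustrative):

#include <stdio.h>

enum pch { PCH_NONE, PCH_IBX, PCH_CPT, PCH_LPT, PCH_SPT,
	   PCH_KBP, PCH_CNP, PCH_ICP };

/* Everything from Sunrisepoint onwards inherits SPT's south display
 * engine, so one ordered comparison covers all of them. */
static int has_spt_or_newer(enum pch pch)
{
	return pch >= PCH_SPT;
}

int main(void)
{
	printf("ICP: %d, CPT: %d\n",
	       has_spt_or_newer(PCH_ICP), has_spt_or_newer(PCH_CPT));
	return 0;
}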
enum intel_sbi_destination {
@ -1009,6 +1012,7 @@ struct intel_vbt_data {
enum psr_lines_to_wait lines_to_wait;
int tp1_wakeup_time_us;
int tp2_tp3_wakeup_time_us;
int psr2_tp2_tp3_wakeup_time_us;
} psr;
struct {
@ -1130,6 +1134,7 @@ struct skl_wm_level {
u16 plane_res_b;
u8 plane_res_l;
bool plane_en;
bool ignore_lines;
};
/* Stores plane specific WM parameters */
@ -1200,7 +1205,11 @@ enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_NONE,
INTEL_PIPE_CRC_SOURCE_PLANE1,
INTEL_PIPE_CRC_SOURCE_PLANE2,
INTEL_PIPE_CRC_SOURCE_PF,
INTEL_PIPE_CRC_SOURCE_PLANE3,
INTEL_PIPE_CRC_SOURCE_PLANE4,
INTEL_PIPE_CRC_SOURCE_PLANE5,
INTEL_PIPE_CRC_SOURCE_PLANE6,
INTEL_PIPE_CRC_SOURCE_PLANE7,
INTEL_PIPE_CRC_SOURCE_PIPE,
/* TV/DP on pre-gen5/vlv can't use the pipe source. */
INTEL_PIPE_CRC_SOURCE_TV,
@ -1468,13 +1477,6 @@ struct intel_cdclk_state {
struct drm_i915_private {
struct drm_device drm;
struct kmem_cache *objects;
struct kmem_cache *vmas;
struct kmem_cache *luts;
struct kmem_cache *requests;
struct kmem_cache *dependencies;
struct kmem_cache *priorities;
const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
struct intel_driver_caps caps;
@ -1831,13 +1833,16 @@ struct drm_i915_private {
bool valid;
bool is_16gb_dimm;
u8 num_channels;
enum dram_rank {
I915_DRAM_RANK_INVALID = 0,
I915_DRAM_RANK_SINGLE,
I915_DRAM_RANK_DUAL
} rank;
u8 ranks;
u32 bandwidth_kbps;
bool symmetric_memory;
enum intel_dram_type {
INTEL_DRAM_UNKNOWN,
INTEL_DRAM_DDR3,
INTEL_DRAM_DDR4,
INTEL_DRAM_LPDDR3,
INTEL_DRAM_LPDDR4
} type;
} dram_info;
struct i915_runtime_pm runtime_pm;
@ -1997,6 +2002,7 @@ struct drm_i915_private {
struct list_head hwsp_free_list;
} timelines;
intel_engine_mask_t active_engines;
struct list_head active_rings;
struct list_head closed_vma;
u32 active_requests;
@ -2010,12 +2016,6 @@ struct drm_i915_private {
*/
intel_wakeref_t awake;
/**
* The number of times we have woken up.
*/
unsigned int epoch;
#define I915_EPOCH_INVALID 0
/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
@ -2055,18 +2055,25 @@ struct drm_i915_private {
struct i915_pmu pmu;
struct i915_hdcp_comp_master *hdcp_master;
bool hdcp_comp_added;
/* Mutex to protect the above hdcp component related values. */
struct mutex hdcp_comp_mutex;
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
*/
};
struct dram_dimm_info {
u8 size, width, ranks;
};
struct dram_channel_info {
struct info {
u8 size, width;
enum dram_rank rank;
} l_info, s_info;
enum dram_rank rank;
struct dram_dimm_info dimm_l, dimm_s;
u8 ranks;
bool is_16gb_dimm;
};
@ -2104,7 +2111,7 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->ring_mask; \
for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \
(tmp__) ? \
((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
0;)
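
for_each_engine_masked() walks only the set bits of the (mask & engine_mask) intersection, consuming one bit per iteration. A minimal userspace model of the bit-consuming step (mask_next_bit here stands in for the driver's __mask_next_bit and is an assumption of this sketch):

#include <stdio.h>

/* Pop the lowest set bit of *mask and return its index. */
static unsigned int mask_next_bit(unsigned int *mask)
{
	unsigned int bit = __builtin_ctz(*mask);

	*mask &= *mask - 1;	/* clear the bit just consumed */
	return bit;
}

int main(void)
{
	unsigned int engine_mask = 0x1 | 0x4 | 0x8;	/* engines 0, 2, 3 */

	while (engine_mask)
		printf("engine %u\n", mask_next_bit(&engine_mask));
	return 0;
}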
@ -2425,24 +2432,8 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
#define ENGINE_MASK(id) BIT(id)
#define RENDER_RING ENGINE_MASK(RCS)
#define BSD_RING ENGINE_MASK(VCS)
#define BLT_RING ENGINE_MASK(BCS)
#define VEBOX_RING ENGINE_MASK(VECS)
#define BSD2_RING ENGINE_MASK(VCS2)
#define BSD3_RING ENGINE_MASK(VCS3)
#define BSD4_RING ENGINE_MASK(VCS4)
#define VEBOX2_RING ENGINE_MASK(VECS2)
#define ALL_ENGINES (~0)
#define HAS_ENGINE(dev_priv, id) \
(!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
#define ALL_ENGINES (~0u)
#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
@ -2461,13 +2452,11 @@ static inline unsigned int i915_sg_segment_size(void)
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt)
#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
#define HAS_FULL_48BIT_PPGTT(dev_priv) \
(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL)
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
GEM_BUG_ON((sizes) == 0); \
@ -2511,6 +2500,7 @@ static inline unsigned int i915_sg_segment_size(void)
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
#define HAS_TRANSCODER_EDP(dev_priv) (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
@ -2557,6 +2547,7 @@ static inline unsigned int i915_sg_segment_size(void)
#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
@ -2566,8 +2557,6 @@ static inline unsigned int i915_sg_segment_size(void)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
#define HAS_PCH_CNP_LP(dev_priv) \
(INTEL_PCH_ID(dev_priv) == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
@ -2799,8 +2788,6 @@ void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *
@ -3001,7 +2988,12 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
static inline int __must_check
i915_mutex_lock_interruptible(struct drm_device *dev)
{
return mutex_lock_interruptible(&dev->struct_mutex);
}
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@ -3015,22 +3007,14 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
struct i915_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
static inline bool i915_reset_backoff(struct i915_gpu_error *error)
{
return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
}
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
static inline bool __i915_wedged(struct i915_gpu_error *error)
{
return unlikely(test_bit(I915_WEDGED, &error->flags));
}
static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error)
static inline bool i915_reset_failed(struct drm_i915_private *i915)
{
return i915_reset_backoff(error) | i915_terminally_wedged(error);
return __i915_wedged(&i915->gpu_error);
}
static inline u32 i915_reset_count(struct i915_gpu_error *error)
@ -3055,14 +3039,13 @@ void i915_gem_fini(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags, long timeout);
int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
vm_fault_t i915_gem_fault(struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout,
struct intel_rps_client *rps);
long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr);
@ -3105,7 +3088,6 @@ struct drm_i915_fence_reg *
i915_reserve_fence(struct drm_i915_private *dev_priv);
void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
@ -3141,7 +3123,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
struct i915_gem_context *ctx,
struct intel_context *ce,
u32 *reg_state);
/* i915_gem_evict.c */

Diff between files not shown due to its large size.

View file

@ -75,12 +75,14 @@ struct drm_i915_private;
#define I915_NUM_ENGINES 8
#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
void i915_gem_park(struct drm_i915_private *i915);
void i915_gem_unpark(struct drm_i915_private *i915);
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
if (atomic_inc_return(&t->count) == 1)
if (!atomic_fetch_inc(&t->count))
tasklet_unlock_wait(t);
}
@ -89,4 +91,9 @@ static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
return !atomic_read(&t->count);
}
static inline bool __tasklet_enable(struct tasklet_struct *t)
{
return atomic_dec_and_test(&t->count);
}
#endif /* __I915_GEM_H__ */
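
The switch from atomic_inc_return(&t->count) == 1 to !atomic_fetch_inc(&t->count) is behaviour-preserving: fetch-inc returns the value before the increment, so a zero result identifies the first disabler, the only caller that must wait for a running tasklet to finish. A standalone C11 sketch of the same counting scheme (printf stands in for tasklet_unlock_wait):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int count;

static void disable_sync_once(void)
{
	/* Old value 0 means we are the first to disable. */
	if (!atomic_fetch_add(&count, 1))
		printf("first disable: wait for the handler to finish\n");
	else
		printf("already disabled: just bump the count\n");
}

int main(void)
{
	disable_sync_once();	/* waits */
	disable_sync_once();	/* only increments */
	return 0;
}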

View file

@ -88,12 +88,28 @@
#include <linux/log2.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "intel_lrc_reg.h"
#include "intel_workarounds.h"
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
static struct i915_global_gem_context {
struct i915_global base;
struct kmem_cache *slab_luts;
} global;
struct i915_lut_handle *i915_lut_handle_alloc(void)
{
return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
}
void i915_lut_handle_free(struct i915_lut_handle *lut)
{
return kmem_cache_free(global.slab_luts, lut);
}
static void lut_close(struct i915_gem_context *ctx)
{
struct i915_lut_handle *lut, *ln;
@ -102,7 +118,7 @@ static void lut_close(struct i915_gem_context *ctx)
list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
list_del(&lut->obj_link);
kmem_cache_free(ctx->i915->luts, lut);
i915_lut_handle_free(lut);
}
rcu_read_lock();
@ -206,25 +222,23 @@ static void release_hw_id(struct i915_gem_context *ctx)
static void i915_gem_context_free(struct i915_gem_context *ctx)
{
unsigned int n;
struct intel_context *it, *n;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
GEM_BUG_ON(!list_empty(&ctx->active_engines));
release_hw_id(ctx);
i915_ppgtt_put(ctx->ppgtt);
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
struct intel_context *ce = &ctx->__engine[n];
if (ce->ops)
ce->ops->destroy(ce);
}
rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node)
intel_context_put(it);
kfree(ctx->name);
put_pid(ctx->pid);
list_del(&ctx->link);
mutex_destroy(&ctx->mutex);
kfree_rcu(ctx, rcu);
}
@ -307,7 +321,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT;
if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
@ -322,39 +336,13 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
return desc;
}
static void intel_context_retire(struct i915_active_request *active,
struct i915_request *rq)
{
struct intel_context *ce =
container_of(active, typeof(*ce), active_tracker);
intel_context_unpin(ce);
}
void
intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
ce->gem_context = ctx;
INIT_LIST_HEAD(&ce->signal_link);
INIT_LIST_HEAD(&ce->signals);
/* Use the whole device by default */
ce->sseu = intel_device_default_sseu(ctx->i915);
i915_active_request_init(&ce->active_tracker,
NULL, intel_context_retire);
}
static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
{
struct i915_gem_context *ctx;
unsigned int n;
int ret;
int i;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
@ -364,9 +352,11 @@ __create_hw_context(struct drm_i915_private *dev_priv,
list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
INIT_LIST_HEAD(&ctx->active_engines);
mutex_init(&ctx->mutex);
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);
ctx->hw_contexts = RB_ROOT;
spin_lock_init(&ctx->hw_contexts_lock);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
@ -401,10 +391,15 @@ __create_hw_context(struct drm_i915_private *dev_priv,
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
i915_gem_context_set_bannable(ctx);
i915_gem_context_set_recoverable(ctx);
ctx->ring_size = 4 * PAGE_SIZE;
ctx->desc_template =
default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
return ctx;
err_pid:
@ -563,7 +558,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
GEM_BUG_ON(dev_priv->kernel_context);
GEM_BUG_ON(dev_priv->preempt_context);
intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
init_contexts(dev_priv);
/* lowest priority; idle task */
@ -682,81 +677,98 @@ last_request_on_engine(struct i915_timeline *timeline,
return NULL;
}
static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
struct context_barrier_task {
struct i915_active base;
void (*task)(void *data);
void *data;
};
static void cb_retire(struct i915_active *base)
{
struct drm_i915_private *i915 = engine->i915;
const struct intel_context * const ce =
to_intel_context(i915->kernel_context, engine);
struct i915_timeline *barrier = ce->ring->timeline;
struct intel_ring *ring;
bool any_active = false;
struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
lockdep_assert_held(&i915->drm.struct_mutex);
list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
struct i915_request *rq;
if (cb->task)
cb->task(cb->data);
rq = last_request_on_engine(ring->timeline, engine);
if (!rq)
continue;
any_active = true;
if (rq->hw_context == ce)
continue;
/*
* Was this request submitted after the previous
* switch-to-kernel-context?
*/
if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
GEM_TRACE("%s needs barrier for %llx:%lld\n",
ring->timeline->name,
rq->fence.context,
rq->fence.seqno);
return false;
}
GEM_TRACE("%s has barrier after %llx:%lld\n",
ring->timeline->name,
rq->fence.context,
rq->fence.seqno);
}
/*
* If any other timeline was still active and behind the last barrier,
* then our last switch-to-kernel-context must still be queued and
* will run last (leaving the engine in the kernel context when it
* eventually idles).
*/
if (any_active)
return true;
/* The engine is idle; check that it is idling in the kernel context. */
return engine->last_retired_context == ce;
i915_active_fini(&cb->base);
kfree(cb);
}
int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
I915_SELFTEST_DECLARE(static unsigned long context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
unsigned long engines,
void (*task)(void *data),
void *data)
{
struct drm_i915_private *i915 = ctx->i915;
struct context_barrier_task *cb;
struct intel_context *ce;
intel_wakeref_t wakeref;
int err = 0;
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!task);
cb = kmalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return -ENOMEM;
i915_active_init(i915, &cb->base, cb_retire);
i915_active_acquire(&cb->base);
wakeref = intel_runtime_pm_get(i915);
list_for_each_entry(ce, &ctx->active_engines, active_link) {
struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq;
if (!(ce->engine->mask & engines))
continue;
if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
engine->mask)) {
err = -ENXIO;
break;
}
rq = i915_request_alloc(engine, ctx);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
}
err = i915_active_ref(&cb->base, rq->fence.context, rq);
i915_request_add(rq);
if (err)
break;
}
intel_runtime_pm_put(i915, wakeref);
cb->task = err ? NULL : task; /* caller needs to unwind instead */
cb->data = data;
i915_active_release(&cb->base);
return err;
}
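
context_barrier_task() is effectively a reference-counted barrier: the setup phase holds one reference, every request emitted on a tracked engine holds another, and the task fires once the last reference is dropped, i.e. when all those requests have retired. A much-simplified, single-threaded model of that lifetime (the names and the missing locking are assumptions of the sketch, not the i915_active API):

#include <stdio.h>
#include <stdlib.h>

struct barrier {
	int count;
	void (*task)(void *data);
	void *data;
};

static struct barrier *barrier_create(void (*task)(void *), void *data)
{
	struct barrier *b = malloc(sizeof(*b));

	b->count = 1;		/* reference held during setup */
	b->task = task;
	b->data = data;
	return b;
}

static void barrier_get(struct barrier *b)
{
	b->count++;		/* one reference per request in flight */
}

static void barrier_put(struct barrier *b)
{
	if (--b->count)
		return;

	if (b->task)
		b->task(b->data);	/* all tracked requests retired */
	free(b);
}

static void on_idle(void *data)
{
	printf("%s\n", (const char *)data);
}

int main(void)
{
	struct barrier *b = barrier_create(on_idle, "engines drained");

	barrier_get(b);		/* request on engine A */
	barrier_get(b);		/* request on engine B */
	barrier_put(b);		/* setup done (i915_active_release) */
	barrier_put(b);		/* engine A retires */
	barrier_put(b);		/* engine B retires: task runs */
	return 0;
}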
int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
unsigned long mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!i915->kernel_context);
i915_retire_requests(i915);
/* Inoperable, so presume the GPU is safely pointing into the void! */
if (i915_terminally_wedged(i915))
return 0;
for_each_engine(engine, i915, id) {
for_each_engine_masked(engine, i915, mask, mask) {
struct intel_ring *ring;
struct i915_request *rq;
GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
if (engine_has_kernel_context_barrier(engine))
continue;
GEM_TRACE("emit barrier on %s\n", engine->name);
rq = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
@ -779,7 +791,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
i915_sw_fence_await_sw_fence_gfp(&rq->submit,
&prev->submit,
I915_FENCE_GFP);
i915_timeline_sync_set(rq->timeline, &prev->fence);
}
i915_request_add(rq);
@ -796,18 +807,22 @@ static bool client_is_banned(struct drm_i915_file_private *file_priv)
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_context_create *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_gem_context *ctx;
int ret;
if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return -ENODEV;
if (args->pad != 0)
return -EINVAL;
ret = i915_terminally_wedged(i915);
if (ret)
return ret;
if (client_is_banned(file_priv)) {
DRM_DEBUG("client %s[%d] banned from creating ctx\n",
current->comm,
@ -820,7 +835,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
ctx = i915_gem_create_context(dev_priv, file_priv);
ctx = i915_gem_create_context(i915, file_priv);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@ -869,7 +884,6 @@ static int get_sseu(struct i915_gem_context *ctx,
struct drm_i915_gem_context_param_sseu user_sseu;
struct intel_engine_cs *engine;
struct intel_context *ce;
int ret;
if (args->size == 0)
goto out;
@ -889,19 +903,16 @@ static int get_sseu(struct i915_gem_context *ctx,
if (!engine)
return -EINVAL;
/* Only use for mutex here is to serialize get_param and set_param. */
ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
if (ret)
return ret;
ce = to_intel_context(ctx, engine);
ce = intel_context_pin_lock(ctx, engine); /* serialises with set_sseu */
if (IS_ERR(ce))
return PTR_ERR(ce);
user_sseu.slice_mask = ce->sseu.slice_mask;
user_sseu.subslice_mask = ce->sseu.subslice_mask;
user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
mutex_unlock(&ctx->i915->drm.struct_mutex);
intel_context_pin_unlock(ce);
if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
sizeof(user_sseu)))
@ -951,6 +962,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->size = 0;
args->value = i915_gem_context_is_bannable(ctx);
break;
case I915_CONTEXT_PARAM_RECOVERABLE:
args->size = 0;
args->value = i915_gem_context_is_recoverable(ctx);
break;
case I915_CONTEXT_PARAM_PRIORITY:
args->size = 0;
args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
@ -993,23 +1008,28 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
}
static int
gen8_modify_rpcs_gpu(struct intel_context *ce,
struct intel_engine_cs *engine,
struct intel_sseu sseu)
gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
{
struct drm_i915_private *i915 = engine->i915;
struct drm_i915_private *i915 = ce->engine->i915;
struct i915_request *rq, *prev;
intel_wakeref_t wakeref;
int ret;
GEM_BUG_ON(!ce->pin_count);
lockdep_assert_held(&ce->pin_mutex);
lockdep_assert_held(&i915->drm.struct_mutex);
/*
* If the context is not idle, we have to submit an ordered request to
* modify its context image via the kernel context (writing to our own
* image, or into the registers directly, does not stick). Pristine
* and idle contexts will be configured on pinning.
*/
if (!intel_context_is_pinned(ce))
return 0;
/* Submitting requests etc needs the hw awake. */
wakeref = intel_runtime_pm_get(i915);
rq = i915_request_alloc(engine, i915->kernel_context);
rq = i915_request_alloc(ce->engine, i915->kernel_context);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
goto out_put;
@ -1057,27 +1077,26 @@ __i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
struct intel_sseu sseu)
{
struct intel_context *ce = to_intel_context(ctx, engine);
struct intel_context *ce;
int ret = 0;
GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
GEM_BUG_ON(engine->id != RCS);
GEM_BUG_ON(engine->id != RCS0);
ce = intel_context_pin_lock(ctx, engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
/* Nothing to do if unmodified. */
if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
return 0;
/*
* If context is not idle we have to submit an ordered request to modify
* its context image via the kernel context. Pristine and idle contexts
* will be configured on pinning.
*/
if (ce->pin_count)
ret = gen8_modify_rpcs_gpu(ce, engine, sseu);
goto unlock;
ret = gen8_modify_rpcs(ce, sseu);
if (!ret)
ce->sseu = sseu;
unlock:
intel_context_pin_unlock(ce);
return ret;
}
@ -1285,6 +1304,15 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
i915_gem_context_clear_bannable(ctx);
break;
case I915_CONTEXT_PARAM_RECOVERABLE:
if (args->size)
ret = -EINVAL;
else if (args->value)
i915_gem_context_set_recoverable(ctx);
else
i915_gem_context_clear_recoverable(ctx);
break;
case I915_CONTEXT_PARAM_PRIORITY:
{
s64 priority = args->value;
@ -1385,3 +1413,28 @@ out_unlock:
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif
static void i915_global_gem_context_shrink(void)
{
kmem_cache_shrink(global.slab_luts);
}
static void i915_global_gem_context_exit(void)
{
kmem_cache_destroy(global.slab_luts);
}
static struct i915_global_gem_context global = { {
.shrink = i915_global_gem_context_shrink,
.exit = i915_global_gem_context_exit,
} };
int __init i915_global_gem_context_init(void)
{
global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
if (!global.slab_luts)
return -ENOMEM;
i915_global_register(&global.base);
return 0;
}
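
The i915_global pattern above lets each subsystem own its slab while a central list handles shrink-all and exit-in-reverse-order. A toy userspace registry showing the shape of that lifecycle (the fixed-size array and printf bodies are simplifications):

#include <stdio.h>

struct global {
	void (*shrink)(void);
	void (*exit)(void);
};

static struct global *registry[8];
static int nr_globals;

static void global_register(struct global *g)
{
	registry[nr_globals++] = g;
}

static void globals_exit(void)
{
	while (nr_globals--)
		registry[nr_globals]->exit();	/* reverse order */
}

static void luts_shrink(void) { printf("shrink lut slab\n"); }
static void luts_exit(void)   { printf("destroy lut slab\n"); }

static struct global luts_global = { luts_shrink, luts_exit };

int main(void)
{
	global_register(&luts_global);
	globals_exit();
	return 0;
}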

View file

@ -25,210 +25,16 @@
#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include "i915_gem_context_types.h"
#include "i915_gem.h"
#include "i915_scheduler.h"
#include "intel_context.h"
#include "intel_device_info.h"
struct pid;
struct drm_device;
struct drm_file;
struct drm_i915_private;
struct drm_i915_file_private;
struct i915_hw_ppgtt;
struct i915_request;
struct i915_vma;
struct intel_ring;
#define DEFAULT_CONTEXT_HANDLE 0
struct intel_context;
struct intel_context_ops {
void (*unpin)(struct intel_context *ce);
void (*destroy)(struct intel_context *ce);
};
/*
* Powergating configuration for a particular (context,engine).
*/
struct intel_sseu {
u8 slice_mask;
u8 subslice_mask;
u8 min_eus_per_subslice;
u8 max_eus_per_subslice;
};
/**
* struct i915_gem_context - client state
*
* The struct i915_gem_context represents the combined view of the driver and
* logical hardware state for a particular client.
*/
struct i915_gem_context {
/** i915: i915 device backpointer */
struct drm_i915_private *i915;
/** file_priv: owning file descriptor */
struct drm_i915_file_private *file_priv;
/**
* @ppgtt: unique address space (GTT)
*
* In full-ppgtt mode, each context has its own address space ensuring
* complete separation of one client from all others.
*
* In other modes, this is a NULL pointer with the expectation that
* the caller uses the shared global GTT.
*/
struct i915_hw_ppgtt *ppgtt;
/**
* @pid: process id of creator
*
* Note that the creator of the context may not be the principal user,
* as the context may be shared across a local socket. However,
* that should only affect the default context; all contexts created
* explicitly by the client are expected to be isolated.
*/
struct pid *pid;
/**
* @name: arbitrary name
*
* A name is constructed for the context from the creator's process
* name, pid and user handle in order to uniquely identify the
* context in messages.
*/
const char *name;
/** link: place with &drm_i915_private.context_list */
struct list_head link;
struct llist_node free_link;
/**
* @ref: reference count
*
* A reference to a context is held by both the client who created it
* and on each request submitted to the hardware using the request
* (to ensure the hardware has access to the state until it has
* finished all pending writes). See i915_gem_context_get() and
* i915_gem_context_put() for access.
*/
struct kref ref;
/**
* @rcu: rcu_head for deferred freeing.
*/
struct rcu_head rcu;
/**
* @user_flags: small set of booleans controlled by the user
*/
unsigned long user_flags;
#define UCONTEXT_NO_ZEROMAP 0
#define UCONTEXT_NO_ERROR_CAPTURE 1
#define UCONTEXT_BANNABLE 2
/**
* @flags: small set of booleans
*/
unsigned long flags;
#define CONTEXT_BANNED 0
#define CONTEXT_CLOSED 1
#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
/**
* @hw_id: - unique identifier for the context
*
* The hardware needs to uniquely identify the context for a few
* functions like fault reporting, PASID, scheduling. The
* &drm_i915_private.context_hw_ida is used to assign a unique
* id for the lifetime of the context.
*
* @hw_id_pin_count: - number of times this context had been pinned
* for use (should be, at most, once per engine).
*
* @hw_id_link: - all contexts with an assigned id are tracked
* for possible repossession.
*/
unsigned int hw_id;
atomic_t hw_id_pin_count;
struct list_head hw_id_link;
/**
* @user_handle: userspace identifier
*
* A unique per-file identifier is generated from
* &drm_i915_file_private.contexts.
*/
u32 user_handle;
struct i915_sched_attr sched;
/** engine: per-engine logical HW state */
struct intel_context {
struct i915_gem_context *gem_context;
struct intel_engine_cs *active;
struct list_head signal_link;
struct list_head signals;
struct i915_vma *state;
struct intel_ring *ring;
u32 *lrc_reg_state;
u64 lrc_desc;
int pin_count;
/**
* active_tracker: Active tracker for the external rq activity
* on this intel_context object.
*/
struct i915_active_request active_tracker;
const struct intel_context_ops *ops;
/** sseu: Control eu/slice partitioning */
struct intel_sseu sseu;
} __engine[I915_NUM_ENGINES];
/** ring_size: size for allocating the per-engine ring buffer */
u32 ring_size;
/** desc_template: invariant fields for the HW context descriptor */
u32 desc_template;
/** guilty_count: How many times this context has caused a GPU hang. */
atomic_t guilty_count;
/**
* @active_count: How many times this context was active during a GPU
* hang, but did not cause it.
*/
atomic_t active_count;
#define CONTEXT_SCORE_GUILTY 10
#define CONTEXT_SCORE_BAN_THRESHOLD 40
/** ban_score: Accumulated score of all hangs caused by this context. */
atomic_t ban_score;
/** remap_slice: Bitmask of cache lines that need remapping */
u8 remap_slice;
/** handles_vma: rbtree to look up our context specific obj/vma for
* the user handle. (user handles are per fd, but the binding is
* per vm, which may be one per context or shared with the global GTT)
*/
struct radix_tree_root handles_vma;
/** handles_list: reverse list of all the rbtree entries in use for
* this context, which allows us to free all the allocations on
* context close.
*/
struct list_head handles_list;
};
static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_CLOSED, &ctx->flags);
@ -270,6 +76,21 @@ static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
{
return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}
static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
{
set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}
static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
{
clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}
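
From userspace, the new RECOVERABLE bit is driven through the context setparam ioctl handled in the hunks above. A hedged sketch of opting the default context out of post-hang recovery (the render node path and the absence of error handling are assumptions; requires a kernel exposing I915_CONTEXT_PARAM_RECOVERABLE):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);
	struct drm_i915_gem_context_param p = {
		.ctx_id = 0,	/* DEFAULT_CONTEXT_HANDLE */
		.param = I915_CONTEXT_PARAM_RECOVERABLE,
		.value = 0,	/* do not recover this context after a hang */
	};

	if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
		perror("setparam");
	return 0;
}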
static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_BANNED, &ctx->flags);
@ -315,35 +136,6 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
return !ctx->file_priv;
}
static inline struct intel_context *
to_intel_context(struct i915_gem_context *ctx,
const struct intel_engine_cs *engine)
{
return &ctx->__engine[engine->id];
}
static inline struct intel_context *
intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
return engine->context_pin(engine, ctx);
}
static inline void __intel_context_pin(struct intel_context *ce)
{
GEM_BUG_ON(!ce->pin_count);
ce->pin_count++;
}
static inline void intel_context_unpin(struct intel_context *ce)
{
GEM_BUG_ON(!ce->pin_count);
if (--ce->pin_count)
return;
GEM_BUG_ON(!ce->ops);
ce->ops->unpin(ce);
}
/* i915_gem_context.c */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
@ -354,7 +146,8 @@ int i915_gem_context_open(struct drm_i915_private *i915,
void i915_gem_context_close(struct drm_file *file);
int i915_switch_context(struct i915_request *rq);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
unsigned long engine_mask);
void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
@ -386,8 +179,7 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_release);
}
void intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);
#endif /* !__I915_GEM_CONTEXT_H__ */

View file

@ -0,0 +1,182 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#ifndef __I915_GEM_CONTEXT_TYPES_H__
#define __I915_GEM_CONTEXT_TYPES_H__
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/types.h>
#include "i915_scheduler.h"
#include "intel_context_types.h"
struct pid;
struct drm_i915_private;
struct drm_i915_file_private;
struct i915_hw_ppgtt;
struct i915_timeline;
struct intel_ring;
/**
* struct i915_gem_context - client state
*
* The struct i915_gem_context represents the combined view of the driver and
* logical hardware state for a particular client.
*/
struct i915_gem_context {
/** i915: i915 device backpointer */
struct drm_i915_private *i915;
/** file_priv: owning file descriptor */
struct drm_i915_file_private *file_priv;
/**
* @ppgtt: unique address space (GTT)
*
* In full-ppgtt mode, each context has its own address space ensuring
* complete separation of one client from all others.
*
* In other modes, this is a NULL pointer with the expectation that
* the caller uses the shared global GTT.
*/
struct i915_hw_ppgtt *ppgtt;
/**
* @pid: process id of creator
*
* Note that the creator of the context may not be the principal user,
* as the context may be shared across a local socket. However,
* that should only affect the default context; all contexts created
* explicitly by the client are expected to be isolated.
*/
struct pid *pid;
/**
* @name: arbitrary name
*
* A name is constructed for the context from the creator's process
* name, pid and user handle in order to uniquely identify the
* context in messages.
*/
const char *name;
/** link: place with &drm_i915_private.context_list */
struct list_head link;
struct llist_node free_link;
/**
* @ref: reference count
*
* A reference to a context is held by both the client who created it
* and on each request submitted to the hardware using the request
* (to ensure the hardware has access to the state until it has
* finished all pending writes). See i915_gem_context_get() and
* i915_gem_context_put() for access.
*/
struct kref ref;
/**
* @rcu: rcu_head for deferred freeing.
*/
struct rcu_head rcu;
/**
* @user_flags: small set of booleans controlled by the user
*/
unsigned long user_flags;
#define UCONTEXT_NO_ZEROMAP 0
#define UCONTEXT_NO_ERROR_CAPTURE 1
#define UCONTEXT_BANNABLE 2
#define UCONTEXT_RECOVERABLE 3
/**
* @flags: small set of booleans
*/
unsigned long flags;
#define CONTEXT_BANNED 0
#define CONTEXT_CLOSED 1
#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
/**
* @hw_id: - unique identifier for the context
*
* The hardware needs to uniquely identify the context for a few
* functions like fault reporting, PASID, scheduling. The
* &drm_i915_private.context_hw_ida is used to assign a unique
* id for the lifetime of the context.
*
* @hw_id_pin_count: - number of times this context had been pinned
* for use (should be, at most, once per engine).
*
* @hw_id_link: - all contexts with an assigned id are tracked
* for possible repossession.
*/
unsigned int hw_id;
atomic_t hw_id_pin_count;
struct list_head hw_id_link;
struct list_head active_engines;
struct mutex mutex;
/**
* @user_handle: userspace identifier
*
* A unique per-file identifier is generated from
* &drm_i915_file_private.contexts.
*/
u32 user_handle;
#define DEFAULT_CONTEXT_HANDLE 0
struct i915_sched_attr sched;
/** hw_contexts: per-engine logical HW state */
struct rb_root hw_contexts;
spinlock_t hw_contexts_lock;
/** ring_size: size for allocating the per-engine ring buffer */
u32 ring_size;
/** desc_template: invariant fields for the HW context descriptor */
u32 desc_template;
/** guilty_count: How many times this context has caused a GPU hang. */
atomic_t guilty_count;
/**
* @active_count: How many times this context was active during a GPU
* hang, but did not cause it.
*/
atomic_t active_count;
/**
* @hang_timestamp: The last time(s) this context caused a GPU hang
*/
unsigned long hang_timestamp[2];
#define CONTEXT_FAST_HANG_JIFFIES (120 * HZ) /* 3 hangs within 120s? Banned! */
/** remap_slice: Bitmask of cache lines that need remapping */
u8 remap_slice;
/** handles_vma: rbtree to look up our context specific obj/vma for
* the user handle. (user handles are per fd, but the binding is
* per vm, which may be one per context or shared with the global GTT)
*/
struct radix_tree_root handles_vma;
/** handles_list: reverse list of all the rbtree entries in use for
* this context, which allows us to free all the allocations on
* context close.
*/
struct list_head handles_list;
};
#endif /* __I915_GEM_CONTEXT_TYPES_H__ */

View file

@ -300,7 +300,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
obj = i915_gem_object_alloc(to_i915(dev));
obj = i915_gem_object_alloc();
if (obj == NULL) {
ret = -ENOMEM;
goto fail_detach;

View file

@ -38,31 +38,21 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
static bool ggtt_is_idle(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
if (i915->gt.active_requests)
return false;
for_each_engine(engine, i915, id) {
if (!intel_engine_has_kernel_context(engine))
return false;
}
return true;
return !i915->gt.active_requests;
}
static int ggtt_flush(struct drm_i915_private *i915)
{
int err;
/* Not everything in the GGTT is tracked via vma (otherwise we
/*
* Not everything in the GGTT is tracked via vma (otherwise we
* could evict as required with minimal stalling) so we are forced
* to idle the GPU and explicitly retire outstanding requests in
* the hopes that we can then remove contexts and the like only
* bound by their active reference.
*/
err = i915_gem_switch_to_kernel_context(i915);
err = i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines);
if (err)
return err;

View file

@ -794,8 +794,8 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
* keeping all of their resources pinned.
*/
ce = to_intel_context(eb->ctx, eb->engine);
if (!ce->ring) /* first use, assume empty! */
ce = intel_context_lookup(eb->ctx, eb->engine);
if (!ce || !ce->ring) /* first use, assume empty! */
return 0;
rq = __eb_wait_for_ring(ce->ring);
@ -849,12 +849,12 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
}
vma = i915_vma_instance(obj, eb->vm, NULL);
if (unlikely(IS_ERR(vma))) {
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
}
lut = kmem_cache_alloc(eb->i915->luts, GFP_KERNEL);
lut = i915_lut_handle_alloc();
if (unlikely(!lut)) {
err = -ENOMEM;
goto err_obj;
@ -862,7 +862,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
err = radix_tree_insert(handles_vma, handle, vma);
if (unlikely(err)) {
kmem_cache_free(eb->i915->luts, lut);
i915_lut_handle_free(lut);
goto err_obj;
}
@ -1957,7 +1957,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
u32 *cs;
int i;
if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) {
if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
@ -2082,11 +2082,11 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
#define I915_USER_RINGS (4)
static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
[I915_EXEC_DEFAULT] = RCS,
[I915_EXEC_RENDER] = RCS,
[I915_EXEC_BLT] = BCS,
[I915_EXEC_BSD] = VCS,
[I915_EXEC_VEBOX] = VECS
[I915_EXEC_DEFAULT] = RCS0,
[I915_EXEC_RENDER] = RCS0,
[I915_EXEC_BLT] = BCS0,
[I915_EXEC_BSD] = VCS0,
[I915_EXEC_VEBOX] = VECS0
};
static struct intel_engine_cs *
@ -2109,7 +2109,7 @@ eb_select_engine(struct drm_i915_private *dev_priv,
return NULL;
}
if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
@ -2312,10 +2312,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (args->flags & I915_EXEC_IS_PINNED)
eb.batch_flags |= I915_DISPATCH_PINNED;
eb.engine = eb_select_engine(eb.i915, file, args);
if (!eb.engine)
return -EINVAL;
if (args->flags & I915_EXEC_FENCE_IN) {
in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
if (!in_fence)
@ -2340,6 +2336,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_destroy;
eb.engine = eb_select_engine(eb.i915, file, args);
if (!eb.engine) {
err = -EINVAL;
goto err_engine;
}
/*
* Take a local wakeref for preparing to dispatch the execbuf as
* we expect to access the hardware fairly frequently in the
@ -2505,6 +2507,7 @@ err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
intel_runtime_pm_put(eb.i915, wakeref);
err_engine:
i915_gem_context_put(eb.ctx);
err_destroy:
eb_destroy(&eb);

View file

@ -210,6 +210,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
struct i915_vma *vma)
{
intel_wakeref_t wakeref;
struct i915_vma *old;
int ret;
if (vma) {
@ -229,49 +230,55 @@ static int fence_update(struct drm_i915_fence_reg *fence,
return ret;
}
if (fence->vma) {
struct i915_vma *old = fence->vma;
old = xchg(&fence->vma, NULL);
if (old) {
ret = i915_active_request_retire(&old->last_fence,
&old->obj->base.dev->struct_mutex);
if (ret)
if (ret) {
fence->vma = old;
return ret;
}
i915_vma_flush_writes(old);
}
if (fence->vma && fence->vma != vma) {
/* Ensure that all userspace CPU access is completed before
/*
* Ensure that all userspace CPU access is completed before
* stealing the fence.
*/
GEM_BUG_ON(fence->vma->fence != fence);
i915_vma_revoke_mmap(fence->vma);
fence->vma->fence = NULL;
fence->vma = NULL;
if (old != vma) {
GEM_BUG_ON(old->fence != fence);
i915_vma_revoke_mmap(old);
old->fence = NULL;
}
list_move(&fence->link, &fence->i915->mm.fence_list);
}
/* We only need to update the register itself if the device is awake.
/*
* We only need to update the register itself if the device is awake.
* If the device is currently powered down, we will defer the write
* to the runtime resume, see i915_gem_restore_fences().
*
* This only works for removing the fence register, on acquisition
* the caller must hold the rpm wakeref. The fence register must
* be cleared before we can use any other fences to ensure that
* the new fences do not overlap the elided clears, confusing HW.
*/
wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
if (wakeref) {
fence_write(fence, vma);
intel_runtime_pm_put(fence->i915, wakeref);
if (!wakeref) {
GEM_BUG_ON(vma);
return 0;
}
if (vma) {
if (fence->vma != vma) {
vma->fence = fence;
fence->vma = vma;
}
WRITE_ONCE(fence->vma, vma);
fence_write(fence, vma);
if (vma) {
vma->fence = fence;
list_move_tail(&fence->link, &fence->i915->mm.fence_list);
}
intel_runtime_pm_put(fence->i915, wakeref);
return 0;
}
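
The xchg() in the rewritten fence_update() is what makes the error path safe: ownership of the old vma is taken atomically up front, and is put back if retiring its last fence fails, so the register is never left half-updated. A compact C11 model of that take-then-roll-back shape (retire_err simulates the i915_active_request_retire() result):

#include <stdatomic.h>
#include <stdio.h>

struct vma { const char *name; };
struct fence { _Atomic(struct vma *) vma; };

static int fence_update(struct fence *f, struct vma *new_vma,
			int retire_err)
{
	struct vma *old = atomic_exchange(&f->vma, NULL);

	if (old && retire_err) {
		atomic_store(&f->vma, old);	/* roll back on error */
		return retire_err;
	}

	atomic_store(&f->vma, new_vma);
	return 0;
}

int main(void)
{
	struct vma a = { "a" }, b = { "b" };
	struct fence f = { &a };

	fence_update(&f, &b, 0);
	printf("fence -> %s\n", atomic_load(&f.vma)->name);	/* b */
	return 0;
}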
@ -435,32 +442,6 @@ void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
list_add(&fence->link, &fence->i915->mm.fence_list);
}
/**
* i915_gem_revoke_fences - revoke fence state
* @dev_priv: i915 device private
*
* Removes all GTT mmappings via the fence registers. This forces any user
* of the fence to reacquire that fence before continuing with their access.
* One use is during GPU reset where the fence register is lost and we need to
* revoke concurrent userspace access via GTT mmaps until the hardware has been
* reset and the fence registers have been restored.
*/
void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
{
int i;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
if (fence->vma)
i915_vma_revoke_mmap(fence->vma);
}
}
/**
* i915_gem_restore_fences - restore fence state
* @dev_priv: i915 device private
@ -473,9 +454,10 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
{
int i;
rcu_read_lock(); /* keep obj alive as we dereference */
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
struct i915_vma *vma = reg->vma;
struct i915_vma *vma = READ_ONCE(reg->vma);
GEM_BUG_ON(vma && vma->fence != reg);
@ -483,18 +465,12 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
* Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence.
*/
if (vma && !i915_gem_object_is_tiled(vma->obj)) {
GEM_BUG_ON(!reg->dirty);
GEM_BUG_ON(i915_vma_has_userfault(vma));
list_move(&reg->link, &dev_priv->mm.fence_list);
vma->fence = NULL;
if (vma && !i915_gem_object_is_tiled(vma->obj))
vma = NULL;
}
fence_write(reg, vma);
reg->vma = vma;
}
rcu_read_unlock();
}
/**

View file

@ -584,7 +584,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
* for all.
*/
size = I915_GTT_PAGE_SIZE_4K;
if (i915_vm_is_48bit(vm) &&
if (i915_vm_is_4lvl(vm) &&
HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
size = I915_GTT_PAGE_SIZE_64K;
gfp |= __GFP_NOWARN;
@ -613,7 +613,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
vm->scratch_page.page = page;
vm->scratch_page.daddr = addr;
vm->scratch_page.order = order;
vm->scratch_order = order;
return 0;
unmap_page:
@ -632,10 +632,11 @@ skip:
static void cleanup_scratch_page(struct i915_address_space *vm)
{
struct i915_page_dma *p = &vm->scratch_page;
int order = vm->scratch_order;
dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
PCI_DMA_BIDIRECTIONAL);
__free_pages(p->page, p->order);
__free_pages(p->page, order);
}
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
@ -726,18 +727,13 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp)
pdp->page_directory = NULL;
}
static inline bool use_4lvl(const struct i915_address_space *vm)
{
return i915_vm_is_48bit(vm);
}
static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
struct i915_page_directory_pointer *pdp;
int ret = -ENOMEM;
GEM_BUG_ON(!use_4lvl(vm));
GEM_BUG_ON(!i915_vm_is_4lvl(vm));
pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
if (!pdp)
@ -766,7 +762,7 @@ static void free_pdp(struct i915_address_space *vm,
{
__pdp_fini(pdp);
if (!use_4lvl(vm))
if (!i915_vm_is_4lvl(vm))
return;
cleanup_px(vm, pdp);
@ -791,14 +787,15 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
}
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
/*
* PDE TLBs are a pain to invalidate on GEN8+. When we modify
* the page table structures, we mark them dirty so that
* context switching/execlist queuing code takes extra steps
* to ensure that tlbs are flushed.
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
ppgtt->pd_dirty_engines = ALL_ENGINES;
}
/* Removes entries from a single page table, releasing it if it's empty.
@ -809,8 +806,6 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
u64 start, u64 length)
{
unsigned int num_entries = gen8_pte_count(start, length);
unsigned int pte = gen8_pte_index(start);
unsigned int pte_end = pte + num_entries;
gen8_pte_t *vaddr;
GEM_BUG_ON(num_entries > pt->used_ptes);
@ -820,8 +815,7 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
return true;
vaddr = kmap_atomic_px(pt);
while (pte < pte_end)
vaddr[pte++] = vm->scratch_pte;
memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries);
kunmap_atomic(vaddr);
return false;
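
Replacing the open-coded PTE loop with memset64() expresses the same fill in one call against 64-bit entries. Userspace has no memset64(), so the sketch below supplies a naive stand-in to show the intended semantics:

#include <stdint.h>
#include <stdio.h>

/* Naive stand-in for the kernel's memset64() helper. */
static void memset64(uint64_t *p, uint64_t v, size_t n)
{
	while (n--)
		*p++ = v;
}

int main(void)
{
	uint64_t ptes[512] = { 0 };
	const uint64_t scratch_pte = 0xdeadbeefULL;

	memset64(ptes + 16, scratch_pte, 32);	/* fill entries 16..47 */
	printf("pte[16] = %#llx\n", (unsigned long long)ptes[16]);
	return 0;
}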
@ -872,7 +866,7 @@ static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
gen8_ppgtt_pdpe_t *vaddr;
pdp->page_directory[pdpe] = pd;
if (!use_4lvl(vm))
if (!i915_vm_is_4lvl(vm))
return;
vaddr = kmap_atomic_px(pdp);
@ -937,7 +931,7 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp;
unsigned int pml4e;
GEM_BUG_ON(!use_4lvl(vm));
GEM_BUG_ON(!i915_vm_is_4lvl(vm));
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
GEM_BUG_ON(pdp == vm->scratch_pdp);
@ -1219,7 +1213,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
GEM_BUG_ON(!clone->has_read_only);
vm->scratch_page.order = clone->scratch_page.order;
vm->scratch_order = clone->scratch_order;
vm->scratch_pte = clone->scratch_pte;
vm->scratch_pt = clone->scratch_pt;
vm->scratch_pd = clone->scratch_pd;
@ -1248,7 +1242,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
goto free_pt;
}
if (use_4lvl(vm)) {
if (i915_vm_is_4lvl(vm)) {
vm->scratch_pdp = alloc_pdp(vm);
if (IS_ERR(vm->scratch_pdp)) {
ret = PTR_ERR(vm->scratch_pdp);
@ -1258,7 +1252,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pt(vm, vm->scratch_pt);
gen8_initialize_pd(vm, vm->scratch_pd);
if (use_4lvl(vm))
if (i915_vm_is_4lvl(vm))
gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0;
@ -1280,7 +1274,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
enum vgt_g2v_type msg;
int i;
if (use_4lvl(vm)) {
if (i915_vm_is_4lvl(vm)) {
const u64 daddr = px_dma(&ppgtt->pml4);
I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
@ -1310,7 +1304,7 @@ static void gen8_free_scratch(struct i915_address_space *vm)
if (!vm->scratch_page.daddr)
return;
if (use_4lvl(vm))
if (i915_vm_is_4lvl(vm))
free_pdp(vm, vm->scratch_pdp);
free_pd(vm, vm->scratch_pd);
free_pt(vm, vm->scratch_pt);
@ -1356,7 +1350,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, false);
if (use_4lvl(vm))
if (i915_vm_is_4lvl(vm))
gen8_ppgtt_cleanup_4lvl(ppgtt);
else
gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
@ -1519,6 +1513,23 @@ unwind:
return -ENOMEM;
}
static void ppgtt_init(struct drm_i915_private *i915,
struct i915_hw_ppgtt *ppgtt)
{
kref_init(&ppgtt->ref);
ppgtt->vm.i915 = i915;
ppgtt->vm.dma = &i915->drm.pdev->dev;
ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
ppgtt->vm.vma_ops.clear_pages = clear_pages;
}
/*
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
@ -1535,20 +1546,11 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
kref_init(&ppgtt->ref);
ppgtt->vm.i915 = i915;
ppgtt->vm.dma = &i915->drm.pdev->dev;
ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ?
1ULL << 48 :
1ULL << 32;
ppgtt_init(i915, ppgtt);
/* From bdw, there is support for read-only pages in the PPGTT. */
ppgtt->vm.has_read_only = true;
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
/* There are only a few exceptions for gen >= 6: chv and bxt.
* And we are not sure about the latter, so play safe for now.
*/
@ -1559,7 +1561,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (err)
goto err_free;
if (use_4lvl(&ppgtt->vm)) {
if (i915_vm_is_4lvl(&ppgtt->vm)) {
err = setup_px(&ppgtt->vm, &ppgtt->pml4);
if (err)
goto err_scratch;
@ -1592,11 +1594,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
ppgtt->vm.vma_ops.clear_pages = clear_pages;
return ppgtt;
err_scratch:
@ -1672,8 +1669,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
while (num_entries) {
struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
const unsigned int end = min(pte + num_entries, GEN6_PTES);
const unsigned int count = end - pte;
const unsigned int count = min(num_entries, GEN6_PTES - pte);
gen6_pte_t *vaddr;
GEM_BUG_ON(pt == vm->scratch_pt);
@ -1693,9 +1689,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
*/
vaddr = kmap_atomic_px(pt);
do {
vaddr[pte++] = scratch_pte;
} while (pte < end);
memset32(vaddr + pte, scratch_pte, count);
kunmap_atomic(vaddr);
pte = 0;
@ -1913,7 +1907,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(size > ggtt->vm.total);
vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);
vma = i915_vma_alloc();
if (!vma)
return ERR_PTR(-ENOMEM);
@ -1991,25 +1985,13 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
kref_init(&ppgtt->base.ref);
ppgtt->base.vm.i915 = i915;
ppgtt->base.vm.dma = &i915->drm.pdev->dev;
ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE;
i915_address_space_init(&ppgtt->base.vm, VM_CLASS_PPGTT);
ppgtt_init(i915, &ppgtt->base);
ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma;
ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
ppgtt->base.vm.vma_ops.set_pages = ppgtt_set_pages;
ppgtt->base.vm.vma_ops.clear_pages = clear_pages;
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
err = gen6_ppgtt_init_scratch(ppgtt);
@ -3701,7 +3683,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
}
ret = 0;
if (unlikely(IS_ERR(vma->pages))) {
if (IS_ERR(vma->pages)) {
ret = PTR_ERR(vma->pages);
vma->pages = NULL;
DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",

View file

@ -213,7 +213,6 @@ struct i915_vma;
struct i915_page_dma {
struct page *page;
int order;
union {
dma_addr_t daddr;
@ -293,6 +292,7 @@ struct i915_address_space {
#define VM_CLASS_PPGTT 1
u64 scratch_pte;
int scratch_order;
struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
@ -348,7 +348,7 @@ struct i915_address_space {
#define i915_is_ggtt(vm) ((vm)->is_ggtt)
static inline bool
i915_vm_is_48bit(const struct i915_address_space *vm)
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
return (vm->total - 1) >> 32;
}
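
The renamed predicate keeps the old body: an address space whose total size needs more than 32 bits of VA can only be the 4-level (48-bit) PPGTT layout, so the two names described the same property. Checked in isolation:

#include <stdint.h>
#include <stdio.h>

/* More than 4 GiB of VA cannot be addressed by a 3-level layout. */
static int vm_is_4lvl(uint64_t total)
{
	return (total - 1) >> 32 != 0;
}

int main(void)
{
	printf("4 GiB vm:   %d\n", vm_is_4lvl(1ULL << 32));	/* 0 */
	printf("256 TiB vm: %d\n", vm_is_4lvl(1ULL << 48));	/* 1 */
	return 0;
}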
@ -356,7 +356,7 @@ i915_vm_is_48bit(const struct i915_address_space *vm)
static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
return vm->scratch_page.order == get_order(I915_GTT_PAGE_SIZE_64K);
return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}
/* The Graphics Translation Table is the way in which GEN hardware translates a
@ -390,7 +390,7 @@ struct i915_hw_ppgtt {
struct i915_address_space vm;
struct kref ref;
unsigned long pd_dirty_rings;
unsigned long pd_dirty_engines;
union {
struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */
struct i915_page_directory_pointer pdp; /* GEN8+ */
@ -488,7 +488,7 @@ static inline u32 gen6_pde_index(u32 addr)
static inline unsigned int
i915_pdpes_per_pdp(const struct i915_address_space *vm)
{
if (i915_vm_is_48bit(vm))
if (i915_vm_is_4lvl(vm))
return GEN8_PML4ES_PER_PML4;
return GEN8_3LVL_PDPES;


@ -193,7 +193,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
obj = i915_gem_object_alloc(i915);
obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);


@ -24,6 +24,22 @@
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
static struct i915_global_object {
struct i915_global base;
struct kmem_cache *slab_objects;
} global;
struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
return kmem_cache_free(global.slab_objects, obj);
}
/**
* Mark up the object's coherency levels for a given cache_level
@ -46,3 +62,29 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
obj->cache_dirty =
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
static void i915_global_objects_shrink(void)
{
kmem_cache_shrink(global.slab_objects);
}
static void i915_global_objects_exit(void)
{
kmem_cache_destroy(global.slab_objects);
}
static struct i915_global_object global = { {
.shrink = i915_global_objects_shrink,
.exit = i915_global_objects_exit,
} };
int __init i915_global_objects_init(void)
{
global.slab_objects =
KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
if (!global.slab_objects)
return -ENOMEM;
i915_global_register(&global.base);
return 0;
}
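
Together with the new i915_globals.c further down, this is the template every converted slab cache now follows. A minimal sketch of the boilerplate a hypothetical new subsystem would add (struct foo, slab_foos and the i915_global_foos_* names are illustrative, not part of this commit):

static struct i915_global_foo {
	struct i915_global base;
	struct kmem_cache *slab_foos;
} global;

static void i915_global_foos_shrink(void)
{
	kmem_cache_shrink(global.slab_foos);	/* trim empty slabs on idle */
}

static void i915_global_foos_exit(void)
{
	kmem_cache_destroy(global.slab_foos);	/* module unload */
}

int __init i915_global_foos_init(void)
{
	global.base.shrink = i915_global_foos_shrink;
	global.base.exit = i915_global_foos_exit;

	global.slab_foos = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
	if (!global.slab_foos)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

The constructor must also be appended to the initfn[] table in i915_globals.c and declared in i915_globals.h, as this commit does for each existing cache.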


@ -304,6 +304,9 @@ to_intel_bo(struct drm_gem_object *gem)
return container_of(gem, struct drm_i915_gem_object, base);
}
struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
/**
* i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
* @filp: DRM file private date
@ -500,4 +503,3 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
#endif


@ -42,7 +42,7 @@ struct intel_render_state {
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct intel_engine_cs *engine)
{
if (engine->id != RCS)
if (engine->id != RCS0)
return NULL;
switch (INTEL_GEN(engine->i915)) {


@ -565,7 +565,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *obj;
unsigned int cache_level;
obj = i915_gem_object_alloc(dev_priv);
obj = i915_gem_object_alloc();
if (obj == NULL)
return NULL;


@ -795,7 +795,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -ENODEV;
}
obj = i915_gem_object_alloc(dev_priv);
obj = i915_gem_object_alloc();
if (obj == NULL)
return -ENOMEM;


@ -0,0 +1,135 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "i915_active.h"
#include "i915_gem_context.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "i915_vma.h"
static LIST_HEAD(globals);
void __init i915_global_register(struct i915_global *global)
{
GEM_BUG_ON(!global->shrink);
GEM_BUG_ON(!global->exit);
list_add_tail(&global->link, &globals);
}
static void __i915_globals_cleanup(void)
{
struct i915_global *global, *next;
list_for_each_entry_safe_reverse(global, next, &globals, link)
global->exit();
}
static __initconst int (* const initfn[])(void) = {
i915_global_active_init,
i915_global_context_init,
i915_global_gem_context_init,
i915_global_objects_init,
i915_global_request_init,
i915_global_scheduler_init,
i915_global_vma_init,
};
int __init i915_globals_init(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(initfn); i++) {
int err;
err = initfn[i]();
if (err) {
__i915_globals_cleanup();
return err;
}
}
return 0;
}
static void i915_globals_shrink(void)
{
struct i915_global *global;
/*
* kmem_cache_shrink() discards empty slabs and reorders partially
* filled slabs to prioritise allocating from the mostly full slabs,
* with the aim of reducing fragmentation.
*/
list_for_each_entry(global, &globals, link)
global->shrink();
}
static atomic_t active;
static atomic_t epoch;
struct park_work {
struct rcu_work work;
int epoch;
};
static void __i915_globals_park(struct work_struct *work)
{
struct park_work *wrk = container_of(work, typeof(*wrk), work.work);
/* Confirm nothing woke up in the last grace period */
if (wrk->epoch == atomic_read(&epoch))
i915_globals_shrink();
kfree(wrk);
}
void i915_globals_park(void)
{
struct park_work *wrk;
/*
* Defer shrinking the global slab caches (and other work) until
* after a RCU grace period has completed with no activity. This
* is to try and reduce the latency impact on the consumers caused
* by us shrinking the caches the same time as they are trying to
* allocate, with the assumption being that if we idle long enough
* for an RCU grace period to elapse since the last use, it is likely
* to be longer until we need the caches again.
*/
if (!atomic_dec_and_test(&active))
return;
wrk = kmalloc(sizeof(*wrk), GFP_KERNEL);
if (!wrk)
return;
wrk->epoch = atomic_inc_return(&epoch);
INIT_RCU_WORK(&wrk->work, __i915_globals_park);
queue_rcu_work(system_wq, &wrk->work);
}
void i915_globals_unpark(void)
{
atomic_inc(&epoch);
atomic_inc(&active);
}
void __exit i915_globals_exit(void)
{
/* Flush any residual park_work */
rcu_barrier();
flush_scheduled_work();
__i915_globals_cleanup();
/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
rcu_barrier();
}
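
The epoch counter is what keeps the deferred shrink honest: i915_globals_unpark() bumps it, so a park work item queued before a wakeup observes a stale epoch once its RCU grace period elapses and skips the shrink. A sketch of the intended bracketing, assuming call sites in the GT wake/park paths (those paths are outside this diff):

/* Sketch only: how callers are expected to drive park/unpark. */
static void example_gt_wake(void)
{
	i915_globals_unpark();	/* atomic_inc(&epoch) invalidates pending parks */
	/* ... engines become busy ... */
}

static void example_gt_park(void)
{
	/* ... last engine idles ... */
	i915_globals_park();	/* queues rcu_work; the shrink runs only if no
				 * unpark happened during the grace period */
}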


@ -0,0 +1,35 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#ifndef _I915_GLOBALS_H_
#define _I915_GLOBALS_H_
typedef void (*i915_global_func_t)(void);
struct i915_global {
struct list_head link;
i915_global_func_t shrink;
i915_global_func_t exit;
};
void i915_global_register(struct i915_global *global);
int i915_globals_init(void);
void i915_globals_park(void);
void i915_globals_unpark(void);
void i915_globals_exit(void);
/* constructors */
int i915_global_active_init(void);
int i915_global_context_init(void);
int i915_global_gem_context_init(void);
int i915_global_objects_init(void);
int i915_global_request_init(void);
int i915_global_scheduler_init(void);
int i915_global_vma_init(void);
#endif /* _I915_GLOBALS_H_ */


@ -380,19 +380,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
err_printf(m, "%s [%d]:\n", name, count);
while (count--) {
err_printf(m, " %08x_%08x %8u %02x %02x %02x",
err_printf(m, " %08x_%08x %8u %02x %02x",
upper_32_bits(err->gtt_offset),
lower_32_bits(err->gtt_offset),
err->size,
err->read_domains,
err->write_domain,
err->wseqno);
err->write_domain);
err_puts(m, tiling_flag(err->tiling));
err_puts(m, dirty_flag(err->dirty));
err_puts(m, purgeable_flag(err->purgeable));
err_puts(m, err->userptr ? " userptr" : "");
err_puts(m, err->engine != -1 ? " " : "");
err_puts(m, engine_name(m->i915, err->engine));
err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
if (err->name)
@ -414,7 +411,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
err_printf(m, " INSTDONE: 0x%08x\n",
ee->instdone.instdone);
if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
if (ee->engine_id != RCS0 || INTEL_GEN(m->i915) <= 3)
return;
err_printf(m, " SC_INSTDONE: 0x%08x\n",
@ -434,11 +431,6 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
ee->instdone.row[slice][subslice]);
}
static const char *bannable(const struct drm_i915_error_context *ctx)
{
return ctx->bannable ? "" : " (unbannable)";
}
static void error_print_request(struct drm_i915_error_state_buf *m,
const char *prefix,
const struct drm_i915_error_request *erq,
@ -447,9 +439,8 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
if (!erq->seqno)
return;
err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
prefix, erq->pid, erq->ban_score,
erq->context, erq->seqno,
err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
prefix, erq->pid, erq->context, erq->seqno,
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
@ -463,10 +454,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct drm_i915_error_context *ctx)
{
err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, guilty %d active %d\n",
header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
ctx->sched_attr.priority, ctx->ban_score, bannable(ctx),
ctx->guilty, ctx->active);
ctx->sched_attr.priority, ctx->guilty, ctx->active);
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
@ -512,13 +502,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
if (INTEL_GEN(m->i915) >= 6) {
err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
err_printf(m, " SYNC_0: 0x%08x\n",
ee->semaphore_mboxes[0]);
err_printf(m, " SYNC_1: 0x%08x\n",
ee->semaphore_mboxes[1]);
if (HAS_VEBOX(m->i915))
err_printf(m, " SYNC_2: 0x%08x\n",
ee->semaphore_mboxes[2]);
}
if (HAS_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
@ -533,8 +516,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
ee->vm_info.pp_dir_base);
}
}
err_printf(m, " seqno: 0x%08x\n", ee->seqno);
err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno);
err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n",
@ -688,12 +669,10 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (!error->engine[i].context.pid)
continue;
err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n",
err_printf(m, "Active process (on ring %s): %s [%d]\n",
engine_name(m->i915, i),
error->engine[i].context.comm,
error->engine[i].context.pid,
error->engine[i].context.ban_score,
bannable(&error->engine[i].context));
error->engine[i].context.pid);
}
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
@ -779,13 +758,11 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (obj) {
err_puts(m, m->i915->engine[i]->name);
if (ee->context.pid)
err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
err_printf(m, " (submitted by %s [%d], ctx %d [%d])",
ee->context.comm,
ee->context.pid,
ee->context.handle,
ee->context.hw_id,
ee->context.ban_score,
bannable(&ee->context));
ee->context.hw_id);
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
@ -1061,27 +1038,6 @@ i915_error_object_create(struct drm_i915_private *i915,
return dst;
}
/* The error capture is special as tries to run underneath the normal
* locking rules - so we use the raw version of the i915_active_request lookup.
*/
static inline u32
__active_get_seqno(struct i915_active_request *active)
{
struct i915_request *request;
request = __i915_active_request_peek(active);
return request ? request->global_seqno : 0;
}
static inline int
__active_get_engine_id(struct i915_active_request *active)
{
struct i915_request *request;
request = __i915_active_request_peek(active);
return request ? request->engine->id : -1;
}
static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
{
@ -1090,9 +1046,6 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->size = obj->base.size;
err->name = obj->base.name;
err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
err->engine = __active_get_engine_id(&obj->frontbuffer_write);
err->gtt_offset = vma->node.start;
err->read_domains = obj->read_domains;
err->write_domain = obj->write_domain;
@ -1178,18 +1131,6 @@ static void gem_record_fences(struct i915_gpu_state *error)
error->nfence = i;
}
static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
struct drm_i915_private *dev_priv = engine->i915;
ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
if (HAS_VEBOX(dev_priv))
ee->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(engine->mmio_base));
}
static void error_record_engine_registers(struct i915_gpu_state *error,
struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
@ -1198,12 +1139,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (INTEL_GEN(dev_priv) >= 6) {
ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
if (INTEL_GEN(dev_priv) >= 8) {
if (INTEL_GEN(dev_priv) >= 8)
ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
} else {
gen6_record_semaphore_state(engine, ee);
else
ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
}
}
if (INTEL_GEN(dev_priv) >= 4) {
@ -1227,8 +1166,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
ee->acthd = intel_engine_get_active_head(engine);
ee->seqno = intel_engine_get_seqno(engine);
ee->last_seqno = intel_engine_last_submit(engine);
ee->start = I915_READ_START(engine);
ee->head = I915_READ_HEAD(engine);
ee->tail = I915_READ_TAIL(engine);
@ -1242,16 +1179,17 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (IS_GEN(dev_priv, 7)) {
switch (engine->id) {
default:
case RCS:
MISSING_CASE(engine->id);
case RCS0:
mmio = RENDER_HWS_PGA_GEN7;
break;
case BCS:
case BCS0:
mmio = BLT_HWS_PGA_GEN7;
break;
case VCS:
case VCS0:
mmio = BSD_HWS_PGA_GEN7;
break;
case VECS:
case VECS0:
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
@ -1299,10 +1237,9 @@ static void record_request(struct i915_request *request,
struct i915_gem_context *ctx = request->gem_context;
erq->flags = request->fence.flags;
erq->context = ctx->hw_id;
erq->context = request->fence.context;
erq->seqno = request->fence.seqno;
erq->sched_attr = request->sched.attr;
erq->ban_score = atomic_read(&ctx->ban_score);
erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
erq->start = i915_ggtt_offset(request->ring->vma);
erq->head = request->head;
@ -1396,8 +1333,6 @@ static void record_context(struct drm_i915_error_context *e,
e->handle = ctx->user_handle;
e->hw_id = ctx->hw_id;
e->sched_attr = ctx->sched;
e->ban_score = atomic_read(&ctx->ban_score);
e->bannable = i915_gem_context_is_bannable(ctx);
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
}
@ -1476,7 +1411,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
error_record_engine_registers(error, engine, ee);
error_record_engine_execlists(engine, ee);
request = i915_gem_find_active_request(engine);
request = intel_engine_find_active_request(engine);
if (request) {
struct i915_gem_context *ctx = request->gem_context;
struct intel_ring *ring;


@ -94,8 +94,6 @@ struct i915_gpu_state {
u32 cpu_ring_head;
u32 cpu_ring_tail;
u32 last_seqno;
/* Register state */
u32 start;
u32 tail;
@ -108,13 +106,11 @@ struct i915_gpu_state {
u32 bbstate;
u32 instpm;
u32 instps;
u32 seqno;
u64 bbaddr;
u64 acthd;
u32 fault_reg;
u64 faddr;
u32 rc_psmi; /* sleep state */
u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
struct intel_instdone instdone;
struct drm_i915_error_context {
@ -122,10 +118,8 @@ struct i915_gpu_state {
pid_t pid;
u32 handle;
u32 hw_id;
int ban_score;
int active;
int guilty;
bool bannable;
struct i915_sched_attr sched_attr;
} context;
@ -149,7 +143,6 @@ struct i915_gpu_state {
long jiffies;
pid_t pid;
u32 context;
int ban_score;
u32 seqno;
u32 start;
u32 head;
@ -170,7 +163,6 @@ struct i915_gpu_state {
struct drm_i915_error_buffer {
u32 size;
u32 name;
u32 wseqno;
u64 gtt_offset;
u32 read_domains;
u32 write_domain;
@ -179,7 +171,6 @@ struct i915_gpu_state {
u32 dirty:1;
u32 purgeable:1;
u32 userptr:1;
s32 engine:4;
u32 cache_level:3;
} *active_bo[I915_NUM_ENGINES], *pinned_bo;
u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
@ -204,39 +195,13 @@ struct i915_gpu_error {
atomic_t pending_fb_pin;
/**
* State variable controlling the reset flow and count
*
* This is a counter which gets incremented when reset is triggered,
*
* Before the reset commences, the I915_RESET_BACKOFF bit is set
* meaning that any waiters holding onto the struct_mutex should
* relinquish the lock immediately in order for the reset to start.
*
* If reset is not completed successfully, the I915_WEDGE bit is
* set meaning that hardware is terminally sour and there is no
* recovery. All waiters on the reset_queue will be woken when
* that happens.
*
* This counter is used by the wait_seqno code to notice that reset
* event happened and it needs to restart the entire ioctl (since most
* likely the seqno it waited for won't ever signal anytime soon).
*
* This is important for lock-free wait paths, where no contended lock
* naturally enforces the correct ordering between the bail-out of the
* waiter and the gpu reset work code.
*/
unsigned long reset_count;
/**
* flags: Control various stages of the GPU reset
*
* #I915_RESET_BACKOFF - When we start a reset, we want to stop any
* other users acquiring the struct_mutex. To do this we set the
* #I915_RESET_BACKOFF bit in the error flags when we detect a reset
* and then check for that bit before acquiring the struct_mutex (in
* i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
* secondary role in preventing two concurrent global reset attempts.
* #I915_RESET_BACKOFF - When we start a global reset, we need to
* serialise with any other users attempting to do the same, and
* any global resources that may be clobbered by the reset (such as
* FENCE registers).
*
* #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
* acquire the struct_mutex to reset an engine, we need an explicit
@ -255,6 +220,9 @@ struct i915_gpu_error {
#define I915_RESET_ENGINE 2
#define I915_WEDGED (BITS_PER_LONG - 1)
/** Number of times the device has been reset (global) */
u32 reset_count;
/** Number of times an engine has been reset */
u32 reset_engine_count[I915_NUM_ENGINES];
@ -272,6 +240,8 @@ struct i915_gpu_error {
*/
wait_queue_head_t reset_queue;
struct srcu_struct reset_backoff_srcu;
struct i915_gpu_restart *restart;
};


@ -1288,6 +1288,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
rps->last_adj = adj;
/*
* Limit deboosting and boosting to keep ourselves at the extremes
* when in the respective power modes (i.e. slowly decrease frequencies
* while in the HIGH_POWER zone and slowly increase frequencies while
* in the LOW_POWER zone). On idle, we will hit the timeout and drop
* to the next level quickly, and conversely if busy we expect to
* hit a waitboost and rapidly switch into max power.
*/
if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
(adj > 0 && rps->power.mode == LOW_POWER))
rps->last_adj = 0;
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
*/
@ -1415,20 +1427,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
}
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(dev_priv->engine[BCS]);
intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@ -1463,8 +1475,8 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915,
#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
GEN8_GT_BCS_IRQ | \
GEN8_GT_VCS0_IRQ | \
GEN8_GT_VCS1_IRQ | \
GEN8_GT_VCS2_IRQ | \
GEN8_GT_VECS_IRQ | \
GEN8_GT_PM_IRQ | \
GEN8_GT_GUC_IRQ)
@ -1475,7 +1487,7 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915,
raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
}
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
if (likely(gt_iir[1]))
raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
@ -1498,21 +1510,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
u32 master_ctl, u32 gt_iir[4])
{
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
gen8_cs_irq_handler(i915->engine[RCS],
gen8_cs_irq_handler(i915->engine[RCS0],
gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
gen8_cs_irq_handler(i915->engine[BCS],
gen8_cs_irq_handler(i915->engine[BCS0],
gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
}
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
gen8_cs_irq_handler(i915->engine[VCS],
if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
gen8_cs_irq_handler(i915->engine[VCS0],
gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
gen8_cs_irq_handler(i915->engine[VCS1],
gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
gen8_cs_irq_handler(i915->engine[VCS2],
gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
gen8_cs_irq_handler(i915->engine[VECS],
gen8_cs_irq_handler(i915->engine[VECS0],
gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
}
@ -1693,7 +1705,9 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
u32 crcs[5];
u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
trace_intel_pipe_crc(crtc, crcs);
spin_lock(&pipe_crc->lock);
/*
@ -1712,11 +1726,6 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
}
spin_unlock(&pipe_crc->lock);
crcs[0] = crc0;
crcs[1] = crc1;
crcs[2] = crc2;
crcs[3] = crc3;
crcs[4] = crc4;
drm_crtc_add_crc_entry(&crtc->base, true,
drm_crtc_accurate_vblank_count(&crtc->base),
crcs);
@ -1792,13 +1801,11 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (INTEL_GEN(dev_priv) >= 8)
return;
if (HAS_VEBOX(dev_priv)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(dev_priv->engine[VECS]);
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
@ -2667,6 +2674,25 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
}
static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
u32 mask = GEN8_AUX_CHANNEL_A;
if (INTEL_GEN(dev_priv) >= 9)
mask |= GEN9_AUX_CHANNEL_B |
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
if (IS_CNL_WITH_PORT_F(dev_priv))
mask |= CNL_AUX_CHANNEL_F;
if (INTEL_GEN(dev_priv) >= 11)
mask |= ICL_AUX_CHANNEL_E |
CNL_AUX_CHANNEL_F;
return mask;
}
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
@ -2722,20 +2748,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_PORT_IIR, iir);
ret = IRQ_HANDLED;
tmp_mask = GEN8_AUX_CHANNEL_A;
if (INTEL_GEN(dev_priv) >= 9)
tmp_mask |= GEN9_AUX_CHANNEL_B |
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
if (INTEL_GEN(dev_priv) >= 11)
tmp_mask |= ICL_AUX_CHANNEL_E;
if (IS_CNL_WITH_PORT_F(dev_priv) ||
INTEL_GEN(dev_priv) >= 11)
tmp_mask |= CNL_AUX_CHANNEL_F;
if (iir & tmp_mask) {
if (iir & gen8_de_port_aux_mask(dev_priv)) {
dp_aux_irq_handler(dev_priv);
found = true;
}
@ -2816,11 +2829,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
if (HAS_PCH_ICP(dev_priv))
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_irq_handler(dev_priv, iir);
else if (HAS_PCH_SPT(dev_priv) ||
HAS_PCH_KBP(dev_priv) ||
HAS_PCH_CNP(dev_priv))
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
spt_irq_handler(dev_priv, iir);
else
cpt_irq_handler(dev_priv, iir);
@ -3402,7 +3413,7 @@ static void gen11_irq_reset(struct drm_device *dev)
GEN3_IRQ_RESET(GEN11_GU_MISC_);
GEN3_IRQ_RESET(GEN8_PCU_);
if (HAS_PCH_ICP(dev_priv))
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(SDE);
}
@ -3583,7 +3594,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
gen11_hpd_detection_setup(dev_priv);
if (HAS_PCH_ICP(dev_priv))
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_hpd_irq_setup(dev_priv);
}
@ -3767,7 +3778,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
if (HAS_VEBOX(dev_priv)) {
if (HAS_ENGINE(dev_priv, VECS0)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
}
@ -3879,18 +3890,21 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
/* These are interrupts we'll toggle with the ring mask register */
u32 gt_interrupts[] = {
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
0,
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
};
(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
};
dev_priv->pm_ier = 0x0;
dev_priv->pm_imr = ~dev_priv->pm_ier;
@ -4036,7 +4050,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
if (HAS_PCH_ICP(dev_priv))
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_irq_postinstall(dev);
gen11_gt_irq_postinstall(dev_priv);
@ -4218,7 +4232,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
I915_WRITE16(IIR, iir);
if (iir & I915_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@ -4326,7 +4340,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
I915_WRITE(IIR, iir);
if (iir & I915_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@ -4471,10 +4485,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
I915_WRITE(IIR, iir);
if (iir & I915_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (iir & I915_BSD_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@ -4605,8 +4619,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
HAS_PCH_CNP(dev_priv))
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;


@ -28,14 +28,44 @@
#include <drm/drm_drv.h>
#include "i915_active.h"
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_selftest.h"
#define PLATFORM(x) .platform = (x), .platform_mask = BIT(x)
#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
#define GEN_DEFAULT_PIPEOFFSETS \
#define I845_PIPE_OFFSETS \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
}, \
.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
}
#define I9XX_PIPE_OFFSETS \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
}, \
.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
}
#define IVB_PIPE_OFFSETS \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
}, \
.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
}
#define HSW_PIPE_OFFSETS \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@ -49,7 +79,7 @@
[TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
}
#define GEN_CHV_PIPEOFFSETS \
#define CHV_PIPE_OFFSETS \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@ -61,11 +91,30 @@
[TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
}
#define CURSOR_OFFSETS \
.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
#define I845_CURSOR_OFFSETS \
.cursor_offsets = { \
[PIPE_A] = CURSOR_A_OFFSET, \
}
#define I9XX_CURSOR_OFFSETS \
.cursor_offsets = { \
[PIPE_A] = CURSOR_A_OFFSET, \
[PIPE_B] = CURSOR_B_OFFSET, \
}
#define CHV_CURSOR_OFFSETS \
.cursor_offsets = { \
[PIPE_A] = CURSOR_A_OFFSET, \
[PIPE_B] = CURSOR_B_OFFSET, \
[PIPE_C] = CHV_CURSOR_C_OFFSET, \
}
#define IVB_CURSOR_OFFSETS \
.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
.cursor_offsets = { \
[PIPE_A] = CURSOR_A_OFFSET, \
[PIPE_B] = IVB_CURSOR_B_OFFSET, \
[PIPE_C] = IVB_CURSOR_C_OFFSET, \
}
#define BDW_COLORS \
.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
@ -75,7 +124,7 @@
.gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
}
#define GLK_COLORS \
.color = { .degamma_lut_size = 0, .gamma_lut_size = 1024, \
.color = { .degamma_lut_size = 33, .gamma_lut_size = 1024, \
.degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
DRM_COLOR_LUT_EQUAL_CHANNELS, \
}
@ -85,7 +134,25 @@
#define GEN_DEFAULT_PAGE_SIZES \
.page_sizes = I915_GTT_PAGE_SIZE_4K
#define GEN2_FEATURES \
#define I830_FEATURES \
GEN(2), \
.is_mobile = 1, \
.num_pipes = 2, \
.display.has_overlay = 1, \
.display.cursor_needs_physical = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
GEN_DEFAULT_PAGE_SIZES
#define I845_FEATURES \
GEN(2), \
.num_pipes = 1, \
.display.has_overlay = 1, \
@ -94,37 +161,31 @@
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
.engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
I845_PIPE_OFFSETS, \
I845_CURSOR_OFFSETS, \
GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_i830_info = {
GEN2_FEATURES,
I830_FEATURES,
PLATFORM(INTEL_I830),
.is_mobile = 1,
.display.cursor_needs_physical = 1,
.num_pipes = 2, /* legal, last one wins */
};
static const struct intel_device_info intel_i845g_info = {
GEN2_FEATURES,
I845_FEATURES,
PLATFORM(INTEL_I845G),
};
static const struct intel_device_info intel_i85x_info = {
GEN2_FEATURES,
I830_FEATURES,
PLATFORM(INTEL_I85X),
.is_mobile = 1,
.num_pipes = 2, /* legal, last one wins */
.display.cursor_needs_physical = 1,
.display.has_fbc = 1,
};
static const struct intel_device_info intel_i865g_info = {
GEN2_FEATURES,
I845_FEATURES,
PLATFORM(INTEL_I865G),
};
@ -133,12 +194,12 @@ static const struct intel_device_info intel_i865g_info = {
.num_pipes = 2, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.ring_mask = RENDER_RING, \
.engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
@ -210,12 +271,12 @@ static const struct intel_device_info intel_pineview_info = {
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.ring_mask = RENDER_RING, \
.engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
@ -239,7 +300,7 @@ static const struct intel_device_info intel_i965gm_info = {
static const struct intel_device_info intel_g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
.ring_mask = RENDER_RING | BSD_RING,
.engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@ -249,7 +310,7 @@ static const struct intel_device_info intel_gm45_info = {
.is_mobile = 1,
.display.has_fbc = 1,
.display.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
.engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@ -257,14 +318,14 @@ static const struct intel_device_info intel_gm45_info = {
GEN(5), \
.num_pipes = 2, \
.display.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING, \
.engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
/* ilk does support rc6, but we do not implement [power] contexts */ \
.has_rc6 = 0, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_ironlake_d_info = {
GEN5_FEATURES,
@ -283,15 +344,16 @@ static const struct intel_device_info intel_ironlake_m_info = {
.num_pipes = 2, \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
.ppgtt = INTEL_PPGTT_ALIASING, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
.ppgtt_type = INTEL_PPGTT_ALIASING, \
.ppgtt_size = 31, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
GEN_DEFAULT_PAGE_SIZES
#define SNB_D_PLATFORM \
GEN6_FEATURES, \
@ -328,15 +390,16 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
.num_pipes = 3, \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
.ppgtt = INTEL_PPGTT_FULL, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
IVB_CURSOR_OFFSETS
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
GEN_DEFAULT_PAGE_SIZES
#define IVB_D_PLATFORM \
GEN7_FEATURES, \
@ -386,24 +449,26 @@ static const struct intel_device_info intel_valleyview_info = {
.has_rc6 = 1,
.display.has_gmch = 1,
.display.has_hotplug = 1,
.ppgtt = INTEL_PPGTT_FULL,
.ppgtt_type = INTEL_PPGTT_FULL,
.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
.display_mmio_offset = VLV_DISPLAY_BASE,
I9XX_PIPE_OFFSETS,
I9XX_CURSOR_OFFSETS,
GEN_DEFAULT_PAGE_SIZES,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS
};
#define G75_FEATURES \
GEN7_FEATURES, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
.display.has_psr = 1, \
.display.has_dp_mst = 1, \
.has_rc6p = 0 /* RC6p removed-by HSW */, \
HSW_PIPE_OFFSETS, \
.has_runtime_pm = 1
#define HSW_PLATFORM \
@ -433,7 +498,8 @@ static const struct intel_device_info intel_haswell_gt3_info = {
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
I915_GTT_PAGE_SIZE_2M, \
.has_logical_ring_contexts = 1, \
.ppgtt = INTEL_PPGTT_FULL_4LVL, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
.has_64bit_reloc = 1, \
.has_reset_engine = 1
@ -462,7 +528,8 @@ static const struct intel_device_info intel_broadwell_rsvd_info = {
static const struct intel_device_info intel_broadwell_gt3_info = {
BDW_PLATFORM,
.gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
static const struct intel_device_info intel_cherryview_info = {
@ -471,21 +538,22 @@ static const struct intel_device_info intel_cherryview_info = {
.num_pipes = 3,
.display.has_hotplug = 1,
.is_lp = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
.ppgtt = INTEL_PPGTT_FULL,
.ppgtt_type = INTEL_PPGTT_FULL,
.ppgtt_size = 32,
.has_reset_engine = 1,
.has_snoop = true,
.has_coherent_ggtt = false,
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_DEFAULT_PAGE_SIZES,
GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS,
CHV_PIPE_OFFSETS,
CHV_CURSOR_OFFSETS,
CHV_COLORS,
GEN_DEFAULT_PAGE_SIZES,
};
#define GEN9_DEFAULT_PAGE_SIZES \
@ -521,7 +589,8 @@ static const struct intel_device_info intel_skylake_gt2_info = {
#define SKL_GT3_PLUS_PLATFORM \
SKL_PLATFORM, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING
.engine_mask = \
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
static const struct intel_device_info intel_skylake_gt3_info = {
@ -538,7 +607,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
GEN(9), \
.is_lp = 1, \
.display.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.num_pipes = 3, \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
@ -552,15 +621,16 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
.has_guc = 1, \
.ppgtt = INTEL_PPGTT_FULL_4LVL, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
.has_reset_engine = 1, \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.display.has_ipc = 1, \
GEN9_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_PIPEOFFSETS, \
HSW_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
BDW_COLORS
BDW_COLORS, \
GEN9_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_broxton_info = {
GEN9_LP_FEATURES,
@ -592,7 +662,8 @@ static const struct intel_device_info intel_kabylake_gt2_info = {
static const struct intel_device_info intel_kabylake_gt3_info = {
KBL_PLATFORM,
.gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
#define CFL_PLATFORM \
@ -612,7 +683,8 @@ static const struct intel_device_info intel_coffeelake_gt2_info = {
static const struct intel_device_info intel_coffeelake_gt3_info = {
CFL_PLATFORM,
.gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
#define GEN10_FEATURES \
@ -648,13 +720,14 @@ static const struct intel_device_info intel_cannonlake_info = {
}, \
GEN(11), \
.ddb_size = 2048, \
.has_logical_ring_elsq = 1
.has_logical_ring_elsq = 1, \
.color = { .degamma_lut_size = 33, .gamma_lut_size = 1024 }
static const struct intel_device_info intel_icelake_11_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ICELAKE),
.is_alpha_support = 1,
.ring_mask = RENDER_RING | BLT_RING | VEBOX_RING | BSD_RING | BSD3_RING,
.engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
#undef GEN
@ -722,6 +795,8 @@ static const struct pci_device_id pciidlist[] = {
INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CML_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
{0, 0, 0}
@ -801,7 +876,9 @@ static int __init i915_init(void)
bool use_kms = true;
int err;
i915_global_active_init();
err = i915_globals_init();
if (err)
return err;
err = i915_mock_selftests();
if (err)
@ -834,7 +911,7 @@ static void __exit i915_exit(void)
return;
pci_unregister_driver(&i915_pci_driver);
i915_global_active_exit();
i915_globals_exit();
}
module_init(i915_init);


@ -1202,7 +1202,7 @@ static int i915_oa_read(struct i915_perf_stream *stream,
static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx)
{
struct intel_engine_cs *engine = i915->engine[RCS];
struct intel_engine_cs *engine = i915->engine[RCS0];
struct intel_context *ce;
int ret;
@ -1629,13 +1629,14 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
* It's fine to put out-of-date values into these per-context registers
* in the case that the OA unit has been disabled.
*/
static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
u32 *reg_state,
const struct i915_oa_config *oa_config)
static void
gen8_update_reg_state_unlocked(struct intel_context *ce,
u32 *reg_state,
const struct i915_oa_config *oa_config)
{
struct drm_i915_private *dev_priv = ctx->i915;
u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
struct drm_i915_private *i915 = ce->gem_context->i915;
u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset;
u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
i915_reg_t flex_regs[] = {
EU_PERF_CNTL0,
@ -1649,8 +1650,8 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
int i;
CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
(dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
(dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
(i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
(i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
GEN8_OA_COUNTER_RESUME);
for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
@ -1678,10 +1679,9 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
CTX_REG(reg_state, state_offset, flex_regs[i], value);
}
CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
gen8_make_rpcs(dev_priv,
&to_intel_context(ctx,
dev_priv->engine[RCS])->sseu));
CTX_REG(reg_state,
CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
gen8_make_rpcs(i915, &ce->sseu));
}
/*
@ -1711,7 +1711,7 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
const struct i915_oa_config *oa_config)
{
struct intel_engine_cs *engine = dev_priv->engine[RCS];
struct intel_engine_cs *engine = dev_priv->engine[RCS0];
unsigned int map_type = i915_coherent_map_type(dev_priv);
struct i915_gem_context *ctx;
struct i915_request *rq;
@ -1740,11 +1740,11 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
/* Update all contexts now that we've stalled the submission. */
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
struct intel_context *ce = to_intel_context(ctx, engine);
struct intel_context *ce = intel_context_lookup(ctx, engine);
u32 *regs;
/* OA settings will be set upon first use */
if (!ce->state)
if (!ce || !ce->state)
continue;
regs = i915_gem_object_pin_map(ce->state->obj, map_type);
@ -1754,7 +1754,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
ce->state->obj->mm.dirty = true;
regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
gen8_update_reg_state_unlocked(ctx, regs, oa_config);
gen8_update_reg_state_unlocked(ce, regs, oa_config);
i915_gem_object_unpin_map(ce->state->obj);
}
@ -2138,17 +2138,17 @@ err_config:
}
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
struct i915_gem_context *ctx,
u32 *reg_state)
struct intel_context *ce,
u32 *regs)
{
struct i915_perf_stream *stream;
if (engine->id != RCS)
if (engine->class != RENDER_CLASS)
return;
stream = engine->i915->perf.oa.exclusive_stream;
if (stream)
gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
gen8_update_reg_state_unlocked(ce, regs, stream->oa_config);
}
/**
@ -2881,12 +2881,24 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);
if (IS_HASWELL(dev_priv)) {
i915_perf_load_test_config_hsw(dev_priv);
} else if (IS_BROADWELL(dev_priv)) {
i915_perf_load_test_config_bdw(dev_priv);
} else if (IS_CHERRYVIEW(dev_priv)) {
i915_perf_load_test_config_chv(dev_priv);
if (INTEL_GEN(dev_priv) >= 11) {
i915_perf_load_test_config_icl(dev_priv);
} else if (IS_CANNONLAKE(dev_priv)) {
i915_perf_load_test_config_cnl(dev_priv);
} else if (IS_COFFEELAKE(dev_priv)) {
if (IS_CFL_GT2(dev_priv))
i915_perf_load_test_config_cflgt2(dev_priv);
if (IS_CFL_GT3(dev_priv))
i915_perf_load_test_config_cflgt3(dev_priv);
} else if (IS_GEMINILAKE(dev_priv)) {
i915_perf_load_test_config_glk(dev_priv);
} else if (IS_KABYLAKE(dev_priv)) {
if (IS_KBL_GT2(dev_priv))
i915_perf_load_test_config_kblgt2(dev_priv);
else if (IS_KBL_GT3(dev_priv))
i915_perf_load_test_config_kblgt3(dev_priv);
} else if (IS_BROXTON(dev_priv)) {
i915_perf_load_test_config_bxt(dev_priv);
} else if (IS_SKYLAKE(dev_priv)) {
if (IS_SKL_GT2(dev_priv))
i915_perf_load_test_config_sklgt2(dev_priv);
@ -2894,25 +2906,13 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
i915_perf_load_test_config_sklgt3(dev_priv);
else if (IS_SKL_GT4(dev_priv))
i915_perf_load_test_config_sklgt4(dev_priv);
} else if (IS_BROXTON(dev_priv)) {
i915_perf_load_test_config_bxt(dev_priv);
} else if (IS_KABYLAKE(dev_priv)) {
if (IS_KBL_GT2(dev_priv))
i915_perf_load_test_config_kblgt2(dev_priv);
else if (IS_KBL_GT3(dev_priv))
i915_perf_load_test_config_kblgt3(dev_priv);
} else if (IS_GEMINILAKE(dev_priv)) {
i915_perf_load_test_config_glk(dev_priv);
} else if (IS_COFFEELAKE(dev_priv)) {
if (IS_CFL_GT2(dev_priv))
i915_perf_load_test_config_cflgt2(dev_priv);
if (IS_CFL_GT3(dev_priv))
i915_perf_load_test_config_cflgt3(dev_priv);
} else if (IS_CANNONLAKE(dev_priv)) {
i915_perf_load_test_config_cnl(dev_priv);
} else if (IS_ICELAKE(dev_priv)) {
i915_perf_load_test_config_icl(dev_priv);
}
} else if (IS_CHERRYVIEW(dev_priv)) {
i915_perf_load_test_config_chv(dev_priv);
} else if (IS_BROADWELL(dev_priv)) {
i915_perf_load_test_config_bdw(dev_priv);
} else if (IS_HASWELL(dev_priv)) {
i915_perf_load_test_config_hsw(dev_priv);
}
if (dev_priv->perf.oa.test_config.id == 0)
goto sysfs_error;


@ -102,7 +102,7 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
*
* Use RCS as proxy for all engines.
*/
else if (intel_engine_supports_stats(i915->engine[RCS]))
else if (intel_engine_supports_stats(i915->engine[RCS0]))
enable &= ~BIT(I915_SAMPLE_BUSY);
/*
@ -149,14 +149,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
spin_unlock_irq(&i915->pmu.lock);
}
static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
{
if (!fw)
intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
return true;
}
static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
@ -169,49 +161,48 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
bool fw = false;
unsigned long flags;
if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
return;
if (!dev_priv->gt.awake)
return;
wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
wakeref = 0;
if (READ_ONCE(dev_priv->gt.awake))
wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
if (!wakeref)
return;
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
for_each_engine(engine, dev_priv, id) {
u32 current_seqno = intel_engine_get_seqno(engine);
u32 last_seqno = intel_engine_last_submit(engine);
struct intel_engine_pmu *pmu = &engine->pmu;
bool busy;
u32 val;
val = !i915_seqno_passed(current_seqno, last_seqno);
if (val)
add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
period_ns);
if (val && (engine->pmu.enable &
(BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
fw = grab_forcewake(dev_priv, fw);
val = I915_READ_FW(RING_CTL(engine->mmio_base));
} else {
val = 0;
}
val = I915_READ_FW(RING_CTL(engine->mmio_base));
if (val == 0) /* powerwell off => engine idle */
continue;
if (val & RING_WAIT)
add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
period_ns);
add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
if (val & RING_WAIT_SEMAPHORE)
add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
period_ns);
}
add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
if (fw)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
/*
* While waiting on a semaphore or event, MI_MODE reports the
* ring as idle. However, previously using the seqno, and with
* execlists sampling, we account for the ring waiting as the
* engine being busy. Therefore, we record the sample as being
* busy if either waiting or !idle.
*/
busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
if (!busy) {
val = I915_READ_FW(RING_MI_MODE(engine->mmio_base));
busy = !(val & MODE_IDLE);
}
if (busy)
add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
intel_runtime_pm_put(dev_priv, wakeref);
}


@ -52,7 +52,7 @@ enum vgt_g2v_type {
/*
* VGT capabilities type
*/
#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2)
#define VGT_CAPS_FULL_PPGTT BIT(2)
#define VGT_CAPS_HWSP_EMULATION BIT(3)
#define VGT_CAPS_HUGE_GTT BIT(4)


@ -10,12 +10,34 @@
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>
static int copy_query_item(void *query_hdr, size_t query_sz,
u32 total_length,
struct drm_i915_query_item *query_item)
{
if (query_item->length == 0)
return total_length;
if (query_item->length < total_length)
return -EINVAL;
if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
query_sz))
return -EFAULT;
if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
total_length))
return -EFAULT;
return 0;
}
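
copy_query_item() centralises the two-pass contract of the query uAPI: length == 0 reports the required size, a positive but too-small length is -EINVAL, and only then is the header copied in and the destination probed for writability. From userspace the contract looks roughly like this (a sketch assuming an open DRM fd, with <sys/ioctl.h>, <stdlib.h> and <drm/i915_drm.h>; error checking elided):

struct drm_i915_query_item item = {
	.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
	/* .length = 0: the first pass only asks for the size */
};
struct drm_i915_query query = {
	.num_items = 1,
	.items_ptr = (uintptr_t)&item,
};

ioctl(fd, DRM_IOCTL_I915_QUERY, &query);	/* kernel sets item.length */
item.data_ptr = (uintptr_t)malloc(item.length);
ioctl(fd, DRM_IOCTL_I915_QUERY, &query);	/* kernel fills the buffer */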
static int query_topology_info(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
int ret;
if (query_item->flags != 0)
return -EINVAL;
@ -33,23 +55,14 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
total_length = sizeof(topo) + slice_length + subslice_length + eu_length;
if (query_item->length == 0)
return total_length;
if (query_item->length < total_length)
return -EINVAL;
if (copy_from_user(&topo, u64_to_user_ptr(query_item->data_ptr),
sizeof(topo)))
return -EFAULT;
ret = copy_query_item(&topo, sizeof(topo), total_length,
query_item);
if (ret != 0)
return ret;
if (topo.flags != 0)
return -EINVAL;
if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
total_length))
return -EFAULT;
memset(&topo, 0, sizeof(topo));
topo.max_slices = sseu->max_slices;
topo.max_subslices = sseu->max_subslices;


@ -25,6 +25,9 @@
#ifndef _I915_REG_H_
#define _I915_REG_H_
#include <linux/bitfield.h>
#include <linux/bits.h>
/**
* DOC: The i915 register macro definition style guide
*
@ -59,15 +62,13 @@
* significant to least significant bit. Indent the register content macros
* using two extra spaces between ``#define`` and the macro name.
*
* For bit fields, define a ``_MASK`` and a ``_SHIFT`` macro. Define bit field
* contents so that they are already shifted in place, and can be directly
* OR'd. For convenience, function-like macros may be used to define bit fields,
* but do note that the macros may be needed to read as well as write the
* register contents.
* Define bit fields using ``REG_GENMASK(h, l)``. Define bit field contents
* using ``REG_FIELD_PREP(mask, value)``. This will define the values already
* shifted in place, so they can be directly OR'd together. For convenience,
* function-like macros may be used to define bit fields, but do note that the
* macros may be needed to read as well as write the register contents.
*
* Define bits using ``(1 << N)`` instead of ``BIT(N)``. We may change this in
* the future, but this is the prevailing style. Do **not** add ``_BIT`` suffix
* to the name.
* Define bits using ``REG_BIT(N)``. Do **not** add ``_BIT`` suffix to the name.
*
* Group the register and its contents together without blank lines, separate
* from other registers and their contents with one blank line.
@ -105,17 +106,78 @@
* #define _FOO_A 0xf000
* #define _FOO_B 0xf001
* #define FOO(pipe) _MMIO_PIPE(pipe, _FOO_A, _FOO_B)
* #define FOO_ENABLE (1 << 31)
* #define FOO_MODE_MASK (0xf << 16)
* #define FOO_MODE_SHIFT 16
* #define FOO_MODE_BAR (0 << 16)
* #define FOO_MODE_BAZ (1 << 16)
* #define FOO_MODE_QUX_SNB (2 << 16)
* #define FOO_ENABLE REG_BIT(31)
* #define FOO_MODE_MASK REG_GENMASK(19, 16)
* #define FOO_MODE_BAR REG_FIELD_PREP(FOO_MODE_MASK, 0)
* #define FOO_MODE_BAZ REG_FIELD_PREP(FOO_MODE_MASK, 1)
* #define FOO_MODE_QUX_SNB REG_FIELD_PREP(FOO_MODE_MASK, 2)
*
* #define BAR _MMIO(0xb000)
* #define GEN8_BAR _MMIO(0xb888)
*/
/**
* REG_BIT() - Prepare a u32 bit value
* @__n: 0-based bit number
*
* Local wrapper for BIT() to force u32, with compile time checks.
*
* @return: Value with bit @__n set.
*/
#define REG_BIT(__n) \
((u32)(BIT(__n) + \
BUILD_BUG_ON_ZERO(__builtin_constant_p(__n) && \
((__n) < 0 || (__n) > 31))))
/**
* REG_GENMASK() - Prepare a continuous u32 bitmask
* @__high: 0-based high bit
* @__low: 0-based low bit
*
* Local wrapper for GENMASK() to force u32, with compile time checks.
*
* @return: Continuous bitmask from @__high to @__low, inclusive.
*/
#define REG_GENMASK(__high, __low) \
((u32)(GENMASK(__high, __low) + \
BUILD_BUG_ON_ZERO(__builtin_constant_p(__high) && \
__builtin_constant_p(__low) && \
((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
/*
* Local integer constant expression version of is_power_of_2().
*/
#define IS_POWER_OF_2(__x) ((__x) && (((__x) & ((__x) - 1)) == 0))
/**
* REG_FIELD_PREP() - Prepare a u32 bitfield value
* @__mask: shifted mask defining the field's length and position
* @__val: value to put in the field
* Local copy of FIELD_PREP() to generate an integer constant expression, force
* u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK().
*
* @return: @__val masked and shifted into the field defined by @__mask.
*/
#define REG_FIELD_PREP(__mask, __val) \
((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
BUILD_BUG_ON_ZERO(!__builtin_constant_p(__mask)) + \
BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \
BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
/**
* REG_FIELD_GET() - Extract a u32 bitfield value
* @__mask: shifted mask defining the field's length and position
* @__val: value to extract the bitfield value from
*
* Local wrapper for FIELD_GET() to force u32 and for consistency with
* REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
*
* @return: Masked and shifted value of the field defined by @__mask in @__val.
*/
#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val))
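
A short worked example, reusing the hypothetical FOO register from the style guide above, shows how the pieces compose and why the contiguity check in REG_FIELD_PREP() holds:

#define FOO_ENABLE	REG_BIT(31)				/* 0x80000000 */
#define FOO_MODE_MASK	REG_GENMASK(19, 16)			/* 0x000f0000 */
#define FOO_MODE_BAZ	REG_FIELD_PREP(FOO_MODE_MASK, 1)	/* 0x00010000 */

u32 val = FOO_ENABLE | FOO_MODE_BAZ;		/* constant expression: 0x80010000 */
u32 mode = REG_FIELD_GET(FOO_MODE_MASK, val);	/* 1 */

/*
 * For a contiguous mask, mask + (1 << __bf_shf(mask)) carries up into a
 * single bit: 0x000f0000 + 0x00010000 = 0x00100000, a power of two. A
 * mask with a hole, e.g. 0x000d0000, sums to 0x000e0000 and trips the
 * IS_POWER_OF_2() BUILD_BUG_ON_ZERO() at compile time.
 */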
typedef struct {
u32 reg;
} i915_reg_t;
@ -210,14 +272,14 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* Engine ID */
#define RCS_HW 0
#define VCS_HW 1
#define BCS_HW 2
#define VECS_HW 3
#define VCS2_HW 4
#define VCS3_HW 6
#define VCS4_HW 7
#define VECS2_HW 12
#define RCS0_HW 0
#define VCS0_HW 1
#define BCS0_HW 2
#define VECS0_HW 3
#define VCS1_HW 4
#define VCS2_HW 6
#define VCS3_HW 7
#define VECS1_HW 12
/* Engine class */
@ -1044,7 +1106,32 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* See configdb bunit SB addr map */
#define BUNIT_REG_BISOC 0x11
#define PUNIT_REG_DSPFREQ 0x36
/* PUNIT_REG_*SSPM0 */
#define _SSPM0_SSC(val) ((val) << 0)
#define SSPM0_SSC_MASK _SSPM0_SSC(0x3)
#define SSPM0_SSC_PWR_ON _SSPM0_SSC(0x0)
#define SSPM0_SSC_CLK_GATE _SSPM0_SSC(0x1)
#define SSPM0_SSC_RESET _SSPM0_SSC(0x2)
#define SSPM0_SSC_PWR_GATE _SSPM0_SSC(0x3)
#define _SSPM0_SSS(val) ((val) << 24)
#define SSPM0_SSS_MASK _SSPM0_SSS(0x3)
#define SSPM0_SSS_PWR_ON _SSPM0_SSS(0x0)
#define SSPM0_SSS_CLK_GATE _SSPM0_SSS(0x1)
#define SSPM0_SSS_RESET _SSPM0_SSS(0x2)
#define SSPM0_SSS_PWR_GATE _SSPM0_SSS(0x3)
/* PUNIT_REG_*SSPM1 */
#define SSPM1_FREQSTAT_SHIFT 24
#define SSPM1_FREQSTAT_MASK (0x1f << SSPM1_FREQSTAT_SHIFT)
#define SSPM1_FREQGUAR_SHIFT 8
#define SSPM1_FREQGUAR_MASK (0x1f << SSPM1_FREQGUAR_SHIFT)
#define SSPM1_FREQ_SHIFT 0
#define SSPM1_FREQ_MASK (0x1f << SSPM1_FREQ_SHIFT)
#define PUNIT_REG_VEDSSPM0 0x32
#define PUNIT_REG_VEDSSPM1 0x33
#define PUNIT_REG_DSPSSPM 0x36
#define DSPFREQSTAT_SHIFT_CHV 24
#define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV)
#define DSPFREQGUAR_SHIFT_CHV 8
@ -1069,6 +1156,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe))
#define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe))
#define PUNIT_REG_ISPSSPM0 0x39
#define PUNIT_REG_ISPSSPM1 0x3a
/*
* i915_power_well_id:
*
@ -1860,13 +1950,13 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0
#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
#define CNL_PORT_TX_DW4_LN(ln, port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
#define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port))
#define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port))
#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
#define ICL_PORT_TX_DW4_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
@ -1893,11 +1983,11 @@ enum i915_power_well_id {
#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
#define ICL_PORT_TX_DW7_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
#define ICL_PORT_TX_DW7_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
#define MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \
#define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \
_MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C
@ -1908,8 +1998,8 @@ enum i915_power_well_id {
#define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C
#define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C
#define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C
#define MG_TX1_LINK_PARAMS(port, ln) \
MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
#define MG_TX1_LINK_PARAMS(ln, port) \
MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
MG_TX_LINK_PARAMS_TX1LN1_PORT1)
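/*
 * A worked example of the MG_PHY_PORT_LN() arithmetic, assuming the PORT1
 * and PORT2 base offsets follow the 0x168xxx/0x169xxx pattern visible in
 * the PORT3/PORT4 values above: MG_TX1_LINK_PARAMS(1, PORT_D) computes a
 * port index of PORT_D - PORT_C = 1, so _PORT() yields
 * 0x16812C + 1 * 0x1000 = 0x16912C (TX1 lane 0 of the second TC port),
 * and the lane term adds 1 * (0x16852C - 0x16812C) = 0x400, giving
 * register 0x16952C, i.e. TX1 lane 1 of the second TC port.
 */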
@ -1921,8 +2011,8 @@ enum i915_power_well_id {
#define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC
#define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC
#define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC
#define MG_TX2_LINK_PARAMS(port, ln) \
MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
#define MG_TX2_LINK_PARAMS(ln, port) \
MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
MG_TX_LINK_PARAMS_TX2LN1_PORT1)
#define CRI_USE_FS32 (1 << 5)
@ -1935,8 +2025,8 @@ enum i915_power_well_id {
#define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C
#define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C
#define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C
#define MG_TX1_PISO_READLOAD(port, ln) \
MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
#define MG_TX1_PISO_READLOAD(ln, port) \
MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
MG_TX_PISO_READLOAD_TX1LN1_PORT1)
@ -1948,8 +2038,8 @@ enum i915_power_well_id {
#define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC
#define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC
#define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC
#define MG_TX2_PISO_READLOAD(port, ln) \
MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
#define MG_TX2_PISO_READLOAD(ln, port) \
MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
MG_TX_PISO_READLOAD_TX2LN1_PORT1)
#define CRI_CALCINIT (1 << 1)
@ -1962,8 +2052,8 @@ enum i915_power_well_id {
#define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548
#define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148
#define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548
#define MG_TX1_SWINGCTRL(port, ln) \
MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
#define MG_TX1_SWINGCTRL(ln, port) \
MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
MG_TX_SWINGCTRL_TX1LN0_PORT2, \
MG_TX_SWINGCTRL_TX1LN1_PORT1)
@ -1975,8 +2065,8 @@ enum i915_power_well_id {
#define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8
#define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8
#define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8
#define MG_TX2_SWINGCTRL(port, ln) \
MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
#define MG_TX2_SWINGCTRL(ln, port) \
MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
MG_TX_SWINGCTRL_TX2LN0_PORT2, \
MG_TX_SWINGCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0)
@ -1990,8 +2080,8 @@ enum i915_power_well_id {
#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544
#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144
#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544
#define MG_TX1_DRVCTRL(port, ln) \
MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
#define MG_TX1_DRVCTRL(ln, port) \
MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
MG_TX_DRVCTRL_TX1LN1_TXPORT1)
@ -2003,8 +2093,8 @@ enum i915_power_well_id {
#define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4
#define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4
#define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4
#define MG_TX2_DRVCTRL(port, ln) \
MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX2LN0_PORT1, \
#define MG_TX2_DRVCTRL(ln, port) \
MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX2LN0_PORT1, \
MG_TX_DRVCTRL_TX2LN0_PORT2, \
MG_TX_DRVCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24)
@ -2023,8 +2113,8 @@ enum i915_power_well_id {
#define MG_CLKHUB_LN1_PORT3 0x16A79C
#define MG_CLKHUB_LN0_PORT4 0x16B39C
#define MG_CLKHUB_LN1_PORT4 0x16B79C
#define MG_CLKHUB(port, ln) \
MG_PHY_PORT_LN(port, ln, MG_CLKHUB_LN0_PORT1, \
#define MG_CLKHUB(ln, port) \
MG_PHY_PORT_LN(ln, port, MG_CLKHUB_LN0_PORT1, \
MG_CLKHUB_LN0_PORT2, \
MG_CLKHUB_LN1_PORT1)
#define CFG_LOW_RATE_LKREN_EN (1 << 11)
@ -2037,8 +2127,8 @@ enum i915_power_well_id {
#define MG_TX_DCC_TX1LN1_PORT3 0x16A510
#define MG_TX_DCC_TX1LN0_PORT4 0x16B110
#define MG_TX_DCC_TX1LN1_PORT4 0x16B510
#define MG_TX1_DCC(port, ln) \
MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX1LN0_PORT1, \
#define MG_TX1_DCC(ln, port) \
MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX1LN0_PORT1, \
MG_TX_DCC_TX1LN0_PORT2, \
MG_TX_DCC_TX1LN1_PORT1)
#define MG_TX_DCC_TX2LN0_PORT1 0x168090
@ -2049,8 +2139,8 @@ enum i915_power_well_id {
#define MG_TX_DCC_TX2LN1_PORT3 0x16A490
#define MG_TX_DCC_TX2LN0_PORT4 0x16B090
#define MG_TX_DCC_TX2LN1_PORT4 0x16B490
#define MG_TX2_DCC(port, ln) \
MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX2LN0_PORT1, \
#define MG_TX2_DCC(ln, port) \
MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX2LN0_PORT1, \
MG_TX_DCC_TX2LN0_PORT2, \
MG_TX_DCC_TX2LN1_PORT1)
#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25)
@ -2065,8 +2155,8 @@ enum i915_power_well_id {
#define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0
#define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0
#define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0
#define MG_DP_MODE(port, ln) \
MG_PHY_PORT_LN(port, ln, MG_DP_MODE_LN0_ACU_PORT1, \
#define MG_DP_MODE(ln, port) \
MG_PHY_PORT_LN(ln, port, MG_DP_MODE_LN0_ACU_PORT1, \
MG_DP_MODE_LN0_ACU_PORT2, \
MG_DP_MODE_LN1_ACU_PORT1)
#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7)
@ -3989,6 +4079,15 @@ enum {
/* Pipe A CRC regs */
#define _PIPE_CRC_CTL_A 0x60050
#define PIPE_CRC_ENABLE (1 << 31)
/* skl+ source selection */
#define PIPE_CRC_SOURCE_PLANE_1_SKL (0 << 28)
#define PIPE_CRC_SOURCE_PLANE_2_SKL (2 << 28)
#define PIPE_CRC_SOURCE_DMUX_SKL (4 << 28)
#define PIPE_CRC_SOURCE_PLANE_3_SKL (6 << 28)
#define PIPE_CRC_SOURCE_PLANE_4_SKL (7 << 28)
#define PIPE_CRC_SOURCE_PLANE_5_SKL (5 << 28)
#define PIPE_CRC_SOURCE_PLANE_6_SKL (3 << 28)
#define PIPE_CRC_SOURCE_PLANE_7_SKL (1 << 28)
/* ivb+ source selection */
#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29)
@ -4168,6 +4267,7 @@ enum {
#define EDP_PSR_TP2_TP3_TIME_100us (1 << 8)
#define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8)
#define EDP_PSR_TP2_TP3_TIME_0us (3 << 8)
#define EDP_PSR_TP4_TIME_0US (3 << 6) /* ICL+ */
#define EDP_PSR_TP1_TIME_500us (0 << 4)
#define EDP_PSR_TP1_TIME_100us (1 << 4)
#define EDP_PSR_TP1_TIME_2500us (2 << 4)
@ -4612,13 +4712,14 @@ enum {
#define VIDEO_DIP_ENABLE (1 << 31)
#define VIDEO_DIP_PORT(port) ((port) << 29)
#define VIDEO_DIP_PORT_MASK (3 << 29)
#define VIDEO_DIP_ENABLE_GCP (1 << 25)
#define VIDEO_DIP_ENABLE_GCP (1 << 25) /* ilk+ */
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
#define VIDEO_DIP_ENABLE_GAMUT (4 << 21)
#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) /* ilk+ */
#define VIDEO_DIP_ENABLE_SPD (8 << 21)
#define VIDEO_DIP_SELECT_AVI (0 << 19)
#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
#define VIDEO_DIP_SELECT_GAMUT (2 << 19)
#define VIDEO_DIP_SELECT_SPD (3 << 19)
#define VIDEO_DIP_SELECT_MASK (3 << 19)
#define VIDEO_DIP_FREQ_ONCE (0 << 16)
@ -4653,18 +4754,17 @@ enum {
#define _PP_STATUS 0x61200
#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
#define PP_ON (1 << 31)
#define PP_ON REG_BIT(31)
#define _PP_CONTROL_1 0xc7204
#define _PP_CONTROL_2 0xc7304
#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \
_PP_CONTROL_2)
#define POWER_CYCLE_DELAY_MASK (0x1f << 4)
#define POWER_CYCLE_DELAY_SHIFT 4
#define VDD_OVERRIDE_FORCE (1 << 3)
#define BACKLIGHT_ENABLE (1 << 2)
#define PWR_DOWN_ON_RESET (1 << 1)
#define PWR_STATE_TARGET (1 << 0)
#define POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
#define VDD_OVERRIDE_FORCE REG_BIT(3)
#define BACKLIGHT_ENABLE REG_BIT(2)
#define PWR_DOWN_ON_RESET REG_BIT(1)
#define PWR_STATE_TARGET REG_BIT(0)
/*
* Indicates that all dependencies of the panel are on:
*
@ -4672,62 +4772,53 @@ enum {
* - pipe enabled
* - LVDS/DVOB/DVOC on
*/
#define PP_READY (1 << 30)
#define PP_SEQUENCE_NONE (0 << 28)
#define PP_SEQUENCE_POWER_UP (1 << 28)
#define PP_SEQUENCE_POWER_DOWN (2 << 28)
#define PP_SEQUENCE_MASK (3 << 28)
#define PP_SEQUENCE_SHIFT 28
#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
#define PP_SEQUENCE_STATE_MASK 0x0000000f
#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
#define PP_SEQUENCE_STATE_RESET (0xf << 0)
#define PP_READY REG_BIT(30)
#define PP_SEQUENCE_MASK REG_GENMASK(29, 28)
#define PP_SEQUENCE_NONE REG_FIELD_PREP(PP_SEQUENCE_MASK, 0)
#define PP_SEQUENCE_POWER_UP REG_FIELD_PREP(PP_SEQUENCE_MASK, 1)
#define PP_SEQUENCE_POWER_DOWN REG_FIELD_PREP(PP_SEQUENCE_MASK, 2)
#define PP_CYCLE_DELAY_ACTIVE REG_BIT(27)
#define PP_SEQUENCE_STATE_MASK REG_GENMASK(3, 0)
#define PP_SEQUENCE_STATE_OFF_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x0)
#define PP_SEQUENCE_STATE_OFF_S0_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x1)
#define PP_SEQUENCE_STATE_OFF_S0_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x2)
#define PP_SEQUENCE_STATE_OFF_S0_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x3)
#define PP_SEQUENCE_STATE_ON_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x8)
#define PP_SEQUENCE_STATE_ON_S1_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x9)
#define PP_SEQUENCE_STATE_ON_S1_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xa)
#define PP_SEQUENCE_STATE_ON_S1_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xb)
#define PP_SEQUENCE_STATE_RESET REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xf)
#define _PP_CONTROL 0x61204
#define PP_CONTROL(pps_idx) _MMIO_PPS(pps_idx, _PP_CONTROL)
#define PANEL_UNLOCK_REGS (0xabcd << 16)
#define PANEL_UNLOCK_MASK (0xffff << 16)
#define BXT_POWER_CYCLE_DELAY_MASK 0x1f0
#define BXT_POWER_CYCLE_DELAY_SHIFT 4
#define EDP_FORCE_VDD (1 << 3)
#define EDP_BLC_ENABLE (1 << 2)
#define PANEL_POWER_RESET (1 << 1)
#define PANEL_POWER_ON (1 << 0)
#define PANEL_UNLOCK_MASK REG_GENMASK(31, 16)
#define PANEL_UNLOCK_REGS REG_FIELD_PREP(PANEL_UNLOCK_MASK, 0xabcd)
#define BXT_POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
#define EDP_FORCE_VDD REG_BIT(3)
#define EDP_BLC_ENABLE REG_BIT(2)
#define PANEL_POWER_RESET REG_BIT(1)
#define PANEL_POWER_ON REG_BIT(0)
#define _PP_ON_DELAYS 0x61208
#define PP_ON_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_ON_DELAYS)
#define PANEL_PORT_SELECT_SHIFT 30
#define PANEL_PORT_SELECT_MASK (3 << 30)
#define PANEL_PORT_SELECT_LVDS (0 << 30)
#define PANEL_PORT_SELECT_DPA (1 << 30)
#define PANEL_PORT_SELECT_DPC (2 << 30)
#define PANEL_PORT_SELECT_DPD (3 << 30)
#define PANEL_PORT_SELECT_VLV(port) ((port) << 30)
#define PANEL_POWER_UP_DELAY_MASK 0x1fff0000
#define PANEL_POWER_UP_DELAY_SHIFT 16
#define PANEL_LIGHT_ON_DELAY_MASK 0x1fff
#define PANEL_LIGHT_ON_DELAY_SHIFT 0
#define PANEL_PORT_SELECT_MASK REG_GENMASK(31, 30)
#define PANEL_PORT_SELECT_LVDS REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 0)
#define PANEL_PORT_SELECT_DPA REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 1)
#define PANEL_PORT_SELECT_DPC REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 2)
#define PANEL_PORT_SELECT_DPD REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 3)
#define PANEL_PORT_SELECT_VLV(port) REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, port)
#define PANEL_POWER_UP_DELAY_MASK REG_GENMASK(28, 16)
#define PANEL_LIGHT_ON_DELAY_MASK REG_GENMASK(12, 0)
#define _PP_OFF_DELAYS 0x6120C
#define PP_OFF_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_OFF_DELAYS)
#define PANEL_POWER_DOWN_DELAY_MASK 0x1fff0000
#define PANEL_POWER_DOWN_DELAY_SHIFT 16
#define PANEL_LIGHT_OFF_DELAY_MASK 0x1fff
#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
#define PANEL_POWER_DOWN_DELAY_MASK REG_GENMASK(28, 16)
#define PANEL_LIGHT_OFF_DELAY_MASK REG_GENMASK(12, 0)
#define _PP_DIVISOR 0x61210
#define PP_DIVISOR(pps_idx) _MMIO_PPS(pps_idx, _PP_DIVISOR)
#define PP_REFERENCE_DIVIDER_MASK 0xffffff00
#define PP_REFERENCE_DIVIDER_SHIFT 8
#define PANEL_POWER_CYCLE_DELAY_MASK 0x1f
#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
#define PP_REFERENCE_DIVIDER_MASK REG_GENMASK(31, 8)
#define PANEL_POWER_CYCLE_DELAY_MASK REG_GENMASK(4, 0)
/* Panel fitting */
#define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
@ -5590,9 +5681,15 @@ enum {
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
#define PIPECONF_PIPE_LOCKED (1 << 25)
#define PIPECONF_PALETTE 0
#define PIPECONF_GAMMA (1 << 24)
#define PIPECONF_FORCE_BORDER (1 << 25)
#define PIPECONF_GAMMA_MODE_MASK_I9XX (1 << 24) /* gmch */
#define PIPECONF_GAMMA_MODE_MASK_ILK (3 << 24) /* ilk-ivb */
#define PIPECONF_GAMMA_MODE_8BIT (0 << 24) /* gmch,ilk-ivb */
#define PIPECONF_GAMMA_MODE_10BIT (1 << 24) /* gmch,ilk-ivb */
#define PIPECONF_GAMMA_MODE_12BIT (2 << 24) /* ilk-ivb */
#define PIPECONF_GAMMA_MODE_SPLIT (3 << 24) /* ivb */
#define PIPECONF_GAMMA_MODE(x) ((x) << 24) /* pass in GAMMA_MODE_MODE_* */
#define PIPECONF_GAMMA_MODE_SHIFT 24
#define PIPECONF_INTERLACE_MASK (7 << 21)
#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
@ -5998,6 +6095,7 @@ enum {
#define _CUR_WM_TRANS_A_0 0x70168
#define _CUR_WM_TRANS_B_0 0x71168
#define PLANE_WM_EN (1 << 31)
#define PLANE_WM_IGNORE_LINES (1 << 30)
#define PLANE_WM_LINES_SHIFT 14
#define PLANE_WM_LINES_MASK 0x1f
#define PLANE_WM_BLOCKS_MASK 0x7ff /* skl+: 10 bits, icl+ 11 bits */
@ -6124,7 +6222,7 @@ enum {
#define MCURSOR_PIPE_SELECT_SHIFT 28
#define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
#define MCURSOR_PIPE_CSC_ENABLE (1 << 24)
#define MCURSOR_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */
#define MCURSOR_ROTATE_180 (1 << 15)
#define MCURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE 0x70084
@ -6179,7 +6277,7 @@ enum {
#define DISPPLANE_RGBA888 (0xf << 26)
#define DISPPLANE_STEREO_ENABLE (1 << 25)
#define DISPPLANE_STEREO_DISABLE 0
#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24)
#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */
#define DISPPLANE_SEL_PIPE_SHIFT 24
#define DISPPLANE_SEL_PIPE_MASK (3 << DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SEL_PIPE(pipe) ((pipe) << DISPPLANE_SEL_PIPE_SHIFT)
@ -7114,11 +7212,12 @@ enum {
#define _GAMMA_MODE_A 0x4a480
#define _GAMMA_MODE_B 0x4ac80
#define GAMMA_MODE(pipe) _MMIO_PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
#define GAMMA_MODE_MODE_MASK (3 << 0)
#define GAMMA_MODE_MODE_8BIT (0 << 0)
#define GAMMA_MODE_MODE_10BIT (1 << 0)
#define GAMMA_MODE_MODE_12BIT (2 << 0)
#define GAMMA_MODE_MODE_SPLIT (3 << 0)
#define PRE_CSC_GAMMA_ENABLE (1 << 31)
#define POST_CSC_GAMMA_ENABLE (1 << 30)
#define GAMMA_MODE_MODE_8BIT (0 << 0)
#define GAMMA_MODE_MODE_10BIT (1 << 0)
#define GAMMA_MODE_MODE_12BIT (2 << 0)
#define GAMMA_MODE_MODE_SPLIT (3 << 0)
/* DMC/CSR */
#define CSR_PROGRAM(i) _MMIO(0x80000 + (i) * 4)
@ -7213,8 +7312,8 @@ enum {
#define GEN8_GT_VECS_IRQ (1 << 6)
#define GEN8_GT_GUC_IRQ (1 << 5)
#define GEN8_GT_PM_IRQ (1 << 4)
#define GEN8_GT_VCS2_IRQ (1 << 3)
#define GEN8_GT_VCS1_IRQ (1 << 2)
#define GEN8_GT_VCS1_IRQ (1 << 3) /* NB: VCS2 in bspec! */
#define GEN8_GT_VCS0_IRQ (1 << 2) /* NB: VCS1 in bspec! */
#define GEN8_GT_BCS_IRQ (1 << 1)
#define GEN8_GT_RCS_IRQ (1 << 0)
@ -7235,8 +7334,8 @@ enum {
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_VCS1_IRQ_SHIFT 0
#define GEN8_VCS2_IRQ_SHIFT 16
#define GEN8_VCS0_IRQ_SHIFT 0 /* NB: VCS1 in bspec! */
#define GEN8_VCS1_IRQ_SHIFT 16 /* NB: VCS2 in bspec! */
#define GEN8_VECS_IRQ_SHIFT 0
#define GEN8_WD_IRQ_SHIFT 16
@ -7622,13 +7721,13 @@ enum {
#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2)
/*GEN11 chicken */
#define _PIPEA_CHICKEN 0x70038
#define _PIPEB_CHICKEN 0x71038
#define _PIPEC_CHICKEN 0x72038
#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
#define PM_FILL_MAINTAIN_DBUF_FULLNESS (1 << 0)
#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
_PIPEB_CHICKEN)
#define _PIPEA_CHICKEN 0x70038
#define _PIPEB_CHICKEN 0x71038
#define _PIPEC_CHICKEN 0x72038
#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
_PIPEB_CHICKEN)
#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15)
#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
/* PCH */
@ -8098,10 +8197,11 @@ enum {
#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
@ -9750,7 +9850,7 @@ enum skl_power_gate {
#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
#define DPLL_CFGCR1_KDIV_1 (1 << 6)
#define DPLL_CFGCR1_KDIV_2 (2 << 6)
#define DPLL_CFGCR1_KDIV_4 (4 << 6)
#define DPLL_CFGCR1_KDIV_3 (4 << 6)
#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
#define DPLL_CFGCR1_PDIV_SHIFT (2)
#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
@ -9819,16 +9919,29 @@ enum skl_power_gate {
#define BXT_DRAM_WIDTH_X64 (0x3 << 4)
#define BXT_DRAM_SIZE_MASK (0x7 << 6)
#define BXT_DRAM_SIZE_SHIFT 6
#define BXT_DRAM_SIZE_4GB (0x0 << 6)
#define BXT_DRAM_SIZE_6GB (0x1 << 6)
#define BXT_DRAM_SIZE_8GB (0x2 << 6)
#define BXT_DRAM_SIZE_12GB (0x3 << 6)
#define BXT_DRAM_SIZE_16GB (0x4 << 6)
#define BXT_DRAM_SIZE_4GBIT (0x0 << 6)
#define BXT_DRAM_SIZE_6GBIT (0x1 << 6)
#define BXT_DRAM_SIZE_8GBIT (0x2 << 6)
#define BXT_DRAM_SIZE_12GBIT (0x3 << 6)
#define BXT_DRAM_SIZE_16GBIT (0x4 << 6)
#define BXT_DRAM_TYPE_MASK (0x7 << 22)
#define BXT_DRAM_TYPE_SHIFT 22
#define BXT_DRAM_TYPE_DDR3 (0x0 << 22)
#define BXT_DRAM_TYPE_LPDDR3 (0x1 << 22)
#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22)
#define BXT_DRAM_TYPE_DDR4 (0x4 << 22)
#define SKL_MEMORY_FREQ_MULTIPLIER_HZ 266666666
#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
#define SKL_REQ_DATA_MASK (0xF << 0)
#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
#define SKL_DRAM_DDR_TYPE_MASK (0x3 << 0)
#define SKL_DRAM_DDR_TYPE_DDR4 (0 << 0)
#define SKL_DRAM_DDR_TYPE_DDR3 (1 << 0)
#define SKL_DRAM_DDR_TYPE_LPDDR3 (2 << 0)
#define SKL_DRAM_DDR_TYPE_LPDDR4 (3 << 0)
#define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
#define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010)
#define SKL_DRAM_S_SHIFT 16
@ -9840,8 +9953,21 @@ enum skl_power_gate {
#define SKL_DRAM_WIDTH_X32 (0x2 << 8)
#define SKL_DRAM_RANK_MASK (0x1 << 10)
#define SKL_DRAM_RANK_SHIFT 10
#define SKL_DRAM_RANK_SINGLE (0x0 << 10)
#define SKL_DRAM_RANK_DUAL (0x1 << 10)
#define SKL_DRAM_RANK_1 (0x0 << 10)
#define SKL_DRAM_RANK_2 (0x1 << 10)
#define SKL_DRAM_RANK_MASK (0x1 << 10)
#define CNL_DRAM_SIZE_MASK 0x7F
#define CNL_DRAM_WIDTH_MASK (0x3 << 7)
#define CNL_DRAM_WIDTH_SHIFT 7
#define CNL_DRAM_WIDTH_X8 (0x0 << 7)
#define CNL_DRAM_WIDTH_X16 (0x1 << 7)
#define CNL_DRAM_WIDTH_X32 (0x2 << 7)
#define CNL_DRAM_RANK_MASK (0x3 << 9)
#define CNL_DRAM_RANK_SHIFT 9
#define CNL_DRAM_RANK_1 (0x0 << 9)
#define CNL_DRAM_RANK_2 (0x1 << 9)
#define CNL_DRAM_RANK_3 (0x2 << 9)
#define CNL_DRAM_RANK_4 (0x3 << 9)
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
@ -9886,10 +10012,14 @@ enum skl_power_gate {
#define _PIPE_A_CSC_COEFF_BU 0x4901c
#define _PIPE_A_CSC_COEFF_RV_GV 0x49020
#define _PIPE_A_CSC_COEFF_BV 0x49024
#define _PIPE_A_CSC_MODE 0x49028
#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
#define CSC_MODE_YUV_TO_RGB (1 << 0)
#define ICL_CSC_ENABLE (1 << 31)
#define ICL_OUTPUT_CSC_ENABLE (1 << 30)
#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
#define CSC_MODE_YUV_TO_RGB (1 << 0)
#define _PIPE_A_CSC_PREOFF_HI 0x49030
#define _PIPE_A_CSC_PREOFF_ME 0x49034
#define _PIPE_A_CSC_PREOFF_LO 0x49038
@ -9925,6 +10055,70 @@ enum skl_power_gate {
#define PIPE_CSC_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
/* Pipe Output CSC */
#define _PIPE_A_OUTPUT_CSC_COEFF_RY_GY 0x49050
#define _PIPE_A_OUTPUT_CSC_COEFF_BY 0x49054
#define _PIPE_A_OUTPUT_CSC_COEFF_RU_GU 0x49058
#define _PIPE_A_OUTPUT_CSC_COEFF_BU 0x4905c
#define _PIPE_A_OUTPUT_CSC_COEFF_RV_GV 0x49060
#define _PIPE_A_OUTPUT_CSC_COEFF_BV 0x49064
#define _PIPE_A_OUTPUT_CSC_PREOFF_HI 0x49068
#define _PIPE_A_OUTPUT_CSC_PREOFF_ME 0x4906c
#define _PIPE_A_OUTPUT_CSC_PREOFF_LO 0x49070
#define _PIPE_A_OUTPUT_CSC_POSTOFF_HI 0x49074
#define _PIPE_A_OUTPUT_CSC_POSTOFF_ME 0x49078
#define _PIPE_A_OUTPUT_CSC_POSTOFF_LO 0x4907c
#define _PIPE_B_OUTPUT_CSC_COEFF_RY_GY 0x49150
#define _PIPE_B_OUTPUT_CSC_COEFF_BY 0x49154
#define _PIPE_B_OUTPUT_CSC_COEFF_RU_GU 0x49158
#define _PIPE_B_OUTPUT_CSC_COEFF_BU 0x4915c
#define _PIPE_B_OUTPUT_CSC_COEFF_RV_GV 0x49160
#define _PIPE_B_OUTPUT_CSC_COEFF_BV 0x49164
#define _PIPE_B_OUTPUT_CSC_PREOFF_HI 0x49168
#define _PIPE_B_OUTPUT_CSC_PREOFF_ME 0x4916c
#define _PIPE_B_OUTPUT_CSC_PREOFF_LO 0x49170
#define _PIPE_B_OUTPUT_CSC_POSTOFF_HI 0x49174
#define _PIPE_B_OUTPUT_CSC_POSTOFF_ME 0x49178
#define _PIPE_B_OUTPUT_CSC_POSTOFF_LO 0x4917c
#define PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe) _MMIO_PIPE(pipe,\
_PIPE_A_OUTPUT_CSC_COEFF_RY_GY,\
_PIPE_B_OUTPUT_CSC_COEFF_RY_GY)
#define PIPE_CSC_OUTPUT_COEFF_BY(pipe) _MMIO_PIPE(pipe, \
_PIPE_A_OUTPUT_CSC_COEFF_BY, \
_PIPE_B_OUTPUT_CSC_COEFF_BY)
#define PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe) _MMIO_PIPE(pipe, \
_PIPE_A_OUTPUT_CSC_COEFF_RU_GU, \
_PIPE_B_OUTPUT_CSC_COEFF_RU_GU)
#define PIPE_CSC_OUTPUT_COEFF_BU(pipe) _MMIO_PIPE(pipe, \
_PIPE_A_OUTPUT_CSC_COEFF_BU, \
_PIPE_B_OUTPUT_CSC_COEFF_BU)
#define PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe) _MMIO_PIPE(pipe, \
_PIPE_A_OUTPUT_CSC_COEFF_RV_GV, \
_PIPE_B_OUTPUT_CSC_COEFF_RV_GV)
#define PIPE_CSC_OUTPUT_COEFF_BV(pipe) _MMIO_PIPE(pipe, \
_PIPE_A_OUTPUT_CSC_COEFF_BV, \
_PIPE_B_OUTPUT_CSC_COEFF_BV)
#define PIPE_CSC_OUTPUT_PREOFF_HI(pipe) _MMIO_PIPE(pipe, \
_PIPE_A_OUTPUT_CSC_PREOFF_HI, \
_PIPE_B_OUTPUT_CSC_PREOFF_HI)
#define PIPE_CSC_OUTPUT_PREOFF_ME(pipe) _MMIO_PIPE(pipe, \
_PIPE_A_OUTPUT_CSC_PREOFF_ME, \
_PIPE_B_OUTPUT_CSC_PREOFF_ME)
#define PIPE_CSC_OUTPUT_PREOFF_LO(pipe) _MMIO_PIPE(pipe, \
_PIPE_A_OUTPUT_CSC_PREOFF_LO, \
_PIPE_B_OUTPUT_CSC_PREOFF_LO)
#define PIPE_CSC_OUTPUT_POSTOFF_HI(pipe) _MMIO_PIPE(pipe, \
_PIPE_A_OUTPUT_CSC_POSTOFF_HI, \
_PIPE_B_OUTPUT_CSC_POSTOFF_HI)
#define PIPE_CSC_OUTPUT_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, \
_PIPE_A_OUTPUT_CSC_POSTOFF_ME, \
_PIPE_B_OUTPUT_CSC_POSTOFF_ME)
#define PIPE_CSC_OUTPUT_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, \
_PIPE_A_OUTPUT_CSC_POSTOFF_LO, \
_PIPE_B_OUTPUT_CSC_POSTOFF_LO)
/* pipe degamma/gamma LUTs on IVB+ */
#define _PAL_PREC_INDEX_A 0x4A400
#define _PAL_PREC_INDEX_B 0x4AC00
@ -22,16 +22,31 @@
*
*/
#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
#include <linux/irq_work.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"
#include "i915_reset.h"
struct execute_cb {
struct list_head link;
struct irq_work work;
struct i915_sw_fence *fence;
};
static struct i915_global_request {
struct i915_global base;
struct kmem_cache *slab_requests;
struct kmem_cache *slab_dependencies;
struct kmem_cache *slab_execute_cbs;
} global;
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
return "i915";
@ -68,7 +83,9 @@ static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
signed long timeout)
{
return i915_request_wait(to_request(fence), interruptible, timeout);
return i915_request_wait(to_request(fence),
interruptible | I915_WAIT_PRIORITY,
timeout);
}
static void i915_fence_release(struct dma_fence *fence)
@ -84,7 +101,7 @@ static void i915_fence_release(struct dma_fence *fence)
*/
i915_sw_fence_fini(&rq->submit);
kmem_cache_free(rq->i915->requests, rq);
kmem_cache_free(global.slab_requests, rq);
}
const struct dma_fence_ops i915_fence_ops = {
@ -177,12 +194,10 @@ static void free_capture_list(struct i915_request *request)
static void __retire_engine_request(struct intel_engine_cs *engine,
struct i915_request *rq)
{
GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d:%d\n",
GEM_TRACE("%s(%s) fence %llx:%lld, current %d\n",
__func__, engine->name,
rq->fence.context, rq->fence.seqno,
rq->global_seqno,
hwsp_seqno(rq),
intel_engine_get_seqno(engine));
hwsp_seqno(rq));
GEM_BUG_ON(!i915_request_completed(rq));
@ -241,12 +256,10 @@ static void i915_request_retire(struct i915_request *request)
{
struct i915_active_request *active, *next;
GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
GEM_TRACE("%s fence %llx:%lld, current %d\n",
request->engine->name,
request->fence.context, request->fence.seqno,
request->global_seqno,
hwsp_seqno(request),
intel_engine_get_seqno(request->engine));
hwsp_seqno(request));
lockdep_assert_held(&request->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
@ -288,15 +301,13 @@ static void i915_request_retire(struct i915_request *request)
i915_request_remove_from_client(request);
/* Retirement decays the ban score as it is a sign of ctx progress */
atomic_dec_if_positive(&request->gem_context->ban_score);
intel_context_unpin(request->hw_context);
__retire_engine_upto(request->engine, request);
unreserve_gt(request->i915);
i915_sched_node_fini(request->i915, &request->sched);
i915_sched_node_fini(&request->sched);
i915_request_put(request);
}
@ -305,12 +316,10 @@ void i915_request_retire_upto(struct i915_request *rq)
struct intel_ring *ring = rq->ring;
struct i915_request *tmp;
GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
GEM_TRACE("%s fence %llx:%lld, current %d\n",
rq->engine->name,
rq->fence.context, rq->fence.seqno,
rq->global_seqno,
hwsp_seqno(rq),
intel_engine_get_seqno(rq->engine));
hwsp_seqno(rq));
lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_request_completed(rq));
@ -326,9 +335,67 @@ void i915_request_retire_upto(struct i915_request *rq)
} while (tmp != rq);
}
static u32 timeline_get_seqno(struct i915_timeline *tl)
static void irq_execute_cb(struct irq_work *wrk)
{
return tl->seqno += 1 + tl->has_initial_breadcrumb;
struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
i915_sw_fence_complete(cb->fence);
kmem_cache_free(global.slab_execute_cbs, cb);
}
static void __notify_execute_cb(struct i915_request *rq)
{
struct execute_cb *cb;
lockdep_assert_held(&rq->lock);
if (list_empty(&rq->execute_cb))
return;
list_for_each_entry(cb, &rq->execute_cb, link)
irq_work_queue(&cb->work);
/*
* XXX Rollback on __i915_request_unsubmit()
*
* In the future, perhaps when we have an active time-slicing scheduler,
* it will be interesting to unsubmit parallel execution and remove
* busywaits from the GPU until their master is restarted. This is
* quite hairy, we have to carefully rollback the fence and do a
* preempt-to-idle cycle on the target engine, all the while the
* master execute_cb may refire.
*/
INIT_LIST_HEAD(&rq->execute_cb);
}
static int
i915_request_await_execution(struct i915_request *rq,
struct i915_request *signal,
gfp_t gfp)
{
struct execute_cb *cb;
if (i915_request_is_active(signal))
return 0;
cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
if (!cb)
return -ENOMEM;
cb->fence = &rq->submit;
i915_sw_fence_await(cb->fence);
init_irq_work(&cb->work, irq_execute_cb);
spin_lock_irq(&signal->lock);
if (i915_request_is_active(signal)) {
i915_sw_fence_complete(cb->fence);
kmem_cache_free(global.slab_execute_cbs, cb);
} else {
list_add_tail(&cb->link, &signal->execute_cb);
}
spin_unlock_irq(&signal->lock);
return 0;
}
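/*
 * Note the double-checked locking in i915_request_await_execution() above:
 * the unlocked i915_request_is_active() test skips the allocation and the
 * signaler's lock entirely in the common already-executing case, while the
 * re-check under signal->lock closes the race against
 * __i915_request_submit() flushing the execute_cb list.
 */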
static void move_to_timeline(struct i915_request *request,
@ -342,42 +409,33 @@ static void move_to_timeline(struct i915_request *request,
spin_unlock(&request->timeline->lock);
}
static u32 next_global_seqno(struct i915_timeline *tl)
{
if (!++tl->seqno)
++tl->seqno;
return tl->seqno;
}
void __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
u32 seqno;
GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d:%d\n",
GEM_TRACE("%s fence %llx:%lld -> current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
engine->timeline.seqno + 1,
hwsp_seqno(request),
intel_engine_get_seqno(engine));
hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline.lock);
GEM_BUG_ON(request->global_seqno);
seqno = next_global_seqno(&engine->timeline);
GEM_BUG_ON(!seqno);
GEM_BUG_ON(intel_engine_signaled(engine, seqno));
if (i915_gem_context_is_banned(request->gem_context))
i915_request_skip(request, -EIO);
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
request->global_seqno = seqno;
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
!i915_request_enable_breadcrumb(request))
intel_engine_queue_breadcrumbs(engine);
__notify_execute_cb(request);
spin_unlock(&request->lock);
engine->emit_fini_breadcrumb(request,
@ -406,12 +464,10 @@ void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d:%d\n",
GEM_TRACE("%s fence %llx:%lld, current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
request->global_seqno,
hwsp_seqno(request),
intel_engine_get_seqno(engine));
hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline.lock);
@ -420,18 +476,25 @@ void __i915_request_unsubmit(struct i915_request *request)
* Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
GEM_BUG_ON(!request->global_seqno);
GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
engine->timeline.seqno--;
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
request->global_seqno = 0;
/*
* As we do not allow WAIT to preempt inflight requests,
* once we have executed a request, along with triggering
* any execution callbacks, we must preserve its ordering
* within the non-preemptible FIFO.
*/
BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
request->sched.attr.priority |= __NO_PREEMPTION;
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
i915_request_cancel_breadcrumb(request);
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
spin_unlock(&request->lock);
/* Transfer back from the global per-engine timeline to per-context */
@ -518,7 +581,7 @@ i915_request_alloc_slow(struct intel_context *ce)
ring_retire_requests(ring);
out:
return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
}
static int add_timeline_barrier(struct i915_request *rq)
@ -539,8 +602,10 @@ struct i915_request *
i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
struct drm_i915_private *i915 = engine->i915;
struct i915_request *rq;
struct intel_context *ce;
struct i915_timeline *tl;
struct i915_request *rq;
u32 seqno;
int ret;
lockdep_assert_held(&i915->drm.struct_mutex);
@ -556,8 +621,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged.
*/
if (i915_terminally_wedged(&i915->gpu_error))
return ERR_PTR(-EIO);
ret = i915_terminally_wedged(i915);
if (ret)
return ERR_PTR(ret);
/*
* Pinning the contexts may generate requests in order to acquire
@ -569,6 +635,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
return ERR_CAST(ce);
reserve_gt(i915);
mutex_lock(&ce->ring->timeline->mutex);
/* Move our oldest request to the slab-cache (if not in use!) */
rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
@ -605,7 +672,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*
* Do not use kmem_cache_zalloc() here!
*/
rq = kmem_cache_alloc(i915->requests,
rq = kmem_cache_alloc(global.slab_requests,
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
rq = i915_request_alloc_slow(ce);
@ -615,24 +682,28 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
}
}
rq->rcustate = get_state_synchronize_rcu();
INIT_LIST_HEAD(&rq->active_list);
INIT_LIST_HEAD(&rq->execute_cb);
tl = ce->ring->timeline;
ret = i915_timeline_get_seqno(tl, rq, &seqno);
if (ret)
goto err_free;
rq->i915 = i915;
rq->engine = engine;
rq->gem_context = ctx;
rq->hw_context = ce;
rq->ring = ce->ring;
rq->timeline = ce->ring->timeline;
rq->timeline = tl;
GEM_BUG_ON(rq->timeline == &engine->timeline);
rq->hwsp_seqno = rq->timeline->hwsp_seqno;
rq->hwsp_seqno = tl->hwsp_seqno;
rq->hwsp_cacheline = tl->hwsp_cacheline;
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
spin_lock_init(&rq->lock);
dma_fence_init(&rq->fence,
&i915_fence_ops,
&rq->lock,
rq->timeline->fence_context,
timeline_get_seqno(rq->timeline));
dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
tl->fence_context, seqno);
/* We bump the ref for the fence chain */
i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
@ -640,7 +711,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
i915_sched_node_init(&rq->sched);
/* No zalloc, must clear what we need by hand */
rq->global_seqno = 0;
rq->file_priv = NULL;
rq->batch = NULL;
rq->capture_list = NULL;
@ -693,13 +763,62 @@ err_unwind:
GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
kmem_cache_free(i915->requests, rq);
err_free:
kmem_cache_free(global.slab_requests, rq);
err_unreserve:
mutex_unlock(&ce->ring->timeline->mutex);
unreserve_gt(i915);
intel_context_unpin(ce);
return ERR_PTR(ret);
}
static int
emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
gfp_t gfp)
{
u32 hwsp_offset;
u32 *cs;
int err;
GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
/* We need to pin the signaler's HWSP until we are finished reading. */
err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
if (err)
return err;
/* Only submit our spinner after the signaler is running! */
err = i915_request_await_execution(to, from, gfp);
if (err)
return err;
cs = intel_ring_begin(to, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
/*
* Using greater-than-or-equal here means we have to worry
* about seqno wraparound. To side step that issue, we swap
* the timeline HWSP upon wrapping, so that everyone listening
* for the old (pre-wrap) values does not see the much smaller
* (post-wrap) values than they were expecting (and so wait
* forever).
*/
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_GTE_SDD;
*cs++ = from->fence.seqno;
*cs++ = hwsp_offset;
*cs++ = 0;
intel_ring_advance(to, cs);
to->sched.flags |= I915_SCHED_HAS_SEMAPHORE;
return 0;
}
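/*
 * For illustration, the busywait emitted above decodes to the following
 * four dwords, using a made-up signaler seqno of 42 and a made-up pinned
 * HWSP offset of 0x1000 in the GGTT:
 *
 *	dw0: MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
 *	     MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_GTE_SDD
 *	dw1: 0x0000002a	(semaphore data, from->fence.seqno)
 *	dw2: 0x00001000	(semaphore address, low dword)
 *	dw3: 0x00000000	(semaphore address, high dword)
 *
 * The CS then polls the dword at offset 0x1000 until it reads a value
 * >= 42, i.e. until the signaler's breadcrumb lands.
 */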
static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
@ -712,9 +831,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
return 0;
if (to->engine->schedule) {
ret = i915_sched_node_add_dependency(to->i915,
&to->sched,
&from->sched);
ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
if (ret < 0)
return ret;
}
@ -723,6 +840,9 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
I915_FENCE_GFP);
} else if (intel_engine_has_semaphores(to->engine) &&
to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
} else {
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
@ -889,7 +1009,7 @@ void i915_request_add(struct i915_request *request)
GEM_TRACE("%s fence %llx:%lld\n",
engine->name, request->fence.context, request->fence.seqno);
lockdep_assert_held(&request->i915->drm.struct_mutex);
lockdep_assert_held(&request->timeline->mutex);
trace_i915_request_add(request);
/*
@ -948,6 +1068,7 @@ void i915_request_add(struct i915_request *request)
GEM_TRACE("marking %s as active\n", ring->timeline->name);
list_add(&ring->active_link, &request->i915->gt.active_rings);
}
request->i915->gt.active_engines |= request->engine->mask;
request->emitted_jiffies = jiffies;
/*
@ -966,6 +1087,21 @@ void i915_request_add(struct i915_request *request)
if (engine->schedule) {
struct i915_sched_attr attr = request->gem_context->sched;
/*
* Boost actual workloads past semaphores!
*
* With semaphores we spin on one engine waiting for another,
* simply to reduce the latency of starting our work when
* the signaler completes. However, if there is any other
* work that we could be doing on this engine instead, that
* is better utilisation and will reduce the overall duration
* of the current work. To avoid PI boosting a semaphore
* far in the distant past over useful work, we keep a history
* of any semaphore use along our dependency chain.
*/
if (!(request->sched.flags & I915_SCHED_HAS_SEMAPHORE))
attr.priority |= I915_PRIORITY_NOSEMAPHORE;
/*
* Boost priorities to new clients (new request flows).
*
@ -1000,6 +1136,8 @@ void i915_request_add(struct i915_request *request)
*/
if (prev && i915_request_completed(prev))
i915_request_retire_upto(prev);
mutex_unlock(&request->timeline->mutex);
}
static unsigned long local_clock_us(unsigned int *cpu)
@ -1136,8 +1274,23 @@ long i915_request_wait(struct i915_request *rq,
if (__i915_spin_request(rq, state, 5))
goto out;
if (flags & I915_WAIT_PRIORITY)
/*
* This client is about to stall waiting for the GPU. In many cases
* this is undesirable and limits the throughput of the system, as
* many clients cannot continue processing user input/output whilst
* blocked. RPS autotuning may take tens of milliseconds to respond
* to the GPU load and thus incurs additional latency for the client.
* We can circumvent that by promoting the GPU frequency to maximum
* before we sleep. This makes the GPU throttle up much more quickly
* (good for benchmarks and user experience, e.g. window animations),
* but at a cost of spending more power processing the workload
* (bad for battery).
*/
if (flags & I915_WAIT_PRIORITY) {
if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
gen6_rps_boost(rq);
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
}
wait.tsk = current;
if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
@ -1179,11 +1332,66 @@ void i915_retire_requests(struct drm_i915_private *i915)
if (!i915->gt.active_requests)
return;
list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
list_for_each_entry_safe(ring, tmp,
&i915->gt.active_rings, active_link) {
intel_ring_get(ring); /* last rq holds reference! */
ring_retire_requests(ring);
intel_ring_put(ring);
}
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif
static void i915_global_request_shrink(void)
{
kmem_cache_shrink(global.slab_dependencies);
kmem_cache_shrink(global.slab_execute_cbs);
kmem_cache_shrink(global.slab_requests);
}
static void i915_global_request_exit(void)
{
kmem_cache_destroy(global.slab_dependencies);
kmem_cache_destroy(global.slab_execute_cbs);
kmem_cache_destroy(global.slab_requests);
}
static struct i915_global_request global = { {
.shrink = i915_global_request_shrink,
.exit = i915_global_request_exit,
} };
int __init i915_global_request_init(void)
{
global.slab_requests = KMEM_CACHE(i915_request,
SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
SLAB_TYPESAFE_BY_RCU);
if (!global.slab_requests)
return -ENOMEM;
global.slab_execute_cbs = KMEM_CACHE(execute_cb,
SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
SLAB_TYPESAFE_BY_RCU);
if (!global.slab_execute_cbs)
goto err_requests;
global.slab_dependencies = KMEM_CACHE(i915_dependency,
SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT);
if (!global.slab_dependencies)
goto err_execute_cbs;
i915_global_register(&global.base);
return 0;
err_execute_cbs:
kmem_cache_destroy(global.slab_execute_cbs);
err_requests:
kmem_cache_destroy(global.slab_requests);
return -ENOMEM;
}
@ -29,6 +29,7 @@
#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"
#include <uapi/drm/i915_drm.h>
@ -37,6 +38,7 @@ struct drm_file;
struct drm_i915_gem_object;
struct i915_request;
struct i915_timeline;
struct i915_timeline_cacheline;
struct i915_capture_list {
struct i915_capture_list *next;
@ -127,6 +129,7 @@ struct i915_request {
*/
struct i915_sw_fence submit;
wait_queue_entry_t submitq;
struct list_head execute_cb;
/*
* A list of everyone we wait upon, and everyone who waits upon us.
@ -147,13 +150,15 @@ struct i915_request {
*/
const u32 *hwsp_seqno;
/**
* GEM sequence number associated with this request on the
* global execution timeline. It is zero when the request is not
* on the HW queue (i.e. not on the engine timeline list).
* Its value is guarded by the timeline spinlock.
/*
* If we need to access the timeline's seqno for this request in
* another request, we need to keep a read reference to this associated
* cacheline, so that we do not free and recycle it before the foreign
* observers have completed. Hence, we keep a pointer to the cacheline
* inside the timeline's HWSP vma, but it is only valid while this
* request has not completed and guarded by the timeline mutex.
*/
u32 global_seqno;
struct i915_timeline_cacheline *hwsp_cacheline;
/** Position in the ring of the start of the request */
u32 head;
@ -204,6 +209,11 @@ struct i915_request {
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_link;
I915_SELFTEST_DECLARE(struct {
struct list_head link;
unsigned long delay;
} mock;)
};
#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
@ -247,30 +257,6 @@ i915_request_put(struct i915_request *rq)
dma_fence_put(&rq->fence);
}
/**
* i915_request_global_seqno - report the current global seqno
* @request - the request
*
* A request is assigned a global seqno only when it is on the hardware
* execution queue. The global seqno can be used to maintain a list of
* requests on the same engine in retirement order, for example for
* constructing a priority queue for waiting. Prior to its execution, or
* if it is subsequently removed in the event of preemption, its global
* seqno is zero. As both insertion and removal from the execution queue
* may operate in IRQ context, it is not guarded by the usual struct_mutex
* BKL. Instead those relying on the global seqno must be prepared for its
* value to change between reads. Only when the request is complete can
* the global seqno be stable (due to the memory barriers on submitting
* the commands to the hardware to write the breadcrumb, if the HWS shows
* that it has passed the global seqno and the global seqno is unchanged
* after the read, it is indeed complete).
*/
static inline u32
i915_request_global_seqno(const struct i915_request *request)
{
return READ_ONCE(request->global_seqno);
}
int i915_request_await_object(struct i915_request *to,
struct drm_i915_gem_object *obj,
bool write);
@ -358,10 +344,27 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
* i915_request_started - check if the request has begun being executed
* @rq: the request
*
* Returns true if the request has been submitted to hardware, and the hardware
* has advanced passed the end of the previous request and so should be either
* currently processing the request (though it may be preempted and so
* not necessarily the next request to complete) or have completed the request.
* If the timeline is not using initial breadcrumbs, a request is
* considered started if the previous request on its timeline (i.e.
* context) has been signaled.
*
* If the timeline is using semaphores, it will also be emitting an
* "initial breadcrumb" after the semaphores are complete and just before
* it began executing the user payload. A request can therefore be active
* on the HW and not yet started as it is still busywaiting on its
* dependencies (via HW semaphores).
*
* If the request has started, its dependencies will have been signaled
* (either by fences or by semaphores) and it will have begun processing
* the user payload.
*
* However, even if a request has started, it may have been preempted and
* so no longer active, or it may have already completed.
*
* See also i915_request_is_active().
*
* Returns true if the request has begun executing the user payload, or
* has completed.
*/
static inline bool i915_request_started(const struct i915_request *rq)
{
@ -22,24 +22,15 @@ static void engine_skip_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct i915_gem_context *hung_ctx = rq->gem_context;
struct i915_timeline *timeline = rq->timeline;
lockdep_assert_held(&engine->timeline.lock);
GEM_BUG_ON(timeline == &engine->timeline);
spin_lock(&timeline->lock);
if (!i915_request_is_active(rq))
return;
if (i915_request_is_active(rq)) {
list_for_each_entry_continue(rq,
&engine->timeline.requests, link)
if (rq->gem_context == hung_ctx)
i915_request_skip(rq, -EIO);
}
list_for_each_entry(rq, &timeline->requests, link)
i915_request_skip(rq, -EIO);
spin_unlock(&timeline->lock);
list_for_each_entry_continue(rq, &engine->timeline.requests, link)
if (rq->gem_context == hung_ctx)
i915_request_skip(rq, -EIO);
}
static void client_mark_guilty(struct drm_i915_file_private *file_priv,
@ -68,23 +59,29 @@ static void client_mark_guilty(struct drm_i915_file_private *file_priv,
static bool context_mark_guilty(struct i915_gem_context *ctx)
{
unsigned int score;
bool banned, bannable;
unsigned long prev_hang;
bool banned;
int i;
atomic_inc(&ctx->guilty_count);
bannable = i915_gem_context_is_bannable(ctx);
score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
/* Cool contexts don't accumulate client ban score */
if (!bannable)
/* Cool contexts are too cool to be banned! (Used for reset testing.) */
if (!i915_gem_context_is_bannable(ctx))
return false;
/* Record the timestamp for the last N hangs */
prev_hang = ctx->hang_timestamp[0];
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
ctx->hang_timestamp[i] = jiffies;
/* If we have hung N+1 times in rapid succession, we ban the context! */
banned = !i915_gem_context_is_recoverable(ctx);
if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
banned = true;
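	/*
	 * For example, with a two-entry hang_timestamp[] ring (the size
	 * assumed in this sketch), a context is banned on its third hang
	 * if it lands within CONTEXT_FAST_HANG_JIFFIES of the first.
	 */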
if (banned) {
DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
ctx->name, atomic_read(&ctx->guilty_count),
score);
DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
ctx->name, atomic_read(&ctx->guilty_count));
i915_gem_context_set_banned(ctx);
}
@ -101,6 +98,12 @@ static void context_mark_innocent(struct i915_gem_context *ctx)
void i915_reset_request(struct i915_request *rq, bool guilty)
{
GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
rq->engine->name,
rq->fence.context,
rq->fence.seqno,
yesno(guilty));
lockdep_assert_held(&rq->engine->timeline.lock);
GEM_BUG_ON(i915_request_completed(rq));
@ -119,8 +122,10 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
const u32 base = engine->mmio_base;
GEM_TRACE("%s\n", engine->name);
if (intel_engine_stop_cs(engine))
DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
@ -133,9 +138,9 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
I915_WRITE_FW(RING_CTL(base), 0);
/* Check acts as a post */
if (I915_READ_FW(RING_HEAD(base)) != 0)
DRM_DEBUG_DRIVER("%s: ring head not parked\n",
engine->name);
if (I915_READ_FW(RING_HEAD(base)))
GEM_TRACE("%s: ring head [%x] not parked\n",
engine->name, I915_READ_FW(RING_HEAD(base)));
}
static void i915_stop_engines(struct drm_i915_private *i915,
@ -298,12 +303,12 @@ static int gen6_reset_engines(struct drm_i915_private *i915,
unsigned int retry)
{
struct intel_engine_cs *engine;
const u32 hw_engine_mask[I915_NUM_ENGINES] = {
[RCS] = GEN6_GRDOM_RENDER,
[BCS] = GEN6_GRDOM_BLT,
[VCS] = GEN6_GRDOM_MEDIA,
[VCS2] = GEN8_GRDOM_MEDIA2,
[VECS] = GEN6_GRDOM_VECS,
const u32 hw_engine_mask[] = {
[RCS0] = GEN6_GRDOM_RENDER,
[BCS0] = GEN6_GRDOM_BLT,
[VCS0] = GEN6_GRDOM_MEDIA,
[VCS1] = GEN8_GRDOM_MEDIA2,
[VECS0] = GEN6_GRDOM_VECS,
};
u32 hw_mask;
@ -313,8 +318,10 @@ static int gen6_reset_engines(struct drm_i915_private *i915,
unsigned int tmp;
hw_mask = 0;
for_each_engine_masked(engine, i915, engine_mask, tmp)
for_each_engine_masked(engine, i915, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
}
}
return gen6_hw_domain_reset(i915, hw_mask);
@ -421,28 +428,27 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
unsigned int engine_mask,
unsigned int retry)
{
const u32 hw_engine_mask[I915_NUM_ENGINES] = {
[RCS] = GEN11_GRDOM_RENDER,
[BCS] = GEN11_GRDOM_BLT,
[VCS] = GEN11_GRDOM_MEDIA,
[VCS2] = GEN11_GRDOM_MEDIA2,
[VCS3] = GEN11_GRDOM_MEDIA3,
[VCS4] = GEN11_GRDOM_MEDIA4,
[VECS] = GEN11_GRDOM_VECS,
[VECS2] = GEN11_GRDOM_VECS2,
const u32 hw_engine_mask[] = {
[RCS0] = GEN11_GRDOM_RENDER,
[BCS0] = GEN11_GRDOM_BLT,
[VCS0] = GEN11_GRDOM_MEDIA,
[VCS1] = GEN11_GRDOM_MEDIA2,
[VCS2] = GEN11_GRDOM_MEDIA3,
[VCS3] = GEN11_GRDOM_MEDIA4,
[VECS0] = GEN11_GRDOM_VECS,
[VECS1] = GEN11_GRDOM_VECS2,
};
struct intel_engine_cs *engine;
unsigned int tmp;
u32 hw_mask;
int ret;
BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
if (engine_mask == ALL_ENGINES) {
hw_mask = GEN11_GRDOM_FULL;
} else {
hw_mask = 0;
for_each_engine_masked(engine, i915, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
hw_mask |= gen11_lock_sfc(i915, engine);
}
@ -532,9 +538,6 @@ typedef int (*reset_func)(struct drm_i915_private *,
static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
{
if (!i915_modparams.reset)
return NULL;
if (INTEL_GEN(i915) >= 8)
return gen8_reset_engines;
else if (INTEL_GEN(i915) >= 6)
@ -582,7 +585,8 @@ int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
*
* FIXME: Wa for more modern gens needs to be validated
*/
i915_stop_engines(i915, engine_mask);
if (retry)
i915_stop_engines(i915, engine_mask);
GEM_TRACE("engine_mask=%x\n", engine_mask);
preempt_disable();
@ -599,6 +603,9 @@ bool intel_has_gpu_reset(struct drm_i915_private *i915)
if (USES_GUC(i915))
return false;
if (!i915_modparams.reset)
return false;
return intel_get_gpu_reset(i915);
}
@ -639,6 +646,32 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
engine->reset.prepare(engine);
}
static void revoke_mmaps(struct drm_i915_private *i915)
{
int i;
for (i = 0; i < i915->num_fence_regs; i++) {
struct drm_vma_offset_node *node;
struct i915_vma *vma;
u64 vma_offset;
vma = READ_ONCE(i915->fence_regs[i].vma);
if (!vma)
continue;
if (!i915_vma_has_userfault(vma))
continue;
GEM_BUG_ON(vma->fence != &i915->fence_regs[i]);
node = &vma->obj->base.vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
vma->size,
1);
}
}
static void reset_prepare(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@ -647,7 +680,12 @@ static void reset_prepare(struct drm_i915_private *i915)
for_each_engine(engine, i915, id)
reset_prepare_engine(engine);
intel_uc_sanitize(i915);
intel_uc_reset_prepare(i915);
}
static void gt_revoke(struct drm_i915_private *i915)
{
revoke_mmaps(i915);
}
static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
@ -665,7 +703,7 @@ static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
return err;
for_each_engine(engine, i915, id)
intel_engine_reset(engine, stalled_mask & ENGINE_MASK(id));
intel_engine_reset(engine, stalled_mask & engine->mask);
i915_gem_restore_fences(i915);
@ -722,8 +760,10 @@ static void reset_finish(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id)
for_each_engine(engine, i915, id) {
reset_finish_engine(engine);
intel_engine_signal_breadcrumbs(engine);
}
}
static void reset_restart(struct drm_i915_private *i915)
@ -761,23 +801,19 @@ static void nop_submit_request(struct i915_request *request)
spin_lock_irqsave(&engine->timeline.lock, flags);
__i915_request_submit(request);
i915_request_mark_complete(request);
intel_engine_write_global_seqno(engine, request->global_seqno);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
intel_engine_queue_breadcrumbs(engine);
}
void i915_gem_set_wedged(struct drm_i915_private *i915)
static void __i915_gem_set_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
mutex_lock(&error->wedge_mutex);
if (test_bit(I915_WEDGED, &error->flags)) {
mutex_unlock(&error->wedge_mutex);
if (test_bit(I915_WEDGED, &error->flags))
return;
}
if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
struct drm_printer p = drm_debug_printer(__func__);
@ -793,11 +829,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
for_each_engine(engine, i915, id)
reset_prepare_engine(engine);
reset_prepare(i915);
/* Even if the GPU reset fails, it should still stop the engines */
if (INTEL_GEN(i915) >= 5)
if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
intel_gpu_reset(i915, ALL_ENGINES);
for_each_engine(engine, i915, id) {
@ -811,31 +846,35 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* either this call here to intel_engine_write_global_seqno, or the one
* in nop_submit_request.
*/
synchronize_rcu();
synchronize_rcu_expedited();
/* Mark all executing requests as skipped */
for_each_engine(engine, i915, id)
engine->cancel_requests(engine);
for_each_engine(engine, i915, id) {
reset_finish_engine(engine);
intel_engine_signal_breadcrumbs(engine);
}
reset_finish(i915);
smp_mb__before_atomic();
set_bit(I915_WEDGED, &error->flags);
GEM_TRACE("end\n");
mutex_unlock(&error->wedge_mutex);
wake_up_all(&error->reset_queue);
}
bool i915_gem_unset_wedged(struct drm_i915_private *i915)
void i915_gem_set_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
intel_wakeref_t wakeref;
mutex_lock(&error->wedge_mutex);
with_intel_runtime_pm(i915, wakeref)
__i915_gem_set_wedged(i915);
mutex_unlock(&error->wedge_mutex);
}
static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
struct i915_timeline *tl;
bool ret = false;
if (!test_bit(I915_WEDGED, &error->flags))
return true;
@ -843,8 +882,6 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
return false;
mutex_lock(&error->wedge_mutex);
GEM_TRACE("start\n");
/*
@ -860,30 +897,20 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
mutex_lock(&i915->gt.timelines.mutex);
list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
struct i915_request *rq;
long timeout;
rq = i915_active_request_get_unlocked(&tl->last_request);
if (!rq)
continue;
/*
* We can't use our normal waiter as we want to
* avoid recursively trying to handle the current
* reset. The basic dma_fence_default_wait() installs
* a callback for dma_fence_signal(), which is
* triggered by our nop handler (indirectly, the
* callback enables the signaler thread which is
* woken by the nop_submit_request() advancing the seqno
* and when the seqno passes the fence, the signaler
* then signals the fence waking us up).
* All internal dependencies (i915_requests) will have
* been flushed by the set-wedge, but we may be stuck waiting
* for external fences. These should all be capped to 10s
* (I915_FENCE_TIMEOUT) so this wait should not be unbounded
* in the worst case.
*/
timeout = dma_fence_default_wait(&rq->fence, true,
MAX_SCHEDULE_TIMEOUT);
dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
i915_request_put(rq);
if (timeout < 0) {
mutex_unlock(&i915->gt.timelines.mutex);
goto unlock;
}
}
mutex_unlock(&i915->gt.timelines.mutex);
@ -904,57 +931,37 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
clear_bit(I915_WEDGED, &i915->gpu_error.flags);
ret = true;
unlock:
mutex_unlock(&i915->gpu_error.wedge_mutex);
return ret;
return true;
}
struct __i915_reset {
struct drm_i915_private *i915;
unsigned int stalled_mask;
};
static int __i915_reset__BKL(void *data)
bool i915_gem_unset_wedged(struct drm_i915_private *i915)
{
struct __i915_reset *arg = data;
int err;
struct i915_gpu_error *error = &i915->gpu_error;
bool result;
err = intel_gpu_reset(arg->i915, ALL_ENGINES);
if (err)
return err;
mutex_lock(&error->wedge_mutex);
result = __i915_gem_unset_wedged(i915);
mutex_unlock(&error->wedge_mutex);
return gt_reset(arg->i915, arg->stalled_mask);
return result;
}
#if RESET_UNDER_STOP_MACHINE
/*
* XXX An alternative to using stop_machine would be to park only the
* processes that have a GGTT mmap. By remote parking the threads (SIGSTOP)
* we should be able to prevent their memory accesses via the lost fence
* registers over the course of the reset without the potential recursion
* of mutexes between the pagefault handler and reset.
*
* See igt/gem_mmap_gtt/hang
*/
#define __do_reset(fn, arg) stop_machine(fn, arg, NULL)
#else
#define __do_reset(fn, arg) fn(arg)
#endif
static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
{
struct __i915_reset arg = { i915, stalled_mask };
int err, i;
err = __do_reset(__i915_reset__BKL, &arg);
for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
msleep(100);
err = __do_reset(__i915_reset__BKL, &arg);
}
gt_revoke(i915);
return err;
err = intel_gpu_reset(i915, ALL_ENGINES);
for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
msleep(10 * (i + 1));
err = intel_gpu_reset(i915, ALL_ENGINES);
}
if (err)
return err;
return gt_reset(i915, stalled_mask);
}
/**
@ -966,8 +973,6 @@ static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
* Reset the chip. Useful if a hang is detected. Marks the device as wedged
* on failure.
*
* Caller must hold the struct_mutex.
*
* Procedure is fairly simple:
* - reset the chip using the reset reg
* - re-init context state
@ -990,7 +995,7 @@ void i915_reset(struct drm_i915_private *i915,
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
/* Clear any previous failed attempts at recovery. Time to try again. */
if (!i915_gem_unset_wedged(i915))
if (!__i915_gem_unset_wedged(i915))
return;
if (reason)
@ -1007,11 +1012,17 @@ void i915_reset(struct drm_i915_private *i915,
goto error;
}
if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
intel_runtime_pm_disable_interrupts(i915);
if (do_reset(i915, stalled_mask)) {
dev_err(i915->drm.dev, "Failed to reset chip\n");
goto taint;
}
if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
intel_runtime_pm_enable_interrupts(i915);
intel_overlay_reset(i915);
/*
@ -1033,7 +1044,7 @@ void i915_reset(struct drm_i915_private *i915,
finish:
reset_finish(i915);
if (!i915_terminally_wedged(error))
if (!__i915_wedged(error))
reset_restart(i915);
return;
@ -1052,14 +1063,14 @@ taint:
*/
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
error:
i915_gem_set_wedged(i915);
__i915_gem_set_wedged(i915);
goto finish;
}
static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
struct intel_engine_cs *engine)
{
return intel_gpu_reset(i915, intel_engine_flag(engine));
return intel_gpu_reset(i915, engine->mask);
}
/**
@ -1144,7 +1155,12 @@ static void i915_reset_device(struct drm_i915_private *i915,
i915_wedge_on_timeout(&w, i915, 5 * HZ) {
intel_prepare_reset(i915);
/* Flush everyone using a resource about to be clobbered */
synchronize_srcu_expedited(&error->reset_backoff_srcu);
mutex_lock(&error->wedge_mutex);
i915_reset(i915, engine_mask, reason);
mutex_unlock(&error->wedge_mutex);
intel_finish_reset(i915);
}
@ -1190,7 +1206,7 @@ void i915_clear_error_registers(struct drm_i915_private *dev_priv)
I915_READ(RING_FAULT_REG(engine)) &
~RING_FAULT_VALID);
}
POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS0]));
}
}
@ -1212,6 +1228,7 @@ void i915_handle_error(struct drm_i915_private *i915,
unsigned long flags,
const char *fmt, ...)
{
struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
unsigned int tmp;
@ -1237,7 +1254,7 @@ void i915_handle_error(struct drm_i915_private *i915,
*/
wakeref = intel_runtime_pm_get(i915);
engine_mask &= INTEL_INFO(i915)->ring_mask;
engine_mask &= INTEL_INFO(i915)->engine_mask;
if (flags & I915_ERROR_CAPTURE) {
i915_capture_error_state(i915, engine_mask, msg);
@ -1248,20 +1265,19 @@ void i915_handle_error(struct drm_i915_private *i915,
* Try engine reset when available. We fall back to full reset if
* single reset fails.
*/
if (intel_has_reset_engine(i915) &&
!i915_terminally_wedged(&i915->gpu_error)) {
if (intel_has_reset_engine(i915) && !__i915_wedged(error)) {
for_each_engine_masked(engine, i915, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&i915->gpu_error.flags))
&error->flags))
continue;
if (i915_reset_engine(engine, msg) == 0)
engine_mask &= ~intel_engine_flag(engine);
engine_mask &= ~engine->mask;
clear_bit(I915_RESET_ENGINE + engine->id,
&i915->gpu_error.flags);
wake_up_bit(&i915->gpu_error.flags,
&error->flags);
wake_up_bit(&error->flags,
I915_RESET_ENGINE + engine->id);
}
}
@ -1270,18 +1286,20 @@ void i915_handle_error(struct drm_i915_private *i915,
goto out;
/* Full reset needs the mutex, stop any other user trying to do so. */
if (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) {
wait_event(i915->gpu_error.reset_queue,
!test_bit(I915_RESET_BACKOFF,
&i915->gpu_error.flags));
goto out;
if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) {
wait_event(error->reset_queue,
!test_bit(I915_RESET_BACKOFF, &error->flags));
goto out; /* piggy-back on the other reset */
}
/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
synchronize_rcu_expedited();
/* Prevent any other reset-engine attempt. */
for_each_engine(engine, i915, tmp) {
while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&i915->gpu_error.flags))
wait_on_bit(&i915->gpu_error.flags,
&error->flags))
wait_on_bit(&error->flags,
I915_RESET_ENGINE + engine->id,
TASK_UNINTERRUPTIBLE);
}
@ -1290,16 +1308,74 @@ void i915_handle_error(struct drm_i915_private *i915,
for_each_engine(engine, i915, tmp) {
clear_bit(I915_RESET_ENGINE + engine->id,
&i915->gpu_error.flags);
&error->flags);
}
clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
wake_up_all(&i915->gpu_error.reset_queue);
clear_bit(I915_RESET_BACKOFF, &error->flags);
wake_up_all(&error->reset_queue);
out:
intel_runtime_pm_put(i915, wakeref);
}
int i915_reset_trylock(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
int srcu;
might_lock(&error->reset_backoff_srcu);
might_sleep();
rcu_read_lock();
while (test_bit(I915_RESET_BACKOFF, &error->flags)) {
rcu_read_unlock();
if (wait_event_interruptible(error->reset_queue,
!test_bit(I915_RESET_BACKOFF,
&error->flags)))
return -EINTR;
rcu_read_lock();
}
srcu = srcu_read_lock(&error->reset_backoff_srcu);
rcu_read_unlock();
return srcu;
}
void i915_reset_unlock(struct drm_i915_private *i915, int tag)
__releases(&i915->gpu_error.reset_backoff_srcu)
{
struct i915_gpu_error *error = &i915->gpu_error;
srcu_read_unlock(&error->reset_backoff_srcu, tag);
}
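/*
 * Hypothetical caller (a sketch, not part of this patch): the int
 * returned by i915_reset_trylock() is an SRCU read-side cookie that
 * must be handed back to i915_reset_unlock():
 *
 *	int srcu;
 *
 *	srcu = i915_reset_trylock(i915);
 *	if (srcu < 0)
 *		return srcu;	(-EINTR if interrupted while backing off)
 *
 *	... touch state that a concurrent reset would clobber ...
 *
 *	i915_reset_unlock(i915, srcu);
 */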
int i915_terminally_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
might_sleep();
if (!__i915_wedged(error))
return 0;
/* Reset still in progress? Maybe we will recover? */
if (!test_bit(I915_RESET_BACKOFF, &error->flags))
return -EIO;
/* XXX intel_reset_finish() still takes struct_mutex!!! */
if (mutex_is_locked(&i915->drm.struct_mutex))
return -EAGAIN;
if (wait_event_interruptible(error->reset_queue,
!test_bit(I915_RESET_BACKOFF,
&error->flags)))
return -EINTR;
return __i915_wedged(error) ? -EIO : 0;
}
bool i915_reset_flush(struct drm_i915_private *i915)
{
int err;


@ -9,6 +9,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/srcu.h>
struct drm_i915_private;
struct intel_engine_cs;
@ -32,6 +33,11 @@ int i915_reset_engine(struct intel_engine_cs *engine,
void i915_reset_request(struct i915_request *rq, bool guilty);
bool i915_reset_flush(struct drm_i915_private *i915);
int __must_check i915_reset_trylock(struct drm_i915_private *i915);
void i915_reset_unlock(struct drm_i915_private *i915, int tag);
int i915_terminally_wedged(struct drm_i915_private *i915);
bool intel_has_gpu_reset(struct drm_i915_private *i915);
bool intel_has_reset_engine(struct drm_i915_private *i915);

Просмотреть файл

@ -7,9 +7,16 @@
#include <linux/mutex.h>
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
static struct i915_global_scheduler {
struct i915_global base;
struct kmem_cache *slab_dependencies;
struct kmem_cache *slab_priorities;
} global;
static DEFINE_SPINLOCK(schedule_lock);
static const struct i915_request *
@ -18,6 +25,11 @@ node_to_request(const struct i915_sched_node *node)
return container_of(node, const struct i915_request, sched);
}
static inline bool node_started(const struct i915_sched_node *node)
{
return i915_request_started(node_to_request(node));
}
static inline bool node_signaled(const struct i915_sched_node *node)
{
return i915_request_completed(node_to_request(node));
@ -29,19 +41,19 @@ void i915_sched_node_init(struct i915_sched_node *node)
INIT_LIST_HEAD(&node->waiters_list);
INIT_LIST_HEAD(&node->link);
node->attr.priority = I915_PRIORITY_INVALID;
node->flags = 0;
}
static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
i915_dependency_alloc(void)
{
return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}
static void
i915_dependency_free(struct drm_i915_private *i915,
struct i915_dependency *dep)
i915_dependency_free(struct i915_dependency *dep)
{
kmem_cache_free(i915->dependencies, dep);
kmem_cache_free(global.slab_dependencies, dep);
}
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
@ -60,6 +72,11 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
dep->signaler = signal;
dep->flags = flags;
/* Keep track of whether anyone on this chain has a semaphore */
if (signal->flags & I915_SCHED_HAS_SEMAPHORE &&
!node_started(signal))
node->flags |= I915_SCHED_HAS_SEMAPHORE;
ret = true;
}
@ -68,25 +85,23 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
return ret;
}
int i915_sched_node_add_dependency(struct drm_i915_private *i915,
struct i915_sched_node *node,
int i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal)
{
struct i915_dependency *dep;
dep = i915_dependency_alloc(i915);
dep = i915_dependency_alloc();
if (!dep)
return -ENOMEM;
if (!__i915_sched_node_add_dependency(node, signal, dep,
I915_DEPENDENCY_ALLOC))
i915_dependency_free(i915, dep);
i915_dependency_free(dep);
return 0;
}
void i915_sched_node_fini(struct drm_i915_private *i915,
struct i915_sched_node *node)
void i915_sched_node_fini(struct i915_sched_node *node)
{
struct i915_dependency *dep, *tmp;
@ -106,7 +121,7 @@ void i915_sched_node_fini(struct drm_i915_private *i915,
list_del(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(i915, dep);
i915_dependency_free(dep);
}
/* Remove ourselves from everyone who depends upon us */
@ -116,7 +131,7 @@ void i915_sched_node_fini(struct drm_i915_private *i915,
list_del(&dep->signal_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(i915, dep);
i915_dependency_free(dep);
}
spin_unlock(&schedule_lock);
@ -193,7 +208,7 @@ find_priolist:
if (prio == I915_PRIORITY_NORMAL) {
p = &execlists->default_priolist;
} else {
p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
/* Convert an allocation failure to a priority bump */
if (unlikely(!p)) {
prio = I915_PRIORITY_NORMAL; /* recurses just once */
@ -301,6 +316,10 @@ static void __i915_schedule(struct i915_request *rq,
list_for_each_entry(dep, &dfs, dfs_link) {
struct i915_sched_node *node = dep->signaler;
/* If we are already flying, we know we have no signalers */
if (node_started(node))
continue;
/*
* Within an engine, there can be no cycle, but we may
* refer to the same dependency chain multiple times
@ -313,7 +332,6 @@ static void __i915_schedule(struct i915_request *rq,
if (node_signaled(p->signaler))
continue;
GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
if (prio > READ_ONCE(p->signaler->attr.priority))
list_move_tail(&p->dfs_link, &dfs);
}
@ -415,3 +433,45 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
spin_unlock_bh(&schedule_lock);
}
void __i915_priolist_free(struct i915_priolist *p)
{
kmem_cache_free(global.slab_priorities, p);
}
static void i915_global_scheduler_shrink(void)
{
kmem_cache_shrink(global.slab_dependencies);
kmem_cache_shrink(global.slab_priorities);
}
static void i915_global_scheduler_exit(void)
{
kmem_cache_destroy(global.slab_dependencies);
kmem_cache_destroy(global.slab_priorities);
}
static struct i915_global_scheduler global = { {
.shrink = i915_global_scheduler_shrink,
.exit = i915_global_scheduler_exit,
} };
int __init i915_global_scheduler_init(void)
{
global.slab_dependencies = KMEM_CACHE(i915_dependency,
SLAB_HWCACHE_ALIGN);
if (!global.slab_dependencies)
return -ENOMEM;
global.slab_priorities = KMEM_CACHE(i915_priolist,
SLAB_HWCACHE_ALIGN);
if (!global.slab_priorities)
goto err_priorities;
i915_global_register(&global.base);
return 0;
err_priorities:
kmem_cache_destroy(global.slab_dependencies);
return -ENOMEM;
}


@ -24,14 +24,17 @@ enum {
I915_PRIORITY_INVALID = INT_MIN
};
#define I915_USER_PRIORITY_SHIFT 2
#define I915_USER_PRIORITY_SHIFT 3
#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
#define I915_PRIORITY_WAIT ((u8)BIT(0))
#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
#define I915_PRIORITY_WAIT ((u8)BIT(0))
#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
#define I915_PRIORITY_NOSEMAPHORE ((u8)BIT(2))
#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
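/*
 * Illustrative packing (values assumed, not from this patch): the user
 * priority now sits above three internal bonus bits, e.g.
 *	I915_USER_PRIORITY(2) | I915_PRIORITY_NEWCLIENT
 *	  == (2 << 3) | BIT(1) == 0x12,
 * with the user part recovered as prio >> I915_USER_PRIORITY_SHIFT and
 * the internal part as prio & I915_PRIORITY_MASK.
 */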
struct i915_sched_attr {
/**
@ -72,6 +75,8 @@ struct i915_sched_node {
struct list_head waiters_list; /* those after us, they depend upon us */
struct list_head link;
struct i915_sched_attr attr;
unsigned int flags;
#define I915_SCHED_HAS_SEMAPHORE BIT(0)
};
struct i915_dependency {
@ -83,6 +88,25 @@ struct i915_dependency {
#define I915_DEPENDENCY_ALLOC BIT(0)
};
struct i915_priolist {
struct list_head requests[I915_PRIORITY_COUNT];
struct rb_node node;
unsigned long used;
int priority;
};
#define priolist_for_each_request(it, plist, idx) \
for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
list_for_each_entry(it, &(plist)->requests[idx], sched.link)
#define priolist_for_each_request_consume(it, n, plist, idx) \
for (; \
(plist)->used ? (idx = __ffs((plist)->used)), 1 : 0; \
(plist)->used &= ~BIT(idx)) \
list_for_each_entry_safe(it, n, \
&(plist)->requests[idx], \
sched.link)
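/*
 * A minimal sketch of the bitmask walk the consume variant performs
 * (values assumed): plist->used has one bit per non-empty sub-list,
 * taken lowest set bit first and cleared once that sub-list is drained:
 *
 *	unsigned long used = 0x05;	bits 0 and 2 set
 *	while (used) {
 *		idx = __ffs(used);	0 on the first pass, then 2
 *		... drain requests[idx] ...
 *		used &= ~BIT(idx);
 *	}
 */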
void i915_sched_node_init(struct i915_sched_node *node);
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
@ -90,12 +114,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_dependency *dep,
unsigned long flags);
int i915_sched_node_add_dependency(struct drm_i915_private *i915,
struct i915_sched_node *node,
int i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal);
void i915_sched_node_fini(struct drm_i915_private *i915,
struct i915_sched_node *node);
void i915_sched_node_fini(struct i915_sched_node *node);
void i915_schedule(struct i915_request *request,
const struct i915_sched_attr *attr);
@ -105,4 +127,11 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
void __i915_priolist_free(struct i915_priolist *p);
static inline void i915_priolist_free(struct i915_priolist *p)
{
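/*
 * The normal-priority list is execlists->default_priolist, embedded
 * in the engine rather than allocated from the slab, so it must not
 * be handed to kmem_cache_free().
 */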
if (p->priority != I915_PRIORITY_NORMAL)
__i915_priolist_free(p);
}
#endif /* _I915_SCHEDULER_H_ */


@ -192,7 +192,7 @@ static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
__i915_sw_fence_notify(fence, FENCE_FREE);
}
static void i915_sw_fence_complete(struct i915_sw_fence *fence)
void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
debug_fence_assert(fence);
@ -202,7 +202,7 @@ static void i915_sw_fence_complete(struct i915_sw_fence *fence)
__i915_sw_fence_complete(fence, NULL);
}
static void i915_sw_fence_await(struct i915_sw_fence *fence)
void i915_sw_fence_await(struct i915_sw_fence *fence)
{
debug_fence_assert(fence);
WARN_ON(atomic_inc_return(&fence->pending) <= 1);


@ -79,6 +79,9 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
unsigned long timeout,
gfp_t gfp);
void i915_sw_fence_await(struct i915_sw_fence *fence);
void i915_sw_fence_complete(struct i915_sw_fence *fence);
static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence)
{
return atomic_read(&fence->pending) <= 0;


@ -6,19 +6,32 @@
#include "i915_drv.h"
#include "i915_timeline.h"
#include "i915_active.h"
#include "i915_syncmap.h"
#include "i915_timeline.h"
#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
struct i915_timeline_hwsp {
struct i915_vma *vma;
struct i915_gt_timelines *gt;
struct list_head free_link;
struct i915_vma *vma;
u64 free_bitmap;
};
static inline struct i915_timeline_hwsp *
i915_timeline_hwsp(const struct i915_timeline *tl)
struct i915_timeline_cacheline {
struct i915_active active;
struct i915_timeline_hwsp *hwsp;
void *vaddr;
#define CACHELINE_BITS 6
#define CACHELINE_FREE CACHELINE_BITS
};
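/*
 * cl->vaddr packs the kmap of the HWSP page with the cacheline index:
 * a 4K page holds BIT(CACHELINE_BITS) == 64 cachelines of 64 bytes, so
 * the index fits in the low six bits (page_pack_bits()), is recovered
 * with ptr_unmask_bits(cl->vaddr, CACHELINE_BITS), and bit CACHELINE_FREE
 * marks a cacheline waiting to be released once its last user idles.
 */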
static inline struct drm_i915_private *
hwsp_to_i915(struct i915_timeline_hwsp *hwsp)
{
return tl->hwsp_ggtt->private;
return container_of(hwsp->gt, struct drm_i915_private, gt.timelines);
}
static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
@ -71,6 +84,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
vma->private = hwsp;
hwsp->vma = vma;
hwsp->free_bitmap = ~0ull;
hwsp->gt = gt;
spin_lock(&gt->hwsp_lock);
list_add(&hwsp->free_link, &gt->hwsp_free_list);
@ -88,14 +102,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
return hwsp->vma;
}
static void hwsp_free(struct i915_timeline *timeline)
static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
{
struct i915_gt_timelines *gt = &timeline->i915->gt.timelines;
struct i915_timeline_hwsp *hwsp;
hwsp = i915_timeline_hwsp(timeline);
if (!hwsp) /* leave global HWSP alone! */
return;
struct i915_gt_timelines *gt = hwsp->gt;
spin_lock(&gt->hwsp_lock);
@ -103,7 +112,8 @@ static void hwsp_free(struct i915_timeline *timeline)
if (!hwsp->free_bitmap)
list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);
hwsp->free_bitmap |= BIT_ULL(timeline->hwsp_offset / CACHELINE_BYTES);
GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
hwsp->free_bitmap |= BIT_ULL(cacheline);
/* And if no one is left using it, give the page back to the system */
if (hwsp->free_bitmap == ~0ull) {
@ -115,6 +125,76 @@ static void hwsp_free(struct i915_timeline *timeline)
spin_unlock(&gt->hwsp_lock);
}
static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
{
GEM_BUG_ON(!i915_active_is_idle(&cl->active));
i915_gem_object_unpin_map(cl->hwsp->vma->obj);
i915_vma_put(cl->hwsp->vma);
__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
i915_active_fini(&cl->active);
kfree(cl);
}
static void __cacheline_retire(struct i915_active *active)
{
struct i915_timeline_cacheline *cl =
container_of(active, typeof(*cl), active);
i915_vma_unpin(cl->hwsp->vma);
if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
__idle_cacheline_free(cl);
}
static struct i915_timeline_cacheline *
cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
{
struct i915_timeline_cacheline *cl;
void *vaddr;
GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));
cl = kmalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
return ERR_PTR(-ENOMEM);
vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
kfree(cl);
return ERR_CAST(vaddr);
}
i915_vma_get(hwsp->vma);
cl->hwsp = hwsp;
cl->vaddr = page_pack_bits(vaddr, cacheline);
i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire);
return cl;
}
static void cacheline_acquire(struct i915_timeline_cacheline *cl)
{
if (cl && i915_active_acquire(&cl->active))
__i915_vma_pin(cl->hwsp->vma);
}
static void cacheline_release(struct i915_timeline_cacheline *cl)
{
if (cl)
i915_active_release(&cl->active);
}
static void cacheline_free(struct i915_timeline_cacheline *cl)
{
GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
if (i915_active_is_idle(&cl->active))
__idle_cacheline_free(cl);
}
int i915_timeline_init(struct drm_i915_private *i915,
struct i915_timeline *timeline,
const char *name,
@ -136,32 +216,44 @@ int i915_timeline_init(struct drm_i915_private *i915,
timeline->name = name;
timeline->pin_count = 0;
timeline->has_initial_breadcrumb = !hwsp;
timeline->hwsp_cacheline = NULL;
timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;
if (!hwsp) {
struct i915_timeline_cacheline *cl;
unsigned int cacheline;
hwsp = hwsp_alloc(timeline, &cacheline);
if (IS_ERR(hwsp))
return PTR_ERR(hwsp);
timeline->hwsp_offset = cacheline * CACHELINE_BYTES;
}
timeline->hwsp_ggtt = i915_vma_get(hwsp);
cl = cacheline_alloc(hwsp->private, cacheline);
if (IS_ERR(cl)) {
__idle_hwsp_free(hwsp->private, cacheline);
return PTR_ERR(cl);
}
vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
hwsp_free(timeline);
i915_vma_put(hwsp);
return PTR_ERR(vaddr);
timeline->hwsp_cacheline = cl;
timeline->hwsp_offset = cacheline * CACHELINE_BYTES;
vaddr = page_mask_bits(cl->vaddr);
} else {
timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;
vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
}
timeline->hwsp_seqno =
memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);
timeline->hwsp_ggtt = i915_vma_get(hwsp);
GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);
timeline->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&timeline->lock);
mutex_init(&timeline->mutex);
INIT_ACTIVE_REQUEST(&timeline->barrier);
INIT_ACTIVE_REQUEST(&timeline->last_request);
@ -239,9 +331,12 @@ void i915_timeline_fini(struct i915_timeline *timeline)
GEM_BUG_ON(i915_active_request_isset(&timeline->barrier));
i915_syncmap_free(&timeline->sync);
hwsp_free(timeline);
i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
if (timeline->hwsp_cacheline)
cacheline_free(timeline->hwsp_cacheline);
else
i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
i915_vma_put(timeline->hwsp_ggtt);
}
@ -284,6 +379,7 @@ int i915_timeline_pin(struct i915_timeline *tl)
i915_ggtt_offset(tl->hwsp_ggtt) +
offset_in_page(tl->hwsp_offset);
cacheline_acquire(tl->hwsp_cacheline);
timeline_add_to_active(tl);
return 0;
@ -293,6 +389,157 @@ unpin:
return err;
}
static u32 timeline_advance(struct i915_timeline *tl)
{
GEM_BUG_ON(!tl->pin_count);
GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
return tl->seqno += 1 + tl->has_initial_breadcrumb;
}
static void timeline_rollback(struct i915_timeline *tl)
{
tl->seqno -= 1 + tl->has_initial_breadcrumb;
}
static noinline int
__i915_timeline_get_seqno(struct i915_timeline *tl,
struct i915_request *rq,
u32 *seqno)
{
struct i915_timeline_cacheline *cl;
unsigned int cacheline;
struct i915_vma *vma;
void *vaddr;
int err;
/*
* If there is an outstanding GPU reference to this cacheline,
* such as it being sampled by a HW semaphore on another timeline,
* we cannot wrap around our seqno value (the HW semaphore does
* a strict greater-than-or-equals compare, not i915_seqno_passed).
* So if the cacheline is still busy, we must detach ourselves
* from it and leave it inflight alongside its users.
*
* However, if nobody is watching and we can guarantee that nobody
* will, we could simply reuse the same cacheline.
*
* if (i915_active_request_is_signaled(&tl->last_request) &&
* i915_active_is_signaled(&tl->hwsp_cacheline->active))
* return 0;
*
* That seems unlikely for a busy timeline that needed to wrap in
* the first place, so just replace the cacheline.
*/
vma = hwsp_alloc(tl, &cacheline);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_rollback;
}
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err) {
__idle_hwsp_free(vma->private, cacheline);
goto err_rollback;
}
cl = cacheline_alloc(vma->private, cacheline);
if (IS_ERR(cl)) {
err = PTR_ERR(cl);
__idle_hwsp_free(vma->private, cacheline);
goto err_unpin;
}
GEM_BUG_ON(cl->hwsp->vma != vma);
/*
* Attach the old cacheline to the current request, so that we only
* free it after the current request is retired, which ensures that
* all writes into the cacheline from previous requests are complete.
*/
err = i915_active_ref(&tl->hwsp_cacheline->active,
tl->fence_context, rq);
if (err)
goto err_cacheline;
cacheline_release(tl->hwsp_cacheline); /* ownership now transferred to rq */
cacheline_free(tl->hwsp_cacheline);
i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
i915_vma_put(tl->hwsp_ggtt);
tl->hwsp_ggtt = i915_vma_get(vma);
vaddr = page_mask_bits(cl->vaddr);
tl->hwsp_offset = cacheline * CACHELINE_BYTES;
tl->hwsp_seqno =
memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);
tl->hwsp_offset += i915_ggtt_offset(vma);
cacheline_acquire(cl);
tl->hwsp_cacheline = cl;
*seqno = timeline_advance(tl);
GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
return 0;
err_cacheline:
cacheline_free(cl);
err_unpin:
i915_vma_unpin(vma);
err_rollback:
timeline_rollback(tl);
return err;
}
int i915_timeline_get_seqno(struct i915_timeline *tl,
struct i915_request *rq,
u32 *seqno)
{
*seqno = timeline_advance(tl);
/* Replace the HWSP on wraparound for HW semaphores */
if (unlikely(!*seqno && tl->hwsp_cacheline))
return __i915_timeline_get_seqno(tl, rq, seqno);
return 0;
}
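/*
 * Worked wrap example (hypothetical values): with no initial breadcrumb
 * the step is 1, so at tl->seqno == 0xffffffff the fast advance returns
 * 0, which triggers the slow path: a fresh, zeroed cacheline is swapped
 * in and timeline_advance() runs again, so the reported seqno is 1 and
 * 0 is never handed out.
 */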
static int cacheline_ref(struct i915_timeline_cacheline *cl,
struct i915_request *rq)
{
return i915_active_ref(&cl->active, rq->fence.context, rq);
}
int i915_timeline_read_hwsp(struct i915_request *from,
struct i915_request *to,
u32 *hwsp)
{
struct i915_timeline_cacheline *cl = from->hwsp_cacheline;
struct i915_timeline *tl = from->timeline;
int err;
GEM_BUG_ON(to->timeline == tl);
mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
err = i915_request_completed(from);
if (!err)
err = cacheline_ref(cl, to);
if (!err) {
if (likely(cl == tl->hwsp_cacheline)) {
*hwsp = tl->hwsp_offset;
} else { /* across a seqno wrap, recover the original offset */
*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
CACHELINE_BYTES;
}
}
mutex_unlock(&tl->mutex);
return err;
}
void i915_timeline_unpin(struct i915_timeline *tl)
{
GEM_BUG_ON(!tl->pin_count);
@ -300,6 +547,7 @@ void i915_timeline_unpin(struct i915_timeline *tl)
return;
timeline_remove_from_active(tl);
cacheline_release(tl->hwsp_cacheline);
/*
* Since this timeline is idle, all barriers upon which we were waiting


@ -25,72 +25,10 @@
#ifndef I915_TIMELINE_H
#define I915_TIMELINE_H
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/lockdep.h>
#include "i915_active.h"
#include "i915_request.h"
#include "i915_syncmap.h"
#include "i915_utils.h"
struct i915_vma;
struct i915_timeline_hwsp;
struct i915_timeline {
u64 fence_context;
u32 seqno;
spinlock_t lock;
#define TIMELINE_CLIENT 0 /* default subclass */
#define TIMELINE_ENGINE 1
unsigned int pin_count;
const u32 *hwsp_seqno;
struct i915_vma *hwsp_ggtt;
u32 hwsp_offset;
bool has_initial_breadcrumb;
/**
* List of breadcrumbs associated with GPU requests currently
* outstanding.
*/
struct list_head requests;
/* Contains an RCU guarded pointer to the last request. No reference is
* held to the request, users must carefully acquire a reference to
* the request using i915_active_request_get_request_rcu(), or hold the
* struct_mutex.
*/
struct i915_active_request last_request;
/**
* We track the most recent seqno that we wait on in every context so
* that we only have to emit a new await and dependency on a more
* recent sync point. As the contexts may be executed out-of-order, we
* have to track each individually and can not rely on an absolute
* global_seqno. When we know that all tracked fences are completed
* (i.e. when the driver is idle), we know that the syncmap is
* redundant and we can discard it without loss of generality.
*/
struct i915_syncmap *sync;
/**
* Barrier provides the ability to serialize ordering between different
* timelines.
*
* Users can call i915_timeline_set_barrier which will make all
* subsequent submissions to this timeline be executed only after the
* barrier has been completed.
*/
struct i915_active_request barrier;
struct list_head link;
const char *name;
struct drm_i915_private *i915;
struct kref kref;
};
#include "i915_timeline_types.h"
int i915_timeline_init(struct drm_i915_private *i915,
struct i915_timeline *tl,
@ -160,8 +98,15 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
}
int i915_timeline_pin(struct i915_timeline *tl);
int i915_timeline_get_seqno(struct i915_timeline *tl,
struct i915_request *rq,
u32 *seqno);
void i915_timeline_unpin(struct i915_timeline *tl);
int i915_timeline_read_hwsp(struct i915_request *from,
struct i915_request *until,
u32 *hwsp_offset);
void i915_timelines_init(struct drm_i915_private *i915);
void i915_timelines_park(struct drm_i915_private *i915);
void i915_timelines_fini(struct drm_i915_private *i915);


@ -0,0 +1,80 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2016 Intel Corporation
*/
#ifndef __I915_TIMELINE_TYPES_H__
#define __I915_TIMELINE_TYPES_H__
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/types.h>
#include "i915_active.h"
struct drm_i915_private;
struct i915_vma;
struct i915_timeline_cacheline;
struct i915_syncmap;
struct i915_timeline {
u64 fence_context;
u32 seqno;
spinlock_t lock;
#define TIMELINE_CLIENT 0 /* default subclass */
#define TIMELINE_ENGINE 1
struct mutex mutex; /* protects the flow of requests */
unsigned int pin_count;
const u32 *hwsp_seqno;
struct i915_vma *hwsp_ggtt;
u32 hwsp_offset;
struct i915_timeline_cacheline *hwsp_cacheline;
bool has_initial_breadcrumb;
/**
* List of breadcrumbs associated with GPU requests currently
* outstanding.
*/
struct list_head requests;
/* Contains an RCU guarded pointer to the last request. No reference is
* held to the request, users must carefully acquire a reference to
* the request using i915_active_request_get_request_rcu(), or hold the
* struct_mutex.
*/
struct i915_active_request last_request;
/**
* We track the most recent seqno that we wait on in every context so
* that we only have to emit a new await and dependency on a more
* recent sync point. As the contexts may be executed out-of-order, we
* have to track each individually and can not rely on an absolute
* global_seqno. When we know that all tracked fences are completed
* (i.e. when the driver is idle), we know that the syncmap is
* redundant and we can discard it without loss of generality.
*/
struct i915_syncmap *sync;
/**
* Barrier provides the ability to serialize ordering between different
* timelines.
*
* Users can call i915_timeline_set_barrier which will make all
* subsequent submissions to this timeline be executed only after the
* barrier has been completed.
*/
struct i915_active_request barrier;
struct list_head link;
const char *name;
struct drm_i915_private *i915;
struct kref kref;
};
#endif /* __I915_TIMELINE_TYPES_H__ */


@ -18,6 +18,87 @@
/* watermark/fifo updates */
TRACE_EVENT(intel_pipe_enable,
TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
TP_ARGS(dev_priv, pipe),
TP_STRUCT__entry(
__array(u32, frame, 3)
__array(u32, scanline, 3)
__field(enum pipe, pipe)
),
TP_fast_assign(
enum pipe _pipe;
for_each_pipe(dev_priv, _pipe) {
__entry->frame[_pipe] =
dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
__entry->scanline[_pipe] =
intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
}
__entry->pipe = pipe;
),
TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
pipe_name(__entry->pipe),
__entry->frame[PIPE_A], __entry->scanline[PIPE_A],
__entry->frame[PIPE_B], __entry->scanline[PIPE_B],
__entry->frame[PIPE_C], __entry->scanline[PIPE_C])
);
TRACE_EVENT(intel_pipe_disable,
TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
TP_ARGS(dev_priv, pipe),
TP_STRUCT__entry(
__array(u32, frame, 3)
__array(u32, scanline, 3)
__field(enum pipe, pipe)
),
TP_fast_assign(
enum pipe _pipe;
for_each_pipe(dev_priv, _pipe) {
__entry->frame[_pipe] =
dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
__entry->scanline[_pipe] =
intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
}
__entry->pipe = pipe;
),
TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
pipe_name(__entry->pipe),
__entry->frame[PIPE_A], __entry->scanline[PIPE_A],
__entry->frame[PIPE_B], __entry->scanline[PIPE_B],
__entry->frame[PIPE_C], __entry->scanline[PIPE_C])
);
TRACE_EVENT(intel_pipe_crc,
TP_PROTO(struct intel_crtc *crtc, const u32 *crcs),
TP_ARGS(crtc, crcs),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
__array(u32, crcs, 5)
),
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
crtc->pipe);
__entry->scanline = intel_get_crtc_scanline(crtc);
memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
),
TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x",
pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
__entry->crcs[0], __entry->crcs[1], __entry->crcs[2],
__entry->crcs[3], __entry->crcs[4])
);
TRACE_EVENT(intel_cpu_fifo_underrun,
TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
TP_ARGS(dev_priv, pipe),
@ -627,7 +708,6 @@ DECLARE_EVENT_CLASS(i915_request,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
__field(u32, global)
),
TP_fast_assign(
@ -637,13 +717,11 @@ DECLARE_EVENT_CLASS(i915_request,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global = rq->global_seqno;
),
TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u",
TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
__entry->global)
__entry->hw_id, __entry->ctx, __entry->seqno)
);
DEFINE_EVENT(i915_request, i915_request_add,
@ -673,7 +751,6 @@ TRACE_EVENT(i915_request_in,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
__field(u32, global_seqno)
__field(u32, port)
__field(u32, prio)
),
@ -685,15 +762,14 @@ TRACE_EVENT(i915_request_in,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global_seqno = rq->global_seqno;
__entry->prio = rq->sched.attr.priority;
__entry->port = port;
),
TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u",
TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
__entry->prio, __entry->global_seqno, __entry->port)
__entry->prio, __entry->port)
);
TRACE_EVENT(i915_request_out,
@ -707,7 +783,6 @@ TRACE_EVENT(i915_request_out,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
__field(u32, global_seqno)
__field(u32, completed)
),
@ -718,14 +793,13 @@ TRACE_EVENT(i915_request_out,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global_seqno = rq->global_seqno;
__entry->completed = i915_request_completed(rq);
),
TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u",
TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, completed?=%u",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
__entry->global_seqno, __entry->completed)
__entry->completed)
);
#else
@ -768,7 +842,6 @@ TRACE_EVENT(i915_request_wait_begin,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
__field(u32, global)
__field(unsigned int, flags)
),
@ -785,14 +858,13 @@ TRACE_EVENT(i915_request_wait_begin,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global = rq->global_seqno;
__entry->flags = flags;
),
TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x",
TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, blocking=%u, flags=0x%x",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
__entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
!!(__entry->flags & I915_WAIT_LOCKED),
__entry->flags)
);


@ -81,9 +81,9 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
}
bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv)
bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
{
return dev_priv->vgpu.caps & VGT_CAPS_FULL_48BIT_PPGTT;
return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT;
}
struct _balloon_info_ {


@ -28,7 +28,7 @@
void i915_check_vgpu(struct drm_i915_private *dev_priv);
bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv);
bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
static inline bool
intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)


@ -25,11 +25,27 @@
#include "i915_vma.h"
#include "i915_drv.h"
#include "i915_globals.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"
#include <drm/drm_gem.h>
static struct i915_global_vma {
struct i915_global base;
struct kmem_cache *slab_vmas;
} global;
struct i915_vma *i915_vma_alloc(void)
{
return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}
void i915_vma_free(struct i915_vma *vma)
{
return kmem_cache_free(global.slab_vmas, vma);
}
#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
#include <linux/stackdepot.h>
@ -115,7 +131,7 @@ vma_create(struct drm_i915_gem_object *obj,
/* The aliasing_ppgtt should never be used directly! */
GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
vma = i915_vma_alloc();
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@ -190,7 +206,7 @@ vma_create(struct drm_i915_gem_object *obj,
cmp = i915_vma_compare(pos, vm, view);
if (cmp == 0) {
spin_unlock(&obj->vma.lock);
kmem_cache_free(vm->i915->vmas, vma);
i915_vma_free(vma);
return pos;
}
@ -222,7 +238,7 @@ vma_create(struct drm_i915_gem_object *obj,
return vma;
err_vma:
kmem_cache_free(vm->i915->vmas, vma);
i915_vma_free(vma);
return ERR_PTR(-E2BIG);
}
@ -803,8 +819,6 @@ void i915_vma_reopen(struct i915_vma *vma)
static void __i915_vma_destroy(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(vma->fence);
@ -825,7 +839,7 @@ static void __i915_vma_destroy(struct i915_vma *vma)
i915_active_fini(&vma->active);
kmem_cache_free(i915->vmas, vma);
i915_vma_free(vma);
}
void i915_vma_destroy(struct i915_vma *vma)
@ -1041,3 +1055,28 @@ unpin:
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif
static void i915_global_vma_shrink(void)
{
kmem_cache_shrink(global.slab_vmas);
}
static void i915_global_vma_exit(void)
{
kmem_cache_destroy(global.slab_vmas);
}
static struct i915_global_vma global = { {
.shrink = i915_global_vma_shrink,
.exit = i915_global_vma_exit,
} };
int __init i915_global_vma_init(void)
{
global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
if (!global.slab_vmas)
return -ENOMEM;
i915_global_register(&global.base);
return 0;
}


@ -440,4 +440,7 @@ void i915_vma_parked(struct drm_i915_private *i915);
list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \
for_each_until(!i915_vma_is_ggtt(V))
struct i915_vma *i915_vma_alloc(void);
void i915_vma_free(struct i915_vma *vma);
#endif


@ -246,13 +246,13 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
for (lane = 0; lane <= 3; lane++) {
/* Bspec: must not use GRP register for write */
tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
}
}
}
@ -390,11 +390,11 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
tmp &= ~LOADGEN_SELECT;
I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
for (lane = 0; lane <= 3; lane++) {
tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
tmp &= ~LOADGEN_SELECT;
if (lane != 2)
tmp |= LOADGEN_SELECT;
I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
}
}


@ -121,6 +121,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->active_planes &= ~BIT(plane->id);
new_crtc_state->nv12_planes &= ~BIT(plane->id);
new_crtc_state->c8_planes &= ~BIT(plane->id);
new_plane_state->base.visible = false;
if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
@ -138,6 +139,10 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
is_planar_yuv_format(new_plane_state->base.fb->format->format))
new_crtc_state->nv12_planes |= BIT(plane->id);
if (new_plane_state->base.visible &&
new_plane_state->base.fb->format->format == DRM_FORMAT_C8)
new_crtc_state->c8_planes |= BIT(plane->id);
if (new_plane_state->base.visible || old_plane_state->base.visible)
new_crtc_state->update_planes |= BIT(plane->id);
@ -214,6 +219,35 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
return NULL;
}
void intel_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
trace_intel_update_plane(&plane->base, crtc);
plane->update_plane(plane, crtc_state, plane_state);
}
void intel_update_slave(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
trace_intel_update_plane(&plane->base, crtc);
plane->update_slave(plane, crtc_state, plane_state);
}
void intel_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
trace_intel_disable_plane(&plane->base, crtc);
plane->disable_plane(plane, crtc_state);
}
void skl_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@ -238,8 +272,7 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
intel_atomic_get_new_plane_state(state, plane);
if (new_plane_state->base.visible) {
trace_intel_update_plane(&plane->base, crtc);
plane->update_plane(plane, new_crtc_state, new_plane_state);
intel_update_plane(plane, new_crtc_state, new_plane_state);
} else if (new_plane_state->slave) {
struct intel_plane *master =
new_plane_state->linked_plane;
@ -256,11 +289,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
new_plane_state =
intel_atomic_get_new_plane_state(state, master);
trace_intel_update_plane(&plane->base, crtc);
plane->update_slave(plane, new_crtc_state, new_plane_state);
intel_update_slave(plane, new_crtc_state, new_plane_state);
} else {
trace_intel_disable_plane(&plane->base, crtc);
plane->disable_plane(plane, new_crtc_state);
intel_disable_plane(plane, new_crtc_state);
}
}
}
@ -280,13 +311,10 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
!(update_mask & BIT(plane->id)))
continue;
if (new_plane_state->base.visible) {
trace_intel_update_plane(&plane->base, crtc);
plane->update_plane(plane, new_crtc_state, new_plane_state);
} else {
trace_intel_disable_plane(&plane->base, crtc);
plane->disable_plane(plane, new_crtc_state);
}
if (new_plane_state->base.visible)
intel_update_plane(plane, new_crtc_state, new_plane_state);
else
intel_disable_plane(plane, new_crtc_state);
}
}


@ -741,27 +741,31 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
}
}
static void i915_audio_component_get_power(struct device *kdev)
static unsigned long i915_audio_component_get_power(struct device *kdev)
{
intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
/* Catch potential impedance mismatches before they occur! */
BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
return intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
}
static void i915_audio_component_put_power(struct device *kdev)
static void i915_audio_component_put_power(struct device *kdev,
unsigned long cookie)
{
intel_display_power_put_unchecked(kdev_to_i915(kdev),
POWER_DOMAIN_AUDIO);
intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO, cookie);
}
static void i915_audio_component_codec_wake_override(struct device *kdev,
bool enable)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
unsigned long cookie;
u32 tmp;
if (!IS_GEN(dev_priv, 9))
return;
i915_audio_component_get_power(kdev);
cookie = i915_audio_component_get_power(kdev);
/*
* Enable/disable generating the codec wake signal, overriding the
@ -779,7 +783,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
usleep_range(1000, 1500);
}
i915_audio_component_put_power(kdev);
i915_audio_component_put_power(kdev, cookie);
}
/* Get CDCLK in kHz */
@ -850,12 +854,13 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
struct i915_audio_component *acomp = dev_priv->audio_component;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
unsigned long cookie;
int err = 0;
if (!HAS_DDI(dev_priv))
return 0;
i915_audio_component_get_power(kdev);
cookie = i915_audio_component_get_power(kdev);
mutex_lock(&dev_priv->av_mutex);
/* 1. get the pipe */
@ -875,7 +880,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
unlock:
mutex_unlock(&dev_priv->av_mutex);
i915_audio_component_put_power(kdev);
i915_audio_component_put_power(kdev, cookie);
return err;
}


@ -760,6 +760,31 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
}
if (bdb->version >= 226) {
u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time;
wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
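/* Two bits per panel: e.g. panel_type 2 selects bits 5:4 of the field. */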
switch (wakeup_time) {
case 0:
wakeup_time = 500;
break;
case 1:
wakeup_time = 100;
break;
case 3:
wakeup_time = 50;
break;
default:
case 2:
wakeup_time = 2500;
break;
}
dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
} else {
/* Reusing PSR1 wakeup time for PSR2 in older VBTs */
dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = dev_priv->vbt.psr.tp2_tp3_wakeup_time_us;
}
}
static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
@ -2094,8 +2119,8 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
dvo_port = child->dvo_port;
if (dvo_port == DVO_PORT_MIPIA ||
(dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) ||
(dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) {
(dvo_port == DVO_PORT_MIPIB && INTEL_GEN(dev_priv) >= 11) ||
(dvo_port == DVO_PORT_MIPIC && INTEL_GEN(dev_priv) < 11)) {
if (port)
*port = dvo_port - DVO_PORT_MIPIA;
return true;


@ -468,7 +468,7 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->vco);
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
mutex_unlock(&dev_priv->pcu_lock);
if (IS_VALLEYVIEW(dev_priv))
@ -543,11 +543,11 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
@ -624,11 +624,11 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
@ -2560,7 +2560,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
*/
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
if (IS_ICELAKE(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 11) {
if (dev_priv->cdclk.hw.ref == 24000)
dev_priv->max_cdclk_freq = 648000;
else
@ -2668,7 +2668,7 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
fraction) - 1);
if (HAS_PCH_ICP(dev_priv))
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
rawclk |= ICP_RAWCLK_NUM(numerator);
}
@ -2723,7 +2723,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
*/
void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv))
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
dev_priv->rawclk_freq = pch_rawclk(dev_priv);
@ -2744,18 +2744,13 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv)
*/
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.set_cdclk = chv_set_cdclk;
if (INTEL_GEN(dev_priv) >= 11) {
dev_priv->display.set_cdclk = icl_set_cdclk;
dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
} else if (IS_CANNONLAKE(dev_priv)) {
dev_priv->display.set_cdclk = cnl_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
vlv_modeset_calc_cdclk;
} else if (IS_VALLEYVIEW(dev_priv)) {
dev_priv->display.set_cdclk = vlv_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
vlv_modeset_calc_cdclk;
} else if (IS_BROADWELL(dev_priv)) {
dev_priv->display.set_cdclk = bdw_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
bdw_modeset_calc_cdclk;
cnl_modeset_calc_cdclk;
} else if (IS_GEN9_LP(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
@ -2764,23 +2759,28 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.set_cdclk = skl_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
skl_modeset_calc_cdclk;
} else if (IS_CANNONLAKE(dev_priv)) {
dev_priv->display.set_cdclk = cnl_set_cdclk;
} else if (IS_BROADWELL(dev_priv)) {
dev_priv->display.set_cdclk = bdw_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
cnl_modeset_calc_cdclk;
} else if (IS_ICELAKE(dev_priv)) {
dev_priv->display.set_cdclk = icl_set_cdclk;
dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
bdw_modeset_calc_cdclk;
} else if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.set_cdclk = chv_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
vlv_modeset_calc_cdclk;
} else if (IS_VALLEYVIEW(dev_priv)) {
dev_priv->display.set_cdclk = vlv_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
vlv_modeset_calc_cdclk;
}
if (IS_ICELAKE(dev_priv))
if (INTEL_GEN(dev_priv) >= 11)
dev_priv->display.get_cdclk = icl_get_cdclk;
else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.get_cdclk = cnl_get_cdclk;
else if (IS_GEN9_BC(dev_priv))
dev_priv->display.get_cdclk = skl_get_cdclk;
else if (IS_GEN9_LP(dev_priv))
dev_priv->display.get_cdclk = bxt_get_cdclk;
else if (IS_GEN9_BC(dev_priv))
dev_priv->display.get_cdclk = skl_get_cdclk;
else if (IS_BROADWELL(dev_priv))
dev_priv->display.get_cdclk = bdw_get_cdclk;
else if (IS_HASWELL(dev_priv))


@ -40,23 +40,6 @@
#define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1))
#define LEGACY_LUT_LENGTH 256
/* Post offset values for RGB->YCBCR conversion */
#define POSTOFF_RGB_TO_YUV_HI 0x800
#define POSTOFF_RGB_TO_YUV_ME 0x100
#define POSTOFF_RGB_TO_YUV_LO 0x800
/*
* These values are direct register values specified in the Bspec,
* for RGB->YUV conversion matrix (colorspace BT709)
*/
#define CSC_RGB_TO_YUV_RU_GU 0x2ba809d8
#define CSC_RGB_TO_YUV_BU 0x37e80000
#define CSC_RGB_TO_YUV_RY_GY 0x1e089cc0
#define CSC_RGB_TO_YUV_BY 0xb5280000
#define CSC_RGB_TO_YUV_RV_GV 0xbce89ad8
#define CSC_RGB_TO_YUV_BV 0x1e080000
/*
* Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
* format). This macro takes the coefficient we want transformed and the
@ -69,10 +52,45 @@
#define ILK_CSC_COEFF_FP(coeff, fbits) \
(clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)
#define ILK_CSC_COEFF_LIMITED_RANGE \
ILK_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9)
#define ILK_CSC_COEFF_1_0 \
((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
#define ILK_CSC_COEFF_LIMITED_RANGE 0x0dc0
#define ILK_CSC_COEFF_1_0 0x7800
#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255)
static const u16 ilk_csc_off_zero[3] = {};
static const u16 ilk_csc_coeff_identity[9] = {
ILK_CSC_COEFF_1_0, 0, 0,
0, ILK_CSC_COEFF_1_0, 0,
0, 0, ILK_CSC_COEFF_1_0,
};
static const u16 ilk_csc_postoff_limited_range[3] = {
ILK_CSC_POSTOFF_LIMITED_RANGE,
ILK_CSC_POSTOFF_LIMITED_RANGE,
ILK_CSC_POSTOFF_LIMITED_RANGE,
};
static const u16 ilk_csc_coeff_limited_range[9] = {
ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
};
/*
* These values are direct register values specified in the Bspec,
* for RGB->YUV conversion matrix (colorspace BT709)
*/
static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = {
0x1e08, 0x9cc0, 0xb528,
0x2ba8, 0x09d8, 0x37e8,
0xbce8, 0x9ad8, 0x1e08,
};
/* Post offset values for RGB->YCBCR conversion */
static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
0x0800, 0x0100, 0x0800,
};
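
An aside on the magic numbers above: 0x0dc0 and 0x7800 are simply the pre-computed outputs of the (now narrower) ILK_CSC_COEFF_FP() helper for the limited-range and 1.0 coefficients, and the post offset is 16 * 4096 / 255 = 257. A minimal standalone userspace check, assuming the kernel's CTM_COEFF_* definitions (not shown in this hunk):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* S31.32 fixed-point constants, as defined by the CTM code */
#define CTM_COEFF_1_0           (1ULL << 32)
#define CTM_COEFF_LIMITED_RANGE ((235ULL - 16ULL) * CTM_COEFF_1_0 / 255)

/* Userspace copy of ILK_CSC_COEFF_FP(): mantissa lands in bits 11:3 */
static uint16_t csc_coeff_fp(uint64_t coeff, int fbits)
{
        uint64_t v = (coeff >> (32 - fbits - 3)) + 4;

        if (v > 0xfff)
                v = 0xfff;
        return v & 0xff8;
}

int main(void)
{
        /* 219/255 limited-range scale with 9 fractional bits */
        assert(csc_coeff_fp(CTM_COEFF_LIMITED_RANGE, 9) == 0x0dc0);
        /* 1.0 with exponent field 7 and 8 fractional bits */
        assert(((7 << 12) | csc_coeff_fp(CTM_COEFF_1_0, 8)) == 0x7800);
        /* post offset: 16/255 expressed in the 0.12 register format */
        assert(16 * (1 << 12) / 255 == 257);
        printf("CSC literals check out\n");
        return 0;
}
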
static bool lut_is_legacy(const struct drm_property_blob *lut)
{
@ -113,145 +131,180 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
return result;
}
static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *crtc)
static void ilk_update_pipe_csc(struct intel_crtc *crtc,
const u16 preoff[3],
const u16 coeff[9],
const u16 postoff[3])
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), CSC_RGB_TO_YUV_RU_GU);
I915_WRITE(PIPE_CSC_COEFF_BU(pipe), CSC_RGB_TO_YUV_BU);
I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), CSC_RGB_TO_YUV_RY_GY);
I915_WRITE(PIPE_CSC_COEFF_BY(pipe), CSC_RGB_TO_YUV_BY);
I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), CSC_RGB_TO_YUV_RV_GV);
I915_WRITE(PIPE_CSC_COEFF_BV(pipe), CSC_RGB_TO_YUV_BV);
I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), POSTOFF_RGB_TO_YUV_HI);
I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), POSTOFF_RGB_TO_YUV_ME);
I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), POSTOFF_RGB_TO_YUV_LO);
I915_WRITE(PIPE_CSC_MODE(pipe), 0);
if (INTEL_GEN(dev_priv) >= 7) {
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff[0]);
I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff[1]);
I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff[2]);
}
}
static void icl_update_output_csc(struct intel_crtc *crtc,
const u16 preoff[3],
const u16 coeff[9],
const u16 postoff[3])
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BY(pipe), coeff[2]);
I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BU(pipe), coeff[5]);
I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BV(pipe), coeff[8]);
I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
}
static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
/*
* FIXME if there's a gamma LUT after the CSC, we should
* do the range compression using the gamma LUT instead.
*/
return crtc_state->limited_color_range &&
(IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
IS_GEN_RANGE(dev_priv, 9, 10));
}
static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
u16 coeffs[9])
{
const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
const u64 *input;
u64 temp[9];
int i;
if (ilk_csc_limited_range(crtc_state))
input = ctm_mult_by_limited(temp, ctm->matrix);
else
input = ctm->matrix;
/*
* Convert fixed point S31.32 input to format supported by the
* hardware.
*/
for (i = 0; i < 9; i++) {
u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
/*
* Clamp input value to min/max supported by
* hardware.
*/
abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
coeffs[i] = 0;
/* sign bit */
if (CTM_COEFF_NEGATIVE(input[i]))
coeffs[i] |= 1 << 15;
if (abs_coeff < CTM_COEFF_0_125)
coeffs[i] |= (3 << 12) |
ILK_CSC_COEFF_FP(abs_coeff, 12);
else if (abs_coeff < CTM_COEFF_0_25)
coeffs[i] |= (2 << 12) |
ILK_CSC_COEFF_FP(abs_coeff, 11);
else if (abs_coeff < CTM_COEFF_0_5)
coeffs[i] |= (1 << 12) |
ILK_CSC_COEFF_FP(abs_coeff, 10);
else if (abs_coeff < CTM_COEFF_1_0)
coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
else if (abs_coeff < CTM_COEFF_2_0)
coeffs[i] |= (7 << 12) |
ILK_CSC_COEFF_FP(abs_coeff, 8);
else
coeffs[i] |= (6 << 12) |
ILK_CSC_COEFF_FP(abs_coeff, 7);
}
}
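
Tracing a couple of values through this helper makes the bucketed float format concrete: 0.5 (S31.32 value 1ULL << 31) is below CTM_COEFF_1_0, so it encodes as ILK_CSC_COEFF_FP(1ULL << 31, 9) = (2048 + 4) & 0xff8 = 0x0800; -1.25 sets the sign bit and lands in the below-2.0 bucket, giving 0x8000 | (7 << 12) | 0x0a00 = 0xfa00. (The clamp above caps the magnitude just below 4.0 first.)
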
static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool limited_color_range = false;
enum pipe pipe = crtc->pipe;
u16 coeffs[9] = {};
int i;
bool limited_color_range = ilk_csc_limited_range(crtc_state);
/*
* FIXME if there's a gamma LUT after the CSC, we should
* do the range compression using the gamma LUT instead.
*/
if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
limited_color_range = crtc_state->limited_color_range;
if (crtc_state->base.ctm) {
u16 coeff[9];
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
ilk_load_ycbcr_conversion_matrix(crtc);
return;
} else if (crtc_state->base.ctm) {
struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
const u64 *input;
u64 temp[9];
if (limited_color_range)
input = ctm_mult_by_limited(temp, ctm->matrix);
else
input = ctm->matrix;
/*
* Convert fixed point S31.32 input to format supported by the
* hardware.
*/
for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
/*
* Clamp input value to min/max supported by
* hardware.
*/
abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
/* sign bit */
if (CTM_COEFF_NEGATIVE(input[i]))
coeffs[i] |= 1 << 15;
if (abs_coeff < CTM_COEFF_0_125)
coeffs[i] |= (3 << 12) |
ILK_CSC_COEFF_FP(abs_coeff, 12);
else if (abs_coeff < CTM_COEFF_0_25)
coeffs[i] |= (2 << 12) |
ILK_CSC_COEFF_FP(abs_coeff, 11);
else if (abs_coeff < CTM_COEFF_0_5)
coeffs[i] |= (1 << 12) |
ILK_CSC_COEFF_FP(abs_coeff, 10);
else if (abs_coeff < CTM_COEFF_1_0)
coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
else if (abs_coeff < CTM_COEFF_2_0)
coeffs[i] |= (7 << 12) |
ILK_CSC_COEFF_FP(abs_coeff, 8);
else
coeffs[i] |= (6 << 12) |
ILK_CSC_COEFF_FP(abs_coeff, 7);
}
} else {
/*
* Load an identity matrix if no coefficients are provided.
*
* TODO: Check what kind of values actually come out of the
* pipe with these coeff/postoff values and adjust to get the
* best accuracy. Perhaps we even need to take the bpc value
* into consideration.
*/
for (i = 0; i < 3; i++) {
if (limited_color_range)
coeffs[i * 3 + i] =
ILK_CSC_COEFF_LIMITED_RANGE;
else
coeffs[i * 3 + i] = ILK_CSC_COEFF_1_0;
}
ilk_csc_convert_ctm(crtc_state, coeff);
ilk_update_pipe_csc(crtc, ilk_csc_off_zero, coeff,
limited_color_range ?
ilk_csc_postoff_limited_range :
ilk_csc_off_zero);
} else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
ilk_csc_coeff_rgb_to_ycbcr,
ilk_csc_postoff_rgb_to_ycbcr);
} else if (limited_color_range) {
ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
ilk_csc_coeff_limited_range,
ilk_csc_postoff_limited_range);
} else if (crtc_state->csc_enable) {
ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
ilk_csc_coeff_identity,
ilk_csc_off_zero);
}
I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeffs[0] << 16 | coeffs[1]);
I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeffs[2] << 16);
I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
}
I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeffs[3] << 16 | coeffs[4]);
I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeffs[5] << 16);
static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeffs[6] << 16 | coeffs[7]);
I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeffs[8] << 16);
if (crtc_state->base.ctm) {
u16 coeff[9];
I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
if (INTEL_GEN(dev_priv) > 6) {
u16 postoff = 0;
if (limited_color_range)
postoff = (16 * (1 << 12) / 255) & 0x1fff;
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
I915_WRITE(PIPE_CSC_MODE(pipe), 0);
} else {
u32 mode = CSC_MODE_YUV_TO_RGB;
if (limited_color_range)
mode |= CSC_BLACK_SCREEN_OFFSET;
I915_WRITE(PIPE_CSC_MODE(pipe), mode);
ilk_csc_convert_ctm(crtc_state, coeff);
ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
coeff, ilk_csc_off_zero);
}
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
icl_update_output_csc(crtc, ilk_csc_off_zero,
ilk_csc_coeff_rgb_to_ycbcr,
ilk_csc_postoff_rgb_to_ycbcr);
} else if (crtc_state->limited_color_range) {
icl_update_output_csc(crtc, ilk_csc_off_zero,
ilk_csc_coeff_limited_range,
ilk_csc_postoff_limited_range);
}
I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
}
/*
@ -262,7 +315,6 @@ static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 mode;
if (crtc_state->base.ctm) {
const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
@ -296,12 +348,7 @@ static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state
I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
}
mode = (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0);
if (!crtc_state_is_legacy_gamma(crtc_state)) {
mode |= (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
(crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
}
I915_WRITE(CGM_PIPE_MODE(pipe), mode);
I915_WRITE(CGM_PIPE_MODE(pipe), crtc_state->cgm_mode);
}
/* Loads the legacy palette/gamma unit for the CRTC. */
@ -351,6 +398,32 @@ static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
}
static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val;
val = I915_READ(PIPECONF(pipe));
val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX;
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
I915_WRITE(PIPECONF(pipe), val);
}
static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val;
val = I915_READ(PIPECONF(pipe));
val &= ~PIPECONF_GAMMA_MODE_MASK_ILK;
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
I915_WRITE(PIPECONF(pipe), val);
}
static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
@ -361,6 +434,32 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
ilk_load_csc_matrix(crtc_state);
}
static void skl_color_commit(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val = 0;
/*
* We don't (yet) allow userspace to control the pipe background color,
* so force it to black, but apply pipe gamma and CSC appropriately
* so that its handling will match how we program our planes.
*/
if (crtc_state->gamma_enable)
val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE;
if (crtc_state->csc_enable)
val |= SKL_BOTTOM_COLOR_CSC_ENABLE;
I915_WRITE(SKL_BOTTOM_COLOR(pipe), val);
I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
if (INTEL_GEN(dev_priv) >= 11)
icl_load_csc_matrix(crtc_state);
else
ilk_load_csc_matrix(crtc_state);
}
static void bdw_load_degamma_lut(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
@ -440,6 +539,12 @@ static void bdw_load_gamma_lut(const struct intel_crtc_state *crtc_state, u32 of
I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), (1 << 16) - 1);
I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), (1 << 16) - 1);
}
/*
* Reset the index, otherwise it prevents the legacy palette
* from being written properly.
*/
I915_WRITE(PREC_PAL_INDEX(pipe), 0);
}
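
The PREC_PAL_INDEX reset above guards against a classic index/data register pitfall. A toy model of an auto-incrementing index/data pair (names and sizes illustrative, not the real register layout):

#include <stdint.h>
#include <stdio.h>

static unsigned int pal_index;
static uint32_t pal[1024];

static void write_index(unsigned int v) { pal_index = v; }
static void write_data(uint32_t v)      { pal[pal_index++] = v; }

int main(void)
{
        write_index(0);
        for (int i = 0; i < 512; i++)
                write_data(i);  /* the index now points past the table */

        /*
         * Any later programming that assumes the index starts at 0
         * (such as the legacy palette path) would write to the wrong
         * entries - hence the trailing PREC_PAL_INDEX reset above.
         */
        write_index(0);
        printf("index reset to %u\n", pal_index);
        return 0;
}
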
/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
@ -447,7 +552,6 @@ static void broadwell_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (crtc_state_is_legacy_gamma(crtc_state)) {
i9xx_load_luts(crtc_state);
@ -455,12 +559,6 @@ static void broadwell_load_luts(const struct intel_crtc_state *crtc_state)
bdw_load_degamma_lut(crtc_state);
bdw_load_gamma_lut(crtc_state,
INTEL_INFO(dev_priv)->color.degamma_lut_size);
/*
* Reset the index, otherwise it prevents the legacy palette
* from being written properly.
*/
I915_WRITE(PREC_PAL_INDEX(pipe), 0);
}
}
@ -469,7 +567,7 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
const u32 lut_size = 33;
const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
u32 i;
/*
@ -480,14 +578,32 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
/*
* FIXME: The pipe degamma table in geminilake doesn't support
* different values per channel, so this just loads a linear table.
*/
for (i = 0; i < lut_size; i++) {
u32 v = (i * (1 << 16)) / (lut_size - 1);
if (crtc_state->base.degamma_lut) {
struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
for (i = 0; i < lut_size; i++) {
/*
* The first 33 entries represent the range 0 to 1.0; the 34th
* and 35th entries represent the extended-range inputs 3.0 and
* 7.0 respectively, currently clamped at 1.0. Since the
* precision is 16 bit, the user value can be written directly
* to the register.
* The pipe degamma table on GLK+ doesn't support different
* values per channel, so this just programs the green value,
* which equals red and blue, into the LUT registers.
* ToDo: extend to the maximum of 7.0 by accepting a 32 bit
* input value instead of just 16.
*/
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green);
}
} else {
/* load a linear table. */
for (i = 0; i < lut_size; i++) {
u32 v = (i * (1 << 16)) / (lut_size - 1);
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
}
}
/* Clamp values > 1.0. */
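
As a worked example of the linear fallback, assuming the GLK degamma_lut_size of 33 from the device info tables (not shown in this hunk): v = i * (1 << 16) / 32, i.e. steps of 2048 running 0, 2048, ..., 65536, so the final in-range entry encodes exactly 1.0 in the extended-range input format.
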
@ -497,23 +613,23 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
glk_load_degamma_lut(crtc_state);
if (crtc_state_is_legacy_gamma(crtc_state)) {
if (crtc_state_is_legacy_gamma(crtc_state))
i9xx_load_luts(crtc_state);
} else {
else
bdw_load_gamma_lut(crtc_state, 0);
}
/*
* Reset the index, otherwise it prevents the legacy palette
* from being written properly.
*/
I915_WRITE(PREC_PAL_INDEX(pipe), 0);
}
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
glk_load_degamma_lut(crtc_state);
if (crtc_state_is_legacy_gamma(crtc_state))
i9xx_load_luts(crtc_state);
else
/* ToDo: Add support for multi segment gamma LUT */
bdw_load_gamma_lut(crtc_state, 0);
}
static void cherryview_load_luts(const struct intel_crtc_state *crtc_state)
@ -585,8 +701,57 @@ void intel_color_commit(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
if (dev_priv->display.color_commit)
dev_priv->display.color_commit(crtc_state);
dev_priv->display.color_commit(crtc_state);
}
static bool need_plane_update(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
/*
* On pre-SKL the pipe gamma enable and pipe csc enable for
* the pipe bottom color are configured via the primary plane.
* We have to reconfigure that even if the plane is inactive.
*/
return crtc_state->active_planes & BIT(plane->id) ||
(INTEL_GEN(dev_priv) < 9 &&
plane->id == PLANE_PRIMARY);
}
static int
intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
to_intel_atomic_state(new_crtc_state->base.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_plane *plane;
if (!new_crtc_state->base.active ||
drm_atomic_crtc_needs_modeset(&new_crtc_state->base))
return 0;
if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable &&
new_crtc_state->csc_enable == old_crtc_state->csc_enable)
return 0;
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
struct intel_plane_state *plane_state;
if (!need_plane_update(plane, new_crtc_state))
continue;
plane_state = intel_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane->id);
}
return 0;
}
static int check_lut_size(const struct drm_property_blob *lut, int expected)
@ -606,22 +771,70 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
return 0;
}
static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
{
u32 cgm_mode = 0;
if (crtc_state_is_legacy_gamma(crtc_state))
return 0;
if (crtc_state->base.degamma_lut)
cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
if (crtc_state->base.ctm)
cgm_mode |= CGM_PIPE_MODE_CSC;
if (crtc_state->base.gamma_lut)
cgm_mode |= CGM_PIPE_MODE_GAMMA;
return cgm_mode;
}
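
For example, a CherryView state with a degamma LUT and a CTM but no gamma LUT (and not in legacy gamma mode) yields CGM_PIPE_MODE_DEGAMMA | CGM_PIPE_MODE_CSC here, which cherryview_load_csc_matrix() above now writes verbatim from crtc_state->cgm_mode instead of recomputing the enable bits at commit time.
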
int intel_color_check(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
bool limited_color_range = false;
int gamma_length, degamma_length;
u32 gamma_tests, degamma_tests;
int ret;
degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests;
gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests;
/* C8 needs the legacy LUT all to itself */
if (crtc_state->c8_planes &&
!crtc_state_is_legacy_gamma(crtc_state))
return -EINVAL;
crtc_state->gamma_enable = (gamma_lut || degamma_lut) &&
!crtc_state->c8_planes;
if (INTEL_GEN(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
limited_color_range = crtc_state->limited_color_range;
crtc_state->csc_enable =
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
crtc_state->base.ctm || limited_color_range;
ret = intel_color_add_affected_planes(crtc_state);
if (ret)
return ret;
crtc_state->csc_mode = 0;
if (IS_CHERRYVIEW(dev_priv))
crtc_state->cgm_mode = chv_cgm_mode(crtc_state);
/* Always allow legacy gamma LUT with no further checking. */
if (crtc_state_is_legacy_gamma(crtc_state)) {
if (!crtc_state->gamma_enable ||
crtc_state_is_legacy_gamma(crtc_state)) {
crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
if (INTEL_GEN(dev_priv) >= 11 &&
crtc_state->gamma_enable)
crtc_state->gamma_mode |= POST_CSC_GAMMA_ENABLE;
return 0;
}
@ -633,13 +846,26 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
drm_color_lut_check(gamma_lut, gamma_tests))
return -EINVAL;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
if (INTEL_GEN(dev_priv) >= 11)
crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT |
PRE_CSC_GAMMA_ENABLE |
POST_CSC_GAMMA_ENABLE;
else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
else if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
crtc_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
else
crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
if (INTEL_GEN(dev_priv) >= 11) {
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
crtc_state->limited_color_range)
crtc_state->csc_mode |= ICL_OUTPUT_CSC_ENABLE;
if (crtc_state->base.ctm)
crtc_state->csc_mode |= ICL_CSC_ENABLE;
}
return 0;
}
@ -649,20 +875,29 @@ void intel_color_init(struct intel_crtc *crtc)
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.load_luts = cherryview_load_luts;
} else if (IS_HASWELL(dev_priv)) {
dev_priv->display.load_luts = i9xx_load_luts;
dev_priv->display.color_commit = hsw_color_commit;
} else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) ||
IS_BROXTON(dev_priv)) {
dev_priv->display.load_luts = broadwell_load_luts;
dev_priv->display.color_commit = hsw_color_commit;
} else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
dev_priv->display.load_luts = glk_load_luts;
dev_priv->display.color_commit = hsw_color_commit;
if (HAS_GMCH(dev_priv)) {
if (IS_CHERRYVIEW(dev_priv))
dev_priv->display.load_luts = cherryview_load_luts;
else
dev_priv->display.load_luts = i9xx_load_luts;
dev_priv->display.color_commit = i9xx_color_commit;
} else {
dev_priv->display.load_luts = i9xx_load_luts;
if (INTEL_GEN(dev_priv) >= 11)
dev_priv->display.load_luts = icl_load_luts;
else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
dev_priv->display.load_luts = glk_load_luts;
else if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
dev_priv->display.load_luts = broadwell_load_luts;
else
dev_priv->display.load_luts = i9xx_load_luts;
if (INTEL_GEN(dev_priv) >= 9)
dev_priv->display.color_commit = skl_color_commit;
else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
dev_priv->display.color_commit = hsw_color_commit;
else
dev_priv->display.color_commit = ilk_color_commit;
}
/* Enable color management support when we have degamma & gamma LUTs. */


@ -88,6 +88,8 @@ void intel_connector_destroy(struct drm_connector *connector)
kfree(intel_connector->detect_edid);
intel_hdcp_cleanup(intel_connector);
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);


@ -0,0 +1,269 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_gem_context.h"
#include "i915_globals.h"
#include "intel_context.h"
#include "intel_ringbuffer.h"
static struct i915_global_context {
struct i915_global base;
struct kmem_cache *slab_ce;
} global;
struct intel_context *intel_context_alloc(void)
{
return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}
void intel_context_free(struct intel_context *ce)
{
kmem_cache_free(global.slab_ce, ce);
}
struct intel_context *
intel_context_lookup(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct intel_context *ce = NULL;
struct rb_node *p;
spin_lock(&ctx->hw_contexts_lock);
p = ctx->hw_contexts.rb_node;
while (p) {
struct intel_context *this =
rb_entry(p, struct intel_context, node);
if (this->engine == engine) {
GEM_BUG_ON(this->gem_context != ctx);
ce = this;
break;
}
if (this->engine < engine)
p = p->rb_right;
else
p = p->rb_left;
}
spin_unlock(&ctx->hw_contexts_lock);
return ce;
}
struct intel_context *
__intel_context_insert(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
struct intel_context *ce)
{
struct rb_node **p, *parent;
int err = 0;
spin_lock(&ctx->hw_contexts_lock);
parent = NULL;
p = &ctx->hw_contexts.rb_node;
while (*p) {
struct intel_context *this;
parent = *p;
this = rb_entry(parent, struct intel_context, node);
if (this->engine == engine) {
err = -EEXIST;
ce = this;
break;
}
if (this->engine < engine)
p = &parent->rb_right;
else
p = &parent->rb_left;
}
if (!err) {
rb_link_node(&ce->node, parent, p);
rb_insert_color(&ce->node, &ctx->hw_contexts);
}
spin_unlock(&ctx->hw_contexts_lock);
return ce;
}
void __intel_context_remove(struct intel_context *ce)
{
struct i915_gem_context *ctx = ce->gem_context;
spin_lock(&ctx->hw_contexts_lock);
rb_erase(&ce->node, &ctx->hw_contexts);
spin_unlock(&ctx->hw_contexts_lock);
}
static struct intel_context *
intel_context_instance(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct intel_context *ce, *pos;
ce = intel_context_lookup(ctx, engine);
if (likely(ce))
return ce;
ce = intel_context_alloc();
if (!ce)
return ERR_PTR(-ENOMEM);
intel_context_init(ce, ctx, engine);
pos = __intel_context_insert(ctx, engine, ce);
if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */
intel_context_free(ce);
GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
return pos;
}
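
The lookup/alloc/insert dance above is the usual optimistic-instantiation pattern: allocate outside the lock, try to insert, and on a race free our copy and use the winner's. A hedged userspace sketch of the same pattern (the array-backed "tree" and names are illustrative only):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_ENGINES 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *slots[NUM_ENGINES];

static void *get_instance(int engine)
{
        void *ce, *pos;

        pthread_mutex_lock(&lock);
        pos = slots[engine];            /* intel_context_lookup() */
        pthread_mutex_unlock(&lock);
        if (pos)
                return pos;

        ce = malloc(64);                /* allocate outside the lock */
        if (!ce)
                return NULL;

        pthread_mutex_lock(&lock);      /* __intel_context_insert() */
        pos = slots[engine];
        if (!pos)
                pos = slots[engine] = ce;
        pthread_mutex_unlock(&lock);

        if (pos != ce)                  /* beaten! use theirs */
                free(ce);
        return pos;
}

int main(void)
{
        printf("%p == %p\n", get_instance(2), get_instance(2));
        return 0;
}
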
struct intel_context *
intel_context_pin_lock(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
__acquires(ce->pin_mutex)
{
struct intel_context *ce;
ce = intel_context_instance(ctx, engine);
if (IS_ERR(ce))
return ce;
if (mutex_lock_interruptible(&ce->pin_mutex))
return ERR_PTR(-EINTR);
return ce;
}
struct intel_context *
intel_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct intel_context *ce;
int err;
ce = intel_context_instance(ctx, engine);
if (IS_ERR(ce))
return ce;
if (likely(atomic_inc_not_zero(&ce->pin_count)))
return ce;
if (mutex_lock_interruptible(&ce->pin_mutex))
return ERR_PTR(-EINTR);
if (likely(!atomic_read(&ce->pin_count))) {
err = ce->ops->pin(ce);
if (err)
goto err;
i915_gem_context_get(ctx);
GEM_BUG_ON(ce->gem_context != ctx);
mutex_lock(&ctx->mutex);
list_add(&ce->active_link, &ctx->active_engines);
mutex_unlock(&ctx->mutex);
intel_context_get(ce);
smp_mb__before_atomic(); /* flush pin before it is visible */
}
atomic_inc(&ce->pin_count);
GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
mutex_unlock(&ce->pin_mutex);
return ce;
err:
mutex_unlock(&ce->pin_mutex);
return ERR_PTR(err);
}
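
The pin/unpin fast paths rely on two lock-free counter idioms: atomic_inc_not_zero() re-pins an already-pinned context without touching pin_mutex, and atomic_add_unless(v, -1, 1) drops a reference unless it is the final one. A standalone C11 sketch of the same semantics (assumed equivalents, not the kernel helpers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool inc_not_zero(atomic_int *v)
{
        int old = atomic_load(v);

        while (old != 0) {
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return true;
        }
        return false;
}

static bool dec_unless_final(atomic_int *v)
{
        int old = atomic_load(v);

        while (old != 1) {
                if (atomic_compare_exchange_weak(v, &old, old - 1))
                        return true;
        }
        return false;
}

int main(void)
{
        atomic_int pin_count = 0;

        printf("fast pin on unpinned: %d\n", inc_not_zero(&pin_count));     /* 0: slow path */
        atomic_store(&pin_count, 1);    /* the slow path pinned it under the mutex */
        printf("fast pin on pinned:   %d\n", inc_not_zero(&pin_count));     /* 1 */
        printf("fast unpin:           %d\n", dec_unless_final(&pin_count)); /* 1 */
        printf("final unpin is slow:  %d\n", dec_unless_final(&pin_count)); /* 0: mutex + teardown */
        return 0;
}
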
void intel_context_unpin(struct intel_context *ce)
{
if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
return;
/* We may be called from inside intel_context_pin() to evict another */
intel_context_get(ce);
mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
if (likely(atomic_dec_and_test(&ce->pin_count))) {
ce->ops->unpin(ce);
mutex_lock(&ce->gem_context->mutex);
list_del(&ce->active_link);
mutex_unlock(&ce->gem_context->mutex);
i915_gem_context_put(ce->gem_context);
intel_context_put(ce);
}
mutex_unlock(&ce->pin_mutex);
intel_context_put(ce);
}
static void intel_context_retire(struct i915_active_request *active,
struct i915_request *rq)
{
struct intel_context *ce =
container_of(active, typeof(*ce), active_tracker);
intel_context_unpin(ce);
}
void
intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
kref_init(&ce->ref);
ce->gem_context = ctx;
ce->engine = engine;
ce->ops = engine->cops;
INIT_LIST_HEAD(&ce->signal_link);
INIT_LIST_HEAD(&ce->signals);
mutex_init(&ce->pin_mutex);
/* Use the whole device by default */
ce->sseu = intel_device_default_sseu(ctx->i915);
i915_active_request_init(&ce->active_tracker,
NULL, intel_context_retire);
}
static void i915_global_context_shrink(void)
{
kmem_cache_shrink(global.slab_ce);
}
static void i915_global_context_exit(void)
{
kmem_cache_destroy(global.slab_ce);
}
static struct i915_global_context global = { {
.shrink = i915_global_context_shrink,
.exit = i915_global_context_exit,
} };
int __init i915_global_context_init(void)
{
global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
if (!global.slab_ce)
return -ENOMEM;
i915_global_register(&global.base);
return 0;
}
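
The file closes with the i915_globals hook-up. A sketch of that registration pattern, assuming a registry that simply walks registered entries for shrink/exit (the real i915_globals.[ch] interface is not part of this diff):

#include <stdio.h>

struct global {
        void (*shrink)(void);
        void (*exit)(void);
        struct global *next;
};

static struct global *globals;

static void global_register(struct global *g)
{
        g->next = globals;
        globals = g;
}

static void globals_shrink(void)
{
        for (struct global *g = globals; g; g = g->next)
                g->shrink();
}

static void globals_exit(void)
{
        for (struct global *g = globals; g; g = g->next)
                g->exit();
}

static void ctx_shrink(void) { puts("kmem_cache_shrink(slab_ce)"); }
static void ctx_exit(void)   { puts("kmem_cache_destroy(slab_ce)"); }

static struct global ctx_global = { ctx_shrink, ctx_exit, NULL };

int main(void)
{
        global_register(&ctx_global);   /* i915_global_context_init() */
        globals_shrink();               /* memory pressure */
        globals_exit();                 /* driver unload */
        return 0;
}
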


@ -0,0 +1,87 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__
#include <linux/lockdep.h>
#include "intel_context_types.h"
#include "intel_engine_types.h"
struct intel_context *intel_context_alloc(void);
void intel_context_free(struct intel_context *ce);
void intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
/**
* intel_context_lookup - Find the matching HW context for this (ctx, engine)
* @ctx: the parent GEM context
* @engine: the target HW engine
*
* May return NULL if the HW context hasn't been instantiated (i.e. unused).
*/
struct intel_context *
intel_context_lookup(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
/**
* intel_context_pin_lock - Stabilises the 'pinned' status of the HW context
* @ctx: the parent GEM context
* @engine: the target HW engine
*
* Acquire a lock on the pinned status of the HW context, such that the context
* can neither be bound to the GPU nor unbound while the lock is held, i.e.
* intel_context_is_pinned() remains stable.
*/
struct intel_context *
intel_context_pin_lock(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
return atomic_read(&ce->pin_count);
}
static inline void intel_context_pin_unlock(struct intel_context *ce)
__releases(ce->pin_mutex)
{
mutex_unlock(&ce->pin_mutex);
}
struct intel_context *
__intel_context_insert(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
struct intel_context *ce);
void
__intel_context_remove(struct intel_context *ce);
struct intel_context *
intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
static inline void __intel_context_pin(struct intel_context *ce)
{
GEM_BUG_ON(!intel_context_is_pinned(ce));
atomic_inc(&ce->pin_count);
}
void intel_context_unpin(struct intel_context *ce);
static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
kref_get(&ce->ref);
return ce;
}
static inline void intel_context_put(struct intel_context *ce)
{
kref_put(&ce->ref, ce->ops->destroy);
}
#endif /* __INTEL_CONTEXT_H__ */


@ -0,0 +1,73 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/types.h>
#include "i915_active_types.h"
struct i915_gem_context;
struct i915_vma;
struct intel_context;
struct intel_ring;
struct intel_context_ops {
int (*pin)(struct intel_context *ce);
void (*unpin)(struct intel_context *ce);
void (*destroy)(struct kref *kref);
};
/*
* Powergating configuration for a particular (context,engine).
*/
struct intel_sseu {
u8 slice_mask;
u8 subslice_mask;
u8 min_eus_per_subslice;
u8 max_eus_per_subslice;
};
struct intel_context {
struct kref ref;
struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
struct intel_engine_cs *active;
struct list_head active_link;
struct list_head signal_link;
struct list_head signals;
struct i915_vma *state;
struct intel_ring *ring;
u32 *lrc_reg_state;
u64 lrc_desc;
atomic_t pin_count;
struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
/**
* active_tracker: Active tracker for the external rq activity
* on this intel_context object.
*/
struct i915_active_request active_tracker;
const struct intel_context_ops *ops;
struct rb_node node;
/** sseu: Control eu/slice partitioning */
struct intel_sseu sseu;
};
#endif /* __INTEL_CONTEXT_TYPES__ */


@ -851,7 +851,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
if (IS_ICELAKE(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 11) {
if (intel_port_is_combophy(dev_priv, port))
icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
0, &n_entries);
@ -1349,8 +1349,8 @@ int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
case DPLL_CFGCR1_KDIV_2:
p2 = 2;
break;
case DPLL_CFGCR1_KDIV_4:
p2 = 4;
case DPLL_CFGCR1_KDIV_3:
p2 = 3;
break;
}
@ -1477,11 +1477,7 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
if (intel_port_is_combophy(dev_priv, port)) {
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
else
link_clock = icl_calc_dp_combo_pll_link(dev_priv,
pll_id);
link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
} else {
if (pll_id == DPLL_ID_ICL_TBTPLL)
link_clock = icl_calc_tbt_pll_link(dev_priv, port);
@ -1678,7 +1674,7 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (IS_ICELAKE(dev_priv))
if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_clock_get(encoder, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_clock_get(encoder, pipe_config);
@ -1911,7 +1907,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
goto out;
}
if (port == PORT_A)
if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
cpu_transcoder = (enum transcoder) pipe;
@ -1973,7 +1969,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
if (port == PORT_A) {
if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
@ -2225,7 +2221,7 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
enum port port = encoder->port;
int n_entries;
if (IS_ICELAKE(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 11) {
if (intel_port_is_combophy(dev_priv, port))
icl_get_combo_buf_trans(dev_priv, port, encoder->type,
intel_dp->link_rate, &n_entries);
@ -2317,13 +2313,13 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
for (ln = 0; ln < 4; ln++) {
val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
}
/* Program PORT_TX_DW5 */
@ -2379,14 +2375,14 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
@ -2448,13 +2444,13 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
for (ln = 0; ln <= 3; ln++) {
val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
}
/* Program PORT_TX_DW7 */
@ -2505,14 +2501,14 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
@ -2555,33 +2551,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
val = I915_READ(MG_TX1_LINK_PARAMS(port, ln));
val = I915_READ(MG_TX1_LINK_PARAMS(ln, port));
val &= ~CRI_USE_FS32;
I915_WRITE(MG_TX1_LINK_PARAMS(port, ln), val);
I915_WRITE(MG_TX1_LINK_PARAMS(ln, port), val);
val = I915_READ(MG_TX2_LINK_PARAMS(port, ln));
val = I915_READ(MG_TX2_LINK_PARAMS(ln, port));
val &= ~CRI_USE_FS32;
I915_WRITE(MG_TX2_LINK_PARAMS(port, ln), val);
I915_WRITE(MG_TX2_LINK_PARAMS(ln, port), val);
}
/* Program MG_TX_SWINGCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
val = I915_READ(MG_TX1_SWINGCTRL(port, ln));
val = I915_READ(MG_TX1_SWINGCTRL(ln, port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
I915_WRITE(MG_TX1_SWINGCTRL(port, ln), val);
I915_WRITE(MG_TX1_SWINGCTRL(ln, port), val);
val = I915_READ(MG_TX2_SWINGCTRL(port, ln));
val = I915_READ(MG_TX2_SWINGCTRL(ln, port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
I915_WRITE(MG_TX2_SWINGCTRL(port, ln), val);
I915_WRITE(MG_TX2_SWINGCTRL(ln, port), val);
}
/* Program MG_TX_DRVCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
val = I915_READ(MG_TX1_DRVCTRL(port, ln));
val = I915_READ(MG_TX1_DRVCTRL(ln, port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@ -2589,9 +2585,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
I915_WRITE(MG_TX1_DRVCTRL(port, ln), val);
I915_WRITE(MG_TX1_DRVCTRL(ln, port), val);
val = I915_READ(MG_TX2_DRVCTRL(port, ln));
val = I915_READ(MG_TX2_DRVCTRL(ln, port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@ -2599,7 +2595,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
I915_WRITE(MG_TX2_DRVCTRL(port, ln), val);
I915_WRITE(MG_TX2_DRVCTRL(ln, port), val);
/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
}
@ -2610,17 +2606,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
val = I915_READ(MG_CLKHUB(port, ln));
val = I915_READ(MG_CLKHUB(ln, port));
if (link_clock < 300000)
val |= CFG_LOW_RATE_LKREN_EN;
else
val &= ~CFG_LOW_RATE_LKREN_EN;
I915_WRITE(MG_CLKHUB(port, ln), val);
I915_WRITE(MG_CLKHUB(ln, port), val);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
val = I915_READ(MG_TX1_DCC(port, ln));
val = I915_READ(MG_TX1_DCC(ln, port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@ -2628,9 +2624,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
I915_WRITE(MG_TX1_DCC(port, ln), val);
I915_WRITE(MG_TX1_DCC(ln, port), val);
val = I915_READ(MG_TX2_DCC(port, ln));
val = I915_READ(MG_TX2_DCC(ln, port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@ -2638,18 +2634,18 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
I915_WRITE(MG_TX2_DCC(port, ln), val);
I915_WRITE(MG_TX2_DCC(ln, port), val);
}
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
val = I915_READ(MG_TX1_PISO_READLOAD(port, ln));
val = I915_READ(MG_TX1_PISO_READLOAD(ln, port));
val |= CRI_CALCINIT;
I915_WRITE(MG_TX1_PISO_READLOAD(port, ln), val);
I915_WRITE(MG_TX1_PISO_READLOAD(ln, port), val);
val = I915_READ(MG_TX2_PISO_READLOAD(port, ln));
val = I915_READ(MG_TX2_PISO_READLOAD(ln, port));
val |= CRI_CALCINIT;
I915_WRITE(MG_TX2_PISO_READLOAD(port, ln), val);
I915_WRITE(MG_TX2_PISO_READLOAD(ln, port), val);
}
}
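
A recurring theme in this file is flipping the MG/combo PHY register macros from (port, ln) to (ln, port). Both orders compile, which is exactly the hazard; an illustration with made-up base and strides (not the real register math):

#include <assert.h>
#include <stdint.h>

#define REG_BASE    0x168000u
#define PORT_STRIDE 0x1000u
#define LN_STRIDE   0x80u

/* two-index register macro in the new (ln, port) argument order */
#define MG_REG(ln, port) (REG_BASE + (port) * PORT_STRIDE + (ln) * LN_STRIDE)

int main(void)
{
        uint32_t good = MG_REG(1, 2);   /* lane 1 of port 2 */
        uint32_t bad  = MG_REG(2, 1);   /* swapped at the call site */

        assert(good != bad);            /* silently a different register */
        return 0;
}
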
@ -2698,7 +2694,7 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dport->base;
int level = intel_ddi_dp_level(intel_dp);
if (IS_ICELAKE(dev_priv))
if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
@ -2867,7 +2863,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
mutex_lock(&dev_priv->dpll_lock);
if (IS_ICELAKE(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_port_is_combophy(dev_priv, port))
I915_WRITE(DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
@ -2909,7 +2905,7 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
if (IS_ICELAKE(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_port_is_combophy(dev_priv, port))
I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
} else if (IS_CANNONLAKE(dev_priv)) {
@ -2928,7 +2924,7 @@ static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
i915_reg_t mg_regs[2] = { MG_DP_MODE(0, port), MG_DP_MODE(1, port) };
u32 val;
int i;
@ -2999,8 +2995,8 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
return;
ln0 = I915_READ(MG_DP_MODE(port, 0));
ln1 = I915_READ(MG_DP_MODE(port, 1));
ln0 = I915_READ(MG_DP_MODE(0, port));
ln1 = I915_READ(MG_DP_MODE(1, port));
switch (intel_dig_port->tc_type) {
case TC_PORT_TYPEC:
@ -3050,8 +3046,8 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
return;
}
I915_WRITE(MG_DP_MODE(port, 0), ln0);
I915_WRITE(MG_DP_MODE(port, 1), ln1);
I915_WRITE(MG_DP_MODE(0, port), ln0);
I915_WRITE(MG_DP_MODE(1, port), ln1);
}
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
@ -3126,7 +3122,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
if (IS_ICELAKE(dev_priv))
if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
@ -3175,7 +3171,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
if (IS_ICELAKE(dev_priv))
if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, INTEL_OUTPUT_HDMI);
else if (IS_CANNONLAKE(dev_priv))
@ -3556,7 +3552,7 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_psr_enable(intel_dp, crtc_state);
intel_psr_update(intel_dp, crtc_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
intel_panel_update_backlight(encoder, crtc_state, conn_state);
@ -3711,7 +3707,7 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state)
{
if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 1;
else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
@ -3764,7 +3760,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = true;
intel_dig_port = enc_to_dig_port(&encoder->base);
if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
pipe_config->infoframes.enable |=
intel_hdmi_infoframes_enabled(encoder, pipe_config);
if (pipe_config->infoframes.enable)
pipe_config->has_infoframe = true;
if (temp & TRANS_DDI_HDMI_SCRAMBLING)
@ -3828,6 +3827,18 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
intel_read_infoframe(encoder, pipe_config,
HDMI_INFOFRAME_TYPE_AVI,
&pipe_config->infoframes.avi);
intel_read_infoframe(encoder, pipe_config,
HDMI_INFOFRAME_TYPE_SPD,
&pipe_config->infoframes.spd);
intel_read_infoframe(encoder, pipe_config,
HDMI_INFOFRAME_TYPE_VENDOR,
&pipe_config->infoframes.hdmi);
}
static enum intel_output_type
@ -3856,7 +3867,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
enum port port = encoder->port;
int ret;
if (port == PORT_A)
if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
@ -3958,15 +3969,7 @@ static int modeset_pipe(struct drm_crtc *crtc,
goto out;
}
crtc_state->mode_changed = true;
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret)
goto out;
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
goto out;
crtc_state->connectors_changed = true;
ret = drm_atomic_commit(state);
out:


@ -738,9 +738,9 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->num_scalers[PIPE_C] = 1;
}
BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);
if (IS_GEN(dev_priv, 11))
if (INTEL_GEN(dev_priv) >= 11)
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 6;
else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
@ -844,7 +844,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
DRM_INFO("Disabling ppGTT for VT-d support\n");
info->ppgtt = INTEL_PPGTT_NONE;
info->ppgtt_type = INTEL_PPGTT_NONE;
}
/* Initialize command stream timestamp frequency */
@ -887,7 +887,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
continue;
if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) {
info->ring_mask &= ~ENGINE_MASK(_VCS(i));
info->engine_mask &= ~BIT(_VCS(i));
DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
continue;
}
@ -906,7 +906,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
continue;
if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) {
info->ring_mask &= ~ENGINE_MASK(_VECS(i));
info->engine_mask &= ~BIT(_VECS(i));
DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
}
}


@ -76,11 +76,10 @@ enum intel_platform {
INTEL_MAX_PLATFORMS
};
enum intel_ppgtt {
enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL,
INTEL_PPGTT_FULL_4LVL,
};
#define DEV_INFO_FOR_EACH_FLAG(func) \
@ -150,19 +149,21 @@ struct sseu_dev_info {
u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
};
typedef u8 intel_ring_mask_t;
typedef u8 intel_engine_mask_t;
struct intel_device_info {
u16 gen_mask;
u8 gen;
u8 gt; /* GT number, 0 if undefined */
intel_ring_mask_t ring_mask; /* Rings supported by the HW */
intel_engine_mask_t engine_mask; /* Engines supported by the HW */
enum intel_platform platform;
u32 platform_mask;
enum intel_ppgtt ppgtt;
enum intel_ppgtt_type ppgtt_type;
unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
unsigned int page_sizes; /* page sizes supported by the HW */
u32 display_mmio_offset;
@ -200,7 +201,7 @@ struct intel_runtime_info {
u8 num_sprites[I915_MAX_PIPES];
u8 num_scalers[I915_MAX_PIPES];
u8 num_rings;
u8 num_engines;
/* Slice/subslice/EU info */
struct sseu_dev_info sseu;


@ -595,7 +595,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
@ -603,7 +603,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
* We haven't figured out how to reliably set up different
* single/dual channel state, if we even can.
*/
if (intel_is_dual_link_lvds(dev))
if (intel_is_dual_link_lvds(dev_priv))
return limit->p2.p2_fast;
else
return limit->p2.p2_slow;
@ -951,14 +951,15 @@ chv_find_best_dpll(const struct intel_limit *limit,
return found;
}
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock)
{
int refclk = 100000;
const struct intel_limit *limit = &intel_limits_bxt;
return chv_find_best_dpll(limit, crtc_state,
target_clock, refclk, NULL, best_clock);
crtc_state->port_clock, refclk,
NULL, best_clock);
}
bool intel_crtc_active(struct intel_crtc *crtc)
@ -1441,19 +1442,6 @@ static void chv_enable_pll(struct intel_crtc *crtc,
}
}
static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
int count = 0;
for_each_intel_crtc(&dev_priv->drm, crtc) {
count += crtc->base.state->active &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
}
return count;
}
static void i9xx_enable_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state)
{
@ -1468,26 +1456,12 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
assert_panel_unlocked(dev_priv, crtc->pipe);
/* Enable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
/*
* It appears to be important that we don't enable this
* for the current pipe before otherwise configuring the
* PLL. No idea how this should be handled if multiple
* DVO outputs are enabled simultaneously.
*/
dpll |= DPLL_DVO_2X_MODE;
I915_WRITE(DPLL(!crtc->pipe),
I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
}
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
I915_WRITE(reg, 0);
I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
I915_WRITE(reg, dpll);
/* Wait for the clocks to stabilize. */
@ -1520,16 +1494,6 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/* Disable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
!intel_num_dvo_pipes(dev_priv)) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
I915_WRITE(DPLL(PIPE_A),
I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
}
/* Don't disable pipe or pipe PLLs if needed */
if (IS_I830(dev_priv))
return;
@ -1658,14 +1622,15 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
}
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
if (HAS_PCH_IBX(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
else
} else {
val |= TRANS_PROGRESSIVE;
}
I915_WRITE(reg, val | TRANS_ENABLE);
if (intel_wait_for_register(dev_priv,
@ -1830,6 +1795,8 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
/* FIXME: assert CPU port conditions for SNB+ */
}
trace_intel_pipe_enable(dev_priv, pipe);
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if (val & PIPECONF_ENABLE) {
@ -1869,6 +1836,8 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
trace_intel_pipe_disable(dev_priv, pipe);
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if ((val & PIPECONF_ENABLE) == 0)
@ -2855,8 +2824,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
if (plane->id == PLANE_PRIMARY)
intel_pre_disable_primary_noatomic(&crtc->base);
trace_intel_disable_plane(&plane->base, crtc);
plane->disable_plane(plane, crtc_state);
intel_disable_plane(plane, crtc_state);
}
static void
@ -3260,9 +3228,10 @@ static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dspcntr = 0;
dspcntr |= DISPPLANE_GAMMA_ENABLE;
if (crtc_state->gamma_enable)
dspcntr |= DISPPLANE_GAMMA_ENABLE;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
if (crtc_state->csc_enable)
dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
if (INTEL_GEN(dev_priv) < 5)
@ -3489,7 +3458,7 @@ static void i9xx_disable_plane(struct intel_plane *plane,
*
* On pre-g4x there is no way to gamma correct the
* pipe bottom color but we'll keep on doing this
* anyway.
* anyway so that the crtc state readout works correctly.
*/
dspcntr = i9xx_plane_ctl_crtc(crtc_state);
@ -3764,8 +3733,11 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return plane_ctl;
plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
if (crtc_state->gamma_enable)
plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
if (crtc_state->csc_enable)
plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
return plane_ctl;
}
@ -3817,8 +3789,11 @@ u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 11)
return plane_color_ctl;
plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
if (crtc_state->gamma_enable)
plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
if (crtc_state->csc_enable)
plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
return plane_color_ctl;
}
@ -3977,9 +3952,6 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
* The display has been reset as well,
* so need a full re-initialization.
*/
intel_runtime_pm_disable_interrupts(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
intel_modeset_init_hw(dev);
intel_init_clock_gating(dev_priv);
@ -4019,13 +3991,13 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
* and rounding for per-pixel values 00 and 0xff
*/
tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
/*
* W/A for underruns with linear/X-tiled with
* WM1+ disabled.
* Display WA # 1605353570: icl
* Set the pixel rounding bit to 1 to allow passthrough
* of frame buffer pixels unmodified across the pipe.
*/
tmp |= PM_FILL_MAINTAIN_DBUF_FULLNESS;
tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}
@ -4064,16 +4036,6 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
ironlake_pfit_disable(old_crtc_state);
}
/*
* We don't (yet) allow userspace to control the pipe background color,
* so force it to black, but apply pipe gamma and CSC so that its
* handling will match how we program our planes.
*/
if (INTEL_GEN(dev_priv) >= 9)
I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
SKL_BOTTOM_COLOR_GAMMA_ENABLE |
SKL_BOTTOM_COLOR_CSC_ENABLE);
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(crtc);
}
@ -5101,10 +5063,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
/* range checks */
if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
(IS_GEN(dev_priv, 11) &&
(INTEL_GEN(dev_priv) >= 11 &&
(src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
(!IS_GEN(dev_priv, 11) &&
(INTEL_GEN(dev_priv) < 11 &&
(src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
@ -5673,7 +5635,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
!(update_mask & BIT(plane->id)))
continue;
plane->disable_plane(plane, new_crtc_state);
intel_disable_plane(plane, new_crtc_state);
if (old_plane_state->base.visible)
fb_bits |= plane->frontbuffer_bit;
@ -5824,6 +5786,14 @@ static void intel_encoders_update_pipe(struct drm_crtc *crtc,
}
}
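/*
 * On pre-SKL hardware the pipe gamma/CSC enable bits in the primary
 * plane's DSPCNTR also affect the pipe bottom color, so the disabled
 * primary plane must be reprogrammed whenever those bits change.
 */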
static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
plane->disable_plane(plane, crtc_state);
}
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
@ -5889,6 +5859,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
*/
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
@ -6017,6 +5989,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
*/
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
/* update DSPCNTR to configure gamma/csc for pipe bottom color */
if (INTEL_GEN(dev_priv) < 9)
intel_disable_primary_plane(pipe_config);
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(intel_crtc);
@ -6197,7 +6172,7 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
if (port == PORT_NONE)
return false;
if (IS_ICELAKE(dev_priv))
if (INTEL_GEN(dev_priv) >= 11)
return port <= PORT_B;
return false;
@ -6205,7 +6180,7 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
{
if (IS_ICELAKE(dev_priv))
if (INTEL_GEN(dev_priv) >= 11)
return port >= PORT_C && port <= PORT_F;
return false;
@ -6374,6 +6349,8 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(pipe_config);
dev_priv->display.initial_watermarks(old_intel_state,
pipe_config);
@ -6431,6 +6408,8 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state,
@ -6813,7 +6792,13 @@ static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
if (!hsw_crtc_state_ips_capable(crtc_state))
return false;
if (crtc_state->ips_force_disable)
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
* enabled and disabled dynamically based on package C states,
* user space can't make reliable use of the CRCs, so let's just
* completely disable it.
*/
if (crtc_state->crc_enabled)
return false;
/* IPS should be fine as long as at least one plane is enabled. */
@ -6888,8 +6873,7 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int clock_limit = dev_priv->max_dotclk_freq;
@ -6939,7 +6923,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
}
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev)) {
intel_is_dual_link_lvds(dev_priv)) {
DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
return -EINVAL;
}
@ -7556,7 +7540,19 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
if (!IS_I830(dev_priv) &&
/*
* Bspec:
* "[Almador Errata}: For the correct operation of the muxed DVO pins
* (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
* GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
* Enable) must be set to 1 in both the DPLL A Control Register
* (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
*
* For simplicity we simply keep both bits always enabled in
* both DPLLs. The spec says we should disable the DVO 2X clock
* when not needed, but this seems to work fine in practice.
*/
if (IS_I830(dev_priv) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
@ -7764,13 +7760,16 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
} else
} else {
pipeconf |= PIPECONF_PROGRESSIVE;
}
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
crtc_state->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
POSTING_READ(PIPECONF(crtc->pipe));
}
@ -7814,8 +7813,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 96000;
@ -7828,7 +7826,7 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
}
if (intel_is_dual_link_lvds(dev))
if (intel_is_dual_link_lvds(dev_priv))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
@ -8178,6 +8176,24 @@ static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
pipe_config->output_format = output;
}
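/*
 * Before SKL the pipe gamma/CSC enable bits live in the primary
 * plane's DSPCNTR rather than in a pipe register, so state readout
 * recovers gamma_enable/csc_enable from the plane control register.
 */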
static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 tmp;
tmp = I915_READ(DSPCNTR(i9xx_plane));
if (tmp & DISPPLANE_GAMMA_ENABLE)
crtc_state->gamma_enable = true;
if (!HAS_GMCH(dev_priv) &&
tmp & DISPPLANE_PIPE_CSC_ENABLE)
crtc_state->csc_enable = true;
}
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@ -8223,6 +8239,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
(tmp & PIPECONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
PIPECONF_GAMMA_MODE_SHIFT;
if (IS_CHERRYVIEW(dev_priv))
pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
i9xx_get_pipe_color_config(pipe_config);
if (INTEL_GEN(dev_priv) < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
@ -8255,14 +8279,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
/*
* DPLL_DVO_2X_MODE must be enabled for both DPLLs
* on 830. Filter it out here so that we don't
* report errors due to that.
*/
if (IS_I830(dev_priv))
pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
} else {
@ -8762,6 +8778,8 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (crtc_state->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
@ -8842,13 +8860,11 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
static void ironlake_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct dpll *reduced_clock)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll, fp, fp2;
int factor;
@ -8857,10 +8873,12 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
(HAS_PCH_IBX(dev_priv) &&
intel_is_dual_link_lvds(dev_priv)))
factor = 25;
} else if (crtc_state->sdvo_tv_clock)
} else if (crtc_state->sdvo_tv_clock) {
factor = 20;
}
fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
@ -8947,8 +8965,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 120000;
@ -8966,7 +8983,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
refclk = dev_priv->vbt.lvds_ssc_freq;
}
if (intel_is_dual_link_lvds(dev)) {
if (intel_is_dual_link_lvds(dev_priv)) {
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
else
@ -8990,7 +9007,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
ironlake_compute_dpll(crtc, crtc_state, NULL);
if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
if (!intel_get_shared_dpll(crtc_state, NULL)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
return -EINVAL;
@ -9296,6 +9313,13 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (tmp & PIPECONF_COLOR_RANGE_SELECT)
pipe_config->limited_color_range = true;
pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
PIPECONF_GAMMA_MODE_SHIFT;
pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
i9xx_get_pipe_color_config(pipe_config);
if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
enum intel_dpll_id pll_id;
@ -9580,11 +9604,11 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
to_intel_atomic_state(crtc_state->base.state);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
IS_ICELAKE(dev_priv)) {
INTEL_GEN(dev_priv) >= 11) {
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
if (!intel_get_shared_dpll(crtc_state, encoder)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
return -EINVAL;
@ -9622,9 +9646,6 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
temp = I915_READ(DPCLKA_CFGCR0_ICL) &
DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
if (WARN_ON(!intel_dpll_is_combophy(id)))
return;
} else if (intel_port_is_tc(dev_priv, port)) {
id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
} else {
@ -9718,15 +9739,18 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
unsigned long panel_transcoder_mask = 0;
unsigned long enabled_panel_transcoders = 0;
enum transcoder panel_transcoder;
u32 tmp;
if (IS_ICELAKE(dev_priv))
if (INTEL_GEN(dev_priv) >= 11)
panel_transcoder_mask |=
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
if (HAS_TRANSCODER_EDP(dev_priv))
panel_transcoder_mask |= BIT(TRANSCODER_EDP);
/*
* The pipe->transcoder mapping is fixed with the exception of the eDP
* and DSI transcoders handled below.
@ -9856,7 +9880,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
if (IS_ICELAKE(dev_priv))
if (INTEL_GEN(dev_priv) >= 11)
icelake_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
@ -9919,7 +9943,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
goto out;
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
IS_ICELAKE(dev_priv)) {
INTEL_GEN(dev_priv) >= 11) {
haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
}
@ -9927,8 +9951,21 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_src_size(crtc, pipe_config);
intel_get_crtc_ycbcr_config(crtc, pipe_config);
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
if (INTEL_GEN(dev_priv) >= 9) {
u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
pipe_config->gamma_enable = true;
if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
pipe_config->csc_enable = true;
} else {
i9xx_get_pipe_color_config(pipe_config);
}
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
@ -10100,7 +10137,12 @@ i845_cursor_max_stride(struct intel_plane *plane,
static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
return CURSOR_GAMMA_ENABLE;
u32 cntl = 0;
if (crtc_state->gamma_enable)
cntl |= CURSOR_GAMMA_ENABLE;
return cntl;
}
static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
@ -10254,9 +10296,10 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 11)
return cntl;
cntl |= MCURSOR_GAMMA_ENABLE;
if (crtc_state->gamma_enable)
cntl = MCURSOR_GAMMA_ENABLE;
if (HAS_DDI(dev_priv))
if (crtc_state->csc_enable)
cntl |= MCURSOR_PIPE_CSC_ENABLE;
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
@ -11245,16 +11288,11 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
return ret;
}
if (mode_changed || crtc_state->color_mgmt_changed) {
if (mode_changed || pipe_config->update_pipe ||
crtc_state->color_mgmt_changed) {
ret = intel_color_check(pipe_config);
if (ret)
return ret;
/*
* Changing color management on Intel hardware is
* handled as part of planes update.
*/
crtc_state->planes_changed = true;
}
ret = 0;
@ -11425,6 +11463,16 @@ intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
m_n->link_m, m_n->link_n, m_n->tu);
}
static void
intel_dump_infoframe(struct drm_i915_private *dev_priv,
const union hdmi_infoframe *frame)
{
if ((drm_debug & DRM_UT_KMS) == 0)
return;
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
}
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
static const char * const output_type_str[] = {
@ -11528,6 +11576,22 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
pipe_config->has_audio, pipe_config->has_infoframe);
DRM_DEBUG_KMS("infoframes enabled: 0x%x\n",
pipe_config->infoframes.enable);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->base.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
@ -11676,7 +11740,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru;
saved_state->ips_force_disable = crtc_state->ips_force_disable;
saved_state->crc_enabled = crtc_state->crc_enabled;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
saved_state->wm = crtc_state->wm;
@ -11895,6 +11959,37 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
return false;
}
static bool
intel_compare_infoframe(const union hdmi_infoframe *a,
const union hdmi_infoframe *b)
{
return memcmp(a, b, sizeof(*a)) == 0;
}
static void
pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
bool adjust, const char *name,
const union hdmi_infoframe *a,
const union hdmi_infoframe *b)
{
if (adjust) {
if ((drm_debug & DRM_UT_KMS) == 0)
return;
drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
drm_dbg(DRM_UT_KMS, "expected:");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
drm_dbg(DRM_UT_KMS, "found");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
} else {
drm_err("mismatch in %s infoframe", name);
drm_err("expected:");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
drm_err("found");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
}
}
static void __printf(3, 4)
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
@ -12078,7 +12173,17 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
} \
} while (0)
#define PIPE_CONF_QUIRK(quirk) \
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
if (!intel_compare_infoframe(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
&current_config->infoframes.name, \
&pipe_config->infoframes.name); \
ret = false; \
} \
} while (0)
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
PIPE_CONF_CHECK_I(cpu_transcoder);
@ -12159,6 +12264,14 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
PIPE_CONF_CHECK_X(gamma_mode);
if (IS_CHERRYVIEW(dev_priv))
PIPE_CONF_CHECK_X(cgm_mode);
else
PIPE_CONF_CHECK_X(csc_mode);
PIPE_CONF_CHECK_BOOL(gamma_enable);
PIPE_CONF_CHECK_BOOL(csc_enable);
}
PIPE_CONF_CHECK_BOOL(double_wide);
@ -12207,6 +12320,12 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(min_voltage_level);
PIPE_CONF_CHECK_X(infoframes.enable);
PIPE_CONF_CHECK_X(infoframes.gcp);
PIPE_CONF_CHECK_INFOFRAME(avi);
PIPE_CONF_CHECK_INFOFRAME(spd);
PIPE_CONF_CHECK_INFOFRAME(hdmi);
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
@ -12241,12 +12360,15 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct drm_crtc_state *new_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct skl_ddb_allocation hw_ddb, *sw_ddb;
struct skl_pipe_wm hw_wm, *sw_wm;
struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
struct skl_hw_state {
struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
struct skl_ddb_allocation ddb;
struct skl_pipe_wm wm;
} *hw;
struct skl_ddb_allocation *sw_ddb;
struct skl_pipe_wm *sw_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const enum pipe pipe = intel_crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
@ -12254,22 +12376,29 @@ static void verify_wm_state(struct drm_crtc *crtc,
if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
return;
skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm);
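/*
 * The combined wm/ddb snapshot is too big to sit comfortably on the
 * kernel stack, so collect it in a heap allocation instead.
 */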
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw)
return;
skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
skl_ddb_get_hw_state(dev_priv, &hw->ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
if (INTEL_GEN(dev_priv) >= 11)
if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
sw_ddb->enabled_slices,
hw_ddb.enabled_slices);
if (INTEL_GEN(dev_priv) >= 11 &&
hw->ddb.enabled_slices != sw_ddb->enabled_slices)
DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
sw_ddb->enabled_slices,
hw->ddb.enabled_slices);
/* planes */
for_each_universal_plane(dev_priv, pipe, plane) {
hw_plane_wm = &hw_wm.planes[plane];
struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
hw_plane_wm = &hw->wm.planes[plane];
sw_plane_wm = &sw_wm->planes[plane];
/* Watermarks */
@ -12301,7 +12430,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
hw_ddb_entry = &hw_ddb_y[plane];
hw_ddb_entry = &hw->ddb_y[plane];
sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
@ -12319,7 +12448,9 @@ static void verify_wm_state(struct drm_crtc *crtc,
* once the plane becomes visible, we can skip this check
*/
if (1) {
hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
/* Watermarks */
@ -12351,7 +12482,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
@ -12361,6 +12492,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
hw_ddb_entry->start, hw_ddb_entry->end);
}
}
kfree(hw);
}
static void
@ -12517,7 +12650,8 @@ intel_verify_planes(struct intel_atomic_state *state)
for_each_new_intel_plane_in_state(state, plane,
plane_state, i)
assert_plane(plane, plane_state->base.visible);
assert_plane(plane, plane_state->slave ||
plane_state->base.visible);
}
static void
@ -13576,7 +13710,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
* vblank without our intervention, so leave RPS alone.
*/
if (!i915_request_started(rq))
gen6_rps_boost(rq, NULL);
gen6_rps_boost(rq);
i915_request_put(rq);
drm_crtc_vblank_put(wait->crtc);
@ -14109,14 +14243,11 @@ intel_legacy_cursor_update(struct drm_plane *plane,
*/
crtc_state->active_planes = new_crtc_state->active_planes;
if (plane->state->visible) {
trace_intel_update_plane(plane, to_intel_crtc(crtc));
intel_plane->update_plane(intel_plane, crtc_state,
to_intel_plane_state(plane->state));
} else {
trace_intel_disable_plane(plane, to_intel_crtc(crtc));
intel_plane->disable_plane(intel_plane, crtc_state);
}
if (plane->state->visible)
intel_update_plane(intel_plane, crtc_state,
to_intel_plane_state(plane->state));
else
intel_disable_plane(intel_plane, crtc_state);
intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
@ -14566,7 +14697,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
if (IS_ICELAKE(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 11) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
@ -15467,6 +15598,8 @@ int intel_modeset_init(struct drm_device *dev)
intel_update_czclk(dev_priv);
intel_modeset_init_hw(dev);
intel_hdcp_component_init(dev_priv);
if (dev_priv->max_cdclk_freq == 0)
intel_update_max_cdclk(dev_priv);
@ -15542,7 +15675,7 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
pipe_name(pipe), clock.vco, clock.dot);
fp = i9xx_dpll_compute_fp(&clock);
dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
dpll = DPLL_DVO_2X_MODE |
DPLL_VGA_MODE_DIS |
((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
PLL_P2_DIVIDE_BY_4 |
@ -16328,6 +16461,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* flush any delayed tasks or pending work */
flush_scheduled_work();
intel_hdcp_component_fini(dev_priv);
drm_mode_config_cleanup(dev);
intel_overlay_cleanup(dev_priv);
@ -16374,8 +16509,6 @@ struct intel_display_error_state {
u32 power_well_driver;
int num_transcoders;
struct intel_cursor_error_state {
u32 control;
u32 position;
@ -16400,6 +16533,7 @@ struct intel_display_error_state {
} plane[I915_MAX_PIPES];
struct intel_transcoder_error_state {
bool available;
bool power_domain_on;
enum transcoder cpu_transcoder;
@ -16426,6 +16560,8 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
};
int i;
BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
if (!HAS_DISPLAY(dev_priv))
return NULL;
@ -16466,14 +16602,13 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
/* Note: this does not include DSI transcoders. */
error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
if (HAS_DDI(dev_priv))
error->num_transcoders++; /* Account for eDP. */
for (i = 0; i < error->num_transcoders; i++) {
for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
enum transcoder cpu_transcoder = transcoders[i];
if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
continue;
error->transcoder[i].available = true;
error->transcoder[i].power_domain_on =
__intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
@ -16537,7 +16672,10 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " BASE: %08x\n", error->cursor[i].base);
}
for (i = 0; i < error->num_transcoders; i++) {
for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
if (!error->transcoder[i].available)
continue;
err_printf(m, "CPU transcoder: %s\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
err_printf(m, " Power: %s\n",

View file

@ -949,8 +949,11 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
regs->pp_stat = PP_STATUS(pps_idx);
regs->pp_on = PP_ON_DELAYS(pps_idx);
regs->pp_off = PP_OFF_DELAYS(pps_idx);
if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
!HAS_PCH_ICP(dev_priv))
/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
regs->pp_div = INVALID_MMIO_REG;
else
regs->pp_div = PP_DIVISOR(pps_idx);
}
@ -4780,7 +4783,7 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
intel_dp_handle_test_request(intel_dp);
if (val & DP_CP_IRQ)
intel_hdcp_check_link(intel_dp->attached_connector);
intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
if (val & DP_SINK_SPECIFIC_IRQ)
DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
@ -5623,6 +5626,18 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
edp_panel_vdd_off_sync(intel_dp);
}
static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
{
long ret;
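/*
 * The condition fires once a fresh CP_IRQ has advanced cp_irq_count
 * past the value cached before the AUX transfer.
 */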
#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
msecs_to_jiffies(timeout));
if (!ret)
DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
}
static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
u8 *an)
@ -5847,6 +5862,336 @@ int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
return 0;
}
struct hdcp2_dp_errata_stream_type {
u8 msg_id;
u8 stream_type;
} __packed;
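/*
 * Per-message description for DP HDCP2.2: the DPCD offset of each
 * message, whether its arrival is detectable via RXSTATUS, and how
 * long to wait for it (timeout2 covers the not-yet-paired
 * AKE_Send_H_prime case).
 */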
static struct hdcp2_dp_msg_data {
u8 msg_id;
u32 offset;
bool msg_detectable;
u32 timeout;
u32 timeout2; /* Added for non_paired situation */
} hdcp2_msg_data[] = {
{HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
{HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
{HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
false, 0, 0},
{HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
false, 0, 0},
{HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
{HDCP_2_2_AKE_SEND_PAIRING_INFO,
DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
{HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
{HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
{HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
0, 0},
{HDCP_2_2_REP_SEND_RECVID_LIST,
DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
{HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
0, 0},
{HDCP_2_2_REP_STREAM_MANAGE,
DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
0, 0},
{HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
/* local define to shovel this through the write_2_2 interface */
#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
{HDCP_2_2_ERRATA_DP_STREAM_TYPE,
DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
0, 0},
};
static inline
int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
u8 *rx_status)
{
ssize_t ret;
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
HDCP_2_2_DP_RXSTATUS_LEN);
if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
}
static
int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
u8 msg_id, bool *msg_ready)
{
u8 rx_status;
int ret;
*msg_ready = false;
ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
if (ret < 0)
return ret;
switch (msg_id) {
case HDCP_2_2_AKE_SEND_HPRIME:
if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
*msg_ready = true;
break;
case HDCP_2_2_AKE_SEND_PAIRING_INFO:
if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
*msg_ready = true;
break;
case HDCP_2_2_REP_SEND_RECVID_LIST:
if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
*msg_ready = true;
break;
default:
DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
return -EINVAL;
}
return 0;
}
static ssize_t
intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
struct intel_dp *dp = &intel_dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
u8 msg_id = hdcp2_msg_data->msg_id;
int ret, timeout;
bool msg_ready = false;
if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
timeout = hdcp2_msg_data->timeout2;
else
timeout = hdcp2_msg_data->timeout;
/*
* There is no way to detect the CERT, LPRIME and STREAM_READY
* availability, so wait for the timeout and then read the msg.
*/
if (!hdcp2_msg_data->msg_detectable) {
mdelay(timeout);
ret = 0;
} else {
/*
* Since we want to check msg availability at the timeout, ignore
* the timeout while waiting for the CP_IRQ.
*/
intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
ret = hdcp2_detect_msg_availability(intel_dig_port,
msg_id, &msg_ready);
if (!msg_ready)
ret = -ETIMEDOUT;
}
if (ret)
DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
hdcp2_msg_data->msg_id, ret, timeout);
return ret;
}
static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
{
int i;
for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
if (hdcp2_msg_data[i].msg_id == msg_id)
return &hdcp2_msg_data[i];
return NULL;
}
static
int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
void *buf, size_t size)
{
struct intel_dp *dp = &intel_dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_write, len;
struct hdcp2_dp_msg_data *hdcp2_msg_data;
hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
if (!hdcp2_msg_data)
return -EINVAL;
offset = hdcp2_msg_data->offset;
/* No msg_id in DP HDCP2.2 msgs */
bytes_to_write = size - 1;
byte++;
hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
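/*
 * Cache the CP_IRQ count before transmitting so a later wait can tell
 * whether a new CP_IRQ arrived in response. AUX transfers carry at
 * most DP_AUX_MAX_PAYLOAD_BYTES (16 bytes), hence the chunked writes.
 */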
while (bytes_to_write) {
len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
offset, (void *)byte, len);
if (ret < 0)
return ret;
bytes_to_write -= ret;
byte += ret;
offset += ret;
}
return size;
}
static
ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
{
u8 rx_info[HDCP_2_2_RXINFO_LEN];
u32 dev_cnt;
ssize_t ret;
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
DP_HDCP_2_2_REG_RXINFO_OFFSET,
(void *)rx_info, HDCP_2_2_RXINFO_LEN);
if (ret != HDCP_2_2_RXINFO_LEN)
return ret >= 0 ? -EIO : ret;
dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
HDCP_2_2_RECEIVER_IDS_MAX_LEN +
(dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
return ret;
}
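/*
 * Worked example (assuming the drm_hdcp.h sizes at the time of
 * writing: 1 byte msg_id + 2 bytes RxInfo + 3 bytes seq_num_V +
 * 16 bytes V' + 5 bytes per receiver ID): dev_cnt == 3 gives
 * 22 + 3 * 5 = 37 bytes to read back.
 */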
static
int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
u8 msg_id, void *buf, size_t size)
{
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
struct hdcp2_dp_msg_data *hdcp2_msg_data;
hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
if (!hdcp2_msg_data)
return -EINVAL;
offset = hdcp2_msg_data->offset;
ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
if (ret < 0)
return ret;
if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
ret = get_receiver_id_list_size(intel_dig_port);
if (ret < 0)
return ret;
size = ret;
}
bytes_to_recv = size - 1;
/* DP adaptation msgs have no msg_id */
byte++;
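/* Read back in DP_AUX_MAX_PAYLOAD_BYTES (16 byte) chunks as well. */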
while (bytes_to_recv) {
len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
(void *)byte, len);
if (ret < 0) {
DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
return ret;
}
bytes_to_recv -= ret;
byte += ret;
offset += ret;
}
byte = buf;
*byte = msg_id;
return size;
}
static
int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
bool is_repeater, u8 content_type)
{
struct hdcp2_dp_errata_stream_type stream_type_msg;
if (is_repeater)
return 0;
/*
* Errata for DP: As the stream type is used for encryption, the
* receiver must be told the stream type so it can decrypt the
* content.
* A repeater is informed of the stream type as part of its auth
* later in time.
*/
stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
stream_type_msg.stream_type = content_type;
return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
sizeof(stream_type_msg));
}
static
int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
{
u8 rx_status;
int ret;
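/*
 * Precedence if several RXSTATUS bits are set: reauth request first,
 * then link integrity failure, then topology change (READY).
 */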
ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
if (ret)
return ret;
if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
ret = HDCP_REAUTH_REQUEST;
else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
ret = HDCP_LINK_INTEGRITY_FAILURE;
else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
ret = HDCP_TOPOLOGY_CHANGE;
return ret;
}
static
int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
bool *capable)
{
u8 rx_caps[3];
int ret;
*capable = false;
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
rx_caps, HDCP_2_2_RXCAPS_LEN);
if (ret != HDCP_2_2_RXCAPS_LEN)
return ret >= 0 ? -EIO : ret;
if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
*capable = true;
return 0;
}
static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
.write_an_aksv = intel_dp_hdcp_write_an_aksv,
.read_bksv = intel_dp_hdcp_read_bksv,
@ -5859,6 +6204,12 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
.toggle_signalling = intel_dp_hdcp_toggle_signalling,
.check_link = intel_dp_hdcp_check_link,
.hdcp_capable = intel_dp_hdcp_capable,
.write_2_2_msg = intel_dp_hdcp2_write_msg,
.read_2_2_msg = intel_dp_hdcp2_read_msg,
.config_stream_type = intel_dp_hdcp2_config_stream_type,
.check_2_2_link = intel_dp_hdcp2_check_link,
.hdcp_2_2_capable = intel_dp_hdcp2_capable,
.protocol = HDCP_PROTOCOL_DP,
};
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
@ -6072,43 +6423,34 @@ static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
u32 pp_on, pp_off, pp_ctl;
struct pps_registers regs;
intel_pps_get_registers(intel_dp, &regs);
/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
pp_ctl = ironlake_get_pp_control(intel_dp);
/* Ensure PPS is unlocked */
if (!HAS_DDI(dev_priv))
I915_WRITE(regs.pp_ctrl, pp_ctl);
pp_on = I915_READ(regs.pp_on);
pp_off = I915_READ(regs.pp_off);
if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
!HAS_PCH_ICP(dev_priv)) {
I915_WRITE(regs.pp_ctrl, pp_ctl);
pp_div = I915_READ(regs.pp_div);
}
/* Pull timing values out of registers */
seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
PANEL_POWER_UP_DELAY_SHIFT;
seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
PANEL_LIGHT_ON_DELAY_SHIFT;
if (i915_mmio_reg_valid(regs.pp_div)) {
u32 pp_div;
seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
PANEL_LIGHT_OFF_DELAY_SHIFT;
pp_div = I915_READ(regs.pp_div);
seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
PANEL_POWER_DOWN_DELAY_SHIFT;
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
HAS_PCH_ICP(dev_priv)) {
seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
} else {
seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
}
}
@ -6233,7 +6575,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
bool force_disable_vdd)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 pp_on, pp_off, pp_div, port_sel = 0;
u32 pp_on, pp_off, port_sel = 0;
int div = dev_priv->rawclk_freq / 1000;
struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->base.port;
@ -6268,23 +6610,10 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
I915_WRITE(regs.pp_ctrl, pp);
}
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
HAS_PCH_ICP(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
<< BXT_POWER_CYCLE_DELAY_SHIFT);
} else {
pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
<< PANEL_POWER_CYCLE_DELAY_SHIFT);
}
pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
@ -6311,19 +6640,29 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off);
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
HAS_PCH_ICP(dev_priv))
I915_WRITE(regs.pp_ctrl, pp_div);
else
I915_WRITE(regs.pp_div, pp_div);
/*
* Compute the divisor for the pp clock, simply match the Bspec formula.
*/
if (i915_mmio_reg_valid(regs.pp_div)) {
I915_WRITE(regs.pp_div,
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
} else {
u32 pp_ctl;
pp_ctl = I915_READ(regs.pp_ctrl);
pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
I915_WRITE(regs.pp_ctrl, pp_ctl);
}
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(regs.pp_on),
I915_READ(regs.pp_off),
(IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
HAS_PCH_ICP(dev_priv)) ?
(I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
I915_READ(regs.pp_div));
i915_mmio_reg_valid(regs.pp_div) ?
I915_READ(regs.pp_div) :
(I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
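The PPS rework above leans on the new REG_FIELD_GET()/REG_FIELD_PREP() helpers. As a rough sketch of their semantics for a contiguous mask (the driver's real macros in i915_reg.h wrap FIELD_GET()/FIELD_PREP() from <linux/bitfield.h> and add compile-time mask checks; __sketch_shift() below is a stand-in for the kernel's __bf_shf()):

/* Minimal sketch, assuming a contiguous, non-zero mask. */
#define __sketch_shift(mask)         (__builtin_ffsll(mask) - 1)
#define SKETCH_FIELD_GET(mask, val)  (((val) & (mask)) >> __sketch_shift(mask))
#define SKETCH_FIELD_PREP(mask, val) (((val) << __sketch_shift(mask)) & (mask))

/* e.g. SKETCH_FIELD_GET(0x00001f00, 0x00000a00) == 0xa */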
static void intel_dp_pps_init(struct intel_dp *intel_dp)
@ -6734,7 +7073,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* eDP and LVDS bail out early in this case to prevent interfering
* with an already powered-on LVDS power sequencer.
*/
if (intel_get_lvds_encoder(&dev_priv->drm)) {
if (intel_get_lvds_encoder(dev_priv)) {
WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
DRM_INFO("LVDS was detected, not registering eDP\n");

Diff not shown due to its large size

View file

@ -327,8 +327,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *state,
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state,
struct intel_encoder *encoder);
void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
struct intel_crtc *crtc,
@ -341,8 +340,6 @@ void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state);
int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
u32 pll_id);
int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);

View file

@ -41,6 +41,7 @@
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
#include <drm/drm_atomic.h>
#include <drm/i915_mei_hdcp_interface.h>
#include <media/cec-notifier.h>
struct drm_printer;
@ -323,6 +324,13 @@ struct intel_panel {
struct intel_digital_port;
enum check_link_response {
HDCP_LINK_PROTECTED = 0,
HDCP_TOPOLOGY_CHANGE,
HDCP_LINK_INTEGRITY_FAILURE,
HDCP_REAUTH_REQUEST
};
/*
* This structure serves as a translation layer between the generic HDCP code
* and the bus-specific code. What that means is that HDCP over HDMI differs
@ -395,6 +403,32 @@ struct intel_hdcp_shim {
/* Detects panel's hdcp capability. This is optional for HDMI. */
int (*hdcp_capable)(struct intel_digital_port *intel_dig_port,
bool *hdcp_capable);
/* HDCP adaptation(DP/HDMI) required on the port */
enum hdcp_wired_protocol protocol;
/* Detects whether sink is HDCP2.2 capable */
int (*hdcp_2_2_capable)(struct intel_digital_port *intel_dig_port,
bool *capable);
/* Write HDCP2.2 messages */
int (*write_2_2_msg)(struct intel_digital_port *intel_dig_port,
void *buf, size_t size);
/* Read HDCP2.2 messages */
int (*read_2_2_msg)(struct intel_digital_port *intel_dig_port,
u8 msg_id, void *buf, size_t size);
/*
* Implementation of DP HDCP2.2 Errata for the communication of stream
* type to receivers. In DP HDCP2.2 the stream type is one of the
* inputs to the HDCP2.2 cipher for en/decryption. Not applicable to HDMI.
*/
int (*config_stream_type)(struct intel_digital_port *intel_dig_port,
bool is_repeater, u8 type);
/* HDCP2.2 Link Integrity Check */
int (*check_2_2_link)(struct intel_digital_port *intel_dig_port);
};
struct intel_hdcp {
@ -404,6 +438,50 @@ struct intel_hdcp {
u64 value;
struct delayed_work check_work;
struct work_struct prop_work;
/* HDCP1.4 Encryption status */
bool hdcp_encrypted;
/* HDCP2.2 related definitions */
/* Flag indicates whether this connector supports HDCP2.2 or not. */
bool hdcp2_supported;
/* HDCP2.2 Encryption status */
bool hdcp2_encrypted;
/*
* Content Stream Type defined by content owner. TYPE0(0x0) content can
* flow in a link protected by HDCP2.2 or HDCP1.4, whereas TYPE1(0x1)
* content can flow only through a link protected by HDCP2.2.
*/
u8 content_type;
struct hdcp_port_data port_data;
bool is_paired;
bool is_repeater;
/*
* Count of ReceiverID_List received. Initialized to 0 at AKE_INIT.
* Incremented after processing the RepeaterAuth_Send_ReceiverID_List.
* When it rolls over re-auth has to be triggered.
*/
u32 seq_num_v;
/*
* Count of RepeaterAuth_Stream_Manage msg propagated.
* Initialized to 0 on AKE_INIT. Incremented after every successful
* transmission of RepeaterAuth_Stream_Manage message. When it rolls
* over re-Auth has to be triggered.
*/
u32 seq_num_m;
/*
* Wait queue to signal the CP_IRQ. Used by the waiters to read the
* available information from the HDCP DP sink.
*/
wait_queue_head_t cp_irq_queue;
atomic_t cp_irq_count;
int cp_irq_count_cached;
};
struct intel_connector {
@ -921,7 +999,8 @@ struct intel_crtc_state {
struct intel_link_m_n fdi_m_n;
bool ips_enabled;
bool ips_force_disable;
bool crc_enabled;
bool enable_fbc;
@ -942,13 +1021,30 @@ struct intel_crtc_state {
/* Gamma mode programmed on the pipe */
u32 gamma_mode;
union {
/* CSC mode programmed on the pipe */
u32 csc_mode;
/* CHV CGM mode */
u32 cgm_mode;
};
/* bitmask of visible planes (enum plane_id) */
u8 active_planes;
u8 nv12_planes;
u8 c8_planes;
/* bitmask of planes that will be updated during the commit */
u8 update_planes;
struct {
u32 enable;
u32 gcp;
union hdmi_infoframe avi;
union hdmi_infoframe spd;
union hdmi_infoframe hdmi;
} infoframes;
/* HDMI scrambling status */
bool hdmi_scrambling;
@ -961,6 +1057,12 @@ struct intel_crtc_state {
/* Output down scaling is done in LSPCON device */
bool lspcon_downsampling;
/* enable pipe gamma? */
bool gamma_enable;
/* enable pipe csc? */
bool csc_enable;
/* Display Stream compression state */
struct {
bool compression_enable;
@ -989,9 +1091,6 @@ struct intel_crtc {
struct intel_crtc_state *config;
/* global reset count when the last flip was submitted */
unsigned int reset_count;
/* Access to these should be protected by dev_priv->irq_lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
@ -1260,11 +1359,15 @@ struct intel_digital_port {
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len);
void (*read_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
void *frame, ssize_t len);
void (*set_infoframes)(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool (*infoframe_enabled)(struct intel_encoder *encoder,
u32 (*infoframes_enabled)(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
};
@ -1738,7 +1841,7 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
@ -2000,13 +2103,22 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
bool scrambling);
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
u32 intel_hdmi_infoframe_enable(unsigned int type);
void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
void intel_read_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
union hdmi_infoframe *frame);
/* intel_lvds.c */
bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t lvds_reg, enum pipe *pipe);
void intel_lvds_init(struct drm_i915_private *dev_priv);
struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev);
struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv);
bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv);
/* intel_overlay.c */
void intel_overlay_setup(struct drm_i915_private *dev_priv);
@ -2068,9 +2180,12 @@ int intel_hdcp_init(struct intel_connector *connector,
const struct intel_hdcp_shim *hdcp_shim);
int intel_hdcp_enable(struct intel_connector *connector);
int intel_hdcp_disable(struct intel_connector *connector);
int intel_hdcp_check_link(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);
void intel_hdcp_component_init(struct drm_i915_private *dev_priv);
void intel_hdcp_component_fini(struct drm_i915_private *dev_priv);
void intel_hdcp_cleanup(struct intel_connector *connector);
void intel_hdcp_handle_cp_irq(struct intel_connector *connector);
/* intel_psr.c */
#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
@ -2079,9 +2194,9 @@ void intel_psr_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state);
int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
struct drm_modeset_acquire_ctx *ctx,
u64 value);
void intel_psr_update(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
@ -2261,7 +2376,7 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
void gen6_rps_boost(struct i915_request *rq);
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
@ -2375,6 +2490,14 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state);
/* intel_atomic_plane.c */
void intel_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void intel_update_slave(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void intel_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
struct intel_plane *intel_plane_alloc(void);
void intel_plane_free(struct intel_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
@ -2404,11 +2527,15 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *buf, ssize_t len);
void lspcon_read_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
void *frame, ssize_t len);
void lspcon_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
void lspcon_ycbcr420_config(struct drm_connector *connector,
struct intel_crtc_state *crtc_state);

View file

@ -194,7 +194,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
break;
}
if (!IS_ICELAKE(dev_priv))
if (INTEL_GEN(dev_priv) < 11)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
@ -365,7 +365,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
/* pull up/down */
value = *data++ & 1;
if (IS_ICELAKE(dev_priv))
if (INTEL_GEN(dev_priv) >= 11)
icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
else if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
@ -890,7 +890,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->burst_mode_ratio = burst_mode_ratio;
if (IS_ICELAKE(dev_priv))
if (INTEL_GEN(dev_priv) >= 11)
icl_dphy_param_init(intel_dsi);
else
vlv_dphy_param_init(intel_dsi);

View file

@ -84,7 +84,6 @@ static const struct engine_class_info intel_engine_classes[] = {
#define MAX_MMIO_BASES 3
struct engine_info {
unsigned int hw_id;
unsigned int uabi_id;
u8 class;
u8 instance;
/* mmio bases table *must* be sorted in reverse gen order */
@ -95,27 +94,24 @@ struct engine_info {
};
static const struct engine_info intel_engines[] = {
[RCS] = {
.hw_id = RCS_HW,
.uabi_id = I915_EXEC_RENDER,
[RCS0] = {
.hw_id = RCS0_HW,
.class = RENDER_CLASS,
.instance = 0,
.mmio_bases = {
{ .gen = 1, .base = RENDER_RING_BASE }
},
},
[BCS] = {
.hw_id = BCS_HW,
.uabi_id = I915_EXEC_BLT,
[BCS0] = {
.hw_id = BCS0_HW,
.class = COPY_ENGINE_CLASS,
.instance = 0,
.mmio_bases = {
{ .gen = 6, .base = BLT_RING_BASE }
},
},
[VCS] = {
.hw_id = VCS_HW,
.uabi_id = I915_EXEC_BSD,
[VCS0] = {
.hw_id = VCS0_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 0,
.mmio_bases = {
@ -124,9 +120,8 @@ static const struct engine_info intel_engines[] = {
{ .gen = 4, .base = BSD_RING_BASE }
},
},
[VCS2] = {
.hw_id = VCS2_HW,
.uabi_id = I915_EXEC_BSD,
[VCS1] = {
.hw_id = VCS1_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 1,
.mmio_bases = {
@ -134,27 +129,24 @@ static const struct engine_info intel_engines[] = {
{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
},
},
[VCS3] = {
.hw_id = VCS3_HW,
.uabi_id = I915_EXEC_BSD,
[VCS2] = {
.hw_id = VCS2_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 2,
.mmio_bases = {
{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
},
},
[VCS4] = {
.hw_id = VCS4_HW,
.uabi_id = I915_EXEC_BSD,
[VCS3] = {
.hw_id = VCS3_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 3,
.mmio_bases = {
{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
},
},
[VECS] = {
.hw_id = VECS_HW,
.uabi_id = I915_EXEC_VEBOX,
[VECS0] = {
.hw_id = VECS0_HW,
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 0,
.mmio_bases = {
@ -162,9 +154,8 @@ static const struct engine_info intel_engines[] = {
{ .gen = 7, .base = VEBOX_RING_BASE }
},
},
[VECS2] = {
.hw_id = VECS2_HW,
.uabi_id = I915_EXEC_VEBOX,
[VECS1] = {
.hw_id = VECS1_HW,
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 1,
.mmio_bases = {
@ -313,7 +304,10 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
if (!engine)
return -ENOMEM;
BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
engine->id = id;
engine->mask = BIT(id);
engine->i915 = dev_priv;
__sprint_engine_name(engine->name, info);
engine->hw_id = engine->guc_id = info->hw_id;
@ -321,7 +315,6 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->class = info->class;
engine->instance = info->instance;
engine->uabi_id = info->uabi_id;
engine->uabi_class = intel_engine_classes[info->class].uabi_class;
engine->context_size = __intel_engine_context_size(dev_priv,
@ -355,15 +348,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
const unsigned int engine_mask = INTEL_INFO(dev_priv)->engine_mask;
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int mask = 0;
unsigned int i;
int err;
WARN_ON(ring_mask == 0);
WARN_ON(ring_mask &
WARN_ON(engine_mask == 0);
WARN_ON(engine_mask &
GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
if (i915_inject_load_failure())
@ -377,7 +370,7 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
if (err)
goto cleanup;
mask |= ENGINE_MASK(i);
mask |= BIT(i);
}
/*
@ -385,16 +378,16 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
* are added to the driver by a warning and disabling the forgotten
* engines.
*/
if (WARN_ON(mask != ring_mask))
device_info->ring_mask = mask;
if (WARN_ON(mask != engine_mask))
device_info->engine_mask = mask;
/* We always presume we have at least RCS available for later probing */
if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
if (WARN_ON(!HAS_ENGINE(dev_priv, RCS0))) {
err = -ENODEV;
goto cleanup;
}
RUNTIME_INFO(dev_priv)->num_rings = hweight32(mask);
RUNTIME_INFO(dev_priv)->num_engines = hweight32(mask);
i915_check_and_clear_faults(dev_priv);
@ -455,12 +448,6 @@ cleanup:
return err;
}
void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}
static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
i915_gem_batch_pool_init(&engine->batch_pool, engine);
@ -614,10 +601,44 @@ err_hwsp:
return err;
}
static void __intel_context_unpin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
{
intel_context_unpin(to_intel_context(ctx, engine));
static const struct {
u8 engine;
u8 sched;
} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) }
MAP(PREEMPTION, PREEMPTION),
MAP(SEMAPHORES, SEMAPHORES),
#undef MAP
};
struct intel_engine_cs *engine;
enum intel_engine_id id;
u32 enabled, disabled;
enabled = 0;
disabled = 0;
for_each_engine(engine, i915, id) { /* all engines must agree! */
int i;
if (engine->schedule)
enabled |= (I915_SCHEDULER_CAP_ENABLED |
I915_SCHEDULER_CAP_PRIORITY);
else
disabled |= (I915_SCHEDULER_CAP_ENABLED |
I915_SCHEDULER_CAP_PRIORITY);
for (i = 0; i < ARRAY_SIZE(map); i++) {
if (engine->flags & BIT(map[i].engine))
enabled |= BIT(map[i].sched);
else
disabled |= BIT(map[i].sched);
}
}
i915->caps.scheduler = enabled & ~disabled;
if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
i915->caps.scheduler = 0;
}
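A capability survives the loop above only if every engine enables it and no engine disables it. A minimal standalone sketch of that agreement rule, using toy capability names rather than the driver's:
#include <stdio.h>
#include <stdint.h>
#define CAP_ENABLED    (1u << 0)
#define CAP_PREEMPTION (1u << 1)
#define CAP_SEMAPHORES (1u << 2)
int main(void)
{
	/* Hypothetical per-engine capability masks. */
	uint32_t engines[] = {
		CAP_ENABLED | CAP_PREEMPTION | CAP_SEMAPHORES,
		CAP_ENABLED | CAP_PREEMPTION, /* this engine lacks semaphores */
	};
	uint32_t enabled = 0, disabled = 0;
	unsigned int i;
	for (i = 0; i < sizeof(engines) / sizeof(engines[0]); i++) {
		enabled |= engines[i];
		disabled |= ~engines[i];
	}
	/* Only bits set on every engine remain: ENABLED | PREEMPTION. */
	printf("caps = %#x\n", enabled & ~disabled);
	return 0;
}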
struct measure_breadcrumb {
@ -670,6 +691,20 @@ out_frame:
return dw;
}
static int pin_context(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
struct intel_context **out)
{
struct intel_context *ce;
ce = intel_context_pin(ctx, engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
*out = ce;
return 0;
}
/**
* intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@ -684,11 +719,8 @@ out_frame:
int intel_engine_init_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_context *ce;
int ret;
engine->set_default_submission(engine);
/* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
@ -696,36 +728,34 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* be available. To avoid this we always pin the default
* context.
*/
ce = intel_context_pin(i915->kernel_context, engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
ret = pin_context(i915->kernel_context, engine,
&engine->kernel_context);
if (ret)
return ret;
/*
* Similarly the preempt context must always be available so that
* we can interrupt the engine at any time.
* we can interrupt the engine at any time. However, as preemption
* is optional, we allow it to fail.
*/
if (i915->preempt_context) {
ce = intel_context_pin(i915->preempt_context, engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
goto err_unpin_kernel;
}
}
if (i915->preempt_context)
pin_context(i915->preempt_context, engine,
&engine->preempt_context);
ret = measure_breadcrumb_dw(engine);
if (ret < 0)
goto err_unpin_preempt;
goto err_unpin;
engine->emit_fini_breadcrumb_dw = ret;
engine->set_default_submission(engine);
return 0;
err_unpin_preempt:
if (i915->preempt_context)
__intel_context_unpin(i915->preempt_context, engine);
err_unpin_kernel:
__intel_context_unpin(i915->kernel_context, engine);
err_unpin:
if (engine->preempt_context)
intel_context_unpin(engine->preempt_context);
intel_context_unpin(engine->kernel_context);
return ret;
}
@ -738,8 +768,6 @@ err_unpin_kernel:
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
@ -749,9 +777,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
if (i915->preempt_context)
__intel_context_unpin(i915->preempt_context, engine);
__intel_context_unpin(i915->kernel_context, engine);
if (engine->preempt_context)
intel_context_unpin(engine->preempt_context);
intel_context_unpin(engine->kernel_context);
i915_timeline_fini(&engine->timeline);
@ -930,7 +958,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
default:
instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
if (engine->id != RCS)
if (engine->id != RCS0)
break;
instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
@ -946,7 +974,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
case 7:
instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
if (engine->id != RCS)
if (engine->id != RCS0)
break;
instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
@ -959,7 +987,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
case 4:
instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
if (engine->id == RCS)
if (engine->id == RCS0)
/* HACK: Using the wrong struct member */
instdone->slice_common = I915_READ(GEN4_INSTDONE1);
break;
@ -1007,16 +1035,10 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
*/
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
/* More white lies: if wedged, hw state is inconsistent */
if (i915_terminally_wedged(&dev_priv->gpu_error))
if (i915_reset_failed(engine->i915))
return true;
/* Any inflight/incomplete requests? */
if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
return false;
/* Waiting to drain ELSP? */
if (READ_ONCE(engine->execlists.active)) {
struct tasklet_struct *t = &engine->execlists.tasklet;
@ -1045,7 +1067,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return ring_is_idle(engine);
}
bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
bool intel_engines_are_idle(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@ -1054,10 +1076,14 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
* If the driver is wedged, HW state may be very inconsistent and
* report that it is still busy, even though we have stopped using it.
*/
if (i915_terminally_wedged(&dev_priv->gpu_error))
if (i915_reset_failed(i915))
return true;
for_each_engine(engine, dev_priv, id) {
/* Already parked (and passed an idleness test); must still be idle */
if (!READ_ONCE(i915->gt.awake))
return true;
for_each_engine(engine, i915, id) {
if (!intel_engine_is_idle(engine))
return false;
}
@ -1065,34 +1091,6 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
return true;
}
/**
* intel_engine_has_kernel_context:
* @engine: the engine
*
* Returns true if the last context to be executed on this engine, or has been
* executed if the engine is already idle, is the kernel context
* (#i915.kernel_context).
*/
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
const struct intel_context *kernel_context =
to_intel_context(engine->i915->kernel_context, engine);
struct i915_request *rq;
lockdep_assert_held(&engine->i915->drm.struct_mutex);
/*
* Check the last context seen by the engine. If active, it will be
* the last request that remains in the timeline. When idle, it is
* the last executed context as tracked by retirement.
*/
rq = __i915_active_request_peek(&engine->timeline.last_request);
if (rq)
return rq->hw_context == kernel_context;
else
return engine->last_retired_context == kernel_context;
}
void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@ -1180,6 +1178,8 @@ void intel_engines_park(struct drm_i915_private *i915)
i915_gem_batch_pool_fini(&engine->batch_pool);
engine->execlists.no_priolist = false;
}
i915->gt.active_engines = 0;
}
/**
@ -1283,15 +1283,14 @@ static void print_request(struct drm_printer *m,
x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
drm_printf(m, "%s%x%s%s [%llx:%llx]%s @ %dms: %s\n",
drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
prefix,
rq->global_seqno,
rq->fence.context, rq->fence.seqno,
i915_request_completed(rq) ? "!" :
i915_request_started(rq) ? "*" :
"",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&rq->fence.flags) ? "+" : "",
rq->fence.context, rq->fence.seqno,
buf,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
name);
@ -1334,7 +1333,7 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
&engine->execlists;
u64 addr;
if (engine->id == RCS && IS_GEN_RANGE(dev_priv, 4, 7))
if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
drm_printf(m, "\tRING_START: 0x%08x\n",
I915_READ(RING_START(engine->mmio_base)));
@ -1425,10 +1424,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
char hdr[80];
snprintf(hdr, sizeof(hdr),
"\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x}, rq: ",
"\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
idx, count,
i915_ggtt_offset(rq->ring->vma),
rq->timeline->hwsp_offset);
rq->timeline->hwsp_offset,
hwsp_seqno(rq));
print_request(m, rq, hdr);
} else {
drm_printf(m, "\t\tELSP[%d] idle\n", idx);
@ -1495,13 +1495,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
va_end(ap);
}
if (i915_terminally_wedged(&engine->i915->gpu_error))
if (i915_reset_failed(engine->i915))
drm_printf(m, "*** WEDGED ***\n");
drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
intel_engine_get_seqno(engine),
intel_engine_last_submit(engine),
engine->hangcheck.seqno,
drm_printf(m, "\tHangcheck %x:%x [%d ms]\n",
engine->hangcheck.last_seqno,
engine->hangcheck.next_seqno,
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
@ -1521,7 +1520,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (&rq->link != &engine->timeline.requests)
print_request(m, rq, "\t\tlast ");
rq = i915_gem_find_active_request(engine);
rq = intel_engine_find_active_request(engine);
if (rq) {
print_request(m, rq, "\t\tactive ");
@ -1688,6 +1687,51 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
static bool match_ring(struct i915_request *rq)
{
struct drm_i915_private *dev_priv = rq->i915;
u32 ring = I915_READ(RING_START(rq->engine->mmio_base));
return ring == i915_ggtt_offset(rq->ring->vma);
}
struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine)
{
struct i915_request *request, *active = NULL;
unsigned long flags;
/*
* We are called by error capture, by reset, and to dump engine
* state at random points in time. In particular, note that none of
* these is crucially ordered with an interrupt. After a hang, the GPU is dead
* and we assume that no more writes can happen (we waited long enough
* for all writes that were in flight to be flushed) - adding an
* extra delay for a recent interrupt is pointless. Hence, we do
* not need an engine->irq_seqno_barrier() before the seqno reads.
* At all other times, we must assume the GPU is still running, but
* we only care about the snapshot of this moment.
*/
spin_lock_irqsave(&engine->timeline.lock, flags);
list_for_each_entry(request, &engine->timeline.requests, link) {
if (i915_request_completed(request))
continue;
if (!i915_request_started(request))
break;
/* More than one preemptible request may match! */
if (!match_ring(request))
break;
active = request;
break;
}
spin_unlock_irqrestore(&engine->timeline.lock, flags);
return active;
}
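The scan above relies on the timeline being in submission order. A standalone sketch of the same walk, with started/completed flags standing in for the request state checks:
#include <stdio.h>
struct req { int started, completed; const char *name; };
int main(void)
{
	/* Requests in submission order, mimicking engine->timeline.requests. */
	struct req timeline[] = {
		{ 1, 1, "rq0" }, /* completed, skip */
		{ 1, 0, "rq1" }, /* started but incomplete: executing now */
		{ 0, 0, "rq2" }, /* not started, nothing later can be active */
	};
	const char *active = NULL;
	unsigned int i;
	for (i = 0; i < sizeof(timeline) / sizeof(timeline[0]); i++) {
		if (timeline[i].completed)
			continue;
		if (!timeline[i].started)
			break;
		active = timeline[i].name;
		break;
	}
	printf("active: %s\n", active ? active : "none"); /* prints rq1 */
	return 0;
}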
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#include "selftests/intel_engine_cs.c"


@ -0,0 +1,525 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#ifndef __INTEL_ENGINE_TYPES__
#define __INTEL_ENGINE_TYPES__
#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/types.h>
#include "i915_timeline_types.h"
#include "intel_device_info.h"
#include "intel_workarounds_types.h"
#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"
#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 8
#define I915_CMD_HASH_ORDER 9
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct intel_hw_status_page {
struct i915_vma *vma;
u32 *addr;
};
struct intel_instdone {
u32 instdone;
/* The following exist only in the RCS engine */
u32 slice_common;
u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
struct intel_engine_hangcheck {
u64 acthd;
u32 last_seqno;
u32 next_seqno;
unsigned long action_timestamp;
struct intel_instdone instdone;
};
struct intel_ring {
struct kref ref;
struct i915_vma *vma;
void *vaddr;
struct i915_timeline *timeline;
struct list_head request_list;
struct list_head active_link;
u32 head;
u32 tail;
u32 emit;
u32 space;
u32 size;
u32 effective_size;
};
/*
* we use a single page to load ctx workarounds so all of these
* values are referred in terms of dwords
*
* struct i915_wa_ctx_bb:
* offset: specifies batch starting position, also helpful in case
we want to have multiple batches at different offsets based on
* some criteria. It is not a requirement at the moment but provides
* an option for future use.
* size: size of the batch in DWORDS
*/
struct i915_ctx_workarounds {
struct i915_wa_ctx_bb {
u32 offset;
u32 size;
} indirect_ctx, per_ctx;
struct i915_vma *vma;
};
#define I915_MAX_VCS 4
#define I915_MAX_VECS 2
/*
* Engine IDs definitions.
* Keep instances of the same type engine together.
*/
enum intel_engine_id {
RCS0 = 0,
BCS0,
VCS0,
VCS1,
VCS2,
VCS3,
#define _VCS(n) (VCS0 + (n))
VECS0,
VECS1
#define _VECS(n) (VECS0 + (n))
};
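With per-instance IDs, an engine's bit in the device mask is simply BIT(id), and _VCS(n)/_VECS(n) map an instance number back to an id. A small standalone sketch (the fused-off configuration is made up):
#include <assert.h>
#include <stdint.h>
enum engine_id { RCS0, BCS0, VCS0, VCS1, VCS2, VCS3, VECS0, VECS1 };
#define _VCS(n)  (VCS0 + (n))
#define _VECS(n) (VECS0 + (n))
#define BIT(x)   (1u << (x))
int main(void)
{
	/* Hypothetical part with render, blitter and two of four VCS engines. */
	uint32_t engine_mask =
		BIT(RCS0) | BIT(BCS0) | BIT(_VCS(0)) | BIT(_VCS(2));
	assert(engine_mask & BIT(VCS2));       /* third VCS instance present */
	assert(!(engine_mask & BIT(_VCS(1)))); /* VCS1 fused off in this sketch */
	return 0;
}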
struct st_preempt_hang {
struct completion completion;
unsigned int count;
bool inject_hang;
};
/**
* struct intel_engine_execlists - execlist submission queue and port state
*
* The struct intel_engine_execlists represents the combined logical state of
* driver and the hardware state for execlist mode of submission.
*/
struct intel_engine_execlists {
/**
* @tasklet: softirq tasklet for bottom handler
*/
struct tasklet_struct tasklet;
/**
* @default_priolist: priority list for I915_PRIORITY_NORMAL
*/
struct i915_priolist default_priolist;
/**
* @no_priolist: priority lists disabled
*/
bool no_priolist;
/**
* @submit_reg: gen-specific execlist submission register
* set to the ExecList Submission Port (elsp) register pre-Gen11 and to
* the ExecList Submission Queue Contents register array for Gen11+
*/
u32 __iomem *submit_reg;
/**
* @ctrl_reg: the enhanced execlists control register, used to load the
* submit queue on the HW and to request preemptions to idle
*/
u32 __iomem *ctrl_reg;
/**
* @port: execlist port states
*
* For each hardware ELSP (ExecList Submission Port) we keep
* track of the last request and the number of times we submitted
* that port to hw. We then count the number of times the hw reports
* a context completion or preemption. As only one context can
be active on hw, we limit resubmission of a context to port[0]. This
is called a Lite Restore of the context.
*/
struct execlist_port {
/**
* @request_count: combined request and submission count
*/
struct i915_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)
/**
* @context_id: context ID for port
*/
GEM_DEBUG_DECL(u32 context_id);
#define EXECLIST_MAX_PORTS 2
} port[EXECLIST_MAX_PORTS];
/**
* @active: is the HW active? We consider the HW as active after
* submitting any context for execution and until we have seen the
* last context completion event. After that, we do not expect any
* more events until we submit, and so can park the HW.
*
* As we have a small number of different sources from which we feed
* the HW, we track the state of each inside a single bitfield.
*/
unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
#define EXECLISTS_ACTIVE_HWACK 2
/**
* @port_mask: number of execlist ports - 1
*/
unsigned int port_mask;
/**
* @queue_priority_hint: Highest pending priority.
*
* When we add requests into the queue, or adjust the priority of
* executing requests, we compute the maximum priority of those
* pending requests. We can then use this value to determine if
* we need to preempt the executing requests to service the queue.
* However, since we may have recorded the priority of an inflight
* request that we wanted to preempt but which has since completed, by
* the time of dequeuing the priority hint may no longer match the
* highest available request priority.
*/
int queue_priority_hint;
/**
* @queue: queue of requests, in priority lists
*/
struct rb_root_cached queue;
/**
* @csb_write: control register for Context Switch buffer
*
* Note this register may be either mmio or HWSP shadow.
*/
u32 *csb_write;
/**
* @csb_status: status array for Context Switch buffer
*
* Note these registers may be either mmio or HWSP shadow.
*/
u32 *csb_status;
/**
* @preempt_complete_status: expected CSB upon completing preemption
*/
u32 preempt_complete_status;
/**
* @csb_head: context status buffer head
*/
u8 csb_head;
I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};
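The port_pack()/port_unpack() macros above lean on ptr_pack_bits() and friends from i915_utils.h: request pointers are at least 4-byte aligned, so the two low bits are free to carry the submission count. A standalone sketch of the idea:
#include <assert.h>
#include <stdint.h>
#define COUNT_BITS 2
#define COUNT_MASK ((1ul << COUNT_BITS) - 1)
static void *pack(void *ptr, unsigned long count)
{
	assert(((uintptr_t)ptr & COUNT_MASK) == 0); /* alignment frees the bits */
	assert(count <= COUNT_MASK);
	return (void *)((uintptr_t)ptr | count);
}
static void *unpack(void *packed, unsigned long *count)
{
	*count = (uintptr_t)packed & COUNT_MASK;
	return (void *)((uintptr_t)packed & ~COUNT_MASK);
}
int main(void)
{
	static int request __attribute__((aligned(4))); /* stand-in object */
	unsigned long count;
	void *slot = pack(&request, 1); /* submitted once */
	assert(unpack(slot, &count) == &request && count == 1);
	return 0;
}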
#define INTEL_ENGINE_CS_MAX_NAME 8
struct intel_engine_cs {
struct drm_i915_private *i915;
char name[INTEL_ENGINE_CS_MAX_NAME];
enum intel_engine_id id;
unsigned int hw_id;
unsigned int guc_id;
intel_engine_mask_t mask;
u8 uabi_class;
u8 class;
u8 instance;
u32 context_size;
u32 mmio_base;
struct intel_ring *buffer;
struct i915_timeline timeline;
struct intel_context *kernel_context; /* pinned */
struct intel_context *preempt_context; /* pinned; optional */
struct drm_i915_gem_object *default_state;
void *pinned_default_state;
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
* heavyweight seqno dance, we delegate the task (of being the
* bottom-half of the user interrupt) to the first client. After
* every interrupt, we wake up one client, who does the heavyweight
* coherent seqno read and either goes back to sleep (if incomplete),
* or wakes up all the completed clients in parallel, before then
* transferring the bottom-half status to the next client in the queue.
*
* Compared to walking the entire list of waiters in a single dedicated
* bottom-half, we reduce the latency of the first waiter by avoiding
* a context switch, but incur additional coherent seqno reads when
* following the chain of request breadcrumbs. Since it is most likely
* that we have a single client waiting on each seqno, then reducing
* the overhead of waking that client is much preferred.
*/
struct intel_breadcrumbs {
spinlock_t irq_lock;
struct list_head signalers;
struct irq_work irq_work; /* for use from inside irq_lock */
unsigned int irq_enabled;
bool irq_armed;
} breadcrumbs;
struct intel_engine_pmu {
/**
* @enable: Bitmask of enable sample events on this engine.
*
* Bits correspond to sample event types, for instance
* I915_SAMPLE_QUEUED is bit 0 etc.
*/
u32 enable;
/**
* @enable_count: Reference count for the enabled samplers.
*
* Index number corresponds to @enum drm_i915_pmu_engine_sample.
*/
unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
/**
* @sample: Counter values for sampling events.
*
* Our internal timer stores the current counters in this field.
*
* Index number corresponds to @enum drm_i915_pmu_engine_sample.
*/
struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
} pmu;
/*
* A pool of objects to use as shadow copies of client batch buffers
* when the command parser is enabled. Prevents the client from
* modifying the batch contents after software parsing.
*/
struct i915_gem_batch_pool batch_pool;
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
struct i915_wa_list ctx_wa_list;
struct i915_wa_list wa_list;
struct i915_wa_list whitelist;
u32 irq_keep_mask; /* always keep these interrupts */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
void (*irq_enable)(struct intel_engine_cs *engine);
void (*irq_disable)(struct intel_engine_cs *engine);
int (*init_hw)(struct intel_engine_cs *engine);
struct {
void (*prepare)(struct intel_engine_cs *engine);
void (*reset)(struct intel_engine_cs *engine, bool stalled);
void (*finish)(struct intel_engine_cs *engine);
} reset;
void (*park)(struct intel_engine_cs *engine);
void (*unpark)(struct intel_engine_cs *engine);
void (*set_default_submission)(struct intel_engine_cs *engine);
const struct intel_context_ops *cops;
int (*request_alloc)(struct i915_request *rq);
int (*init_context)(struct i915_request *rq);
int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH BIT(1)
#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
int (*emit_bb_start)(struct i915_request *rq,
u64 offset, u32 length,
unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
int (*emit_init_breadcrumb)(struct i915_request *rq);
u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
u32 *cs);
unsigned int emit_fini_breadcrumb_dw;
/* Pass the request to the hardware queue (e.g. directly into
* the legacy ringbuffer or to the end of an execlist).
*
* This is called from an atomic context with irqs disabled; must
* be irq safe.
*/
void (*submit_request)(struct i915_request *rq);
/*
* Call when the priority on a request has changed and it and its
* dependencies may need rescheduling. Note the request itself may
* not be ready to run!
*/
void (*schedule)(struct i915_request *request,
const struct i915_sched_attr *attr);
/*
* Cancel all requests on the hardware, or queued for execution.
* This should only cancel the ready requests that have been
* submitted to the engine (via the engine->submit_request callback).
* This is called when marking the device as wedged.
*/
void (*cancel_requests)(struct intel_engine_cs *engine);
void (*cleanup)(struct intel_engine_cs *engine);
struct intel_engine_execlists execlists;
/* Contexts are pinned whilst they are active on the GPU. The last
* context executed remains active whilst the GPU is idle - the
* switch away and write to the context object only occurs on the
* next execution. Contexts are only unpinned on retirement of the
* following request ensuring that we can always write to the object
* on the context switch even after idling. Across suspend, we switch
* to the kernel context and trash it as the save may not happen
* before the hardware is powered down.
*/
struct intel_context *last_retired_context;
/* status_notifier: list of callbacks for context-switch changes */
struct atomic_notifier_head context_status_notifier;
struct intel_engine_hangcheck hangcheck;
#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
unsigned int flags;
/*
* Table of commands the command parser needs to know about
* for this engine.
*/
DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
/*
* Table of registers allowed in commands that read/write registers.
*/
const struct drm_i915_reg_table *reg_tables;
int reg_table_count;
/*
* Returns the bitmask for the length field of the specified command.
* Return 0 for an unrecognized/invalid command.
*
* If the command parser finds an entry for a command in the engine's
* cmd_tables, it gets the command's length based on the table entry.
* If not, it calls this function to determine the per-engine length
* field encoding for the command (i.e. different opcode ranges use
* certain bits to encode the command length in the header).
*/
u32 (*get_cmd_length_mask)(u32 cmd_header);
struct {
/**
* @lock: Lock protecting the below fields.
*/
seqlock_t lock;
/**
* @enabled: Reference count indicating number of listeners.
*/
unsigned int enabled;
/**
* @active: Number of contexts currently scheduled in.
*/
unsigned int active;
/**
* @enabled_at: Timestamp when busy stats were enabled.
*/
ktime_t enabled_at;
/**
* @start: Timestamp of the last idle to active transition.
*
* Idle is defined as active == 0, busy as active > 0.
*/
ktime_t start;
/**
* @total: Total time this engine was busy.
*
* Accumulated time not counting the most recent block in cases
* where engine is currently busy (active > 0).
*/
ktime_t total;
} stats;
};
static inline bool
intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
{
return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
}
static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}
static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}
static inline bool
intel_engine_has_semaphores(const struct intel_engine_cs *engine)
{
return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
}
#define instdone_slice_mask(dev_priv__) \
(IS_GEN(dev_priv__, 7) ? \
1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
#define instdone_subslice_mask(dev_priv__) \
(IS_GEN(dev_priv__, 7) ? \
1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
for ((slice__) = 0, (subslice__) = 0; \
(slice__) < I915_MAX_SLICES; \
(subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
(slice__) += ((subslice__) == 0)) \
for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
(BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
#endif /* __INTEL_ENGINE_TYPES__ */
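The for_each_instdone_slice_subslice() iterator above advances the subslice as the inner counter and bumps the slice only when the subslice wraps, filtering both against the fused-off masks. A standalone sketch of the traversal with made-up masks:
#include <stdio.h>
#define MAX_SLICES 3
#define MAX_SUBSLICES 8
int main(void)
{
	unsigned int slice_mask = 0x1;    /* hypothetical: one slice */
	unsigned int subslice_mask = 0x7; /* hypothetical: three subslices */
	int s, ss;
	/* Same stepping as the macro: ss is the fast counter, s follows. */
	for (s = 0, ss = 0;
	     s < MAX_SLICES;
	     ss = (ss + 1) < MAX_SUBSLICES ? ss + 1 : 0, s += (ss == 0)) {
		if ((slice_mask & (1u << s)) && (subslice_mask & (1u << ss)))
			printf("slice %d subslice %d\n", s, ss);
	}
	return 0;
}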


@ -105,8 +105,13 @@
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
#define MI_SEMAPHORE_POLL (1<<15)
#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
#define MI_SEMAPHORE_POLL (1 << 15)
#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12)
#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
#define MI_SEMAPHORE_SAD_LT_SDD (2 << 12)
#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12)
#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12)
#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
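The new MI_SEMAPHORE_SAD_* values fill out all six comparison modes for MI_SEMAPHORE_WAIT (semaphore address data vs. semaphore data dword). A hedged sketch of how one of them might be combined into a wait command; the helper name and the 4-dword layout follow the usual gen8+ form but are illustrative, not taken from this patch:
#include <stdint.h>
#include <stdio.h>
#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
#define MI_SEMAPHORE_WAIT        MI_INSTR(0x1c, 2) /* GEN8+ */
#define MI_SEMAPHORE_POLL        (1 << 15)
#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
/* Hypothetical helper: emit "wait until *addr >= value". */
static uint32_t *emit_semaphore_wait(uint32_t *cs, uint32_t value, uint64_t addr)
{
	*cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_GTE_SDD;
	*cs++ = value;                  /* semaphore data dword (SDD) */
	*cs++ = (uint32_t)addr;         /* semaphore address, low */
	*cs++ = (uint32_t)(addr >> 32); /* semaphore address, high */
	return cs;
}
int main(void)
{
	uint32_t batch[4];
	emit_semaphore_wait(batch, 42, 0x1000);
	printf("header dword: %#x\n", batch[0]);
	return 0;
}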


@ -203,11 +203,19 @@ int intel_guc_init(struct intel_guc *guc)
goto err_log;
GEM_BUG_ON(!guc->ads_vma);
if (HAS_GUC_CT(dev_priv)) {
ret = intel_guc_ct_init(&guc->ct);
if (ret)
goto err_ads;
}
/* We need to notify the guc whenever we change the GGTT */
i915_ggtt_enable_guc(dev_priv);
return 0;
err_ads:
intel_guc_ads_destroy(guc);
err_log:
intel_guc_log_destroy(&guc->log);
err_shared:
@ -222,6 +230,10 @@ void intel_guc_fini(struct intel_guc *guc)
struct drm_i915_private *dev_priv = guc_to_i915(guc);
i915_ggtt_disable_guc(dev_priv);
if (HAS_GUC_CT(dev_priv))
intel_guc_ct_fini(&guc->ct);
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
guc_shared_data_destroy(guc);


@ -32,6 +32,7 @@
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_uc_fw.h"
#include "i915_utils.h"
#include "i915_vma.h"
struct guc_preempt_work {


@ -121,8 +121,7 @@ int intel_guc_ads_create(struct intel_guc *guc)
* to find it. Note that we have to skip our header (1 page),
* because our GuC shared data is there.
*/
kernel_ctx_vma = to_intel_context(dev_priv->kernel_context,
dev_priv->engine[RCS])->state;
kernel_ctx_vma = dev_priv->engine[RCS0]->kernel_context->state;
blob->ads.golden_context_lrca =
intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;


@ -140,11 +140,6 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
return err;
}
static bool ctch_is_open(struct intel_guc_ct_channel *ctch)
{
return ctch->vma != NULL;
}
static int ctch_init(struct intel_guc *guc,
struct intel_guc_ct_channel *ctch)
{
@ -214,25 +209,21 @@ err_out:
static void ctch_fini(struct intel_guc *guc,
struct intel_guc_ct_channel *ctch)
{
GEM_BUG_ON(ctch->enabled);
i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
}
static int ctch_open(struct intel_guc *guc,
struct intel_guc_ct_channel *ctch)
static int ctch_enable(struct intel_guc *guc,
struct intel_guc_ct_channel *ctch)
{
u32 base;
int err;
int i;
CT_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
ctch->owner, yesno(ctch_is_open(ctch)));
GEM_BUG_ON(!ctch->vma);
if (!ctch->vma) {
err = ctch_init(guc, ctch);
if (unlikely(err))
goto err_out;
GEM_BUG_ON(!ctch->vma);
}
GEM_BUG_ON(ctch->enabled);
/* vma should be already allocated and map'ed */
base = intel_guc_ggtt_offset(guc, ctch->vma);
@ -255,7 +246,7 @@ static int ctch_open(struct intel_guc *guc,
base + PAGE_SIZE/4 * CTB_RECV,
INTEL_GUC_CT_BUFFER_TYPE_RECV);
if (unlikely(err))
goto err_fini;
goto err_out;
err = guc_action_register_ct_buffer(guc,
base + PAGE_SIZE/4 * CTB_SEND,
@ -263,23 +254,25 @@ static int ctch_open(struct intel_guc *guc,
if (unlikely(err))
goto err_deregister;
ctch->enabled = true;
return 0;
err_deregister:
guc_action_deregister_ct_buffer(guc,
ctch->owner,
INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_fini:
ctch_fini(guc, ctch);
err_out:
DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
return err;
}
static void ctch_close(struct intel_guc *guc,
struct intel_guc_ct_channel *ctch)
static void ctch_disable(struct intel_guc *guc,
struct intel_guc_ct_channel *ctch)
{
GEM_BUG_ON(!ctch_is_open(ctch));
GEM_BUG_ON(!ctch->enabled);
ctch->enabled = false;
guc_action_deregister_ct_buffer(guc,
ctch->owner,
@ -287,7 +280,6 @@ static void ctch_close(struct intel_guc *guc,
guc_action_deregister_ct_buffer(guc,
ctch->owner,
INTEL_GUC_CT_BUFFER_TYPE_RECV);
ctch_fini(guc, ctch);
}
static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
@ -481,7 +473,7 @@ static int ctch_send(struct intel_guc_ct *ct,
u32 fence;
int err;
GEM_BUG_ON(!ctch_is_open(ctch));
GEM_BUG_ON(!ctch->enabled);
GEM_BUG_ON(!len);
GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
GEM_BUG_ON(!response_buf && response_buf_size);
@ -817,7 +809,7 @@ static void ct_process_host_channel(struct intel_guc_ct *ct)
u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
int err = 0;
if (!ctch_is_open(ctch))
if (!ctch->enabled)
return;
do {
@ -848,6 +840,51 @@ static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
ct_process_host_channel(ct);
}
/**
* intel_guc_ct_init - Init CT communication
* @ct: pointer to CT struct
*
* Allocate memory required for communication via
* the CT channel.
*
* Shall only be called for platforms with HAS_GUC_CT.
*
* Return: 0 on success, a negative errno code on failure.
*/
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
struct intel_guc_ct_channel *ctch = &ct->host_channel;
int err;
err = ctch_init(guc, ctch);
if (unlikely(err)) {
DRM_ERROR("CT: can't open channel %d; err=%d\n",
ctch->owner, err);
return err;
}
GEM_BUG_ON(!ctch->vma);
return 0;
}
/**
* intel_guc_ct_fini - Fini CT communication
* @ct: pointer to CT struct
*
* Deallocate memory required for communication via
* the CT channel.
*
* Shall only be called for platforms with HAS_GUC_CT.
*/
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
struct intel_guc_ct_channel *ctch = &ct->host_channel;
ctch_fini(guc, ctch);
}
/**
* intel_guc_ct_enable - Enable buffer based command transport.
* @ct: pointer to CT struct
@ -865,7 +902,10 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
GEM_BUG_ON(!HAS_GUC_CT(i915));
err = ctch_open(guc, ctch);
if (ctch->enabled)
return 0;
err = ctch_enable(guc, ctch);
if (unlikely(err))
return err;
@ -890,10 +930,10 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
GEM_BUG_ON(!HAS_GUC_CT(i915));
if (!ctch_is_open(ctch))
if (!ctch->enabled)
return;
ctch_close(guc, ctch);
ctch_disable(guc, ctch);
/* Disable send */
guc->send = intel_guc_send_nop;


@ -66,6 +66,7 @@ struct intel_guc_ct_channel {
struct intel_guc_ct_buffer ctbs[2];
u32 owner;
u32 next_fence;
bool enabled;
};
/** Holds all command transport channels.
@ -90,6 +91,8 @@ struct intel_guc_ct {
};
void intel_guc_ct_init_early(struct intel_guc_ct *ct);
int intel_guc_ct_init(struct intel_guc_ct *ct);
void intel_guc_ct_fini(struct intel_guc_ct *ct);
int intel_guc_ct_enable(struct intel_guc_ct *ct);
void intel_guc_ct_disable(struct intel_guc_ct *ct);


@ -620,7 +620,12 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
void intel_guc_log_relay_close(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_i915(guc);
guc_log_disable_flush_events(log);
synchronize_irq(i915->drm.irq);
flush_work(&log->relay.flush_work);
mutex_lock(&log->relay.lock);


@ -382,7 +382,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
desc->db_id = client->doorbell_id;
for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
struct intel_context *ce = to_intel_context(ctx, engine);
struct intel_context *ce = intel_context_lookup(ctx, engine);
u32 guc_engine_id = engine->guc_id;
struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
@ -393,7 +393,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
* for now who owns a GuC client. But for future owner of GuC
* client, need to make sure lrc is pinned prior to enter here.
*/
if (!ce->state)
if (!ce || !ce->state)
break; /* XXX: continue? */
/*
@ -535,7 +535,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
spin_lock(&client->wq_lock);
guc_wq_item_append(client, engine->guc_id, ctx_desc,
ring_tail, rq->global_seqno);
ring_tail, rq->fence.seqno);
guc_ring_doorbell(client);
client->submissions[engine->id] += 1;
@ -567,7 +567,7 @@ static void inject_preempt_context(struct work_struct *work)
preempt_work[engine->id]);
struct intel_guc_client *client = guc->preempt_client;
struct guc_stage_desc *stage_desc = __get_stage_desc(client);
struct intel_context *ce = to_intel_context(client->owner, engine);
struct intel_context *ce = intel_context_lookup(client->owner, engine);
u32 data[7];
if (!ce->ring->emit) { /* recreate upon load/resume */
@ -575,7 +575,7 @@ static void inject_preempt_context(struct work_struct *work)
u32 *cs;
cs = ce->ring->vaddr;
if (engine->id == RCS) {
if (engine->class == RENDER_CLASS) {
cs = gen8_emit_ggtt_write_rcs(cs,
GUC_PREEMPT_FINISHED,
addr,
@ -583,7 +583,8 @@ static void inject_preempt_context(struct work_struct *work)
} else {
cs = gen8_emit_ggtt_write(cs,
GUC_PREEMPT_FINISHED,
addr);
addr,
0);
*cs++ = MI_NOOP;
*cs++ = MI_NOOP;
}
@ -720,7 +721,7 @@ static inline int rq_prio(const struct i915_request *rq)
static inline int port_prio(const struct execlist_port *port)
{
return rq_prio(port_request(port));
return rq_prio(port_request(port)) | __NO_PREEMPTION;
}
static bool __guc_dequeue(struct intel_engine_cs *engine)
@ -781,8 +782,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
}
rb_erase_cached(&p->node, &execlists->queue);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
i915_priolist_free(p);
}
done:
execlists->queue_priority_hint =
@ -1031,7 +1031,7 @@ static int guc_clients_create(struct intel_guc *guc)
GEM_BUG_ON(guc->preempt_client);
client = guc_client_alloc(dev_priv,
INTEL_INFO(dev_priv)->ring_mask,
INTEL_INFO(dev_priv)->engine_mask,
GUC_CLIENT_PRIORITY_KMD_NORMAL,
dev_priv->kernel_context);
if (IS_ERR(client)) {
@ -1042,7 +1042,7 @@ static int guc_clients_create(struct intel_guc *guc)
if (dev_priv->preempt_context) {
client = guc_client_alloc(dev_priv,
INTEL_INFO(dev_priv)->ring_mask,
INTEL_INFO(dev_priv)->engine_mask,
GUC_CLIENT_PRIORITY_KMD_HIGH,
dev_priv->preempt_context);
if (IS_ERR(client)) {


@ -56,7 +56,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
int slice;
int subslice;
if (engine->id != RCS)
if (engine->id != RCS0)
return true;
intel_engine_get_instdone(engine, &instdone);
@ -120,7 +120,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
*/
tmp = I915_READ_CTL(engine);
if (tmp & RING_WAIT) {
i915_handle_error(dev_priv, BIT(engine->id), 0,
i915_handle_error(dev_priv, engine->mask, 0,
"stuck wait on %s", engine->name);
I915_WRITE_CTL(engine, tmp);
return ENGINE_WAIT_KICK;
@ -133,21 +133,21 @@ static void hangcheck_load_sample(struct intel_engine_cs *engine,
struct hangcheck *hc)
{
hc->acthd = intel_engine_get_active_head(engine);
hc->seqno = intel_engine_get_seqno(engine);
hc->seqno = intel_engine_get_hangcheck_seqno(engine);
}
static void hangcheck_store_sample(struct intel_engine_cs *engine,
const struct hangcheck *hc)
{
engine->hangcheck.acthd = hc->acthd;
engine->hangcheck.seqno = hc->seqno;
engine->hangcheck.last_seqno = hc->seqno;
}
static enum intel_engine_hangcheck_action
hangcheck_get_action(struct intel_engine_cs *engine,
const struct hangcheck *hc)
{
if (engine->hangcheck.seqno != hc->seqno)
if (engine->hangcheck.last_seqno != hc->seqno)
return ENGINE_ACTIVE_SEQNO;
if (intel_engine_is_idle(engine))
@ -263,7 +263,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (!READ_ONCE(dev_priv->gt.awake))
return;
if (i915_terminally_wedged(&dev_priv->gpu_error))
if (i915_terminally_wedged(dev_priv))
return;
/* As enabling the GPU requires fairly extensive mmio access,
@ -282,13 +282,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
hangcheck_store_sample(engine, &hc);
if (hc.stalled) {
hung |= intel_engine_flag(engine);
hung |= engine->mask;
if (hc.action != ENGINE_DEAD)
stuck |= intel_engine_flag(engine);
stuck |= engine->mask;
}
if (hc.wedged)
wedged |= intel_engine_flag(engine);
wedged |= engine->mask;
}
if (GEM_SHOW_DEBUG() && (hung | stuck)) {

[Diffs for two files are not shown here because of their size.]

@ -164,20 +164,15 @@
#define WA_TAIL_DWORDS 2
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
struct intel_context *ce);
#define ACTIVE_PRIORITY (I915_PRIORITY_NEWCLIENT | I915_PRIORITY_NOSEMAPHORE)
static int execlists_context_deferred_alloc(struct intel_context *ce,
struct intel_engine_cs *engine);
static void execlists_init_reg_state(u32 *reg_state,
struct i915_gem_context *ctx,
struct intel_context *ce,
struct intel_engine_cs *engine,
struct intel_ring *ring);
static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
return (i915_ggtt_offset(engine->status_page.vma) +
I915_GEM_HWS_INDEX_ADDR);
}
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
@ -188,6 +183,34 @@ static inline int rq_prio(const struct i915_request *rq)
return rq->sched.attr.priority;
}
static int effective_prio(const struct i915_request *rq)
{
int prio = rq_prio(rq);
/*
* On unwinding the active request, we give it a priority bump
* equivalent to a freshly submitted request. This protects it from
* being gazumped again, but it would be preferable if we didn't
* let it be gazumped in the first place!
*
* See __unwind_incomplete_requests()
*/
if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(rq)) {
/*
* After preemption, we insert the active request at the
* end of the new priority level. This means that we will be
* _lower_ priority than the preemptee all things equal (and
* so the preemption is valid), so adjust our comparison
* accordingly.
*/
prio |= ACTIVE_PRIORITY;
prio--;
}
/* Restrict mere WAIT boosts from triggering preemption */
return prio | __NO_PREEMPTION;
}
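The boost-then-decrement in effective_prio() makes an unwound request sort just below a freshly boosted one at the same base priority, so it keeps its place without being able to gazump a true newcomer. A toy illustration with made-up bit values:
#include <assert.h>
#define PRIO_NEWCLIENT   (1 << 0)
#define PRIO_NOSEMAPHORE (1 << 1)
#define ACTIVE_PRIO      (PRIO_NEWCLIENT | PRIO_NOSEMAPHORE)
static int effective(int prio, int started)
{
	if ((~prio & ACTIVE_PRIO) && started) {
		prio |= ACTIVE_PRIO; /* same bump as a fresh submission */
		prio--;              /* ...but compare just below it */
	}
	return prio;
}
int main(void)
{
	int base = 1 << 2; /* user priority, shifted past the boost bits */
	int fresh = base | ACTIVE_PRIO;
	/* The preempted-and-unwound request loses ties against a fresh one. */
	assert(effective(base, 1) == fresh - 1);
	return 0;
}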
static int queue_prio(const struct intel_engine_execlists *execlists)
{
struct i915_priolist *p;
@ -208,7 +231,7 @@ static int queue_prio(const struct intel_engine_execlists *execlists)
static inline bool need_preempt(const struct intel_engine_cs *engine,
const struct i915_request *rq)
{
const int last_prio = rq_prio(rq);
int last_prio;
if (!intel_engine_has_preemption(engine))
return false;
@ -228,6 +251,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
* preempt. If that hint is stale or we may be trying to preempt
* ourselves, ignore the request.
*/
last_prio = effective_prio(rq);
if (!__execlists_need_preempt(engine->execlists.queue_priority_hint,
last_prio))
return false;
@ -254,12 +278,11 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
}
__maybe_unused static inline bool
assert_priority_queue(const struct intel_engine_execlists *execlists,
const struct i915_request *prev,
assert_priority_queue(const struct i915_request *prev,
const struct i915_request *next)
{
if (!prev)
return true;
const struct intel_engine_execlists *execlists =
&prev->engine->execlists;
/*
* Without preemption, the prev may refer to the still active element
@ -300,11 +323,10 @@ assert_priority_queue(const struct intel_engine_execlists *execlists,
* engine info, SW context ID and SW counter need to form a unique number
* (Context ID) per lrc.
*/
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
struct intel_context *ce)
static u64
lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
{
struct i915_gem_context *ctx = ce->gem_context;
u64 desc;
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@ -322,7 +344,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
* Consider updating oa_get_render_ctx_id in i915_perf.c when changing
* anything below.
*/
if (INTEL_GEN(ctx->i915) >= 11) {
if (INTEL_GEN(engine->i915) >= 11) {
GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
/* bits 37-47 */
@ -339,7 +361,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
}
ce->lrc_desc = desc;
return desc;
}
static void unwind_wa_tail(struct i915_request *rq)
@ -353,7 +375,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct i915_request *rq, *rn, *active = NULL;
struct list_head *uninitialized_var(pl);
int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
int prio = I915_PRIORITY_INVALID | ACTIVE_PRIORITY;
lockdep_assert_held(&engine->timeline.lock);
@ -384,9 +406,21 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
* The active request is now effectively the start of a new client
* stream, so give it the equivalent small priority bump to prevent
* it being gazumped a second time by another peer.
*
* Note we have to be careful not to apply a priority boost to a request
* still spinning on its semaphores. If the request hasn't started, that
* means it is still waiting for its dependencies to be signaled, and
* if we apply a priority boost to this request, we will boost it past
* its signalers and so break PI.
*
* One consequence of this preemption boost is that we may jump
* over lesser priorities (such as I915_PRIORITY_WAIT), effectively
* making those priorities non-preemptible. They will be moved forward
* in the priority queue, but they will not gain immediate access to
* the GPU.
*/
if (!(prio & I915_PRIORITY_NEWCLIENT)) {
prio |= I915_PRIORITY_NEWCLIENT;
if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(active)) {
prio |= ACTIVE_PRIORITY;
active->sched.attr.priority = prio;
list_move_tail(&active->sched.link,
i915_sched_lookup_priolist(engine, prio));
@ -523,13 +557,11 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
desc = execlists_update_context(rq);
GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
GEM_TRACE("%s in[%d]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
engine->name, n,
port[n].context_id, count,
rq->global_seqno,
rq->fence.context, rq->fence.seqno,
hwsp_seqno(rq),
intel_engine_get_seqno(engine),
rq_prio(rq));
} else {
GEM_BUG_ON(!n);
@ -564,6 +596,17 @@ static bool can_merge_ctx(const struct intel_context *prev,
return true;
}
static bool can_merge_rq(const struct i915_request *prev,
const struct i915_request *next)
{
GEM_BUG_ON(!assert_priority_queue(prev, next));
if (!can_merge_ctx(prev->hw_context, next->hw_context))
return false;
return true;
}
static void port_assign(struct execlist_port *port, struct i915_request *rq)
{
GEM_BUG_ON(rq == port_request(port));
@ -577,8 +620,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
static void inject_preempt_context(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
struct intel_context *ce =
to_intel_context(engine->i915->preempt_context, engine);
struct intel_context *ce = engine->preempt_context;
unsigned int n;
GEM_BUG_ON(execlists->preempt_complete_status !=
@ -716,8 +758,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
GEM_BUG_ON(!assert_priority_queue(execlists, last, rq));
/*
* Can we combine this request with the current port?
* It has to be the same context/ringbuffer and not
@ -729,8 +769,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* second request, and so we never need to tell the
* hardware about the first.
*/
if (last &&
!can_merge_ctx(rq->hw_context, last->hw_context)) {
if (last && !can_merge_rq(last, rq)) {
/*
* If we are on the second port and cannot
* combine this request with the last, then we
@ -739,6 +778,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (port == last_port)
goto done;
/*
* We must not populate both ELSP[] with the
* same LRCA, i.e. we must submit 2 different
* contexts if we submit 2 ELSP.
*/
if (last->hw_context == rq->hw_context)
goto done;
/*
* If GVT overrides us we only ever submit
* port[0], leaving port[1] empty. Note that we
@ -750,7 +797,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
ctx_single_port_submission(rq->hw_context))
goto done;
GEM_BUG_ON(last->hw_context == rq->hw_context);
if (submit)
port_assign(port, last);
@ -769,8 +815,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
rb_erase_cached(&p->node, &execlists->queue);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
i915_priolist_free(p);
}
done:
@ -790,8 +835,7 @@ done:
* request triggering preemption on the next dequeue (or subsequent
* interrupt for secondary ports).
*/
execlists->queue_priority_hint =
port != execlists->port ? rq_prio(last) : INT_MIN;
execlists->queue_priority_hint = queue_prio(execlists);
if (submit) {
port_assign(port, last);
@ -821,13 +865,11 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
while (num_ports-- && port_isset(port)) {
struct i915_request *rq = port_request(port);
GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d:%d)\n",
GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n",
rq->engine->name,
(unsigned int)(port - execlists->port),
rq->global_seqno,
rq->fence.context, rq->fence.seqno,
hwsp_seqno(rq),
intel_engine_get_seqno(rq->engine));
hwsp_seqno(rq));
GEM_BUG_ON(!execlists->active);
execlists_context_schedule_out(rq,
@ -883,8 +925,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
struct rb_node *rb;
unsigned long flags;
GEM_TRACE("%s current %d\n",
engine->name, intel_engine_get_seqno(engine));
GEM_TRACE("%s\n", engine->name);
/*
* Before we call engine->cancel_requests(), we should have exclusive
@ -908,8 +949,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline.requests, link) {
GEM_BUG_ON(!rq->global_seqno);
if (!i915_request_signaled(rq))
dma_fence_set_error(&rq->fence, -EIO);
@ -929,14 +968,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
}
rb_erase_cached(&p->node, &execlists->queue);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
i915_priolist_free(p);
}
intel_write_status_page(engine,
I915_GEM_HWS_INDEX,
intel_engine_last_submit(engine));
/* Remaining _unready_ requests will be nop'ed when submitted */
execlists->queue_priority_hint = INT_MIN;
@ -1052,14 +1086,12 @@ static void process_csb(struct intel_engine_cs *engine)
EXECLISTS_ACTIVE_USER));
rq = port_unpack(port, &count);
GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
engine->name,
port->context_id, count,
rq ? rq->global_seqno : 0,
rq ? rq->fence.context : 0,
rq ? rq->fence.seqno : 0,
rq ? hwsp_seqno(rq) : 0,
intel_engine_get_seqno(engine),
rq ? rq_prio(rq) : 0);
/* Check the context/desc id for this event matches */
@ -1196,19 +1228,26 @@ static void execlists_submit_request(struct i915_request *request)
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static void execlists_context_destroy(struct intel_context *ce)
static void __execlists_context_fini(struct intel_context *ce)
{
GEM_BUG_ON(ce->pin_count);
if (!ce->state)
return;
intel_ring_free(ce->ring);
intel_ring_put(ce->ring);
GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
i915_gem_object_put(ce->state->obj);
}
static void execlists_context_destroy(struct kref *kref)
{
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
GEM_BUG_ON(intel_context_is_pinned(ce));
if (ce->state)
__execlists_context_fini(ce);
intel_context_free(ce);
}
static void execlists_context_unpin(struct intel_context *ce)
{
struct intel_engine_cs *engine;
@ -1240,11 +1279,9 @@ static void execlists_context_unpin(struct intel_context *ce)
ce->state->obj->pin_global--;
i915_gem_object_unpin_map(ce->state->obj);
i915_vma_unpin(ce->state);
i915_gem_context_put(ce->gem_context);
}
static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
static int __context_pin(struct i915_vma *vma)
{
unsigned int flags;
int err;
@ -1267,11 +1304,14 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
}
static void
__execlists_update_reg_state(struct intel_engine_cs *engine,
struct intel_context *ce)
__execlists_update_reg_state(struct intel_context *ce,
struct intel_engine_cs *engine)
{
u32 *regs = ce->lrc_reg_state;
struct intel_ring *ring = ce->ring;
u32 *regs = ce->lrc_reg_state;
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
regs[CTX_RING_HEAD + 1] = ring->head;
@ -1279,29 +1319,30 @@ __execlists_update_reg_state(struct intel_engine_cs *engine,
/* RPCS */
if (engine->class == RENDER_CLASS)
regs[CTX_R_PWR_CLK_STATE + 1] = gen8_make_rpcs(engine->i915,
&ce->sseu);
regs[CTX_R_PWR_CLK_STATE + 1] =
gen8_make_rpcs(engine->i915, &ce->sseu);
}
static struct intel_context *
__execlists_context_pin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx,
struct intel_context *ce)
static int
__execlists_context_pin(struct intel_context *ce,
struct intel_engine_cs *engine)
{
void *vaddr;
int ret;
ret = execlists_context_deferred_alloc(ctx, engine, ce);
GEM_BUG_ON(!ce->gem_context->ppgtt);
ret = execlists_context_deferred_alloc(ce, engine);
if (ret)
goto err;
GEM_BUG_ON(!ce->state);
ret = __context_pin(ctx, ce->state);
ret = __context_pin(ce->state);
if (ret)
goto err;
vaddr = i915_gem_object_pin_map(ce->state->obj,
i915_coherent_map_type(ctx->i915) |
i915_coherent_map_type(engine->i915) |
I915_MAP_OVERRIDE);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
@ -1312,21 +1353,16 @@ __execlists_context_pin(struct intel_engine_cs *engine,
if (ret)
goto unpin_map;
ret = i915_gem_context_pin_hw_id(ctx);
ret = i915_gem_context_pin_hw_id(ce->gem_context);
if (ret)
goto unpin_ring;
intel_lr_context_descriptor_update(ctx, engine, ce);
GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
ce->lrc_desc = lrc_descriptor(ce, engine);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
__execlists_update_reg_state(engine, ce);
__execlists_update_reg_state(ce, engine);
ce->state->obj->pin_global++;
i915_gem_context_get(ctx);
return ce;
return 0;
unpin_ring:
intel_ring_unpin(ce->ring);
@ -1335,33 +1371,20 @@ unpin_map:
unpin_vma:
__i915_vma_unpin(ce->state);
err:
ce->pin_count = 0;
return ERR_PTR(ret);
return ret;
}
static int execlists_context_pin(struct intel_context *ce)
{
return __execlists_context_pin(ce, ce->engine);
}
static const struct intel_context_ops execlists_context_ops = {
.pin = execlists_context_pin,
.unpin = execlists_context_unpin,
.destroy = execlists_context_destroy,
};
static struct intel_context *
execlists_context_pin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!ctx->ppgtt);
if (likely(ce->pin_count++))
return ce;
GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
ce->ops = &execlists_context_ops;
return __execlists_context_pin(engine, ctx, ce);
}
static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
u32 *cs;
@ -1387,6 +1410,10 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
*cs++ = rq->fence.seqno - 1;
intel_ring_advance(rq, cs);
/* Record the updated position of the request's payload */
rq->infix = intel_ring_offset(rq, cs);
return 0;
}
@ -1447,7 +1474,7 @@ static int execlists_request_alloc(struct i915_request *request)
{
int ret;
GEM_BUG_ON(!request->hw_context->pin_count);
GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
/*
* Flush enough space to reduce the likelihood of waiting after
@ -1465,7 +1492,7 @@ static int execlists_request_alloc(struct i915_request *request)
*/
/* Unconditionally invalidate GPU caches and TLBs. */
if (i915_vm_is_48bit(&request->gem_context->ppgtt->vm))
if (i915_vm_is_4lvl(&request->gem_context->ppgtt->vm))
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
else
ret = emit_pdps(request);
@ -1732,7 +1759,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
unsigned int i;
int ret;
if (GEM_DEBUG_WARN_ON(engine->id != RCS))
if (GEM_DEBUG_WARN_ON(engine->id != RCS0))
return -EINVAL;
switch (INTEL_GEN(engine->i915)) {
@ -1872,12 +1899,31 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
__tasklet_disable_sync_once(&execlists->tasklet);
GEM_BUG_ON(!reset_in_progress(execlists));
intel_engine_stop_cs(engine);
/* And flush any current direct submission. */
spin_lock_irqsave(&engine->timeline.lock, flags);
process_csb(engine); /* drain preemption events */
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static bool lrc_regs_ok(const struct i915_request *rq)
{
const struct intel_ring *ring = rq->ring;
const u32 *regs = rq->hw_context->lrc_reg_state;
/* Quick spot check for the common signs of context corruption */
if (regs[CTX_RING_BUFFER_CONTROL + 1] !=
(RING_CTL_SIZE(ring->size) | RING_VALID))
return false;
if (regs[CTX_RING_BUFFER_START + 1] != i915_ggtt_offset(ring->vma))
return false;
return true;
}
static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@ -1904,14 +1950,24 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
/* Following the reset, we need to reload the CSB read/write pointers */
reset_csb_pointers(&engine->execlists);
GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
engine->name,
rq ? rq->global_seqno : 0,
intel_engine_get_seqno(engine),
yesno(stalled));
if (!rq)
goto out_unlock;
/*
* If this request hasn't started yet, e.g. it is waiting on a
* semaphore, we need to avoid skipping the request or else we
* break the signaling chain. However, if the context is corrupt
* the request will not restart and we will be stuck with a wedged
* device. It is quite often the case that if we issue a reset
* while the GPU is loading the context image, the image itself
* becomes corrupt.
*
* Otherwise, if the request has not started yet, it should replay
* perfectly and we do not need to flag the result as erroneous.
*/
if (!i915_request_started(rq) && lrc_regs_ok(rq))
goto out_unlock;
/*
* If the request was innocent, we leave the request in the ELSP
* and will try to replay it on restarting. The context image may
@ -1924,7 +1980,7 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
* image back to the expected values to skip over the guilty request.
*/
i915_reset_request(rq, stalled);
if (!stalled)
if (!stalled && lrc_regs_ok(rq))
goto out_unlock;
/*
@ -1942,12 +1998,12 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
engine->context_size - PAGE_SIZE);
}
/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
rq->ring->head = intel_ring_wrap(rq->ring, rq->postfix);
/* Rerun the request; its payload has been neutered (if guilty). */
rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
intel_ring_update_space(rq->ring);
execlists_init_reg_state(regs, rq->gem_context, engine, rq->ring);
__execlists_update_reg_state(engine, rq->hw_context);
execlists_init_reg_state(regs, rq->hw_context, engine, rq->ring);
__execlists_update_reg_state(rq->hw_context, engine);
out_unlock:
spin_unlock_irqrestore(&engine->timeline.lock, flags);
@ -1961,13 +2017,14 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
* After a GPU reset, we may have requests to replay. Do so now while
* we still have the forcewake to be sure that the GPU is not allowed
* to sleep before we restart and reload a context.
*/
GEM_BUG_ON(!reset_in_progress(execlists));
if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
execlists->tasklet.func(execlists->tasklet.data);
tasklet_enable(&execlists->tasklet);
if (__tasklet_enable(&execlists->tasklet))
/* And kick in case we missed a new request submission. */
tasklet_hi_schedule(&execlists->tasklet);
GEM_TRACE("%s: depth->%d\n", engine->name,
atomic_read(&execlists->tasklet.count));
}
@ -2148,16 +2205,16 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
{
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
cs = gen8_emit_ggtt_write(cs,
request->fence.seqno,
request->timeline->hwsp_offset);
request->timeline->hwsp_offset,
0);
cs = gen8_emit_ggtt_write(cs,
request->global_seqno,
intel_hws_seqno_address(request->engine));
intel_engine_next_hangcheck_seqno(request->engine),
I915_GEM_HWS_HANGCHECK_ADDR,
MI_FLUSH_DW_STORE_INDEX);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@ -2180,9 +2237,9 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
PIPE_CONTROL_CS_STALL);
cs = gen8_emit_ggtt_write_rcs(cs,
request->global_seqno,
intel_hws_seqno_address(request->engine),
PIPE_CONTROL_CS_STALL);
intel_engine_next_hangcheck_seqno(request->engine),
I915_GEM_HWS_HANGCHECK_ADDR,
PIPE_CONTROL_STORE_DATA_INDEX);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@ -2258,15 +2315,10 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
engine->park = NULL;
engine->unpark = NULL;
engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
if (engine->i915->preempt_context)
if (engine->preempt_context)
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
engine->i915->caps.scheduler =
I915_SCHEDULER_CAP_ENABLED |
I915_SCHEDULER_CAP_PRIORITY;
if (intel_engine_has_preemption(engine))
engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
}
static void
@ -2279,7 +2331,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->reset.reset = execlists_reset;
engine->reset.finish = execlists_reset_finish;
engine->context_pin = execlists_context_pin;
engine->cops = &execlists_context_ops;
engine->request_alloc = execlists_request_alloc;
engine->emit_flush = gen8_emit_flush;
@ -2309,11 +2361,11 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
if (INTEL_GEN(engine->i915) < 11) {
const u8 irq_shifts[] = {
[RCS] = GEN8_RCS_IRQ_SHIFT,
[BCS] = GEN8_BCS_IRQ_SHIFT,
[VCS] = GEN8_VCS1_IRQ_SHIFT,
[VCS2] = GEN8_VCS2_IRQ_SHIFT,
[VECS] = GEN8_VECS_IRQ_SHIFT,
[RCS0] = GEN8_RCS_IRQ_SHIFT,
[BCS0] = GEN8_BCS_IRQ_SHIFT,
[VCS0] = GEN8_VCS0_IRQ_SHIFT,
[VCS1] = GEN8_VCS1_IRQ_SHIFT,
[VECS0] = GEN8_VECS_IRQ_SHIFT,
};
shift = irq_shifts[engine->id];
@ -2367,13 +2419,9 @@ static int logical_ring_init(struct intel_engine_cs *engine)
}
execlists->preempt_complete_status = ~0u;
if (i915->preempt_context) {
struct intel_context *ce =
to_intel_context(i915->preempt_context, engine);
if (engine->preempt_context)
execlists->preempt_complete_status =
upper_32_bits(ce->lrc_desc);
}
upper_32_bits(engine->preempt_context->lrc_desc);
execlists->csb_status =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@ -2592,13 +2640,13 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
}
static void execlists_init_reg_state(u32 *regs,
struct i915_gem_context *ctx,
struct intel_context *ce,
struct intel_engine_cs *engine,
struct intel_ring *ring)
{
struct drm_i915_private *dev_priv = engine->i915;
u32 base = engine->mmio_base;
struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt;
bool rcs = engine->class == RENDER_CLASS;
u32 base = engine->mmio_base;
/* A context is actually a big batch buffer with several
* MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
@ -2613,7 +2661,7 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
if (INTEL_GEN(dev_priv) < 11) {
if (INTEL_GEN(engine->i915) < 11) {
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
CTX_CTRL_RS_CTX_ENABLE);
@ -2668,33 +2716,33 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
if (i915_vm_is_48bit(&ctx->ppgtt->vm)) {
if (i915_vm_is_4lvl(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
*/
ASSIGN_CTX_PML4(ctx->ppgtt, regs);
ASSIGN_CTX_PML4(ppgtt, regs);
} else {
ASSIGN_CTX_PDP(ctx->ppgtt, regs, 3);
ASSIGN_CTX_PDP(ctx->ppgtt, regs, 2);
ASSIGN_CTX_PDP(ctx->ppgtt, regs, 1);
ASSIGN_CTX_PDP(ctx->ppgtt, regs, 0);
ASSIGN_CTX_PDP(ppgtt, regs, 3);
ASSIGN_CTX_PDP(ppgtt, regs, 2);
ASSIGN_CTX_PDP(ppgtt, regs, 1);
ASSIGN_CTX_PDP(ppgtt, regs, 0);
}
if (rcs) {
regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
i915_oa_init_reg_state(engine, ctx, regs);
i915_oa_init_reg_state(engine, ce, regs);
}
regs[CTX_END] = MI_BATCH_BUFFER_END;
if (INTEL_GEN(dev_priv) >= 10)
if (INTEL_GEN(engine->i915) >= 10)
regs[CTX_END] |= BIT(0);
}
static int
populate_lr_context(struct i915_gem_context *ctx,
populate_lr_context(struct intel_context *ce,
struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *engine,
struct intel_ring *ring)
@ -2740,11 +2788,12 @@ populate_lr_context(struct i915_gem_context *ctx,
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
execlists_init_reg_state(regs, ctx, engine, ring);
execlists_init_reg_state(regs, ce, engine, ring);
if (!engine->default_state)
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
if (ctx == ctx->i915->preempt_context && INTEL_GEN(engine->i915) < 11)
if (ce->gem_context == engine->i915->preempt_context &&
INTEL_GEN(engine->i915) < 11)
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
@ -2754,9 +2803,13 @@ err_unpin_ctx:
return ret;
}
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
struct intel_context *ce)
static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
{
return i915_timeline_create(ctx->i915, ctx->name, NULL);
}
static int execlists_context_deferred_alloc(struct intel_context *ce,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *ctx_obj;
struct i915_vma *vma;
@ -2776,30 +2829,32 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
*/
context_size += LRC_HEADER_PAGES * PAGE_SIZE;
ctx_obj = i915_gem_object_create(ctx->i915, context_size);
ctx_obj = i915_gem_object_create(engine->i915, context_size);
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
}
timeline = i915_timeline_create(ctx->i915, ctx->name, NULL);
timeline = get_timeline(ce->gem_context);
if (IS_ERR(timeline)) {
ret = PTR_ERR(timeline);
goto error_deref_obj;
}
ring = intel_engine_create_ring(engine, timeline, ctx->ring_size);
ring = intel_engine_create_ring(engine,
timeline,
ce->gem_context->ring_size);
i915_timeline_put(timeline);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
goto error_deref_obj;
}
ret = populate_lr_context(ctx, ctx_obj, engine, ring);
ret = populate_lr_context(ce, ctx_obj, engine, ring);
if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
goto error_ring_free;
@ -2811,7 +2866,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
return 0;
error_ring_free:
intel_ring_free(ring);
intel_ring_put(ring);
error_deref_obj:
i915_gem_object_put(ctx_obj);
return ret;
@ -2819,9 +2874,8 @@ error_deref_obj:
void intel_lr_context_resume(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
struct intel_context *ce;
/*
* Because we emit WA_TAIL_DWORDS there may be a disparity
@ -2835,17 +2889,10 @@ void intel_lr_context_resume(struct drm_i915_private *i915)
* simplicity, we just zero everything out.
*/
list_for_each_entry(ctx, &i915->contexts.list, link) {
for_each_engine(engine, i915, id) {
struct intel_context *ce =
to_intel_context(ctx, engine);
if (!ce->state)
continue;
list_for_each_entry(ce, &ctx->active_engines, active_link) {
GEM_BUG_ON(!ce->ring);
intel_ring_reset(ce->ring, 0);
if (ce->pin_count) /* otherwise done in context_pin */
__execlists_update_reg_state(engine, ce);
__execlists_update_reg_state(ce, ce->engine);
}
}
}

View file

@ -452,6 +452,14 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
}
void lspcon_read_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
void *frame, ssize_t len)
{
/* FIXME implement this */
}
void lspcon_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
@ -470,6 +478,8 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
return;
}
/* FIXME precompute infoframes */
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
conn_state->connector,
adjusted_mode);
@ -504,9 +514,10 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
buf, ret);
}
bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
/* FIXME actually read this from the hw */
return enc_to_intel_lspcon(&encoder->base)->active;
}

View file

@ -152,24 +152,17 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET;
val = I915_READ(PP_ON_DELAYS(0));
pps->port = (val & PANEL_PORT_SELECT_MASK) >>
PANEL_PORT_SELECT_SHIFT;
pps->t1_t2 = (val & PANEL_POWER_UP_DELAY_MASK) >>
PANEL_POWER_UP_DELAY_SHIFT;
pps->t5 = (val & PANEL_LIGHT_ON_DELAY_MASK) >>
PANEL_LIGHT_ON_DELAY_SHIFT;
pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
val = I915_READ(PP_OFF_DELAYS(0));
pps->t3 = (val & PANEL_POWER_DOWN_DELAY_MASK) >>
PANEL_POWER_DOWN_DELAY_SHIFT;
pps->tx = (val & PANEL_LIGHT_OFF_DELAY_MASK) >>
PANEL_LIGHT_OFF_DELAY_SHIFT;
pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
val = I915_READ(PP_DIVISOR(0));
pps->divider = (val & PP_REFERENCE_DIVIDER_MASK) >>
PP_REFERENCE_DIVIDER_SHIFT;
val = (val & PANEL_POWER_CYCLE_DELAY_MASK) >>
PANEL_POWER_CYCLE_DELAY_SHIFT;
pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val);
/*
* Remove the BSpec specified +1 (100ms) offset that accounts for a
* too short power-cycle delay due to the asynchronous programming of
@ -209,16 +202,19 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
val |= PANEL_POWER_RESET;
I915_WRITE(PP_CONTROL(0), val);
I915_WRITE(PP_ON_DELAYS(0), (pps->port << PANEL_PORT_SELECT_SHIFT) |
(pps->t1_t2 << PANEL_POWER_UP_DELAY_SHIFT) |
(pps->t5 << PANEL_LIGHT_ON_DELAY_SHIFT));
I915_WRITE(PP_OFF_DELAYS(0), (pps->t3 << PANEL_POWER_DOWN_DELAY_SHIFT) |
(pps->tx << PANEL_LIGHT_OFF_DELAY_SHIFT));
I915_WRITE(PP_ON_DELAYS(0),
REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
val = pps->divider << PP_REFERENCE_DIVIDER_SHIFT;
val |= (DIV_ROUND_UP(pps->t4, 1000) + 1) <<
PANEL_POWER_CYCLE_DELAY_SHIFT;
I915_WRITE(PP_DIVISOR(0), val);
I915_WRITE(PP_OFF_DELAYS(0),
REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
I915_WRITE(PP_DIVISOR(0),
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
DIV_ROUND_UP(pps->t4, 1000) + 1));
}
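The conversion above trades open-coded mask/shift pairs for mask-only accessors. A small stand-alone sketch of the same pattern, assuming the i915 REG_FIELD_GET()/REG_FIELD_PREP() macros build on the generic <linux/bitfield.h> helpers; EXAMPLE_MASK is made up for illustration:
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_MASK	GENMASK(12, 8)	/* hypothetical 5-bit register field */

static u32 example_pack(u32 reg, u32 field)
{
	reg &= ~EXAMPLE_MASK;				/* clear the old field */
	return reg | FIELD_PREP(EXAMPLE_MASK, field);	/* shift derived from the mask */
}

static u32 example_unpack(u32 reg)
{
	return FIELD_GET(EXAMPLE_MASK, reg);		/* no explicit *_SHIFT needed */
}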
static void intel_pre_enable_lvds(struct intel_encoder *encoder,
@ -746,20 +742,21 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
{ } /* terminating entry */
};
struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev)
struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
{
struct intel_encoder *intel_encoder;
struct intel_encoder *encoder;
for_each_intel_encoder(dev, intel_encoder)
if (intel_encoder->type == INTEL_OUTPUT_LVDS)
return intel_encoder;
for_each_intel_encoder(&dev_priv->drm, encoder) {
if (encoder->type == INTEL_OUTPUT_LVDS)
return encoder;
}
return NULL;
}
bool intel_is_dual_link_lvds(struct drm_device *dev)
bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder = intel_get_lvds_encoder(dev);
struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv);
return encoder && to_lvds_encoder(&encoder->base)->is_dual_link;
}

View file

@ -252,7 +252,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
{
bool result = false;
if (IS_ICELAKE(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 11) {
table->size = ARRAY_SIZE(icelake_mocs_table);
table->table = icelake_mocs_table;
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
@ -288,17 +288,17 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
{
switch (engine_id) {
case RCS:
case RCS0:
return GEN9_GFX_MOCS(index);
case VCS:
case VCS0:
return GEN9_MFX0_MOCS(index);
case BCS:
case BCS0:
return GEN9_BLT_MOCS(index);
case VECS:
case VECS0:
return GEN9_VEBOX_MOCS(index);
case VCS2:
case VCS1:
return GEN9_MFX1_MOCS(index);
case VCS3:
case VCS2:
return GEN11_MFX2_MOCS(index);
default:
MISSING_CASE(engine_id);

View file

@ -236,7 +236,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
static struct i915_request *alloc_request(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = dev_priv->engine[RCS];
struct intel_engine_cs *engine = dev_priv->engine[RCS0];
return i915_request_alloc(engine, dev_priv->kernel_context);
}

View file

@ -1894,15 +1894,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
} else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv)) {
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
panel->backlight.setup = cnp_setup_backlight;
panel->backlight.enable = cnp_enable_backlight;
panel->backlight.disable = cnp_disable_backlight;
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = cnp_hz_to_pwm;
} else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
HAS_PCH_KBP(dev_priv)) {
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
panel->backlight.setup = lpt_setup_backlight;
panel->backlight.enable = lpt_enable_backlight;
panel->backlight.disable = lpt_disable_backlight;

View file

@ -31,16 +31,20 @@
#include "intel_drv.h"
static const char * const pipe_crc_sources[] = {
"none",
"plane1",
"plane2",
"pf",
"pipe",
"TV",
"DP-B",
"DP-C",
"DP-D",
"auto",
[INTEL_PIPE_CRC_SOURCE_NONE] = "none",
[INTEL_PIPE_CRC_SOURCE_PLANE1] = "plane1",
[INTEL_PIPE_CRC_SOURCE_PLANE2] = "plane2",
[INTEL_PIPE_CRC_SOURCE_PLANE3] = "plane3",
[INTEL_PIPE_CRC_SOURCE_PLANE4] = "plane4",
[INTEL_PIPE_CRC_SOURCE_PLANE5] = "plane5",
[INTEL_PIPE_CRC_SOURCE_PLANE6] = "plane6",
[INTEL_PIPE_CRC_SOURCE_PLANE7] = "plane7",
[INTEL_PIPE_CRC_SOURCE_PIPE] = "pipe",
[INTEL_PIPE_CRC_SOURCE_TV] = "TV",
[INTEL_PIPE_CRC_SOURCE_DP_B] = "DP-B",
[INTEL_PIPE_CRC_SOURCE_DP_C] = "DP-C",
[INTEL_PIPE_CRC_SOURCE_DP_D] = "DP-D",
[INTEL_PIPE_CRC_SOURCE_AUTO] = "auto",
};
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@ -192,8 +196,6 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum intel_pipe_crc_source *source,
u32 *val)
{
bool need_stable_symbols = false;
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
if (ret)
@ -209,56 +211,23 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
break;
case INTEL_PIPE_CRC_SOURCE_DP_B:
if (!IS_G4X(dev_priv))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_DP_C:
if (!IS_G4X(dev_priv))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_DP_D:
if (!IS_G4X(dev_priv))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_NONE:
*val = 0;
break;
default:
/*
* The DP CRC source doesn't work on g4x.
* It can be made to work to some degree by selecting
* the correct CRC source before the port is enabled,
* and not touching the CRC source bits again until
* the port is disabled. But even then the bits
* eventually get stuck and a reboot is needed to get
* working CRCs on the pipe again. Let's simply
* refuse to use DP CRCs on g4x.
*/
return -EINVAL;
}
/*
* When the pipe CRC tap point is after the transcoders we need
* to tweak symbol-level features to produce a deterministic series of
* symbols for a given frame. We need to reset those features only once
* a frame (instead of every nth symbol):
* - DC-balance: used to ensure a better clock recovery from the data
* link (SDVO)
* - DisplayPort scrambling: used for EMI reduction
*/
if (need_stable_symbols) {
u32 tmp = I915_READ(PORT_DFT2_G4X);
WARN_ON(!IS_G4X(dev_priv));
I915_WRITE(PORT_DFT_I9XX,
I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
if (pipe == PIPE_A)
tmp |= PIPE_A_SCRAMBLE_RESET;
else
tmp |= PIPE_B_SCRAMBLE_RESET;
I915_WRITE(PORT_DFT2_G4X, tmp);
}
return 0;
}
@ -283,24 +252,6 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
I915_WRITE(PORT_DFT2_G4X, tmp);
}
static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
u32 tmp = I915_READ(PORT_DFT2_G4X);
if (pipe == PIPE_A)
tmp &= ~PIPE_A_SCRAMBLE_RESET;
else
tmp &= ~PIPE_B_SCRAMBLE_RESET;
I915_WRITE(PORT_DFT2_G4X, tmp);
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
I915_WRITE(PORT_DFT_I9XX,
I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
}
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@ -329,19 +280,18 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
static void hsw_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
bool enable)
static void
intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
{
struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *pipe_config;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
int ret = 0;
int ret;
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(dev);
state = drm_atomic_state_alloc(&dev_priv->drm);
if (!state) {
ret = -ENOMEM;
goto unlock;
@ -356,17 +306,10 @@ retry:
goto put_state;
}
if (HAS_IPS(dev_priv)) {
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
* enabled and disabled dynamically based on package C states,
* user space can't make reliable use of the CRCs, so let's just
* completely disable it.
*/
pipe_config->ips_force_disable = enable;
}
pipe_config->base.mode_changed = pipe_config->has_psr;
pipe_config->crc_enabled = enable;
if (IS_HASWELL(dev_priv)) {
if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A) {
pipe_config->pch_pfit.force_thru = enable;
if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
pipe_config->pch_pfit.enabled != enable)
@ -392,11 +335,10 @@ unlock:
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val,
bool set_wa)
u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
*source = INTEL_PIPE_CRC_SOURCE_PF;
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PLANE1:
@ -405,11 +347,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
case INTEL_PIPE_CRC_SOURCE_PLANE2:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_PF:
if (set_wa && (IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
hsw_pipe_A_crc_wa(dev_priv, true);
case INTEL_PIPE_CRC_SOURCE_PIPE:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_NONE:
@ -422,10 +360,52 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PLANE1:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_1_SKL;
break;
case INTEL_PIPE_CRC_SOURCE_PLANE2:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_2_SKL;
break;
case INTEL_PIPE_CRC_SOURCE_PLANE3:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_3_SKL;
break;
case INTEL_PIPE_CRC_SOURCE_PLANE4:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_4_SKL;
break;
case INTEL_PIPE_CRC_SOURCE_PLANE5:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_5_SKL;
break;
case INTEL_PIPE_CRC_SOURCE_PLANE6:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_6_SKL;
break;
case INTEL_PIPE_CRC_SOURCE_PLANE7:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_7_SKL;
break;
case INTEL_PIPE_CRC_SOURCE_PIPE:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DMUX_SKL;
break;
case INTEL_PIPE_CRC_SOURCE_NONE:
*val = 0;
break;
default:
return -EINVAL;
}
return 0;
}
static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source, u32 *val,
bool set_wa)
enum intel_pipe_crc_source *source, u32 *val)
{
if (IS_GEN(dev_priv, 2))
return i8xx_pipe_crc_ctl_reg(source, val);
@ -435,8 +415,10 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
else if (IS_GEN_RANGE(dev_priv, 5, 6))
return ilk_pipe_crc_ctl_reg(source, val);
else if (INTEL_GEN(dev_priv) < 9)
return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
else
return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
}
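As a worked example of the new dispatch: on gen9+ hardware the final else now lands in skl_pipe_crc_ctl_reg(), so plane1 through plane7 and pipe become the selectable CRC sources there, while everything below gen9 keeps the ivb path, now without the workaround flag.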
static int
@ -486,9 +468,6 @@ static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
switch (source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
case INTEL_PIPE_CRC_SOURCE_TV:
case INTEL_PIPE_CRC_SOURCE_DP_B:
case INTEL_PIPE_CRC_SOURCE_DP_C:
case INTEL_PIPE_CRC_SOURCE_DP_D:
case INTEL_PIPE_CRC_SOURCE_NONE:
return 0;
default:
@ -532,7 +511,25 @@ static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
case INTEL_PIPE_CRC_SOURCE_PIPE:
case INTEL_PIPE_CRC_SOURCE_PLANE1:
case INTEL_PIPE_CRC_SOURCE_PLANE2:
case INTEL_PIPE_CRC_SOURCE_PF:
case INTEL_PIPE_CRC_SOURCE_NONE:
return 0;
default:
return -EINVAL;
}
}
static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
const enum intel_pipe_crc_source source)
{
switch (source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
case INTEL_PIPE_CRC_SOURCE_PLANE1:
case INTEL_PIPE_CRC_SOURCE_PLANE2:
case INTEL_PIPE_CRC_SOURCE_PLANE3:
case INTEL_PIPE_CRC_SOURCE_PLANE4:
case INTEL_PIPE_CRC_SOURCE_PLANE5:
case INTEL_PIPE_CRC_SOURCE_PLANE6:
case INTEL_PIPE_CRC_SOURCE_PLANE7:
case INTEL_PIPE_CRC_SOURCE_NONE:
return 0;
default:
@ -552,8 +549,10 @@ intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
return vlv_crc_source_valid(dev_priv, source);
else if (IS_GEN_RANGE(dev_priv, 5, 6))
return ilk_crc_source_valid(dev_priv, source);
else
else if (INTEL_GEN(dev_priv) < 9)
return ivb_crc_source_valid(dev_priv, source);
else
return skl_crc_source_valid(dev_priv, source);
}
const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
@ -592,6 +591,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
intel_wakeref_t wakeref;
u32 val = 0; /* shut up gcc */
int ret = 0;
bool enable;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
@ -605,7 +605,11 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
return -EIO;
}
ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val, true);
enable = source != INTEL_PIPE_CRC_SOURCE_NONE;
if (enable)
intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), true);
ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
if (ret != 0)
goto out;
@ -614,18 +618,16 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
POSTING_READ(PIPE_CRC_CTL(crtc->index));
if (!source) {
if (IS_G4X(dev_priv))
g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
else if ((IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv)) && crtc->index == PIPE_A)
hsw_pipe_A_crc_wa(dev_priv, false);
}
pipe_crc->skipped = 0;
out:
if (!enable)
intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), false);
intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
@ -641,7 +643,7 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
if (!crtc->crc.opened)
return;
if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val, false) < 0)
if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val) < 0)
return;
/* Don't need pipe_crc->lock here, IRQs are not generated. */

View file

@ -338,12 +338,12 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
if (enable)
val |= DSP_MAXFIFO_PM5_ENABLE;
else
val &= ~DSP_MAXFIFO_PM5_ENABLE;
vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
mutex_unlock(&dev_priv->pcu_lock);
}
@ -3624,7 +3624,12 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) < 11)
return enabled_slices;
if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
/*
* FIXME: for now we only ever use one slice; pretend that only this
* one slice is enabled until we have a proper way to toggle the
* second slice on demand.
*/
if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
enabled_slices++;
return enabled_slices;
@ -4466,6 +4471,17 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
wm = &cstate->wm.skl.optimal.planes[plane_id];
memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
/*
* Wa_1408961008:icl
* Underruns with WM1+ disabled
*/
if (IS_ICELAKE(dev_priv) &&
level == 1 && wm->wm[0].plane_en) {
wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
}
}
}
@ -5056,11 +5072,12 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
{
u32 val = 0;
if (level->plane_en) {
if (level->plane_en)
val |= PLANE_WM_EN;
val |= level->plane_res_b;
val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
}
if (level->ignore_lines)
val |= PLANE_WM_IGNORE_LINES;
val |= level->plane_res_b;
val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
I915_WRITE_FW(reg, val);
}
@ -5126,6 +5143,7 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2)
{
return l1->plane_en == l2->plane_en &&
l1->ignore_lines == l2->ignore_lines &&
l1->plane_res_l == l2->plane_res_l &&
l1->plane_res_b == l2->plane_res_b;
}
@ -5269,6 +5287,11 @@ skl_compute_ddb(struct intel_atomic_state *state)
return 0;
}
static char enast(bool enable)
{
return enable ? '*' : ' ';
}
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
@ -5279,8 +5302,16 @@ skl_print_wm_changes(struct intel_atomic_state *state)
struct intel_crtc *crtc;
int i;
if ((drm_debug & DRM_UT_KMS) == 0)
return;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
old_pipe_wm = &old_crtc_state->wm.skl.optimal;
new_pipe_wm = &new_crtc_state->wm.skl.optimal;
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
@ -5291,10 +5322,86 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_ddb_entry_equal(old, new))
continue;
DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
plane->base.base.id, plane->base.name,
old->start, old->end,
new->start, new->end);
old->start, old->end, new->start, new->end,
skl_ddb_entry_size(old), skl_ddb_entry_size(new));
}
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_plane_wm *old_wm, *new_wm;
old_wm = &old_pipe_wm->planes[plane_id];
new_wm = &new_pipe_wm->planes[plane_id];
if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
continue;
DRM_DEBUG_KMS("[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
" -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
plane->base.base.id, plane->base.name,
enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
enast(old_wm->trans_wm.plane_en),
enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
enast(new_wm->trans_wm.plane_en));
DRM_DEBUG_KMS("[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
" -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
plane->base.base.id, plane->base.name,
enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
DRM_DEBUG_KMS("[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
" -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
plane->base.base.id, plane->base.name,
old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
old_wm->trans_wm.plane_res_b,
new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
new_wm->trans_wm.plane_res_b);
DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
" -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
plane->base.base.id, plane->base.name,
old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
old_wm->trans_wm.min_ddb_alloc,
new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
new_wm->trans_wm.min_ddb_alloc);
}
}
}
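To illustrate the enast() formatting, a hypothetical plane that keeps WM0 enabled but loses WM1 across the transition would log a level line along the lines of: [PLANE:31:plane 1A] level *wm0,*wm1, wm2, ... -> *wm0, wm1, wm2, ... (a '*' marks an enabled level, a space a disabled one).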
@ -5609,6 +5716,7 @@ static inline void skl_wm_level_from_reg_val(u32 val,
struct skl_wm_level *level)
{
level->plane_en = val & PLANE_WM_EN;
level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
PLANE_WM_LINES_MASK;
@ -5986,7 +6094,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
if (IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
if (val & DSP_MAXFIFO_PM5_ENABLE)
wm->level = VLV_WM_LEVEL_PM5;
@ -6691,8 +6799,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->pcu_lock);
}
void gen6_rps_boost(struct i915_request *rq,
struct intel_rps_client *rps_client)
void gen6_rps_boost(struct i915_request *rq)
{
struct intel_rps *rps = &rq->i915->gt_pm.rps;
unsigned long flags;
@ -6721,7 +6828,7 @@ void gen6_rps_boost(struct i915_request *rq,
if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
schedule_work(&rps->work);
atomic_inc(rps_client ? &rps_client->boosts : &rps->boosts);
atomic_inc(&rps->boosts);
}
int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)

View file

@ -51,6 +51,7 @@
* must be correctly synchronized/cancelled when shutting down the pipe."
*/
#include <drm/drm_atomic_helper.h>
#include "intel_drv.h"
#include "i915_drv.h"
@ -78,9 +79,6 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
case I915_PSR_DEBUG_DEFAULT:
if (i915_modparams.enable_psr <= 0)
return false;
default:
return crtc_state->has_psr2;
}
@ -435,6 +433,41 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val = 0;
if (INTEL_GEN(dev_priv) >= 11)
val |= EDP_PSR_TP4_TIME_0US;
if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
val |= EDP_PSR_TP1_TIME_0us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
val |= EDP_PSR_TP1_TIME_100us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
val |= EDP_PSR_TP1_TIME_500us;
else
val |= EDP_PSR_TP1_TIME_2500us;
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
val |= EDP_PSR_TP2_TP3_TIME_0us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR_TP2_TP3_TIME_100us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
val |= EDP_PSR_TP2_TP3_TIME_500us;
else
val |= EDP_PSR_TP2_TP3_TIME_2500us;
if (intel_dp_source_supports_hbr2(intel_dp) &&
drm_dp_tps3_supported(intel_dp->dpcd))
val |= EDP_PSR_TP1_TP3_SEL;
else
val |= EDP_PSR_TP1_TP2_SEL;
return val;
}
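As a worked example of the bucketing above: a VBT tp1 wakeup time of 200 us falls through the 0 and <=100 checks into the <=500 branch and programs EDP_PSR_TP1_TIME_500us, while a value of 0 selects EDP_PSR_TP1_TIME_0us.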
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@ -459,29 +492,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (dev_priv->psr.link_standby)
val |= EDP_PSR_LINK_STANDBY;
if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
val |= EDP_PSR_TP1_TIME_0us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
val |= EDP_PSR_TP1_TIME_100us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
val |= EDP_PSR_TP1_TIME_500us;
else
val |= EDP_PSR_TP1_TIME_2500us;
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
val |= EDP_PSR_TP2_TP3_TIME_0us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR_TP2_TP3_TIME_100us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
val |= EDP_PSR_TP2_TP3_TIME_500us;
else
val |= EDP_PSR_TP2_TP3_TIME_2500us;
if (intel_dp_source_supports_hbr2(intel_dp) &&
drm_dp_tps3_supported(intel_dp->dpcd))
val |= EDP_PSR_TP1_TP3_SEL;
else
val |= EDP_PSR_TP1_TP2_SEL;
val |= intel_psr1_get_tp_time(intel_dp);
if (INTEL_GEN(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
@ -509,16 +520,24 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
val |= EDP_PSR2_TP2_TIME_50us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR2_TP2_TIME_100us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
val |= EDP_PSR2_TP2_TIME_500us;
else
val |= EDP_PSR2_TP2_TIME_2500us;
/*
* FIXME: There is probably an issue in the DMC firmwares (icl_dmc_ver1_07.bin
* and kbl_dmc_ver1_04.bin at least) that causes PSR2 SU to fail after
* exiting DC6 if EDP_PSR_TP1_TP3_SEL is kept in PSR_CTL, so for now
* let's work around it by clearing PSR_CTL before enabling PSR2.
*/
I915_WRITE(EDP_PSR_CTL, 0);
I915_WRITE(EDP_PSR2_CTL, val);
}
@ -530,11 +549,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0;
/*
* FIXME psr2_support is messed up. It's both computed
* dynamically during PSR enable, and extracted from sink
* caps during eDP detection.
*/
if (!dev_priv->psr.sink_psr2_support)
return false;
@ -575,6 +589,11 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
if (crtc_state->crc_enabled) {
DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
return false;
}
return true;
}
@ -718,8 +737,11 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
{
struct intel_dp *intel_dp = dev_priv->psr.dp;
if (dev_priv->psr.enabled)
return;
WARN_ON(dev_priv->psr.enabled);
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
DRM_DEBUG_KMS("Enabling PSR%s\n",
dev_priv->psr.psr2_enabled ? "2" : "1");
@ -752,20 +774,13 @@ void intel_psr_enable(struct intel_dp *intel_dp,
WARN_ON(dev_priv->drrs.dp);
mutex_lock(&dev_priv->psr.lock);
if (dev_priv->psr.prepared) {
DRM_DEBUG_KMS("PSR already in use\n");
if (!psr_global_enabled(dev_priv->psr.debug)) {
DRM_DEBUG_KMS("PSR disabled by flag\n");
goto unlock;
}
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.prepared = true;
dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
if (psr_global_enabled(dev_priv->psr.debug))
intel_psr_enable_locked(dev_priv, crtc_state);
else
DRM_DEBUG_KMS("PSR disabled by flag\n");
intel_psr_enable_locked(dev_priv, crtc_state);
unlock:
mutex_unlock(&dev_priv->psr.lock);
@ -848,18 +863,69 @@ void intel_psr_disable(struct intel_dp *intel_dp,
return;
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.prepared) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
intel_psr_disable_locked(intel_dp);
dev_priv->psr.prepared = false;
mutex_unlock(&dev_priv->psr.lock);
cancel_work_sync(&dev_priv->psr.work);
}
static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
{
/*
* Display WA #0884: all
* This documented WA for bxt can be safely applied
* broadly so we can force HW tracking to exit PSR
* instead of disabling and re-enabling.
* The workaround tells us to write 0 to CUR_SURFLIVE_A,
* but it makes more sense to write to the currently
* active pipe.
*/
I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
}
/**
* intel_psr_update - Update PSR state
* @intel_dp: Intel DP
* @crtc_state: new CRTC state
*
* This function updates the PSR state, disabling, enabling or switching the
* PSR version when executing fastsets. For full modesets, intel_psr_disable()
* and intel_psr_enable() should be called instead.
*/
void intel_psr_update(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct i915_psr *psr = &dev_priv->psr;
bool enable, psr2_enable;
if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
return;
mutex_lock(&dev_priv->psr.lock);
enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
if (crtc_state->crc_enabled && psr->enabled)
psr_force_hw_tracking_exit(dev_priv);
goto unlock;
}
if (psr->enabled)
intel_psr_disable_locked(intel_dp);
if (enable)
intel_psr_enable_locked(dev_priv, crtc_state);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
/**
* intel_psr_wait_for_idle - wait for PSR1 to idle
* @new_crtc_state: new CRTC state
@ -924,36 +990,63 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
return err == 0 && dev_priv->psr.enabled;
}
static bool switching_psr(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state,
u32 mode)
{
/* Can't switch psr state anyway if PSR2 is not supported. */
if (!crtc_state || !crtc_state->has_psr2)
return false;
if (dev_priv->psr.psr2_enabled && mode == I915_PSR_DEBUG_FORCE_PSR1)
return true;
if (!dev_priv->psr.psr2_enabled && mode != I915_PSR_DEBUG_FORCE_PSR1)
return true;
return false;
}
int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
struct drm_modeset_acquire_ctx *ctx,
u64 val)
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
struct drm_connector_state *conn_state;
struct intel_crtc_state *crtc_state = NULL;
struct drm_crtc_commit *commit;
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
struct drm_crtc *crtc;
struct intel_dp *dp;
int err;
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
state->acquire_ctx = &ctx;
retry:
drm_for_each_crtc(crtc, dev) {
struct drm_crtc_state *crtc_state;
struct intel_crtc_state *intel_crtc_state;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
err = PTR_ERR(crtc_state);
goto error;
}
intel_crtc_state = to_intel_crtc_state(crtc_state);
if (crtc_state->active && intel_crtc_state->has_psr) {
/* Mark mode as changed to trigger a pipe->update() */
crtc_state->mode_changed = true;
break;
}
}
err = drm_atomic_commit(state);
error:
if (err == -EDEADLK) {
drm_atomic_state_clear(state);
err = drm_modeset_backoff(&ctx);
if (!err)
goto retry;
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
drm_atomic_state_put(state);
return err;
}
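The function above follows the standard drm modeset acquire/backoff retry dance. A condensed sketch of the idiom, with do_atomic_step() as a hypothetical stand-in for building and committing the atomic state:
#include <drm/drm_modeset_lock.h>

/* Hypothetical placeholder for the state build + commit step. */
static int do_atomic_step(struct drm_device *dev,
			  struct drm_modeset_acquire_ctx *ctx);

static void retry_dance_sketch(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
	err = do_atomic_step(dev, &ctx);
	if (err == -EDEADLK) {
		err = drm_modeset_backoff(&ctx);	/* drop locks, wait, relock */
		if (!err)
			goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}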
int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
u32 old_mode;
int ret;
bool enable;
u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
mode > I915_PSR_DEBUG_FORCE_PSR1) {
@ -961,49 +1054,19 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
return -EINVAL;
}
ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
if (ret)
return ret;
/* dev_priv->psr.dp should be set once and then never touched again. */
dp = READ_ONCE(dev_priv->psr.dp);
conn_state = dp->attached_connector->base.state;
crtc = conn_state->crtc;
if (crtc) {
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
return ret;
crtc_state = to_intel_crtc_state(crtc->state);
commit = crtc_state->base.commit;
} else {
commit = conn_state->commit;
}
if (commit) {
ret = wait_for_completion_interruptible(&commit->hw_done);
if (ret)
return ret;
}
ret = mutex_lock_interruptible(&dev_priv->psr.lock);
if (ret)
return ret;
enable = psr_global_enabled(val);
if (!enable || switching_psr(dev_priv, crtc_state, mode))
intel_psr_disable_locked(dev_priv->psr.dp);
old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
dev_priv->psr.debug = val;
if (crtc)
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
if (dev_priv->psr.prepared && enable)
intel_psr_enable_locked(dev_priv, crtc_state);
mutex_unlock(&dev_priv->psr.lock);
if (old_mode != mode)
ret = intel_psr_fastset_force(dev_priv);
return ret;
}
@ -1121,18 +1184,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* By definition flush = invalidate + flush */
if (frontbuffer_bits) {
/*
* Display WA #0884: all
* This documented WA for bxt can be safely applied
* broadly so we can force HW tracking to exit PSR
* instead of disabling and re-enabling.
* Workaround tells us to write 0 to CUR_SURFLIVE_A,
* but it makes more sense write to the current active
* pipe.
*/
I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
}
if (frontbuffer_bits)
psr_force_hw_tracking_exit(dev_priv);
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_work(&dev_priv->psr.work);

Some files were not shown because too many files have changed in this diff.