drm/i915/gt: replace IS_GEN and friends with GRAPHICS_VER
This was done by the following semantic patch:

	@@ expression i915; @@
	- INTEL_GEN(i915)
	+ GRAPHICS_VER(i915)

	@@ expression i915; expression E; @@
	- INTEL_GEN(i915) >= E
	+ GRAPHICS_VER(i915) >= E

	@@ expression dev_priv; expression E; @@
	- !IS_GEN(dev_priv, E)
	+ GRAPHICS_VER(dev_priv) != E

	@@ expression dev_priv; expression E; @@
	- IS_GEN(dev_priv, E)
	+ GRAPHICS_VER(dev_priv) == E

	@@ expression dev_priv; expression from, until; @@
	- IS_GEN_RANGE(dev_priv, from, until)
	+ IS_GRAPHICS_VER(dev_priv, from, until)

	@def@ expression E; identifier id =~ "^gen$"; @@
	- id = GRAPHICS_VER(E)
	+ ver = GRAPHICS_VER(E)

	@@ identifier def.id; @@
	- id
	+ ver

It also takes care of renaming the variable assigned from GRAPHICS_VER() so that it is called "ver" rather than "gen".

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210605155356.4183026-2-lucas.demarchi@intel.com
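For context, GRAPHICS_VER() and IS_GRAPHICS_VER() carry the same meaning as the helpers they replace; only the spelling of the checks changes (IS_GEN(i915, N) becomes GRAPHICS_VER(i915) == N, and IS_GEN_RANGE() becomes IS_GRAPHICS_VER()). A rough sketch of the shape of these macros follows; it is illustrative only and not the exact i915 definitions, which live in i915_drv.h:

	/* Illustrative sketch only -- not the verbatim i915 definitions. */
	#define GRAPHICS_VER(i915)	(INTEL_INFO(i915)->graphics_ver)
	#define IS_GRAPHICS_VER(i915, from, until) \
		(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))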
This commit is contained in:
Parent: 84bdf4571d
Commit: c816723b6b
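A semantic patch like the one above is normally applied tree-wide with Coccinelle's spatch tool (for example, spatch --sp-file graphics_ver.cocci --in-place --dir drivers/gpu/drm/i915/gt/ — the rule file name here is illustrative), producing the mechanical conversion shown in the diff below.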
@@ -85,14 +85,14 @@ static int gen6_drpc(struct seq_file *m)
 gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);
 rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
-if (INTEL_GEN(i915) >= 9) {
+if (GRAPHICS_VER(i915) >= 9) {
 gen9_powergate_enable =
 intel_uncore_read(uncore, GEN9_PG_ENABLE);
 gen9_powergate_status =
 intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
 }
-if (INTEL_GEN(i915) <= 7)
+if (GRAPHICS_VER(i915) <= 7)
 sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
 &rc6vids, NULL);
@@ -100,7 +100,7 @@ static int gen6_drpc(struct seq_file *m)
 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
 seq_printf(m, "RC6 Enabled: %s\n",
 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
-if (INTEL_GEN(i915) >= 9) {
+if (GRAPHICS_VER(i915) >= 9) {
 seq_printf(m, "Render Well Gating Enabled: %s\n",
 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
 seq_printf(m, "Media Well Gating Enabled: %s\n",
@@ -134,7 +134,7 @@ static int gen6_drpc(struct seq_file *m)
 seq_printf(m, "Core Power Down: %s\n",
 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
-if (INTEL_GEN(i915) >= 9) {
+if (GRAPHICS_VER(i915) >= 9) {
 seq_printf(m, "Render Power Well: %s\n",
 (gen9_powergate_status &
 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
@@ -150,7 +150,7 @@ static int gen6_drpc(struct seq_file *m)
 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
-if (INTEL_GEN(i915) <= 7) {
+if (GRAPHICS_VER(i915) <= 7) {
 seq_printf(m, "RC6 voltage: %dmV\n",
 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
 seq_printf(m, "RC6+ voltage: %dmV\n",
@@ -250,7 +250,7 @@ static int frequency_show(struct seq_file *m, void *unused)
 wakeref = intel_runtime_pm_get(uncore->rpm);
-if (IS_GEN(i915, 5)) {
+if (GRAPHICS_VER(i915) == 5) {
 u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
 u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
@@ -296,7 +296,7 @@ static int frequency_show(struct seq_file *m, void *unused)
 seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
 intel_gpu_freq(rps, rps->efficient_freq));
-} else if (INTEL_GEN(i915) >= 6) {
+} else if (GRAPHICS_VER(i915) >= 6) {
 u32 rp_state_limits;
 u32 gt_perf_status;
 u32 rp_state_cap;
@@ -321,7 +321,7 @@ static int frequency_show(struct seq_file *m, void *unused)
 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
-if (INTEL_GEN(i915) >= 9) {
+if (GRAPHICS_VER(i915) >= 9) {
 reqf >>= 23;
 } else {
 reqf &= ~GEN6_TURBO_DISABLE;
@@ -354,7 +354,7 @@ static int frequency_show(struct seq_file *m, void *unused)
 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-if (INTEL_GEN(i915) >= 11) {
+if (GRAPHICS_VER(i915) >= 11) {
 pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
 pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
 /*
@@ -363,7 +363,7 @@ static int frequency_show(struct seq_file *m, void *unused)
 */
 pm_isr = 0;
 pm_iir = 0;
-} else if (INTEL_GEN(i915) >= 8) {
+} else if (GRAPHICS_VER(i915) >= 8) {
 pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
 pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
 pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
@@ -386,14 +386,14 @@ static int frequency_show(struct seq_file *m, void *unused)
 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
 pm_ier, pm_imr, pm_mask);
-if (INTEL_GEN(i915) <= 10)
+if (GRAPHICS_VER(i915) <= 10)
 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
 pm_isr, pm_iir);
 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
 rps->pm_intrmsk_mbz);
 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 seq_printf(m, "Render p-state ratio: %d\n",
-(gt_perf_status & (INTEL_GEN(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
+(gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
 seq_printf(m, "Render p-state VID: %d\n",
 gt_perf_status & 0xff);
 seq_printf(m, "Render p-state limit: %d\n",
@@ -437,20 +437,20 @@ static int frequency_show(struct seq_file *m, void *unused)
 max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
 rp_state_cap >> 16) & 0xff;
 max_freq *= (IS_GEN9_BC(i915) ||
-INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
 intel_gpu_freq(rps, max_freq));
 max_freq = (rp_state_cap & 0xff00) >> 8;
 max_freq *= (IS_GEN9_BC(i915) ||
-INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 intel_gpu_freq(rps, max_freq));
 max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
 rp_state_cap >> 0) & 0xff;
 max_freq *= (IS_GEN9_BC(i915) ||
-INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 intel_gpu_freq(rps, max_freq));
 seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -488,7 +488,7 @@ static int llc_show(struct seq_file *m, void *data)
 {
 struct intel_gt *gt = m->private;
 struct drm_i915_private *i915 = gt->i915;
-const bool edram = INTEL_GEN(i915) > 8;
+const bool edram = GRAPHICS_VER(i915) > 8;
 struct intel_rps *rps = &gt->rps;
 unsigned int max_gpu_freq, min_gpu_freq;
 intel_wakeref_t wakeref;
@@ -500,7 +500,7 @@ static int llc_show(struct seq_file *m, void *data)
 min_gpu_freq = rps->min_freq;
 max_gpu_freq = rps->max_freq;
-if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
 /* Convert GT frequency to 50 HZ units */
 min_gpu_freq /= GEN9_FREQ_SCALER;
 max_gpu_freq /= GEN9_FREQ_SCALER;
@@ -518,7 +518,7 @@ static int llc_show(struct seq_file *m, void *data)
 intel_gpu_freq(rps,
 (gpu_freq *
 (IS_GEN9_BC(i915) ||
-INTEL_GEN(i915) >= 10 ?
+GRAPHICS_VER(i915) >= 10 ?
 GEN9_FREQ_SCALER : 1))),
 ((ia_freq >> 0) & 0xff) * 100,
 ((ia_freq >> 8) & 0xff) * 100);
@@ -580,7 +580,7 @@ static int rps_boost_show(struct seq_file *m, void *data)
 seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
-if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
+if (GRAPHICS_VER(i915) >= 6 && intel_rps_is_active(rps)) {
 struct intel_uncore *uncore = gt->uncore;
 u32 rpup, rpupei;
 u32 rpdown, rpdownei;
@@ -74,7 +74,7 @@ int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
 cmd = MI_FLUSH;
 if (mode & EMIT_INVALIDATE) {
 cmd |= MI_EXE_FLUSH;
-if (IS_G4X(rq->engine->i915) || IS_GEN(rq->engine->i915, 5))
+if (IS_G4X(rq->engine->i915) || GRAPHICS_VER(rq->engine->i915) == 5)
 cmd |= MI_INVALIDATE_ISP;
 }
@@ -38,7 +38,7 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
 * pipe control.
 */
-if (IS_GEN(rq->engine->i915, 9))
+if (GRAPHICS_VER(rq->engine->i915) == 9)
 vf_flush_wa = true;
 /* WaForGAMHang:kbl */
@@ -709,7 +709,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
 *
 * Gen12 has inherited the same read-only fault issue from gen11.
 */
-ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);
+ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);
 if (HAS_LMEM(gt->i915))
 ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
@@ -76,7 +76,7 @@ intel_context_reconfigure_sseu(struct intel_context *ce,
 {
 int ret;
-GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
+GEM_BUG_ON(GRAPHICS_VER(ce->engine->i915) < 8);
 ret = intel_context_lock_pinned(ce);
 if (ret)
@@ -240,10 +240,10 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
 * Though they added more rings on g4x/ilk, they did not add
 * per-engine HWSTAM until gen6.
 */
-if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
+if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS)
 return;
-if (INTEL_GEN(engine->i915) >= 3)
+if (GRAPHICS_VER(engine->i915) >= 3)
 ENGINE_WRITE(engine, RING_HWSTAM, mask);
 else
 ENGINE_WRITE16(engine, RING_HWSTAM, mask);
@@ -317,7 +317,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 CONFIG_DRM_I915_TIMESLICE_DURATION;
 /* Override to uninterruptible for OpenCL workloads. */
-if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
+if (GRAPHICS_VER(i915) == 12 && engine->class == RENDER_CLASS)
 engine->props.preempt_timeout_ms = 0;
 engine->defaults = engine->props; /* never to change again */
@@ -354,8 +354,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
 * HEVC support is present on first engine instance
 * before Gen11 and on all instances afterwards.
 */
-if (INTEL_GEN(i915) >= 11 ||
-    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
+if (GRAPHICS_VER(i915) >= 11 ||
+    (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
 engine->uabi_capabilities |=
 I915_VIDEO_CLASS_CAPABILITY_HEVC;
@@ -363,14 +363,14 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
 * SFC block is present only on even logical engine
 * instances.
 */
-if ((INTEL_GEN(i915) >= 11 &&
+if ((GRAPHICS_VER(i915) >= 11 &&
 (engine->gt->info.vdbox_sfc_access &
 BIT(engine->instance))) ||
-    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
+    (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
 engine->uabi_capabilities |=
 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
 } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
-if (INTEL_GEN(i915) >= 9)
+if (GRAPHICS_VER(i915) >= 9)
 engine->uabi_capabilities |=
 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
 }
@@ -468,7 +468,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
 info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;
-if (INTEL_GEN(i915) < 11)
+if (GRAPHICS_VER(i915) < 11)
 return info->engine_mask;
 media_fuse = ~intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
@@ -494,7 +494,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
 * hooked up to an SFC (Scaler & Format Converter) unit.
 * In TGL each VDBOX has access to an SFC.
 */
-if (INTEL_GEN(i915) >= 12 || logical_vdbox++ % 2 == 0)
+if (GRAPHICS_VER(i915) >= 12 || logical_vdbox++ % 2 == 0)
 gt->info.vdbox_sfc_access |= BIT(i);
 }
 drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
@@ -731,7 +731,7 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 intel_engine_init_whitelist(engine);
 intel_engine_init_ctx_wa(engine);
-if (INTEL_GEN(engine->i915) >= 12)
+if (GRAPHICS_VER(engine->i915) >= 12)
 engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
 return 0;
@@ -999,9 +999,9 @@ u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
 u64 acthd;
-if (INTEL_GEN(i915) >= 8)
+if (GRAPHICS_VER(i915) >= 8)
 acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
-else if (INTEL_GEN(i915) >= 4)
+else if (GRAPHICS_VER(i915) >= 4)
 acthd = ENGINE_READ(engine, RING_ACTHD);
 else
 acthd = ENGINE_READ(engine, ACTHD);
@@ -1013,7 +1013,7 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
 {
 u64 bbaddr;
-if (INTEL_GEN(engine->i915) >= 8)
+if (GRAPHICS_VER(engine->i915) >= 8)
 bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
 else
 bbaddr = ENGINE_READ(engine, RING_BBADDR);
@@ -1060,7 +1060,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
 {
 int err = 0;
-if (INTEL_GEN(engine->i915) < 3)
+if (GRAPHICS_VER(engine->i915) < 3)
 return -ENODEV;
 ENGINE_TRACE(engine, "\n");
@@ -1110,7 +1110,7 @@ read_subslice_reg(const struct intel_engine_cs *engine,
 u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
 enum forcewake_domains fw_domains;
-if (INTEL_GEN(i915) >= 11) {
+if (GRAPHICS_VER(i915) >= 11) {
 mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
 mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
 } else {
@@ -1159,7 +1159,7 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
 memset(instdone, 0, sizeof(*instdone));
-switch (INTEL_GEN(i915)) {
+switch (GRAPHICS_VER(i915)) {
 default:
 instdone->instdone =
 intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
@@ -1169,7 +1169,7 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
 instdone->slice_common =
 intel_uncore_read(uncore, GEN7_SC_INSTDONE);
-if (INTEL_GEN(i915) >= 12) {
+if (GRAPHICS_VER(i915) >= 12) {
 instdone->slice_common_extra[0] =
 intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
 instdone->slice_common_extra[1] =
@@ -1232,7 +1232,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 idle = false;
 /* No bit for gen2, so assume the CS parser is idle */
-if (INTEL_GEN(engine->i915) > 2 &&
+if (GRAPHICS_VER(engine->i915) > 2 &&
 !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
 idle = false;
@@ -1329,7 +1329,7 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
 {
-switch (INTEL_GEN(engine->i915)) {
+switch (GRAPHICS_VER(engine->i915)) {
 case 2:
 return false; /* uses physical not virtual addresses */
 case 3:
@@ -1434,7 +1434,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 struct intel_engine_execlists * const execlists = &engine->execlists;
 u64 addr;
-if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
+if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7))
 drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
 if (HAS_EXECLISTS(dev_priv)) {
 drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
@@ -1451,13 +1451,13 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
 ENGINE_READ(engine, RING_CTL),
 ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
-if (INTEL_GEN(engine->i915) > 2) {
+if (GRAPHICS_VER(engine->i915) > 2) {
 drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
 ENGINE_READ(engine, RING_MI_MODE),
 ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
 }
-if (INTEL_GEN(dev_priv) >= 6) {
+if (GRAPHICS_VER(dev_priv) >= 6) {
 drm_printf(m, "\tRING_IMR: 0x%08x\n",
 ENGINE_READ(engine, RING_IMR));
 drm_printf(m, "\tRING_ESR: 0x%08x\n",
@@ -1474,15 +1474,15 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 addr = intel_engine_get_last_batch_head(engine);
 drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
 upper_32_bits(addr), lower_32_bits(addr));
-if (INTEL_GEN(dev_priv) >= 8)
+if (GRAPHICS_VER(dev_priv) >= 8)
 addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
-else if (INTEL_GEN(dev_priv) >= 4)
+else if (GRAPHICS_VER(dev_priv) >= 4)
 addr = ENGINE_READ(engine, RING_DMA_FADD);
 else
 addr = ENGINE_READ(engine, DMA_FADD_I8XX);
 drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
 upper_32_bits(addr), lower_32_bits(addr));
-if (INTEL_GEN(dev_priv) >= 4) {
+if (GRAPHICS_VER(dev_priv) >= 4) {
 drm_printf(m, "\tIPEIR: 0x%08x\n",
 ENGINE_READ(engine, RING_IPEIR));
 drm_printf(m, "\tIPEHR: 0x%08x\n",
@@ -1561,7 +1561,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 }
 rcu_read_unlock();
 execlists_active_unlock_bh(execlists);
-} else if (INTEL_GEN(dev_priv) > 6) {
+} else if (GRAPHICS_VER(dev_priv) > 6) {
 drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
 ENGINE_READ(engine, RING_PP_DIR_BASE));
 drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
@@ -1847,7 +1847,7 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
 head, upper_32_bits(csb), lower_32_bits(csb));
-if (INTEL_GEN(engine->i915) >= 12)
+if (GRAPHICS_VER(engine->i915) >= 12)
 promote = gen12_csb_parse(csb);
 else
 promote = gen8_csb_parse(csb);
@@ -2772,7 +2772,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
 intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
-if (INTEL_GEN(engine->i915) >= 11)
+if (GRAPHICS_VER(engine->i915) >= 11)
 mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
 else
 mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
@@ -3103,7 +3103,7 @@ static void execlists_park(struct intel_engine_cs *engine)
 static bool can_preempt(struct intel_engine_cs *engine)
 {
-if (INTEL_GEN(engine->i915) > 8)
+if (GRAPHICS_VER(engine->i915) > 8)
 return true;
 /* GPGPU on bdw requires extra w/a; not implemented */
@@ -3156,13 +3156,13 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 engine->emit_flush = gen8_emit_flush_xcs;
 engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
-if (INTEL_GEN(engine->i915) >= 12) {
+if (GRAPHICS_VER(engine->i915) >= 12) {
 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
 engine->emit_flush = gen12_emit_flush_xcs;
 }
 engine->set_default_submission = execlists_set_default_submission;
-if (INTEL_GEN(engine->i915) < 11) {
+if (GRAPHICS_VER(engine->i915) < 11) {
 engine->irq_enable = gen8_logical_ring_enable_irq;
 engine->irq_disable = gen8_logical_ring_disable_irq;
 } else {
@@ -3195,7 +3195,7 @@ static void logical_ring_default_irqs(struct intel_engine_cs *engine)
 {
 unsigned int shift = 0;
-if (INTEL_GEN(engine->i915) < 11) {
+if (GRAPHICS_VER(engine->i915) < 11) {
 const u8 irq_shifts[] = {
 [RCS0] = GEN8_RCS_IRQ_SHIFT,
 [BCS0] = GEN8_BCS_IRQ_SHIFT,
@@ -3215,7 +3215,7 @@ static void logical_ring_default_irqs(struct intel_engine_cs *engine)
 static void rcs_submission_override(struct intel_engine_cs *engine)
 {
-switch (INTEL_GEN(engine->i915)) {
+switch (GRAPHICS_VER(engine->i915)) {
 case 12:
 engine->emit_flush = gen12_emit_flush_rcs;
 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
@@ -3266,13 +3266,13 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 execlists->csb_write =
 &engine->status_page.addr[intel_hws_csb_write_index(i915)];
-if (INTEL_GEN(i915) < 11)
+if (GRAPHICS_VER(i915) < 11)
 execlists->csb_size = GEN8_CSB_ENTRIES;
 else
 execlists->csb_size = GEN11_CSB_ENTRIES;
 engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
-if (INTEL_GEN(engine->i915) >= 11) {
+if (GRAPHICS_VER(engine->i915) >= 11) {
 execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
 execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
 }
@@ -107,10 +107,10 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
 if (!intel_vtd_active())
 return false;
-if (IS_GEN(i915, 5) && IS_MOBILE(i915))
+if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
 return true;
-if (IS_GEN(i915, 12))
+if (GRAPHICS_VER(i915) == 12)
 return true; /* XXX DMAR fault reason 7 */
 return false;
@@ -176,7 +176,7 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
 gen8_ggtt_invalidate(ggtt);
-if (INTEL_GEN(i915) >= 12)
+if (GRAPHICS_VER(i915) >= 12)
 intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
 GEN12_GUC_TLB_INV_CR_INVALIDATE);
 else
@@ -832,7 +832,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 * resort to an uncached mapping. The WC issue is easily caught by the
 * readback check when writing GTT PTE entries.
 */
-if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
+if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 10)
 ggtt->gsm = ioremap(phys_addr, size);
 else
 ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -1078,7 +1078,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 ggtt->vm.pte_encode = hsw_pte_encode;
 else if (IS_VALLEYVIEW(i915))
 ggtt->vm.pte_encode = byt_pte_encode;
-else if (INTEL_GEN(i915) >= 7)
+else if (GRAPHICS_VER(i915) >= 7)
 ggtt->vm.pte_encode = ivb_pte_encode;
 else
 ggtt->vm.pte_encode = snb_pte_encode;
@@ -1150,9 +1150,9 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
 ggtt->vm.dma = i915->drm.dev;
 dma_resv_init(&ggtt->vm._resv);
-if (INTEL_GEN(i915) <= 5)
+if (GRAPHICS_VER(i915) <= 5)
 ret = i915_gmch_probe(ggtt);
-else if (INTEL_GEN(i915) < 8)
+else if (GRAPHICS_VER(i915) < 8)
 ret = gen6_gmch_probe(ggtt);
 else
 ret = gen8_gmch_probe(ggtt);
@@ -1209,7 +1209,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
 int i915_ggtt_enable_hw(struct drm_i915_private *i915)
 {
-if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
+if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
 return -EIO;
 return 0;
@@ -1274,7 +1274,7 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
 if (flush)
 wbinvd_on_all_cpus();
-if (INTEL_GEN(ggtt->vm.i915) >= 8)
+if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
 setup_private_pat(ggtt->vm.gt->uncore);
 intel_ggtt_restore_fences(ggtt);
@@ -56,7 +56,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence)
 int fence_pitch_shift;
 u64 val;
-if (INTEL_GEN(fence_to_i915(fence)) >= 6) {
+if (GRAPHICS_VER(fence_to_i915(fence)) >= 6) {
 fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
 fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
 fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
@@ -173,9 +173,9 @@ static void fence_write(struct i915_fence_reg *fence)
 * and explicitly managed for internal users.
 */
-if (IS_GEN(i915, 2))
+if (GRAPHICS_VER(i915) == 2)
 i830_write_fence_reg(fence);
-else if (IS_GEN(i915, 3))
+else if (GRAPHICS_VER(i915) == 3)
 i915_write_fence_reg(fence);
 else
 i965_write_fence_reg(fence);
@@ -188,7 +188,7 @@ static void fence_write(struct i915_fence_reg *fence)
 static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
 {
-return INTEL_GEN(fence_to_i915(fence)) < 4;
+return GRAPHICS_VER(fence_to_i915(fence)) < 4;
 }
 static int fence_update(struct i915_fence_reg *fence,
@@ -569,7 +569,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
 u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-if (INTEL_GEN(i915) >= 8 || IS_VALLEYVIEW(i915)) {
+if (GRAPHICS_VER(i915) >= 8 || IS_VALLEYVIEW(i915)) {
 /*
 * On BDW+, swizzling is not used. We leave the CPU memory
 * controller in charge of optimizing memory accesses without
@@ -579,7 +579,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
 */
 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-} else if (INTEL_GEN(i915) >= 6) {
+} else if (GRAPHICS_VER(i915) >= 6) {
 if (i915->preserve_bios_swizzle) {
 if (intel_uncore_read(uncore, DISP_ARB_CTL) &
 DISP_TILE_SURFACE_SWIZZLING) {
@@ -611,14 +611,14 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
 }
 }
-} else if (IS_GEN(i915, 5)) {
+} else if (GRAPHICS_VER(i915) == 5) {
 /*
 * On Ironlake whatever DRAM config, GPU always do
 * same swizzling setup.
 */
 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 swizzle_y = I915_BIT_6_SWIZZLE_9;
-} else if (IS_GEN(i915, 2)) {
+} else if (GRAPHICS_VER(i915) == 2) {
 /*
 * As far as we know, the 865 doesn't have these bit 6
 * swizzling issues.
@@ -697,7 +697,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
 }
 /* check for L-shaped memory aka modified enhanced addressing */
-if (IS_GEN(i915, 4) &&
+if (GRAPHICS_VER(i915) == 4 &&
 !(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -844,10 +844,10 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
 if (!i915_ggtt_has_aperture(ggtt))
 num_fences = 0;
-else if (INTEL_GEN(i915) >= 7 &&
+else if (GRAPHICS_VER(i915) >= 7 &&
 !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
 num_fences = 32;
-else if (INTEL_GEN(i915) >= 4 ||
+else if (GRAPHICS_VER(i915) >= 4 ||
 IS_I945G(i915) || IS_I945GM(i915) ||
 IS_G33(i915) || IS_PINEVIEW(i915))
 num_fences = 16;
@@ -895,29 +895,29 @@ void intel_gt_init_swizzling(struct intel_gt *gt)
 struct drm_i915_private *i915 = gt->i915;
 struct intel_uncore *uncore = gt->uncore;
-if (INTEL_GEN(i915) < 5 ||
+if (GRAPHICS_VER(i915) < 5 ||
 i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
 return;
 intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
-if (IS_GEN(i915, 5))
+if (GRAPHICS_VER(i915) == 5)
 return;
 intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL);
-if (IS_GEN(i915, 6))
+if (GRAPHICS_VER(i915) == 6)
 intel_uncore_write(uncore,
 ARB_MODE,
 _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
-else if (IS_GEN(i915, 7))
+else if (GRAPHICS_VER(i915) == 7)
 intel_uncore_write(uncore,
 ARB_MODE,
 _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
-else if (IS_GEN(i915, 8))
+else if (GRAPHICS_VER(i915) == 8)
 intel_uncore_write(uncore,
 GAMTARBMODE,
 _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
 else
-MISSING_CASE(INTEL_GEN(i915));
+MISSING_CASE(GRAPHICS_VER(i915));
 }
@@ -113,10 +113,10 @@ static void init_unused_rings(struct intel_gt *gt)
 init_unused_ring(gt, SRB1_BASE);
 init_unused_ring(gt, SRB2_BASE);
 init_unused_ring(gt, SRB3_BASE);
-} else if (IS_GEN(i915, 2)) {
+} else if (GRAPHICS_VER(i915) == 2) {
 init_unused_ring(gt, SRB0_BASE);
 init_unused_ring(gt, SRB1_BASE);
-} else if (IS_GEN(i915, 3)) {
+} else if (GRAPHICS_VER(i915) == 3) {
 init_unused_ring(gt, PRB1_BASE);
 init_unused_ring(gt, PRB2_BASE);
 }
@@ -133,7 +133,7 @@ int intel_gt_init_hw(struct intel_gt *gt)
 /* Double layer security blanket, see i915_gem_init() */
 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
+if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
 intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
 if (IS_HASWELL(i915))
@@ -206,10 +206,10 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
 struct intel_uncore *uncore = gt->uncore;
 u32 eir;
-if (!IS_GEN(i915, 2))
+if (GRAPHICS_VER(i915) != 2)
 clear_register(uncore, PGTBL_ER);
-if (INTEL_GEN(i915) < 4)
+if (GRAPHICS_VER(i915) < 4)
 clear_register(uncore, IPEIR(RENDER_RING_BASE));
 else
 clear_register(uncore, IPEIR_I965);
@@ -227,13 +227,13 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
 I915_MASTER_ERROR_INTERRUPT);
 }
-if (INTEL_GEN(i915) >= 12) {
+if (GRAPHICS_VER(i915) >= 12) {
 rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
 intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
-} else if (INTEL_GEN(i915) >= 8) {
+} else if (GRAPHICS_VER(i915) >= 8) {
 rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
 intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
-} else if (INTEL_GEN(i915) >= 6) {
+} else if (GRAPHICS_VER(i915) >= 6) {
 struct intel_engine_cs *engine;
 enum intel_engine_id id;
@@ -271,7 +271,7 @@ static void gen8_check_faults(struct intel_gt *gt)
 i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
 u32 fault;
-if (INTEL_GEN(gt->i915) >= 12) {
+if (GRAPHICS_VER(gt->i915) >= 12) {
 fault_reg = GEN12_RING_FAULT_REG;
 fault_data0_reg = GEN12_FAULT_TLB_DATA0;
 fault_data1_reg = GEN12_FAULT_TLB_DATA1;
@@ -311,9 +311,9 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt)
 struct drm_i915_private *i915 = gt->i915;
 /* From GEN8 onwards we only have one 'All Engine Fault Register' */
-if (INTEL_GEN(i915) >= 8)
+if (GRAPHICS_VER(i915) >= 8)
 gen8_check_faults(gt);
-else if (INTEL_GEN(i915) >= 6)
+else if (GRAPHICS_VER(i915) >= 6)
 gen6_check_faults(gt);
 else
 return;
@@ -365,7 +365,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
 void intel_gt_chipset_flush(struct intel_gt *gt)
 {
 wmb();
-if (INTEL_GEN(gt->i915) < 6)
+if (GRAPHICS_VER(gt->i915) < 6)
 intel_gtt_chipset_flush();
 }
@@ -589,7 +589,8 @@ int intel_gt_init(struct intel_gt *gt)
 */
 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
-err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
+err = intel_gt_init_scratch(gt,
+			    GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
 if (err)
 goto out_fw;
@@ -76,7 +76,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
 u32 f19_2_mhz = 19200000;
 u32 f24_mhz = 24000000;
-if (INTEL_GEN(uncore->i915) <= 4) {
+if (GRAPHICS_VER(uncore->i915) <= 4) {
 /*
 * PRMs say:
 *
@@ -85,7 +85,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
 * (“CLKCFG”) MCHBAR register)
 */
 return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
-} else if (INTEL_GEN(uncore->i915) <= 8) {
+} else if (GRAPHICS_VER(uncore->i915) <= 8) {
 /*
 * PRMs say:
 *
@@ -94,7 +94,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
 * rolling over every 1.5 hours).
 */
 return f12_5_mhz;
-} else if (INTEL_GEN(uncore->i915) <= 9) {
+} else if (GRAPHICS_VER(uncore->i915) <= 9) {
 u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
 u32 freq = 0;
@@ -113,7 +113,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
 }
 return freq;
-} else if (INTEL_GEN(uncore->i915) <= 12) {
+} else if (GRAPHICS_VER(uncore->i915) <= 12) {
 u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
 u32 freq = 0;
@@ -128,7 +128,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
 } else {
 u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
-if (INTEL_GEN(uncore->i915) <= 10)
+if (GRAPHICS_VER(uncore->i915) <= 10)
 freq = gen10_get_crystal_clock_freq(uncore, c0);
 else
 freq = gen11_get_crystal_clock_freq(uncore, c0);
@@ -211,7 +211,7 @@ u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
 * frozen machine.
 */
 val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16);
-if (IS_GEN(gt->i915, 6))
+if (GRAPHICS_VER(gt->i915) == 6)
 val = div_u64_roundup(val, 25) * 25;
 return val;
@@ -399,7 +399,7 @@ void gen5_gt_irq_reset(struct intel_gt *gt)
 struct intel_uncore *uncore = gt->uncore;
 GEN3_IRQ_RESET(uncore, GT);
-if (INTEL_GEN(gt->i915) >= 6)
+if (GRAPHICS_VER(gt->i915) >= 6)
 GEN3_IRQ_RESET(uncore, GEN6_PM);
 }
@@ -417,14 +417,14 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt)
 }
 gt_irqs |= GT_RENDER_USER_INTERRUPT;
-if (IS_GEN(gt->i915, 5))
+if (GRAPHICS_VER(gt->i915) == 5)
 gt_irqs |= ILK_BSD_USER_INTERRUPT;
 else
 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
 GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);
-if (INTEL_GEN(gt->i915) >= 6) {
+if (GRAPHICS_VER(gt->i915) >= 6) {
 /*
 * RPS interrupts will get enabled/disabled on demand when RPS
 * itself is enabled/disabled.
@@ -16,10 +16,10 @@ static void write_pm_imr(struct intel_gt *gt)
 u32 mask = gt->pm_imr;
 i915_reg_t reg;
-if (INTEL_GEN(i915) >= 11) {
+if (GRAPHICS_VER(i915) >= 11) {
 reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
 mask <<= 16; /* pm is in upper half */
-} else if (INTEL_GEN(i915) >= 8) {
+} else if (GRAPHICS_VER(i915) >= 8) {
 reg = GEN8_GT_IMR(2);
 } else {
 reg = GEN6_PMIMR;
@@ -61,7 +61,7 @@ void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask)
 void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask)
 {
 struct intel_uncore *uncore = gt->uncore;
-i915_reg_t reg = INTEL_GEN(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
+i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
 lockdep_assert_held(&gt->irq_lock);
@@ -77,10 +77,10 @@ static void write_pm_ier(struct intel_gt *gt)
 u32 mask = gt->pm_ier;
 i915_reg_t reg;
-if (INTEL_GEN(i915) >= 11) {
+if (GRAPHICS_VER(i915) >= 11) {
 reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
 mask <<= 16; /* pm is in upper half */
-} else if (INTEL_GEN(i915) >= 8) {
+} else if (GRAPHICS_VER(i915) >= 8) {
 reg = GEN8_GT_IER(2);
 } else {
 reg = GEN6_PMIER;
@@ -356,7 +356,7 @@ void gtt_write_workarounds(struct intel_gt *gt)
 intel_uncore_write(uncore,
 GEN8_L3_LRA_1_GPGPU,
 GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
-else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
+else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
 intel_uncore_write(uncore,
 GEN8_L3_LRA_1_GPGPU,
 GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
@@ -373,13 +373,13 @@ void gtt_write_workarounds(struct intel_gt *gt)
 * driver.
 */
 if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
-    INTEL_GEN(i915) <= 10)
+    GRAPHICS_VER(i915) <= 10)
 intel_uncore_rmw(uncore,
 GEN8_GAMW_ECO_DEV_RW_IA,
 0,
 GAMW_ECO_ENABLE_64K_IPS_FIELD);
-if (IS_GEN_RANGE(i915, 8, 11)) {
+if (IS_GRAPHICS_VER(i915, 8, 11)) {
 bool can_use_gtt_cache = true;
 /*
@@ -461,7 +461,7 @@ static void bdw_setup_private_ppat(struct intel_uncore *uncore)
 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
 /* for scanout with eLLC */
-if (INTEL_GEN(i915) >= 9)
+if (GRAPHICS_VER(i915) >= 9)
 pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
 else
 pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
@@ -510,11 +510,11 @@ void setup_private_pat(struct intel_uncore *uncore)
 {
 struct drm_i915_private *i915 = uncore->i915;
-GEM_BUG_ON(INTEL_GEN(i915) < 8);
+GEM_BUG_ON(GRAPHICS_VER(i915) < 8);
-if (INTEL_GEN(i915) >= 12)
+if (GRAPHICS_VER(i915) >= 12)
 tgl_setup_private_ppat(uncore);
-else if (INTEL_GEN(i915) >= 10)
+else if (GRAPHICS_VER(i915) >= 10)
 cnl_setup_private_ppat(uncore);
 else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
 chv_setup_private_ppat(uncore);
@@ -64,7 +64,7 @@ static bool get_ia_constants(struct intel_llc *llc,
 consts->min_gpu_freq = rps->min_freq;
 consts->max_gpu_freq = rps->max_freq;
-if (INTEL_GEN(i915) >= 9) {
+if (GRAPHICS_VER(i915) >= 9) {
 /* Convert GT frequency to 50 HZ units */
 consts->min_gpu_freq /= GEN9_FREQ_SCALER;
 consts->max_gpu_freq /= GEN9_FREQ_SCALER;
@@ -83,13 +83,13 @@ static void calc_ia_freq(struct intel_llc *llc,
 const int diff = consts->max_gpu_freq - gpu_freq;
 unsigned int ia_freq = 0, ring_freq = 0;
-if (INTEL_GEN(i915) >= 9) {
+if (GRAPHICS_VER(i915) >= 9) {
 /*
 * ring_freq = 2 * GT. ring_freq is in 100MHz units
 * No floor required for ring frequency on SKL.
 */
 ring_freq = gpu_freq;
-} else if (INTEL_GEN(i915) >= 8) {
+} else if (GRAPHICS_VER(i915) >= 8) {
 /* max(2 * GT, DDR). NB: GT is 50MHz units */
 ring_freq = max(consts->min_ring_freq, gpu_freq);
 } else if (IS_HASWELL(i915)) {
@@ -47,7 +47,7 @@ static void set_offsets(u32 *regs,
 *regs = MI_LOAD_REGISTER_IMM(count);
 if (flags & POSTED)
 *regs |= MI_LRI_FORCE_POSTED;
-if (INTEL_GEN(engine->i915) >= 11)
+if (GRAPHICS_VER(engine->i915) >= 11)
 *regs |= MI_LRI_LRM_CS_MMIO;
 regs++;
@@ -70,7 +70,7 @@ static void set_offsets(u32 *regs,
 if (close) {
 /* Close the batch; used mainly by live_lrc_layout() */
 *regs = MI_BATCH_BUFFER_END;
-if (INTEL_GEN(engine->i915) >= 10)
+if (GRAPHICS_VER(engine->i915) >= 10)
 *regs |= BIT(0);
 }
 }
@@ -498,22 +498,22 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
 * addressing to automatic fixup the register state between the
 * physical engines for virtual engine.
 */
-GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 &&
+GEM_BUG_ON(GRAPHICS_VER(engine->i915) >= 12 &&
 !intel_engine_has_relative_mmio(engine));
 if (engine->class == RENDER_CLASS) {
-if (INTEL_GEN(engine->i915) >= 12)
+if (GRAPHICS_VER(engine->i915) >= 12)
 return gen12_rcs_offsets;
-else if (INTEL_GEN(engine->i915) >= 11)
+else if (GRAPHICS_VER(engine->i915) >= 11)
 return gen11_rcs_offsets;
-else if (INTEL_GEN(engine->i915) >= 9)
+else if (GRAPHICS_VER(engine->i915) >= 9)
 return gen9_rcs_offsets;
 else
 return gen8_rcs_offsets;
 } else {
-if (INTEL_GEN(engine->i915) >= 12)
+if (GRAPHICS_VER(engine->i915) >= 12)
 return gen12_xcs_offsets;
-else if (INTEL_GEN(engine->i915) >= 9)
+else if (GRAPHICS_VER(engine->i915) >= 9)
 return gen9_xcs_offsets;
 else
 return gen8_xcs_offsets;
@@ -522,9 +522,9 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
 static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
 {
-if (INTEL_GEN(engine->i915) >= 12)
+if (GRAPHICS_VER(engine->i915) >= 12)
 return 0x60;
-else if (INTEL_GEN(engine->i915) >= 9)
+else if (GRAPHICS_VER(engine->i915) >= 9)
 return 0x54;
 else if (engine->class == RENDER_CLASS)
 return 0x58;
@@ -534,9 +534,9 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
 static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
 {
-if (INTEL_GEN(engine->i915) >= 12)
+if (GRAPHICS_VER(engine->i915) >= 12)
 return 0x74;
-else if (INTEL_GEN(engine->i915) >= 9)
+else if (GRAPHICS_VER(engine->i915) >= 9)
 return 0x68;
 else if (engine->class == RENDER_CLASS)
 return 0xd8;
@@ -546,9 +546,9 @@ static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
 static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
 {
-if (INTEL_GEN(engine->i915) >= 12)
+if (GRAPHICS_VER(engine->i915) >= 12)
 return 0x12;
-else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
+else if (GRAPHICS_VER(engine->i915) >= 9 || engine->class == RENDER_CLASS)
 return 0x18;
 else
 return -1;
@@ -581,9 +581,9 @@ static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
 if (engine->class != RENDER_CLASS)
 return -1;
-if (INTEL_GEN(engine->i915) >= 12)
+if (GRAPHICS_VER(engine->i915) >= 12)
 return 0xb6;
-else if (INTEL_GEN(engine->i915) >= 11)
+else if (GRAPHICS_VER(engine->i915) >= 11)
 return 0xaa;
 else
 return -1;
@@ -592,9 +592,9 @@ static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
 static u32
 lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
 {
-switch (INTEL_GEN(engine->i915)) {
+switch (GRAPHICS_VER(engine->i915)) {
 default:
-MISSING_CASE(INTEL_GEN(engine->i915));
+MISSING_CASE(GRAPHICS_VER(engine->i915));
 fallthrough;
 case 12:
 return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
@@ -637,7 +637,7 @@ static void init_common_regs(u32 * const regs,
 ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 if (inhibit)
 ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
-if (INTEL_GEN(engine->i915) < 11)
+if (GRAPHICS_VER(engine->i915) < 11)
 ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
 CTX_CTRL_RS_CTX_ENABLE);
 regs[CTX_CONTEXT_CONTROL] = ctl;
@@ -805,7 +805,7 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
 context_size += I915_GTT_PAGE_SIZE; /* for redzone */
-if (INTEL_GEN(engine->i915) == 12) {
+if (GRAPHICS_VER(engine->i915) == 12) {
 ce->wa_bb_page = context_size / PAGE_SIZE;
 context_size += PAGE_SIZE;
 }
@@ -1114,7 +1114,7 @@ static u32 lrc_descriptor(const struct intel_context *ce)
 desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
 desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
-if (IS_GEN(ce->vm->i915, 8))
+if (GRAPHICS_VER(ce->vm->i915) == 8)
 desc |= GEN8_CTX_L3LLC_COHERENT;
 return i915_ggtt_offset(ce->state) | desc;
@@ -1469,7 +1469,7 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine)
 if (engine->class != RENDER_CLASS)
 return;
-switch (INTEL_GEN(engine->i915)) {
+switch (GRAPHICS_VER(engine->i915)) {
 case 12:
 case 11:
 return;
@@ -1486,7 +1486,7 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine)
 wa_bb_fn[1] = NULL;
 break;
 default:
-MISSING_CASE(INTEL_GEN(engine->i915));
+MISSING_CASE(GRAPHICS_VER(engine->i915));
 return;
 }
@@ -344,11 +344,11 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
 table->size = ARRAY_SIZE(dg1_mocs_table);
 table->table = dg1_mocs_table;
 table->n_entries = GEN9_NUM_MOCS_ENTRIES;
-} else if (INTEL_GEN(i915) >= 12) {
+} else if (GRAPHICS_VER(i915) >= 12) {
 table->size = ARRAY_SIZE(tgl_mocs_table);
 table->table = tgl_mocs_table;
 table->n_entries = GEN9_NUM_MOCS_ENTRIES;
-} else if (IS_GEN(i915, 11)) {
+} else if (GRAPHICS_VER(i915) == 11) {
 table->size = ARRAY_SIZE(icl_mocs_table);
 table->table = icl_mocs_table;
 table->n_entries = GEN9_NUM_MOCS_ENTRIES;
@@ -361,7 +361,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
 table->n_entries = GEN9_NUM_MOCS_ENTRIES;
 table->table = broxton_mocs_table;
 } else {
-drm_WARN_ONCE(&i915->drm, INTEL_GEN(i915) >= 9,
+drm_WARN_ONCE(&i915->drm, GRAPHICS_VER(i915) >= 9,
 "Platform that should have a MOCS table does not.\n");
 return 0;
 }
@@ -370,7 +370,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
 return 0;
 /* WaDisableSkipCaching:skl,bxt,kbl,glk */
-if (IS_GEN(i915, 9)) {
+if (GRAPHICS_VER(i915) == 9) {
 int i;
 for (i = 0; i < table->size; i++)
@@ -146,9 +146,9 @@ int i915_ppgtt_init_hw(struct intel_gt *gt)
 gtt_write_workarounds(gt);
-if (IS_GEN(i915, 6))
+if (GRAPHICS_VER(i915) == 6)
 gen6_ppgtt_enable(gt);
-else if (IS_GEN(i915, 7))
+else if (GRAPHICS_VER(i915) == 7)
 gen7_ppgtt_enable(gt);
 return 0;
@@ -157,7 +157,7 @@ int i915_ppgtt_init_hw(struct intel_gt *gt)
 static struct i915_ppgtt *
 __ppgtt_create(struct intel_gt *gt)
 {
-if (INTEL_GEN(gt->i915) < 8)
+if (GRAPHICS_VER(gt->i915) < 8)
 return gen6_ppgtt_create(gt);
 else
 return gen8_ppgtt_create(gt);
@@ -109,7 +109,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
 GEN9_MEDIA_PG_ENABLE |
 GEN11_MEDIA_SAMPLER_PG_ENABLE;
-if (INTEL_GEN(gt->i915) >= 12) {
+if (GRAPHICS_VER(gt->i915) >= 12) {
 for (i = 0; i < I915_MAX_VCS; i++)
 if (HAS_ENGINE(gt, _VCS(i)))
 pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
@@ -126,7 +126,7 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
 enum intel_engine_id id;
 /* 2b: Program RC6 thresholds.*/
-if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) {
+if (GRAPHICS_VER(rc6_to_i915(rc6)) >= 10) {
 set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
 set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
 } else if (IS_SKYLAKE(rc6_to_i915(rc6))) {
@@ -249,9 +249,9 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
 rc6vids = 0;
 ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
 &rc6vids, NULL);
-if (IS_GEN(i915, 6) && ret) {
+if (GRAPHICS_VER(i915) == 6 && ret) {
 drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
-} else if (IS_GEN(i915, 6) &&
+} else if (GRAPHICS_VER(i915) == 6 &&
 (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
 drm_dbg(&i915->drm,
 "You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
@@ -515,7 +515,7 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6)
 struct intel_uncore *uncore = rc6_to_uncore(rc6);
 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-if (INTEL_GEN(i915) >= 9)
+if (GRAPHICS_VER(i915) >= 9)
 set(uncore, GEN9_PG_ENABLE, 0);
 set(uncore, GEN6_RC_CONTROL, 0);
 set(uncore, GEN6_RC_STATE, 0);
@@ -575,13 +575,13 @@ void intel_rc6_enable(struct intel_rc6 *rc6)
 chv_rc6_enable(rc6);
 else if (IS_VALLEYVIEW(i915))
 vlv_rc6_enable(rc6);
-else if (INTEL_GEN(i915) >= 11)
+else if (GRAPHICS_VER(i915) >= 11)
 gen11_rc6_enable(rc6);
-else if (INTEL_GEN(i915) >= 9)
+else if (GRAPHICS_VER(i915) >= 9)
 gen9_rc6_enable(rc6);
 else if (IS_BROADWELL(i915))
 gen8_rc6_enable(rc6);
-else if (INTEL_GEN(i915) >= 6)
+else if (GRAPHICS_VER(i915) >= 6)
 gen6_rc6_enable(rc6);
 rc6->manual = rc6->ctl_enable & GEN6_RC_CTL_RC6_ENABLE;
@@ -15,7 +15,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine)
 if (engine->class != RENDER_CLASS)
 return NULL;
-switch (INTEL_GEN(engine->i915)) {
+switch (GRAPHICS_VER(engine->i915)) {
 case 6:
 return &gen6_null_state;
 case 7:
@@ -421,7 +421,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine,
 struct intel_engine_cs *paired_vecs;
 if (engine->class != VIDEO_DECODE_CLASS ||
-    !IS_GEN(engine->i915, 12))
+    GRAPHICS_VER(engine->i915) != 12)
 return 0;
 /*
@@ -633,7 +633,7 @@ static int gen8_reset_engines(struct intel_gt *gt,
 */
 }
-if (INTEL_GEN(gt->i915) >= 11)
+if (GRAPHICS_VER(gt->i915) >= 11)
 ret = gen11_reset_engines(gt, engine_mask, retry);
 else
 ret = gen6_reset_engines(gt, engine_mask, retry);
@@ -662,17 +662,17 @@ static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
 if (is_mock_gt(gt))
 return mock_reset;
-else if (INTEL_GEN(i915) >= 8)
+else if (GRAPHICS_VER(i915) >= 8)
 return gen8_reset_engines;
-else if (INTEL_GEN(i915) >= 6)
+else if (GRAPHICS_VER(i915) >= 6)
 return gen6_reset_engines;
-else if (INTEL_GEN(i915) >= 5)
+else if (GRAPHICS_VER(i915) >= 5)
 return ilk_do_reset;
 else if (IS_G4X(i915))
 return g4x_do_reset;
 else if (IS_G33(i915) || IS_PINEVIEW(i915))
 return g33_do_reset;
-else if (INTEL_GEN(i915) >= 3)
+else if (GRAPHICS_VER(i915) >= 3)
 return i915_do_reset;
 else
 return NULL;
@@ -724,7 +724,7 @@ bool intel_has_reset_engine(const struct intel_gt *gt)
 int intel_reset_guc(struct intel_gt *gt)
 {
 u32 guc_domain =
-INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
+GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
 int ret;
 GEM_BUG_ON(!HAS_GT_UC(gt->i915));
@@ -29,7 +29,7 @@ static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
 * lost interrupts following a reset.
 */
 if (engine->class == RENDER_CLASS) {
-if (INTEL_GEN(engine->i915) >= 6)
+if (GRAPHICS_VER(engine->i915) >= 6)
 mask &= ~BIT(0);
 else
 mask &= ~I915_USER_INTERRUPT;
@@ -43,7 +43,7 @@ static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
 u32 addr;
 addr = lower_32_bits(phys);
-if (INTEL_GEN(engine->i915) >= 4)
+if (GRAPHICS_VER(engine->i915) >= 4)
 addr |= (phys >> 28) & 0xf0;
 intel_uncore_write(engine->uncore, HWS_PGA, addr);
@@ -71,7 +71,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
 * The ring status page addresses are no longer next to the rest of
 * the ring registers as of gen7.
 */
-if (IS_GEN(engine->i915, 7)) {
+if (GRAPHICS_VER(engine->i915) == 7) {
 switch (engine->id) {
 /*
 * No more rings exist on Gen7. Default case is only to shut up
@@ -93,7 +93,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
 hwsp = VEBOX_HWS_PGA_GEN7;
 break;
 }
-} else if (IS_GEN(engine->i915, 6)) {
+} else if (GRAPHICS_VER(engine->i915) == 6) {
 hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
 } else {
 hwsp = RING_HWS_PGA(engine->mmio_base);
@@ -105,7 +105,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
 static void flush_cs_tlb(struct intel_engine_cs *engine)
 {
-if (!IS_GEN_RANGE(engine->i915, 6, 7))
+if (!IS_GRAPHICS_VER(engine->i915, 6, 7))
 return;
 /* ring should be idle before issuing a sync flush*/
@@ -153,7 +153,7 @@ static void set_pp_dir(struct intel_engine_cs *engine)
 ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
 ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));
-if (INTEL_GEN(engine->i915) >= 7) {
+if (GRAPHICS_VER(engine->i915) >= 7) {
 ENGINE_WRITE_FW(engine,
 RING_MODE_GEN7,
 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
@@ -229,7 +229,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
 5000, 0, NULL))
 goto err;
-if (INTEL_GEN(engine->i915) > 2)
+if (GRAPHICS_VER(engine->i915) > 2)
 ENGINE_WRITE_FW(engine,
 RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
@@ -646,9 +646,9 @@ static int mi_set_context(struct i915_request *rq,
 u32 *cs;
 len = 4;
-if (IS_GEN(i915, 7))
+if (GRAPHICS_VER(i915) == 7)
 len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
-else if (IS_GEN(i915, 5))
+else if (GRAPHICS_VER(i915) == 5)
 len += 2;
 if (flags & MI_FORCE_RESTORE) {
 GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
@@ -662,7 +662,7 @@ static int mi_set_context(struct i915_request *rq,
 return PTR_ERR(cs);
 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-if (IS_GEN(i915, 7)) {
+if (GRAPHICS_VER(i915) == 7) {
 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
 if (num_engines) {
 struct intel_engine_cs *signaller;
@@ -678,7 +678,7 @@ static int mi_set_context(struct i915_request *rq,
 GEN6_PSMI_SLEEP_MSG_DISABLE);
 }
 }
-} else if (IS_GEN(i915, 5)) {
+} else if (GRAPHICS_VER(i915) == 5) {
 /*
 * This w/a is only listed for pre-production ilk a/b steppings,
 * but is also mentioned for programming the powerctx. To be
@@ -716,7 +716,7 @@ static int mi_set_context(struct i915_request *rq,
 */
 *cs++ = MI_NOOP;
-if (IS_GEN(i915, 7)) {
+if (GRAPHICS_VER(i915) == 7) {
 if (num_engines) {
 struct intel_engine_cs *signaller;
 i915_reg_t last_reg = {}; /* keep gcc quiet */
@@ -740,7 +740,7 @@ static int mi_set_context(struct i915_request *rq,
 *cs++ = MI_NOOP;
 }
 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-} else if (IS_GEN(i915, 5)) {
+} else if (GRAPHICS_VER(i915) == 5) {
 *cs++ = MI_SUSPEND_FLUSH;
 }
@@ -1001,7 +1001,7 @@ static void ring_release(struct intel_engine_cs *engine)
 {
 struct drm_i915_private *dev_priv = engine->i915;
-drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 &&
+drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
 (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
 intel_engine_cleanup_common(engine);
@@ -1029,13 +1029,13 @@ static void setup_irq(struct intel_engine_cs *engine)
 intel_engine_set_irq_handler(engine, irq_handler);
-if (INTEL_GEN(i915) >= 6) {
+if (GRAPHICS_VER(i915) >= 6) {
 engine->irq_enable = gen6_irq_enable;
 engine->irq_disable = gen6_irq_disable;
-} else if (INTEL_GEN(i915) >= 5) {
+} else if (GRAPHICS_VER(i915) >= 5) {
 engine->irq_enable = gen5_irq_enable;
 engine->irq_disable = gen5_irq_disable;
-} else if (INTEL_GEN(i915) >= 3) {
+} else if (GRAPHICS_VER(i915) >= 3) {
 engine->irq_enable = gen3_irq_enable;
 engine->irq_disable = gen3_irq_disable;
 } else {
@@ -1049,7 +1049,7 @@ static void setup_common(struct intel_engine_cs *engine)
 struct drm_i915_private *i915 = engine->i915;
 /* gen8+ are only supported with execlists */
-GEM_BUG_ON(INTEL_GEN(i915) >= 8);
+GEM_BUG_ON(GRAPHICS_VER(i915) >= 8);
 setup_irq(engine);
@@ -1070,14 +1070,14 @@ static void setup_common(struct intel_engine_cs *engine)
 * engine->emit_init_breadcrumb().
 */
 engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
-if (IS_GEN(i915, 5))
+if (GRAPHICS_VER(i915) == 5)
 engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
 engine->set_default_submission = i9xx_set_default_submission;
-if (INTEL_GEN(i915) >= 6)
+if (GRAPHICS_VER(i915) >= 6)
 engine->emit_bb_start = gen6_emit_bb_start;
-else if (INTEL_GEN(i915) >= 4)
+else if (GRAPHICS_VER(i915) >= 4)
 engine->emit_bb_start = gen4_emit_bb_start;
 else if (IS_I830(i915) || IS_I845G(i915))
 engine->emit_bb_start = i830_emit_bb_start;
@@ -1094,16 +1094,16 @@ static void setup_rcs(struct intel_engine_cs *engine)
 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-if (INTEL_GEN(i915) >= 7) {
+if (GRAPHICS_VER(i915) >= 7) {
 engine->emit_flush = gen7_emit_flush_rcs;
 engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
-} else if (IS_GEN(i915, 6)) {
+} else if (GRAPHICS_VER(i915) == 6) {
 engine->emit_flush = gen6_emit_flush_rcs;
 engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
-} else if (IS_GEN(i915, 5)) {
+} else if (GRAPHICS_VER(i915) == 5) {
 engine->emit_flush = gen4_emit_flush_rcs;
 } else {
-if (INTEL_GEN(i915) < 4)
+if (GRAPHICS_VER(i915) < 4)
 engine->emit_flush = gen2_emit_flush;
 else
 engine->emit_flush = gen4_emit_flush_rcs;
@@ -1118,20 +1118,20 @@ static void setup_vcs(struct intel_engine_cs *engine)
 {
 struct drm_i915_private *i915 = engine->i915;
-if (INTEL_GEN(i915) >= 6) {
+if (GRAPHICS_VER(i915) >= 6) {
 /* gen6 bsd needs a special wa for tail updates */
-if (IS_GEN(i915, 6))
+if (GRAPHICS_VER(i915) == 6)
 engine->set_default_submission = gen6_bsd_set_default_submission;
|
||||
engine->emit_flush = gen6_emit_flush_vcs;
|
||||
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
|
||||
|
||||
if (IS_GEN(i915, 6))
|
||||
if (GRAPHICS_VER(i915) == 6)
|
||||
engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
|
||||
else
|
||||
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
|
||||
} else {
|
||||
engine->emit_flush = gen4_emit_flush_vcs;
|
||||
if (IS_GEN(i915, 5))
|
||||
if (GRAPHICS_VER(i915) == 5)
|
||||
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
|
||||
else
|
||||
engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
|
||||
|
@ -1145,7 +1145,7 @@ static void setup_bcs(struct intel_engine_cs *engine)
|
|||
engine->emit_flush = gen6_emit_flush_xcs;
|
||||
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
|
||||
|
||||
if (IS_GEN(i915, 6))
|
||||
if (GRAPHICS_VER(i915) == 6)
|
||||
engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
|
||||
else
|
||||
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
|
||||
|
@ -1155,7 +1155,7 @@ static void setup_vecs(struct intel_engine_cs *engine)
|
|||
{
|
||||
struct drm_i915_private *i915 = engine->i915;
|
||||
|
||||
GEM_BUG_ON(INTEL_GEN(i915) < 7);
|
||||
GEM_BUG_ON(GRAPHICS_VER(i915) < 7);
|
||||
|
||||
engine->emit_flush = gen6_emit_flush_xcs;
|
||||
engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
|
||||
|
@ -1203,7 +1203,7 @@ static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
|
|||
struct i915_vma *vma;
|
||||
int size, err;
|
||||
|
||||
if (!IS_GEN(engine->i915, 7) || engine->class != RENDER_CLASS)
|
||||
if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
|
||||
return 0;
|
||||
|
||||
err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
|
||||
|
|
|
@@ -196,7 +196,7 @@ static void rps_reset_interrupts(struct intel_rps *rps)
struct intel_gt *gt = rps_to_gt(rps);
spin_lock_irq(&gt->irq_lock);
if (INTEL_GEN(gt->i915) >= 11)
if (GRAPHICS_VER(gt->i915) >= 11)
gen11_rps_reset_interrupts(rps);
else
gen6_rps_reset_interrupts(rps);

@@ -630,7 +630,7 @@ static u32 rps_limits(struct intel_rps *rps, u8 val)
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt.
*/
if (INTEL_GEN(rps_to_i915(rps)) >= 9) {
if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
limits = rps->max_freq_softlimit << 23;
if (val <= rps->min_freq_softlimit)
limits |= rps->min_freq_softlimit << 14;

@@ -697,7 +697,7 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));
set(uncore, GEN6_RP_CONTROL,
(INTEL_GEN(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
(GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |

@@ -771,7 +771,7 @@ static int gen6_rps_set(struct intel_rps *rps, u8 val)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 swreq;
if (INTEL_GEN(i915) >= 9)
if (GRAPHICS_VER(i915) >= 9)
swreq = GEN9_FREQUENCY(val);
else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
swreq = HSW_FREQUENCY(val);

@@ -812,14 +812,14 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update)
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
err = vlv_rps_set(rps, val);
else if (INTEL_GEN(i915) >= 6)
else if (GRAPHICS_VER(i915) >= 6)
err = gen6_rps_set(rps, val);
else
err = gen5_rps_set(rps, val);
if (err)
return err;
if (update && INTEL_GEN(i915) >= 6)
if (update && GRAPHICS_VER(i915) >= 6)
gen6_rps_set_thresholds(rps, val);
rps->last_freq = val;

@@ -853,7 +853,7 @@ void intel_rps_unpark(struct intel_rps *rps)
if (intel_rps_uses_timer(rps))
rps_start_timer(rps);
if (IS_GEN(rps_to_i915(rps), 5))
if (GRAPHICS_VER(rps_to_i915(rps)) == 5)
gen5_rps_update(rps);
}

@@ -999,7 +999,7 @@ static void gen6_rps_init(struct intel_rps *rps)
rps->efficient_freq = rps->rp1_freq;
if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
u32 ddcc_status = 0;
if (sandybridge_pcode_read(i915,

@@ -1012,7 +1012,7 @@ static void gen6_rps_init(struct intel_rps *rps)
rps->max_freq);
}
if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
/* Store the frequency values in 16.66 MHZ units, which is
* the natural hardware unit for SKL
*/

@@ -1048,7 +1048,7 @@ static bool gen9_rps_enable(struct intel_rps *rps)
struct intel_uncore *uncore = gt->uncore;
/* Program defaults and thresholds for RPS */
if (IS_GEN(gt->i915, 9))
if (GRAPHICS_VER(gt->i915) == 9)
intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
GEN9_FREQUENCY(rps->rp1_freq));

@@ -1365,16 +1365,16 @@ void intel_rps_enable(struct intel_rps *rps)
enabled = chv_rps_enable(rps);
else if (IS_VALLEYVIEW(i915))
enabled = vlv_rps_enable(rps);
else if (INTEL_GEN(i915) >= 9)
else if (GRAPHICS_VER(i915) >= 9)
enabled = gen9_rps_enable(rps);
else if (INTEL_GEN(i915) >= 8)
else if (GRAPHICS_VER(i915) >= 8)
enabled = gen8_rps_enable(rps);
else if (INTEL_GEN(i915) >= 6)
else if (GRAPHICS_VER(i915) >= 6)
enabled = gen6_rps_enable(rps);
else if (IS_IRONLAKE_M(i915))
enabled = gen5_rps_enable(rps);
else
MISSING_CASE(INTEL_GEN(i915));
MISSING_CASE(GRAPHICS_VER(i915));
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
if (!enabled)
return;

@@ -1393,7 +1393,7 @@ void intel_rps_enable(struct intel_rps *rps)
if (has_busy_stats(rps))
intel_rps_set_timer(rps);
else if (INTEL_GEN(i915) >= 6)
else if (GRAPHICS_VER(i915) >= 6)
intel_rps_set_interrupts(rps);
else
/* Ironlake currently uses intel_ips.ko */ {}

@@ -1414,7 +1414,7 @@ void intel_rps_disable(struct intel_rps *rps)
intel_rps_clear_interrupts(rps);
intel_rps_clear_timer(rps);
if (INTEL_GEN(i915) >= 6)
if (GRAPHICS_VER(i915) >= 6)
gen6_rps_disable(rps);
else if (IS_IRONLAKE_M(i915))
gen5_rps_disable(rps);

@@ -1453,14 +1453,14 @@ int intel_gpu_freq(struct intel_rps *rps, int val)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
if (INTEL_GEN(i915) >= 9)
if (GRAPHICS_VER(i915) >= 9)
return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
GEN9_FREQ_SCALER);
else if (IS_CHERRYVIEW(i915))
return chv_gpu_freq(rps, val);
else if (IS_VALLEYVIEW(i915))
return byt_gpu_freq(rps, val);
else if (INTEL_GEN(i915) >= 6)
else if (GRAPHICS_VER(i915) >= 6)
return val * GT_FREQUENCY_MULTIPLIER;
else
return val;

@@ -1470,14 +1470,14 @@ int intel_freq_opcode(struct intel_rps *rps, int val)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
if (INTEL_GEN(i915) >= 9)
if (GRAPHICS_VER(i915) >= 9)
return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
GT_FREQUENCY_MULTIPLIER);
else if (IS_CHERRYVIEW(i915))
return chv_freq_opcode(rps, val);
else if (IS_VALLEYVIEW(i915))
return byt_freq_opcode(rps, val);
else if (INTEL_GEN(i915) >= 6)
else if (GRAPHICS_VER(i915) >= 6)
return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
else
return val;

@@ -1770,7 +1770,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
spin_unlock(&gt->irq_lock);
}
if (INTEL_GEN(gt->i915) >= 8)
if (GRAPHICS_VER(gt->i915) >= 8)
return;
if (pm_iir & PM_VEBOX_USER_INTERRUPT)

@@ -1833,7 +1833,7 @@ void intel_rps_init(struct intel_rps *rps)
chv_rps_init(rps);
else if (IS_VALLEYVIEW(i915))
vlv_rps_init(rps);
else if (INTEL_GEN(i915) >= 6)
else if (GRAPHICS_VER(i915) >= 6)
gen6_rps_init(rps);
else if (IS_IRONLAKE_M(i915))
gen5_rps_init(rps);

@@ -1843,7 +1843,7 @@ void intel_rps_init(struct intel_rps *rps)
rps->min_freq_softlimit = rps->min_freq;
/* After setting max-softlimit, find the overclock max freq */
if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
u32 params = 0;
sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,

@@ -1872,16 +1872,16 @@ void intel_rps_init(struct intel_rps *rps)
*
* TODO: verify if this can be reproduced on VLV,CHV.
*/
if (INTEL_GEN(i915) <= 7)
if (GRAPHICS_VER(i915) <= 7)
rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11)
if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11)
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}

void intel_rps_sanitize(struct intel_rps *rps)
{
if (INTEL_GEN(rps_to_i915(rps)) >= 6)
if (GRAPHICS_VER(rps_to_i915(rps)) >= 6)
rps_disable_interrupts(rps);
}

@@ -1892,11 +1892,11 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
cagf = (rpstat >> 8) & 0xff;
else if (INTEL_GEN(i915) >= 9)
else if (GRAPHICS_VER(i915) >= 9)
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else if (INTEL_GEN(i915) >= 6)
else if (GRAPHICS_VER(i915) >= 6)
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
else
cagf = gen5_invert_freq(rps, (rpstat & MEMSTAT_PSTATE_MASK) >>

@@ -1915,7 +1915,7 @@ static u32 read_cagf(struct intel_rps *rps)
vlv_punit_get(i915);
freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
vlv_punit_put(i915);
} else if (INTEL_GEN(i915) >= 6) {
} else if (GRAPHICS_VER(i915) >= 6) {
freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
} else {
freq = intel_uncore_read(uncore, MEMSTAT_ILK);

@@ -1968,7 +1968,7 @@ void intel_rps_driver_register(struct intel_rps *rps)
* We only register the i915 ips part with intel-ips once everything is
* set up, to avoid intel-ips sneaking in and reading bogus values.
*/
if (IS_GEN(gt->i915, 5)) {
if (GRAPHICS_VER(gt->i915) == 5) {
GEM_BUG_ON(ips_mchdev);
rcu_assign_pointer(ips_mchdev, gt->i915);
ips_ping_for_i915_load();

@@ -590,13 +590,13 @@ void intel_sseu_info_init(struct intel_gt *gt)
cherryview_sseu_info_init(gt);
else if (IS_BROADWELL(i915))
bdw_sseu_info_init(gt);
else if (IS_GEN(i915, 9))
else if (GRAPHICS_VER(i915) == 9)
gen9_sseu_info_init(gt);
else if (IS_GEN(i915, 10))
else if (GRAPHICS_VER(i915) == 10)
gen10_sseu_info_init(gt);
else if (IS_GEN(i915, 11))
else if (GRAPHICS_VER(i915) == 11)
gen11_sseu_info_init(gt);
else if (INTEL_GEN(i915) >= 12)
else if (GRAPHICS_VER(i915) >= 12)
gen12_sseu_info_init(gt);
}

@@ -613,7 +613,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
* No explicit RPCS request is needed to ensure full
* slice/subslice/EU enablement prior to Gen9.
*/
if (INTEL_GEN(i915) < 9)
if (GRAPHICS_VER(i915) < 9)
return 0;
/*

@@ -651,7 +651,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
* subslices are enabled, or a count between one and four on the first
* slice.
*/
if (IS_GEN(i915, 11) &&
if (GRAPHICS_VER(i915) == 11 &&
slices == 1 &&
subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) {
GEM_BUG_ON(subslices & 1);

@@ -669,7 +669,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
if (sseu->has_slice_pg) {
u32 mask, val = slices;
if (INTEL_GEN(i915) >= 11) {
if (GRAPHICS_VER(i915) >= 11) {
mask = GEN11_RPCS_S_CNT_MASK;
val <<= GEN11_RPCS_S_CNT_SHIFT;
} else {

@@ -699,9 +699,9 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (IS_DG1(i915))
dg1_ctx_workarounds_init(engine, wal);
else if (IS_GEN(i915, 12))
else if (GRAPHICS_VER(i915) == 12)
gen12_ctx_workarounds_init(engine, wal);
else if (IS_GEN(i915, 11))
else if (GRAPHICS_VER(i915) == 11)
icl_ctx_workarounds_init(engine, wal);
else if (IS_CANNONLAKE(i915))
cnl_ctx_workarounds_init(engine, wal);

@@ -719,14 +719,14 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
chv_ctx_workarounds_init(engine, wal);
else if (IS_BROADWELL(i915))
bdw_ctx_workarounds_init(engine, wal);
else if (IS_GEN(i915, 7))
else if (GRAPHICS_VER(i915) == 7)
gen7_ctx_workarounds_init(engine, wal);
else if (IS_GEN(i915, 6))
else if (GRAPHICS_VER(i915) == 6)
gen6_ctx_workarounds_init(engine, wal);
else if (INTEL_GEN(i915) < 8)
else if (GRAPHICS_VER(i915) < 8)
;
else
MISSING_CASE(INTEL_GEN(i915));
MISSING_CASE(GRAPHICS_VER(i915));
wa_init_finish(wal);
}

@@ -950,7 +950,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
unsigned int slice, subslice;
u32 l3_en, mcr, mcr_mask;
GEM_BUG_ON(INTEL_GEN(i915) < 10);
GEM_BUG_ON(GRAPHICS_VER(i915) < 10);
/*
* WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl

@@ -980,7 +980,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
* of every MMIO read.
*/
if (INTEL_GEN(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
if (GRAPHICS_VER(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
u32 l3_fuse =
intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
GEN10_L3BANK_MASK;

@@ -1002,7 +1002,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
subslice--;
if (INTEL_GEN(i915) >= 11) {
if (GRAPHICS_VER(i915) >= 11) {
mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
} else {

@@ -1171,9 +1171,9 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
dg1_gt_workarounds_init(i915, wal);
else if (IS_TIGERLAKE(i915))
tgl_gt_workarounds_init(i915, wal);
else if (IS_GEN(i915, 12))
else if (GRAPHICS_VER(i915) == 12)
gen12_gt_workarounds_init(i915, wal);
else if (IS_GEN(i915, 11))
else if (GRAPHICS_VER(i915) == 11)
icl_gt_workarounds_init(i915, wal);
else if (IS_CANNONLAKE(i915))
cnl_gt_workarounds_init(i915, wal);

@@ -1193,18 +1193,18 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
vlv_gt_workarounds_init(i915, wal);
else if (IS_IVYBRIDGE(i915))
ivb_gt_workarounds_init(i915, wal);
else if (IS_GEN(i915, 6))
else if (GRAPHICS_VER(i915) == 6)
snb_gt_workarounds_init(i915, wal);
else if (IS_GEN(i915, 5))
else if (GRAPHICS_VER(i915) == 5)
ilk_gt_workarounds_init(i915, wal);
else if (IS_G4X(i915))
g4x_gt_workarounds_init(i915, wal);
else if (IS_GEN(i915, 4))
else if (GRAPHICS_VER(i915) == 4)
gen4_gt_workarounds_init(i915, wal);
else if (INTEL_GEN(i915) <= 8)
else if (GRAPHICS_VER(i915) <= 8)
;
else
MISSING_CASE(INTEL_GEN(i915));
MISSING_CASE(GRAPHICS_VER(i915));
}

void intel_gt_init_workarounds(struct drm_i915_private *i915)

@@ -1558,9 +1558,9 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
if (IS_DG1(i915))
dg1_whitelist_build(engine);
else if (IS_GEN(i915, 12))
else if (GRAPHICS_VER(i915) == 12)
tgl_whitelist_build(engine);
else if (IS_GEN(i915, 11))
else if (GRAPHICS_VER(i915) == 11)
icl_whitelist_build(engine);
else if (IS_CANNONLAKE(i915))
cnl_whitelist_build(engine);

@@ -1576,10 +1576,10 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
bxt_whitelist_build(engine);
else if (IS_SKYLAKE(i915))
skl_whitelist_build(engine);
else if (INTEL_GEN(i915) <= 8)
else if (GRAPHICS_VER(i915) <= 8)
;
else
MISSING_CASE(INTEL_GEN(i915));
MISSING_CASE(GRAPHICS_VER(i915));
wa_init_finish(w);
}

@@ -1695,7 +1695,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
ENABLE_SMALLPL);
}
if (IS_GEN(i915, 11)) {
if (GRAPHICS_VER(i915) == 11) {
/* This is not an Wa. Enable for better image quality */
wa_masked_en(wal,
_3D_CHICKEN3,

@@ -1793,7 +1793,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
FF_DOP_CLOCK_GATE_DISABLE);
}
if (IS_GEN_RANGE(i915, 9, 12)) {
if (IS_GRAPHICS_VER(i915, 9, 12)) {
/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,

@@ -1817,7 +1817,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
}
if (IS_GEN(i915, 9)) {
if (GRAPHICS_VER(i915) == 9) {
/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
wa_masked_en(wal,
GEN9_CSFE_CHICKEN1_RCS,

@@ -1921,7 +1921,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
}
if (IS_GEN(i915, 7)) {
if (GRAPHICS_VER(i915) == 7) {
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
wa_masked_en(wal,
GFX_MODE_GEN7,

@@ -1953,7 +1953,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN6_WIZ_HASHING_16x4);
}
if (IS_GEN_RANGE(i915, 6, 7))
if (IS_GRAPHICS_VER(i915, 6, 7))
/*
* We need to disable the AsyncFlip performance optimisations in
* order to use MI_WAIT_FOR_EVENT within the CS. It should

@@ -1965,7 +1965,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
MI_MODE,
ASYNC_FLIP_PERF_DISABLE);
if (IS_GEN(i915, 6)) {
if (GRAPHICS_VER(i915) == 6) {
/*
* Required for the hardware to program scanline values for
* waiting

@@ -2019,14 +2019,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
CM0_STC_EVICT_DISABLE_LRA_SNB);
}
if (IS_GEN_RANGE(i915, 4, 6))
if (IS_GRAPHICS_VER(i915, 4, 6))
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
wa_add(wal, MI_MODE,
0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
/* XXX bit doesn't stick on Broadwater */
IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
if (IS_GEN(i915, 4))
if (GRAPHICS_VER(i915) == 4)
/*
* Disable CONSTANT_BUFFER before it is loaded from the context
* image. For as it is loaded, it is executed and the stored

@@ -2058,7 +2058,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 4))
if (I915_SELFTEST_ONLY(GRAPHICS_VER(engine->i915) < 4))
return;
if (engine->class == RENDER_CLASS)

@@ -2071,7 +2071,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
struct i915_wa_list *wal = &engine->wa_list;
if (INTEL_GEN(engine->i915) < 4)
if (GRAPHICS_VER(engine->i915) < 4)
return;
wa_init_start(wal, "engine", engine->name);

@@ -2112,9 +2112,9 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
const struct mcr_range *mcr_ranges;
int i;
if (INTEL_GEN(i915) >= 12)
if (GRAPHICS_VER(i915) >= 12)
mcr_ranges = mcr_ranges_gen12;
else if (INTEL_GEN(i915) >= 8)
else if (GRAPHICS_VER(i915) >= 8)
mcr_ranges = mcr_ranges_gen8;
else
return false;

@@ -2143,7 +2143,7 @@ wa_list_srm(struct i915_request *rq,
u32 srm, *cs;
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
if (INTEL_GEN(i915) >= 8)
if (GRAPHICS_VER(i915) >= 8)
srm++;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {

@@ -52,7 +52,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
return PTR_ERR(cs);
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
if (INTEL_GEN(rq->engine->i915) >= 8)
if (GRAPHICS_VER(rq->engine->i915) >= 8)
cmd++;
*cs++ = cmd;
*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));

@@ -125,7 +125,7 @@ static int perf_mi_bb_start(void *arg)
enum intel_engine_id id;
int err = 0;
if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
if (GRAPHICS_VER(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
return 0;
perf_begin(gt);

@@ -249,7 +249,7 @@ static int perf_mi_noop(void *arg)
enum intel_engine_id id;
int err = 0;
if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
if (GRAPHICS_VER(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
return 0;
perf_begin(gt);

@@ -198,7 +198,7 @@ static int live_engine_timestamps(void *arg)
* the same CS clock.
*/
if (INTEL_GEN(gt->i915) < 8)
if (GRAPHICS_VER(gt->i915) < 8)
return 0;
for_each_engine(engine, gt, id) {

@@ -3269,7 +3269,7 @@ static int live_preempt_user(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
if (GRAPHICS_VER(gt->i915) == 8 && engine->class != RENDER_CLASS)
continue; /* we need per-context GPR */
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {

@@ -4293,7 +4293,7 @@ static int live_virtual_preserved(void *arg)
return 0;
/* As we use CS_GPR we cannot run before they existed on all engines. */
if (INTEL_GEN(gt->i915) < 9)
if (GRAPHICS_VER(gt->i915) < 9)
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {

@@ -74,10 +74,10 @@ static int live_gt_clocks(void *arg)
return 0;
}
if (INTEL_GEN(gt->i915) < 4) /* Any CS_TIMESTAMP? */
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
if (IS_GEN(gt->i915, 5))
if (GRAPHICS_VER(gt->i915) == 5)
/*
* XXX CS_TIMESTAMP low dword is dysfunctional?
*

@@ -86,7 +86,7 @@ static int live_gt_clocks(void *arg)
*/
return 0;
if (IS_GEN(gt->i915, 4))
if (GRAPHICS_VER(gt->i915) == 4)
/*
* XXX CS_TIMESTAMP appears gibberish
*

@@ -105,7 +105,7 @@ static int live_gt_clocks(void *arg)
u64 time;
u64 dt;
if (INTEL_GEN(engine->i915) < 7 && engine->id != RCS0)
if (GRAPHICS_VER(engine->i915) < 7 && engine->id != RCS0)
continue;
measure_clocks(engine, &cycles, &dt);

@@ -180,7 +180,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
goto cancel_rq;
batch = h->batch;
if (INTEL_GEN(gt->i915) >= 8) {
if (GRAPHICS_VER(gt->i915) >= 8) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));

@@ -194,7 +194,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
} else if (INTEL_GEN(gt->i915) >= 6) {
} else if (GRAPHICS_VER(gt->i915) >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));

@@ -207,7 +207,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(vma->node.start);
} else if (INTEL_GEN(gt->i915) >= 4) {
} else if (GRAPHICS_VER(gt->i915) >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));

@@ -243,7 +243,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
}
flags = 0;
if (INTEL_GEN(gt->i915) <= 5)
if (GRAPHICS_VER(gt->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);

@@ -44,7 +44,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
if (found != ia_freq) {
pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected CPU freq, found %d, expected %d\n",
gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
intel_gpu_freq(rps, gpu_freq * (GRAPHICS_VER(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
found, ia_freq);
err = -EINVAL;
break;

@@ -54,7 +54,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
if (found != ring_freq) {
pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected ring freq, found %d, expected %d\n",
gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
intel_gpu_freq(rps, gpu_freq * (GRAPHICS_VER(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
found, ring_freq);
err = -EINVAL;
break;

@@ -584,7 +584,7 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine,
int err;
int n;
if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS)
if (GRAPHICS_VER(engine->i915) < 9 && engine->class != RENDER_CLASS)
return 0; /* GPR only on rcs0 for gen8 */
err = gpr_make_dirty(engine->kernel_context);

@@ -1389,10 +1389,10 @@ err_A:
static bool skip_isolation(const struct intel_engine_cs *engine)
{
if (engine->class == COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) == 9)
if (engine->class == COPY_ENGINE_CLASS && GRAPHICS_VER(engine->i915) == 9)
return true;
if (engine->class == RENDER_CLASS && INTEL_GEN(engine->i915) == 11)
if (engine->class == RENDER_CLASS && GRAPHICS_VER(engine->i915) == 11)
return true;
return false;

@@ -1551,7 +1551,7 @@ static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
/* We use the already reserved extra page in context state */
if (!a->wa_bb_page) {
GEM_BUG_ON(b->wa_bb_page);
GEM_BUG_ON(INTEL_GEN(engine->i915) == 12);
GEM_BUG_ON(GRAPHICS_VER(engine->i915) == 12);
goto unpin_b;
}

@@ -183,7 +183,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
* which only controls CPU initiated MMIO. Routing does not
* work for CS access so we cannot verify them on this path.
*/
return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
return GRAPHICS_VER(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
}

static int check_l3cc_table(struct intel_engine_cs *engine,

@@ -140,7 +140,7 @@ static const u32 *__live_rc6_ctx(struct intel_context *ce)
}
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
if (INTEL_GEN(rq->engine->i915) >= 8)
if (GRAPHICS_VER(rq->engine->i915) >= 8)
cmd++;
*cs++ = cmd;

@@ -193,7 +193,7 @@ int live_rc6_ctx_wa(void *arg)
int err = 0;
/* A read of CTX_INFO upsets rc6. Poke the bear! */
if (INTEL_GEN(gt->i915) < 8)
if (GRAPHICS_VER(gt->i915) < 8)
return 0;
engines = randomised_engines(gt, &prng, &count);

@@ -41,10 +41,10 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine)
return ERR_CAST(cs);
}
if (INTEL_GEN(engine->i915) >= 6) {
if (GRAPHICS_VER(engine->i915) >= 6) {
*cs++ = MI_STORE_DWORD_IMM_GEN4;
*cs++ = 0;
} else if (INTEL_GEN(engine->i915) >= 4) {
} else if (GRAPHICS_VER(engine->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
} else {

@@ -266,7 +266,7 @@ static int live_ctx_switch_wa(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
if (IS_GEN_RANGE(gt->i915, 4, 5))
if (IS_GRAPHICS_VER(gt->i915, 4, 5))
continue; /* MI_STORE_DWORD is privileged! */
saved_wa = fetch_and_zero(&engine->wa_ctx.vma);

@@ -204,7 +204,7 @@ static void show_pstate_limits(struct intel_rps *rps)
i915_mmio_reg_offset(BXT_RP_STATE_CAP),
intel_uncore_read(rps_to_uncore(rps),
BXT_RP_STATE_CAP));
} else if (IS_GEN(i915, 9)) {
} else if (GRAPHICS_VER(i915) == 9) {
pr_info("P_STATE_LIMITS[%x]: 0x%08x\n",
i915_mmio_reg_offset(GEN9_RP_STATE_LIMITS),
intel_uncore_read(rps_to_uncore(rps),

@@ -222,7 +222,7 @@ int live_rps_clock_interval(void *arg)
struct igt_spinner spin;
int err = 0;
if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6)
if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
return 0;
if (igt_spinner_init(&spin, gt))

@@ -506,7 +506,7 @@ static void show_pcu_config(struct intel_rps *rps)
min_gpu_freq = rps->min_freq;
max_gpu_freq = rps->max_freq;
if (INTEL_GEN(i915) >= 9) {
if (GRAPHICS_VER(i915) >= 9) {
/* Convert GT frequency to 50 HZ units */
min_gpu_freq /= GEN9_FREQ_SCALER;
max_gpu_freq /= GEN9_FREQ_SCALER;

@@ -614,7 +614,7 @@ int live_rps_frequency_cs(void *arg)
if (!intel_rps_is_enabled(rps))
return 0;
if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
if (GRAPHICS_VER(gt->i915) < 8) /* for CS simplicity */
return 0;
if (CPU_LATENCY >= 0)

@@ -755,7 +755,7 @@ int live_rps_frequency_srm(void *arg)
if (!intel_rps_is_enabled(rps))
return 0;
if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
if (GRAPHICS_VER(gt->i915) < 8) /* for CS simplicity */
return 0;
if (CPU_LATENCY >= 0)

@@ -1031,7 +1031,7 @@ int live_rps_interrupt(void *arg)
* First, let's check whether or not we are receiving interrupts.
*/
if (!intel_rps_has_interrupts(rps) || INTEL_GEN(gt->i915) < 6)
if (!intel_rps_has_interrupts(rps) || GRAPHICS_VER(gt->i915) < 6)
return 0;
intel_gt_pm_get(gt);

@@ -1136,7 +1136,7 @@ int live_rps_power(void *arg)
* that theory.
*/
if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6)
if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
return 0;
if (!librapl_supported(gt->i915))

@@ -1240,7 +1240,7 @@ int live_rps_dynamic(void *arg)
* moving parts into dynamic reclocking based on load.
*/
if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6)
if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
return 0;
if (igt_spinner_init(&spin, gt))

@@ -457,12 +457,12 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
if (IS_ERR(cs))
return PTR_ERR(cs);
if (INTEL_GEN(rq->engine->i915) >= 8) {
if (GRAPHICS_VER(rq->engine->i915) >= 8) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = addr;
*cs++ = 0;
*cs++ = value;
} else if (INTEL_GEN(rq->engine->i915) >= 4) {
} else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = addr;

@@ -992,7 +992,7 @@ static int live_hwsp_read(void *arg)
* even across multiple wraps.
*/
if (INTEL_GEN(gt->i915) < 8) /* CS convenience [SRM/LRM] */
if (GRAPHICS_VER(gt->i915) < 8) /* CS convenience [SRM/LRM] */
return 0;
tl = intel_timeline_create(gt);

@@ -145,7 +145,7 @@ read_nonprivs(struct intel_context *ce)
goto err_req;
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
if (INTEL_GEN(engine->i915) >= 8)
if (GRAPHICS_VER(engine->i915) >= 8)
srm++;
cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);

@@ -546,7 +546,7 @@ retry:
srm = MI_STORE_REGISTER_MEM;
lrm = MI_LOAD_REGISTER_MEM;
if (INTEL_GEN(engine->i915) >= 8)
if (GRAPHICS_VER(engine->i915) >= 8)
lrm++, srm++;
pr_debug("%s: Writing garbage to %x\n",

@@ -749,7 +749,7 @@ static int live_dirty_whitelist(void *arg)
/* Can the user write to the whitelisted registers? */
if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
if (GRAPHICS_VER(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
return 0;
for_each_engine(engine, gt, id) {

@@ -829,7 +829,7 @@ static int read_whitelisted_registers(struct intel_context *ce,
goto err_req;
srm = MI_STORE_REGISTER_MEM;
if (INTEL_GEN(engine->i915) >= 8)
if (GRAPHICS_VER(engine->i915) >= 8)
srm++;
cs = intel_ring_begin(rq, 4 * engine->whitelist.count);

@@ -160,7 +160,7 @@ void intel_guc_init_early(struct intel_guc *guc)
mutex_init(&guc->send_mutex);
spin_lock_init(&guc->irq_lock);
if (INTEL_GEN(i915) >= 11) {
if (GRAPHICS_VER(i915) >= 11) {
guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
guc->interrupts.reset = gen11_reset_guc_interrupts;
guc->interrupts.enable = gen11_enable_guc_interrupts;

@@ -166,7 +166,7 @@ static void __guc_ads_init(struct intel_guc *guc)
blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK] =
gt->info.vdbox_sfc_access;
if (INTEL_GEN(i915) >= 12 && !IS_DGFX(i915)) {
if (GRAPHICS_VER(i915) >= 12 && !IS_DGFX(i915)) {
u32 distdbreg = intel_uncore_read(gt->uncore,
GEN12_DIST_DBS_POPULATED);
blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI] =

@@ -30,7 +30,7 @@ static void guc_prepare_xfer(struct intel_uncore *uncore)
else
intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
if (IS_GEN(uncore->i915, 9)) {
if (GRAPHICS_VER(uncore->i915) == 9) {
/* DOP Clock Gating Enable for GuC clocks */
intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE);

@@ -622,7 +622,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_flush = gen8_emit_flush_xcs;
engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
if (INTEL_GEN(engine->i915) >= 12) {
if (GRAPHICS_VER(engine->i915) >= 12) {
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
engine->emit_flush = gen12_emit_flush_xcs;
}

@@ -644,7 +644,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
static void rcs_submission_override(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
switch (GRAPHICS_VER(engine->i915)) {
case 12:
engine->emit_flush = gen12_emit_flush_rcs;
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;

@@ -674,7 +674,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
* The setup relies on several assumptions (e.g. irqs always enabled)
* that are only valid on gen11+
*/
GEM_BUG_ON(INTEL_GEN(i915) < 11);
GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
tasklet_setup(&engine->execlists.tasklet, guc_submission_tasklet);

@@ -43,7 +43,7 @@ void intel_huc_init_early(struct intel_huc *huc)
intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC);
if (INTEL_GEN(i915) >= 11) {
if (GRAPHICS_VER(i915) >= 11) {
huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
huc->status.mask = HUC_LOAD_SUCCESSFUL;
huc->status.value = HUC_LOAD_SUCCESSFUL;

@@ -23,7 +23,7 @@ static void uc_expand_default_options(struct intel_uc *uc)
return;
/* Don't enable GuC/HuC on pre-Gen12 */
if (INTEL_GEN(i915) < 12) {
if (GRAPHICS_VER(i915) < 12) {
i915->params.enable_guc = 0;
return;
}

@@ -467,7 +467,7 @@ static int __uc_init_hw(struct intel_uc *uc)
/* WaEnableuKernelHeaderValidFix:skl */
/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
if (IS_GEN(i915, 9))
if (GRAPHICS_VER(i915) == 9)
attempts = 3;
else
attempts = 1;