Merge tag 'gvt-next-fixes-2017-04-20' of https://github.com/01org/gvt-linux into drm-intel-next-fixes
gvt-next-fixes-2017-04-20

- some code optimizations from Changbin
- debug message cleanup after the QoS merge
- misc fixes for display MMIO init, a reset-vGPU warning, etc.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
commit f8a77153b0

--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -616,9 +616,6 @@ static inline u32 get_opcode(u32 cmd, int ring_id)
 {
        struct decode_info *d_info;
 
-       if (ring_id >= I915_NUM_ENGINES)
-               return INVALID_OP;
-
        d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
        if (d_info == NULL)
                return INVALID_OP;
@@ -661,9 +658,6 @@ static inline void print_opcode(u32 cmd, int ring_id)
        struct decode_info *d_info;
        int i;
 
-       if (ring_id >= I915_NUM_ENGINES)
-               return;
-
        d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
        if (d_info == NULL)
                return;
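Both hunks above drop the same per-call ring_id test from the hot decode path; the premise (an assumption about the surrounding code, consistent with the tag's "code optimization from Changbin") is that callers validate ring_id once before decoding, so the helpers can index their tables unconditionally. A standalone sketch of that check-once-at-the-boundary shape, with hypothetical names:

#include <errno.h>

enum { NUM_ENGINES = 5, NUM_OPS = 64 };

static const char *const op_names[NUM_ENGINES][NUM_OPS] = {
        [0][0] = "MI_NOOP",     /* sparse table; unset entries stay NULL */
};

/* Hot-path helper: no range check left here, because the entry point
 * below has already validated ring_id. */
static const char *opcode_name(int ring_id, unsigned int cmd)
{
        return op_names[ring_id][cmd & (NUM_OPS - 1)];
}

/* Entry point: validate once, up front. */
int decode_one(int ring_id, unsigned int cmd)
{
        if (ring_id < 0 || ring_id >= NUM_ENGINES)
                return -EINVAL;

        return opcode_name(ring_id, cmd) ? 0 : -EOPNOTSUPP;
}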
@@ -2483,7 +2477,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 
        t1 = get_cycles();
 
-       memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state));
+       s_before_advance_custom = *s;
 
        if (info->handler) {
                ret = info->handler(s);
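The memcpy here copies one struct into another of the same type, so plain assignment is the better tool: it is fully type-checked and lets the compiler pick the copy strategy. The execlist hunk further down makes the same substitution. A minimal sketch of the difference, using a hypothetical struct rather than the kernel's parser_exec_state:

#include <string.h>

struct exec_state {
        int ring_id;
        unsigned long ip;
};

void snapshot(struct exec_state *dst, const struct exec_state *src)
{
        /* memcpy compiles even if the size argument or the destination
         * type is wrong -- the mistake only shows up at runtime: */
        memcpy(dst, src, sizeof(*dst));

        /* plain assignment copies the same bytes, is type-checked, and
         * reads as what it is: */
        *dst = *src;
}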

--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -189,17 +189,44 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
        }
 
        if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
-               vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
                vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+               vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+                       ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
+                       TRANS_DDI_PORT_MASK);
+               vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+                       (PORT_B << TRANS_DDI_PORT_SHIFT) |
+                       TRANS_DDI_FUNC_ENABLE);
+               vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
+               vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
+               vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
        }
 
        if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
                vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+               vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+                       ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
+                       TRANS_DDI_PORT_MASK);
+               vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+                       (PORT_C << TRANS_DDI_PORT_SHIFT) |
+                       TRANS_DDI_FUNC_ENABLE);
+               vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
+               vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
                vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
        }
 
        if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
                vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+               vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+                       ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
+                       TRANS_DDI_PORT_MASK);
+               vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+                       (PORT_D << TRANS_DDI_PORT_SHIFT) |
+                       TRANS_DDI_FUNC_ENABLE);
+               vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
+               vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
                vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
        }
 
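Each port block follows the usual read-modify-write discipline on TRANS_DDI_FUNC_CTL: first clear the whole BPC/mode-select/port fields, then OR in the new routing plus the enable bit, so no stale field bits survive from a previous value. A minimal sketch of that pattern with made-up bit positions (not the real register layout):

#include <stdint.h>

/* Illustrative bit layout only -- not the real TRANS_DDI_FUNC_CTL fields. */
#define PORT_MASK       (0x7u << 28)
#define PORT_SEL(p)     (((uint32_t)(p) & 0x7u) << 28)
#define FUNC_ENABLE     (1u << 31)

static uint32_t route_transcoder(uint32_t reg, unsigned int port)
{
        reg &= ~PORT_MASK;                      /* clear the full field first */
        reg |= PORT_SEL(port) | FUNC_ENABLE;    /* then program value+enable  */
        return reg;
}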

--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -56,8 +56,8 @@ static int context_switch_events[] = {
 
 static int ring_id_to_context_switch_event(int ring_id)
 {
-       if (WARN_ON(ring_id < RCS && ring_id >
-                       ARRAY_SIZE(context_switch_events)))
+       if (WARN_ON(ring_id < RCS ||
+                       ring_id >= ARRAY_SIZE(context_switch_events)))
                return -EINVAL;
 
        return context_switch_events[ring_id];
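The old condition could never fire: RCS is 0, and no ring_id is simultaneously negative and greater than the array size, so the WARN_ON rejected nothing and it also used > where >= was needed. The fix rejects everything outside [0, ARRAY_SIZE). A small self-checking sketch of the corrected predicate (NR_EVENTS is a stand-in for the real array size):

#include <assert.h>

#define NR_EVENTS 5     /* stand-in for ARRAY_SIZE(context_switch_events) */

static int valid_ring(int id)
{
        /* Buggy form: (id < 0 && id > NR_EVENTS) is false for every id.
         * Fixed form: reject anything outside [0, NR_EVENTS): */
        return !(id < 0 || id >= NR_EVENTS);
}

int main(void)
{
        assert(!valid_ring(-1));                        /* below range */
        assert(valid_ring(0) && valid_ring(NR_EVENTS - 1));
        assert(!valid_ring(NR_EVENTS));                 /* off-by-one  */
        return 0;
}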
@@ -687,9 +687,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
        }
 
        if (emulate_schedule_in)
-               memcpy(&workload->elsp_dwords,
-                       &vgpu->execlist[ring_id].elsp_dwords,
-                       sizeof(workload->elsp_dwords));
+               workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;
 
        gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
                        workload, ring_id, head, tail, start, ctl);

--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2294,12 +2294,15 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 {
        struct intel_gvt *gvt = vgpu->gvt;
+       struct drm_i915_private *dev_priv = gvt->dev_priv;
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        u32 index;
        u32 offset;
        u32 num_entries;
        struct intel_gvt_gtt_entry e;
 
+       intel_runtime_pm_get(dev_priv);
+
        memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
        e.type = GTT_TYPE_GGTT_PTE;
        ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
@@ -2314,6 +2317,8 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
        num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
        for (offset = 0; offset < num_entries; offset++)
                ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+
+       intel_runtime_pm_put(dev_priv);
 }
 
 /**
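The vGPU GGTT reset walks and rewrites a large number of PTEs through MMIO, so it must hold a runtime-PM reference for the whole span or the device may suspend mid-walk (presumably the "reset vgpu warning" the tag mentions). A minimal sketch of the get/put bracketing, with stub helpers standing in for the real runtime-PM API:

#include <stddef.h>

/* pm_get()/pm_put() are hypothetical stand-ins for the runtime-PM
 * reference helpers. */
struct dev {
        int pm_refcount;        /* >0 means the device is held awake */
};

static void pm_get(struct dev *d) { d->pm_refcount++; /* and wake HW */ }
static void pm_put(struct dev *d) { d->pm_refcount--; /* may suspend */ }

static void reset_entries(struct dev *d, unsigned int *entries, size_t n)
{
        size_t i;

        pm_get(d);      /* hardware must stay awake for the writes below */
        for (i = 0; i < n; i++)
                entries[i] = 0;
        pm_put(d);      /* balanced put on the single exit path */
}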

--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -44,7 +44,7 @@ struct render_mmio {
        u32 value;
 };
 
-static struct render_mmio gen8_render_mmio_list[] = {
+static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
        {RCS, _MMIO(0x229c), 0xffff, false},
        {RCS, _MMIO(0x2248), 0x0, false},
        {RCS, _MMIO(0x2098), 0x0, false},
@@ -75,7 +75,7 @@ static struct render_mmio gen8_render_mmio_list[] = {
        {BCS, _MMIO(0x22028), 0x0, false},
 };
 
-static struct render_mmio gen9_render_mmio_list[] = {
+static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = {
        {RCS, _MMIO(0x229c), 0xffff, false},
        {RCS, _MMIO(0x2248), 0x0, false},
        {RCS, _MMIO(0x2098), 0x0, false},
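__cacheline_aligned starts each table on a cache-line boundary, so the context-switch loops that sweep these entries touch as few lines as possible. The same idea in plain C11, assuming a 64-byte line size (the kernel macro uses the architecture's own value):

#include <stdalign.h>
#include <stdint.h>

struct reg_entry {
        int      engine;
        uint32_t offset;
        uint32_t mask;
        int      in_context;
};

/* Force the table to start on a 64-byte boundary. */
static alignas(64) struct reg_entry render_regs[] = {
        {0, 0x229c, 0xffff, 0},
        {0, 0x2248, 0x0000, 0},
};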
@@ -204,9 +204,6 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                return;
 
-       if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
-               return;
-
        offset.reg = regs[ring_id];
        for (i = 0; i < 64; i++) {
                gen9_render_mocs[ring_id][i] = I915_READ(offset);
@@ -242,9 +239,6 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                return;
 
-       if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
-               return;
-
        offset.reg = regs[ring_id];
        for (i = 0; i < 64; i++) {
                vgpu_vreg(vgpu, offset) = I915_READ(offset);

--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -133,9 +133,6 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
        if (!scheduler->next_vgpu)
                return;
 
-       gvt_dbg_sched("try to schedule next vgpu %d\n",
-                       scheduler->next_vgpu->id);
-
        /*
         * after the flag is set, workload dispatch thread will
         * stop dispatching workload for current vgpu
@@ -144,15 +141,10 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 
        /* still have uncompleted workload? */
        for_each_engine(engine, gvt->dev_priv, i) {
-               if (scheduler->current_workload[i]) {
-                       gvt_dbg_sched("still have running workload\n");
+               if (scheduler->current_workload[i])
                        return;
-               }
        }
 
-       gvt_dbg_sched("switch to next vgpu %d\n",
-                       scheduler->next_vgpu->id);
-
        cur_time = ktime_get();
        if (scheduler->current_vgpu) {
                vgpu_data = scheduler->current_vgpu->sched_data;
@@ -224,17 +216,12 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
                list_del_init(&vgpu_data->lru_list);
                list_add_tail(&vgpu_data->lru_list,
                                &sched_data->lru_runq_head);
-
-               gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
        } else {
                scheduler->next_vgpu = gvt->idle_vgpu;
        }
 out:
-       if (scheduler->next_vgpu) {
-               gvt_dbg_sched("try to schedule next vgpu %d\n",
-                               scheduler->next_vgpu->id);
+       if (scheduler->next_vgpu)
                try_to_schedule_next_vgpu(gvt);
-       }
 }
 
 void intel_gvt_schedule(struct intel_gvt *gvt)
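All of the scheduler hunks strip gvt_dbg_sched() calls from paths that run on every scheduling tick, where even a normally-silent debug print still costs a branch and argument setup. One common way to keep such hot paths lean is to compile the print out entirely; a sketch with a hypothetical macro in kernel-style GNU C (not how gvt_dbg_sched itself is implemented):

#include <stdio.h>

/* Hypothetical compile-time gate. */
#ifdef DEBUG_SCHED
#define sched_dbg(fmt, ...) fprintf(stderr, "sched: " fmt, ##__VA_ARGS__)
#else
#define sched_dbg(fmt, ...) do { } while (0)
#endif

/* With DEBUG_SCHED unset, the print vanishes and the early-return
 * shape stays as cheap as the patched kernel code. */
static int pick_next(int current_busy)
{
        if (current_busy) {
                sched_dbg("still have running workload\n");
                return -1;      /* try again on the next tick */
        }
        return 0;
}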

--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -279,11 +279,8 @@ static struct intel_vgpu_workload *pick_next_workload(
                goto out;
        }
 
-       if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
-               gvt_dbg_sched("ring id %d stop - no available workload\n",
-                               ring_id);
+       if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
                goto out;
-       }
 
        /*
         * still have current workload, maybe the workload disptacher