@@ -3951,68 +3951,68 @@ static void
 skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
			   const enum pipe pipe,
			   const enum plane_id plane_id,
-			   struct skl_ddb_allocation *ddb /* out */)
+			   struct skl_ddb_entry *ddb_y,
+			   struct skl_ddb_entry *ddb_uv)
 {
-	u32 val, val2 = 0;
-	int fourcc, pixel_format;
+	u32 val, val2;
+	u32 fourcc = 0;
 
 	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
 	if (plane_id == PLANE_CURSOR) {
 		val = I915_READ(CUR_BUF_CFG(pipe));
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->plane[pipe][plane_id], val);
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
 		return;
 	}
 
 	val = I915_READ(PLANE_CTL(pipe, plane_id));
 
 	/* No DDB allocated for disabled planes */
-	if (!(val & PLANE_CTL_ENABLE))
-		return;
+	if (val & PLANE_CTL_ENABLE)
+		fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
+					      val & PLANE_CTL_ORDER_RGBX,
+					      val & PLANE_CTL_ALPHA_MASK);
 
-	pixel_format = val & PLANE_CTL_FORMAT_MASK;
-	fourcc = skl_format_to_fourcc(pixel_format,
-				      val & PLANE_CTL_ORDER_RGBX,
-				      val & PLANE_CTL_ALPHA_MASK);
-
-	val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
-	if (fourcc == DRM_FORMAT_NV12 && INTEL_GEN(dev_priv) < 11) {
+	if (INTEL_GEN(dev_priv) >= 11) {
+		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+	} else {
+		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
 		val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
 
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->plane[pipe][plane_id], val2);
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->uv_plane[pipe][plane_id], val);
-	} else {
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->plane[pipe][plane_id], val);
+		if (fourcc == DRM_FORMAT_NV12)
+			swap(val, val2);
+
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
 	}
 }
 
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+			       struct skl_ddb_entry *ddb_y,
+			       struct skl_ddb_entry *ddb_uv)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum intel_display_power_domain power_domain;
+	enum pipe pipe = crtc->pipe;
+	enum plane_id plane_id;
+
+	power_domain = POWER_DOMAIN_PIPE(pipe);
+	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+		return;
+
+	for_each_plane_id_on_crtc(crtc, plane_id)
+		skl_ddb_get_hw_plane_state(dev_priv, pipe,
+					   plane_id,
+					   &ddb_y[plane_id],
+					   &ddb_uv[plane_id]);
+
+	intel_display_power_put(dev_priv, power_domain);
+}
+
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
 {
-	struct intel_crtc *crtc;
-
-	memset(ddb, 0, sizeof(*ddb));
-
 	ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		enum intel_display_power_domain power_domain;
-		enum plane_id plane_id;
-		enum pipe pipe = crtc->pipe;
-
-		power_domain = POWER_DOMAIN_PIPE(pipe);
-		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
-			continue;
-
-		for_each_plane_id_on_crtc(crtc, plane_id)
-			skl_ddb_get_hw_plane_state(dev_priv, pipe,
-						   plane_id, ddb);
-
-		intel_display_power_put(dev_priv, power_domain);
-	}
 }
 
 /*
@@ -4410,7 +4410,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
	struct drm_crtc *crtc = cstate->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
	struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
	uint16_t alloc_size, start;
	uint16_t minimum[I915_MAX_PLANES] = {};
@@ -4423,8 +4422,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
	uint16_t total_min_blocks = 0;
 
	/* Clear the partitioning for disabled planes. */
-	memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
-	memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe]));
+	memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
+	memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
 
	if (WARN_ON(!state))
		return 0;
@@ -4471,8 +4470,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
	}
 
	alloc_size -= total_min_blocks;
-	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
-	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+	cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
+	cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
 
	/*
	 * 2. Distribute the remaining space in proportion to the amount of
@@ -4503,8 +4502,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 
		/* Leave disabled planes at (0,0) */
		if (data_rate) {
-			ddb->plane[pipe][plane_id].start = start;
-			ddb->plane[pipe][plane_id].end = start + plane_blocks;
+			cstate->wm.skl.plane_ddb_y[plane_id].start = start;
+			cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
		}
 
		start += plane_blocks;
@@ -4519,8 +4518,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
		WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
 
		if (uv_data_rate) {
-			ddb->uv_plane[pipe][plane_id].start = start;
-			ddb->uv_plane[pipe][plane_id].end =
+			cstate->wm.skl.plane_ddb_uv[plane_id].start = start;
+			cstate->wm.skl.plane_ddb_uv[plane_id].end =
				start + uv_plane_blocks;
		}
 
@@ -4978,16 +4977,13 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
	}
 }
 
-static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
-				     struct intel_crtc_state *crtc_state,
+static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
				     enum plane_id plane_id, int color_plane)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+	u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]);
	struct skl_wm_params wm_params;
-	enum pipe pipe = plane->pipe;
-	uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
	int ret;
 
	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
@@ -5005,16 +5001,13 @@ static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
	return 0;
 }
 
-static int skl_build_plane_wm_uv(struct skl_ddb_allocation *ddb,
-				 struct intel_crtc_state *crtc_state,
+static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *plane_state,
				 enum plane_id plane_id)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+	u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]);
	struct skl_wm_params wm_params;
-	enum pipe pipe = plane->pipe;
-	uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->uv_plane[pipe][plane_id]);
	int ret;
 
	wm->is_planar = true;
@@ -5033,8 +5026,7 @@ static int skl_build_plane_wm_uv(struct skl_ddb_allocation *ddb,
	return 0;
 }
 
-static int skl_build_plane_wm(struct skl_ddb_allocation *ddb,
-			      struct skl_pipe_wm *pipe_wm,
+static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
			      struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
 {
@@ -5046,13 +5038,13 @@ static int skl_build_plane_wm(struct skl_ddb_allocation *ddb,
	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;
 
-	ret = skl_build_plane_wm_single(ddb, crtc_state, plane_state,
+	ret = skl_build_plane_wm_single(crtc_state, plane_state,
					plane_id, 0);
	if (ret)
		return ret;
 
	if (fb->format->is_yuv && fb->format->num_planes > 1) {
-		ret = skl_build_plane_wm_uv(ddb, crtc_state, plane_state,
+		ret = skl_build_plane_wm_uv(crtc_state, plane_state,
					    plane_id);
		if (ret)
			return ret;
@@ -5061,8 +5053,7 @@ static int skl_build_plane_wm(struct skl_ddb_allocation *ddb,
	return 0;
 }
 
-static int icl_build_plane_wm(struct skl_ddb_allocation *ddb,
-			      struct skl_pipe_wm *pipe_wm,
+static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
			      struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
 {
@@ -5081,17 +5072,17 @@ static int icl_build_plane_wm(struct skl_ddb_allocation *ddb,
		WARN_ON(!fb->format->is_yuv ||
			fb->format->num_planes == 1);
 
-		ret = skl_build_plane_wm_single(ddb, crtc_state, plane_state,
+		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						y_plane_id, 0);
		if (ret)
			return ret;
 
-		ret = skl_build_plane_wm_single(ddb, crtc_state, plane_state,
+		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane_id, 1);
		if (ret)
			return ret;
	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
-		ret = skl_build_plane_wm_single(ddb, crtc_state, plane_state,
+		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane_id, 0);
		if (ret)
			return ret;
@@ -5101,7 +5092,6 @@ static int icl_build_plane_wm(struct skl_ddb_allocation *ddb,
 }
 
 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
-			     struct skl_ddb_allocation *ddb,
			     struct skl_pipe_wm *pipe_wm)
 {
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
@@ -5121,10 +5111,10 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
			to_intel_plane_state(pstate);
 
		if (INTEL_GEN(dev_priv) >= 11)
-			ret = icl_build_plane_wm(ddb, pipe_wm,
+			ret = icl_build_plane_wm(pipe_wm,
						 cstate, intel_pstate);
		else
-			ret = skl_build_plane_wm(ddb, pipe_wm,
+			ret = skl_build_plane_wm(pipe_wm,
						 cstate, intel_pstate);
		if (ret)
			return ret;
@@ -5140,9 +5130,9 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
				const struct skl_ddb_entry *entry)
 {
	if (entry->end)
-		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
+		I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
	else
-		I915_WRITE(reg, 0);
+		I915_WRITE_FW(reg, 0);
 }
 
 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
|
|
|
|
@ -5157,19 +5147,22 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
|
|
|
|
|
val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
I915_WRITE(reg, val);
|
|
|
|
|
I915_WRITE_FW(reg, val);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
|
|
|
|
|
const struct skl_plane_wm *wm,
|
|
|
|
|
const struct skl_ddb_allocation *ddb,
|
|
|
|
|
enum plane_id plane_id)
|
|
|
|
|
void skl_write_plane_wm(struct intel_plane *plane,
|
|
|
|
|
const struct intel_crtc_state *crtc_state)
|
|
|
|
|
{
|
|
|
|
|
struct drm_crtc *crtc = &intel_crtc->base;
|
|
|
|
|
struct drm_device *dev = crtc->dev;
|
|
|
|
|
struct drm_i915_private *dev_priv = to_i915(dev);
|
|
|
|
|
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
|
|
|
|
|
int level, max_level = ilk_wm_max_level(dev_priv);
|
|
|
|
|
enum pipe pipe = intel_crtc->pipe;
|
|
|
|
|
enum plane_id plane_id = plane->id;
|
|
|
|
|
enum pipe pipe = plane->pipe;
|
|
|
|
|
const struct skl_plane_wm *wm =
|
|
|
|
|
&crtc_state->wm.skl.optimal.planes[plane_id];
|
|
|
|
|
const struct skl_ddb_entry *ddb_y =
|
|
|
|
|
&crtc_state->wm.skl.plane_ddb_y[plane_id];
|
|
|
|
|
const struct skl_ddb_entry *ddb_uv =
|
|
|
|
|
&crtc_state->wm.skl.plane_ddb_uv[plane_id];
|
|
|
|
|
|
|
|
|
|
for (level = 0; level <= max_level; level++) {
|
|
|
|
|
skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
|
|
|
|
@@ -5178,29 +5171,32 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
			   &wm->trans_wm);
 
-	if (wm->is_planar && INTEL_GEN(dev_priv) < 11) {
-		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
-				    &ddb->uv_plane[pipe][plane_id]);
+	if (INTEL_GEN(dev_priv) >= 11) {
		skl_ddb_entry_write(dev_priv,
-				    PLANE_NV12_BUF_CFG(pipe, plane_id),
-				    &ddb->plane[pipe][plane_id]);
-	} else {
-		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
-				    &ddb->plane[pipe][plane_id]);
-		if (INTEL_GEN(dev_priv) < 11)
-			I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+				    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+		return;
	}
+
+	if (wm->is_planar)
+		swap(ddb_y, ddb_uv);
+
+	skl_ddb_entry_write(dev_priv,
+			    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+	skl_ddb_entry_write(dev_priv,
+			    PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
 }
 
-static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
-				const struct skl_plane_wm *wm,
-				const struct skl_ddb_allocation *ddb)
+void skl_write_cursor_wm(struct intel_plane *plane,
+			 const struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = &intel_crtc->base;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
-	enum pipe pipe = intel_crtc->pipe;
+	enum plane_id plane_id = plane->id;
+	enum pipe pipe = plane->pipe;
+	const struct skl_plane_wm *wm =
+		&crtc_state->wm.skl.optimal.planes[plane_id];
+	const struct skl_ddb_entry *ddb =
+		&crtc_state->wm.skl.plane_ddb_y[plane_id];
 
	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
@@ -5208,22 +5204,30 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
	}
	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
 
-	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
-			    &ddb->plane[pipe][PLANE_CURSOR]);
+	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
 }
 
 bool skl_wm_level_equals(const struct skl_wm_level *l1,
			 const struct skl_wm_level *l2)
 {
-	if (l1->plane_en != l2->plane_en)
-		return false;
+	return l1->plane_en == l2->plane_en &&
+		l1->plane_res_l == l2->plane_res_l &&
+		l1->plane_res_b == l2->plane_res_b;
+}
 
-	/* If both planes aren't enabled, the rest shouldn't matter */
-	if (!l1->plane_en)
-		return true;
+static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
+				const struct skl_plane_wm *wm1,
+				const struct skl_plane_wm *wm2)
+{
+	int level, max_level = ilk_wm_max_level(dev_priv);
 
-	return (l1->plane_res_l == l2->plane_res_l &&
-		l1->plane_res_b == l2->plane_res_b);
+	for (level = 0; level <= max_level; level++) {
+		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
+		    !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
+			return false;
+	}
+
+	return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
 }
 
 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -5250,13 +5254,12 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
			      const struct skl_pipe_wm *old_pipe_wm,
			      struct skl_pipe_wm *pipe_wm, /* out */
-			      struct skl_ddb_allocation *ddb, /* out */
			      bool *changed /* out */)
 {
	struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
	int ret;
 
-	ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
+	ret = skl_build_pipe_wm(intel_cstate, pipe_wm);
	if (ret)
		return ret;
 
@@ -5282,42 +5285,29 @@ pipes_modified(struct drm_atomic_state *state)
 }
 
 static int
-skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
+skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
+			    struct intel_crtc_state *new_crtc_state)
 {
-	struct drm_atomic_state *state = cstate->base.state;
-	struct drm_device *dev = state->dev;
-	struct drm_crtc *crtc = cstate->base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
-	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
-	struct drm_plane *plane;
-	enum pipe pipe = intel_crtc->pipe;
+	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_plane *plane;
 
-	drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
-		struct drm_plane_state *plane_state;
-		struct intel_plane *linked;
-		enum plane_id plane_id = to_intel_plane(plane)->id;
+	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+		struct intel_plane_state *plane_state;
+		enum plane_id plane_id = plane->id;
 
-		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
-					&new_ddb->plane[pipe][plane_id]) &&
-		    skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id],
-					&new_ddb->uv_plane[pipe][plane_id]))
+		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
+					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
+		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
+					&new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
			continue;
 
-		plane_state = drm_atomic_get_plane_state(state, plane);
+		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
 
-		/* Make sure linked plane is updated too */
-		linked = to_intel_plane_state(plane_state)->linked_plane;
-		if (!linked)
-			continue;
-
-		plane_state = drm_atomic_get_plane_state(state, &linked->base);
-		if (IS_ERR(plane_state))
-			return PTR_ERR(plane_state);
+		new_crtc_state->update_planes |= BIT(plane_id);
	}
 
	return 0;
@@ -5329,18 +5319,21 @@ skl_compute_ddb(struct drm_atomic_state *state)
	const struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+	struct intel_crtc_state *old_crtc_state;
+	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
-	struct intel_crtc_state *cstate;
	int ret, i;
 
	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
 
-	for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) {
-		ret = skl_allocate_pipe_ddb(cstate, ddb);
+	for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
		if (ret)
			return ret;
 
-		ret = skl_ddb_add_affected_planes(cstate);
+		ret = skl_ddb_add_affected_planes(old_crtc_state,
+						  new_crtc_state);
		if (ret)
			return ret;
	}
@@ -5349,36 +5342,29 @@ skl_compute_ddb(struct drm_atomic_state *state)
 }
 
 static void
-skl_print_wm_changes(const struct drm_atomic_state *state)
+skl_print_wm_changes(struct intel_atomic_state *state)
 {
-	const struct drm_device *dev = state->dev;
-	const struct drm_i915_private *dev_priv = to_i915(dev);
-	const struct intel_atomic_state *intel_state =
-		to_intel_atomic_state(state);
-	const struct drm_crtc *crtc;
-	const struct drm_crtc_state *cstate;
-	const struct intel_plane *intel_plane;
-	const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
-	const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	const struct intel_crtc_state *old_crtc_state;
+	const struct intel_crtc_state *new_crtc_state;
+	struct intel_plane *plane;
+	struct intel_crtc *crtc;
	int i;
 
-	for_each_new_crtc_in_state(state, crtc, cstate, i) {
-		const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		enum pipe pipe = intel_crtc->pipe;
-
-		for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-			enum plane_id plane_id = intel_plane->id;
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+			enum plane_id plane_id = plane->id;
			const struct skl_ddb_entry *old, *new;
 
-			old = &old_ddb->plane[pipe][plane_id];
-			new = &new_ddb->plane[pipe][plane_id];
+			old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
+			new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
 
			if (skl_ddb_entry_equal(old, new))
				continue;
 
			DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
-				      intel_plane->base.base.id,
-				      intel_plane->base.name,
+				      plane->base.base.id, plane->base.name,
				      old->start, old->end,
				      new->start, new->end);
		}
@@ -5475,6 +5461,66 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
	return 0;
 }
 
+/*
+ * To make sure the cursor watermark registers are always consistent
+ * with our computed state the following scenario needs special
+ * treatment:
+ *
+ * 1. enable cursor
+ * 2. move cursor entirely offscreen
+ * 3. disable cursor
+ *
+ * Step 2. does call .disable_plane() but does not zero the watermarks
+ * (since we consider an offscreen cursor still active for the purposes
+ * of watermarks). Step 3. would not normally call .disable_plane()
+ * because the actual plane visibility isn't changing, and we don't
+ * deallocate the cursor ddb until the pipe gets disabled. So we must
+ * force step 3. to call .disable_plane() to update the watermark
+ * registers properly.
+ *
+ * Other planes do not suffer from this issues as their watermarks are
+ * calculated based on the actual plane visibility. The only time this
+ * can trigger for the other planes is during the initial readout as the
+ * default value of the watermarks registers is not zero.
+ */
+static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
+				      struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct intel_plane *plane;
+
+	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+		struct intel_plane_state *plane_state;
+		enum plane_id plane_id = plane->id;
+
+		/*
+		 * Force a full wm update for every plane on modeset.
+		 * Required because the reset value of the wm registers
+		 * is non-zero, whereas we want all disabled planes to
+		 * have zero watermarks. So if we turn off the relevant
+		 * power well the hardware state will go out of sync
+		 * with the software state.
+		 */
+		if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
+		    skl_plane_wm_equals(dev_priv,
+					&old_crtc_state->wm.skl.optimal.planes[plane_id],
+					&new_crtc_state->wm.skl.optimal.planes[plane_id]))
+			continue;
+
+		plane_state = intel_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state))
+			return PTR_ERR(plane_state);
+
+		new_crtc_state->update_planes |= BIT(plane_id);
+	}
+
+	return 0;
+}
+
 static int
 skl_compute_wm(struct drm_atomic_state *state)
 {
@@ -5514,8 +5560,12 @@ skl_compute_wm(struct drm_atomic_state *state)
			&to_intel_crtc_state(crtc->state)->wm.skl.optimal;
 
		pipe_wm = &intel_cstate->wm.skl.optimal;
-		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
-					 &results->ddb, &changed);
+		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
		if (ret)
			return ret;
 
+		ret = skl_wm_add_affected_planes(intel_state,
+						 to_intel_crtc(crtc));
+		if (ret)
+			return ret;
+
@@ -5529,7 +5579,7 @@ skl_compute_wm(struct drm_atomic_state *state)
			intel_cstate->update_wm_pre = true;
	}
 
-	skl_print_wm_changes(state);
+	skl_print_wm_changes(intel_state);
 
	return 0;
 }
@@ -5540,23 +5590,12 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
	struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
-	const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
	enum pipe pipe = crtc->pipe;
-	enum plane_id plane_id;
 
	if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
		return;
 
	I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
-
-	for_each_plane_id_on_crtc(crtc, plane_id) {
-		if (plane_id != PLANE_CURSOR)
-			skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
-					   ddb, plane_id);
-		else
-			skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
-					    ddb);
-	}
 }
 
 static void skl_initial_wm(struct intel_atomic_state *state,
static void skl_initial_wm(struct intel_atomic_state *state,
|
|
|
|
@ -5566,8 +5605,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
|
|
|
|
|
struct drm_device *dev = intel_crtc->base.dev;
|
|
|
|
|
struct drm_i915_private *dev_priv = to_i915(dev);
|
|
|
|
|
struct skl_ddb_values *results = &state->wm_results;
|
|
|
|
|
struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw;
|
|
|
|
|
enum pipe pipe = intel_crtc->pipe;
|
|
|
|
|
|
|
|
|
|
if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
|
|
|
|
|
return;
|
|
|
|
@@ -5577,11 +5614,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
	if (cstate->base.active_changed)
		skl_atomic_update_crtc_wm(state, cstate);
 
-	memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe],
-	       sizeof(hw_vals->ddb.uv_plane[pipe]));
-	memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe],
-	       sizeof(hw_vals->ddb.plane[pipe]));
-
	mutex_unlock(&dev_priv->wm.wm_mutex);
 }
 
@@ -5732,13 +5764,6 @@ void skl_wm_get_hw_state(struct drm_device *dev)
	if (dev_priv->active_crtcs) {
		/* Fully recompute DDB on first atomic commit */
		dev_priv->wm.distrust_bios_wm = true;
-	} else {
-		/*
-		 * Easy/common case; just sanitize DDB now if everything off
-		 * Keep dbuf slice info intact
-		 */
-		memset(ddb->plane, 0, sizeof(ddb->plane));
-		memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane));
	}
 }
 