Merge tag 'drm-intel-next-2020-07-15' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
drm/i915 features for v5.9, batch #2

Highlights:
- Very early DG1 enabling (Abdiel, Lucas, Anusha)

Gem/GT:
- Fix spinlock recursion on signaling a signaled request (Chris)
- Perf: Use GTT when saving/restoring engine GPR (Umesh Nerlige Ramappa)
- SSEU refactoring, debugfs move under gt/ (Daniele, Venkata Sandeep Dhanalakota)
- Various GT refactoring and cleanup, preparation for future changes (Daniele)
- Adjust HuC state accordingly after GuC fetch error (Michał Winiarski)
- UC debugfs updates (Michał Winiarski)
- Only revoke the GGTT mmappings on aperture detiling changes (Chris)
- Only revoke mmap handlers if active (Chris)
- Split the context's obj:vma lut into its own mutex (Chris)
- Various memory, mmap and performance optimisations (Chris)
- Improve system stability in case of false CS events (Chris)
- Various refactorings and cleanup (Chris)
- Always reset the engine on execlist failures (Chris)
- Trace placement of timeline HWSP (Chris)
- Update dma-attributes for our sg DMA (Chris)

Display:
- TGL CDCLK workaround tweaks to unbreak 8K display support (Stanislav)
- A number of FBC fixes, along with i865 FBC enabling (Ville)
- Validate MST modes against PBN limits (Lyude, Shawn Lee)
- Do not access non-existing swizzle registers (Lucas)
- Revert GEN11+ HBR3 rate fix that caused issues on TGL (Matt Atwood)
- Update TGL+ combo phy initialization to match spec update (José)
- Fix HDCP Content Protection property state machine (Anshuman)
- Fix HDCP revoked keys handling (Ram)
- Improve DDI BUF status checks and waits (Manasi)
- Various SDVO+HDMI+DVI fixes around colorimetry, clocking, pixel repeat etc. (Ville)
- DP voltage swing function refactoring (José)
- WARN if max vswing/pre-emphasis violates the DP spec (Ville)

Other:
- Add new EHL PCI IDs (José)
- Unify struct intel_digital_port variable naming (Lucas)
- Various taint updates to aid debugging and improve CI (Michał Winiarski)
- Straggler conversions to new mmio register accessors (Daniele)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87a70029vz.fsf@intel.com
@@ -73,8 +73,11 @@ enum drm_mode_status drm_crtc_mode_valid(struct drm_crtc *crtc,
                                         const struct drm_display_mode *mode);
 enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
                                             const struct drm_display_mode *mode);
-enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector,
-                                              struct drm_display_mode *mode);
+int
+drm_connector_mode_valid(struct drm_connector *connector,
+                         struct drm_display_mode *mode,
+                         struct drm_modeset_acquire_ctx *ctx,
+                         enum drm_mode_status *status);
 
 struct drm_encoder *
 drm_connector_get_single_encoder(struct drm_connector *connector);
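The header change above inverts the reporting convention: the mode verdict now comes back through an out-parameter, while the return value carries only transient errors. A minimal caller-side sketch of the new convention (illustrative only; the real call sites are in the probe-helper hunks below):

    enum drm_mode_status status = MODE_OK;
    int ret;

    ret = drm_connector_mode_valid(connector, mode, ctx, &status);
    if (ret)                /* transient error, e.g. -EDEADLK: back off and retry */
            return ret;
    if (status != MODE_OK)  /* the actual verdict on the mode */
            mode->status = status;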
@@ -86,17 +86,19 @@ drm_mode_validate_flag(const struct drm_display_mode *mode,
         return MODE_OK;
 }
 
-static enum drm_mode_status
+static int
 drm_mode_validate_pipeline(struct drm_display_mode *mode,
-                           struct drm_connector *connector)
+                           struct drm_connector *connector,
+                           struct drm_modeset_acquire_ctx *ctx,
+                           enum drm_mode_status *status)
 {
         struct drm_device *dev = connector->dev;
-        enum drm_mode_status ret = MODE_OK;
         struct drm_encoder *encoder;
+        int ret;
 
         /* Step 1: Validate against connector */
-        ret = drm_connector_mode_valid(connector, mode);
-        if (ret != MODE_OK)
+        ret = drm_connector_mode_valid(connector, mode, ctx, status);
+        if (ret || *status != MODE_OK)
                 return ret;
 
         /* Step 2: Validate against encoders and crtcs */
@@ -104,8 +106,8 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
                 struct drm_bridge *bridge;
                 struct drm_crtc *crtc;
 
-                ret = drm_encoder_mode_valid(encoder, mode);
-                if (ret != MODE_OK) {
+                *status = drm_encoder_mode_valid(encoder, mode);
+                if (*status != MODE_OK) {
                         /* No point in continuing for crtc check as this encoder
                          * will not accept the mode anyway. If all encoders
                          * reject the mode then, at exit, ret will not be
@@ -114,10 +116,10 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
                 }
 
                 bridge = drm_bridge_chain_get_first_bridge(encoder);
-                ret = drm_bridge_chain_mode_valid(bridge,
-                                                  &connector->display_info,
-                                                  mode);
-                if (ret != MODE_OK) {
+                *status = drm_bridge_chain_mode_valid(bridge,
+                                                      &connector->display_info,
+                                                      mode);
+                if (*status != MODE_OK) {
                         /* There is also no point in continuing for crtc check
                          * here. */
                         continue;
@@ -127,17 +129,17 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
                         if (!drm_encoder_crtc_ok(encoder, crtc))
                                 continue;
 
-                        ret = drm_crtc_mode_valid(crtc, mode);
-                        if (ret == MODE_OK) {
+                        *status = drm_crtc_mode_valid(crtc, mode);
+                        if (*status == MODE_OK) {
                                 /* If we get to this point there is at least
                                  * one combination of encoder+crtc that works
                                  * for this mode. Lets return now. */
-                                return ret;
+                                return 0;
                         }
                 }
         }
 
-        return ret;
+        return 0;
 }
 
 static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
@@ -198,16 +200,27 @@ enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
         return encoder_funcs->mode_valid(encoder, mode);
 }
 
-enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector,
-                                              struct drm_display_mode *mode)
+int
+drm_connector_mode_valid(struct drm_connector *connector,
+                         struct drm_display_mode *mode,
+                         struct drm_modeset_acquire_ctx *ctx,
+                         enum drm_mode_status *status)
 {
         const struct drm_connector_helper_funcs *connector_funcs =
                 connector->helper_private;
+        int ret = 0;
 
-        if (!connector_funcs || !connector_funcs->mode_valid)
-                return MODE_OK;
+        if (!connector_funcs)
+                *status = MODE_OK;
+        else if (connector_funcs->mode_valid_ctx)
+                ret = connector_funcs->mode_valid_ctx(connector, mode, ctx,
+                                                      status);
+        else if (connector_funcs->mode_valid)
+                *status = connector_funcs->mode_valid(connector, mode);
+        else
+                *status = MODE_OK;
 
-        return connector_funcs->mode_valid(connector, mode);
+        return ret;
 }
 
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
@@ -385,8 +398,9 @@ EXPORT_SYMBOL(drm_helper_probe_detect);
  *   (if specified)
  * - drm_mode_validate_flag() checks the modes against basic connector
  *   capabilities (interlace_allowed,doublescan_allowed,stereo_allowed)
- * - the optional &drm_connector_helper_funcs.mode_valid helper can perform
- *   driver and/or sink specific checks
+ * - the optional &drm_connector_helper_funcs.mode_valid or
+ *   &drm_connector_helper_funcs.mode_valid_ctx helpers can perform driver
+ *   and/or sink specific checks
 * - the optional &drm_crtc_helper_funcs.mode_valid,
 *   &drm_bridge_funcs.mode_valid and &drm_encoder_helper_funcs.mode_valid
 *   helpers can perform driver and/or source specific checks which are also
@@ -517,22 +531,39 @@ retry:
                 mode_flags |= DRM_MODE_FLAG_3D_MASK;
 
         list_for_each_entry(mode, &connector->modes, head) {
-                if (mode->status == MODE_OK)
-                        mode->status = drm_mode_validate_driver(dev, mode);
+                if (mode->status != MODE_OK)
+                        continue;
 
-                if (mode->status == MODE_OK)
-                        mode->status = drm_mode_validate_size(mode, maxX, maxY);
+                mode->status = drm_mode_validate_driver(dev, mode);
+                if (mode->status != MODE_OK)
+                        continue;
 
-                if (mode->status == MODE_OK)
-                        mode->status = drm_mode_validate_flag(mode, mode_flags);
+                mode->status = drm_mode_validate_size(mode, maxX, maxY);
+                if (mode->status != MODE_OK)
+                        continue;
 
-                if (mode->status == MODE_OK)
-                        mode->status = drm_mode_validate_pipeline(mode,
-                                                                  connector);
+                mode->status = drm_mode_validate_flag(mode, mode_flags);
+                if (mode->status != MODE_OK)
+                        continue;
 
-                if (mode->status == MODE_OK)
-                        mode->status = drm_mode_validate_ycbcr420(mode,
-                                                                  connector);
+                ret = drm_mode_validate_pipeline(mode, connector, &ctx,
+                                                 &mode->status);
+                if (ret) {
+                        drm_dbg_kms(dev,
+                                    "drm_mode_validate_pipeline failed: %d\n",
+                                    ret);
+
+                        if (drm_WARN_ON_ONCE(dev, ret != -EDEADLK)) {
+                                mode->status = MODE_ERROR;
+                        } else {
+                                drm_modeset_backoff(&ctx);
+                                goto retry;
+                        }
+                }
+
+                if (mode->status != MODE_OK)
+                        continue;
+                mode->status = drm_mode_validate_ycbcr420(mode, connector);
         }
 
 prune:
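The new &drm_connector_helper_funcs.mode_valid_ctx hook exists so that a connector's mode_valid logic can take modeset locks and participate in the acquire-context backoff/retry dance shown above. A hedged sketch of a driver-side implementation — the "foo" names and the hardware check are hypothetical; only the prototype and the -EDEADLK contract come from this patch:

    static int foo_connector_mode_valid_ctx(struct drm_connector *connector,
                                            struct drm_display_mode *mode,
                                            struct drm_modeset_acquire_ctx *ctx,
                                            enum drm_mode_status *status)
    {
            int ret;

            /* Take a lock under the caller's acquire context; on contention
             * this returns -EDEADLK and the probe helper backs off and
             * retries the whole validation loop. */
            ret = drm_modeset_lock(&connector->dev->mode_config.connection_mutex,
                                   ctx);
            if (ret)
                    return ret;

            /* With the lock held, perform the (hypothetical) check. */
            *status = foo_check_mode_against_hw(connector, mode);
            return 0;
    }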
@@ -112,6 +112,7 @@ gt-y += \
         gt/intel_ring_submission.o \
         gt/intel_rps.o \
         gt/intel_sseu.o \
+        gt/intel_sseu_debugfs.o \
         gt/intel_timeline.o \
         gt/intel_workarounds.o \
         gt/shmem_utils.o \
@@ -722,6 +722,9 @@ parse_power_conservation_features(struct drm_i915_private *dev_priv,
          */
         if (!(power->drrs & BIT(panel_type)))
                 dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+
+        if (bdb->version >= 232)
+                dev_priv->vbt.edp.hobl = power->hobl & BIT(panel_type);
 }
 
 static void
@@ -2080,8 +2080,15 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
          * Explicitly stating here that this seems to be currently
          * rather a Hack, than final solution.
          */
-        if (IS_TIGERLAKE(dev_priv))
-                min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
+        if (IS_TIGERLAKE(dev_priv)) {
+                /*
+                 * Clamp to max_cdclk_freq in case pixel rate is higher,
+                 * in order not to break an 8K, but still leave W/A at place.
+                 */
+                min_cdclk = max_t(int, min_cdclk,
+                                  min_t(int, crtc_state->pixel_rate,
+                                        dev_priv->max_cdclk_freq));
+        }
 
         if (min_cdclk > dev_priv->max_cdclk_freq) {
                 drm_dbg_kms(&dev_priv->drm,
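For intuition, the clamp above keeps the Tiger Lake workaround while capping it at the platform limit. A worked example with illustrative numbers (not taken from the patch): if min_cdclk is 326400 kHz, crtc_state->pixel_rate is 1188000 kHz (an 8K-class mode) and dev_priv->max_cdclk_freq is 652800 kHz, then

    min_cdclk = max(326400, min(1188000, 652800)) = 652800 kHz

so the mode no longer trips the "min cdclk exceeds max" rejection that previously broke 8K, while lower pixel rates still get the full workaround.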
@@ -264,6 +264,18 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
         if (!icl_combo_phy_enabled(dev_priv, phy))
                 return false;
 
+        if (INTEL_GEN(dev_priv) >= 12) {
+                ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN0(phy),
+                                     ICL_PORT_TX_DW8_ODCC_CLK_SEL |
+                                     ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK,
+                                     ICL_PORT_TX_DW8_ODCC_CLK_SEL |
+                                     ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2);
+
+                ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN0(phy),
+                                     DCC_MODE_SELECT_MASK,
+                                     DCC_MODE_SELECT_CONTINUOSLY);
+        }
+
         ret = cnl_verify_procmon_ref_values(dev_priv, phy);
 
         if (phy_is_master(dev_priv, phy)) {
@@ -375,6 +387,19 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
                 intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
 
 skip_phy_misc:
+                if (INTEL_GEN(dev_priv) >= 12) {
+                        val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN0(phy));
+                        val &= ~ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK;
+                        val |= ICL_PORT_TX_DW8_ODCC_CLK_SEL;
+                        val |= ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2;
+                        intel_de_write(dev_priv, ICL_PORT_TX_DW8_GRP(phy), val);
+
+                        val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
+                        val &= ~DCC_MODE_SELECT_MASK;
+                        val |= DCC_MODE_SELECT_CONTINUOSLY;
+                        intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
+                }
+
                 cnl_set_procmon_ref_values(dev_priv, phy);
 
                 if (phy_is_master(dev_priv, phy)) {
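The verify path above relies on a small read/mask/compare helper. A hedged sketch of what a check_phy_reg()-style verifier does — the actual helper lives elsewhere in the same file and may differ in details:

    static bool check_phy_reg(struct drm_i915_private *dev_priv,
                              enum phy phy, i915_reg_t reg,
                              u32 mask, u32 expected)
    {
            u32 val = intel_de_read(dev_priv, reg);

            if ((val & mask) == expected)
                    return true;

            /* Report which bits diverged from the init-time programming. */
            drm_dbg(&dev_priv->drm,
                    "Combo PHY %c reg %08x state mismatch: current %08x mask %08x expected %08x\n",
                    phy_name(phy), i915_mmio_reg_offset(reg),
                    val, mask, expected);
            return false;
    }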
@@ -707,8 +707,10 @@ static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] =
 };
 
 static const struct ddi_buf_trans *
-bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
 {
+        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
         if (dev_priv->vbt.edp.low_vswing) {
                 *n_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
                 return bdw_ddi_translations_edp;
@@ -719,8 +721,10 @@ bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 }
 
 static const struct ddi_buf_trans *
-skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+skl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
 {
+        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
         if (IS_SKL_ULX(dev_priv)) {
                 *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
                 return skl_y_ddi_translations_dp;
@@ -734,8 +738,10 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 }
 
 static const struct ddi_buf_trans *
-kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+kbl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
 {
+        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
         if (IS_KBL_ULX(dev_priv) ||
             IS_CFL_ULX(dev_priv) ||
             IS_CML_ULX(dev_priv)) {
@@ -753,8 +759,10 @@ kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 }
 
 static const struct ddi_buf_trans *
-skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+skl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
 {
+        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
         if (dev_priv->vbt.edp.low_vswing) {
                 if (IS_SKL_ULX(dev_priv) ||
                     IS_KBL_ULX(dev_priv) ||
@@ -777,9 +785,9 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
                 if (IS_KABYLAKE(dev_priv) ||
                     IS_COFFEELAKE(dev_priv) ||
                     IS_COMETLAKE(dev_priv))
-                        return kbl_get_buf_trans_dp(dev_priv, n_entries);
+                        return kbl_get_buf_trans_dp(encoder, n_entries);
                 else
-                        return skl_get_buf_trans_dp(dev_priv, n_entries);
+                        return skl_get_buf_trans_dp(encoder, n_entries);
 }
 
 static const struct ddi_buf_trans *
@@ -807,20 +815,21 @@ static int skl_buf_trans_num_entries(enum port port, int n_entries)
 }
 
 static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
-                           enum port port, int *n_entries)
+intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
 {
+        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
         if (IS_KABYLAKE(dev_priv) ||
             IS_COFFEELAKE(dev_priv) ||
             IS_COMETLAKE(dev_priv)) {
                 const struct ddi_buf_trans *ddi_translations =
-                        kbl_get_buf_trans_dp(dev_priv, n_entries);
-                *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+                        kbl_get_buf_trans_dp(encoder, n_entries);
+                *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
                 return ddi_translations;
         } else if (IS_SKYLAKE(dev_priv)) {
                 const struct ddi_buf_trans *ddi_translations =
-                        skl_get_buf_trans_dp(dev_priv, n_entries);
-                *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+                        skl_get_buf_trans_dp(encoder, n_entries);
+                *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
                 return ddi_translations;
         } else if (IS_BROADWELL(dev_priv)) {
                 *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
@@ -835,16 +844,17 @@ intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
 }
 
 static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv,
-                            enum port port, int *n_entries)
+intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
 {
+        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
         if (IS_GEN9_BC(dev_priv)) {
                 const struct ddi_buf_trans *ddi_translations =
-                        skl_get_buf_trans_edp(dev_priv, n_entries);
-                *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+                        skl_get_buf_trans_edp(encoder, n_entries);
+                *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
                 return ddi_translations;
         } else if (IS_BROADWELL(dev_priv)) {
-                return bdw_get_buf_trans_edp(dev_priv, n_entries);
+                return bdw_get_buf_trans_edp(encoder, n_entries);
         } else if (IS_HASWELL(dev_priv)) {
                 *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
                 return hsw_ddi_translations_dp;
@@ -871,9 +881,11 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
 }
 
 static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
+intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder,
                              int *n_entries)
 {
+        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
         if (IS_GEN9_BC(dev_priv)) {
                 return skl_get_buf_trans_hdmi(dev_priv, n_entries);
         } else if (IS_BROADWELL(dev_priv)) {
@@ -889,33 +901,36 @@ intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
 }
 
 static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+bxt_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
 {
         *n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
         return bxt_ddi_translations_dp;
 }
 
 static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+bxt_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
 {
+        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
         if (dev_priv->vbt.edp.low_vswing) {
                 *n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
                 return bxt_ddi_translations_edp;
         }
 
-        return bxt_get_buf_trans_dp(dev_priv, n_entries);
+        return bxt_get_buf_trans_dp(encoder, n_entries);
 }
 
 static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+bxt_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
 {
         *n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
         return bxt_ddi_translations_hdmi;
 }
 
 static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
 {
+        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
         u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
 
         if (voltage == VOLTAGE_INFO_0_85V) {
@@ -935,8 +950,9 @@ cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 }
 
 static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
 {
+        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
         u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
 
         if (voltage == VOLTAGE_INFO_0_85V) {
@@ -956,8 +972,9 @@ cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 }
 
 static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
 {
+        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
         u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
 
         if (dev_priv->vbt.edp.low_vswing) {
@@ -976,14 +993,16 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
                 }
                 return NULL;
         } else {
-                return cnl_get_buf_trans_dp(dev_priv, n_entries);
+                return cnl_get_buf_trans_dp(encoder, n_entries);
         }
 }
 
 static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+icl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
                         int *n_entries)
 {
+        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
         if (type == INTEL_OUTPUT_HDMI) {
                 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
                 return icl_combo_phy_ddi_translations_hdmi;
@@ -1000,7 +1019,7 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
 }
 
 static const struct icl_mg_phy_ddi_buf_trans *
-icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+icl_get_mg_buf_trans(struct intel_encoder *encoder, int type, int rate,
                      int *n_entries)
 {
         if (type == INTEL_OUTPUT_HDMI) {
@@ -1016,7 +1035,7 @@ icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
 }
 
 static const struct cnl_ddi_buf_trans *
-ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ehl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
                         int *n_entries)
 {
         if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP) {
@@ -1024,15 +1043,15 @@ ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
                 return ehl_combo_phy_ddi_translations_dp;
         }
 
-        return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+        return icl_get_combo_buf_trans(encoder, type, rate, n_entries);
 }
 
 static const struct cnl_ddi_buf_trans *
-tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+tgl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
                         int *n_entries)
 {
         if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) {
-                return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+                return icl_get_combo_buf_trans(encoder, type, rate, n_entries);
         } else if (rate > 270000) {
                 *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
                 return tgl_combo_phy_ddi_translations_dp_hbr2;
@@ -1043,7 +1062,7 @@ tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
 }
 
 static const struct tgl_dkl_phy_ddi_buf_trans *
-tgl_get_dkl_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+tgl_get_dkl_buf_trans(struct intel_encoder *encoder, int type, int rate,
                       int *n_entries)
 {
         if (type == INTEL_OUTPUT_HDMI) {
@@ -1066,34 +1085,34 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
 
         if (INTEL_GEN(dev_priv) >= 12) {
                 if (intel_phy_is_combo(dev_priv, phy))
-                        tgl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+                        tgl_get_combo_buf_trans(encoder, INTEL_OUTPUT_HDMI,
                                                 0, &n_entries);
                 else
-                        tgl_get_dkl_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0,
+                        tgl_get_dkl_buf_trans(encoder, INTEL_OUTPUT_HDMI, 0,
                                               &n_entries);
                 default_entry = n_entries - 1;
         } else if (INTEL_GEN(dev_priv) == 11) {
                 if (intel_phy_is_combo(dev_priv, phy))
-                        icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+                        icl_get_combo_buf_trans(encoder, INTEL_OUTPUT_HDMI,
                                                 0, &n_entries);
                 else
-                        icl_get_mg_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0,
+                        icl_get_mg_buf_trans(encoder, INTEL_OUTPUT_HDMI, 0,
                                              &n_entries);
                 default_entry = n_entries - 1;
         } else if (IS_CANNONLAKE(dev_priv)) {
-                cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
+                cnl_get_buf_trans_hdmi(encoder, &n_entries);
                 default_entry = n_entries - 1;
         } else if (IS_GEN9_LP(dev_priv)) {
-                bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+                bxt_get_buf_trans_hdmi(encoder, &n_entries);
                 default_entry = n_entries - 1;
         } else if (IS_GEN9_BC(dev_priv)) {
-                intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+                intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
                 default_entry = 8;
         } else if (IS_BROADWELL(dev_priv)) {
-                intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+                intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
                 default_entry = 7;
         } else if (IS_HASWELL(dev_priv)) {
-                intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+                intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
                 default_entry = 6;
         } else {
                 drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n");
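All of the buf-trans getters above now take the encoder rather than dev_priv. The recurring pattern, shown as a hypothetical getter (the "foo" name and table are placeholders, not from this patch):

    static const struct ddi_buf_trans *
    foo_get_buf_trans(struct intel_encoder *encoder, int *n_entries)
    {
            /* Derive the device from the encoder; per-encoder state such as
             * encoder->port is now also in reach without extra parameters. */
            struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

            *n_entries = ARRAY_SIZE(foo_ddi_translations_dp);
            return foo_ddi_translations_dp;
    }

This is what lets intel_ddi_get_buf_trans_dp()/intel_ddi_get_buf_trans_edp() drop their enum port argument: the port is recovered as encoder->port inside the helper.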
@@ -1131,10 +1150,10 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
                 ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv,
                                                                &n_entries);
         else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
-                ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port,
+                ddi_translations = intel_ddi_get_buf_trans_edp(encoder,
                                                                &n_entries);
         else
-                ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port,
+                ddi_translations = intel_ddi_get_buf_trans_dp(encoder,
                                                               &n_entries);
 
         /* If we're boosting the current, set bit 31 of trans1 */
@@ -1163,7 +1182,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
         enum port port = encoder->port;
         const struct ddi_buf_trans *ddi_translations;
 
-        ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+        ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
 
         if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
                 return;
@@ -1184,16 +1203,30 @@
 static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
                                     enum port port)
 {
-        i915_reg_t reg = DDI_BUF_CTL(port);
-        int i;
-
-        for (i = 0; i < 16; i++) {
-                udelay(1);
-                if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE)
-                        return;
+        if (IS_BROXTON(dev_priv)) {
+                udelay(16);
+                return;
         }
-        drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c idle bit\n",
-                port_name(port));
+
+        if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+                         DDI_BUF_IS_IDLE), 8))
+                drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get idle\n",
+                        port_name(port));
+}
+
+static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
+                                      enum port port)
+{
+        /* Wait > 518 usecs for DDI_BUF_CTL to be non idle */
+        if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
+                usleep_range(518, 1000);
+                return;
+        }
+
+        if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+                          DDI_BUF_IS_IDLE), 500))
+                drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get active\n",
+                        port_name(port));
 }
 
 static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
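Both waiters above use the driver's wait_for_us(COND, US) polling macro in place of the old open-coded 16 x 1 us loop (and, further down, the fixed udelay(600)). As a behavioural summary only — not its implementation — the macro polls COND until it becomes true or US microseconds elapse:

    /* Illustrative shape of the contract: */
    int err = wait_for_us(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
                          DDI_BUF_IS_IDLE, 8);
    /* err == 0 if the condition became true within 8 us,
     * -ETIMEDOUT otherwise — hence the call sites only log on nonzero. */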
@@ -1394,10 +1427,9 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
 static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
 {
         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-        struct intel_digital_port *intel_dig_port =
-                enc_to_dig_port(encoder);
+        struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 
-        intel_dp->DP = intel_dig_port->saved_port_bits |
+        intel_dp->DP = dig_port->saved_port_bits |
                 DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
         intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
 }
@@ -2070,9 +2102,8 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
 static void skl_ddi_set_iboost(struct intel_encoder *encoder,
                                int level, enum intel_output_type type)
 {
-        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+        struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-        enum port port = encoder->port;
         u8 iboost;
 
         if (type == INTEL_OUTPUT_HDMI)
@@ -2085,11 +2116,13 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
                 int n_entries;
 
                 if (type == INTEL_OUTPUT_HDMI)
-                        ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+                        ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
                 else if (type == INTEL_OUTPUT_EDP)
-                        ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
+                        ddi_translations = intel_ddi_get_buf_trans_edp(encoder,
                                                                        &n_entries);
                 else
-                        ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
+                        ddi_translations = intel_ddi_get_buf_trans_dp(encoder,
                                                                       &n_entries);
 
                 if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
                         return;
@@ -2105,9 +2138,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
                 return;
         }
 
-        _skl_ddi_set_iboost(dev_priv, port, iboost);
+        _skl_ddi_set_iboost(dev_priv, encoder->port, iboost);
 
-        if (port == PORT_A && intel_dig_port->max_lanes == 4)
+        if (encoder->port == PORT_A && dig_port->max_lanes == 4)
                 _skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
 }
 
@@ -2120,11 +2153,11 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
         int n_entries;
 
         if (type == INTEL_OUTPUT_HDMI)
-                ddi_translations = bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+                ddi_translations = bxt_get_buf_trans_hdmi(encoder, &n_entries);
         else if (type == INTEL_OUTPUT_EDP)
-                ddi_translations = bxt_get_buf_trans_edp(dev_priv, &n_entries);
+                ddi_translations = bxt_get_buf_trans_edp(encoder, &n_entries);
         else
-                ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries);
+                ddi_translations = bxt_get_buf_trans_dp(encoder, &n_entries);
 
         if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
                 return;
@@ -2148,36 +2181,36 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp)
 
         if (INTEL_GEN(dev_priv) >= 12) {
                 if (intel_phy_is_combo(dev_priv, phy))
-                        tgl_get_combo_buf_trans(dev_priv, encoder->type,
+                        tgl_get_combo_buf_trans(encoder, encoder->type,
                                                 intel_dp->link_rate, &n_entries);
                 else
-                        tgl_get_dkl_buf_trans(dev_priv, encoder->type,
+                        tgl_get_dkl_buf_trans(encoder, encoder->type,
                                               intel_dp->link_rate, &n_entries);
         } else if (INTEL_GEN(dev_priv) == 11) {
                 if (IS_ELKHARTLAKE(dev_priv))
-                        ehl_get_combo_buf_trans(dev_priv, encoder->type,
+                        ehl_get_combo_buf_trans(encoder, encoder->type,
                                                 intel_dp->link_rate, &n_entries);
                 else if (intel_phy_is_combo(dev_priv, phy))
-                        icl_get_combo_buf_trans(dev_priv, encoder->type,
+                        icl_get_combo_buf_trans(encoder, encoder->type,
                                                 intel_dp->link_rate, &n_entries);
                 else
-                        icl_get_mg_buf_trans(dev_priv, encoder->type,
+                        icl_get_mg_buf_trans(encoder, encoder->type,
                                              intel_dp->link_rate, &n_entries);
         } else if (IS_CANNONLAKE(dev_priv)) {
                 if (encoder->type == INTEL_OUTPUT_EDP)
-                        cnl_get_buf_trans_edp(dev_priv, &n_entries);
+                        cnl_get_buf_trans_edp(encoder, &n_entries);
                 else
-                        cnl_get_buf_trans_dp(dev_priv, &n_entries);
+                        cnl_get_buf_trans_dp(encoder, &n_entries);
         } else if (IS_GEN9_LP(dev_priv)) {
                 if (encoder->type == INTEL_OUTPUT_EDP)
-                        bxt_get_buf_trans_edp(dev_priv, &n_entries);
+                        bxt_get_buf_trans_edp(encoder, &n_entries);
                 else
-                        bxt_get_buf_trans_dp(dev_priv, &n_entries);
+                        bxt_get_buf_trans_dp(encoder, &n_entries);
         } else {
                 if (encoder->type == INTEL_OUTPUT_EDP)
-                        intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
+                        intel_ddi_get_buf_trans_edp(encoder, &n_entries);
                 else
-                        intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
+                        intel_ddi_get_buf_trans_dp(encoder, &n_entries);
         }
 
         if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
@@ -2210,11 +2243,11 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
         u32 val;
 
         if (type == INTEL_OUTPUT_HDMI)
-                ddi_translations = cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
+                ddi_translations = cnl_get_buf_trans_hdmi(encoder, &n_entries);
         else if (type == INTEL_OUTPUT_EDP)
-                ddi_translations = cnl_get_buf_trans_edp(dev_priv, &n_entries);
+                ddi_translations = cnl_get_buf_trans_edp(encoder, &n_entries);
         else
-                ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries);
+                ddi_translations = cnl_get_buf_trans_dp(encoder, &n_entries);
 
         if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
                 return;
@@ -2331,22 +2364,23 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
         intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
 }
 
-static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
-                                         u32 level, enum phy phy, int type,
-                                         int rate)
+static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
+                                         u32 level, int type, int rate)
 {
+        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+        enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
         const struct cnl_ddi_buf_trans *ddi_translations = NULL;
         u32 n_entries, val;
         int ln;
 
         if (INTEL_GEN(dev_priv) >= 12)
-                ddi_translations = tgl_get_combo_buf_trans(dev_priv, type, rate,
+                ddi_translations = tgl_get_combo_buf_trans(encoder, type, rate,
                                                            &n_entries);
         else if (IS_ELKHARTLAKE(dev_priv))
-                ddi_translations = ehl_get_combo_buf_trans(dev_priv, type, rate,
+                ddi_translations = ehl_get_combo_buf_trans(encoder, type, rate,
                                                            &n_entries);
         else
-                ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
+                ddi_translations = icl_get_combo_buf_trans(encoder, type, rate,
                                                            &n_entries);
         if (!ddi_translations)
                 return;
@@ -2458,7 +2492,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
         intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
 
         /* 5. Program swing and de-emphasis */
-        icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate);
+        icl_ddi_combo_vswing_program(encoder, level, type, rate);
 
         /* 6. Set training enable to trigger update */
         val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
@@ -2482,7 +2516,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
                 rate = intel_dp->link_rate;
         }
 
-        ddi_translations = icl_get_mg_buf_trans(dev_priv, type, rate,
+        ddi_translations = icl_get_mg_buf_trans(encoder, type, rate,
                                                 &n_entries);
         /* The table does not have values for level 3 and level 9. */
         if (level >= n_entries || level == 3 || level == 9) {
@@ -2627,7 +2661,7 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
                 rate = intel_dp->link_rate;
         }
 
-        ddi_translations = tgl_get_dkl_buf_trans(dev_priv, encoder->type, rate,
+        ddi_translations = tgl_get_dkl_buf_trans(encoder, encoder->type, rate,
                                                  &n_entries);
 
         if (level >= n_entries)
@@ -3000,15 +3034,15 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
 }
 
 static void
-icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
+icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
                        const struct intel_crtc_state *crtc_state)
 {
-        struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
-        enum tc_port tc_port = intel_port_to_tc(dev_priv, intel_dig_port->base.port);
+        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+        enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
         u32 ln0, ln1, pin_assignment;
         u8 width;
 
-        if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
+        if (dig_port->tc_mode == TC_PORT_TBT_ALT)
                 return;
 
         if (INTEL_GEN(dev_priv) >= 12) {
@@ -3027,13 +3061,13 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
         ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
 
         /* DPPATC */
-        pin_assignment = intel_tc_port_get_pin_assignment_mask(intel_dig_port);
+        pin_assignment = intel_tc_port_get_pin_assignment_mask(dig_port);
         width = crtc_state->lane_count;
 
         switch (pin_assignment) {
         case 0x0:
                 drm_WARN_ON(&dev_priv->drm,
-                            intel_dig_port->tc_mode != TC_PORT_LEGACY);
+                            dig_port->tc_mode != TC_PORT_LEGACY);
                 if (width == 1) {
                         ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
                 } else {
@@ -3978,10 +4012,9 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
 
 static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
 {
-        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-        struct drm_i915_private *dev_priv =
-                to_i915(intel_dig_port->base.base.dev);
-        enum port port = intel_dig_port->base.port;
+        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+        enum port port = dig_port->base.port;
         u32 dp_tp_ctl, ddi_buf_ctl;
         bool wait = false;
 
@@ -4020,7 +4053,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
         intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
         intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
 
-        udelay(600);
+        intel_wait_ddi_buf_active(dev_priv, port);
 }
 
 static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
@@ -4536,42 +4569,41 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
 };
 
 static struct intel_connector *
-intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
+intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
 {
-        struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
         struct intel_connector *connector;
-        enum port port = intel_dig_port->base.port;
+        enum port port = dig_port->base.port;
 
         connector = intel_connector_alloc();
         if (!connector)
                 return NULL;
 
-        intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
-        intel_dig_port->dp.prepare_link_retrain =
-                intel_ddi_prepare_link_retrain;
-        intel_dig_port->dp.set_link_train = intel_ddi_set_link_train;
-        intel_dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
+        dig_port->dp.output_reg = DDI_BUF_CTL(port);
+        dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain;
+        dig_port->dp.set_link_train = intel_ddi_set_link_train;
+        dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
 
         if (INTEL_GEN(dev_priv) >= 12)
-                intel_dig_port->dp.set_signal_levels = tgl_set_signal_levels;
+                dig_port->dp.set_signal_levels = tgl_set_signal_levels;
         else if (INTEL_GEN(dev_priv) >= 11)
-                intel_dig_port->dp.set_signal_levels = icl_set_signal_levels;
+                dig_port->dp.set_signal_levels = icl_set_signal_levels;
         else if (IS_CANNONLAKE(dev_priv))
-                intel_dig_port->dp.set_signal_levels = cnl_set_signal_levels;
+                dig_port->dp.set_signal_levels = cnl_set_signal_levels;
         else if (IS_GEN9_LP(dev_priv))
-                intel_dig_port->dp.set_signal_levels = bxt_set_signal_levels;
+                dig_port->dp.set_signal_levels = bxt_set_signal_levels;
         else
-                intel_dig_port->dp.set_signal_levels = hsw_set_signal_levels;
+                dig_port->dp.set_signal_levels = hsw_set_signal_levels;
 
-        intel_dig_port->dp.voltage_max = intel_ddi_dp_voltage_max;
-        intel_dig_port->dp.preemph_max = intel_ddi_dp_preemph_max;
+        dig_port->dp.voltage_max = intel_ddi_dp_voltage_max;
+        dig_port->dp.preemph_max = intel_ddi_dp_preemph_max;
 
         if (INTEL_GEN(dev_priv) < 12) {
-                intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
-                intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
+                dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
+                dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
         }
 
-        if (!intel_dp_init_connector(intel_dig_port, connector)) {
+        if (!intel_dp_init_connector(dig_port, connector)) {
                 kfree(connector);
                 return NULL;
         }
@@ -4770,29 +4802,29 @@ static bool bdw_digital_port_connected(struct intel_encoder *encoder)
 }
 
 static struct intel_connector *
-intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
+intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
 {
         struct intel_connector *connector;
-        enum port port = intel_dig_port->base.port;
+        enum port port = dig_port->base.port;
 
         connector = intel_connector_alloc();
         if (!connector)
                 return NULL;
 
-        intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
-        intel_hdmi_init_connector(intel_dig_port, connector);
+        dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+        intel_hdmi_init_connector(dig_port, connector);
 
         return connector;
 }
 
-static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport)
+static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
 {
-        struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 
-        if (dport->base.port != PORT_A)
+        if (dig_port->base.port != PORT_A)
                 return false;
 
-        if (dport->saved_port_bits & DDI_A_4_LANES)
+        if (dig_port->saved_port_bits & DDI_A_4_LANES)
                 return false;
 
         /* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
@@ -4814,10 +4846,10 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
 }
 
 static int
-intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
+intel_ddi_max_lanes(struct intel_digital_port *dig_port)
 {
-        struct drm_i915_private *dev_priv = to_i915(intel_dport->base.base.dev);
-        enum port port = intel_dport->base.port;
+        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+        enum port port = dig_port->base.port;
         int max_lanes = 4;
 
         if (INTEL_GEN(dev_priv) >= 11)
@@ -4836,10 +4868,10 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
          * wasn't lit up at boot. Force this bit set when needed
          * so we use the proper lane count for our calculations.
          */
-        if (intel_ddi_a_force_4_lanes(intel_dport)) {
+        if (intel_ddi_a_force_4_lanes(dig_port)) {
                 drm_dbg_kms(&dev_priv->drm,
                             "Forcing DDI_A_4_LANES for port A\n");
-                intel_dport->saved_port_bits |= DDI_A_4_LANES;
+                dig_port->saved_port_bits |= DDI_A_4_LANES;
                 max_lanes = 4;
         }
@@ -4848,7 +4880,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
 
 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 {
-        struct intel_digital_port *intel_dig_port;
+        struct intel_digital_port *dig_port;
         struct intel_encoder *encoder;
         bool init_hdmi, init_dp, init_lspcon = false;
         enum phy phy = intel_port_to_phy(dev_priv, port);
@@ -4877,11 +4909,11 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
                 return;
         }
 
-        intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
-        if (!intel_dig_port)
+        dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+        if (!dig_port)
                 return;
 
-        encoder = &intel_dig_port->base;
+        encoder = &dig_port->base;
 
         drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
                          DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
@@ -4908,49 +4940,49 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
         encoder->pipe_mask = ~0;
 
         if (INTEL_GEN(dev_priv) >= 11)
-                intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
-                                                                DDI_BUF_CTL(port)) &
-                        DDI_BUF_PORT_REVERSAL;
+                dig_port->saved_port_bits =
+                        intel_de_read(dev_priv, DDI_BUF_CTL(port))
+                        & DDI_BUF_PORT_REVERSAL;
         else
-                intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
-                                                                DDI_BUF_CTL(port)) &
-                        (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
+                dig_port->saved_port_bits =
+                        intel_de_read(dev_priv, DDI_BUF_CTL(port))
+                        & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
 
-        intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
-        intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
-        intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+        dig_port->dp.output_reg = INVALID_MMIO_REG;
+        dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
+        dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
 
         if (intel_phy_is_tc(dev_priv, phy)) {
                 bool is_legacy =
                         !intel_bios_port_supports_typec_usb(dev_priv, port) &&
                         !intel_bios_port_supports_tbt(dev_priv, port);
 
-                intel_tc_port_init(intel_dig_port, is_legacy);
+                intel_tc_port_init(dig_port, is_legacy);
 
                 encoder->update_prepare = intel_ddi_update_prepare;
                 encoder->update_complete = intel_ddi_update_complete;
         }
 
         drm_WARN_ON(&dev_priv->drm, port > PORT_I);
-        intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
+        dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
                 port - PORT_A;
 
         if (init_dp) {
-                if (!intel_ddi_init_dp_connector(intel_dig_port))
+                if (!intel_ddi_init_dp_connector(dig_port))
                         goto err;
 
-                intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+                dig_port->hpd_pulse = intel_dp_hpd_pulse;
         }
 
         /* In theory we don't need the encoder->type check, but leave it just in
          * case we have some really bad VBTs... */
         if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
-                if (!intel_ddi_init_hdmi_connector(intel_dig_port))
+                if (!intel_ddi_init_hdmi_connector(dig_port))
                         goto err;
         }
 
         if (init_lspcon) {
-                if (lspcon_init(intel_dig_port))
+                if (lspcon_init(dig_port))
                         /* TODO: handle hdmi info frame part */
                         drm_dbg_kms(&dev_priv->drm,
                                     "LSPCON init success on port %c\n",
@@ -4967,26 +4999,26 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 
         if (INTEL_GEN(dev_priv) >= 11) {
                 if (intel_phy_is_tc(dev_priv, phy))
-                        intel_dig_port->connected = intel_tc_port_connected;
+                        dig_port->connected = intel_tc_port_connected;
                 else
-                        intel_dig_port->connected = lpt_digital_port_connected;
+                        dig_port->connected = lpt_digital_port_connected;
         } else if (INTEL_GEN(dev_priv) >= 8) {
                 if (port == PORT_A || IS_GEN9_LP(dev_priv))
-                        intel_dig_port->connected = bdw_digital_port_connected;
+                        dig_port->connected = bdw_digital_port_connected;
                 else
-                        intel_dig_port->connected = lpt_digital_port_connected;
+                        dig_port->connected = lpt_digital_port_connected;
         } else {
                 if (port == PORT_A)
-                        intel_dig_port->connected = hsw_digital_port_connected;
+                        dig_port->connected = hsw_digital_port_connected;
                 else
-                        intel_dig_port->connected = lpt_digital_port_connected;
+                        dig_port->connected = lpt_digital_port_connected;
         }
 
-        intel_infoframe_init(intel_dig_port);
+        intel_infoframe_init(dig_port);
 
         return;
 
 err:
         drm_encoder_cleanup(&encoder->base);
-        kfree(intel_dig_port);
+        kfree(dig_port);
 }
@@ -1612,13 +1612,13 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 }
 
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
-                         struct intel_digital_port *dport,
+                         struct intel_digital_port *dig_port,
                          unsigned int expected_mask)
 {
         u32 port_mask;
         i915_reg_t dpll_reg;
 
-        switch (dport->base.port) {
+        switch (dig_port->base.port) {
         case PORT_B:
                 port_mask = DPLL_PORTB_READY_MASK;
                 dpll_reg = DPLL(0);
@@ -1640,7 +1640,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                      port_mask, expected_mask, 1000))
                 drm_WARN(&dev_priv->drm, 1,
                          "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
-                         dport->base.base.base.id, dport->base.base.name,
+                         dig_port->base.base.base.id, dig_port->base.base.name,
                          intel_de_read(dev_priv, dpll_reg) & port_mask,
                          expected_mask);
 }
@@ -10073,7 +10073,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
         drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
                     crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
 
-        if (crtc_state->limited_color_range)
+        if (crtc_state->limited_color_range &&
+            !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                 val |= PIPECONF_COLOR_RANGE_SELECT;
 
         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
@@ -16332,7 +16333,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
          * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
          * port is hooked to pipe B. Hence we want plane A feeding pipe B.
          */
-        if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
+        if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
+            INTEL_NUM_PIPES(dev_priv) == 2)
                 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
         else
                 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
@@ -542,7 +542,7 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
 
 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
-                         struct intel_digital_port *dport,
+                         struct intel_digital_port *dig_port,
                          unsigned int expected_mask);
 int intel_get_load_detect_pipe(struct drm_connector *connector,
                                struct intel_load_detect_pipe *old,
@@ -1194,7 +1194,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
         struct drm_i915_private *dev_priv = node_to_i915(m->private);
         struct drm_device *dev = &dev_priv->drm;
         struct intel_encoder *intel_encoder;
-        struct intel_digital_port *intel_dig_port;
+        struct intel_digital_port *dig_port;
         struct drm_connector *connector;
         struct drm_connector_list_iter conn_iter;
 
@@ -1207,14 +1207,14 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
                 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
                         continue;
 
-                intel_dig_port = enc_to_dig_port(intel_encoder);
-                if (!intel_dig_port->dp.can_mst)
+                dig_port = enc_to_dig_port(intel_encoder);
+                if (!dig_port->dp.can_mst)
                         continue;
 
                 seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
-                           intel_dig_port->base.base.base.id,
-                           intel_dig_port->base.base.name);
-                drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+                           dig_port->base.base.base.id,
+                           dig_port->base.base.name);
+                drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
         }
         drm_connector_list_iter_end(&conn_iter);
 
@@ -1817,8 +1817,8 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
         struct i915_power_domains *power_domains = &dev_priv->power_domains;
-        enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
-        enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
+        enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
+        enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
 
         mutex_lock(&power_domains->lock);
 
@@ -279,10 +279,10 @@ enum check_link_response {
  */
 struct intel_hdcp_shim {
 	/* Outputs the transmitter's An and Aksv values to the receiver. */
-	int (*write_an_aksv)(struct intel_digital_port *intel_dig_port, u8 *an);
+	int (*write_an_aksv)(struct intel_digital_port *dig_port, u8 *an);
 
 	/* Reads the receiver's key selection vector */
-	int (*read_bksv)(struct intel_digital_port *intel_dig_port, u8 *bksv);
+	int (*read_bksv)(struct intel_digital_port *dig_port, u8 *bksv);
 
 	/*
 	 * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The
@@ -290,52 +290,52 @@ struct intel_hdcp_shim {
 	 * different. Call it BSTATUS since that's the name the HDMI spec
 	 * uses and it was there first.
 	 */
-	int (*read_bstatus)(struct intel_digital_port *intel_dig_port,
+	int (*read_bstatus)(struct intel_digital_port *dig_port,
 			    u8 *bstatus);
 
 	/* Determines whether a repeater is present downstream */
-	int (*repeater_present)(struct intel_digital_port *intel_dig_port,
+	int (*repeater_present)(struct intel_digital_port *dig_port,
 				bool *repeater_present);
 
 	/* Reads the receiver's Ri' value */
-	int (*read_ri_prime)(struct intel_digital_port *intel_dig_port, u8 *ri);
+	int (*read_ri_prime)(struct intel_digital_port *dig_port, u8 *ri);
 
 	/* Determines if the receiver's KSV FIFO is ready for consumption */
-	int (*read_ksv_ready)(struct intel_digital_port *intel_dig_port,
+	int (*read_ksv_ready)(struct intel_digital_port *dig_port,
 			      bool *ksv_ready);
 
 	/* Reads the ksv fifo for num_downstream devices */
-	int (*read_ksv_fifo)(struct intel_digital_port *intel_dig_port,
+	int (*read_ksv_fifo)(struct intel_digital_port *dig_port,
 			     int num_downstream, u8 *ksv_fifo);
 
 	/* Reads a 32-bit part of V' from the receiver */
-	int (*read_v_prime_part)(struct intel_digital_port *intel_dig_port,
+	int (*read_v_prime_part)(struct intel_digital_port *dig_port,
 				 int i, u32 *part);
 
 	/* Enables HDCP signalling on the port */
-	int (*toggle_signalling)(struct intel_digital_port *intel_dig_port,
+	int (*toggle_signalling)(struct intel_digital_port *dig_port,
 				 bool enable);
 
 	/* Ensures the link is still protected */
-	bool (*check_link)(struct intel_digital_port *intel_dig_port);
+	bool (*check_link)(struct intel_digital_port *dig_port);
 
 	/* Detects panel's hdcp capability. This is optional for HDMI. */
-	int (*hdcp_capable)(struct intel_digital_port *intel_dig_port,
+	int (*hdcp_capable)(struct intel_digital_port *dig_port,
 			    bool *hdcp_capable);
 
 	/* HDCP adaptation(DP/HDMI) required on the port */
 	enum hdcp_wired_protocol protocol;
 
 	/* Detects whether sink is HDCP2.2 capable */
-	int (*hdcp_2_2_capable)(struct intel_digital_port *intel_dig_port,
+	int (*hdcp_2_2_capable)(struct intel_digital_port *dig_port,
 				bool *capable);
 
 	/* Write HDCP2.2 messages */
-	int (*write_2_2_msg)(struct intel_digital_port *intel_dig_port,
+	int (*write_2_2_msg)(struct intel_digital_port *dig_port,
 			     void *buf, size_t size);
 
 	/* Read HDCP2.2 messages */
-	int (*read_2_2_msg)(struct intel_digital_port *intel_dig_port,
+	int (*read_2_2_msg)(struct intel_digital_port *dig_port,
 			    u8 msg_id, void *buf, size_t size);
 
 	/*
@@ -343,11 +343,11 @@ struct intel_hdcp_shim {
 	 * type to Receivers. In DP HDCP2.2 Stream type is one of the input to
 	 * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI.
 	 */
-	int (*config_stream_type)(struct intel_digital_port *intel_dig_port,
+	int (*config_stream_type)(struct intel_digital_port *dig_port,
 				  bool is_repeater, u8 type);
 
 	/* HDCP2.2 Link Integrity Check */
-	int (*check_2_2_link)(struct intel_digital_port *intel_dig_port);
+	int (*check_2_2_link)(struct intel_digital_port *dig_port);
 };
 
 struct intel_hdcp {
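Note on the ops table above: intel_hdcp_shim just collects the transport-specific HDCP accessors behind function pointers, so the core state machine never cares whether bytes move over DP AUX or HDMI DDC. A minimal userspace-style sketch of the same pattern follows; all example_*/dp_* names are made up for illustration, not from this patch.

#include <stdio.h>

/* Generic stand-ins for the driver types; illustrative only. */
struct example_port { const char *name; };

struct example_hdcp_shim {
	int (*read_bksv)(struct example_port *port, unsigned char *bksv);
	int (*check_link)(struct example_port *port);
};

static int dp_read_bksv(struct example_port *port, unsigned char *bksv)
{
	bksv[0] = 0x01;	/* a real backend would issue an AUX/DDC read here */
	return 0;
}

static int dp_check_link(struct example_port *port)
{
	return 1;	/* a real backend would poll a link-integrity bit */
}

static const struct example_hdcp_shim dp_shim = {
	.read_bksv = dp_read_bksv,
	.check_link = dp_check_link,
};

int main(void)
{
	struct example_port port = { "DP-1" };
	unsigned char bksv[5];

	/* Core code calls through the table without knowing the transport. */
	if (!dp_shim.read_bksv(&port, bksv) && dp_shim.check_link(&port))
		printf("%s: HDCP transport ops OK\n", port.name);
	return 0;
}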
@@ -1434,9 +1434,9 @@ struct intel_dp_mst_encoder {
 };
 
 static inline enum dpio_channel
-vlv_dport_to_channel(struct intel_digital_port *dport)
+vlv_dig_port_to_channel(struct intel_digital_port *dig_port)
 {
-	switch (dport->base.port) {
+	switch (dig_port->base.port) {
 	case PORT_B:
 	case PORT_D:
 		return DPIO_CH0;
@@ -1448,9 +1448,9 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
 }
 
 static inline enum dpio_phy
-vlv_dport_to_phy(struct intel_digital_port *dport)
+vlv_dig_port_to_phy(struct intel_digital_port *dig_port)
 {
-	switch (dport->base.port) {
+	switch (dig_port->base.port) {
 	case PORT_B:
 	case PORT_C:
 		return DPIO_PHY0;
@@ -137,14 +137,12 @@ static const u8 valid_dsc_slicecount[] = {1, 2, 4};
  *
  * If a CPU or PCH DP output is attached to an eDP panel, this function
  * will return true, and false otherwise.
- *
- * This function is not safe to use prior to encoder type being set.
  */
 bool intel_dp_is_edp(struct intel_dp *intel_dp)
 {
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 
-	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
+	return dig_port->base.type == INTEL_OUTPUT_EDP;
 }
 
 static void intel_dp_link_down(struct intel_encoder *encoder,
@@ -218,10 +216,10 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
 /* Theoretical max between source and sink */
 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
 {
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	int source_max = intel_dig_port->max_lanes;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	int source_max = dig_port->max_lanes;
 	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
-	int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);
+	int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
 
 	return min3(source_max, sink_max, fia_max);
 }
@@ -253,8 +251,8 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 static int
 intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
 {
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct intel_encoder *encoder = &intel_dig_port->base;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *encoder = &dig_port->base;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	int max_dotclk = dev_priv->max_dotclk_freq;
 	int ds_max_dotclk;
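The common lane count above is simply the smallest of three independent limits: what the port is wired for, what the sink's DPCD advertises, and what the Type-C FIA allows. A tiny standalone illustration of that min3() reduction, with made-up values:

#include <stdio.h>

static int min3(int a, int b, int c)
{
	int m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	int source_max = 4;	/* dig_port->max_lanes */
	int sink_max = 2;	/* drm_dp_max_lane_count(dpcd) */
	int fia_max = 4;	/* intel_tc_port_fia_max_lane_count() */

	printf("common lanes: %d\n", min3(source_max, sink_max, fia_max));
	return 0;
}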
@@ -780,7 +778,7 @@ static void
 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	enum pipe pipe = intel_dp->pps_pipe;
 	bool pll_enabled, release_cl_override = false;
 	enum dpio_phy phy = DPIO_PHY(pipe);
@@ -790,14 +788,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 	if (drm_WARN(&dev_priv->drm,
 		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
 		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
-		     pipe_name(pipe), intel_dig_port->base.base.base.id,
-		     intel_dig_port->base.base.name))
+		     pipe_name(pipe), dig_port->base.base.base.id,
+		     dig_port->base.base.name))
 		return;
 
 	drm_dbg_kms(&dev_priv->drm,
 		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
-		    pipe_name(pipe), intel_dig_port->base.base.base.id,
-		    intel_dig_port->base.base.name);
+		    pipe_name(pipe), dig_port->base.base.base.id,
+		    dig_port->base.base.name);
 
 	/* Preserve the BIOS-computed detected bit. This is
 	 * supposed to be read-only.
@@ -893,7 +891,7 @@ static enum pipe
 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	enum pipe pipe;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
@@ -922,8 +920,8 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 	drm_dbg_kms(&dev_priv->drm,
 		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
 		    pipe_name(intel_dp->pps_pipe),
-		    intel_dig_port->base.base.base.id,
-		    intel_dig_port->base.base.name);
+		    dig_port->base.base.base.id,
+		    dig_port->base.base.name);
 
 	/* init power sequencer on this pipe and port */
 	intel_dp_init_panel_power_sequencer(intel_dp);
@@ -1011,8 +1009,8 @@ static void
 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	enum port port = intel_dig_port->base.port;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum port port = dig_port->base.port;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -1033,15 +1031,15 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 	if (intel_dp->pps_pipe == INVALID_PIPE) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "no initial power sequencer for [ENCODER:%d:%s]\n",
-			    intel_dig_port->base.base.base.id,
-			    intel_dig_port->base.base.name);
+			    dig_port->base.base.base.id,
+			    dig_port->base.base.name);
 		return;
 	}
 
 	drm_dbg_kms(&dev_priv->drm,
 		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
-		    intel_dig_port->base.base.base.id,
-		    intel_dig_port->base.base.name,
+		    dig_port->base.base.base.id,
+		    dig_port->base.base.name,
 		    pipe_name(intel_dp->pps_pipe));
 
 	intel_dp_init_panel_power_sequencer(intel_dp);
@@ -1306,9 +1304,9 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
 				int send_bytes,
 				u32 aux_clock_divider)
 {
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *dev_priv =
-			to_i915(intel_dig_port->base.base.dev);
+			to_i915(dig_port->base.base.dev);
 	u32 precharge, timeout;
 
 	if (IS_GEN(dev_priv, 6))
@@ -1336,10 +1334,10 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
 				int send_bytes,
 				u32 unused)
 {
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *i915 =
-			to_i915(intel_dig_port->base.base.dev);
-	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
+			to_i915(dig_port->base.base.dev);
+	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
 	u32 ret;
 
 	ret = DP_AUX_CH_CTL_SEND_BUSY |
@@ -1353,7 +1351,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
 	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
 
 	if (intel_phy_is_tc(i915, phy) &&
-	    intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
+	    dig_port->tc_mode == TC_PORT_TBT_ALT)
 		ret |= DP_AUX_CH_CTL_TBT_IO;
 
 	return ret;
@@ -1365,11 +1363,11 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
 		  u8 *recv, int recv_size,
 		  u32 aux_send_ctl_flags)
 {
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *i915 =
-			to_i915(intel_dig_port->base.base.dev);
+			to_i915(dig_port->base.base.dev);
 	struct intel_uncore *uncore = &i915->uncore;
-	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
+	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
 	bool is_tc_port = intel_phy_is_tc(i915, phy);
 	i915_reg_t ch_ctl, ch_data[5];
 	u32 aux_clock_divider;
@@ -1386,9 +1384,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
 		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
 
 	if (is_tc_port)
-		intel_tc_port_lock(intel_dig_port);
+		intel_tc_port_lock(dig_port);
 
-	aux_domain = intel_aux_power_domain(intel_dig_port);
+	aux_domain = intel_aux_power_domain(dig_port);
 
 	aux_wakeref = intel_display_power_get(i915, aux_domain);
 	pps_wakeref = pps_lock(intel_dp);
@@ -1547,7 +1545,7 @@ out:
 	intel_display_power_put_async(i915, aux_domain, aux_wakeref);
 
 	if (is_tc_port)
-		intel_tc_port_unlock(intel_dig_port);
+		intel_tc_port_unlock(dig_port);
 
 	return ret;
 }
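Worth noting in intel_dp_aux_xfer() above: the transfer brackets the AUX access in a fixed order (Type-C lock, then AUX power well, then PPS lock) and releases in reverse. A standalone sketch of just that bracketing, with stand-in helpers rather than the driver's real primitives:

#include <stdio.h>
#include <stdbool.h>

struct example_port { int id; };

static void tc_lock(struct example_port *p)    { printf("tc lock\n"); }
static void tc_unlock(struct example_port *p)  { printf("tc unlock\n"); }
static void power_get(struct example_port *p)  { printf("aux power get\n"); }
static void power_put(struct example_port *p)  { printf("aux power put\n"); }
static void pps_lock(struct example_port *p)   { printf("pps lock\n"); }
static void pps_unlock(struct example_port *p) { printf("pps unlock\n"); }
static int  do_transfer(struct example_port *p) { return 0; }

/* Acquire in one order, release in the exact reverse order. */
static int example_aux_xfer(struct example_port *port, bool is_tc_port)
{
	int ret;

	if (is_tc_port)
		tc_lock(port);	/* stabilize the Type-C mode first */
	power_get(port);	/* then power up the AUX well */
	pps_lock(port);		/* eDP panel-power sequencing last */

	ret = do_transfer(port);

	pps_unlock(port);
	power_put(port);
	if (is_tc_port)
		tc_unlock(port);
	return ret;
}

int main(void)
{
	struct example_port port = { 0 };

	return example_aux_xfer(&port, true);
}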
@@ -2893,7 +2891,7 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	u32 pp;
 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 	bool need_to_disable = !intel_dp->want_panel_vdd;
@@ -2910,11 +2908,11 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 		return need_to_disable;
 
 	intel_display_power_get(dev_priv,
-				intel_aux_power_domain(intel_dig_port));
+				intel_aux_power_domain(dig_port));
 
 	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
-		    intel_dig_port->base.base.base.id,
-		    intel_dig_port->base.base.name);
+		    dig_port->base.base.base.id,
+		    dig_port->base.base.name);
 
 	if (!edp_have_panel_power(intel_dp))
 		wait_panel_power_cycle(intel_dp);
@@ -2936,8 +2934,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 	if (!edp_have_panel_power(intel_dp)) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
-			    intel_dig_port->base.base.base.id,
-			    intel_dig_port->base.base.name);
+			    dig_port->base.base.base.id,
+			    dig_port->base.base.name);
 		msleep(intel_dp->panel_power_up_delay);
 	}
 
@@ -2970,7 +2968,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_digital_port *intel_dig_port =
+	struct intel_digital_port *dig_port =
 		dp_to_dig_port(intel_dp);
 	u32 pp;
 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
@@ -2983,8 +2981,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 		return;
 
 	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
-		    intel_dig_port->base.base.base.id,
-		    intel_dig_port->base.base.name);
+		    dig_port->base.base.base.id,
+		    dig_port->base.base.name);
 
 	pp = ilk_get_pp_control(intel_dp);
 	pp &= ~EDP_FORCE_VDD;
@@ -3004,7 +3002,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 	intel_dp->panel_power_off_time = ktime_get_boottime();
 
 	intel_display_power_put_unchecked(dev_priv,
-					  intel_aux_power_domain(intel_dig_port));
+					  intel_aux_power_domain(dig_port));
 }
 
 static void edp_panel_vdd_work(struct work_struct *__work)
@@ -3835,8 +3833,8 @@ static void g4x_pre_enable_dp(struct intel_atomic_state *state,
 
 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 {
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 	enum pipe pipe = intel_dp->pps_pipe;
 	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
 
@@ -3858,8 +3856,8 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 	 */
 	drm_dbg_kms(&dev_priv->drm,
 		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
-		    pipe_name(pipe), intel_dig_port->base.base.base.id,
-		    intel_dig_port->base.base.name);
+		    pipe_name(pipe), dig_port->base.base.base.id,
+		    dig_port->base.base.name);
 	intel_de_write(dev_priv, pp_on_reg, 0);
 	intel_de_posting_read(dev_priv, pp_on_reg);
 
@@ -4925,7 +4923,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
 			       const struct intel_crtc_state *crtc_state,
 			       unsigned int type)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct dp_sdp sdp = {};
 	ssize_t len;
@@ -4951,14 +4949,14 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
 	if (drm_WARN_ON(&dev_priv->drm, len < 0))
 		return;
 
-	intel_dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
+	dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
 }
 
 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
 			    const struct intel_crtc_state *crtc_state,
 			    struct drm_dp_vsc_sdp *vsc)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct dp_sdp sdp = {};
 	ssize_t len;
@@ -4968,7 +4966,7 @@ void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
 	if (drm_WARN_ON(&dev_priv->drm, len < 0))
 		return;
 
-	intel_dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
+	dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
 					&sdp, len);
 }
 
@@ -5128,7 +5126,7 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
 				  struct intel_crtc_state *crtc_state,
 				  struct drm_dp_vsc_sdp *vsc)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	unsigned int type = DP_SDP_VSC;
@@ -5143,7 +5141,7 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
 		      intel_hdmi_infoframe_enable(type)) == 0)
 		return;
 
-	intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
+	dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
 
 	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
 
@@ -5155,7 +5153,7 @@ static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encod
 						     struct intel_crtc_state *crtc_state,
 						     struct hdmi_drm_infoframe *drm_infoframe)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
 	struct dp_sdp sdp = {};
@@ -5165,8 +5163,8 @@ static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encod
 		      intel_hdmi_infoframe_enable(type)) == 0)
 		return;
 
-	intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
-				       sizeof(sdp));
+	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
+				 sizeof(sdp));
 
 	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
 							 sizeof(sdp));
@@ -5368,10 +5366,10 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv =
			to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_dp_phy_test_params *data =
 		&intel_dp->compliance.test_data.phytest;
-	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
 	enum pipe pipe = crtc->pipe;
 	u32 pattern_val;
 
@@ -5433,10 +5431,10 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
 static void
 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp)
 {
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
 	enum pipe pipe = crtc->pipe;
 	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
 
@@ -5459,11 +5457,11 @@ intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp)
 static void
 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt)
 {
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	enum port port = intel_dig_port->base.port;
-	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+	enum port port = dig_port->base.port;
+	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
 	enum pipe pipe = crtc->pipe;
 	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
 
@@ -6334,10 +6332,10 @@ intel_dp_connector_unregister(struct drm_connector *connector)
 
 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder));
-	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
+	struct intel_dp *intel_dp = &dig_port->dp;
 
-	intel_dp_mst_encoder_cleanup(intel_dig_port);
+	intel_dp_mst_encoder_cleanup(dig_port);
 	if (intel_dp_is_edp(intel_dp)) {
 		intel_wakeref_t wakeref;
 
@@ -6396,11 +6394,11 @@ static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
 }
 
 static
-int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
 				u8 *an)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&dig_port->base.base));
 	static const struct drm_dp_aux_msg msg = {
 		.request = DP_AUX_NATIVE_WRITE,
 		.address = DP_AUX_HDCP_AKSV,
@@ -6411,7 +6409,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
 	int ret;
 
 	/* Output An first, that's easy */
-	dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
+	dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN,
 				     an, DRM_HDCP_AN_LEN);
 	if (dpcd_ret != DRM_HDCP_AN_LEN) {
 		drm_dbg_kms(&i915->drm,
@@ -6450,13 +6448,13 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
 	return 0;
 }
 
-static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
+static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port,
 				   u8 *bksv)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	ssize_t ret;
 
-	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
+	ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
 			       DRM_HDCP_KSV_LEN);
 	if (ret != DRM_HDCP_KSV_LEN) {
 		drm_dbg_kms(&i915->drm,
@@ -6466,10 +6464,10 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
 	return 0;
 }
 
-static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
+static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
 				      u8 *bstatus)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	ssize_t ret;
 
 	/*
@@ -6477,7 +6475,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
 	 * definition by different names. In the HDMI spec, it's called BSTATUS,
 	 * but in DP it's called BINFO.
 	 */
-	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
+	ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO,
 			       bstatus, DRM_HDCP_BSTATUS_LEN);
 	if (ret != DRM_HDCP_BSTATUS_LEN) {
 		drm_dbg_kms(&i915->drm,
@@ -6488,13 +6486,13 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port,
 			     u8 *bcaps)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	ssize_t ret;
 
-	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
+	ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
 			       bcaps, 1);
 	if (ret != 1) {
 		drm_dbg_kms(&i915->drm,
@@ -6506,13 +6504,13 @@ int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
 				   bool *repeater_present)
 {
 	ssize_t ret;
 	u8 bcaps;
 
-	ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
+	ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
 	if (ret)
 		return ret;
 
@@ -6521,13 +6519,13 @@ int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
 				u8 *ri_prime)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	ssize_t ret;
 
-	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
+	ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
 			       ri_prime, DRM_HDCP_RI_LEN);
 	if (ret != DRM_HDCP_RI_LEN) {
 		drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n",
@@ -6538,14 +6536,14 @@ int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
 				 bool *ksv_ready)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	ssize_t ret;
 	u8 bstatus;
 
-	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+	ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
 			       &bstatus, 1);
 	if (ret != 1) {
 		drm_dbg_kms(&i915->drm,
@@ -6557,17 +6555,17 @@ int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
 				int num_downstream, u8 *ksv_fifo)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	ssize_t ret;
 	int i;
 
 	/* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
 	for (i = 0; i < num_downstream; i += 3) {
 		size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
-		ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+		ret = drm_dp_dpcd_read(&dig_port->dp.aux,
 				       DP_AUX_HDCP_KSV_FIFO,
 				       ksv_fifo + i * DRM_HDCP_KSV_LEN,
 				       len);
@@ -6582,16 +6580,16 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
 				    int i, u32 *part)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	ssize_t ret;
 
 	if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
 		return -EINVAL;
 
-	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+	ret = drm_dp_dpcd_read(&dig_port->dp.aux,
 			       DP_AUX_HDCP_V_PRIME(i), part,
 			       DRM_HDCP_V_PRIME_PART_LEN);
 	if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
@@ -6603,7 +6601,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
 				    bool enable)
 {
 	/* Not used for single stream DisplayPort setups */
@@ -6611,13 +6609,13 @@ int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
 }
 
 static
-bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
+bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	ssize_t ret;
 	u8 bstatus;
 
-	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+	ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
 			       &bstatus, 1);
 	if (ret != 1) {
 		drm_dbg_kms(&i915->drm,
@@ -6629,13 +6627,13 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
 }
 
 static
-int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_capable(struct intel_digital_port *dig_port,
 			  bool *hdcp_capable)
 {
 	ssize_t ret;
 	u8 bcaps;
 
-	ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
+	ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
 	if (ret)
 		return ret;
 
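The KSV FIFO loop above leans on DP's 15-byte read window: at most three 5-byte KSVs per AUX read, with the final pass shortened. The indexing can be illustrated standalone; example_dpcd_read() below is a stub standing in for drm_dp_dpcd_read():

#include <stdio.h>

#define KSV_LEN 5	/* DRM_HDCP_KSV_LEN */

/* Stub AUX read: pretend every transfer succeeds. */
static long example_dpcd_read(unsigned char *dst, size_t len)
{
	for (size_t i = 0; i < len; i++)
		dst[i] = 0xAB;
	return (long)len;
}

int main(void)
{
	unsigned char ksv_fifo[7 * KSV_LEN];
	int num_downstream = 7;

	/* 15-byte window: 3 entries of 5 bytes per pass. */
	for (int i = 0; i < num_downstream; i += 3) {
		int entries = num_downstream - i < 3 ? num_downstream - i : 3;
		size_t len = (size_t)entries * KSV_LEN;

		if (example_dpcd_read(ksv_fifo + (size_t)i * KSV_LEN, len) != (long)len)
			return 1;
		printf("read %zu bytes starting at entry %d\n", len, i);
	}
	return 0;
}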
@@ -6693,13 +6691,13 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
 };
 
 static int
-intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port,
 			      u8 *rx_status)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	ssize_t ret;
 
-	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+	ret = drm_dp_dpcd_read(&dig_port->dp.aux,
 			       DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
 			       HDCP_2_2_DP_RXSTATUS_LEN);
 	if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
@@ -6712,14 +6710,14 @@ intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
 				  u8 msg_id, bool *msg_ready)
 {
 	u8 rx_status;
 	int ret;
 
 	*msg_ready = false;
-	ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+	ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
 	if (ret < 0)
 		return ret;
 
@@ -6745,11 +6743,11 @@ int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
 }
 
 static ssize_t
-intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
 			    const struct hdcp2_dp_msg_data *hdcp2_msg_data)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
-	struct intel_dp *dp = &intel_dig_port->dp;
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+	struct intel_dp *dp = &dig_port->dp;
 	struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
 	u8 msg_id = hdcp2_msg_data->msg_id;
 	int ret, timeout;
@@ -6773,7 +6771,7 @@ intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
 		 * the timeout at wait for CP_IRQ.
 		 */
 		intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
-		ret = hdcp2_detect_msg_availability(intel_dig_port,
+		ret = hdcp2_detect_msg_availability(dig_port,
 						    msg_id, &msg_ready);
 		if (!msg_ready)
 			ret = -ETIMEDOUT;
@@ -6799,10 +6797,10 @@ static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
 }
 
 static
-int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
 			     void *buf, size_t size)
 {
-	struct intel_dp *dp = &intel_dig_port->dp;
+	struct intel_dp *dp = &dig_port->dp;
 	struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
 	unsigned int offset;
 	u8 *byte = buf;
@@ -6825,7 +6823,7 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
 		len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
 			DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
 
-		ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
+		ret = drm_dp_dpcd_write(&dig_port->dp.aux,
 					offset, (void *)byte, len);
 		if (ret < 0)
 			return ret;
@@ -6839,13 +6837,13 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
 }
 
 static
-ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
+ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port)
 {
 	u8 rx_info[HDCP_2_2_RXINFO_LEN];
 	u32 dev_cnt;
 	ssize_t ret;
 
-	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+	ret = drm_dp_dpcd_read(&dig_port->dp.aux,
 			       DP_HDCP_2_2_REG_RXINFO_OFFSET,
 			       (void *)rx_info, HDCP_2_2_RXINFO_LEN);
 	if (ret != HDCP_2_2_RXINFO_LEN)
@@ -6865,10 +6863,10 @@ ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
 }
 
 static
-int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
 			    u8 msg_id, void *buf, size_t size)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	unsigned int offset;
 	u8 *byte = buf;
 	ssize_t ret, bytes_to_recv, len;
@@ -6879,12 +6877,12 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
 		return -EINVAL;
 	offset = hdcp2_msg_data->offset;
 
-	ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
+	ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data);
 	if (ret < 0)
 		return ret;
 
 	if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
-		ret = get_receiver_id_list_size(intel_dig_port);
+		ret = get_receiver_id_list_size(dig_port);
 		if (ret < 0)
 			return ret;
 
@@ -6899,7 +6897,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
 		len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
 			DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
 
-		ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
+		ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset,
 				       (void *)byte, len);
 		if (ret < 0) {
 			drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
@@ -6918,7 +6916,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
 				      bool is_repeater, u8 content_type)
 {
 	int ret;
@@ -6937,7 +6935,7 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
 	stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
 	stream_type_msg.stream_type = content_type;
 
-	ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
+	ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg,
 				       sizeof(stream_type_msg));
 
 	return ret < 0 ? ret : 0;
@@ -6945,12 +6943,12 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port)
 {
 	u8 rx_status;
 	int ret;
 
-	ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+	ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
 	if (ret)
 		return ret;
 
@@ -6965,14 +6963,14 @@ int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
 }
 
 static
-int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port,
 			   bool *capable)
 {
 	u8 rx_caps[3];
 	int ret;
 
 	*capable = false;
-	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+	ret = drm_dp_dpcd_read(&dig_port->dp.aux,
 			       DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
 			       rx_caps, HDCP_2_2_RXCAPS_LEN);
 	if (ret != HDCP_2_2_RXCAPS_LEN)
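The HDCP2.2 read/write paths above chunk messages at DP_AUX_MAX_PAYLOAD_BYTES, since a single AUX transaction moves at most 16 bytes. A standalone sketch of the receive loop; the offset value and example_aux_read() are stand-ins, not real DPCD definitions:

#include <stdio.h>
#include <string.h>

#define AUX_MAX_PAYLOAD 16	/* DP_AUX_MAX_PAYLOAD_BYTES */

/* Stub AUX read returning fake data. */
static long example_aux_read(unsigned off, unsigned char *dst, size_t len)
{
	memset(dst, 0x5A, len);
	return (long)len;
}

int main(void)
{
	unsigned char msg[100];
	size_t bytes_to_recv = sizeof(msg);
	unsigned offset = 0x1000;	/* hypothetical message register */
	unsigned char *byte = msg;

	/* The message register streams the payload, so offset stays fixed. */
	while (bytes_to_recv) {
		size_t len = bytes_to_recv > AUX_MAX_PAYLOAD ?
			     AUX_MAX_PAYLOAD : bytes_to_recv;

		if (example_aux_read(offset, byte, len) < 0)
			return 1;
		bytes_to_recv -= len;
		byte += len;
	}
	printf("received %zu bytes in %d-byte chunks\n",
	       sizeof(msg), AUX_MAX_PAYLOAD);
	return 0;
}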
@@ -7251,12 +7249,12 @@ static bool intel_edp_have_power(struct intel_dp *intel_dp)
 }
 
 enum irqreturn
-intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
+intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
-	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+	struct intel_dp *intel_dp = &dig_port->dp;
 
-	if (intel_dig_port->base.type == INTEL_OUTPUT_EDP &&
+	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
 	    (long_hpd || !intel_edp_have_power(intel_dp))) {
 		/*
 		 * vdd off can generate a long/short pulse on eDP which
@@ -7267,14 +7265,14 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 		drm_dbg_kms(&i915->drm,
 			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
 			    long_hpd ? "long" : "short",
-			    intel_dig_port->base.base.base.id,
-			    intel_dig_port->base.base.name);
+			    dig_port->base.base.base.id,
+			    dig_port->base.base.name);
 		return IRQ_HANDLED;
 	}
 
 	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
-		    intel_dig_port->base.base.base.id,
-		    intel_dig_port->base.base.name,
+		    dig_port->base.base.base.id,
+		    dig_port->base.base.name,
 		    long_hpd ? "long" : "short");
 
 	if (long_hpd) {
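The eDP special case above exists because toggling panel VDD can itself fire HPD pulses; filtering them avoids spurious detect cycles. A standalone sketch of the gating decision, with stand-in names rather than the driver's types:

#include <stdio.h>
#include <stdbool.h>

static bool edp_have_power(void)
{
	return false;	/* pretend the panel is powered down */
}

/* Mirrors the gate above: eDP ports ignore pulses generated by VDD off. */
static void example_hpd_pulse(bool is_edp, bool long_hpd)
{
	if (is_edp && (long_hpd || !edp_have_power())) {
		printf("ignoring %s hpd on eDP\n", long_hpd ? "long" : "short");
		return;
	}

	/* long pulse: full detect cycle; short pulse: link status check */
	printf("got hpd irq - %s\n", long_hpd ? "long" : "short");
}

int main(void)
{
	example_hpd_pulse(true, true);
	example_hpd_pulse(false, false);
	return 0;
}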
@@ -8137,12 +8135,12 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
 }
 
 bool
-intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+intel_dp_init_connector(struct intel_digital_port *dig_port,
 			struct intel_connector *intel_connector)
 {
 	struct drm_connector *connector = &intel_connector->base;
-	struct intel_dp *intel_dp = &intel_dig_port->dp;
-	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct intel_dp *intel_dp = &dig_port->dp;
+	struct intel_encoder *intel_encoder = &dig_port->base;
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port = intel_encoder->port;
@@ -8153,12 +8151,14 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	INIT_WORK(&intel_connector->modeset_retry_work,
 		  intel_dp_modeset_retry_work_fn);
 
-	if (drm_WARN(dev, intel_dig_port->max_lanes < 1,
+	if (drm_WARN(dev, dig_port->max_lanes < 1,
 		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
-		     intel_dig_port->max_lanes, intel_encoder->base.base.id,
+		     dig_port->max_lanes, intel_encoder->base.base.id,
 		     intel_encoder->base.name))
 		return false;
 
+	intel_dp_set_source_rates(intel_dp);
+
 	intel_dp->reset_link_params = true;
 	intel_dp->pps_pipe = INVALID_PIPE;
 	intel_dp->active_pipe = INVALID_PIPE;
@@ -8174,22 +8174,28 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 		 */
 		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
 		type = DRM_MODE_CONNECTOR_eDP;
-		intel_encoder->type = INTEL_OUTPUT_EDP;
-
-		/* eDP only on port B and/or C on vlv/chv */
-		if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
-				      IS_CHERRYVIEW(dev_priv)) &&
-				port != PORT_B && port != PORT_C))
-			return false;
 	} else {
 		type = DRM_MODE_CONNECTOR_DisplayPort;
 	}
 
-	intel_dp_set_source_rates(intel_dp);
-
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		intel_dp->active_pipe = vlv_active_pipe(intel_dp);
 
+	/*
+	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
+	 * for DP the encoder type can be set by the caller to
+	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
+	 */
+	if (type == DRM_MODE_CONNECTOR_eDP)
+		intel_encoder->type = INTEL_OUTPUT_EDP;
+
+	/* eDP only on port B and/or C on vlv/chv */
+	if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
+			      IS_CHERRYVIEW(dev_priv)) &&
+			intel_dp_is_edp(intel_dp) &&
+			port != PORT_B && port != PORT_C))
+		return false;
+
 	drm_dbg_kms(&dev_priv->drm,
 		    "Adding %s connector on [ENCODER:%d:%s]\n",
 		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
@@ -8218,12 +8224,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
 
 	/* init MST on ports that can support it */
-	intel_dp_mst_encoder_init(intel_dig_port,
+	intel_dp_mst_encoder_init(dig_port,
 				  intel_connector->base.base.id);
 
 	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
 		intel_dp_aux_fini(intel_dp);
-		intel_dp_mst_encoder_cleanup(intel_dig_port);
+		intel_dp_mst_encoder_cleanup(dig_port);
 		goto fail;
 	}
 
@@ -8258,20 +8264,20 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
 		  i915_reg_t output_reg,
 		  enum port port)
 {
-	struct intel_digital_port *intel_dig_port;
+	struct intel_digital_port *dig_port;
 	struct intel_encoder *intel_encoder;
 	struct drm_encoder *encoder;
 	struct intel_connector *intel_connector;
 
-	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
-	if (!intel_dig_port)
+	dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+	if (!dig_port)
 		return false;
 
 	intel_connector = intel_connector_alloc();
 	if (!intel_connector)
 		goto err_connector_alloc;
 
-	intel_encoder = &intel_dig_port->base;
+	intel_encoder = &dig_port->base;
 	encoder = &intel_encoder->base;
 
 	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
@@ -8307,34 +8313,34 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
 
 	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
 	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
-		intel_dig_port->dp.set_link_train = cpt_set_link_train;
+		dig_port->dp.set_link_train = cpt_set_link_train;
 	else
-		intel_dig_port->dp.set_link_train = g4x_set_link_train;
+		dig_port->dp.set_link_train = g4x_set_link_train;
 
 	if (IS_CHERRYVIEW(dev_priv))
-		intel_dig_port->dp.set_signal_levels = chv_set_signal_levels;
+		dig_port->dp.set_signal_levels = chv_set_signal_levels;
 	else if (IS_VALLEYVIEW(dev_priv))
-		intel_dig_port->dp.set_signal_levels = vlv_set_signal_levels;
+		dig_port->dp.set_signal_levels = vlv_set_signal_levels;
 	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
-		intel_dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
+		dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
 	else if (IS_GEN(dev_priv, 6) && port == PORT_A)
-		intel_dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
+		dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
 	else
-		intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels;
+		dig_port->dp.set_signal_levels = g4x_set_signal_levels;
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
 	    (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
-		intel_dig_port->dp.preemph_max = intel_dp_preemph_max_3;
-		intel_dig_port->dp.voltage_max = intel_dp_voltage_max_3;
+		dig_port->dp.preemph_max = intel_dp_preemph_max_3;
+		dig_port->dp.voltage_max = intel_dp_voltage_max_3;
 	} else {
-		intel_dig_port->dp.preemph_max = intel_dp_preemph_max_2;
-		intel_dig_port->dp.voltage_max = intel_dp_voltage_max_2;
+		dig_port->dp.preemph_max = intel_dp_preemph_max_2;
+		dig_port->dp.voltage_max = intel_dp_voltage_max_2;
 	}
 
-	intel_dig_port->dp.output_reg = output_reg;
-	intel_dig_port->max_lanes = 4;
-	intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
-	intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
+	dig_port->dp.output_reg = output_reg;
+	dig_port->max_lanes = 4;
+	dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
+	dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
 
 	intel_encoder->type = INTEL_OUTPUT_DP;
 	intel_encoder->power_domain = intel_port_to_power_domain(port);
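intel_dp_init() above picks set_link_train/set_signal_levels once per platform and stores function pointers, so per-frame paths dispatch without repeated platform checks. The shape of that pattern in miniature, with made-up names:

#include <stdio.h>

struct example_dp {
	void (*set_signal_levels)(struct example_dp *dp);
};

static void chv_levels(struct example_dp *dp) { printf("chv levels\n"); }
static void g4x_levels(struct example_dp *dp) { printf("g4x levels\n"); }

/* Pick the hook once at init; hot paths then call through the pointer. */
static void example_init(struct example_dp *dp, int is_chv)
{
	if (is_chv)
		dp->set_signal_levels = chv_levels;
	else
		dp->set_signal_levels = g4x_levels;
}

int main(void)
{
	struct example_dp dp;

	example_init(&dp, 1);
	dp.set_signal_levels(&dp);
	return 0;
}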
@@ -8349,25 +8355,25 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
 	intel_encoder->cloneable = 0;
 	intel_encoder->port = port;
 
-	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+	dig_port->hpd_pulse = intel_dp_hpd_pulse;
 
 	if (HAS_GMCH(dev_priv)) {
 		if (IS_GM45(dev_priv))
-			intel_dig_port->connected = gm45_digital_port_connected;
+			dig_port->connected = gm45_digital_port_connected;
 		else
-			intel_dig_port->connected = g4x_digital_port_connected;
+			dig_port->connected = g4x_digital_port_connected;
 	} else {
 		if (port == PORT_A)
-			intel_dig_port->connected = ilk_digital_port_connected;
+			dig_port->connected = ilk_digital_port_connected;
 		else
-			intel_dig_port->connected = ibx_digital_port_connected;
+			dig_port->connected = ibx_digital_port_connected;
 	}
 
 	if (port != PORT_A)
-		intel_infoframe_init(intel_dig_port);
+		intel_infoframe_init(dig_port);
 
-	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
-	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
+	dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+	if (!intel_dp_init_connector(dig_port, intel_connector))
 		goto err_init_connector;
 
 	return true;
@@ -8377,7 +8383,7 @@ err_init_connector:
 err_encoder_init:
 	kfree(intel_connector);
 err_connector_alloc:
-	kfree(intel_dig_port);
+	kfree(dig_port);
 	return false;
 }
 
@@ -40,7 +40,7 @@ bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
 			   enum pipe *pipe);
 bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
 		   enum port port);
-bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+bool intel_dp_init_connector(struct intel_digital_port *dig_port,
 			     struct intel_connector *intel_connector);
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
 			      int link_rate, u8 lane_count,
@@ -61,7 +61,7 @@ int intel_dp_compute_config(struct intel_encoder *encoder,
 			    struct drm_connector_state *conn_state);
 bool intel_dp_is_edp(struct intel_dp *intel_dp);
 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
-enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
 				  bool long_hpd);
 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
 			    const struct drm_connector_state *conn_state);
@@ -52,6 +52,7 @@ static u8 dp_voltage_max(u8 preemph)
 void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
 			       const u8 link_status[DP_LINK_STATUS_SIZE])
 {
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 	u8 v = 0;
 	u8 p = 0;
 	int lane;
@@ -64,12 +65,20 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
 	}
 
 	preemph_max = intel_dp->preemph_max(intel_dp);
+	drm_WARN_ON_ONCE(&i915->drm,
+			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
+			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
+
 	if (p >= preemph_max)
 		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
 	v = min(v, dp_voltage_max(p));
 
 	voltage_max = intel_dp->voltage_max(intel_dp);
+	drm_WARN_ON_ONCE(&i915->drm,
+			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
+			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
+
 	if (v >= voltage_max)
 		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
 
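The clamp pattern above caps the requested training level at the platform maximum and advertises that by OR-ing in the MAX_*_REACHED bit, which tells the sink to stop asking for more. A standalone illustration; the constants mirror drm_dp_helper.h values but should be treated as illustrative:

#include <stdio.h>

#define TRAIN_MAX_SWING_REACHED	(1 << 2)
#define VOLTAGE_LEVEL_2		2

/* Clamp a requested level to the platform max and flag saturation. */
static unsigned char clamp_voltage(unsigned char v, unsigned char voltage_max)
{
	if (v >= voltage_max)
		v = voltage_max | TRAIN_MAX_SWING_REACHED;
	return v;
}

int main(void)
{
	/* Platform reports max level 2; sink asked for level 3. */
	unsigned char v = clamp_voltage(3, VOLTAGE_LEVEL_2);

	printf("level %u, max reached: %s\n", v & 3,
	       (v & TRAIN_MAX_SWING_REACHED) ? "yes" : "no");
	return 0;
}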
@@ -342,8 +342,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
 				 const struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
-	struct intel_digital_port *intel_dig_port = intel_mst->primary;
-	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_digital_port *dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &dig_port->dp;
 	struct intel_connector *connector =
 		to_intel_connector(old_conn_state->connector);
 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
@@ -369,8 +369,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
 				      const struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
-	struct intel_digital_port *intel_dig_port = intel_mst->primary;
-	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_digital_port *dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &dig_port->dp;
 	struct intel_connector *connector =
 		to_intel_connector(old_conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -421,7 +421,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
 	 * the transcoder clock select is set to none.
 	 */
 	if (last_mst_stream)
-		intel_dp_set_infoframes(&intel_dig_port->base, false,
+		intel_dp_set_infoframes(&dig_port->base, false,
 					old_crtc_state, NULL);
 	/*
 	 * From TGL spec: "If multi-stream slave transcoder: Configure
@@ -436,7 +436,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
 
 	intel_mst->connector = NULL;
 	if (last_mst_stream)
-		intel_dig_port->base.post_disable(state, &intel_dig_port->base,
+		dig_port->base.post_disable(state, &dig_port->base,
 						old_crtc_state, NULL);
 
 	drm_dbg_kms(&dev_priv->drm, "active links %d\n",
@@ -449,11 +449,11 @@ static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
 					const struct drm_connector_state *conn_state)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
-	struct intel_digital_port *intel_dig_port = intel_mst->primary;
-	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_digital_port *dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &dig_port->dp;
 
 	if (intel_dp->active_mst_links == 0)
-		intel_dig_port->base.pre_pll_enable(state, &intel_dig_port->base,
+		dig_port->base.pre_pll_enable(state, &dig_port->base,
 						    pipe_config, NULL);
 }
 
@@ -463,8 +463,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
 				    const struct drm_connector_state *conn_state)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
-	struct intel_digital_port *intel_dig_port = intel_mst->primary;
-	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_digital_port *dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &dig_port->dp;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_connector *connector =
 		to_intel_connector(conn_state->connector);
@@ -490,7 +490,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
 	drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
 
 	if (first_mst_stream)
-		intel_dig_port->base.pre_enable(state, &intel_dig_port->base,
+		dig_port->base.pre_enable(state, &dig_port->base,
 						pipe_config, NULL);
 
 	ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
@@ -506,7 +506,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
 
 	/*
 	 * Before Gen 12 this is not done as part of
-	 * intel_dig_port->base.pre_enable() and should be done here. For
+	 * dig_port->base.pre_enable() and should be done here. For
 	 * Gen 12+ the step in which this should be done is different for the
 	 * first MST stream, so it's done on the DDI for the first stream and
 	 * here for the following ones.
@@ -525,8 +525,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
 				const struct drm_connector_state *conn_state)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
-	struct intel_digital_port *intel_dig_port = intel_mst->primary;
-	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_digital_port *dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &dig_port->dp;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 val;
 
@@ -572,9 +572,9 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
 					struct intel_crtc_state *pipe_config)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
-	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_digital_port *dig_port = intel_mst->primary;
 
-	intel_ddi_get_config(&intel_dig_port->base, pipe_config);
+	intel_ddi_get_config(&dig_port->base, pipe_config);
 }
 
 static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
@@ -639,39 +639,60 @@ static int intel_dp_mst_get_modes(struct drm_connector *connector)
 	return intel_dp_mst_get_ddc_modes(connector);
 }
 
-static enum drm_mode_status
-intel_dp_mst_mode_valid(struct drm_connector *connector,
-			struct drm_display_mode *mode)
+static int
+intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
+			    struct drm_display_mode *mode,
+			    struct drm_modeset_acquire_ctx *ctx,
+			    enum drm_mode_status *status)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct intel_dp *intel_dp = intel_connector->mst_port;
+	struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
+	struct drm_dp_mst_port *port = intel_connector->port;
+	const int min_bpp = 18;
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 	int max_rate, mode_rate, max_lanes, max_link_clock;
+	int ret;
 
-	if (drm_connector_is_unregistered(connector))
-		return MODE_ERROR;
+	if (drm_connector_is_unregistered(connector)) {
+		*status = MODE_ERROR;
+		return 0;
+	}
 
-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-		return MODE_NO_DBLESCAN;
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+		*status = MODE_NO_DBLESCAN;
+		return 0;
+	}
 
 	max_link_clock = intel_dp_max_link_rate(intel_dp);
 	max_lanes = intel_dp_max_lane_count(intel_dp);
 
 	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
-	mode_rate = intel_dp_link_required(mode->clock, 18);
+	mode_rate = intel_dp_link_required(mode->clock, min_bpp);
 
-	/* TODO - validate mode against available PBN for link */
-	if (mode->clock < 10000)
-		return MODE_CLOCK_LOW;
+	ret = drm_modeset_lock(&mgr->base.lock, ctx);
+	if (ret)
+		return ret;
 
-	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
-		return MODE_H_ILLEGAL;
+	if (mode_rate > max_rate || mode->clock > max_dotclk ||
+	    drm_dp_calc_pbn_mode(mode->clock, min_bpp, false) > port->full_pbn) {
+		*status = MODE_CLOCK_HIGH;
+		return 0;
+	}
 
-	if (mode_rate > max_rate || mode->clock > max_dotclk)
-		return MODE_CLOCK_HIGH;
+	if (mode->clock < 10000) {
+		*status = MODE_CLOCK_LOW;
+		return 0;
+	}
 
-	return intel_mode_valid_max_plane_size(dev_priv, mode);
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+		*status = MODE_H_ILLEGAL;
+		return 0;
+	}
+
+	*status = intel_mode_valid_max_plane_size(dev_priv, mode);
+	return 0;
 }
 
 static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
@ -700,7 +721,7 @@ intel_dp_mst_detect(struct drm_connector *connector,
|
|||
|
||||
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
|
||||
.get_modes = intel_dp_mst_get_modes,
|
||||
.mode_valid = intel_dp_mst_mode_valid,
|
||||
.mode_valid_ctx = intel_dp_mst_mode_valid_ctx,
|
||||
.atomic_best_encoder = intel_mst_atomic_best_encoder,
|
||||
.atomic_check = intel_dp_mst_atomic_check,
|
||||
.detect_ctx = intel_dp_mst_detect,
|
||||
|
@ -732,8 +753,8 @@ static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
|
|||
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
|
||||
{
|
||||
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
|
||||
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_device *dev = intel_dig_port->base.base.dev;
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_device *dev = dig_port->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_connector *intel_connector;
|
||||
struct drm_connector *connector;
|
||||
|
@ -808,11 +829,11 @@ static const struct drm_dp_mst_topology_cbs mst_cbs = {
|
|||
};
|
||||
|
||||
static struct intel_dp_mst_encoder *
|
||||
intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe)
|
||||
intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe pipe)
|
||||
{
|
||||
struct intel_dp_mst_encoder *intel_mst;
|
||||
struct intel_encoder *intel_encoder;
|
||||
struct drm_device *dev = intel_dig_port->base.base.dev;
|
||||
struct drm_device *dev = dig_port->base.base.dev;
|
||||
|
||||
intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
|
||||
|
||||
|
@ -821,14 +842,14 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
|
|||
|
||||
intel_mst->pipe = pipe;
|
||||
intel_encoder = &intel_mst->base;
|
||||
intel_mst->primary = intel_dig_port;
|
||||
intel_mst->primary = dig_port;
|
||||
|
||||
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
|
||||
DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
|
||||
|
||||
intel_encoder->type = INTEL_OUTPUT_DP_MST;
|
||||
intel_encoder->power_domain = intel_dig_port->base.power_domain;
|
||||
intel_encoder->port = intel_dig_port->base.port;
|
||||
intel_encoder->power_domain = dig_port->base.power_domain;
|
||||
intel_encoder->port = dig_port->base.port;
|
||||
intel_encoder->cloneable = 0;
|
||||
/*
|
||||
* This is wrong, but broken userspace uses the intersection
|
||||
|
@ -855,29 +876,29 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
|
|||
}
|
||||
|
||||
static bool
|
||||
intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
|
||||
intel_dp_create_fake_mst_encoders(struct intel_digital_port *dig_port)
|
||||
{
|
||||
struct intel_dp *intel_dp = &intel_dig_port->dp;
|
||||
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
|
||||
struct intel_dp *intel_dp = &dig_port->dp;
|
||||
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
|
||||
enum pipe pipe;
|
||||
|
||||
for_each_pipe(dev_priv, pipe)
|
||||
intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(intel_dig_port, pipe);
|
||||
intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(dig_port, pipe);
|
||||
return true;
|
||||
}
|
||||
|
||||
int
|
||||
intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
|
||||
intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
|
||||
{
|
||||
return intel_dig_port->dp.active_mst_links;
|
||||
return dig_port->dp.active_mst_links;
|
||||
}
|
||||
|
||||
int
|
||||
intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
|
||||
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
|
||||
struct intel_dp *intel_dp = &intel_dig_port->dp;
|
||||
enum port port = intel_dig_port->base.port;
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
struct intel_dp *intel_dp = &dig_port->dp;
|
||||
enum port port = dig_port->base.port;
|
||||
int ret;
|
||||
|
||||
if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
|
||||
|
@ -892,7 +913,7 @@ intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_ba
|
|||
intel_dp->mst_mgr.cbs = &mst_cbs;
|
||||
|
||||
/* create encoders */
|
||||
intel_dp_create_fake_mst_encoders(intel_dig_port);
|
||||
intel_dp_create_fake_mst_encoders(dig_port);
|
||||
ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
|
||||
&intel_dp->aux, 16, 3, conn_base_id);
|
||||
if (ret)
|
||||
|
@ -904,9 +925,9 @@ intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_ba
|
|||
}
|
||||
|
||||
void
|
||||
intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
|
||||
intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
|
||||
{
|
||||
struct intel_dp *intel_dp = &intel_dig_port->dp;
|
||||
struct intel_dp *intel_dp = &dig_port->dp;
|
||||
|
||||
if (!intel_dp->can_mst)
|
||||
return;
|
||||
|
|
|
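
The hunks above convert the i915 MST connector from .mode_valid to the new .mode_valid_ctx hook so the PBN check against port->full_pbn can take the MST manager's modeset lock and propagate -EDEADLK to the core's backoff-and-retry logic; the short header hunk that follows only carries the dig_port rename into the intel_dp_mst.h prototypes. A minimal sketch of the .mode_valid_ctx contract, with my_connector_mode_valid_ctx and my_lock as illustrative stand-ins:

	/*
	 * Sketch only: the hook returns 0 (or a negative error such as
	 * -EDEADLK from drm_modeset_lock()) and reports the mode verdict
	 * through *status instead of the return value.
	 */
	static int my_connector_mode_valid_ctx(struct drm_connector *connector,
					       struct drm_display_mode *mode,
					       struct drm_modeset_acquire_ctx *ctx,
					       enum drm_mode_status *status)
	{
		int ret;

		ret = drm_modeset_lock(&my_lock, ctx);
		if (ret)
			return ret;	/* e.g. -EDEADLK: core backs off and retries */

		/* locks held: report the verdict via *status and return 0 */
		*status = mode->clock > 600000 ? MODE_CLOCK_HIGH : MODE_OK;
		return 0;
	}
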
@@ -11,9 +11,9 @@
 struct intel_digital_port;
 struct intel_crtc_state;

-int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
-void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
-int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port);
+int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
+void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
+int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
 bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
 bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);

@@ -650,9 +650,9 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
 			      bool uniq_trans_scale)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *dport = enc_to_dig_port(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-	enum dpio_channel ch = vlv_dport_to_channel(dport);
+	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
 	enum pipe pipe = intel_crtc->pipe;
 	u32 val;
 	int i;
@@ -746,7 +746,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
 			      bool reset)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
+	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	enum pipe pipe = crtc->pipe;
 	u32 val;
@@ -789,10 +789,10 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
 void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
 			    const struct intel_crtc_state *crtc_state)
 {
-	struct intel_digital_port *dport = enc_to_dig_port(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	enum dpio_channel ch = vlv_dport_to_channel(dport);
+	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
 	enum pipe pipe = crtc->pipe;
 	unsigned int lane_mask =
 		intel_dp_unused_lane_mask(crtc_state->lane_count);
@@ -803,7 +803,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
 	 * Otherwise we can't even access the PLL.
 	 */
 	if (ch == DPIO_CH0 && pipe == PIPE_B)
-		dport->release_cl2_override =
+		dig_port->release_cl2_override =
 			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

 	chv_phy_powergate_lanes(encoder, true, lane_mask);
@@ -870,10 +870,10 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
 				const struct intel_crtc_state *crtc_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	enum dpio_channel ch = vlv_dport_to_channel(dport);
+	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
 	enum pipe pipe = crtc->pipe;
 	int data, i, stagger;
 	u32 val;
@@ -948,12 +948,12 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,

 void chv_phy_release_cl2_override(struct intel_encoder *encoder)
 {
-	struct intel_digital_port *dport = enc_to_dig_port(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

-	if (dport->release_cl2_override) {
+	if (dig_port->release_cl2_override) {
 		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
-		dport->release_cl2_override = false;
+		dig_port->release_cl2_override = false;
 	}
 }

@@ -997,8 +997,8 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-	struct intel_digital_port *dport = enc_to_dig_port(encoder);
-	enum dpio_channel port = vlv_dport_to_channel(dport);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+	enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
 	enum pipe pipe = intel_crtc->pipe;

 	vlv_dpio_get(dev_priv);
@@ -1022,10 +1022,10 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
 void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
 			    const struct intel_crtc_state *crtc_state)
 {
-	struct intel_digital_port *dport = enc_to_dig_port(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	enum dpio_channel port = vlv_dport_to_channel(dport);
+	enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
 	enum pipe pipe = crtc->pipe;

 	/* Program Tx lane resets to default */
@@ -1052,10 +1052,10 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
 				const struct intel_crtc_state *crtc_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	enum dpio_channel port = vlv_dport_to_channel(dport);
+	enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
 	enum pipe pipe = crtc->pipe;
 	u32 val;

@@ -1081,10 +1081,10 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
 void vlv_phy_reset_lanes(struct intel_encoder *encoder,
 			 const struct intel_crtc_state *old_crtc_state)
 {
-	struct intel_digital_port *dport = enc_to_dig_port(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
-	enum dpio_channel port = vlv_dport_to_channel(dport);
+	enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
 	enum pipe pipe = crtc->pipe;

 	vlv_dpio_get(dev_priv);
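
The intel_dpio_phy.c hunks above are a mechanical part of the intel_digital_port naming unification: dport becomes dig_port and vlv_dport_to_channel() becomes vlv_dig_port_to_channel(), with no behavioural change. For orientation, a rough sketch of what such a helper does, with an illustrative port-to-channel table (the real, platform-specific mapping lives in the i915 display headers):

	/* Illustrative only: map a DDI port to its DPIO PHY channel. */
	static enum dpio_channel example_port_to_channel(enum port port)
	{
		switch (port) {
		case PORT_B:
			return DPIO_CH0;
		case PORT_C:
			return DPIO_CH1;
		default:
			return DPIO_CH0;
		}
	}
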
@@ -324,6 +324,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	const struct drm_display_mode *fixed_mode =
 		to_intel_connector(connector)->panel.fixed_mode;
+	int num_modes;

 	/*
 	 * We should probably have an i2c driver get_modes function for those
@@ -331,21 +332,22 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
 	 * (TV-out, for example), but for now with just TMDS and LVDS,
 	 * that's not the case.
 	 */
-	intel_ddc_get_modes(connector,
-			    intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC));
-	if (!list_empty(&connector->probed_modes))
-		return 1;
+	num_modes = intel_ddc_get_modes(connector,
+					intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC));
+	if (num_modes)
+		return num_modes;

 	if (fixed_mode) {
 		struct drm_display_mode *mode;

 		mode = drm_mode_duplicate(connector->dev, fixed_mode);
 		if (mode) {
 			drm_mode_probed_add(connector, mode);
-			return 1;
+			num_modes++;
 		}
 	}

-	return 0;
+	return num_modes;
 }

 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
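
The intel_dvo_get_modes() change above makes the hook honour drm's .get_modes contract: return the number of modes actually added to the connector's probed list, with 0 as a legitimate "nothing found" result, instead of collapsing everything to 0 or 1. A minimal sketch of that contract, with example_get_modes and its fixed-mode parameter as hypothetical names:

	/*
	 * Sketch of the .get_modes contract: add modes with
	 * drm_mode_probed_add() and return how many were added.
	 */
	static int example_get_modes(struct drm_connector *connector,
				     const struct drm_display_mode *fixed_mode)
	{
		struct drm_display_mode *mode;
		int num_modes = 0;

		mode = drm_mode_duplicate(connector->dev, fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num_modes++;
		}

		return num_modes;
	}
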
@@ -187,8 +187,30 @@ static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
 	return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN;
 }

+static void i8xx_fbc_recompress(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+	enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
+
+	spin_lock_irq(&dev_priv->uncore.lock);
+	intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+			  intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
+	spin_unlock_irq(&dev_priv->uncore.lock);
+}
+
+static void i965_fbc_recompress(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+	enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
+
+	spin_lock_irq(&dev_priv->uncore.lock);
+	intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+			  intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
+	spin_unlock_irq(&dev_priv->uncore.lock);
+}
+
 /* This function forces a CFB recompression through the nuke operation. */
-static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
+static void snb_fbc_recompress(struct drm_i915_private *dev_priv)
 {
 	struct intel_fbc *fbc = &dev_priv->fbc;

@@ -198,6 +220,16 @@ static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
 	intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE);
 }

+static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
+{
+	if (INTEL_GEN(dev_priv) >= 6)
+		snb_fbc_recompress(dev_priv);
+	else if (INTEL_GEN(dev_priv) >= 4)
+		i965_fbc_recompress(dev_priv);
+	else
+		i8xx_fbc_recompress(dev_priv);
+}
+
 static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
 {
 	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
@@ -315,21 +347,6 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
 	if (dev_priv->fbc.false_color)
 		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

-	if (IS_IVYBRIDGE(dev_priv)) {
-		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
-		intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1,
-			       intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS);
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
-		intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe),
-			       intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS);
-	}
-
-	if (INTEL_GEN(dev_priv) >= 11)
-		/* Wa_1409120013:icl,ehl,tgl */
-		intel_de_write(dev_priv, ILK_DPFC_CHICKEN,
-			       ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
 	intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

 	intel_fbc_recompress(dev_priv);
@@ -695,9 +712,13 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 	cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;

 	cache->fb.format = fb->format;
-	cache->fb.stride = fb->pitches[0];
 	cache->fb.modifier = fb->modifier;

+	/* FIXME is this correct? */
+	cache->fb.stride = plane_state->color_plane[0].stride;
+	if (drm_rotation_90_or_270(plane_state->hw.rotation))
+		cache->fb.stride *= fb->format->cpp[0];
+
 	/* FBC1 compression interval: arbitrary choice of 1 second */
 	cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);

@@ -816,6 +837,11 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 		return false;
 	}

+	if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
+		fbc->no_fbc_reason = "pixel format is invalid";
+		return false;
+	}
+
 	if (!rotation_is_valid(dev_priv, cache->fb.format->format,
 			       cache->plane.rotation)) {
 		fbc->no_fbc_reason = "rotation unsupported";
@@ -832,11 +858,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 		return false;
 	}

-	if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
-		fbc->no_fbc_reason = "pixel format is invalid";
-		return false;
-	}
-
 	if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
 	    cache->fb.format->has_alpha) {
 		fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
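
The state-cache hunk above switches FBC to the plane's color-plane stride and, for 90/270 rotation, multiplies by the format's bytes per pixel, on the assumption that the rotated stride is counted in pixels rather than bytes; the in-code FIXME keeps that caveat alive. A worked sketch of the arithmetic under that assumption: a 32bpp plane rotated 90 degrees with a 1080-pixel stride caches 1080 * 4 = 4320 bytes.

	/* Sketch of the stride adjustment, assuming pixel-unit rotated strides. */
	static unsigned int example_fbc_stride(unsigned int plane_stride,
					       unsigned int cpp, bool rotated)
	{
		unsigned int stride = plane_stride;

		if (rotated)
			stride *= cpp;	/* pixels -> bytes */

		return stride;
	}
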
@@ -40,15 +40,15 @@ bool intel_hdcp_is_ksv_valid(u8 *ksv)
 }

 static
-int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
+int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
 			       const struct intel_hdcp_shim *shim, u8 *bksv)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	int ret, i, tries = 2;

 	/* HDCP spec states that we must retry the bksv if it is invalid */
 	for (i = 0; i < tries; i++) {
-		ret = shim->read_bksv(intel_dig_port, bksv);
+		ret = shim->read_bksv(dig_port, bksv);
 		if (ret)
 			return ret;
 		if (intel_hdcp_is_ksv_valid(bksv))
@@ -65,7 +65,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
 /* Is HDCP1.4 capable on Platform and Sink */
 bool intel_hdcp_capable(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
 	bool capable = false;
 	u8 bksv[5];
@@ -74,9 +74,9 @@ bool intel_hdcp_capable(struct intel_connector *connector)
 		return capable;

 	if (shim->hdcp_capable) {
-		shim->hdcp_capable(intel_dig_port, &capable);
+		shim->hdcp_capable(dig_port, &capable);
 	} else {
-		if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
+		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
 			capable = true;
 	}

@@ -86,7 +86,7 @@ bool intel_hdcp_capable(struct intel_connector *connector)
 /* Is HDCP2.2 capable on Platform and Sink */
 bool intel_hdcp2_capable(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	bool capable = false;
@@ -104,7 +104,7 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
 	mutex_unlock(&dev_priv->hdcp_comp_mutex);

 	/* Sink's capability for HDCP2.2 */
-	hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
+	hdcp->shim->hdcp_2_2_capable(dig_port, &capable);

 	return capable;
 }
@@ -125,14 +125,14 @@ static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
 	       LINK_ENCRYPTION_STATUS;
 }

-static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
+static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
 				    const struct intel_hdcp_shim *shim)
 {
 	int ret, read_ret;
 	bool ksv_ready;

 	/* Poll for ksv list ready (spec says max time allowed is 5s) */
-	ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
+	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
 							 &ksv_ready),
 			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
 			 100 * 1000);
@@ -300,16 +300,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 				const struct intel_hdcp_shim *shim,
 				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
-	enum port port = intel_dig_port->base.port;
+	enum port port = dig_port->base.port;
 	u32 vprime, sha_text, sha_leftovers, rep_ctl;
 	int ret, i, j, sha_idx;

 	/* Process V' values from the receiver */
 	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
-		ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
+		ret = shim->read_v_prime_part(dig_port, i, &vprime);
 		if (ret)
 			return ret;
 		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
@@ -528,20 +528,20 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 static
 int intel_hdcp_auth_downstream(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
 	u8 bstatus[2], num_downstream, *ksv_fifo;
 	int ret, i, tries = 3;

-	ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
+	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
 	if (ret) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "KSV list failed to become ready (%d)\n", ret);
 		return ret;
 	}

-	ret = shim->read_bstatus(intel_dig_port, bstatus);
+	ret = shim->read_bstatus(dig_port, bstatus);
 	if (ret)
 		return ret;

@@ -571,12 +571,12 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
 		return -ENOMEM;
 	}

-	ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
+	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
 	if (ret)
 		goto err;

 	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
-					num_downstream)) {
+					num_downstream) > 0) {
 		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
 		ret = -EPERM;
 		goto err;
@@ -611,12 +611,12 @@ err:
 /* Implements Part 1 of the HDCP authorization procedure */
 static int intel_hdcp_auth(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	const struct intel_hdcp_shim *shim = hdcp->shim;
 	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
-	enum port port = intel_dig_port->base.port;
+	enum port port = dig_port->base.port;
 	unsigned long r0_prime_gen_start;
 	int ret, i, tries = 2;
 	union {
@@ -640,7 +640,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
 	 * displays, this is not necessary.
	 */
 	if (shim->hdcp_capable) {
-		ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
+		ret = shim->hdcp_capable(dig_port, &hdcp_capable);
 		if (ret)
 			return ret;
 		if (!hdcp_capable) {
@@ -670,7 +670,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
 			  HDCP_ANLO(dev_priv, cpu_transcoder, port));
 	an.reg[1] = intel_de_read(dev_priv,
 			  HDCP_ANHI(dev_priv, cpu_transcoder, port));
-	ret = shim->write_an_aksv(intel_dig_port, an.shim);
+	ret = shim->write_an_aksv(dig_port, an.shim);
 	if (ret)
 		return ret;

@@ -678,11 +678,11 @@ static int intel_hdcp_auth(struct intel_connector *connector)

 	memset(&bksv, 0, sizeof(bksv));

-	ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
+	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
 	if (ret < 0)
 		return ret;

-	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1)) {
+	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
 		drm_err(&dev_priv->drm, "BKSV is revoked\n");
 		return -EPERM;
 	}
@@ -692,14 +692,14 @@ static int intel_hdcp_auth(struct intel_connector *connector)
 	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
 		       bksv.reg[1]);

-	ret = shim->repeater_present(intel_dig_port, &repeater_present);
+	ret = shim->repeater_present(dig_port, &repeater_present);
 	if (ret)
 		return ret;
 	if (repeater_present)
 		intel_de_write(dev_priv, HDCP_REP_CTL,
 			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));

-	ret = shim->toggle_signalling(intel_dig_port, true);
+	ret = shim->toggle_signalling(dig_port, true);
 	if (ret)
 		return ret;

@@ -732,7 +732,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
	 */
 	for (i = 0; i < tries; i++) {
 		ri.reg = 0;
-		ret = shim->read_ri_prime(intel_dig_port, ri.shim);
+		ret = shim->read_ri_prime(dig_port, ri.shim);
 		if (ret)
 			return ret;
 		intel_de_write(dev_priv,
@@ -776,10 +776,10 @@ static int intel_hdcp_auth(struct intel_connector *connector)

 static int _intel_hdcp_disable(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
-	enum port port = intel_dig_port->base.port;
+	enum port port = dig_port->base.port;
 	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
 	int ret;

@@ -796,7 +796,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
 		return -ETIMEDOUT;
 	}

-	ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
+	ret = hdcp->shim->toggle_signalling(dig_port, false);
 	if (ret) {
 		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
 		return ret;
@@ -859,10 +859,10 @@ static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
 /* Implements Part 3 of the HDCP authorization procedure */
 static int intel_hdcp_check_link(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
-	enum port port = intel_dig_port->base.port;
+	enum port port = dig_port->base.port;
 	enum transcoder cpu_transcoder;
 	int ret = 0;

@@ -888,7 +888,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
 		goto out;
 	}

-	if (hdcp->shim->check_link(intel_dig_port)) {
+	if (hdcp->shim->check_link(dig_port)) {
 		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
 			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
 			schedule_work(&hdcp->prop_work);
@@ -1242,7 +1242,7 @@ static int hdcp2_deauthenticate_port(struct intel_connector *connector)
 /* Authentication flow starts from here */
 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	union {
@@ -1264,12 +1264,12 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
 	if (ret < 0)
 		return ret;

-	ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
+	ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
 				  sizeof(msgs.ake_init));
 	if (ret < 0)
 		return ret;

-	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
+	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
 				 &msgs.send_cert, sizeof(msgs.send_cert));
 	if (ret < 0)
 		return ret;
@@ -1283,7 +1283,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)

 	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
 					msgs.send_cert.cert_rx.receiver_id,
-					1)) {
+					1) > 0) {
 		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
 		return -EPERM;
 	}
@@ -1298,11 +1298,11 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
 	if (ret < 0)
 		return ret;

-	ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
+	ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
 	if (ret < 0)
 		return ret;

-	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
+	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
 				 &msgs.send_hprime, sizeof(msgs.send_hprime));
 	if (ret < 0)
 		return ret;
@@ -1313,7 +1313,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)

 	if (!hdcp->is_paired) {
 		/* Pairing is required */
-		ret = shim->read_2_2_msg(intel_dig_port,
+		ret = shim->read_2_2_msg(dig_port,
 					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
 					 &msgs.pairing_info,
 					 sizeof(msgs.pairing_info));
@@ -1331,7 +1331,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)

 static int hdcp2_locality_check(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	union {
 		struct hdcp2_lc_init lc_init;
@@ -1345,12 +1345,12 @@ static int hdcp2_locality_check(struct intel_connector *connector)
 		if (ret < 0)
 			continue;

-		ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
+		ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
 					  sizeof(msgs.lc_init));
 		if (ret < 0)
 			continue;

-		ret = shim->read_2_2_msg(intel_dig_port,
+		ret = shim->read_2_2_msg(dig_port,
 					 HDCP_2_2_LC_SEND_LPRIME,
 					 &msgs.send_lprime,
 					 sizeof(msgs.send_lprime));
@@ -1367,7 +1367,7 @@ static int hdcp2_locality_check(struct intel_connector *connector)

 static int hdcp2_session_key_exchange(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	struct hdcp2_ske_send_eks send_eks;
 	int ret;
@@ -1376,7 +1376,7 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector)
 	if (ret < 0)
 		return ret;

-	ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
+	ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
 					sizeof(send_eks));
 	if (ret < 0)
 		return ret;
@@ -1387,7 +1387,7 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector)
 static
 int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	union {
@@ -1409,12 +1409,12 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
 	msgs.stream_manage.streams[0].stream_type = hdcp->content_type;

 	/* Send it to Repeater */
-	ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
+	ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
 				  sizeof(msgs.stream_manage));
 	if (ret < 0)
 		return ret;

-	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
+	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
 				 &msgs.stream_ready, sizeof(msgs.stream_ready));
 	if (ret < 0)
 		return ret;
@@ -1439,7 +1439,7 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
 static
 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	union {
@@ -1451,7 +1451,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
 	u8 *rx_info;
 	int ret;

-	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
+	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
 				 &msgs.recvid_list, sizeof(msgs.recvid_list));
 	if (ret < 0)
 		return ret;
@@ -1484,7 +1484,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
 		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
 	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
 					msgs.recvid_list.receiver_ids,
-					device_cnt)) {
+					device_cnt) > 0) {
 		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
 		return -EPERM;
 	}
@@ -1496,7 +1496,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
 		return ret;

 	hdcp->seq_num_v = seq_num_v;
-	ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
+	ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
 				  sizeof(msgs.rep_ack));
 	if (ret < 0)
 		return ret;
@@ -1517,7 +1517,7 @@ static int hdcp2_authenticate_repeater(struct intel_connector *connector)

 static int hdcp2_authenticate_sink(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	const struct intel_hdcp_shim *shim = hdcp->shim;
@@ -1543,7 +1543,7 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
 	}

 	if (shim->config_stream_type) {
-		ret = shim->config_stream_type(intel_dig_port,
+		ret = shim->config_stream_type(dig_port,
 					       hdcp->is_repeater,
 					       hdcp->content_type);
 		if (ret < 0)
@@ -1569,10 +1569,10 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)

 static int hdcp2_enable_encryption(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
-	enum port port = intel_dig_port->base.port;
+	enum port port = dig_port->base.port;
 	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
 	int ret;

@@ -1580,7 +1580,7 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
 		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
 		    LINK_ENCRYPTION_STATUS);
 	if (hdcp->shim->toggle_signalling) {
-		ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
+		ret = hdcp->shim->toggle_signalling(dig_port, true);
 		if (ret) {
 			drm_err(&dev_priv->drm,
 				"Failed to enable HDCP signalling. %d\n",
@@ -1608,10 +1608,10 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)

 static int hdcp2_disable_encryption(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
-	enum port port = intel_dig_port->base.port;
+	enum port port = dig_port->base.port;
 	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
 	int ret;

@@ -1630,7 +1630,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
 		drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");

 	if (hdcp->shim->toggle_signalling) {
-		ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
+		ret = hdcp->shim->toggle_signalling(dig_port, false);
 		if (ret) {
 			drm_err(&dev_priv->drm,
 				"Failed to disable HDCP signalling. %d\n",
@@ -1723,10 +1723,10 @@ static int _intel_hdcp2_disable(struct intel_connector *connector)
 /* Implements the Link Integrity Check for HDCP2.2 */
 static int intel_hdcp2_check_link(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
-	enum port port = intel_dig_port->base.port;
+	enum port port = dig_port->base.port;
 	enum transcoder cpu_transcoder;
 	int ret = 0;

@@ -1751,7 +1751,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
 		goto out;
 	}

-	ret = hdcp->shim->check_2_2_link(intel_dig_port);
+	ret = hdcp->shim->check_2_2_link(dig_port);
 	if (ret == HDCP_LINK_PROTECTED) {
 		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
 			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
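
The repeated "> 0" fixes above follow drm_hdcp_check_ksvs_revoked() having been reworked to return the number of revoked KSVs found, or a negative error from the SRM lookup; treating the raw return value as a boolean, as the old code did, would also have failed authentication on a mere lookup error. A sketch of the three-way handling these hunks implement, with example_check_ksvs as an illustrative wrapper:

	/*
	 * Return convention assumed above:
	 *   < 0 : SRM lookup error (not treated as a revocation here),
	 *  == 0 : no revoked KSVs,
	 *   > 0 : count of revoked KSVs -> refuse to authenticate.
	 */
	static int example_check_ksvs(struct drm_device *drm, u8 *ksvs, u32 count)
	{
		int revoked = drm_hdcp_check_ksvs_revoked(drm, ksvs, count);

		if (revoked > 0)
			return -EPERM;

		return 0;
	}
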
@@ -2086,6 +2086,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
 		(conn_state->hdcp_content_type != hdcp->content_type &&
 		 conn_state->content_protection !=
 		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+	bool desired_and_not_enabled = false;

 	/*
 	 * During the HDCP encryption session if Type change is requested,
@@ -2108,8 +2109,15 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
 	}

 	if (conn_state->content_protection ==
-	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
-	    content_protection_type_changed)
+	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+		mutex_lock(&hdcp->mutex);
+		/* Avoid enabling hdcp, if it already ENABLED */
+		desired_and_not_enabled =
+			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
+		mutex_unlock(&hdcp->mutex);
+	}
+
+	if (desired_and_not_enabled || content_protection_type_changed)
 		intel_hdcp_enable(connector,
 				  crtc_state->cpu_transcoder,
 				  (u8)conn_state->hdcp_content_type);
@@ -2158,6 +2166,19 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
 		return;
 	}

+	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+						   new_state->crtc);
+	/*
+	 * Fix the HDCP uapi content protection state in case of modeset.
+	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
+	 * need to be sent if there is transition from ENABLED->DESIRED.
+	 */
+	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
+	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+	     new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
+		new_state->content_protection =
+			DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
 	/*
 	 * Nothing to do if the state didn't change, or HDCP was activated since
 	 * the last commit. And also no change in hdcp content type.
@@ -2170,8 +2191,6 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
 		return;
 	}

-	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
-						   new_state->crtc);
 	crtc_state->mode_changed = true;
 }

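
The update_pipe/atomic_check hunks above tighten the content protection property state machine: ENABLED is only ever set by the driver, userspace toggles between DESIRED and UNDESIRED, a modeset downgrades ENABLED back to DESIRED so the driver re-enables afterwards, and enabling is skipped when HDCP is already up. A compact sketch of the decision these hunks converge on, with example_hdcp_needs_enable as an illustrative name:

	/*
	 * Sketch of the uapi transitions assumed above:
	 *   UNDESIRED -> DESIRED   (userspace request)
	 *   DESIRED   -> ENABLED   (driver, after successful auth)
	 *   ENABLED   -> DESIRED   (driver, on modeset or link failure)
	 */
	static bool example_hdcp_needs_enable(u64 requested, u64 current_value,
					      bool type_changed)
	{
		return type_changed ||
		       (requested == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
			current_value != DRM_MODE_CONTENT_PROTECTION_ENABLED);
	}
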
@@ -88,10 +88,10 @@ assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,

 struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder)
 {
-	struct intel_digital_port *intel_dig_port =
+	struct intel_digital_port *dig_port =
 		container_of(&encoder->base, struct intel_digital_port,
 			     base.base);
-	return &intel_dig_port->hdmi;
+	return &dig_port->hdmi;
 }

 static struct intel_hdmi *intel_attached_hdmi(struct intel_connector *connector)
@@ -660,7 +660,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
 				  enum hdmi_infoframe_type type,
 				  const union hdmi_infoframe *frame)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	u8 buffer[VIDEO_DIP_DATA_SIZE];
 	ssize_t len;

@@ -681,7 +681,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
 	buffer[3] = 0;
 	len++;

-	intel_dig_port->write_infoframe(encoder, crtc_state, type, buffer, len);
+	dig_port->write_infoframe(encoder, crtc_state, type, buffer, len);
 }

 void intel_read_infoframe(struct intel_encoder *encoder,
@@ -689,7 +689,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
 			  enum hdmi_infoframe_type type,
 			  union hdmi_infoframe *frame)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	u8 buffer[VIDEO_DIP_DATA_SIZE];
 	int ret;

@@ -697,7 +697,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
 		    intel_hdmi_infoframe_enable(type)) == 0)
 		return;

-	intel_dig_port->read_infoframe(encoder, crtc_state,
+	dig_port->read_infoframe(encoder, crtc_state,
 				 type, buffer, sizeof(buffer));

 	/* Fill the 'hole' (see big comment above) at position 3 */
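
In the infoframe hunks above, intel_write_infoframe() packs a union hdmi_infoframe into a DIP buffer, leaves the hole at byte 3 that the hardware expects, and hands the result to the platform-specific dig_port->write_infoframe() vfunc. A minimal sketch of the generic pack step using the kernel's hdmi helpers, with write_hw() standing in for the hardware hook:

	#include <linux/hdmi.h>

	/* Sketch: pack an AVI infoframe, then hand it to a hw-specific hook. */
	static ssize_t example_pack_avi(union hdmi_infoframe *frame,
					void (*write_hw)(const void *buf, size_t len))
	{
		u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
		ssize_t len;

		len = hdmi_infoframe_pack(frame, buffer, sizeof(buffer));
		if (len < 0)
			return len;

		write_hw(buffer, len);
		return len;
	}
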
@@ -872,8 +872,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
 			       const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+	struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
 	i915_reg_t reg = VIDEO_DIP_CTL;
 	u32 val = intel_de_read(dev_priv, reg);
 	u32 port = VIDEO_DIP_PORT(encoder->port);
@@ -1057,8 +1057,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+	struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = intel_de_read(dev_priv, reg);
 	u32 port = VIDEO_DIP_PORT(encoder->port);
@@ -1275,11 +1275,11 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
 					       adapter, enable);
 }

-static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
+static int intel_hdmi_hdcp_read(struct intel_digital_port *dig_port,
 				unsigned int offset, void *buffer, size_t size)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
-	struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+	struct intel_hdmi *hdmi = &dig_port->hdmi;
 	struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
 							      hdmi->ddc_bus);
 	int ret;
@@ -1304,11 +1304,11 @@ static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
 	return ret >= 0 ? -EIO : ret;
 }

-static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
+static int intel_hdmi_hdcp_write(struct intel_digital_port *dig_port,
 				 unsigned int offset, void *buffer, size_t size)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
-	struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+	struct intel_hdmi *hdmi = &dig_port->hdmi;
 	struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
 							      hdmi->ddc_bus);
 	int ret;
@@ -1338,16 +1338,16 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
 }

 static
-int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
 				  u8 *an)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
-	struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+	struct intel_hdmi *hdmi = &dig_port->hdmi;
 	struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
 							      hdmi->ddc_bus);
 	int ret;

-	ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
+	ret = intel_hdmi_hdcp_write(dig_port, DRM_HDCP_DDC_AN, an,
 				    DRM_HDCP_AN_LEN);
 	if (ret) {
 		drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n",
@@ -1363,13 +1363,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
 	return 0;
 }

-static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
+static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *dig_port,
 				     u8 *bksv)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

 	int ret;
-	ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
+	ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BKSV, bksv,
 				   DRM_HDCP_KSV_LEN);
 	if (ret)
 		drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n",
@@ -1378,13 +1378,13 @@ static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
 }

 static
-int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *dig_port,
 				 u8 *bstatus)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

 	int ret;
-	ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
+	ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BSTATUS,
 				   bstatus, DRM_HDCP_BSTATUS_LEN);
 	if (ret)
 		drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n",
@@ -1393,14 +1393,14 @@ int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
 }

 static
-int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *dig_port,
 				     bool *repeater_present)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	int ret;
 	u8 val;

-	ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
+	ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
 	if (ret) {
 		drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
 			    ret);
@@ -1411,13 +1411,13 @@ int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
 }

 static
-int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
 				  u8 *ri_prime)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

 	int ret;
-	ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
+	ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_RI_PRIME,
 				   ri_prime, DRM_HDCP_RI_LEN);
 	if (ret)
 		drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n",
@@ -1426,14 +1426,14 @@ int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
 }

 static
-int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
 				   bool *ksv_ready)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	int ret;
 	u8 val;

-	ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
+	ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
 	if (ret) {
 		drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
 			    ret);
@@ -1444,12 +1444,12 @@ int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
 }

 static
-int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
 				  int num_downstream, u8 *ksv_fifo)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	int ret;
-	ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
+	ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_KSV_FIFO,
 				   ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
 	if (ret) {
 		drm_dbg_kms(&i915->drm,
@@ -1460,16 +1460,16 @@ int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
 }

 static
-int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
 				      int i, u32 *part)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	int ret;

 	if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
 		return -EINVAL;

-	ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
+	ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_V_PRIME(i),
 				   part, DRM_HDCP_V_PRIME_PART_LEN);
 	if (ret)
 		drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n",
@@ -1480,7 +1480,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
 static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 	struct drm_crtc *crtc = connector->base.state->crtc;
 	struct intel_crtc *intel_crtc = container_of(crtc,
 						     struct intel_crtc, base);
@@ -1494,13 +1494,13 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
 		usleep_range(25, 50);
 	}

-	ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false);
+	ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, false);
 	if (ret) {
 		drm_err(&dev_priv->drm,
 			"Disable HDCP signalling failed (%d)\n", ret);
 		return ret;
 	}
-	ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true);
+	ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, true);
 	if (ret) {
 		drm_err(&dev_priv->drm,
 			"Enable HDCP signalling failed (%d)\n", ret);
@@ -1511,10 +1511,10 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
 }

 static
-int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
 				      bool enable)
 {
-	struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+	struct intel_hdmi *hdmi = &dig_port->hdmi;
 	struct intel_connector *connector = hdmi->attached_connector;
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	int ret;
@@ -1522,7 +1522,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
 	if (!enable)
 		usleep_range(6, 60); /* Bspec says >= 6us */

-	ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, enable);
+	ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, enable);
 	if (ret) {
 		drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
 			enable ? "Enable" : "Disable", ret);
@@ -1540,12 +1540,12 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
 }

 static
-bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *intel_dig_port)
+bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	struct intel_connector *connector =
-		intel_dig_port->hdmi.attached_connector;
-	enum port port = intel_dig_port->base.port;
+		dig_port->hdmi.attached_connector;
+	enum port port = dig_port->base.port;
 	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
 	int ret;
 	union {
@@ -1553,7 +1553,7 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *intel_dig_port)
 		u8 shim[DRM_HDCP_RI_LEN];
 	} ri;

-	ret = intel_hdmi_hdcp_read_ri_prime(intel_dig_port, ri.shim);
+	ret = intel_hdmi_hdcp_read_ri_prime(dig_port, ri.shim);
 	if (ret)
 		return false;

@@ -1572,13 +1572,13 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *intel_dig_port)
 }

 static
-bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
+bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	int retry;

 	for (retry = 0; retry < 3; retry++)
-		if (intel_hdmi_hdcp_check_link_once(intel_dig_port))
+		if (intel_hdmi_hdcp_check_link_once(dig_port))
 			return true;

 	drm_err(&i915->drm, "Link check failed\n");
@@ -1599,10 +1599,10 @@ static const struct hdcp2_hdmi_msg_timeout hdcp2_msg_timeout[] = {
 };

 static
-int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *dig_port,
 				    u8 *rx_status)
 {
-	return intel_hdmi_hdcp_read(intel_dig_port,
+	return intel_hdmi_hdcp_read(dig_port,
 				    HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET,
 				    rx_status,
 				    HDCP_2_2_HDMI_RXSTATUS_LEN);
@@ -1628,15 +1628,15 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
 }

 static int
-hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
 			      u8 msg_id, bool *msg_ready,
 			      ssize_t *msg_sz)
 {
-	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
 	int ret;

-	ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
|
||||
ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status);
|
||||
if (ret < 0) {
|
||||
drm_dbg_kms(&i915->drm, "rx_status read failed. Err %d\n",
|
||||
ret);
|
||||
|
@ -1656,10 +1656,10 @@ hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
|
|||
}
|
||||
|
||||
static ssize_t
|
||||
intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
|
||||
intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
|
||||
u8 msg_id, bool paired)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
bool msg_ready = false;
|
||||
int timeout, ret;
|
||||
ssize_t msg_sz = 0;
|
||||
|
@ -1668,7 +1668,7 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
|
|||
if (timeout < 0)
|
||||
return timeout;
|
||||
|
||||
ret = __wait_for(ret = hdcp2_detect_msg_availability(intel_dig_port,
|
||||
ret = __wait_for(ret = hdcp2_detect_msg_availability(dig_port,
|
||||
msg_id, &msg_ready,
|
||||
&msg_sz),
|
||||
!ret && msg_ready && msg_sz, timeout * 1000,
|
||||
|
@ -1681,26 +1681,26 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
|
|||
}
|
||||
|
||||
static
|
||||
int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
|
||||
int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *dig_port,
|
||||
void *buf, size_t size)
|
||||
{
|
||||
unsigned int offset;
|
||||
|
||||
offset = HDCP_2_2_HDMI_REG_WR_MSG_OFFSET;
|
||||
return intel_hdmi_hdcp_write(intel_dig_port, offset, buf, size);
|
||||
return intel_hdmi_hdcp_write(dig_port, offset, buf, size);
|
||||
}
|
||||
|
||||
static
|
||||
int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
|
||||
int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *dig_port,
|
||||
u8 msg_id, void *buf, size_t size)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
|
||||
struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
struct intel_hdmi *hdmi = &dig_port->hdmi;
|
||||
struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
|
||||
unsigned int offset;
|
||||
ssize_t ret;
|
||||
|
||||
ret = intel_hdmi_hdcp2_wait_for_msg(intel_dig_port, msg_id,
|
||||
ret = intel_hdmi_hdcp2_wait_for_msg(dig_port, msg_id,
|
||||
hdcp->is_paired);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
@ -1717,7 +1717,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
|
|||
}
|
||||
|
||||
offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
|
||||
ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret);
|
||||
ret = intel_hdmi_hdcp_read(dig_port, offset, buf, ret);
|
||||
if (ret)
|
||||
drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n",
|
||||
msg_id, ret);
|
||||
|
@ -1726,12 +1726,12 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
|
|||
}
|
||||
|
||||
static
|
||||
int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
|
||||
int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port)
|
||||
{
|
||||
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
|
||||
int ret;
|
||||
|
||||
ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
|
||||
ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1748,14 +1748,14 @@ int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
|
|||
}
|
||||
|
||||
static
|
||||
int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port,
|
||||
int intel_hdmi_hdcp2_capable(struct intel_digital_port *dig_port,
|
||||
bool *capable)
|
||||
{
|
||||
u8 hdcp2_version;
|
||||
int ret;
|
||||
|
||||
*capable = false;
|
||||
ret = intel_hdmi_hdcp_read(intel_dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET,
|
||||
ret = intel_hdmi_hdcp_read(dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET,
|
||||
&hdcp2_version, sizeof(hdcp2_version));
|
||||
if (!ret && hdcp2_version & HDCP_2_2_HDMI_SUPPORT_MASK)
|
||||
*capable = true;
|
||||
|
@ -2063,7 +2063,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
|
|||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
|
||||
struct intel_digital_port *intel_dig_port =
|
||||
struct intel_digital_port *dig_port =
|
||||
hdmi_to_dig_port(intel_hdmi);
|
||||
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
|
||||
u32 temp;
|
||||
|
@ -2107,7 +2107,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
|
|||
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
|
||||
}
|
||||
|
||||
intel_dig_port->set_infoframes(encoder,
|
||||
dig_port->set_infoframes(encoder,
|
||||
false,
|
||||
old_crtc_state, old_conn_state);
|
||||
|
||||
|
@ -2242,8 +2242,11 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
|
|||
if (clock > max_dotclk)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
|
||||
if (!has_hdmi_sink)
|
||||
return MODE_CLOCK_LOW;
|
||||
clock *= 2;
|
||||
}
|
||||
|
||||
if (drm_mode_is_420_only(&connector->display_info, mode))
|
||||
clock /= 2;
|
||||
|
@ -2428,8 +2431,8 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state)
|
||||
bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
const struct intel_digital_connector_state *intel_conn_state =
|
||||
to_intel_digital_connector_state(conn_state);
|
||||
|
@ -2722,12 +2725,12 @@ static void intel_hdmi_pre_enable(struct intel_atomic_state *state,
|
|||
const struct intel_crtc_state *pipe_config,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct intel_digital_port *intel_dig_port =
|
||||
struct intel_digital_port *dig_port =
|
||||
enc_to_dig_port(encoder);
|
||||
|
||||
intel_hdmi_prepare(encoder, pipe_config);
|
||||
|
||||
intel_dig_port->set_infoframes(encoder,
|
||||
dig_port->set_infoframes(encoder,
|
||||
pipe_config->has_infoframe,
|
||||
pipe_config, conn_state);
|
||||
}
|
||||
|
@ -2737,7 +2740,7 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
|
|||
const struct intel_crtc_state *pipe_config,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct intel_digital_port *dport = enc_to_dig_port(encoder);
|
||||
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
|
||||
vlv_phy_pre_encoder_enable(encoder, pipe_config);
|
||||
|
@ -2746,13 +2749,13 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
|
|||
vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
|
||||
0x2b247878);
|
||||
|
||||
dport->set_infoframes(encoder,
|
||||
dig_port->set_infoframes(encoder,
|
||||
pipe_config->has_infoframe,
|
||||
pipe_config, conn_state);
|
||||
|
||||
g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
|
||||
|
||||
vlv_wait_port_ready(dev_priv, dport, 0x0);
|
||||
vlv_wait_port_ready(dev_priv, dig_port, 0x0);
|
||||
}
|
||||
|
||||
static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
|
||||
|
@ -2813,7 +2816,7 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
|
|||
const struct intel_crtc_state *pipe_config,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct intel_digital_port *dport = enc_to_dig_port(encoder);
|
||||
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
|
@ -2823,13 +2826,13 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
|
|||
/* Use 800mV-0dB */
|
||||
chv_set_phy_signal_level(encoder, 128, 102, false);
|
||||
|
||||
dport->set_infoframes(encoder,
|
||||
dig_port->set_infoframes(encoder,
|
||||
pipe_config->has_infoframe,
|
||||
pipe_config, conn_state);
|
||||
|
||||
g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
|
||||
|
||||
vlv_wait_port_ready(dev_priv, dport, 0x0);
|
||||
vlv_wait_port_ready(dev_priv, dig_port, 0x0);
|
||||
|
||||
/* Second common lane will stay alive on its own now */
|
||||
chv_phy_release_cl2_override(encoder);
|
||||
|
@ -2917,7 +2920,7 @@ static void
|
|||
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->dev);
|
||||
struct intel_digital_port *intel_dig_port =
|
||||
struct intel_digital_port *dig_port =
|
||||
hdmi_to_dig_port(intel_hdmi);
|
||||
|
||||
intel_attach_force_audio_property(connector);
|
||||
|
@ -2929,7 +2932,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
|
|||
* ToDo: This needs to be extended for LSPCON implementation
|
||||
* as well. Will be implemented separately.
|
||||
*/
|
||||
if (!intel_dig_port->lspcon.active)
|
||||
if (!dig_port->lspcon.active)
|
||||
intel_attach_colorspace_property(connector);
|
||||
|
||||
drm_connector_attach_content_type_property(connector);
|
||||
|
@ -3166,52 +3169,52 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
|
|||
return ddc_pin;
|
||||
}
|
||||
|
||||
void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
|
||||
void intel_infoframe_init(struct intel_digital_port *dig_port)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
to_i915(intel_dig_port->base.base.dev);
|
||||
to_i915(dig_port->base.base.dev);
|
||||
|
||||
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
|
||||
intel_dig_port->write_infoframe = vlv_write_infoframe;
|
||||
intel_dig_port->read_infoframe = vlv_read_infoframe;
|
||||
intel_dig_port->set_infoframes = vlv_set_infoframes;
|
||||
intel_dig_port->infoframes_enabled = vlv_infoframes_enabled;
|
||||
dig_port->write_infoframe = vlv_write_infoframe;
|
||||
dig_port->read_infoframe = vlv_read_infoframe;
|
||||
dig_port->set_infoframes = vlv_set_infoframes;
|
||||
dig_port->infoframes_enabled = vlv_infoframes_enabled;
|
||||
} else if (IS_G4X(dev_priv)) {
|
||||
intel_dig_port->write_infoframe = g4x_write_infoframe;
|
||||
intel_dig_port->read_infoframe = g4x_read_infoframe;
|
||||
intel_dig_port->set_infoframes = g4x_set_infoframes;
|
||||
intel_dig_port->infoframes_enabled = g4x_infoframes_enabled;
|
||||
dig_port->write_infoframe = g4x_write_infoframe;
|
||||
dig_port->read_infoframe = g4x_read_infoframe;
|
||||
dig_port->set_infoframes = g4x_set_infoframes;
|
||||
dig_port->infoframes_enabled = g4x_infoframes_enabled;
|
||||
} else if (HAS_DDI(dev_priv)) {
|
||||
if (intel_dig_port->lspcon.active) {
|
||||
intel_dig_port->write_infoframe = lspcon_write_infoframe;
|
||||
intel_dig_port->read_infoframe = lspcon_read_infoframe;
|
||||
intel_dig_port->set_infoframes = lspcon_set_infoframes;
|
||||
intel_dig_port->infoframes_enabled = lspcon_infoframes_enabled;
|
||||
if (dig_port->lspcon.active) {
|
||||
dig_port->write_infoframe = lspcon_write_infoframe;
|
||||
dig_port->read_infoframe = lspcon_read_infoframe;
|
||||
dig_port->set_infoframes = lspcon_set_infoframes;
|
||||
dig_port->infoframes_enabled = lspcon_infoframes_enabled;
|
||||
} else {
|
||||
intel_dig_port->write_infoframe = hsw_write_infoframe;
|
||||
intel_dig_port->read_infoframe = hsw_read_infoframe;
|
||||
intel_dig_port->set_infoframes = hsw_set_infoframes;
|
||||
intel_dig_port->infoframes_enabled = hsw_infoframes_enabled;
|
||||
dig_port->write_infoframe = hsw_write_infoframe;
|
||||
dig_port->read_infoframe = hsw_read_infoframe;
|
||||
dig_port->set_infoframes = hsw_set_infoframes;
|
||||
dig_port->infoframes_enabled = hsw_infoframes_enabled;
|
||||
}
|
||||
} else if (HAS_PCH_IBX(dev_priv)) {
|
||||
intel_dig_port->write_infoframe = ibx_write_infoframe;
|
||||
intel_dig_port->read_infoframe = ibx_read_infoframe;
|
||||
intel_dig_port->set_infoframes = ibx_set_infoframes;
|
||||
intel_dig_port->infoframes_enabled = ibx_infoframes_enabled;
|
||||
dig_port->write_infoframe = ibx_write_infoframe;
|
||||
dig_port->read_infoframe = ibx_read_infoframe;
|
||||
dig_port->set_infoframes = ibx_set_infoframes;
|
||||
dig_port->infoframes_enabled = ibx_infoframes_enabled;
|
||||
} else {
|
||||
intel_dig_port->write_infoframe = cpt_write_infoframe;
|
||||
intel_dig_port->read_infoframe = cpt_read_infoframe;
|
||||
intel_dig_port->set_infoframes = cpt_set_infoframes;
|
||||
intel_dig_port->infoframes_enabled = cpt_infoframes_enabled;
|
||||
dig_port->write_infoframe = cpt_write_infoframe;
|
||||
dig_port->read_infoframe = cpt_read_infoframe;
|
||||
dig_port->set_infoframes = cpt_set_infoframes;
|
||||
dig_port->infoframes_enabled = cpt_infoframes_enabled;
|
||||
}
|
||||
}
|
||||
|
||||
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
|
||||
void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
|
||||
struct intel_connector *intel_connector)
|
||||
{
|
||||
struct drm_connector *connector = &intel_connector->base;
|
||||
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
|
||||
struct intel_encoder *intel_encoder = &intel_dig_port->base;
|
||||
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
|
||||
struct intel_encoder *intel_encoder = &dig_port->base;
|
||||
struct drm_device *dev = intel_encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct i2c_adapter *ddc;
|
||||
|
@ -3225,9 +3228,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
|
|||
if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
|
||||
return;
|
||||
|
||||
if (drm_WARN(dev, intel_dig_port->max_lanes < 4,
|
||||
if (drm_WARN(dev, dig_port->max_lanes < 4,
|
||||
"Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
|
||||
intel_dig_port->max_lanes, intel_encoder->base.base.id,
|
||||
dig_port->max_lanes, intel_encoder->base.base.id,
|
||||
intel_encoder->base.name))
|
||||
return;
|
||||
|
||||
|
@ -3316,21 +3319,21 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
|
|||
void intel_hdmi_init(struct drm_i915_private *dev_priv,
|
||||
i915_reg_t hdmi_reg, enum port port)
|
||||
{
|
||||
struct intel_digital_port *intel_dig_port;
|
||||
struct intel_digital_port *dig_port;
|
||||
struct intel_encoder *intel_encoder;
|
||||
struct intel_connector *intel_connector;
|
||||
|
||||
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
|
||||
if (!intel_dig_port)
|
||||
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
|
||||
if (!dig_port)
|
||||
return;
|
||||
|
||||
intel_connector = intel_connector_alloc();
|
||||
if (!intel_connector) {
|
||||
kfree(intel_dig_port);
|
||||
kfree(dig_port);
|
||||
return;
|
||||
}
|
||||
|
||||
intel_encoder = &intel_dig_port->base;
|
||||
intel_encoder = &dig_port->base;
|
||||
|
||||
drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
|
||||
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
|
||||
|
@ -3387,12 +3390,12 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
|
|||
if (IS_G4X(dev_priv))
|
||||
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
|
||||
|
||||
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
|
||||
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
|
||||
intel_dig_port->max_lanes = 4;
|
||||
dig_port->hdmi.hdmi_reg = hdmi_reg;
|
||||
dig_port->dp.output_reg = INVALID_MMIO_REG;
|
||||
dig_port->max_lanes = 4;
|
||||
|
||||
intel_infoframe_init(intel_dig_port);
|
||||
intel_infoframe_init(dig_port);
|
||||
|
||||
intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
|
||||
intel_hdmi_init_connector(intel_dig_port, intel_connector);
|
||||
dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
|
||||
intel_hdmi_init_connector(dig_port, intel_connector);
|
||||
}
|
||||
|
|
|
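The mode_valid change above tightens doubleclocked (pixel-repeated) modes: they are only legal with an HDMI sink, and the dotclock must be doubled before the range checks. As a standalone sketch of that ordering (simplified, hypothetical type and flag names, not the driver's real ones):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's types and limits. */
enum mode_status { MODE_OK, MODE_CLOCK_LOW, MODE_CLOCK_HIGH };
#define FLAG_DBLCLK (1 << 0)

struct display_mode {
	int clock;       /* pixel clock in kHz */
	unsigned flags;
};

/* Mirrors the new validation order: reject doubleclocked modes on
 * DVI-only sinks, and double the clock before the range checks. */
static enum mode_status mode_valid(const struct display_mode *mode,
				   bool has_hdmi_sink, int max_dotclk)
{
	int clock = mode->clock;

	if (clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->flags & FLAG_DBLCLK) {
		if (!has_hdmi_sink)
			return MODE_CLOCK_LOW; /* no pixel repeat over DVI */
		clock *= 2;
	}

	if (clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

int main(void)
{
	struct display_mode m = { .clock = 27000, .flags = FLAG_DBLCLK };

	printf("%d\n", mode_valid(&m, false, 300000)); /* 1: MODE_CLOCK_LOW */
	printf("%d\n", mode_valid(&m, true, 300000));  /* 0: MODE_OK */
	return 0;
}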
@@ -25,7 +25,7 @@ enum port;
 
 void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
 		     enum port port);
-void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
 			       struct intel_connector *intel_connector);
 struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder);
 int intel_hdmi_compute_config(struct intel_encoder *encoder,
@@ -36,7 +36,7 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
 				       bool high_tmds_clock_ratio,
 				       bool scrambling);
 void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
-void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
+void intel_infoframe_init(struct intel_digital_port *dig_port);
 u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *crtc_state);
 u32 intel_hdmi_infoframe_enable(unsigned int type);
@@ -46,5 +46,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
 			  const struct intel_crtc_state *crtc_state,
 			  enum hdmi_infoframe_type type,
 			  union hdmi_infoframe *frame);
+bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
+				    const struct drm_connector_state *conn_state);
 
 #endif /* __INTEL_HDMI_H__ */
@@ -550,11 +550,11 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
 	lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON);
 }
 
-bool lspcon_init(struct intel_digital_port *intel_dig_port)
+bool lspcon_init(struct intel_digital_port *dig_port)
 {
-	struct intel_dp *dp = &intel_dig_port->dp;
-	struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
-	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct intel_dp *dp = &dig_port->dp;
+	struct intel_lspcon *lspcon = &dig_port->lspcon;
+	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_connector *connector = &dp->attached_connector->base;
 
@@ -15,7 +15,7 @@ struct intel_digital_port;
 struct intel_encoder;
 struct intel_lspcon;
 
-bool lspcon_init(struct intel_digital_port *intel_dig_port);
+bool lspcon_init(struct intel_digital_port *dig_port);
 void lspcon_resume(struct intel_lspcon *lspcon);
 void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
 void lspcon_write_infoframe(struct intel_encoder *encoder,
@@ -905,8 +905,8 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
 				    const struct drm_connector_state *conn_state)
 {
 	struct intel_dp *intel_dp = dev_priv->psr.dp;
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct intel_encoder *encoder = &intel_dig_port->base;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *encoder = &dig_port->base;
 	u32 val;
 
 	drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
@@ -94,6 +94,8 @@ struct intel_sdvo {
 	 */
 	struct intel_sdvo_caps caps;
 
+	u8 colorimetry_cap;
+
 	/* Pixel clock limitations reported by the SDVO device, in kHz */
 	int pixel_clock_min, pixel_clock_max;
 
@@ -942,6 +944,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
 	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
 }
 
+static bool intel_sdvo_set_pixel_replication(struct intel_sdvo *intel_sdvo,
+					     u8 pixel_repeat)
+{
+	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_PIXEL_REPLI,
+				    &pixel_repeat, 1);
+}
+
 static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
 				       u8 audio_state)
 {
@@ -1277,6 +1286,18 @@ static bool intel_has_hdmi_sink(struct intel_sdvo *sdvo,
 		READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
 }
 
+static bool intel_sdvo_limited_color_range(struct intel_encoder *encoder,
+					   const struct intel_crtc_state *crtc_state,
+					   const struct drm_connector_state *conn_state)
+{
+	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+
+	if ((intel_sdvo->colorimetry_cap & SDVO_COLORIMETRY_RGB220) == 0)
+		return false;
+
+	return intel_hdmi_limited_color_range(crtc_state, conn_state);
+}
+
 static int intel_sdvo_compute_config(struct intel_encoder *encoder,
 				     struct intel_crtc_state *pipe_config,
 				     struct drm_connector_state *conn_state)
@@ -1342,21 +1363,9 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
 			intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON;
 	}
 
-	if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
-		/*
-		 * See CEA-861-E - 5.1 Default Encoding Parameters
-		 *
-		 * FIXME: This bit is only valid when using TMDS encoding and 8
-		 * bit per color mode.
-		 */
-		if (pipe_config->has_hdmi_sink &&
-		    drm_match_cea_mode(adjusted_mode) > 1)
-			pipe_config->limited_color_range = true;
-	} else {
-		if (pipe_config->has_hdmi_sink &&
-		    intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED)
-			pipe_config->limited_color_range = true;
-	}
+	pipe_config->limited_color_range =
+		intel_sdvo_limited_color_range(encoder, pipe_config,
+					       conn_state);
 
 	/* Clock computation needs to happen after pixel multiplier. */
 	if (IS_TV(intel_sdvo_connector))
@@ -1495,8 +1504,13 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
 	if (crtc_state->has_hdmi_sink) {
 		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
 		intel_sdvo_set_colorimetry(intel_sdvo,
+					   crtc_state->limited_color_range ?
+					   SDVO_COLORIMETRY_RGB220 :
 					   SDVO_COLORIMETRY_RGB256);
 		intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
+		intel_sdvo_set_pixel_replication(intel_sdvo,
+						 !!(adjusted_mode->flags &
+						    DRM_MODE_FLAG_DBLCLK));
 	} else
 		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
 
@@ -1530,8 +1544,6 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
 		/* The real mode polarity is set by the SDVO commands, using
 		 * struct intel_sdvo_dtd. */
 		sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
-		if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
-			sdvox |= HDMI_COLOR_RANGE_16_235;
 		if (INTEL_GEN(dev_priv) < 5)
 			sdvox |= SDVO_BORDER_ENABLE;
 	} else {
@@ -1689,8 +1701,11 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 			 "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
 			 pipe_config->pixel_multiplier, encoder_pixel_multiplier);
 
-	if (sdvox & HDMI_COLOR_RANGE_16_235)
-		pipe_config->limited_color_range = true;
+	if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_COLORIMETRY,
+				 &val, 1)) {
+		if (val == SDVO_COLORIMETRY_RGB220)
+			pipe_config->limited_color_range = true;
+	}
 
 	if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT,
 				 &val, 1)) {
@@ -1850,17 +1865,26 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
 	struct intel_sdvo_connector *intel_sdvo_connector =
 		to_intel_sdvo_connector(connector);
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+	bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, connector->state);
+	int clock = mode->clock;
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return MODE_NO_DBLESCAN;
 
-	if (intel_sdvo->pixel_clock_min > mode->clock)
-		return MODE_CLOCK_LOW;
-
-	if (intel_sdvo->pixel_clock_max < mode->clock)
+	if (clock > max_dotclk)
 		return MODE_CLOCK_HIGH;
 
-	if (mode->clock > max_dotclk)
-		return MODE_CLOCK_HIGH;
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+		if (!has_hdmi_sink)
+			return MODE_CLOCK_LOW;
+		clock *= 2;
+	}
+
+	if (intel_sdvo->pixel_clock_min > clock)
+		return MODE_CLOCK_LOW;
+
+	if (intel_sdvo->pixel_clock_max < clock)
+		return MODE_CLOCK_HIGH;
 
 	if (IS_LVDS(intel_sdvo_connector)) {
@@ -1914,6 +1938,17 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
 	return true;
 }
 
+static u8 intel_sdvo_get_colorimetry_cap(struct intel_sdvo *intel_sdvo)
+{
+	u8 cap;
+
+	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_COLORIMETRY_CAP,
+				  &cap, sizeof(cap)))
+		return SDVO_COLORIMETRY_RGB256;
+
+	return cap;
+}
+
 static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
@@ -2100,8 +2135,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 	return ret;
 }
 
-static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+static int intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 {
+	int num_modes = 0;
 	struct edid *edid;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2116,18 +2152,19 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 	 * DDC fails, check to see if the analog output is disconnected, in
 	 * which case we'll look there for the digital DDC data.
 	 */
-	if (edid == NULL)
+	if (!edid)
 		edid = intel_sdvo_get_analog_edid(connector);
 
-	if (edid != NULL) {
-		if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
-						      edid)) {
-			drm_connector_update_edid_property(connector, edid);
-			drm_add_edid_modes(connector, edid);
-		}
+	if (!edid)
+		return 0;
 
-		kfree(edid);
-	}
+	if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+					      edid))
+		num_modes += intel_connector_update_modes(connector, edid);
+
+	kfree(edid);
+
+	return num_modes;
 }
 
 /*
@@ -2195,12 +2232,13 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 };
 
-static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
+static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
 {
 	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	const struct drm_connector_state *conn_state = connector->state;
 	struct intel_sdvo_sdtv_resolution_request tv_res;
 	u32 reply = 0, format_map = 0;
+	int num_modes = 0;
 	int i;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2215,31 +2253,37 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 	       min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
 
 	if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
-		return;
+		return 0;
 
 	BUILD_BUG_ON(sizeof(tv_res) != 3);
 	if (!intel_sdvo_write_cmd(intel_sdvo,
 				  SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
 				  &tv_res, sizeof(tv_res)))
-		return;
+		return 0;
 	if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
-		return;
+		return 0;
 
-	for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
+	for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) {
 		if (reply & (1 << i)) {
 			struct drm_display_mode *nmode;
 			nmode = drm_mode_duplicate(connector->dev,
 						   &sdvo_tv_modes[i]);
-			if (nmode)
+			if (nmode) {
 				drm_mode_probed_add(connector, nmode);
+				num_modes++;
+			}
 		}
+	}
+
+	return num_modes;
 }
 
-static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+static int intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
 	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct drm_display_mode *newmode;
+	int num_modes = 0;
 
 	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
 		    connector->base.id, connector->name);
@@ -2256,6 +2300,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 			newmode->type = (DRM_MODE_TYPE_PREFERRED |
 					 DRM_MODE_TYPE_DRIVER);
 			drm_mode_probed_add(connector, newmode);
+			num_modes++;
 		}
 	}
 
@@ -2264,7 +2309,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 	 * Assume that the preferred modes are
 	 * arranged in priority order.
 	 */
-	intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+	num_modes += intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+
+	return num_modes;
 }
 
 static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -2272,13 +2319,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 
 	if (IS_TV(intel_sdvo_connector))
-		intel_sdvo_get_tv_modes(connector);
+		return intel_sdvo_get_tv_modes(connector);
 	else if (IS_LVDS(intel_sdvo_connector))
-		intel_sdvo_get_lvds_modes(connector);
+		return intel_sdvo_get_lvds_modes(connector);
 	else
-		intel_sdvo_get_ddc_modes(connector);
-
-	return !list_empty(&connector->probed_modes);
+		return intel_sdvo_get_ddc_modes(connector);
 }
 
 static int
@@ -2669,12 +2714,9 @@ static void
 intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
 			       struct intel_sdvo_connector *connector)
 {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.base.dev);
-
 	intel_attach_force_audio_property(&connector->base.base);
-	if (INTEL_GEN(dev_priv) >= 4 && IS_MOBILE(dev_priv)) {
+	if (intel_sdvo->colorimetry_cap & SDVO_COLORIMETRY_RGB220)
 		intel_attach_broadcast_rgb_property(&connector->base.base);
-	}
 	intel_attach_aspect_ratio_property(&connector->base.base);
 }
 
@@ -3315,6 +3357,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
 	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
 		goto err;
 
+	intel_sdvo->colorimetry_cap =
+		intel_sdvo_get_colorimetry_cap(intel_sdvo);
+
 	if (intel_sdvo_output_setup(intel_sdvo,
 				    intel_sdvo->caps.output_flags) != true) {
 		drm_dbg_kms(&dev_priv->drm,
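The SDVO changes gate limited-range (RGB220) output on the colorimetry capability the device actually reports, instead of on platform generation. A minimal sketch of that gating, assuming bitmask capability values as in the register-header fix below:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Colorimetry capabilities as a bitmask (mirrors the corrected
 * SDVO_COLORIMETRY_* definitions in the next hunk). */
#define COLORIMETRY_RGB256 (1 << 0)
#define COLORIMETRY_RGB220 (1 << 1)

/* Only allow limited-range output when the device reports the cap;
 * illustrative helper, not the driver's API. */
static bool can_do_limited_range(uint8_t colorimetry_cap)
{
	return (colorimetry_cap & COLORIMETRY_RGB220) != 0;
}

int main(void)
{
	printf("%d\n", can_do_limited_range(COLORIMETRY_RGB256));  /* 0 */
	printf("%d\n", can_do_limited_range(COLORIMETRY_RGB256 |
					    COLORIMETRY_RGB220));  /* 1 */
	return 0;
}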
@@ -705,10 +705,10 @@ struct intel_sdvo_enhancements_arg {
 #define SDVO_CMD_GET_PIXEL_REPLI	0x8c
 #define SDVO_CMD_GET_COLORIMETRY_CAP	0x8d
 #define SDVO_CMD_SET_COLORIMETRY	0x8e
-  #define SDVO_COLORIMETRY_RGB256	0x0
-  #define SDVO_COLORIMETRY_RGB220	0x1
-  #define SDVO_COLORIMETRY_YCrCb422	0x3
-  #define SDVO_COLORIMETRY_YCrCb444	0x4
+  #define SDVO_COLORIMETRY_RGB256	(1 << 0)
+  #define SDVO_COLORIMETRY_RGB220	(1 << 1)
+  #define SDVO_COLORIMETRY_YCrCb422	(1 << 2)
+  #define SDVO_COLORIMETRY_YCrCb444	(1 << 3)
 #define SDVO_CMD_GET_COLORIMETRY	0x8f
 #define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER	0x90
 #define SDVO_CMD_SET_AUDIO_STAT	0x91
@@ -820,6 +820,7 @@ struct bdb_lfp_power {
 	u16 adb;
 	u16 lace_enabled_status;
 	struct agressiveness_profile_entry aggressivenes[16];
+	u16 hobl; /* 232+ */
 } __packed;
 
 /*
@@ -1045,7 +1045,7 @@ static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
 				   const struct intel_crtc_state *crtc_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
 	struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
 
@@ -1055,9 +1055,9 @@ static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
 	/* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
 	drm_dsc_pps_payload_pack(&dp_dsc_pps_sdp.pps_payload, vdsc_cfg);
 
-	intel_dig_port->write_infoframe(encoder, crtc_state,
-					DP_SDP_PPS, &dp_dsc_pps_sdp,
-					sizeof(dp_dsc_pps_sdp));
+	dig_port->write_infoframe(encoder, crtc_state,
+				  DP_SDP_PPS, &dp_dsc_pps_sdp,
+				  sizeof(dp_dsc_pps_sdp));
 }
 
 void intel_dsc_enable(struct intel_encoder *encoder,
@@ -32,16 +32,17 @@ static void vma_clear_pages(struct i915_vma *vma)
 	vma->pages = NULL;
 }
 
-static int vma_bind(struct i915_vma *vma,
+static int vma_bind(struct i915_address_space *vm,
+		    struct i915_vma *vma,
 		    enum i915_cache_level cache_level,
 		    u32 flags)
 {
-	return vma->vm->vma_ops.bind_vma(vma, cache_level, flags);
+	return vm->vma_ops.bind_vma(vm, vma, cache_level, flags);
 }
 
-static void vma_unbind(struct i915_vma *vma)
+static void vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
 {
-	vma->vm->vma_ops.unbind_vma(vma);
+	vm->vma_ops.unbind_vma(vm, vma);
 }
 
 static const struct i915_vma_ops proxy_vma_ops = {
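The vma_bind()/vma_unbind() signature change passes the address space explicitly rather than chasing vma->vm inside the op. A toy illustration of the before/after shape (opaque stand-in types, not the i915 API):

#include <stdio.h>

struct address_space;                       /* opaque stand-in */
struct vma { struct address_space *vm; };

/* Old style: the callee re-derives the address space from the vma. */
static int bind_old(struct vma *vma)
{
	(void)vma->vm;  /* implicit dependency on vma->vm */
	return 0;
}

/* New style: the caller names the address space explicitly, so the
 * op no longer depends on vma->vm being stable at call time. */
static int bind_new(struct address_space *vm, struct vma *vma)
{
	(void)vm;
	(void)vma;
	return 0;
}

int main(void)
{
	struct vma v = { 0 };

	printf("%d %d\n", bind_old(&v), bind_new(v.vm, &v));
	return 0;
}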
@@ -101,8 +101,7 @@ static void lut_close(struct i915_gem_context *ctx)
 	struct radix_tree_iter iter;
 	void __rcu **slot;
 
-	lockdep_assert_held(&ctx->mutex);
-
+	mutex_lock(&ctx->lut_mutex);
 	rcu_read_lock();
 	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
 		struct i915_vma *vma = rcu_dereference_raw(*slot);
@@ -135,6 +134,7 @@ static void lut_close(struct i915_gem_context *ctx)
 		i915_gem_object_put(obj);
 	}
 	rcu_read_unlock();
+	mutex_unlock(&ctx->lut_mutex);
 }
 
 static struct intel_context *
@@ -342,6 +342,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	spin_unlock(&ctx->i915->gem.contexts.lock);
 
 	mutex_destroy(&ctx->engines_mutex);
+	mutex_destroy(&ctx->lut_mutex);
 
 	if (ctx->timeline)
 		intel_timeline_put(ctx->timeline);
@@ -725,6 +726,7 @@ __create_context(struct drm_i915_private *i915)
 	RCU_INIT_POINTER(ctx->engines, e);
 
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
+	mutex_init(&ctx->lut_mutex);
 
 	/* NB: Mark all slices as needing a remap so that when the context first
 	 * loads it will restore whatever remap state already exists. If there
@@ -1312,11 +1314,11 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
 	if (vm == rcu_access_pointer(ctx->vm))
 		goto unlock;
 
+	old = __set_ppgtt(ctx, vm);
+
 	/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
 	lut_close(ctx);
 
-	old = __set_ppgtt(ctx, vm);
-
 	/*
 	 * We need to flush any requests using the current ppgtt before
 	 * we release it as the requests do not hold a reference themselves,
@@ -1330,6 +1332,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
 	if (err) {
 		i915_vm_close(__set_ppgtt(ctx, old));
 		i915_vm_close(old);
+		lut_close(ctx); /* force a rebuild of the old obj:vma cache */
 	}
 
 unlock:
@@ -1397,11 +1400,12 @@ static int get_ringsize(struct i915_gem_context *ctx,
 }
 
 int
-i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+i915_gem_user_to_context_sseu(struct intel_gt *gt,
 			      const struct drm_i915_gem_context_param_sseu *user,
 			      struct intel_sseu *context)
 {
-	const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
+	const struct sseu_dev_info *device = &gt->info.sseu;
+	struct drm_i915_private *i915 = gt->i915;
 
 	/* No zeros in any field. */
 	if (!user->slice_mask || !user->subslice_mask ||
@@ -1534,7 +1538,7 @@ static int set_sseu(struct i915_gem_context *ctx,
 		goto out_ce;
 	}
 
-	ret = i915_gem_user_to_context_sseu(i915, &user_sseu, &sseu);
+	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
 	if (ret)
 		goto out_ce;
 
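The lut_mutex split gives the handles_vma lookup table its own narrow lock instead of reusing the context-wide ctx->mutex. A userspace-flavoured sketch of the pattern (illustrative names only, not the driver's data structures):

#include <pthread.h>
#include <stdio.h>

/* A broad context lock plus a dedicated lock for one table, so
 * table updates no longer contend with unrelated context state. */
struct context {
	pthread_mutex_t mutex;      /* broad context lock */
	pthread_mutex_t lut_mutex;  /* protects only the handle -> value table */
	int lut[16];
};

static void lut_insert(struct context *ctx, int handle, int value)
{
	pthread_mutex_lock(&ctx->lut_mutex);
	ctx->lut[handle & 15] = value;
	pthread_mutex_unlock(&ctx->lut_mutex);
}

int main(void)
{
	struct context ctx;

	pthread_mutex_init(&ctx.mutex, NULL);
	pthread_mutex_init(&ctx.lut_mutex, NULL);
	lut_insert(&ctx, 3, 42);
	printf("%d\n", ctx.lut[3]); /* 42 */
	return 0;
}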
@@ -225,7 +225,7 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
 struct i915_lut_handle *i915_lut_handle_alloc(void);
 void i915_lut_handle_free(struct i915_lut_handle *lut);
 
-int i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+int i915_gem_user_to_context_sseu(struct intel_gt *gt,
 				  const struct drm_i915_gem_context_param_sseu *user,
 				  struct intel_sseu *context);
 
@@ -170,6 +170,7 @@ struct i915_gem_context {
 	 * per vm, which may be one per context or shared with the global GTT)
 	 */
 	struct radix_tree_root handles_vma;
+	struct mutex lut_mutex;
 
 	/**
 	 * @name: arbitrary name, used for user debug
@@ -782,10 +782,15 @@ static int __eb_add_lut(struct i915_execbuffer *eb,
 
 	/* Check that the context hasn't been closed in the meantime */
 	err = -EINTR;
-	if (!mutex_lock_interruptible(&ctx->mutex)) {
-		err = -ENOENT;
-		if (likely(!i915_gem_context_is_closed(ctx)))
+	if (!mutex_lock_interruptible(&ctx->lut_mutex)) {
+		struct i915_address_space *vm = rcu_access_pointer(ctx->vm);
+
+		if (unlikely(vm && vma->vm != vm))
+			err = -EAGAIN; /* user racing with ctx set-vm */
+		else if (likely(!i915_gem_context_is_closed(ctx)))
 			err = radix_tree_insert(&ctx->handles_vma, handle, vma);
+		else
+			err = -ENOENT;
 		if (err == 0) { /* And nor has this handle */
 			struct drm_i915_gem_object *obj = vma->obj;
 
@@ -798,7 +803,7 @@ static int __eb_add_lut(struct i915_execbuffer *eb,
 			}
 			spin_unlock(&obj->lut_lock);
 		}
-		mutex_unlock(&ctx->mutex);
+		mutex_unlock(&ctx->lut_mutex);
 	}
 	if (unlikely(err))
 		goto err;
@@ -814,6 +819,8 @@ err:
 
 static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
 {
+	struct i915_address_space *vm = eb->context->vm;
+
 	do {
 		struct drm_i915_gem_object *obj;
 		struct i915_vma *vma;
@@ -821,7 +828,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
 
 		rcu_read_lock();
 		vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
-		if (likely(vma))
+		if (likely(vma && vma->vm == vm))
 			vma = i915_vma_tryget(vma);
 		rcu_read_unlock();
 		if (likely(vma))
@@ -831,7 +838,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
 		if (unlikely(!obj))
 			return ERR_PTR(-ENOENT);
 
-		vma = i915_vma_instance(obj, eb->context->vm, NULL);
+		vma = i915_vma_instance(obj, vm, NULL);
 		if (IS_ERR(vma)) {
 			i915_gem_object_put(obj);
 			return vma;
@@ -1973,8 +1980,7 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
 
 static int num_vcs_engines(const struct drm_i915_private *i915)
 {
-	return hweight64(INTEL_INFO(i915)->engine_mask &
-			 GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
+	return hweight64(VDBOX_MASK(&i915->gt));
 }
 
 /*
@@ -448,7 +448,7 @@ void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
  * mapping will then trigger a page fault on the next user access, allowing
  * fixup by vm_fault_gtt().
  */
-static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
+void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	intel_wakeref_t wakeref;
@@ -507,19 +507,6 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 	spin_unlock(&obj->mmo.lock);
 }
 
-/**
- * i915_gem_object_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- */
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
-{
-	i915_gem_object_release_mmap_gtt(obj);
-	i915_gem_object_release_mmap_offset(obj);
-}
-
 static struct i915_mmap_offset *
 lookup_mmo(struct drm_i915_gem_object *obj,
 	   enum i915_mmap_type mmap_type)
@@ -25,7 +25,8 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
 			      u32 handle, u64 *offset);
 
 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+
 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
 
 #endif
@@ -143,14 +143,14 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 		 * vma, in the same fd namespace, by virtue of flink/open.
 		 */
 
-		mutex_lock(&ctx->mutex);
+		mutex_lock(&ctx->lut_mutex);
 		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
 		if (vma) {
 			GEM_BUG_ON(vma->obj != obj);
 			GEM_BUG_ON(!atomic_read(&vma->open_count));
 			i915_vma_close(vma);
 		}
-		mutex_unlock(&ctx->mutex);
+		mutex_unlock(&ctx->lut_mutex);
 
 		i915_gem_context_put(lut->ctx);
 		i915_lut_handle_free(lut);
@@ -171,14 +171,35 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
 	atomic_dec(&i915->mm.free_count);
 }
 
+static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
+{
+	/* Skip serialisation and waking the device if known to be not used. */
+
+	if (obj->userfault_count)
+		i915_gem_object_release_mmap_gtt(obj);
+
+	if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
+		struct i915_mmap_offset *mmo, *mn;
+
+		i915_gem_object_release_mmap_offset(obj);
+
+		rbtree_postorder_for_each_entry_safe(mmo, mn,
+						     &obj->mmo.offsets,
+						     offset) {
+			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+					      &mmo->vma_node);
+			kfree(mmo);
+		}
+		obj->mmo.offsets = RB_ROOT;
+	}
+}
+
 static void __i915_gem_free_objects(struct drm_i915_private *i915,
 				    struct llist_node *freed)
 {
 	struct drm_i915_gem_object *obj, *on;
 
 	llist_for_each_entry_safe(obj, on, freed, freed) {
-		struct i915_mmap_offset *mmo, *mn;
-
 		trace_i915_gem_object_destroy(obj);
 
 		if (!list_empty(&obj->vma.list)) {
@@ -204,18 +225,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 			spin_unlock(&obj->vma.lock);
 		}
 
-		i915_gem_object_release_mmap(obj);
+		__i915_gem_object_free_mmaps(obj);
 
-		rbtree_postorder_for_each_entry_safe(mmo, mn,
-						     &obj->mmo.offsets,
-						     offset) {
-			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
-					      &mmo->vma_node);
-			kfree(mmo);
-		}
-		obj->mmo.offsets = RB_ROOT;
-
 		GEM_BUG_ON(obj->userfault_count);
 		GEM_BUG_ON(!list_empty(&obj->lut_list));
 
 		atomic_set(&obj->mm.pages_pin_count, 0);
@@ -258,10 +258,6 @@ struct page *
 i915_gem_object_get_page(struct drm_i915_gem_object *obj,
 			 unsigned int n);
 
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
-			       unsigned int n);
-
 dma_addr_t
 i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
 				    unsigned long n,
@@ -394,6 +390,8 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
 	i915_gem_object_unpin_pages(obj);
 }
 
+void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
+
 void
 i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
 				   unsigned int flush_domains);
@@ -408,6 +408,21 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
 	}
 }
 
+void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
+{
+	GEM_BUG_ON(!obj->mm.mapping);
+
+	/*
+	 * We allow removing the mapping from underneath pinned pages!
+	 *
+	 * Furthermore, since this is an unsafe operation reserved only
+	 * for construction time manipulation, we ignore locking prudence.
+	 */
+	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));
+
+	i915_gem_object_unpin_map(obj);
+}
+
 struct scatterlist *
 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
 		       unsigned int n,
@@ -533,20 +548,6 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
 	return nth_page(sg_page(sg), offset);
 }
 
-/* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
-			       unsigned int n)
-{
-	struct page *page;
-
-	page = i915_gem_object_get_page(obj, n);
-	if (!obj->mm.dirty)
-		set_page_dirty(page);
-
-	return page;
-}
-
 dma_addr_t
 i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
 				    unsigned long n,
@@ -13,6 +13,8 @@
 #include <linux/dma-buf.h>
 #include <linux/vmalloc.h>
 
+#include "gt/intel_gt_requests.h"
+
 #include "i915_trace.h"
 
 static bool swap_available(void)
@@ -111,15 +113,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
 	unsigned long count = 0;
 	unsigned long scanned = 0;
 
-	/*
-	 * When shrinking the active list, we should also consider active
-	 * contexts. Active contexts are pinned until they are retired, and
-	 * so can not be simply unbound to retire and unpin their pages. To
-	 * shrink the contexts, we must wait until the gpu is idle and
-	 * completed its switch to the kernel context. In short, we do
-	 * not have a good mechanism for idling a specific context.
-	 */
-
 	trace_i915_gem_shrink(i915, target, shrink);
 
 	/*
@@ -133,6 +126,20 @@ i915_gem_shrink(struct drm_i915_private *i915,
 		shrink &= ~I915_SHRINK_BOUND;
 	}
 
+	/*
+	 * When shrinking the active list, we should also consider active
+	 * contexts. Active contexts are pinned until they are retired, and
+	 * so can not be simply unbound to retire and unpin their pages. To
+	 * shrink the contexts, we must wait until the gpu is idle and
+	 * completed its switch to the kernel context. In short, we do
+	 * not have a good mechanism for idling a specific context, but
+	 * what we can do is give them a kick so that we do not keep idle
+	 * contexts around longer than is necessary.
+	 */
+	if (shrink & I915_SHRINK_ACTIVE)
+		/* Retire requests to unpin all idle contexts */
+		intel_gt_retire_requests(&i915->gt);
+
 	/*
 	 * As we may completely rewrite the (un)bound list whilst unbinding
 	 * (due to retiring requests) we have to strictly process only
@@ -408,26 +415,15 @@ void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
 void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
 				    struct mutex *mutex)
 {
-	bool unlock = false;
-
 	if (!IS_ENABLED(CONFIG_LOCKDEP))
 		return;
 
-	if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
-		mutex_acquire(&i915->drm.struct_mutex.dep_map,
-			      I915_MM_NORMAL, 0, _RET_IP_);
-		unlock = true;
-	}
-
 	fs_reclaim_acquire(GFP_KERNEL);
 
 	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
 	mutex_release(&mutex->dep_map, _RET_IP_);
 
 	fs_reclaim_release(GFP_KERNEL);
-
-	if (unlock)
-		mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_);
 }
 
 #define obj_to_i915(obj__) to_i915((obj__)->base.dev)
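The remaining body of i915_gem_shrinker_taints_mutex() is a lockdep-priming idiom: the mutex is taken and immediately dropped inside a simulated reclaim context purely so lockdep records the ordering, never to protect real work. The shape of that pattern, minus lockdep and fs_reclaim themselves (hypothetical helper name):

#include <pthread.h>

/* Acquire and immediately release, only to record an ordering
 * between "reclaim" and this mutex; the lock never guards data. */
static void prime_ordering(pthread_mutex_t *mutex)
{
	/* ... enter simulated reclaim context ... */
	pthread_mutex_lock(mutex);
	pthread_mutex_unlock(mutex);
	/* ... leave simulated reclaim context ... */
}

int main(void)
{
	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

	prime_ordering(&m);
	return 0;
}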
@@ -299,7 +299,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	i915_gem_object_unlock(obj);
 
 	/* Force the fence to be reacquired for GTT access */
-	i915_gem_object_release_mmap(obj);
+	i915_gem_object_release_mmap_gtt(obj);
 
 	/* Try to preallocate memory required to save swizzling on put-pages */
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -1229,7 +1229,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 	int inst = 0;
 	int ret = 0;
 
-	if (INTEL_GEN(i915) < 9 || !RUNTIME_INFO(i915)->sseu.has_slice_pg)
+	if (INTEL_GEN(i915) < 9)
 		return 0;
 
 	if (flags & TEST_RESET)
@@ -1255,6 +1255,9 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 		if (hweight32(engine->sseu.slice_mask) < 2)
 			continue;
 
+		if (!engine->gt->info.sseu.has_slice_pg)
+			continue;
+
 		/*
 		 * Gen11 VME friendly power-gated configuration with
 		 * half enabled sub-slices.
@@ -23,6 +23,8 @@ mock_context(struct drm_i915_private *i915,
 	INIT_LIST_HEAD(&ctx->link);
 	ctx->i915 = i915;
 
+	mutex_init(&ctx->mutex);
+
 	spin_lock_init(&ctx->stale.lock);
 	INIT_LIST_HEAD(&ctx->stale.engines);
 
@@ -35,7 +37,7 @@ mock_context(struct drm_i915_private *i915,
 	RCU_INIT_POINTER(ctx->engines, e);
 
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
-	mutex_init(&ctx->mutex);
+	mutex_init(&ctx->lut_mutex);
 
 	if (name) {
 		struct i915_ppgtt *ppgtt;
|
|||
#include "debugfs_engines.h"
|
||||
#include "debugfs_gt.h"
|
||||
#include "debugfs_gt_pm.h"
|
||||
#include "intel_sseu_debugfs.h"
|
||||
#include "uc/intel_uc_debugfs.h"
|
||||
#include "i915_drv.h"
|
||||
|
||||
|
@ -25,6 +26,7 @@ void debugfs_gt_register(struct intel_gt *gt)
|
|||
|
||||
debugfs_engines_register(gt, root);
|
||||
debugfs_gt_pm_register(gt, root);
|
||||
intel_sseu_debugfs_register(gt, root);
|
||||
|
||||
intel_uc_debugfs_register(>->uc, root);
|
||||
}
|
||||
|
|
|
@ -183,13 +183,11 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_table *pt, *alloc = NULL;
	intel_wakeref_t wakeref;
	bool flush = false;
	u64 from = start;
	unsigned int pde;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	spin_lock(&pd->lock);
	gen6_for_each_pde(pt, pd, start, length, pde) {
		const unsigned int count = gen6_pte_count(start, length);
@ -214,14 +212,20 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
				alloc = pt;
				pt = pd->entry[pde];
			}

			flush = true;
		}

		atomic_add(count, &pt->used);
	}
	spin_unlock(&pd->lock);

	if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND))
		gen6_flush_pd(ppgtt, from, start);
	if (flush && i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
			gen6_flush_pd(ppgtt, from, start);
	}

	goto out;

@ -230,7 +234,6 @@ unwind_out:
out:
	if (alloc)
		free_px(vm, alloc);
	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return ret;
}

@ -299,11 +302,12 @@ static void pd_vma_clear_pages(struct i915_vma *vma)
	vma->pages = NULL;
}

static int pd_vma_bind(struct i915_vma *vma,
static int pd_vma_bind(struct i915_address_space *vm,
		       struct i915_vma *vma,
		       enum i915_cache_level cache_level,
		       u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct gen6_ppgtt *ppgtt = vma->private;
	u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;

@ -314,7 +318,7 @@ static int pd_vma_bind(struct i915_vma *vma,
	return 0;
}

static void pd_vma_unbind(struct i915_vma *vma)
static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
	struct gen6_ppgtt *ppgtt = vma->private;
	struct i915_page_directory * const pd = ppgtt->base.pd;

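The gen6_alloc_va_range() hunk above stops taking a runtime-pm wakeref unconditionally and instead wakes the device only when a newly allocated page table actually needs flushing. A minimal user-space sketch of that pattern; runtime_pm_get()/flush_pd() are illustrative stand-ins, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

static int wakeref_count;

static int runtime_pm_get(void) { return ++wakeref_count; }
static void runtime_pm_put(void) { --wakeref_count; }

static void flush_pd(void) { printf("flushing page directory\n"); }

/* Allocate a VA range, waking the device only if new page tables appeared. */
static void alloc_va_range(bool added_new_pt)
{
	bool flush = false;

	if (added_new_pt)
		flush = true;	/* defer the flush decision */

	if (flush) {		/* take the wakeref only on the slow path */
		int wakeref = runtime_pm_get();

		flush_pd();
		runtime_pm_put();
		(void)wakeref;
	}
}

int main(void)
{
	alloc_va_range(false);	/* fast path: no device wakeup */
	alloc_va_range(true);	/* slow path: wake, flush, release */
	return 0;
}
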
@ -396,7 +396,7 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
	emit_batch(vma, memset(batch, 0, bv.max_size), &bv);

	i915_gem_object_flush_map(vma->obj);
	i915_gem_object_unpin_map(vma->obj);
	__i915_gem_object_release_map(vma->obj);

	return 0;
}

@ -314,13 +314,18 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
	lockdep_assert_held(&rq->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
		return true;

	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
		struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
		struct intel_context *ce = rq->context;
		struct list_head *pos;

		spin_lock(&b->irq_lock);
		GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));

		if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
			goto unlock;

		if (!__intel_breadcrumbs_arm_irq(b))
			goto unlock;

@ -30,7 +30,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	*cs++ = intel_sseu_make_rpcs(rq->engine->i915, &sseu);
	*cs++ = intel_sseu_make_rpcs(rq->engine->gt, &sseu);

	intel_ring_advance(rq, cs);

@ -370,7 +370,7 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
	 * instances.
	 */
	if ((INTEL_GEN(i915) >= 11 &&
	     RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) ||
	     engine->gt->info.vdbox_sfc_access & engine->mask) ||
	    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
		engine->uabi_capabilities |=
			I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
@ -450,6 +450,80 @@ void intel_engines_free(struct intel_gt *gt)
	}
}

/*
 * Determine which engines are fused off in our particular hardware.
 * Note that we have a catch-22 situation where we need to be able to access
 * the blitter forcewake domain to read the engine fuses, but at the same time
 * we need to know which engines are available on the system to know which
 * forcewake domains are present. We solve this by initializing the forcewake
 * domains based on the full engine mask in the platform capabilities before
 * calling this function and pruning the domains for fused-off engines
 * afterwards.
 */
static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_gt_info *info = &gt->info;
	struct intel_uncore *uncore = gt->uncore;
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;

	info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;

	if (INTEL_GEN(i915) < 11)
		return info->engine_mask;

	media_fuse = ~intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		     GEN11_GT_VEBOX_DISABLE_SHIFT;

	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(gt, _VCS(i))) {
			vdbox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vdbox_mask)) {
			info->engine_mask &= ~BIT(_VCS(i));
			drm_dbg(&i915->drm, "vcs%u fused off\n", i);
			continue;
		}

		/*
		 * In Gen11, only even numbered logical VDBOXes are
		 * hooked up to an SFC (Scaler & Format Converter) unit.
		 * In TGL each VDBOX has access to an SFC.
		 */
		if (INTEL_GEN(i915) >= 12 || logical_vdbox++ % 2 == 0)
			gt->info.vdbox_sfc_access |= BIT(i);
	}
	drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
		vdbox_mask, VDBOX_MASK(gt));
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));

	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(gt, _VECS(i))) {
			vebox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vebox_mask)) {
			info->engine_mask &= ~BIT(_VECS(i));
			drm_dbg(&i915->drm, "vecs%u fused off\n", i);
		}
	}
	drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
		vebox_mask, VEBOX_MASK(gt));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));

	return info->engine_mask;
}

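The fuse decode above inverts a disable register into an enable mask, prunes engines that are absent or fused off, and grants SFC access to even logical VDBOX instances (all instances on Gen12). A standalone sketch of the same mask arithmetic, with an assumed 8-bit fuse layout purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
#define VDBOX_DISABLE_MASK 0xffu	/* illustrative fuse field */

int main(void)
{
	uint32_t disable_fuse = 0x05;	/* pretend vcs0 and vcs2 are fused off */
	uint32_t present = BIT(0) | BIT(1) | BIT(2) | BIT(3); /* platform mask */
	uint32_t vdbox_mask = ~disable_fuse & VDBOX_DISABLE_MASK & present;
	uint32_t sfc_access = 0;
	unsigned int i, logical = 0;
	int gen12 = 0;

	for (i = 0; i < 4; i++) {
		if (!(vdbox_mask & BIT(i)))
			continue;	/* fused off or not present */
		/* even *logical* instances get an SFC pre-gen12 */
		if (gen12 || logical++ % 2 == 0)
			sfc_access |= BIT(i);
	}

	printf("vdbox mask %#x (%u engines), sfc access %#x\n",
	       vdbox_mask, __builtin_popcount(vdbox_mask), sfc_access);
	return 0;
}
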
/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @gt: pointer to struct intel_gt
@ -459,8 +533,7 @@ void intel_engines_free(struct intel_gt *gt)
int intel_engines_init_mmio(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_device_info *device_info = mkwrite_device_info(i915);
	const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
	const unsigned int engine_mask = init_engine_mask(gt);
	unsigned int mask = 0;
	unsigned int i;
	int err;
@ -473,7 +546,7 @@ int intel_engines_init_mmio(struct intel_gt *gt)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(i915, i))
		if (!HAS_ENGINE(gt, i))
			continue;

		err = intel_engine_setup(gt, i);
@ -489,14 +562,16 @@ int intel_engines_init_mmio(struct intel_gt *gt)
	 * engines.
	 */
	if (drm_WARN_ON(&i915->drm, mask != engine_mask))
		device_info->engine_mask = mask;
		gt->info.engine_mask = mask;

	RUNTIME_INFO(i915)->num_engines = hweight32(mask);
	gt->info.num_engines = hweight32(mask);

	intel_gt_check_and_clear_faults(gt);

	intel_setup_engine_capabilities(gt);

	intel_uncore_prune_engine_fw_domains(gt->uncore, gt);

	return 0;

cleanup:
@ -634,7 +709,7 @@ static int engine_setup_common(struct intel_engine_cs *engine)

	/* Use the whole device by default */
	engine->sseu =
		intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
		intel_sseu_from_device_info(&engine->gt->info.sseu);

	intel_engine_init_workarounds(engine);
	intel_engine_init_whitelist(engine);
@ -1000,7 +1075,7 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *i915 = engine->i915;
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
	const struct sseu_dev_info *sseu = &engine->gt->info.sseu;
	struct intel_uncore *uncore = engine->uncore;
	u32 mmio_base = engine->mmio_base;
	int slice;

@ -142,6 +142,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
		return true;

	GEM_BUG_ON(!intel_context_is_barrier(ce));
	GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)

@ -177,8 +177,12 @@ struct intel_engine_execlists {
	 * the first error interrupt, record the EIR and schedule the tasklet.
	 * In the tasklet, we process the pending CS events to ensure we have
	 * the guilty request, and then reset the engine.
	 *
	 * Low 16b are used by HW, with the upper 16b used as the enabling mask.
	 * Reserve the upper 16b for tracking internal errors.
	 */
	u32 error_interrupt;
#define ERROR_CSB	BIT(31)

	/**
	 * @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset

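The comment above splits error_interrupt in two: the low 16 bits mirror the HW EIR, while the upper bits are reserved for internally detected errors such as ERROR_CSB. A small sketch of that encoding; the register value is invented for the example:

#include <stdint.h>
#include <stdio.h>

#define ERROR_CSB (1u << 31)	/* internal error, above the HW's low 16 bits */
#define HW_EIR_MASK 0xffffu

int main(void)
{
	uint32_t error_interrupt = 0;
	uint32_t eir = 0x00030001;	/* pretend register read */

	/* HW-reported CS error: keep only the low 16 bits of the EIR */
	error_interrupt |= eir & HW_EIR_MASK;

	/* internally detected inconsistency (e.g. impossible CSB event) */
	error_interrupt |= ERROR_CSB;

	if (error_interrupt & HW_EIR_MASK)
		printf("CS error (user payload): %#x\n",
		       error_interrupt & HW_EIR_MASK);
	if (error_interrupt & ERROR_CSB)
		printf("invalid CSB event\n");
	return 0;
}
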
@ -201,7 +201,7 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
					 uabi_node);
		char old[sizeof(engine->name)];

		if (intel_gt_has_init_error(engine->gt))
		if (intel_gt_has_unrecoverable_error(engine->gt))
			continue; /* ignore incomplete engines */

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));

@ -436,7 +436,8 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}

static int ggtt_bind_vma(struct i915_vma *vma,
static int ggtt_bind_vma(struct i915_address_space *vm,
			 struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
@ -451,15 +452,15 @@ static int ggtt_bind_vma(struct i915_vma *vma,
	if (i915_gem_object_is_readonly(obj))
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
	vm->insert_entries(vm, vma, cache_level, pte_flags);
	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;

	return 0;
}

static void ggtt_unbind_vma(struct i915_vma *vma)
static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
	vm->clear_range(vm, vma->node.start, vma->size);
}

static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
@ -567,7 +568,8 @@ err:
	return ret;
}

static int aliasing_gtt_bind_vma(struct i915_vma *vma,
static int aliasing_gtt_bind_vma(struct i915_address_space *vm,
				 struct i915_vma *vma,
				 enum i915_cache_level cache_level,
				 u32 flags)
{
@ -580,44 +582,27 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_LOCAL_BIND) {
		struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
		struct i915_ppgtt *alias = i915_vm_to_ggtt(vm)->alias;

		if (flags & I915_VMA_ALLOC) {
			ret = alias->vm.allocate_va_range(&alias->vm,
							  vma->node.start,
							  vma->size);
			if (ret)
				return ret;

			set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
		}

		GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
				     __i915_vma_flags(vma)));
		alias->vm.insert_entries(&alias->vm, vma,
					 cache_level, pte_flags);
		ret = ppgtt_bind_vma(&alias->vm, vma, cache_level, flags);
		if (ret)
			return ret;
	}

	if (flags & I915_VMA_GLOBAL_BIND)
		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
		vm->insert_entries(vm, vma, cache_level, pte_flags);

	return 0;
}

static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
				    struct i915_vma *vma)
{
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
		struct i915_address_space *vm = vma->vm;

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		vm->clear_range(vm, vma->node.start, vma->size);
	}

	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
		struct i915_address_space *vm =
			&i915_vm_to_ggtt(vma->vm)->alias->vm;

		vm->clear_range(vm, vma->node.start, vma->size);
	}
	if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
		ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
}

static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)

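After the rework, aliasing_gtt_bind_vma() simply dispatches: local binds are delegated to ppgtt_bind_vma() on the alias vm, and global binds insert into the GGTT itself. A toy model of that dispatch, with simplified types standing in for i915_address_space:

#include <stdio.h>

#define GLOBAL_BIND 0x1
#define LOCAL_BIND  0x2

/* Toy address space; the names model i915_address_space loosely. */
struct addr_space {
	const char *name;
	void (*insert_entries)(struct addr_space *vm);
};

static void ggtt_insert(struct addr_space *vm) { printf("insert into %s\n", vm->name); }
static void ppgtt_insert(struct addr_space *vm) { printf("insert into %s\n", vm->name); }

/* Aliasing bind: local binds go to the shadow ppgtt, global to the GGTT. */
static void aliasing_bind(struct addr_space *ggtt, struct addr_space *alias,
			  unsigned int flags)
{
	if (flags & LOCAL_BIND)
		alias->insert_entries(alias);
	if (flags & GLOBAL_BIND)
		ggtt->insert_entries(ggtt);
}

int main(void)
{
	struct addr_space ggtt = { "ggtt", ggtt_insert };
	struct addr_space alias = { "aliasing ppgtt", ppgtt_insert };

	aliasing_bind(&ggtt, &alias, GLOBAL_BIND | LOCAL_BIND);
	return 0;
}
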
@ -44,6 +44,14 @@ void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
	gt->ggtt = ggtt;
}

int intel_gt_init_mmio(struct intel_gt *gt)
{
	intel_uc_init_mmio(&gt->uc);
	intel_sseu_info_init(gt);

	return intel_engines_init_mmio(gt);
}

static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;
@ -510,7 +518,7 @@ static int __engines_verify_workarounds(struct intel_gt *gt)

static void __intel_gt_disable(struct intel_gt *gt)
{
	intel_gt_set_wedged_on_init(gt);
	intel_gt_set_wedged_on_fini(gt);

	intel_gt_suspend_prepare(gt);
	intel_gt_suspend_late(gt);
@ -642,3 +650,11 @@ void intel_gt_driver_late_release(struct intel_gt *gt)
	intel_gt_fini_timelines(gt);
	intel_engines_free(gt);
}

void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p)
{
	drm_printf(p, "available engines: %x\n", info->engine_mask);

	intel_sseu_dump(&info->sseu, p);
}

@ -11,6 +11,7 @@
#include "intel_reset.h"

struct drm_i915_private;
struct drm_printer;

#define GT_TRACE(gt, fmt, ...) do { \
	const struct intel_gt *gt__ __maybe_unused = (gt); \
@ -35,6 +36,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)

void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
int intel_gt_init(struct intel_gt *gt);
void intel_gt_driver_register(struct intel_gt *gt);
@ -58,14 +60,21 @@ static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
	return i915_ggtt_offset(gt->scratch) + field;
}

static inline bool intel_gt_has_unrecoverable_error(const struct intel_gt *gt)
{
	return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags) ||
	       test_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
}

static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
{
	return __intel_reset_failed(&gt->reset);
	GEM_BUG_ON(intel_gt_has_unrecoverable_error(gt) &&
		   !test_bit(I915_WEDGED, &gt->reset.flags));

	return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
}

static inline bool intel_gt_has_init_error(const struct intel_gt *gt)
{
	return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
}
void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p);

#endif /* __INTEL_GT_H__ */

@ -27,7 +27,8 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
	if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
		u32 eir;

		eir = ENGINE_READ(engine, RING_EIR);
		/* Upper 16b are the enabling mask, rsvd for internal errors */
		eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
		ENGINE_TRACE(engine, "CS error: %x\n", eir);

		/* Disable the error interrupt until after the reset */
@ -457,7 +458,7 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt)
	 * RPS interrupts will get enabled/disabled on demand when RPS
	 * itself is enabled/disabled.
	 */
	if (HAS_ENGINE(gt->i915, VECS0)) {
	if (HAS_ENGINE(gt, VECS0)) {
		pm_irqs |= PM_VEBOX_USER_INTERRUPT;
		gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
	}

@ -188,7 +188,7 @@ int intel_gt_resume(struct intel_gt *gt)
	enum intel_engine_id id;
	int err;

	err = intel_gt_has_init_error(gt);
	err = intel_gt_has_unrecoverable_error(gt);
	if (err)
		return err;

@ -31,12 +31,15 @@ static bool engine_active(const struct intel_engine_cs *engine)
	return !list_empty(&engine->kernel_context->timeline->requests);
}

static bool flush_submission(struct intel_gt *gt)
static bool flush_submission(struct intel_gt *gt, long timeout)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool active = false;

	if (!timeout)
		return false;

	if (!intel_gt_pm_is_awake(gt))
		return false;

@ -139,7 +142,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
	if (unlikely(timeout < 0))
		timeout = -timeout, interruptible = false;

	flush_submission(gt); /* kick the ksoftirqd tasklets */
	flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
	spin_lock(&timelines->lock);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		if (!mutex_trylock(&tl->mutex)) {
@ -194,7 +197,7 @@ out_active: spin_lock(&timelines->lock);
	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);

	if (flush_submission(gt)) /* Wait, there's more! */
	if (flush_submission(gt, timeout)) /* Wait, there's more! */
		active_count++;

	return active_count ? timeout : 0;

@ -109,6 +109,17 @@ struct intel_gt {
	struct intel_gt_buffer_pool buffer_pool;

	struct i915_vma *scratch;

	struct intel_gt_info {
		intel_engine_mask_t engine_mask;
		u8 num_engines;

		/* Media engine access to SFC per instance */
		u8 vdbox_sfc_access;

		/* Slice/subslice/EU info */
		struct sseu_dev_info sseu;
	} info;
};

enum intel_gt_scratch_field {

@ -198,14 +198,16 @@ struct intel_gt;

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
	int (*bind_vma)(struct i915_address_space *vm,
			struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_vma *vma);
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma *vma);

	int (*set_pages)(struct i915_vma *vma);
	void (*clear_pages)(struct i915_vma *vma);

@ -566,6 +568,13 @@ int ggtt_set_pages(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma);

int ppgtt_bind_vma(struct i915_address_space *vm,
		   struct i915_vma *vma,
		   enum i915_cache_level cache_level,
		   u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma *vma);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

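The i915_vma_ops change threads the target address space through bind_vma/unbind_vma instead of deriving it from vma->vm, which is what lets the aliasing-GGTT path reuse the ppgtt helpers on a different vm. A compilable sketch of the reworked callback shape, with toy struct names:

#include <stdio.h>

struct vm;	/* forward declaration, as in the header */
struct vma { struct vm *vm; const char *name; };

/* The reworked ops take the target vm explicitly instead of vma->vm ... */
struct vma_ops {
	int  (*bind_vma)(struct vm *vm, struct vma *vma, unsigned int flags);
	void (*unbind_vma)(struct vm *vm, struct vma *vma);
};

struct vm { const char *name; struct vma_ops ops; };

static int bind(struct vm *vm, struct vma *vma, unsigned int flags)
{
	/* ... so the same vma can be bound into an alias vm, not just vma->vm */
	printf("bind %s into %s (flags %#x)\n", vma->name, vm->name, flags);
	return 0;
}

static void unbind(struct vm *vm, struct vma *vma)
{
	printf("unbind %s from %s\n", vma->name, vm->name);
}

int main(void)
{
	struct vm ggtt = { "ggtt", { bind, unbind } };
	struct vm alias = { "alias", { bind, unbind } };
	struct vma v = { &ggtt, "batch" };

	ggtt.ops.bind_vma(&ggtt, &v, 0x1);
	alias.ops.bind_vma(&alias, &v, 0x2);	/* different vm, same vma */
	alias.ops.unbind_vma(&alias, &v);
	ggtt.ops.unbind_vma(&ggtt, &v);
	return 0;
}
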
@ -2568,6 +2568,25 @@ static void process_csb(struct intel_engine_cs *engine)
	if (unlikely(head == tail))
		return;

	/*
	 * We will consume all events from HW, or at least pretend to.
	 *
	 * The sequence of events from the HW is deterministic, and derived
	 * from our writes to the ELSP, with a smidgen of variability for
	 * the arrival of the asynchronous requests wrt to the inflight
	 * execution. If the HW sends an event that does not correspond with
	 * the one we are expecting, we have to abandon all hope as we lose
	 * all tracking of what the engine is actually executing. We will
	 * only detect we are out of sequence with the HW when we get an
	 * 'impossible' event because we have already drained our own
	 * preemption/promotion queue. If this occurs, we know that we likely
	 * lost track of execution earlier and must unwind and restart, the
	 * simplest way is by stop processing the event queue and force the
	 * engine to reset.
	 */
	execlists->csb_head = tail;
	ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);

	/*
	 * Hopefully paired with a wmb() in HW!
	 *
@ -2577,8 +2596,6 @@ static void process_csb(struct intel_engine_cs *engine)
	 * we perform the READ_ONCE(*csb_write).
	 */
	rmb();

	ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
	do {
		bool promote;

@ -2613,6 +2630,11 @@ static void process_csb(struct intel_engine_cs *engine)
		if (promote) {
			struct i915_request * const *old = execlists->active;

			if (GEM_WARN_ON(!*execlists->pending)) {
				execlists->error_interrupt |= ERROR_CSB;
				break;
			}

			ring_set_paused(engine, 0);

			/* Point active to the new ELSP; prevent overwriting */
@ -2635,7 +2657,10 @@ static void process_csb(struct intel_engine_cs *engine)

			WRITE_ONCE(execlists->pending[0], NULL);
		} else {
			GEM_BUG_ON(!*execlists->active);
			if (GEM_WARN_ON(!*execlists->active)) {
				execlists->error_interrupt |= ERROR_CSB;
				break;
			}

			/* port0 completed, advanced to port1 */
			trace_ports(execlists, "completed", execlists->active);
@ -2686,7 +2711,6 @@ static void process_csb(struct intel_engine_cs *engine)
		}
	} while (head != tail);

	execlists->csb_head = head;
	set_timeslice(engine);

	/*
@ -3005,12 +3029,12 @@ static u32 active_ccid(struct intel_engine_cs *engine)
	return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
}

static bool execlists_capture(struct intel_engine_cs *engine)
static void execlists_capture(struct intel_engine_cs *engine)
{
	struct execlists_capture *cap;

	if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
		return true;
		return;

	/*
	 * We need to _quickly_ capture the engine state before we reset.
@ -3019,7 +3043,7 @@ static bool execlists_capture(struct intel_engine_cs *engine)
	 */
	cap = capture_regs(engine);
	if (!cap)
		return true;
		return;

	spin_lock_irq(&engine->active.lock);
	cap->rq = active_context(engine, active_ccid(engine));
@ -3056,14 +3080,13 @@ static bool execlists_capture(struct intel_engine_cs *engine)

	INIT_WORK(&cap->work, execlists_capture_work);
	schedule_work(&cap->work);
	return true;
	return;

err_rq:
	i915_request_put(cap->rq);
err_free:
	i915_gpu_coredump_put(cap->error);
	kfree(cap);
	return false;
}

static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
@ -3083,10 +3106,8 @@ static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
	tasklet_disable_nosync(&engine->execlists.tasklet);

	ring_set_paused(engine, 1); /* Freeze the current request in place */
	if (execlists_capture(engine))
		intel_engine_reset(engine, msg);
	else
		ring_set_paused(engine, 0);
	execlists_capture(engine);
	intel_engine_reset(engine, msg);

	tasklet_enable(&engine->execlists.tasklet);
	clear_and_wake_up_bit(bit, lock);
@ -3117,9 +3138,18 @@ static void execlists_submission_tasklet(unsigned long data)
	process_csb(engine);

	if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
		const char *msg;

		/* Generate the error message in priority wrt to the user! */
		if (engine->execlists.error_interrupt & GENMASK(15, 0))
			msg = "CS error"; /* thrown by a user payload */
		else if (engine->execlists.error_interrupt & ERROR_CSB)
			msg = "invalid CSB event";
		else
			msg = "internal error";

		engine->execlists.error_interrupt = 0;
		if (ENGINE_READ(engine, RING_ESR)) /* confirm the error */
			execlists_reset(engine, "CS error");
		execlists_reset(engine, msg);
	}

	if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
@ -3422,7 +3452,7 @@ __execlists_update_reg_state(const struct intel_context *ce,
	/* RPCS */
	if (engine->class == RENDER_CLASS) {
		regs[CTX_R_PWR_CLK_STATE] =
			intel_sseu_make_rpcs(engine->i915, &ce->sseu);
			intel_sseu_make_rpcs(engine->gt, &ce->sseu);

		i915_oa_init_reg_state(ce, engine);
	}
@ -3880,7 +3910,6 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
	struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
					    &wa_ctx->per_ctx };
	wa_bb_func_t wa_bb_fn[2];
	struct page *page;
	void *batch, *batch_ptr;
	unsigned int i;
	int ret;
@ -3916,14 +3945,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
		return ret;
	}

	page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
	batch = batch_ptr = kmap_atomic(page);
	batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);

	/*
	 * Emit the two workaround batch buffers, recording the offset from the
	 * start of the workaround batch buffer object for each and their
	 * respective sizes.
	 */
	batch_ptr = batch;
	for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
		wa_bb[i]->offset = batch_ptr - batch;
		if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
@ -3935,10 +3964,10 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
		batch_ptr = wa_bb_fn[i](engine, batch_ptr);
		wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
	}
	GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);

	BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);

	kunmap_atomic(batch);
	__i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
	__i915_gem_object_release_map(wa_ctx->vma->obj);
	if (ret)
		lrc_destroy_wa_ctx(engine);

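process_csb() above drains the CSB ring between head and tail and, on an event that cannot match its own tracking, stops and flags the engine for reset (ERROR_CSB). A simplified, self-contained model of that consume-or-abandon loop; the ring size and poison value are invented:

#include <stdint.h>
#include <stdio.h>

#define CSB_SIZE 8	/* illustrative ring size, not the HW's */

/* Drain a CSB-like ring; on an impossible event, stop and flag a reset. */
static int consume_events(const uint32_t *csb, unsigned int head,
			  unsigned int tail, int *need_reset)
{
	int consumed = 0;

	while (head != tail) {
		uint32_t event = csb[head];

		head = (head + 1) % CSB_SIZE;
		if (event == 0xdeadbeef) {	/* does not match expectations */
			*need_reset = 1;	/* abandon hope, reset engine */
			break;
		}
		consumed++;
	}

	return consumed;
}

int main(void)
{
	uint32_t csb[CSB_SIZE] = { 1, 2, 0xdeadbeef, 4 };
	int need_reset = 0;
	int n = consume_events(csb, 0, 4, &need_reset);

	printf("consumed %d events, reset=%d\n", n, need_reset);
	return 0;
}
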
@ -155,16 +155,16 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
	return ppgtt;
}

static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 flags)
int ppgtt_bind_vma(struct i915_address_space *vm,
		   struct i915_vma *vma,
		   enum i915_cache_level cache_level,
		   u32 flags)
{
	u32 pte_flags;
	int err;

	if (flags & I915_VMA_ALLOC) {
		err = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start, vma->size);
	if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
		err = vm->allocate_va_range(vm, vma->node.start, vma->size);
		if (err)
			return err;

@ -176,17 +176,16 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
	vm->insert_entries(vm, vma, cache_level, pte_flags);
	wmb();

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
		vm->clear_range(vm, vma->node.start, vma->size);
}

int ppgtt_set_pages(struct i915_vma *vma)

@ -150,7 +150,7 @@ static int render_state_setup(struct intel_renderstate *so,
	ret = 0;
out:
	__i915_gem_object_flush_map(so->vma->obj, 0, i * sizeof(u32));
	i915_gem_object_unpin_map(so->vma->obj);
	__i915_gem_object_release_map(so->vma->obj);
	return ret;
}

@ -342,7 +342,7 @@ static int gen6_reset_engines(struct intel_gt *gt,
static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
	i915_reg_t sfc_usage;
@ -417,7 +417,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
	i915_reg_t sfc_forced_lock;
	u32 sfc_forced_lock_bit;

@ -880,7 +880,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
		return true;

	/* Never fully initialised, recovery impossible */
	if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
	if (intel_gt_has_unrecoverable_error(gt))
		return false;

	GT_TRACE(gt, "start\n");
@ -930,7 +930,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
	 * Warn CI about the unrecoverable wedged condition.
	 * Time for a reboot.
	 */
	add_taint_for_CI(TAINT_WARN);
	add_taint_for_CI(gt->i915, TAINT_WARN);
	return false;
}

@ -1097,7 +1097,7 @@ taint:
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint_for_CI(TAINT_WARN);
	add_taint_for_CI(gt->i915, TAINT_WARN);
error:
	__intel_gt_set_wedged(gt);
	goto finish;
@ -1246,7 +1246,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
	 */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
	engine_mask &= gt->info.engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(gt->i915);
@ -1342,7 +1342,7 @@ int intel_gt_terminally_wedged(struct intel_gt *gt)
	if (!intel_gt_is_wedged(gt))
		return 0;

	if (intel_gt_has_init_error(gt))
	if (intel_gt_has_unrecoverable_error(gt))
		return -EIO;

	/* Reset still in progress? Maybe we will recover? */
@ -1360,6 +1360,15 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
			     I915_WEDGED_ON_INIT);
	intel_gt_set_wedged(gt);
	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);

	/* Wedged on init is non-recoverable */
	add_taint_for_CI(gt->i915, TAINT_WARN);
}

void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
{
	intel_gt_set_wedged(gt);
	set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
}

void intel_gt_init_reset(struct intel_gt *gt)

@ -47,8 +47,10 @@ int intel_gt_terminally_wedged(struct intel_gt *gt);
/*
 * There's no unset_wedged_on_init paired with this one.
 * Once we're wedged on init, there's no going back.
 * Same thing for unset_wedged_on_fini.
 */
void intel_gt_set_wedged_on_init(struct intel_gt *gt);
void intel_gt_set_wedged_on_fini(struct intel_gt *gt);

int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);

@ -71,14 +73,6 @@ void __intel_fini_wedge(struct intel_wedge_me *w);
	 (W)->gt; \
	 __intel_fini_wedge((W)))

static inline bool __intel_reset_failed(const struct intel_reset *reset)
{
	GEM_BUG_ON(test_bit(I915_WEDGED_ON_INIT, &reset->flags) ?
		   !test_bit(I915_WEDGED, &reset->flags) : false);

	return unlikely(test_bit(I915_WEDGED, &reset->flags));
}

bool intel_has_gpu_reset(const struct intel_gt *gt);
bool intel_has_reset_engine(const struct intel_gt *gt);

@ -34,12 +34,17 @@ struct intel_reset {
	 * longer use the GPU - similar to #I915_WEDGED bit. The difference is
	 * in the way we're handling "forced" unwedged (e.g. through debugfs),
	 * which is not allowed in case we failed to initialize.
	 *
	 * #I915_WEDGED_ON_FINI - Similar to #I915_WEDGED_ON_INIT, except we
	 * use it to mark that the GPU is no longer available (and prevent
	 * users from using it).
	 */
	unsigned long flags;
#define I915_RESET_BACKOFF	0
#define I915_RESET_MODESET	1
#define I915_RESET_ENGINE	2
#define I915_WEDGED_ON_INIT	(BITS_PER_LONG - 2)
#define I915_WEDGED_ON_INIT	(BITS_PER_LONG - 3)
#define I915_WEDGED_ON_FINI	(BITS_PER_LONG - 2)
#define I915_WEDGED		(BITS_PER_LONG - 1)

	struct mutex mutex; /* serialises wedging/unwedging */

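With I915_WEDGED_ON_FINI added, the wedge flags occupy the top three bits of the flags word, and "unrecoverable" now means wedged at either init or fini. A small sketch of the bit layout and the check, assuming 64-bit longs:

#include <stdio.h>

#define BITS_PER_LONG 64
#define WEDGED_ON_INIT (BITS_PER_LONG - 3)
#define WEDGED_ON_FINI (BITS_PER_LONG - 2)
#define WEDGED         (BITS_PER_LONG - 1)

static int test_bit(int nr, unsigned long flags) { return !!(flags & (1ul << nr)); }
static void set_bit(int nr, unsigned long *flags) { *flags |= 1ul << nr; }

/* Unrecoverable means wedged at either end of the device's life. */
static int has_unrecoverable_error(unsigned long flags)
{
	return test_bit(WEDGED_ON_INIT, flags) ||
	       test_bit(WEDGED_ON_FINI, flags);
}

int main(void)
{
	unsigned long flags = 0;

	set_bit(WEDGED, &flags);		/* every wedge sets WEDGED */
	set_bit(WEDGED_ON_FINI, &flags);	/* ... and fini marks it fatal */

	printf("wedged=%d unrecoverable=%d\n",
	       test_bit(WEDGED, flags), has_unrecoverable_error(flags));
	return 0;
}
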
@ -543,7 +543,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
			       vaddr, engine->context_size);

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);
		__i915_gem_object_release_map(obj);
	}

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
@ -649,7 +649,7 @@ static inline int mi_set_context(struct i915_request *rq,
	struct drm_i915_private *i915 = engine->i915;
	enum intel_engine_id id;
	const int num_engines =
		IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
		IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

@ -1062,11 +1062,12 @@ static bool gen6_rps_enable(struct intel_rps *rps)
static int chv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_gt *gt = rps_to_gt(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (RUNTIME_INFO(i915)->sseu.eu_total) {
	switch (gt->info.sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;

@ -60,10 +60,552 @@ intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
	return hweight32(intel_sseu_get_subslices(sseu, slice));
}

u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
		       int subslice)
{
	int slice_stride = sseu->max_subslices * sseu->eu_stride;

	return slice * slice_stride + subslice * sseu->eu_stride;
}

static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
			int subslice)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);
	u16 eu_mask = 0;

	for (i = 0; i < sseu->eu_stride; i++)
		eu_mask |=
			((u16)sseu->eu_mask[offset + i]) << (i * BITS_PER_BYTE);

	return eu_mask;
}

static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
			 u16 eu_mask)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);

	for (i = 0; i < sseu->eu_stride; i++)
		sseu->eu_mask[offset + i] =
			(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
}

static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
	u16 i, total = 0;

	for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
		total += hweight8(sseu->eu_mask[i]);

	return total;
}

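sseu_get_eus()/sseu_set_eus() above address a flat byte array with a per-slice stride of max_subslices * eu_stride bytes. The following standalone sketch reproduces that indexing for a made-up 3-slice topology:

#include <stdint.h>
#include <stdio.h>

#define MAX_SUBSLICES 8
#define EU_STRIDE 2	/* bytes of EU mask per subslice (16 EUs) */

/* Flat byte array indexed by (slice, subslice), as in sseu->eu_mask. */
static uint8_t eu_mask[3 * MAX_SUBSLICES * EU_STRIDE];

static int eu_idx(int slice, int subslice)
{
	int slice_stride = MAX_SUBSLICES * EU_STRIDE;

	return slice * slice_stride + subslice * EU_STRIDE;
}

static void set_eus(int slice, int subslice, uint16_t mask)
{
	int off = eu_idx(slice, subslice);

	eu_mask[off] = mask & 0xff;		/* little-endian byte split */
	eu_mask[off + 1] = mask >> 8;
}

static uint16_t get_eus(int slice, int subslice)
{
	int off = eu_idx(slice, subslice);

	return eu_mask[off] | (uint16_t)(eu_mask[off + 1]) << 8;
}

int main(void)
{
	set_eus(1, 3, 0xabcd);
	printf("slice1/ss3 EU mask: %#x\n", get_eus(1, 3));
	return 0;
}
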
static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
				    u8 s_en, u32 ss_en, u16 eu_en)
{
	int s, ss;

	/* ss_en represents entire subslice mask across all slices */
	GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
		   sizeof(ss_en) * BITS_PER_BYTE);

	for (s = 0; s < sseu->max_slices; s++) {
		if ((s_en & BIT(s)) == 0)
			continue;

		sseu->slice_mask |= BIT(s);

		intel_sseu_set_subslices(sseu, s, ss_en);

		for (ss = 0; ss < sseu->max_subslices; ss++)
			if (intel_sseu_has_subslice(sseu, s, ss))
				sseu_set_eus(sseu, s, ss, eu_en);
	}
	sseu->eu_per_subslice = hweight16(eu_en);
	sseu->eu_total = compute_eu_total(sseu);
}

static void gen12_sseu_info_init(struct intel_gt *gt)
{
	struct sseu_dev_info *sseu = &gt->info.sseu;
	struct intel_uncore *uncore = gt->uncore;
	u32 dss_en;
	u16 eu_en = 0;
	u8 eu_en_fuse;
	u8 s_en;
	int eu;

	/*
	 * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
	 * Instead of splitting these, provide userspace with an array
	 * of DSS to more closely represent the hardware resource.
	 */
	intel_sseu_set_info(sseu, 1, 6, 16);

	s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
	       GEN11_GT_S_ENA_MASK;

	dss_en = intel_uncore_read(uncore, GEN12_GT_DSS_ENABLE);

	/* one bit per pair of EUs */
	eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
		       GEN11_EU_DIS_MASK);
	for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
		if (eu_en_fuse & BIT(eu))
			eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);

	gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);

	/* TGL only supports slice-level power gating */
	sseu->has_slice_pg = 1;
}
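On Gen12 the EU disable fuse carries one bit per pair of EUs, so the loop above widens each fuse bit into two adjacent bits of the EU mask. The same expansion as a runnable sketch:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Expand a fuse with one bit per EU *pair* into a per-EU enable mask. */
static uint16_t expand_eu_pairs(uint8_t pair_fuse, int max_eus)
{
	uint16_t eu_en = 0;
	int eu;

	for (eu = 0; eu < max_eus / 2; eu++)
		if (pair_fuse & BIT(eu))
			eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);

	return eu_en;
}

int main(void)
{
	/* pairs 0, 1 and 3 enabled -> EUs 0-3 and 6-7, i.e. 0xcf */
	printf("eu mask: %#x\n", expand_eu_pairs(0x0b, 16));
	return 0;
}
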
static void gen11_sseu_info_init(struct intel_gt *gt)
{
	struct sseu_dev_info *sseu = &gt->info.sseu;
	struct intel_uncore *uncore = gt->uncore;
	u32 ss_en;
	u8 eu_en;
	u8 s_en;

	if (IS_ELKHARTLAKE(gt->i915))
		intel_sseu_set_info(sseu, 1, 4, 8);
	else
		intel_sseu_set_info(sseu, 1, 8, 8);

	s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
	       GEN11_GT_S_ENA_MASK;
	ss_en = ~intel_uncore_read(uncore, GEN11_GT_SUBSLICE_DISABLE);

	eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
		  GEN11_EU_DIS_MASK);

	gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);

	/* ICL has no power gating restrictions. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

static void gen10_sseu_info_init(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	struct sseu_dev_info *sseu = &gt->info.sseu;
	const u32 fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
	const int eu_mask = 0xff;
	u32 subslice_mask, eu_en;
	int s, ss;

	intel_sseu_set_info(sseu, 6, 4, 8);

	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
			   GEN10_F2_S_ENA_SHIFT;

	/* Slice0 */
	eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE0);
	for (ss = 0; ss < sseu->max_subslices; ss++)
		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
	/* Slice1 */
	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE1);
	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
	/* Slice2 */
	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
	/* Slice3 */
	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE2);
	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
	/* Slice4 */
	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
	/* Slice5 */
	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~intel_uncore_read(uncore, GEN10_EU_DISABLE3);
	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);

	subslice_mask = (1 << 4) - 1;
	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
			   GEN10_F2_SS_DIS_SHIFT);

	for (s = 0; s < sseu->max_slices; s++) {
		u32 subslice_mask_with_eus = subslice_mask;

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			if (sseu_get_eus(sseu, s, ss) == 0)
				subslice_mask_with_eus &= ~BIT(ss);
		}

		/*
		 * Slice0 can have up to 3 subslices, but there are only 2 in
		 * slice1/2.
		 */
		intel_sseu_set_subslices(sseu, s, s == 0 ?
					 subslice_mask_with_eus :
					 subslice_mask_with_eus & 0x3);
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CNL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery.
	 */
	sseu->eu_per_subslice =
		intel_sseu_subslice_total(sseu) ?
		DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
		0;

	/* No restrictions on Power Gating */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

static void cherryview_sseu_info_init(struct intel_gt *gt)
{
	struct sseu_dev_info *sseu = &gt->info.sseu;
	u32 fuse;
	u8 subslice_mask = 0;

	fuse = intel_uncore_read(gt->uncore, CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);
	intel_sseu_set_info(sseu, 1, 2, 8);

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);

		subslice_mask |= BIT(0);
		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);

		subslice_mask |= BIT(1);
		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
	}

	intel_sseu_set_subslices(sseu, 0, subslice_mask);

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CHV expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				sseu->eu_total /
					intel_sseu_subslice_total(sseu) :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}

static void gen9_sseu_info_init(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_device_info *info = mkwrite_device_info(i915);
	struct sseu_dev_info *sseu = &gt->info.sseu;
	struct intel_uncore *uncore = gt->uncore;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;
	int s, ss;

	fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/* BXT has a single slice and at most 3 subslices. */
	intel_sseu_set_info(sseu, IS_GEN9_LP(i915) ? 1 : 3,
			    IS_GEN9_LP(i915) ? 3 : 4, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = (1 << sseu->max_subslices) - 1;
	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
			   GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		intel_sseu_set_subslices(sseu, s, subslice_mask);

		eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s));
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int eu_per_ss;
			u8 eu_disabled_mask;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			eu_per_ss = sseu->max_eus_per_subslice -
				    hweight8(eu_disabled_mask);

			/*
			 * Record which subslice(s) has(have) 7 EUs. We
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	sseu->eu_per_subslice =
		intel_sseu_subslice_total(sseu) ?
		DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
		0;

	/*
	 * SKL+ supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT+ supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	sseu->has_slice_pg =
		!IS_GEN9_LP(i915) && hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(i915) && intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_GEN9_LP(i915)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;

		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}

static void bdw_sseu_info_init(struct intel_gt *gt)
{
	struct sseu_dev_info *sseu = &gt->info.sseu;
	struct intel_uncore *uncore = gt->uncore;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
	u32 eu_disable0, eu_disable1, eu_disable2;

	fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	intel_sseu_set_info(sseu, 3, 3, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
			   GEN8_F2_SS_DIS_SHIFT);
	eu_disable0 = intel_uncore_read(uncore, GEN8_EU_DISABLE0);
	eu_disable1 = intel_uncore_read(uncore, GEN8_EU_DISABLE1);
	eu_disable2 = intel_uncore_read(uncore, GEN8_EU_DISABLE2);
	eu_disable[0] = eu_disable0 & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (eu_disable0 >> GEN8_EU_DIS0_S1_SHIFT) |
			((eu_disable1 & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (eu_disable1 >> GEN8_EU_DIS1_S2_SHIFT) |
			((eu_disable2 & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		intel_sseu_set_subslices(sseu, s, subslice_mask);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u8 eu_disabled_mask;
			u32 n_disabled;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask =
				eu_disable[s] >> (ss * sseu->max_eus_per_subslice);

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			n_disabled = hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (sseu->max_eus_per_subslice - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice =
		intel_sseu_subslice_total(sseu) ?
		DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
		0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

static void hsw_sseu_info_init(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct sseu_dev_info *sseu = &gt->info.sseu;
	u32 fuse1;
	u8 subslice_mask = 0;
	int s, ss;

	/*
	 * There isn't a register to tell us how many slices/subslices. We
	 * work off the PCI-ids here.
	 */
	switch (INTEL_INFO(i915)->gt) {
	default:
		MISSING_CASE(INTEL_INFO(i915)->gt);
		fallthrough;
	case 1:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0);
		break;
	case 2:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0) | BIT(1);
		break;
	case 3:
		sseu->slice_mask = BIT(0) | BIT(1);
		subslice_mask = BIT(0) | BIT(1);
		break;
	}

	fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
	default:
		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
			     HSW_F1_EU_DIS_SHIFT);
		fallthrough;
	case HSW_F1_EU_DIS_10EUS:
		sseu->eu_per_subslice = 10;
		break;
	case HSW_F1_EU_DIS_8EUS:
		sseu->eu_per_subslice = 8;
		break;
	case HSW_F1_EU_DIS_6EUS:
		sseu->eu_per_subslice = 6;
		break;
	}

	intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
			    hweight8(subslice_mask),
			    sseu->eu_per_subslice);

	for (s = 0; s < sseu->max_slices; s++) {
		intel_sseu_set_subslices(sseu, s, subslice_mask);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			sseu_set_eus(sseu, s, ss,
				     (1UL << sseu->eu_per_subslice) - 1);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/* No powergating for you. */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

void intel_sseu_info_init(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_HASWELL(i915))
		hsw_sseu_info_init(gt);
	else if (IS_CHERRYVIEW(i915))
		cherryview_sseu_info_init(gt);
	else if (IS_BROADWELL(i915))
		bdw_sseu_info_init(gt);
	else if (IS_GEN(i915, 9))
		gen9_sseu_info_init(gt);
	else if (IS_GEN(i915, 10))
		gen10_sseu_info_init(gt);
	else if (IS_GEN(i915, 11))
		gen11_sseu_info_init(gt);
	else if (INTEL_GEN(i915) >= 12)
		gen12_sseu_info_init(gt);
}

u32 intel_sseu_make_rpcs(struct intel_gt *gt,
			 const struct intel_sseu *req_sseu)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
	struct drm_i915_private *i915 = gt->i915;
	const struct sseu_dev_info *sseu = &gt->info.sseu;
	bool subslice_pg = sseu->has_subslice_pg;
	u8 slices, subslices;
	u32 rpcs = 0;

@ -173,3 +715,48 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,

	return rpcs;
}

void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
	int s;

	drm_printf(p, "slice total: %u, mask=%04x\n",
		   hweight8(sseu->slice_mask), sseu->slice_mask);
	drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   intel_sseu_get_subslices(sseu, s));
	}
	drm_printf(p, "EU total: %u\n", sseu->eu_total);
	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
	drm_printf(p, "has slice power gating: %s\n",
		   yesno(sseu->has_slice_pg));
	drm_printf(p, "has subslice power gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}

void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
			       struct drm_printer *p)
{
	int s, ss;

	if (sseu->max_slices == 0) {
		drm_printf(p, "Unavailable\n");
		return;
	}

	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   intel_sseu_get_subslices(sseu, s));

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u16 enabled_eus = sseu_get_eus(sseu, s, ss);

			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
				   ss, hweight16(enabled_eus), enabled_eus);
		}
	}
}

@ -13,6 +13,8 @@
#include "i915_gem.h"

struct drm_i915_private;
struct intel_gt;
struct drm_printer;

#define GEN_MAX_SLICES		(6) /* CNL upper bound */
#define GEN_MAX_SUBSLICES	(8) /* ICL upper bound */
@ -94,7 +96,13 @@ u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice);
void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
			      u32 ss_mask);

u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
void intel_sseu_info_init(struct intel_gt *gt);

u32 intel_sseu_make_rpcs(struct intel_gt *gt,
			 const struct intel_sseu *req_sseu);

void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p);
void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
			       struct drm_printer *p);

#endif /* __INTEL_SSEU_H__ */

@@ -0,0 +1,306 @@
// SPDX-License-Identifier: MIT

/*
 * Copyright © 2020 Intel Corporation
 */

#include "debugfs_gt.h"
#include "intel_sseu_debugfs.h"
#include "i915_drv.h"

static void sseu_copy_subslices(const struct sseu_dev_info *sseu,
				int slice, u8 *to_mask)
{
	int offset = slice * sseu->ss_stride;

	memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
}

static void cherryview_sseu_device_status(struct intel_gt *gt,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	struct intel_uncore *uncore = gt->uncore;
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG1);
	sig1[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG1);
	sig2[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG2);
	sig2[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}

static void gen10_sseu_device_status(struct intel_gt *gt,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	struct intel_uncore *uncore = gt->uncore;
	const struct intel_gt_info *info = &gt->info;
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = intel_uncore_read(uncore, GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = intel_uncore_read(uncore,
						  GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = intel_uncore_read(uncore,
						      GEN10_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (info->sseu.has_subslice_pg &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

static void gen9_sseu_device_status(struct intel_gt *gt,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	struct intel_uncore *uncore = gt->uncore;
	const struct intel_gt_info *info = &gt->info;
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = intel_uncore_read(uncore, GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2 * s] =
			intel_uncore_read(uncore, GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] =
			intel_uncore_read(uncore, GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(gt->i915))
			sseu_copy_subslices(&info->sseu, s,
					    sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;
			u8 ss_idx = s * info->sseu.ss_stride +
				    ss / BITS_PER_BYTE;

			if (IS_GEN9_LP(gt->i915)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[ss_idx] |=
					BIT(ss % BITS_PER_BYTE);
			}

			eu_cnt = eu_reg[2 * s + ss / 2] & eu_mask[ss % 2];
			eu_cnt = 2 * hweight32(eu_cnt);

			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

static void bdw_sseu_device_status(struct intel_gt *gt,
				   struct sseu_dev_info *sseu)
{
	const struct intel_gt_info *info = &gt->info;
	u32 slice_info = intel_uncore_read(gt->uncore, GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice = info->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++)
			sseu_copy_subslices(&info->sseu, s,
					    sseu->subslice_mask);
		sseu->eu_total = sseu->eu_per_subslice *
				 intel_sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu = info->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}

static void i915_print_sseu_info(struct seq_file *m,
				 bool is_available_info,
				 bool has_pooled_eu,
				 const struct sseu_dev_info *sseu)
{
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   intel_sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, intel_sseu_subslices_per_slice(sseu, s));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(has_pooled_eu));
	if (has_pooled_eu)
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}

/*
 * this is called from top-level debugfs as well, so we can't get the gt from
 * the seq_file.
 */
int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	const struct intel_gt_info *info = &gt->info;
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(i915) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, HAS_POOLED_EU(i915), &info->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	intel_sseu_set_info(&sseu, info->sseu.max_slices,
			    info->sseu.max_subslices,
			    info->sseu.max_eus_per_subslice);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		if (IS_CHERRYVIEW(i915))
			cherryview_sseu_device_status(gt, &sseu);
		else if (IS_BROADWELL(i915))
			bdw_sseu_device_status(gt, &sseu);
		else if (IS_GEN(i915, 9))
			gen9_sseu_device_status(gt, &sseu);
		else if (INTEL_GEN(i915) >= 10)
			gen10_sseu_device_status(gt, &sseu);
	}

	i915_print_sseu_info(m, false, HAS_POOLED_EU(i915), &sseu);

	return 0;
}

static int sseu_status_show(struct seq_file *m, void *unused)
{
	struct intel_gt *gt = m->private;

	return intel_sseu_status(m, gt);
}
DEFINE_GT_DEBUGFS_ATTRIBUTE(sseu_status);

static int rcs_topology_show(struct seq_file *m, void *unused)
{
	struct intel_gt *gt = m->private;
	struct drm_printer p = drm_seq_file_printer(m);

	intel_sseu_print_topology(&gt->info.sseu, &p);

	return 0;
}
DEFINE_GT_DEBUGFS_ATTRIBUTE(rcs_topology);

void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
	static const struct debugfs_gt_file files[] = {
		{ "sseu_status", &sseu_status_fops, NULL },
		{ "rcs_topology", &rcs_topology_fops, NULL },
	};

	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}

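The Cherryview status read above derives each subslice's EU count from four power-gate bits, each of which gates a pair of EUs. A standalone sketch of the same arithmetic; the bit positions below are illustrative stand-ins, not the real register layout from i915_reg.h:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the CHV_EU*_PG_ENABLE bits. */
#define EU08_PG_ENABLE  (1u << 0)
#define EU19_PG_ENABLE  (1u << 1)
#define EU210_PG_ENABLE (1u << 2)
#define EU311_PG_ENABLE (1u << 3)

/* Each PG bit that is *clear* means a pair of EUs is powered. */
static unsigned int chv_eu_count(uint32_t sig)
{
	return ((sig & EU08_PG_ENABLE) ? 0 : 2) +
	       ((sig & EU19_PG_ENABLE) ? 0 : 2) +
	       ((sig & EU210_PG_ENABLE) ? 0 : 2) +
	       ((sig & EU311_PG_ENABLE) ? 0 : 2);
}

int main(void)
{
	/* EUs 2..10 power-gated: expect 6 live EUs out of 8 */
	printf("eu_cnt = %u\n", chv_eu_count(EU210_PG_ENABLE));
	return 0;
}
```
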
@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: MIT */

/*
 * Copyright © 2020 Intel Corporation
 */

#ifndef INTEL_SSEU_DEBUGFS_H
#define INTEL_SSEU_DEBUGFS_H

struct intel_gt;
struct dentry;
struct seq_file;

int intel_sseu_status(struct seq_file *m, struct intel_gt *gt);
void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root);

#endif /* INTEL_SSEU_DEBUGFS_H */

@@ -73,6 +73,8 @@ hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
		return vma;
	}

	GT_TRACE(timeline->gt, "new HWSP allocated\n");

	vma->private = hwsp;
	hwsp->gt = timeline->gt;
	hwsp->vma = vma;

@@ -327,6 +329,8 @@ int intel_timeline_pin(struct intel_timeline *tl)
	tl->hwsp_offset =
		i915_ggtt_offset(tl->hwsp_ggtt) +
		offset_in_page(tl->hwsp_offset);
	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
		 tl->fence_context, tl->hwsp_offset);

	cacheline_acquire(tl->hwsp_cacheline);
	if (atomic_fetch_inc(&tl->pin_count)) {

@@ -434,6 +438,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
	int err;

	might_lock(&tl->gt->ggtt->vm.mutex);
	GT_TRACE(tl->gt, "timeline:%llx wrapped\n", tl->fence_context);

	/*
	 * If there is an outstanding GPU reference to this cacheline,

@@ -497,6 +502,8 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
	memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);

	tl->hwsp_offset += i915_ggtt_offset(vma);
	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
		 tl->fence_context, tl->hwsp_offset);

	cacheline_acquire(cl);
	tl->hwsp_cacheline = cl;

@@ -404,7 +404,7 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
				struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_gt *gt = engine->gt;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

@@ -415,7 +415,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
		if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
			continue;

		/*

@@ -424,7 +424,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
		 *
		 * -> 0 <= ss <= 3;
		 */
		ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
		ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

@@ -1036,7 +1036,7 @@ cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
static void
wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
	const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
	unsigned int slice, subslice;
	u32 l3_en, mcr, mcr_mask;

@@ -1649,11 +1649,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
			    GEN7_SARCHKMD,
			    GEN7_DISABLE_SAMPLER_PREFETCH);

		/* Wa_1407928979:tgl */
		wa_write_or(wal,
			    GEN7_FF_THREAD_MODE,
			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);

		/* Wa_1408615072:tgl */
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
			    VSUNIT_CLKGATE_DIS_TGL);

@@ -1677,6 +1672,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
		 * Wa_14010229206:tgl
		 */
		wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);

		/*
		 * Wa_1407928979:tgl A*
		 * Wa_18011464164:tgl B0+
		 * Wa_22010931296:tgl B0+
		 */
		wa_write_or(wal, GEN7_FF_THREAD_MODE,
			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
	}

	if (IS_GEN(i915, 11)) {

@@ -963,7 +963,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer,
		goto out;

	if (i915_request_wait(head, 0,
			      2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
			      2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
		pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
		       count, n);
		GEM_TRACE_DUMP();

@@ -3569,8 +3569,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
	}

	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
		count, flags, smoke->gt->info.num_engines, smoke->ncontext);
	return 0;
}

@@ -3597,8 +3596,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
	} while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));

	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
		count, flags, smoke->gt->info.num_engines, smoke->ncontext);
	return 0;
}

@@ -233,7 +233,7 @@ int live_rc6_ctx_wa(void *arg)
		    i915_reset_engine_count(error, engine)) {
			pr_err("%s: GPU reset required\n",
			       engine->name);
			add_taint_for_CI(TAINT_WARN);
			add_taint_for_CI(gt->i915, TAINT_WARN);
			err = -EIO;
			goto out;
		}

@@ -562,8 +562,9 @@ static int live_hwsp_engine(void *arg)
		struct intel_timeline *tl = timelines[n];

		if (!err && *tl->hwsp_seqno != n) {
			pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
			       n, *tl->hwsp_seqno);
			pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
			       n, tl->hwsp_offset, *tl->hwsp_seqno);
			GEM_TRACE_DUMP();
			err = -EINVAL;
		}
		intel_timeline_put(tl);

@@ -633,8 +634,9 @@ out:
		struct intel_timeline *tl = timelines[n];

		if (!err && *tl->hwsp_seqno != n) {
			pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
			       n, *tl->hwsp_seqno);
			pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
			       n, tl->hwsp_offset, *tl->hwsp_seqno);
			GEM_TRACE_DUMP();
			err = -EINVAL;
		}
		intel_timeline_put(tl);

@@ -965,8 +967,9 @@ static int live_hwsp_recycle(void *arg)
		}

		if (*tl->hwsp_seqno != count) {
			pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
			pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
			       count, tl->hwsp_offset, *tl->hwsp_seqno);
			GEM_TRACE_DUMP();
			err = -EINVAL;
		}

@@ -67,7 +67,7 @@ struct __guc_ads_blob {

static void __guc_ads_init(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	struct intel_gt *gt = guc_to_gt(guc);
	struct __guc_ads_blob *blob = guc->ads_blob;
	const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
	u32 base;

@@ -99,13 +99,13 @@ static void __guc_ads_init(struct intel_guc *guc)
	}

	/* System info */
	blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask);
	blob->system_info.slice_enabled = hweight8(gt->info.sseu.slice_mask);
	blob->system_info.rcs_enabled = 1;
	blob->system_info.bcs_enabled = 1;

	blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv);
	blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv);
	blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
	blob->system_info.vdbox_enable_mask = VDBOX_MASK(gt);
	blob->system_info.vebox_enable_mask = VEBOX_MASK(gt);
	blob->system_info.vdbox_sfc_support_mask = gt->info.vdbox_sfc_access;

	base = intel_guc_ggtt_offset(guc, guc->ads_vma);

@@ -267,8 +267,17 @@ static void __uc_fetch_firmwares(struct intel_uc *uc)
	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	err = intel_uc_fw_fetch(&uc->guc.fw);
	if (err)
	if (err) {
		/* Make sure we transition out of transient "SELECTED" state */
		if (intel_uc_wants_huc(uc)) {
			drm_dbg(&uc_to_gt(uc)->i915->drm,
				"Failed to fetch GuC: %d disabling HuC\n", err);
			intel_uc_fw_change_status(&uc->huc.fw,
						  INTEL_UC_FIRMWARE_ERROR);
		}

		return;
	}

	if (intel_uc_wants_huc(uc))
		intel_uc_fw_fetch(&uc->huc.fw);

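The HuC adjustment above exists because both firmwares pass through a transient "SELECTED" state during fetch; if the GuC fetch fails, a HuC left in SELECTED would look usable later on. A toy state machine illustrating that transition; the states and names are illustrative, only loosely modelled on intel_uc_fw_status:

```c
#include <stdio.h>

/* Illustrative firmware states, loosely following intel_uc_fw_status */
enum fw_status { FW_DISABLED, FW_SELECTED, FW_ERROR, FW_AVAILABLE };

struct uc { enum fw_status guc, huc; };

/* If the GuC fetch fails, a HuC still sitting in SELECTED must be
 * pushed to ERROR so nothing later mistakes it for usable firmware. */
static void fetch_firmwares(struct uc *uc, int guc_fetch_err)
{
	if (guc_fetch_err) {
		if (uc->huc == FW_SELECTED)
			uc->huc = FW_ERROR;
		return;
	}
	uc->guc = FW_AVAILABLE;
	uc->huc = FW_AVAILABLE;	/* assume the HuC fetch succeeds too */
}

int main(void)
{
	struct uc uc = { .guc = FW_SELECTED, .huc = FW_SELECTED };

	fetch_firmwares(&uc, -2 /* e.g. a fetch error */);
	printf("guc=%d huc=%d\n", uc.guc, uc.huc);	/* huc == FW_ERROR */
	return 0;
}
```
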
@@ -4,14 +4,41 @@
 */

#include <linux/debugfs.h>
#include <drm/drm_print.h>

#include "gt/debugfs_gt.h"
#include "intel_guc_debugfs.h"
#include "intel_huc_debugfs.h"
#include "intel_uc.h"
#include "intel_uc_debugfs.h"

static int uc_usage_show(struct seq_file *m, void *data)
{
	struct intel_uc *uc = m->private;
	struct drm_printer p = drm_seq_file_printer(m);

	drm_printf(&p, "[guc] supported:%s wanted:%s used:%s\n",
		   yesno(intel_uc_supports_guc(uc)),
		   yesno(intel_uc_wants_guc(uc)),
		   yesno(intel_uc_uses_guc(uc)));
	drm_printf(&p, "[huc] supported:%s wanted:%s used:%s\n",
		   yesno(intel_uc_supports_huc(uc)),
		   yesno(intel_uc_wants_huc(uc)),
		   yesno(intel_uc_uses_huc(uc)));
	drm_printf(&p, "[submission] supported:%s wanted:%s used:%s\n",
		   yesno(intel_uc_supports_guc_submission(uc)),
		   yesno(intel_uc_wants_guc_submission(uc)),
		   yesno(intel_uc_uses_guc_submission(uc)));

	return 0;
}
DEFINE_GT_DEBUGFS_ATTRIBUTE(uc_usage);

void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
{
	static const struct debugfs_gt_file files[] = {
		{ "usage", &uc_usage_fops, NULL },
	};
	struct dentry *root;

	if (!gt_root)

@@ -25,6 +52,8 @@ void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
	if (IS_ERR(root))
		return;

	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), uc);

	intel_guc_debugfs_register(&uc->guc, root);
	intel_huc_debugfs_register(&uc->huc, root);
}

@@ -347,7 +347,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
			gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
			vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
		}
		engine_mask &= INTEL_INFO(vgpu->gvt->gt->i915)->engine_mask;
		engine_mask &= vgpu->gvt->gt->info.engine_mask;
	}

	/* vgpu_lock already hold by emulate mmio r/w */

@@ -1868,7 +1868,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
	if (HAS_ENGINE(dev_priv, VCS1)) \
	if (HAS_ENGINE(gvt->gt, VCS1)) \
		MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)

@@ -540,7 +540,7 @@ static void gen8_init_irq(
	SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
	SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);

	if (HAS_ENGINE(gvt->gt->i915, VCS1)) {
	if (HAS_ENGINE(gvt->gt, VCS1)) {
		SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
			     INTEL_GVT_IRQ_INFO_GT1);
		SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,

@@ -171,7 +171,7 @@ static void load_render_mocs(const struct intel_engine_cs *engine)
		return;

	for (ring_id = 0; ring_id < cnt; ring_id++) {
		if (!HAS_ENGINE(engine->i915, ring_id))
		if (!HAS_ENGINE(engine->gt, ring_id))
			continue;

		offset.reg = regs[ring_id];

@@ -34,11 +34,13 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
#include "gt/intel_sseu_debugfs.h"

#include "i915_debugfs.h"
#include "i915_debugfs_params.h"

@@ -61,6 +63,7 @@ static int i915_capabilities(struct seq_file *m, void *data)

	intel_device_info_print_static(INTEL_INFO(i915), &p);
	intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
	intel_gt_info_print(&i915->gt.info, &p);
	intel_driver_caps_print(&i915->caps, &p);

	kernel_param_lock(THIS_MODULE);

@@ -492,6 +495,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		if (HAS_MASTER_UNIT_IRQ(dev_priv))
			seq_printf(m, "Master Unit Interrupt Control:  %08x\n",
				   I915_READ(DG1_MSTR_UNIT_INTR));

		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

@@ -1138,13 +1145,20 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	/* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
	if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));

@@ -1173,9 +1187,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;

@@ -1316,16 +1327,6 @@ static int i915_engine_info(struct seq_file *m, void *unused)
	return 0;
}

static int i915_rcs_topology(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);

	return 0;
}

static int i915_shrinker_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

@@ -1572,264 +1573,16 @@ i915_cache_sharing_set(void *data, u64 val)
	return 0;
}

static void
intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
			  u8 *to_mask)
{
	int offset = slice * sseu->ss_stride;

	memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}

static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (info->sseu.has_subslice_pg &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;
			u8 ss_idx = s * info->sseu.ss_stride +
				    ss / BITS_PER_BYTE;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[ss_idx] |=
					BIT(ss % BITS_PER_BYTE);
			}

			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
				   struct sseu_dev_info *sseu)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice = info->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++)
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);
		sseu->eu_total = sseu->eu_per_subslice *
				 intel_sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu = info->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}

static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   intel_sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, intel_sseu_subslices_per_slice(sseu, s));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_gt *gt = &i915->gt;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &info->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	intel_sseu_set_info(&sseu, info->sseu.max_slices,
			    info->sseu.max_subslices,
			    info->sseu.max_eus_per_subslice);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			bdw_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
	return intel_sseu_status(m, gt);
}

static int i915_forcewake_open(struct inode *inode, struct file *file)

@@ -1876,7 +1629,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_llc", i915_llc, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_sseu_status", i915_sseu_status, 0},

@@ -531,13 +531,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	intel_device_info_init_mmio(dev_priv);

	intel_uncore_prune_mmio_domains(&dev_priv->uncore);

	intel_uc_init_mmio(&dev_priv->gt.uc);

	ret = intel_engines_init_mmio(&dev_priv->gt);
	ret = intel_gt_init_mmio(&dev_priv->gt);
	if (ret)
		goto err_uncore;

@@ -890,6 +884,7 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)

		intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
		intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
		intel_gt_info_print(&dev_priv->gt.info, &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))

|
|||
|
||||
#define DRIVER_NAME "i915"
|
||||
#define DRIVER_DESC "Intel Graphics"
|
||||
#define DRIVER_DATE "20200702"
|
||||
#define DRIVER_TIMESTAMP 1593714328
|
||||
#define DRIVER_DATE "20200715"
|
||||
#define DRIVER_TIMESTAMP 1594811881
|
||||
|
||||
struct drm_i915_gem_object;
|
||||
|
||||
|
@ -693,6 +693,7 @@ struct intel_vbt_data {
|
|||
bool initialized;
|
||||
int bpp;
|
||||
struct edp_power_seq pps;
|
||||
bool hobl;
|
||||
} edp;
|
||||
|
||||
struct {
|
||||
|
@ -1257,7 +1258,7 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
|
|||
|
||||
/* Iterator over subset of engines selected by mask */
|
||||
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
|
||||
for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \
|
||||
for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
|
||||
(tmp__) ? \
|
||||
((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
|
||||
0;)
|
||||
|
@ -1431,6 +1432,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
|
|||
#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
|
||||
#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
|
||||
#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
|
||||
#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1)
|
||||
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
|
||||
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
|
||||
#define IS_BDW_ULT(dev_priv) \
|
||||
|
@ -1559,22 +1561,29 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
|
|||
#define IS_RKL_REVID(p, since, until) \
|
||||
(IS_ROCKETLAKE(p) && IS_REVID(p, since, until))
|
||||
|
||||
#define DG1_REVID_A0 0x0
|
||||
#define DG1_REVID_B0 0x1
|
||||
|
||||
#define IS_DG1_REVID(p, since, until) \
|
||||
(IS_DG1(p) && IS_REVID(p, since, until))
|
||||
|
||||
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
|
||||
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
|
||||
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
|
||||
|
||||
#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
|
||||
#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
|
||||
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
|
||||
|
||||
#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \
|
||||
#define ENGINE_INSTANCES_MASK(gt, first, count) ({ \
|
||||
unsigned int first__ = (first); \
|
||||
unsigned int count__ = (count); \
|
||||
(INTEL_INFO(dev_priv)->engine_mask & \
|
||||
((gt)->info.engine_mask & \
|
||||
GENMASK(first__ + count__ - 1, first__)) >> first__; \
|
||||
})
|
||||
#define VDBOX_MASK(dev_priv) \
|
||||
ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS)
|
||||
#define VEBOX_MASK(dev_priv) \
|
||||
ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
|
||||
#define VDBOX_MASK(gt) \
|
||||
ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
|
||||
#define VEBOX_MASK(gt) \
|
||||
ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
|
||||
|
||||
/*
|
||||
* The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
|
||||
|
@ -1598,6 +1607,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
|
|||
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
|
||||
(INTEL_INFO(dev_priv)->has_logical_ring_preemption)
|
||||
|
||||
#define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)
|
||||
|
||||
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
|
||||
|
||||
#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
|
||||
|
|
|
@ -72,7 +72,7 @@ struct drm_i915_private;
|
|||
trace_printk(__VA_ARGS__); \
|
||||
} while (0)
|
||||
#define GEM_TRACE_DUMP() \
|
||||
do { ftrace_dump(DUMP_ALL); add_taint_for_CI(TAINT_WARN); } while (0)
|
||||
do { ftrace_dump(DUMP_ALL); __add_taint_for_CI(TAINT_WARN); } while (0)
|
||||
#define GEM_TRACE_DUMP_ON(expr) \
|
||||
do { if (expr) GEM_TRACE_DUMP(); } while (0)
|
||||
#else
|
||||
|
|
|
@ -31,6 +31,8 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
|
|||
if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
|
||||
pages->sgl, pages->nents,
|
||||
PCI_DMA_BIDIRECTIONAL,
|
||||
DMA_ATTR_SKIP_CPU_SYNC |
|
||||
DMA_ATTR_NO_KERNEL_MAPPING |
|
||||
DMA_ATTR_NO_WARN))
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
|
|||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(dev);
|
||||
const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
|
||||
const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
|
||||
drm_i915_getparam_t *param = data;
|
||||
int value;
|
||||
|
||||
|
|
|
@ -42,6 +42,7 @@
|
|||
|
||||
#include "gem/i915_gem_context.h"
|
||||
#include "gem/i915_gem_lmem.h"
|
||||
#include "gt/intel_gt.h"
|
||||
#include "gt/intel_gt_pm.h"
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
@ -425,7 +426,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
|
|||
static void error_print_instdone(struct drm_i915_error_state_buf *m,
|
||||
const struct intel_engine_coredump *ee)
|
||||
{
|
||||
const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu;
|
||||
const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu;
|
||||
int slice;
|
||||
int subslice;
|
||||
|
||||
|
@ -619,16 +620,13 @@ static void print_error_vma(struct drm_i915_error_state_buf *m,
|
|||
}
|
||||
|
||||
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
|
||||
const struct intel_device_info *info,
|
||||
const struct intel_runtime_info *runtime,
|
||||
const struct intel_driver_caps *caps)
|
||||
struct i915_gpu_coredump *error)
|
||||
{
|
||||
struct drm_printer p = i915_error_printer(m);
|
||||
|
||||
intel_device_info_print_static(info, &p);
|
||||
intel_device_info_print_runtime(runtime, &p);
|
||||
intel_device_info_print_topology(&runtime->sseu, &p);
|
||||
intel_driver_caps_print(caps, &p);
|
||||
intel_device_info_print_static(&error->device_info, &p);
|
||||
intel_device_info_print_runtime(&error->runtime_info, &p);
|
||||
intel_driver_caps_print(&error->driver_caps, &p);
|
||||
}
|
||||
|
||||
static void err_print_params(struct drm_i915_error_state_buf *m,
|
||||
|
@ -678,6 +676,15 @@ static void err_free_sgl(struct scatterlist *sgl)
|
|||
}
|
||||
}
|
||||
|
||||
static void err_print_gt_info(struct drm_i915_error_state_buf *m,
|
||||
struct intel_gt_coredump *gt)
|
||||
{
|
||||
struct drm_printer p = i915_error_printer(m);
|
||||
|
||||
intel_gt_info_print(>->info, &p);
|
||||
intel_sseu_print_topology(>->info.sseu, &p);
|
||||
}
|
||||
|
||||
static void err_print_gt(struct drm_i915_error_state_buf *m,
|
||||
struct intel_gt_coredump *gt)
|
||||
{
|
||||
|
@ -734,6 +741,8 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
|
|||
|
||||
if (gt->uc)
|
||||
err_print_uc(m, gt->uc);
|
||||
|
||||
err_print_gt_info(m, gt);
|
||||
}
|
||||
|
||||
static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
|
||||
|
@ -798,8 +807,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
|
|||
if (error->display)
|
||||
intel_display_print_error_state(m, error->display);
|
||||
|
||||
err_print_capabilities(m, &error->device_info, &error->runtime_info,
|
||||
&error->driver_caps);
|
||||
err_print_capabilities(m, error);
|
||||
err_print_params(m, &error->params);
|
||||
}
|
||||
|
||||
|
@ -1630,6 +1638,11 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
|
|||
gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
|
||||
}
|
||||
|
||||
static void gt_record_info(struct intel_gt_coredump *gt)
|
||||
{
|
||||
memcpy(>->info, >->_gt->info, sizeof(struct intel_gt_info));
|
||||
}
|
||||
|
||||
/*
|
||||
* Generate a semi-unique error code. The code is not meant to have meaning, The
|
||||
* code's only purpose is to try to prevent false duplicated bug reports by
|
||||
|
@ -1808,6 +1821,7 @@ struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915)
|
|||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
gt_record_info(error->gt);
|
||||
gt_record_engines(error->gt, compress);
|
||||
|
||||
if (INTEL_INFO(i915)->has_gt_uc)
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
#include <drm/drm_mm.h>
|
||||
|
||||
#include "gt/intel_engine.h"
|
||||
#include "gt/intel_gt_types.h"
|
||||
#include "gt/uc/intel_uc_fw.h"
|
||||
|
||||
#include "intel_device_info.h"
|
||||
|
@ -118,6 +119,8 @@ struct intel_gt_coredump {
|
|||
bool awake;
|
||||
bool simulated;
|
||||
|
||||
struct intel_gt_info info;
|
||||
|
||||
/* Generic register state */
|
||||
u32 eir;
|
||||
u32 pgtbl_er;
|
||||
|
|
|
@ -2584,6 +2584,46 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
|
|||
gen11_master_intr_enable);
|
||||
}
|
||||
|
||||
static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
/* First disable interrupts */
|
||||
raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);
|
||||
|
||||
/* Get the indication levels and ack the master unit */
|
||||
val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
|
||||
if (unlikely(!val))
|
||||
return 0;
|
||||
|
||||
raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);
|
||||
|
||||
/*
|
||||
* Now with master disabled, get a sample of level indications
|
||||
* for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ
|
||||
* out as this bit doesn't exist anymore for DG1
|
||||
*/
|
||||
val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
|
||||
if (unlikely(!val))
|
||||
return 0;
|
||||
|
||||
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline void dg1_master_intr_enable(void __iomem * const regs)
|
||||
{
|
||||
raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
|
||||
}
|
||||
|
||||
static irqreturn_t dg1_irq_handler(int irq, void *arg)
|
||||
{
|
||||
return __gen11_irq_handler(arg,
|
||||
dg1_master_intr_disable_and_ack,
|
||||
dg1_master_intr_enable);
|
||||
}
|
||||
|
||||
/* Called from drm generic code, passed 'crtc' which
|
||||
* we use as a pipe index
|
||||
*/
|
||||
|
@ -2920,7 +2960,10 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
|
|||
{
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
|
||||
gen11_master_intr_disable(dev_priv->uncore.regs);
|
||||
if (HAS_MASTER_UNIT_IRQ(dev_priv))
|
||||
dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
|
||||
else
|
||||
gen11_master_intr_disable(dev_priv->uncore.regs);
|
||||
|
||||
gen11_gt_irq_reset(&dev_priv->gt);
|
||||
gen11_display_irq_reset(dev_priv);
|
||||
|
@ -3071,7 +3114,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
|
|||
hotplug_irqs = sde_ddi_mask | sde_tc_mask;
|
||||
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
|
||||
|
||||
I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
|
||||
if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
|
||||
I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
|
||||
|
||||
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
|
||||
|
||||
|
@ -3517,8 +3561,13 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
|
|||
|
||||
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
|
||||
|
||||
gen11_master_intr_enable(uncore->regs);
|
||||
POSTING_READ(GEN11_GFX_MSTR_IRQ);
|
||||
if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
|
||||
dg1_master_intr_enable(uncore->regs);
|
||||
POSTING_READ(DG1_MSTR_UNIT_INTR);
|
||||
} else {
|
||||
gen11_master_intr_enable(uncore->regs);
|
||||
POSTING_READ(GEN11_GFX_MSTR_IRQ);
|
||||
}
|
||||
}
|
||||
|
||||
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
|
@ -4043,6 +4092,8 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
|
|||
else
|
||||
return i8xx_irq_handler;
|
||||
} else {
|
||||
if (HAS_MASTER_UNIT_IRQ(dev_priv))
|
||||
return dg1_irq_handler;
|
||||
if (INTEL_GEN(dev_priv) >= 11)
|
||||
return gen11_irq_handler;
|
||||
else if (INTEL_GEN(dev_priv) >= 8)
|
||||
|
|
|
@ -168,7 +168,7 @@
|
|||
.gpu_reset_clobbers_display = true, \
|
||||
.hws_needs_physical = 1, \
|
||||
.unfenced_needs_alignment = 1, \
|
||||
.engine_mask = BIT(RCS0), \
|
||||
.platform_engine_mask = BIT(RCS0), \
|
||||
.has_snoop = true, \
|
||||
.has_coherent_ggtt = false, \
|
||||
.dma_mask_size = 32, \
|
||||
|
@ -188,7 +188,7 @@
|
|||
.gpu_reset_clobbers_display = true, \
|
||||
.hws_needs_physical = 1, \
|
||||
.unfenced_needs_alignment = 1, \
|
||||
.engine_mask = BIT(RCS0), \
|
||||
.platform_engine_mask = BIT(RCS0), \
|
||||
.has_snoop = true, \
|
||||
.has_coherent_ggtt = false, \
|
||||
.dma_mask_size = 32, \
|
||||
|
@ -217,6 +217,7 @@ static const struct intel_device_info i85x_info = {
|
|||
static const struct intel_device_info i865g_info = {
|
||||
I845_FEATURES,
|
||||
PLATFORM(INTEL_I865G),
|
||||
.display.has_fbc = 1,
|
||||
};
|
||||
|
||||
#define GEN3_FEATURES \
|
||||
|
@ -225,7 +226,7 @@ static const struct intel_device_info i865g_info = {
|
|||
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
|
||||
.display.has_gmch = 1, \
|
||||
.gpu_reset_clobbers_display = true, \
|
||||
.engine_mask = BIT(RCS0), \
|
||||
.platform_engine_mask = BIT(RCS0), \
|
||||
.has_snoop = true, \
|
||||
.has_coherent_ggtt = true, \
|
||||
.dma_mask_size = 32, \
|
||||
|
@ -316,7 +317,7 @@ static const struct intel_device_info pnv_m_info = {
|
|||
.display.has_hotplug = 1, \
|
||||
.display.has_gmch = 1, \
|
||||
.gpu_reset_clobbers_display = true, \
|
||||
.engine_mask = BIT(RCS0), \
|
||||
.platform_engine_mask = BIT(RCS0), \
|
||||
.has_snoop = true, \
|
||||
.has_coherent_ggtt = true, \
|
||||
.dma_mask_size = 36, \
|
||||
|
@ -348,7 +349,7 @@ static const struct intel_device_info i965gm_info = {
|
|||
static const struct intel_device_info g45_info = {
|
||||
GEN4_FEATURES,
|
||||
PLATFORM(INTEL_G45),
|
||||
.engine_mask = BIT(RCS0) | BIT(VCS0),
|
||||
.platform_engine_mask = BIT(RCS0) | BIT(VCS0),
|
||||
.gpu_reset_clobbers_display = false,
|
||||
};
|
||||
|
||||
|
@ -358,7 +359,7 @@ static const struct intel_device_info gm45_info = {
|
|||
.is_mobile = 1,
|
||||
.display.has_fbc = 1,
|
||||
.display.supports_tv = 1,
|
||||
.engine_mask = BIT(RCS0) | BIT(VCS0),
|
||||
.platform_engine_mask = BIT(RCS0) | BIT(VCS0),
|
||||
.gpu_reset_clobbers_display = false,
|
||||
};
|
||||
|
||||
|
@ -367,7 +368,7 @@ static const struct intel_device_info gm45_info = {
|
|||
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
|
||||
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
|
||||
.display.has_hotplug = 1, \
|
||||
.engine_mask = BIT(RCS0) | BIT(VCS0), \
|
||||
.platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
|
||||
.has_snoop = true, \
|
||||
.has_coherent_ggtt = true, \
|
||||
/* ilk does support rc6, but we do not implement [power] contexts */ \
|
||||
|
@ -397,7 +398,7 @@ static const struct intel_device_info ilk_m_info = {
|
|||
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
|
||||
.display.has_hotplug = 1, \
|
||||
.display.has_fbc = 1, \
|
||||
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
|
||||
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
|
||||
.has_coherent_ggtt = true, \
|
||||
.has_llc = 1, \
|
||||
.has_rc6 = 1, \
|
||||
|
@ -448,7 +449,7 @@ static const struct intel_device_info snb_m_gt2_info = {
|
|||
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
|
||||
.display.has_hotplug = 1, \
|
||||
.display.has_fbc = 1, \
|
||||
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
|
||||
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
|
||||
.has_coherent_ggtt = true, \
|
||||
.has_llc = 1, \
|
||||
.has_rc6 = 1, \
|
||||
|
@ -519,7 +520,7 @@ static const struct intel_device_info vlv_info = {
|
|||
.ppgtt_size = 31,
|
||||
.has_snoop = true,
|
||||
.has_coherent_ggtt = false,
|
||||
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
|
||||
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
|
||||
.display_mmio_offset = VLV_DISPLAY_BASE,
|
||||
I9XX_PIPE_OFFSETS,
|
||||
I9XX_CURSOR_OFFSETS,
|
||||
|
@ -530,7 +531,7 @@ static const struct intel_device_info vlv_info = {
|
|||
|
||||
#define G75_FEATURES \
|
||||
GEN7_FEATURES, \
|
||||
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
|
||||
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
|
||||
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
|
||||
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
|
||||
.display.has_ddi = 1, \
|
||||
|
@ -597,7 +598,7 @@ static const struct intel_device_info bdw_rsvd_info = {
|
|||
static const struct intel_device_info bdw_gt3_info = {
|
||||
BDW_PLATFORM,
|
||||
.gt = 3,
|
||||
.engine_mask =
|
||||
.platform_engine_mask =
|
||||
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
|
||||
};
|
||||
|
||||
|
@ -608,7 +609,7 @@ static const struct intel_device_info chv_info = {
|
|||
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
|
||||
.display.has_hotplug = 1,
|
||||
.is_lp = 1,
|
||||
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
|
||||
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
|
||||
.has_64bit_reloc = 1,
|
||||
.has_runtime_pm = 1,
|
||||
.has_rc6 = 1,
|
||||
|
@ -661,7 +662,7 @@ static const struct intel_device_info skl_gt2_info = {
|
|||
|
||||
#define SKL_GT3_PLUS_PLATFORM \
|
||||
SKL_PLATFORM, \
|
||||
.engine_mask = \
|
||||
.platform_engine_mask = \
|
||||
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
|
||||
|
||||
|
||||
|
@ -680,7 +681,7 @@ static const struct intel_device_info skl_gt4_info = {
|
|||
.is_lp = 1, \
|
||||
.num_supported_dbuf_slices = 1, \
|
||||
.display.has_hotplug = 1, \
|
||||
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
|
||||
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
|
||||
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
|
||||
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
|
||||
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
|
||||
|
@ -743,7 +744,7 @@ static const struct intel_device_info kbl_gt2_info = {
|
|||
static const struct intel_device_info kbl_gt3_info = {
|
||||
KBL_PLATFORM,
|
||||
.gt = 3,
|
||||
.engine_mask =
|
||||
.platform_engine_mask =
|
||||
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
|
||||
};
|
||||
|
||||
|
@ -764,7 +765,7 @@ static const struct intel_device_info cfl_gt2_info = {
|
|||
static const struct intel_device_info cfl_gt3_info = {
|
||||
CFL_PLATFORM,
|
||||
.gt = 3,
|
||||
.engine_mask =
|
||||
.platform_engine_mask =
|
||||
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
|
||||
};
|
||||
|
||||
|
@ -833,7 +834,7 @@ static const struct intel_device_info cnl_info = {
|
|||
static const struct intel_device_info icl_info = {
|
||||
GEN11_FEATURES,
|
||||
PLATFORM(INTEL_ICELAKE),
|
||||
.engine_mask =
|
||||
.platform_engine_mask =
|
||||
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
|
||||
};
|
||||
|
||||
|
@ -841,7 +842,7 @@ static const struct intel_device_info ehl_info = {
|
|||
GEN11_FEATURES,
|
||||
PLATFORM(INTEL_ELKHARTLAKE),
|
||||
.require_force_probe = 1,
|
||||
.engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
|
||||
.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
|
||||
.ppgtt_size = 36,
|
||||
};
|
||||
|
||||
|
@ -877,7 +878,7 @@ static const struct intel_device_info tgl_info = {
|
|||
GEN12_FEATURES,
|
||||
PLATFORM(INTEL_TIGERLAKE),
|
||||
.display.has_modular_fia = 1,
|
||||
.engine_mask =
|
||||
.platform_engine_mask =
|
||||
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
|
||||
};
|
||||
|
||||
|
@ -890,14 +891,26 @@ static const struct intel_device_info rkl_info = {
|
|||
BIT(TRANSCODER_C),
|
||||
.require_force_probe = 1,
|
||||
.display.has_psr_hw_tracking = 0,
|
||||
.engine_mask =
|
||||
.platform_engine_mask =
|
||||
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0),
|
||||
};
|
||||
|
||||
#define GEN12_DGFX_FEATURES \
|
||||
GEN12_FEATURES, \
|
||||
.memory_regions = REGION_SMEM | REGION_LMEM, \
|
||||
.has_master_unit_irq = 1, \
|
||||
.is_dgfx = 1
|
||||
|
||||
static const struct intel_device_info dg1_info __maybe_unused = {
|
||||
GEN12_DGFX_FEATURES,
|
||||
PLATFORM(INTEL_DG1),
|
||||
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
|
||||
.require_force_probe = 1,
|
||||
.platform_engine_mask =
|
||||
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
|
||||
BIT(VCS0) | BIT(VCS2),
|
||||
};
|
||||
|
||||
#undef GEN
|
||||
#undef PLATFORM
|
||||
|
||||
|
|
|
@@ -1773,7 +1773,7 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
 	GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
 
 	i915_gem_object_flush_map(bo);
-	i915_gem_object_unpin_map(bo);
+	__i915_gem_object_release_map(bo);
 
 	stream->noa_wait = vma;
 	return 0;
@@ -1868,7 +1868,7 @@ alloc_oa_config_buffer(struct i915_perf_stream *stream,
 	*cs++ = 0;
 
 	i915_gem_object_flush_map(obj);
-	i915_gem_object_unpin_map(obj);
+	__i915_gem_object_release_map(obj);
 
 	oa_bo->vma = i915_vma_instance(obj,
 				       &stream->engine->gt->ggtt->vm,
@@ -2197,7 +2197,7 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
 		if (!intel_context_pin_if_active(ce))
 			continue;
 
-		flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu);
+		flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
 		err = gen8_modify_context(ce, flex, count);
 
 		intel_context_unpin(ce);
@@ -2341,7 +2341,7 @@ oa_configure_all_contexts(struct i915_perf_stream *stream,
 		if (engine->class != RENDER_CLASS)
 			continue;
 
-		regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
+		regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
 
 		err = gen8_modify_self(ce, regs, num_regs, active);
 		if (err)
@@ -2741,8 +2741,7 @@ static void
 get_default_sseu_config(struct intel_sseu *out_sseu,
 			struct intel_engine_cs *engine)
 {
-	const struct sseu_dev_info *devinfo_sseu =
-		&RUNTIME_INFO(engine->i915)->sseu;
+	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
 
 	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);
 
@@ -2767,7 +2766,7 @@ get_sseu_config(struct intel_sseu *out_sseu,
 	    drm_sseu->engine.engine_instance != engine->uabi_instance)
 		return -EINVAL;
 
-	return i915_gem_user_to_context_sseu(engine->i915, drm_sseu, out_sseu);
+	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
 }
 
 /**
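The perf hunks all follow one refactor pattern: SSEU topology and RPCS computation are reached through the engine's GT (`engine->gt->info.sseu`) instead of the device-global `RUNTIME_INFO()`, so each GT carries its own topology. A sketch of that pattern with simplified stand-in types (not the kernel's definitions):

/* Sketch: per-GT state is reached via the engine's gt backpointer
 * rather than one device-wide info struct, so a second GT would
 * automatically supply its own topology. */
#include <stdio.h>

struct sseu_dev_info { int slice_mask; };
struct intel_gt { struct sseu_dev_info sseu; };
struct intel_engine { struct intel_gt *gt; };

static int make_rpcs(const struct sseu_dev_info *sseu)
{
	return sseu->slice_mask;	/* placeholder for the RPCS encoding */
}

int main(void)
{
	struct intel_gt gt0 = { .sseu = { .slice_mask = 0x1 } };
	struct intel_engine rcs0 = { .gt = &gt0 };

	/* after the refactor: look up through the engine's GT */
	printf("rpcs=%#x\n", make_rpcs(&rcs0.gt->sseu));
	return 0;
}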
@@ -31,7 +31,7 @@ static int copy_query_item(void *query_hdr, size_t query_sz,
 static int query_topology_info(struct drm_i915_private *dev_priv,
 			       struct drm_i915_query_item *query_item)
 {
-	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+	const struct sseu_dev_info *sseu = &dev_priv->gt.info.sseu;
 	struct drm_i915_query_topology_info topo;
 	u32 slice_length, subslice_length, eu_length, total_length;
 	int ret;
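For context, the topology this function reports is consumed from userspace via the two-phase i915 QUERY ioctl: a first call with `length == 0` sizes the blob, a second call fills it. A hedged sketch of that flow; it assumes the libdrm uAPI headers and a render node at /dev/dri/renderD128, with error handling kept minimal:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
	};
	struct drm_i915_query q = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};

	if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_QUERY, &q))	/* phase 1: size */
		return 1;

	item.data_ptr = (uintptr_t)calloc(1, item.length);
	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q))		/* phase 2: fill */
		return 1;

	printf("topology blob: %d bytes\n", item.length);
	return 0;
}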
@@ -868,7 +868,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 #define OAREPORTTRIG1 _MMIO(0x2740)
 #define OAREPORTTRIG1_THRESHOLD_MASK 0xffff
-#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
+#define OAREPORTTRIG1_EDGE_LEVEL_TRIGGER_SELECT_MASK 0xffff0000 /* 0=level */
 
 #define OAREPORTTRIG2 _MMIO(0x2744)
 #define OAREPORTTRIG2_INVERT_A_0 (1 << 0)
@@ -921,7 +921,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 #define OAREPORTTRIG5 _MMIO(0x2750)
 #define OAREPORTTRIG5_THRESHOLD_MASK 0xffff
-#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
+#define OAREPORTTRIG5_EDGE_LEVEL_TRIGGER_SELECT_MASK 0xffff0000 /* 0=level */
 
 #define OAREPORTTRIG6 _MMIO(0x2754)
 #define OAREPORTTRIG6_INVERT_A_0 (1 << 0)
@@ -1974,6 +1974,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy))
 #define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy))
 #define ICL_PORT_PCS_DW1_LN0(phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, phy))
+#define DCC_MODE_SELECT_MASK (0x3 << 20)
+#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20)
 #define COMMON_KEEPER_EN (1 << 26)
 #define LATENCY_OPTIM_MASK (0x3 << 2)
 #define LATENCY_OPTIM_VAL(x) ((x) << 2)
@@ -2072,6 +2074,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define N_SCALAR(x) ((x) << 24)
 #define N_SCALAR_MASK (0x7F << 24)
 
+#define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy))
+#define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy))
+#define ICL_PORT_TX_DW8_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(8, 0, phy))
+#define ICL_PORT_TX_DW8_ODCC_CLK_SEL REG_BIT(31)
+#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK REG_GENMASK(30, 29)
+#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2 REG_FIELD_PREP(ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, 0x1)
+
 #define _ICL_DPHY_CHKN_REG 0x194
 #define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG)
 #define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7)
@@ -2827,6 +2836,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
 #define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
 #define SCPD0 _MMIO(0x209c) /* 915+ only */
+#define SCPD_FBC_IGNORE_3D (1 << 6)
 #define CSTATE_RENDER_CLOCK_GATE_DISABLE (1 << 5)
 #define GEN2_IER _MMIO(0x20a0)
 #define GEN2_IIR _MMIO(0x20a4)
@@ -7723,6 +7733,10 @@ enum {
 #define GEN11_GT_DW1_IRQ (1 << 1)
 #define GEN11_GT_DW0_IRQ (1 << 0)
 
+#define DG1_MSTR_UNIT_INTR _MMIO(0x190008)
+#define DG1_MSTR_IRQ REG_BIT(31)
+#define DG1_MSTR_UNIT(u) REG_BIT(u)
+
 #define GEN11_DISPLAY_INT_CTL _MMIO(0x44200)
 #define GEN11_DISPLAY_IRQ_ENABLE (1 << 31)
 #define GEN11_AUDIO_CODEC_IRQ (1 << 24)
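The new ODCC and DG1 definitions above use the kernel's typed register helpers. A userspace re-implementation of their semantics, just to show what the values expand to; the kernel versions add compile-time validity checks, so this sketch only mirrors the arithmetic:

/* REG_BIT(n): single bit; REG_GENMASK(h, l): bits h..l inclusive;
 * REG_FIELD_PREP(mask, val): shift val into the field described by mask. */
#include <stdint.h>
#include <stdio.h>

#define REG_BIT(n)		(1u << (n))
#define REG_GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define REG_FIELD_PREP(mask, val) (((val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	uint32_t mask = REG_GENMASK(30, 29);		/* 0x60000000 */
	uint32_t div2 = REG_FIELD_PREP(mask, 0x1);	/* 0x20000000 */

	printf("clk_sel=0x%08x div_mask=0x%08x div2=0x%08x\n",
	       REG_BIT(31), mask, div2);
	return 0;
}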
@@ -560,22 +560,25 @@ bool __i915_request_submit(struct i915_request *request)
 	engine->serial++;
 	result = true;
 
-xfer:	/* We may be recursing from the signal callback of another i915 fence */
-	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
-
+xfer:
 	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
 		list_move_tail(&request->sched.link, &engine->active.requests);
 		clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
-		__notify_execute_cb(request);
 	}
-	GEM_BUG_ON(!llist_empty(&request->execute_cb));
 
-	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
-	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
-	    !i915_request_enable_breadcrumb(request))
-		intel_engine_signal_breadcrumbs(engine);
+	/* We may be recursing from the signal callback of another i915 fence */
+	if (!i915_request_signaled(request)) {
+		spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
-	spin_unlock(&request->lock);
+		__notify_execute_cb(request);
+		if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+			     &request->fence.flags) &&
+		    !i915_request_enable_breadcrumb(request))
+			intel_engine_signal_breadcrumbs(engine);
+
+		spin_unlock(&request->lock);
+		GEM_BUG_ON(!llist_empty(&request->execute_cb));
+	}
 
 	return result;
 }
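The rework above stops taking `request->lock` once the request has already signaled, so a submit reached from another fence's signal callback can no longer recurse onto the same lock. A minimal pthread sketch of that check-before-lock shape; the primitives are userspace stand-ins, not the kernel's, and the kernel relies on additional ordering guarantees this sketch omits:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct request {
	pthread_mutex_t lock;
	atomic_bool signaled;
};

static void submit(struct request *rq)
{
	/* Already signaled: nothing left to notify, and taking the lock
	 * here could recurse if we were called from the signal path. */
	if (atomic_load(&rq->signaled))
		return;

	pthread_mutex_lock(&rq->lock);
	/* ... notify execute callbacks, enable breadcrumbs ... */
	pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
	struct request rq = { PTHREAD_MUTEX_INITIALIZER, false };

	atomic_store(&rq.signaled, true);
	submit(&rq);		/* safely skips the nested lock */
	puts("ok");
	return 0;
}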
@@ -49,6 +49,16 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 	}
 }
 
+void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint)
+{
+	__i915_printk(i915, KERN_NOTICE, "CI tainted:%#x by %pS\n",
+		      taint, (void *)_RET_IP_);
+
+	/* Failures that occur during fault injection testing are expected */
+	if (!i915_error_injected())
+		__add_taint_for_CI(taint);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
 static unsigned int i915_probe_fail_count;
@@ -266,19 +266,6 @@ static inline int list_is_last_rcu(const struct list_head *list,
 	return READ_ONCE(list->next) == head;
 }
 
-/*
- * Wait until the work is finally complete, even if it tries to postpone
- * by requeueing itself. Note, that if the worker never cancels itself,
- * we will spin forever.
- */
-static inline void drain_delayed_work(struct delayed_work *dw)
-{
-	do {
-		while (flush_delayed_work(dw))
-			;
-	} while (delayed_work_pending(dw));
-}
-
 static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
 {
 	unsigned long j = msecs_to_jiffies(m);
@@ -436,7 +423,8 @@ static inline const char *enableddisabled(bool v)
 	return v ? "enabled" : "disabled";
 }
 
-static inline void add_taint_for_CI(unsigned int taint)
+void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint);
+static inline void __add_taint_for_CI(unsigned int taint)
 {
 	/*
 	 * The system is "ok", just about surviving for the user, but
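Taken together, the utils changes turn `add_taint_for_CI()` into a device-aware out-of-line helper that logs which device tainted the run (and skips tainting under fault injection), while the old inline body survives as `__add_taint_for_CI()`. A sketch of that wrapper split with stand-ins for the kernel pieces:

#include <stdio.h>

static unsigned int taints;	/* stand-in for kernel taint state */

static inline void __add_taint_for_CI(unsigned int taint)
{
	taints |= taint;	/* stand-in for the add_taint() calls */
}

struct i915 { const char *name; };

static void add_taint_for_CI(struct i915 *i915, unsigned int taint)
{
	/* log first, so CI can attribute the taint to a device */
	fprintf(stderr, "%s: CI tainted:%#x\n", i915->name, taint);
	__add_taint_for_CI(taint);
}

int main(void)
{
	struct i915 card0 = { "card0" };

	add_taint_for_CI(&card0, 0x200);	/* hypothetical taint bit */
	printf("taints=%#x\n", taints);
	return 0;
}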