UAPI Changes:
- Introduce a mechanism to extend execbuf2 (Lionel) - Add syncobj timeline support (Lionel) Driver Changes: - Limit stolen mem usage on the compressed frame buffer (Ville) - Some clean-up around display's cdclk (Ville) - Some DDI changes for better DP link training according to spec (Imre) - Provide the perf pmu.module (Chris) - Remove dubious Valleyview PCI IDs (Alexei) - Add new display power saving feature for gen12+ called HOBL (Jose) - Move SKL's clock gating w/a to skl_init_clock_gating() (Ville) - Rocket Lake display additions (Matt) - Selftest: temporarily downgrade on severity of frequency scaling tests (Chris) - Introduce a new display workaround for fixing FLR related issues on new PCH. (Jose) - Temporarily disable FBC on TGL. It was the culprit of random underruns. (Uma). - Copy default modparams to mock i915_device (Chris) - Add compiler paranoia for checking HWSP values (Chris) - Remove useless gen check before calling intel_rps_boost (Chris) - Fix a null pointer dereference (Chris) - Add a couple of missing i915_active_fini() (Chris) - Update TGL display power's bw_buddy table according to updated spec (Matt) - Fix a couple of wrong return values (Tianjia) - Selftest: Avoid passing random 0 into ilog2 (George) - Many Tiger Lake display fixes and improvements for Type-C and DP compliance (Imre, Jose) - Start the addition of PSR2 selective fetch (Jose) - Update a few DMC and HuC firmware versions (Jose) - Add gen11+ w/a to fix underruns (Matt) - Fix cmd parser desc matching with mask (Mika) -----BEGIN PGP SIGNATURE----- iQEzBAABCAAdFiEEbSBwaO7dZQkcLOKj+mJfZA7rE8oFAl9EBoQACgkQ+mJfZA7r E8qNIgf9E3t12hq0z+8SidMyUPXCz6+BJzed+zjF6q6w3lVaxloQbJbQc/ujec6Y DcnHKdZWN4/BjPtO9PYsOo7JRPlw9mounMfMqhmsgCNigpy8jdE6EQB2wDY/JtWG I/OmVwaIDWF/srRJZNJlmdx1IT6pes3A/1HBJmJWFFPFFQxl6Y8vbaZGmMDwXRzS 6/LOy7otXVGvSHqYDFzNWBPNRstUYmQuPbE4/Iei3zbS8Di3uCkspa6LbocE+T5g cokw9fxE1cJv9bIhIY65R611XyzqqHDzM+2s3x35r8a/ectItLE7kkU07/X3RXmc lrqf4xxzmg+lvbKaLMGdI7YRFPcvbQ== =cGnK -----END PGP 
SIGNATURE----- Merge tag 'drm-intel-next-2020-08-24-1' of git://anongit.freedesktop.org/drm/drm-intel into drm-next UAPI Changes: - Introduce a mechanism to extend execbuf2 (Lionel) - Add syncobj timeline support (Lionel) Driver Changes: - Limit stolen mem usage on the compressed frame buffer (Ville) - Some clean-up around display's cdclk (Ville) - Some DDI changes for better DP link training according to spec (Imre) - Provide the perf pmu.module (Chris) - Remove dubious Valleyview PCI IDs (Alexei) - Add new display power saving feature for gen12+ called HOBL (Jose) - Move SKL's clock gating w/a to skl_init_clock_gating() (Ville) - Rocket Lake display additions (Matt) - Selftest: temporarily downgrade on severity of frequency scaling tests (Chris) - Introduce a new display workaround for fixing FLR related issues on new PCH. (Jose) - Temporarily disable FBC on TGL. It was the culprit of random underruns. (Uma). - Copy default modparams to mock i915_device (Chris) - Add compiler paranoia for checking HWSP values (Chris) - Remove useless gen check before calling intel_rps_boost (Chris) - Fix a null pointer dereference (Chris) - Add a couple of missing i915_active_fini() (Chris) - Update TGL display power's bw_buddy table according to updated spec (Matt) - Fix a couple of wrong return values (Tianjia) - Selftest: Avoid passing random 0 into ilog2 (George) - Many Tiger Lake display fixes and improvements for Type-C and DP compliance (Imre, Jose) - Start the addition of PSR2 selective fetch (Jose) - Update a few DMC and HuC firmware versions (Jose) - Add gen11+ w/a to fix underruns (Matt) - Fix cmd parser desc matching with mask (Mika) Signed-off-by: Dave Airlie <airlied@redhat.com> From: Rodrigo Vivi <rodrigo.vivi@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20200826232733.GA129053@intel.com
This commit is contained in:
Коммит
3393649977
|
@ -2677,7 +2677,7 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
|
|||
*/
|
||||
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
|
||||
intel_de_write(dev_priv, GMBUSFREQ_VLV,
|
||||
DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
|
||||
DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
|
||||
}
|
||||
|
||||
static int cnp_rawclk(struct drm_i915_private *dev_priv)
|
||||
|
@ -2903,9 +2903,10 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
|
|||
dev_priv->display.get_cdclk = i85x_get_cdclk;
|
||||
else if (IS_I845G(dev_priv))
|
||||
dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk;
|
||||
else { /* 830 */
|
||||
drm_WARN(&dev_priv->drm, !IS_I830(dev_priv),
|
||||
"Unknown platform. Assuming 133 MHz CDCLK\n");
|
||||
else if (IS_I830(dev_priv))
|
||||
dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
|
||||
|
||||
if (drm_WARN(&dev_priv->drm, !dev_priv->display.get_cdclk,
|
||||
"Unknown platform. Assuming 133 MHz CDCLK\n"))
|
||||
dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,8 +17,8 @@ struct intel_atomic_state;
|
|||
struct intel_crtc_state;
|
||||
|
||||
struct intel_cdclk_vals {
|
||||
u16 refclk;
|
||||
u32 cdclk;
|
||||
u16 refclk;
|
||||
u8 divider; /* CD2X divider * 2 */
|
||||
u8 ratio;
|
||||
};
|
||||
|
|
|
@ -40,12 +40,12 @@
|
|||
|
||||
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
|
||||
|
||||
#define RKL_CSR_PATH "i915/rkl_dmc_ver2_01.bin"
|
||||
#define RKL_CSR_VERSION_REQUIRED CSR_VERSION(2, 1)
|
||||
#define RKL_CSR_PATH "i915/rkl_dmc_ver2_02.bin"
|
||||
#define RKL_CSR_VERSION_REQUIRED CSR_VERSION(2, 2)
|
||||
MODULE_FIRMWARE(RKL_CSR_PATH);
|
||||
|
||||
#define TGL_CSR_PATH "i915/tgl_dmc_ver2_06.bin"
|
||||
#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 6)
|
||||
#define TGL_CSR_PATH "i915/tgl_dmc_ver2_08.bin"
|
||||
#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 8)
|
||||
#define TGL_CSR_MAX_FW_SIZE 0x6000
|
||||
MODULE_FIRMWARE(TGL_CSR_PATH);
|
||||
|
||||
|
|
|
@ -706,6 +706,42 @@ static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] =
|
|||
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
|
||||
};
|
||||
|
||||
static const struct cnl_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = {
|
||||
/* NT mV Trans mV db */
|
||||
{ 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
|
||||
{ 0xA, 0x4F, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
|
||||
{ 0xC, 0x60, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
|
||||
{ 0xC, 0x7F, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
|
||||
{ 0xC, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
|
||||
{ 0xC, 0x6F, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
|
||||
{ 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */
|
||||
{ 0x6, 0x60, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
|
||||
{ 0x6, 0x7F, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
|
||||
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
|
||||
};
|
||||
|
||||
/*
|
||||
* Cloned the HOBL entry to comply with the voltage and pre-emphasis entries
|
||||
* that DisplayPort specification requires
|
||||
*/
|
||||
static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = {
|
||||
/* VS pre-emp */
|
||||
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 0 */
|
||||
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 1 */
|
||||
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 2 */
|
||||
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 3 */
|
||||
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 0 */
|
||||
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 1 */
|
||||
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 2 */
|
||||
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 0 */
|
||||
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */
|
||||
};
|
||||
|
||||
static bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table)
|
||||
{
|
||||
return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
|
||||
}
|
||||
|
||||
static const struct ddi_buf_trans *
|
||||
bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
|
||||
{
|
||||
|
@ -1050,9 +1086,26 @@ static const struct cnl_ddi_buf_trans *
|
|||
tgl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
|
||||
int *n_entries)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
|
||||
if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.hobl) {
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
|
||||
if (!intel_dp->hobl_failed && rate <= 540000) {
|
||||
/* Same table applies to TGL, RKL and DG1 */
|
||||
*n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl);
|
||||
return tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
|
||||
}
|
||||
}
|
||||
|
||||
if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) {
|
||||
return icl_get_combo_buf_trans(encoder, type, rate, n_entries);
|
||||
} else if (rate > 270000) {
|
||||
if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
|
||||
*n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2);
|
||||
return tgl_uy_combo_phy_ddi_translations_dp_hbr2;
|
||||
}
|
||||
|
||||
*n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
|
||||
return tgl_combo_phy_ddi_translations_dp_hbr2;
|
||||
}
|
||||
|
@ -2392,6 +2445,15 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
|
|||
level = n_entries - 1;
|
||||
}
|
||||
|
||||
if (type == INTEL_OUTPUT_EDP) {
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
|
||||
val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED;
|
||||
intel_dp->hobl_active = is_hobl_buf_trans(ddi_translations);
|
||||
intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), val,
|
||||
intel_dp->hobl_active ? val : 0);
|
||||
}
|
||||
|
||||
/* Set PORT_TX_DW5 */
|
||||
val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
|
||||
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
|
||||
|
@ -2802,7 +2864,9 @@ hsw_set_signal_levels(struct intel_dp *intel_dp)
|
|||
static u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
|
||||
enum phy phy)
|
||||
{
|
||||
if (intel_phy_is_combo(dev_priv, phy)) {
|
||||
if (IS_ROCKETLAKE(dev_priv)) {
|
||||
return RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
|
||||
} else if (intel_phy_is_combo(dev_priv, phy)) {
|
||||
return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
|
||||
} else if (intel_phy_is_tc(dev_priv, phy)) {
|
||||
enum tc_port tc_port = intel_port_to_tc(dev_priv,
|
||||
|
@ -2829,6 +2893,16 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
|
|||
(val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
|
||||
|
||||
if (intel_phy_is_combo(dev_priv, phy)) {
|
||||
u32 mask, sel;
|
||||
|
||||
if (IS_ROCKETLAKE(dev_priv)) {
|
||||
mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
|
||||
sel = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
|
||||
} else {
|
||||
mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
|
||||
sel = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
|
||||
}
|
||||
|
||||
/*
|
||||
* Even though this register references DDIs, note that we
|
||||
* want to pass the PHY rather than the port (DDI). For
|
||||
|
@ -2839,8 +2913,8 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
|
|||
* Clock Select chooses the PLL for both DDIA and DDID and
|
||||
* drives port A in all cases."
|
||||
*/
|
||||
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
|
||||
val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
|
||||
val &= ~mask;
|
||||
val |= sel;
|
||||
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
|
||||
intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
|
||||
}
|
||||
|
@ -4037,8 +4111,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
|
|||
intel_wait_ddi_buf_idle(dev_priv, port);
|
||||
}
|
||||
|
||||
dp_tp_ctl = DP_TP_CTL_ENABLE |
|
||||
DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
|
||||
dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1;
|
||||
if (intel_dp->link_mst)
|
||||
dp_tp_ctl |= DP_TP_CTL_MODE_MST;
|
||||
else {
|
||||
|
@ -4061,16 +4134,10 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
|
|||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
|
||||
enum port port = dp_to_dig_port(intel_dp)->base.port;
|
||||
u32 temp;
|
||||
|
||||
temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
|
||||
|
||||
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
|
||||
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
|
||||
else
|
||||
temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
|
||||
|
||||
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
|
||||
switch (dp_train_pat & train_pat_mask) {
|
||||
case DP_TRAINING_PATTERN_DISABLE:
|
||||
|
@ -4091,9 +4158,6 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
|
|||
}
|
||||
|
||||
intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
|
||||
|
||||
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
|
||||
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
|
||||
}
|
||||
|
||||
static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp)
|
||||
|
@ -4878,6 +4942,13 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
|
|||
return max_lanes;
|
||||
}
|
||||
|
||||
static bool hti_uses_phy(struct drm_i915_private *i915, enum phy phy)
|
||||
{
|
||||
return i915->hti_state & HDPORT_ENABLED &&
|
||||
(i915->hti_state & HDPORT_PHY_USED_DP(phy) ||
|
||||
i915->hti_state & HDPORT_PHY_USED_HDMI(phy));
|
||||
}
|
||||
|
||||
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
|
||||
{
|
||||
struct intel_digital_port *dig_port;
|
||||
|
@ -4885,6 +4956,18 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
|
|||
bool init_hdmi, init_dp, init_lspcon = false;
|
||||
enum phy phy = intel_port_to_phy(dev_priv, port);
|
||||
|
||||
/*
|
||||
* On platforms with HTI (aka HDPORT), if it's enabled at boot it may
|
||||
* have taken over some of the PHYs and made them unavailable to the
|
||||
* driver. In that case we should skip initializing the corresponding
|
||||
* outputs.
|
||||
*/
|
||||
if (hti_uses_phy(dev_priv, phy)) {
|
||||
drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n",
|
||||
port_name(port), phy_name(phy));
|
||||
return;
|
||||
}
|
||||
|
||||
init_hdmi = intel_bios_port_supports_dvi(dev_priv, port) ||
|
||||
intel_bios_port_supports_hdmi(dev_priv, port);
|
||||
init_dp = intel_bios_port_supports_dp(dev_priv, port);
|
||||
|
|
|
@ -47,6 +47,7 @@
|
|||
#include "display/intel_ddi.h"
|
||||
#include "display/intel_dp.h"
|
||||
#include "display/intel_dp_mst.h"
|
||||
#include "display/intel_dpll_mgr.h"
|
||||
#include "display/intel_dsi.h"
|
||||
#include "display/intel_dvo.h"
|
||||
#include "display/intel_gmbus.h"
|
||||
|
@ -3761,6 +3762,44 @@ static int glk_max_plane_width(const struct drm_framebuffer *fb,
|
|||
}
|
||||
}
|
||||
|
||||
static int icl_min_plane_width(const struct drm_framebuffer *fb)
|
||||
{
|
||||
/* Wa_14011264657, Wa_14011050563: gen11+ */
|
||||
switch (fb->format->format) {
|
||||
case DRM_FORMAT_C8:
|
||||
return 18;
|
||||
case DRM_FORMAT_RGB565:
|
||||
return 10;
|
||||
case DRM_FORMAT_XRGB8888:
|
||||
case DRM_FORMAT_XBGR8888:
|
||||
case DRM_FORMAT_ARGB8888:
|
||||
case DRM_FORMAT_ABGR8888:
|
||||
case DRM_FORMAT_XRGB2101010:
|
||||
case DRM_FORMAT_XBGR2101010:
|
||||
case DRM_FORMAT_ARGB2101010:
|
||||
case DRM_FORMAT_ABGR2101010:
|
||||
case DRM_FORMAT_XVYU2101010:
|
||||
case DRM_FORMAT_Y212:
|
||||
case DRM_FORMAT_Y216:
|
||||
return 6;
|
||||
case DRM_FORMAT_NV12:
|
||||
return 20;
|
||||
case DRM_FORMAT_P010:
|
||||
case DRM_FORMAT_P012:
|
||||
case DRM_FORMAT_P016:
|
||||
return 12;
|
||||
case DRM_FORMAT_XRGB16161616F:
|
||||
case DRM_FORMAT_XBGR16161616F:
|
||||
case DRM_FORMAT_ARGB16161616F:
|
||||
case DRM_FORMAT_ABGR16161616F:
|
||||
case DRM_FORMAT_XVYU12_16161616:
|
||||
case DRM_FORMAT_XVYU16161616:
|
||||
return 4;
|
||||
default:
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
static int icl_max_plane_width(const struct drm_framebuffer *fb,
|
||||
int color_plane,
|
||||
unsigned int rotation)
|
||||
|
@ -3843,29 +3882,31 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
|
|||
int y = plane_state->uapi.src.y1 >> 16;
|
||||
int w = drm_rect_width(&plane_state->uapi.src) >> 16;
|
||||
int h = drm_rect_height(&plane_state->uapi.src) >> 16;
|
||||
int max_width;
|
||||
int max_height;
|
||||
u32 alignment;
|
||||
u32 offset;
|
||||
int max_width, min_width, max_height;
|
||||
u32 alignment, offset;
|
||||
int aux_plane = intel_main_to_aux_plane(fb, 0);
|
||||
u32 aux_offset = plane_state->color_plane[aux_plane].offset;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 11)
|
||||
if (INTEL_GEN(dev_priv) >= 11) {
|
||||
max_width = icl_max_plane_width(fb, 0, rotation);
|
||||
else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
|
||||
min_width = icl_min_plane_width(fb);
|
||||
} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
|
||||
max_width = glk_max_plane_width(fb, 0, rotation);
|
||||
else
|
||||
min_width = 1;
|
||||
} else {
|
||||
max_width = skl_max_plane_width(fb, 0, rotation);
|
||||
min_width = 1;
|
||||
}
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 11)
|
||||
max_height = icl_max_plane_height();
|
||||
else
|
||||
max_height = skl_max_plane_height();
|
||||
|
||||
if (w > max_width || h > max_height) {
|
||||
if (w > max_width || w < min_width || h > max_height) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
|
||||
w, h, max_width, max_height);
|
||||
"requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
|
||||
w, h, min_width, max_width, max_height);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -10802,9 +10843,18 @@ static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
|
|||
u32 temp;
|
||||
|
||||
if (intel_phy_is_combo(dev_priv, phy)) {
|
||||
temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) &
|
||||
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
|
||||
id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
|
||||
u32 mask, shift;
|
||||
|
||||
if (IS_ROCKETLAKE(dev_priv)) {
|
||||
mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
|
||||
shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
|
||||
} else {
|
||||
mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
|
||||
shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
|
||||
}
|
||||
|
||||
temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) & mask;
|
||||
id = temp >> shift;
|
||||
port_dpll_id = ICL_PORT_DPLL_DEFAULT;
|
||||
} else if (intel_phy_is_tc(dev_priv, phy)) {
|
||||
u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
|
||||
|
@ -12760,6 +12810,9 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
|
|||
|
||||
}
|
||||
|
||||
if (!mode_changed)
|
||||
intel_psr2_sel_fetch_update(state, crtc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -15136,6 +15189,8 @@ static void commit_pipe_config(struct intel_atomic_state *state,
|
|||
|
||||
if (new_crtc_state->update_pipe)
|
||||
intel_pipe_fastset(old_crtc_state, new_crtc_state);
|
||||
|
||||
intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
|
||||
}
|
||||
|
||||
if (dev_priv->display.atomic_update_watermarks)
|
||||
|
@ -17894,6 +17949,13 @@ int intel_modeset_init(struct drm_i915_private *i915)
|
|||
if (i915->max_cdclk_freq == 0)
|
||||
intel_update_max_cdclk(i915);
|
||||
|
||||
/*
|
||||
* If the platform has HTI, we need to find out whether it has reserved
|
||||
* any display resources before we create our display outputs.
|
||||
*/
|
||||
if (INTEL_INFO(i915)->display.has_hti)
|
||||
i915->hti_state = intel_de_read(i915, HDPORT_STATE);
|
||||
|
||||
/* Just disable it once at startup */
|
||||
intel_vga_disable(i915);
|
||||
intel_setup_outputs(i915);
|
||||
|
|
|
@ -417,6 +417,9 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
|
|||
su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
|
||||
seq_printf(m, "%d\t%d\n", frame, su_blocks);
|
||||
}
|
||||
|
||||
seq_printf(m, "PSR2 selective fetch: %s\n",
|
||||
enableddisabled(psr->psr2_sel_fetch_enabled));
|
||||
}
|
||||
|
||||
unlock:
|
||||
|
|
|
@ -3927,12 +3927,13 @@ tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
|
|||
int ret;
|
||||
|
||||
while (1) {
|
||||
u32 low_val = 0, high_val;
|
||||
u32 low_val;
|
||||
u32 high_val = 0;
|
||||
|
||||
if (block)
|
||||
high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ;
|
||||
low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
|
||||
else
|
||||
high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ;
|
||||
low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
|
||||
|
||||
/*
|
||||
* Spec states that we should timeout the request after 200us
|
||||
|
@ -3951,8 +3952,7 @@ tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
|
|||
if (++tries == 3)
|
||||
break;
|
||||
|
||||
if (ret == -EAGAIN)
|
||||
msleep(1);
|
||||
msleep(1);
|
||||
}
|
||||
|
||||
if (ret)
|
||||
|
@ -5302,6 +5302,12 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
|
|||
|
||||
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
|
||||
|
||||
/* Wa_14011294188:ehl,jsl,tgl,rkl */
|
||||
if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
|
||||
INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
|
||||
intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
|
||||
PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
|
||||
|
||||
/* 1. Enable PCH reset handshake. */
|
||||
intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
|
||||
|
||||
|
|
|
@ -931,6 +931,7 @@ struct intel_crtc_state {
|
|||
|
||||
bool has_psr;
|
||||
bool has_psr2;
|
||||
bool enable_psr2_sel_fetch;
|
||||
u32 dc3co_exitline;
|
||||
|
||||
/*
|
||||
|
@ -1073,6 +1074,8 @@ struct intel_crtc_state {
|
|||
|
||||
/* For DSB related info */
|
||||
struct intel_dsb *dsb;
|
||||
|
||||
u32 psr2_man_track_ctl;
|
||||
};
|
||||
|
||||
enum intel_pipe_crc_source {
|
||||
|
@ -1375,6 +1378,9 @@ struct intel_dp {
|
|||
|
||||
/* Display stream compression testing */
|
||||
bool force_dsc_en;
|
||||
|
||||
bool hobl_failed;
|
||||
bool hobl_active;
|
||||
};
|
||||
|
||||
enum lspcon_vendor {
|
||||
|
|
|
@ -410,10 +410,17 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
|
|||
intel_connector->base.base.id,
|
||||
intel_connector->base.name,
|
||||
intel_dp->link_rate, intel_dp->lane_count);
|
||||
if (!intel_dp_get_link_train_fallback_values(intel_dp,
|
||||
intel_dp->link_rate,
|
||||
intel_dp->lane_count))
|
||||
/* Schedule a Hotplug Uevent to userspace to start modeset */
|
||||
schedule_work(&intel_connector->modeset_retry_work);
|
||||
return;
|
||||
|
||||
if (intel_dp->hobl_active) {
|
||||
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
|
||||
"Link Training failed with HOBL active, not enabling it from now on");
|
||||
intel_dp->hobl_failed = true;
|
||||
} else if (intel_dp_get_link_train_fallback_values(intel_dp,
|
||||
intel_dp->link_rate,
|
||||
intel_dp->lane_count)) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* Schedule a Hotplug Uevent to userspace to start modeset */
|
||||
schedule_work(&intel_connector->modeset_retry_work);
|
||||
}
|
||||
|
|
|
@ -3475,6 +3475,14 @@ static void icl_update_active_dpll(struct intel_atomic_state *state,
|
|||
icl_set_active_port_dpll(crtc_state, port_dpll_id);
|
||||
}
|
||||
|
||||
static u32 intel_get_hti_plls(struct drm_i915_private *i915)
|
||||
{
|
||||
if (!(i915->hti_state & HDPORT_ENABLED))
|
||||
return 0;
|
||||
|
||||
return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
|
||||
}
|
||||
|
||||
static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder)
|
||||
|
@ -3504,13 +3512,22 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
|
|||
|
||||
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
|
||||
|
||||
if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
|
||||
if (IS_ROCKETLAKE(dev_priv)) {
|
||||
dpll_mask =
|
||||
BIT(DPLL_ID_EHL_DPLL4) |
|
||||
BIT(DPLL_ID_ICL_DPLL1) |
|
||||
BIT(DPLL_ID_ICL_DPLL0);
|
||||
else
|
||||
} else if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A) {
|
||||
dpll_mask =
|
||||
BIT(DPLL_ID_EHL_DPLL4) |
|
||||
BIT(DPLL_ID_ICL_DPLL1) |
|
||||
BIT(DPLL_ID_ICL_DPLL0);
|
||||
} else {
|
||||
dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
|
||||
}
|
||||
|
||||
/* Eliminate DPLLs from consideration if reserved by HTI */
|
||||
dpll_mask &= ~intel_get_hti_plls(dev_priv);
|
||||
|
||||
port_dpll->pll = intel_find_shared_dpll(state, crtc,
|
||||
&port_dpll->hw_state,
|
||||
|
@ -3791,7 +3808,12 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
|
|||
if (!(val & PLL_ENABLE))
|
||||
goto out;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 12) {
|
||||
if (IS_ROCKETLAKE(dev_priv)) {
|
||||
hw_state->cfgcr0 = intel_de_read(dev_priv,
|
||||
RKL_DPLL_CFGCR0(id));
|
||||
hw_state->cfgcr1 = intel_de_read(dev_priv,
|
||||
RKL_DPLL_CFGCR1(id));
|
||||
} else if (INTEL_GEN(dev_priv) >= 12) {
|
||||
hw_state->cfgcr0 = intel_de_read(dev_priv,
|
||||
TGL_DPLL_CFGCR0(id));
|
||||
hw_state->cfgcr1 = intel_de_read(dev_priv,
|
||||
|
@ -3844,7 +3866,10 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
|
|||
const enum intel_dpll_id id = pll->info->id;
|
||||
i915_reg_t cfgcr0_reg, cfgcr1_reg;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 12) {
|
||||
if (IS_ROCKETLAKE(dev_priv)) {
|
||||
cfgcr0_reg = RKL_DPLL_CFGCR0(id);
|
||||
cfgcr1_reg = RKL_DPLL_CFGCR1(id);
|
||||
} else if (INTEL_GEN(dev_priv) >= 12) {
|
||||
cfgcr0_reg = TGL_DPLL_CFGCR0(id);
|
||||
cfgcr1_reg = TGL_DPLL_CFGCR1(id);
|
||||
} else {
|
||||
|
@ -4276,6 +4301,21 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
|
|||
.dump_hw_state = icl_dump_hw_state,
|
||||
};
|
||||
|
||||
static const struct dpll_info rkl_plls[] = {
|
||||
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
|
||||
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
|
||||
{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
|
||||
{ },
|
||||
};
|
||||
|
||||
static const struct intel_dpll_mgr rkl_pll_mgr = {
|
||||
.dpll_info = rkl_plls,
|
||||
.get_dplls = icl_get_dplls,
|
||||
.put_dplls = icl_put_dplls,
|
||||
.update_ref_clks = icl_update_dpll_ref_clks,
|
||||
.dump_hw_state = icl_dump_hw_state,
|
||||
};
|
||||
|
||||
/**
|
||||
* intel_shared_dpll_init - Initialize shared DPLLs
|
||||
* @dev: drm device
|
||||
|
@ -4289,7 +4329,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
|
|||
const struct dpll_info *dpll_info;
|
||||
int i;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 12)
|
||||
if (IS_ROCKETLAKE(dev_priv))
|
||||
dpll_mgr = &rkl_pll_mgr;
|
||||
else if (INTEL_GEN(dev_priv) >= 12)
|
||||
dpll_mgr = &tgl_pll_mgr;
|
||||
else if (IS_ELKHARTLAKE(dev_priv))
|
||||
dpll_mgr = &ehl_pll_mgr;
|
||||
|
|
|
@ -424,6 +424,14 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
|
|||
fbc->no_fbc_reason = reason;
|
||||
}
|
||||
|
||||
static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
|
||||
{
|
||||
if (INTEL_GEN(i915) >= 5 || IS_G4X(i915))
|
||||
return BIT_ULL(28);
|
||||
else
|
||||
return BIT_ULL(32);
|
||||
}
|
||||
|
||||
static int find_compression_threshold(struct drm_i915_private *dev_priv,
|
||||
struct drm_mm_node *node,
|
||||
unsigned int size,
|
||||
|
@ -442,6 +450,8 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
|
|||
else
|
||||
end = U64_MAX;
|
||||
|
||||
end = min(end, intel_fbc_cfb_base_max(dev_priv));
|
||||
|
||||
/* HACK: This code depends on what we will do in *_enable_fbc. If that
|
||||
* code changes, this code needs to change as well.
|
||||
*
|
||||
|
@ -1416,6 +1426,13 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
|
|||
if (!HAS_FBC(dev_priv))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Fbc is causing random underruns in CI execution on TGL platforms.
|
||||
* Disabling the same while the problem is being debugged and analyzed.
|
||||
*/
|
||||
if (IS_TIGERLAKE(dev_priv))
|
||||
return 0;
|
||||
|
||||
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
|
||||
return 1;
|
||||
|
||||
|
|
|
@ -232,6 +232,8 @@ static void frontbuffer_release(struct kref *ref)
|
|||
RCU_INIT_POINTER(obj->frontbuffer, NULL);
|
||||
spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
|
||||
|
||||
i915_active_fini(&front->write);
|
||||
|
||||
i915_gem_object_put(obj);
|
||||
kfree_rcu(front, rcu);
|
||||
}
|
||||
|
|
|
@ -553,6 +553,22 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
|
|||
val |= EDP_PSR2_FAST_WAKE(7);
|
||||
}
|
||||
|
||||
if (dev_priv->psr.psr2_sel_fetch_enabled) {
|
||||
/* WA 1408330847 */
|
||||
if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
|
||||
IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
|
||||
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
|
||||
DIS_RAM_BYPASS_PSR2_MAN_TRACK,
|
||||
DIS_RAM_BYPASS_PSR2_MAN_TRACK);
|
||||
|
||||
intel_de_write(dev_priv,
|
||||
PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder),
|
||||
PSR2_MAN_TRK_CTL_ENABLE);
|
||||
} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
|
||||
intel_de_write(dev_priv,
|
||||
PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
|
||||
* recommending keep this bit unset while PSR2 is enabled.
|
||||
|
@ -663,6 +679,38 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
|
|||
crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
|
||||
}
|
||||
|
||||
static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct intel_plane_state *plane_state;
|
||||
struct intel_plane *plane;
|
||||
int i;
|
||||
|
||||
if (!dev_priv->params.enable_psr2_sel_fetch) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"PSR2 sel fetch not enabled, disabled by parameter\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (crtc_state->uapi.async_flip) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"PSR2 sel fetch not enabled, async flip enabled\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
|
||||
if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"PSR2 sel fetch not enabled, plane rotated\n");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return crtc_state->enable_psr2_sel_fetch = true;
|
||||
}
|
||||
|
||||
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
|
@ -732,22 +780,17 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
|
|||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Some platforms lack PSR2 HW tracking and instead require manual
|
||||
* tracking by software. In this case, the driver is required to track
|
||||
* the areas that need updates and program hardware to send selective
|
||||
* updates.
|
||||
*
|
||||
* So until the software tracking is implemented, PSR2 needs to be
|
||||
* disabled for platforms without PSR2 HW tracking.
|
||||
*/
|
||||
if (!HAS_PSR_HW_TRACKING(dev_priv)) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"No PSR2 HW tracking in the platform\n");
|
||||
return false;
|
||||
if (HAS_PSR2_SEL_FETCH(dev_priv)) {
|
||||
if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
|
||||
!HAS_PSR_HW_TRACKING(dev_priv)) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
|
||||
if (!crtc_state->enable_psr2_sel_fetch &&
|
||||
(crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
|
||||
crtc_hdisplay, crtc_vdisplay,
|
||||
|
@ -898,6 +941,11 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
|
|||
val |= EXITLINE_ENABLE;
|
||||
intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
|
||||
}
|
||||
|
||||
if (HAS_PSR_HW_TRACKING(dev_priv))
|
||||
intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
|
||||
dev_priv->psr.psr2_sel_fetch_enabled ?
|
||||
IGNORE_PSR2_HW_TRACKING : 0);
|
||||
}
|
||||
|
||||
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
|
||||
|
@ -919,6 +967,7 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
|
|||
/* DC5/DC6 requires at least 6 idle frames */
|
||||
val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
|
||||
dev_priv->psr.dc3co_exit_delay = val;
|
||||
dev_priv->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
|
||||
|
||||
/*
|
||||
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
|
||||
|
@ -1058,6 +1107,13 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
|
|||
psr_status_mask, 2000))
|
||||
drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
|
||||
|
||||
/* WA 1408330847 */
|
||||
if (dev_priv->psr.psr2_sel_fetch_enabled &&
|
||||
(IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
|
||||
IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
|
||||
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
|
||||
DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
|
||||
|
||||
/* Disable PSR on Sink */
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
|
||||
|
||||
|
@ -1115,6 +1171,32 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
|
|||
intel_psr_exit(dev_priv);
|
||||
}
|
||||
|
||||
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
struct i915_psr *psr = &dev_priv->psr;
|
||||
|
||||
if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
|
||||
!crtc_state->enable_psr2_sel_fetch)
|
||||
return;
|
||||
|
||||
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(psr->transcoder),
|
||||
crtc_state->psr2_man_track_ctl);
|
||||
}
|
||||
|
||||
void intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
|
||||
|
||||
if (!crtc_state->enable_psr2_sel_fetch)
|
||||
return;
|
||||
|
||||
crtc_state->psr2_man_track_ctl = PSR2_MAN_TRK_CTL_ENABLE |
|
||||
PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_psr_update - Update PSR state
|
||||
* @intel_dp: Intel DP
|
||||
|
|
|
@ -13,6 +13,8 @@ struct drm_connector_state;
|
|||
struct drm_i915_private;
|
||||
struct intel_crtc_state;
|
||||
struct intel_dp;
|
||||
struct intel_crtc;
|
||||
struct intel_atomic_state;
|
||||
|
||||
#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
|
||||
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
|
||||
|
@ -43,5 +45,8 @@ void intel_psr_atomic_check(struct drm_connector *connector,
|
|||
struct drm_connector_state *old_state,
|
||||
struct drm_connector_state *new_state);
|
||||
void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp);
|
||||
void intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc);
|
||||
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state);
|
||||
|
||||
#endif /* __INTEL_PSR_H__ */
|
||||
|
|
|
@ -2843,8 +2843,9 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
|
|||
static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
|
||||
enum plane_id plane_id)
|
||||
{
|
||||
/* Wa_14010477008:tgl[a0..c0] */
|
||||
if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0))
|
||||
/* Wa_14010477008:tgl[a0..c0],rkl[all] */
|
||||
if (IS_ROCKETLAKE(dev_priv) ||
|
||||
IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0))
|
||||
return false;
|
||||
|
||||
return plane_id < PLANE_SPRITE4;
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
#include "i915_gem_ioctls.h"
|
||||
#include "i915_sw_fence_work.h"
|
||||
#include "i915_trace.h"
|
||||
#include "i915_user_extensions.h"
|
||||
|
||||
struct eb_vma {
|
||||
struct i915_vma *vma;
|
||||
|
@ -222,6 +223,13 @@ struct eb_vma_array {
|
|||
* the batchbuffer in trusted mode, otherwise the ioctl is rejected.
|
||||
*/
|
||||
|
||||
struct eb_fence {
|
||||
struct drm_syncobj *syncobj; /* Use with ptr_mask_bits() */
|
||||
struct dma_fence *dma_fence;
|
||||
u64 value;
|
||||
struct dma_fence_chain *chain_fence;
|
||||
};
|
||||
|
||||
struct i915_execbuffer {
|
||||
struct drm_i915_private *i915; /** i915 backpointer */
|
||||
struct drm_file *file; /** per-file lookup tables and limits */
|
||||
|
@ -281,6 +289,9 @@ struct i915_execbuffer {
|
|||
int lut_size;
|
||||
struct hlist_head *buckets; /** ht for relocation handles */
|
||||
struct eb_vma_array *array;
|
||||
|
||||
struct eb_fence *fences;
|
||||
unsigned long num_fences;
|
||||
};
|
||||
|
||||
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
|
||||
|
@ -1622,7 +1633,8 @@ static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
|
|||
return -EINVAL;
|
||||
|
||||
/* Kernel clipping was a DRI1 misfeature */
|
||||
if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
|
||||
if (!(exec->flags & (I915_EXEC_FENCE_ARRAY |
|
||||
I915_EXEC_USE_EXTENSIONS))) {
|
||||
if (exec->num_cliprects || exec->cliprects_ptr)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -2201,107 +2213,241 @@ eb_pin_engine(struct i915_execbuffer *eb,
|
|||
}
|
||||
|
||||
static void
|
||||
__free_fence_array(struct drm_syncobj **fences, unsigned int n)
|
||||
__free_fence_array(struct eb_fence *fences, unsigned int n)
|
||||
{
|
||||
while (n--)
|
||||
drm_syncobj_put(ptr_mask_bits(fences[n], 2));
|
||||
while (n--) {
|
||||
drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2));
|
||||
dma_fence_put(fences[n].dma_fence);
|
||||
kfree(fences[n].chain_fence);
|
||||
}
|
||||
kvfree(fences);
|
||||
}
|
||||
|
||||
static struct drm_syncobj **
|
||||
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
|
||||
struct drm_file *file)
|
||||
static int
|
||||
add_timeline_fence_array(struct i915_execbuffer *eb,
|
||||
const struct drm_i915_gem_execbuffer_ext_timeline_fences *timeline_fences)
|
||||
{
|
||||
const unsigned long nfences = args->num_cliprects;
|
||||
struct drm_i915_gem_exec_fence __user *user;
|
||||
struct drm_syncobj **fences;
|
||||
unsigned long n;
|
||||
int err;
|
||||
struct drm_i915_gem_exec_fence __user *user_fences;
|
||||
u64 __user *user_values;
|
||||
struct eb_fence *f;
|
||||
u64 nfences;
|
||||
int err = 0;
|
||||
|
||||
if (!(args->flags & I915_EXEC_FENCE_ARRAY))
|
||||
return NULL;
|
||||
nfences = timeline_fences->fence_count;
|
||||
if (!nfences)
|
||||
return 0;
|
||||
|
||||
/* Check multiplication overflow for access_ok() and kvmalloc_array() */
|
||||
BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
|
||||
if (nfences > min_t(unsigned long,
|
||||
ULONG_MAX / sizeof(*user),
|
||||
SIZE_MAX / sizeof(*fences)))
|
||||
return ERR_PTR(-EINVAL);
|
||||
ULONG_MAX / sizeof(*user_fences),
|
||||
SIZE_MAX / sizeof(*f)) - eb->num_fences)
|
||||
return -EINVAL;
|
||||
|
||||
user = u64_to_user_ptr(args->cliprects_ptr);
|
||||
if (!access_ok(user, nfences * sizeof(*user)))
|
||||
return ERR_PTR(-EFAULT);
|
||||
user_fences = u64_to_user_ptr(timeline_fences->handles_ptr);
|
||||
if (!access_ok(user_fences, nfences * sizeof(*user_fences)))
|
||||
return -EFAULT;
|
||||
|
||||
fences = kvmalloc_array(nfences, sizeof(*fences),
|
||||
__GFP_NOWARN | GFP_KERNEL);
|
||||
if (!fences)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
user_values = u64_to_user_ptr(timeline_fences->values_ptr);
|
||||
if (!access_ok(user_values, nfences * sizeof(*user_values)))
|
||||
return -EFAULT;
|
||||
|
||||
for (n = 0; n < nfences; n++) {
|
||||
struct drm_i915_gem_exec_fence fence;
|
||||
f = krealloc(eb->fences,
|
||||
(eb->num_fences + nfences) * sizeof(*f),
|
||||
__GFP_NOWARN | GFP_KERNEL);
|
||||
if (!f)
|
||||
return -ENOMEM;
|
||||
|
||||
eb->fences = f;
|
||||
f += eb->num_fences;
|
||||
|
||||
BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
|
||||
~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
|
||||
|
||||
while (nfences--) {
|
||||
struct drm_i915_gem_exec_fence user_fence;
|
||||
struct drm_syncobj *syncobj;
|
||||
struct dma_fence *fence = NULL;
|
||||
u64 point;
|
||||
|
||||
if (__copy_from_user(&fence, user++, sizeof(fence))) {
|
||||
err = -EFAULT;
|
||||
goto err;
|
||||
}
|
||||
if (__copy_from_user(&user_fence,
|
||||
user_fences++,
|
||||
sizeof(user_fence)))
|
||||
return -EFAULT;
|
||||
|
||||
if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
|
||||
err = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS)
|
||||
return -EINVAL;
|
||||
|
||||
syncobj = drm_syncobj_find(file, fence.handle);
|
||||
if (__get_user(point, user_values++))
|
||||
return -EFAULT;
|
||||
|
||||
syncobj = drm_syncobj_find(eb->file, user_fence.handle);
|
||||
if (!syncobj) {
|
||||
DRM_DEBUG("Invalid syncobj handle provided\n");
|
||||
err = -ENOENT;
|
||||
goto err;
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
fence = drm_syncobj_fence_get(syncobj);
|
||||
|
||||
if (!fence && user_fence.flags &&
|
||||
!(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
|
||||
DRM_DEBUG("Syncobj handle has no fence\n");
|
||||
drm_syncobj_put(syncobj);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (fence)
|
||||
err = dma_fence_chain_find_seqno(&fence, point);
|
||||
|
||||
if (err && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
|
||||
DRM_DEBUG("Syncobj handle missing requested point %llu\n", point);
|
||||
drm_syncobj_put(syncobj);
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* A point might have been signaled already and
|
||||
* garbage collected from the timeline. In this case
|
||||
* just ignore the point and carry on.
|
||||
*/
|
||||
if (!fence && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
|
||||
drm_syncobj_put(syncobj);
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* For timeline syncobjs we need to preallocate chains for
|
||||
* later signaling.
|
||||
*/
|
||||
if (point != 0 && user_fence.flags & I915_EXEC_FENCE_SIGNAL) {
|
||||
/*
|
||||
* Waiting and signaling the same point (when point !=
|
||||
* 0) would break the timeline.
|
||||
*/
|
||||
if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
|
||||
DRM_DEBUG("Trying to wait & signal the same timeline point.\n");
|
||||
dma_fence_put(fence);
|
||||
drm_syncobj_put(syncobj);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
f->chain_fence =
|
||||
kmalloc(sizeof(*f->chain_fence),
|
||||
GFP_KERNEL);
|
||||
if (!f->chain_fence) {
|
||||
drm_syncobj_put(syncobj);
|
||||
dma_fence_put(fence);
|
||||
return -ENOMEM;
|
||||
}
|
||||
} else {
|
||||
f->chain_fence = NULL;
|
||||
}
|
||||
|
||||
f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2);
|
||||
f->dma_fence = fence;
|
||||
f->value = point;
|
||||
f++;
|
||||
eb->num_fences++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int add_fence_array(struct i915_execbuffer *eb)
|
||||
{
|
||||
struct drm_i915_gem_execbuffer2 *args = eb->args;
|
||||
struct drm_i915_gem_exec_fence __user *user;
|
||||
unsigned long num_fences = args->num_cliprects;
|
||||
struct eb_fence *f;
|
||||
|
||||
if (!(args->flags & I915_EXEC_FENCE_ARRAY))
|
||||
return 0;
|
||||
|
||||
if (!num_fences)
|
||||
return 0;
|
||||
|
||||
/* Check multiplication overflow for access_ok() and kvmalloc_array() */
|
||||
BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
|
||||
if (num_fences > min_t(unsigned long,
|
||||
ULONG_MAX / sizeof(*user),
|
||||
SIZE_MAX / sizeof(*f) - eb->num_fences))
|
||||
return -EINVAL;
|
||||
|
||||
user = u64_to_user_ptr(args->cliprects_ptr);
|
||||
if (!access_ok(user, num_fences * sizeof(*user)))
|
||||
return -EFAULT;
|
||||
|
||||
f = krealloc(eb->fences,
|
||||
(eb->num_fences + num_fences) * sizeof(*f),
|
||||
__GFP_NOWARN | GFP_KERNEL);
|
||||
if (!f)
|
||||
return -ENOMEM;
|
||||
|
||||
eb->fences = f;
|
||||
f += eb->num_fences;
|
||||
while (num_fences--) {
|
||||
struct drm_i915_gem_exec_fence user_fence;
|
||||
struct drm_syncobj *syncobj;
|
||||
struct dma_fence *fence = NULL;
|
||||
|
||||
if (__copy_from_user(&user_fence, user++, sizeof(user_fence)))
|
||||
return -EFAULT;
|
||||
|
||||
if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS)
|
||||
return -EINVAL;
|
||||
|
||||
syncobj = drm_syncobj_find(eb->file, user_fence.handle);
|
||||
if (!syncobj) {
|
||||
DRM_DEBUG("Invalid syncobj handle provided\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
|
||||
fence = drm_syncobj_fence_get(syncobj);
|
||||
if (!fence) {
|
||||
DRM_DEBUG("Syncobj handle has no fence\n");
|
||||
drm_syncobj_put(syncobj);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
|
||||
~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
|
||||
|
||||
fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
|
||||
f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2);
|
||||
f->dma_fence = fence;
|
||||
f->value = 0;
|
||||
f->chain_fence = NULL;
|
||||
f++;
|
||||
eb->num_fences++;
|
||||
}
|
||||
|
||||
return fences;
|
||||
|
||||
err:
|
||||
__free_fence_array(fences, n);
|
||||
return ERR_PTR(err);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
put_fence_array(struct drm_i915_gem_execbuffer2 *args,
|
||||
struct drm_syncobj **fences)
|
||||
static void put_fence_array(struct eb_fence *fences, int num_fences)
|
||||
{
|
||||
if (fences)
|
||||
__free_fence_array(fences, args->num_cliprects);
|
||||
__free_fence_array(fences, num_fences);
|
||||
}
|
||||
|
||||
static int
|
||||
await_fence_array(struct i915_execbuffer *eb,
|
||||
struct drm_syncobj **fences)
|
||||
await_fence_array(struct i915_execbuffer *eb)
|
||||
{
|
||||
const unsigned int nfences = eb->args->num_cliprects;
|
||||
unsigned int n;
|
||||
int err;
|
||||
|
||||
for (n = 0; n < nfences; n++) {
|
||||
for (n = 0; n < eb->num_fences; n++) {
|
||||
struct drm_syncobj *syncobj;
|
||||
struct dma_fence *fence;
|
||||
unsigned int flags;
|
||||
|
||||
syncobj = ptr_unpack_bits(fences[n], &flags, 2);
|
||||
if (!(flags & I915_EXEC_FENCE_WAIT))
|
||||
syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2);
|
||||
|
||||
if (!eb->fences[n].dma_fence)
|
||||
continue;
|
||||
|
||||
fence = drm_syncobj_fence_get(syncobj);
|
||||
if (!fence)
|
||||
return -EINVAL;
|
||||
|
||||
err = i915_request_await_dma_fence(eb->request, fence);
|
||||
dma_fence_put(fence);
|
||||
err = i915_request_await_dma_fence(eb->request,
|
||||
eb->fences[n].dma_fence);
|
||||
if (err < 0)
|
||||
return err;
|
||||
}
|
||||
|
@ -2309,26 +2455,47 @@ await_fence_array(struct i915_execbuffer *eb,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
signal_fence_array(struct i915_execbuffer *eb,
|
||||
struct drm_syncobj **fences)
|
||||
static void signal_fence_array(const struct i915_execbuffer *eb)
|
||||
{
|
||||
const unsigned int nfences = eb->args->num_cliprects;
|
||||
struct dma_fence * const fence = &eb->request->fence;
|
||||
unsigned int n;
|
||||
|
||||
for (n = 0; n < nfences; n++) {
|
||||
for (n = 0; n < eb->num_fences; n++) {
|
||||
struct drm_syncobj *syncobj;
|
||||
unsigned int flags;
|
||||
|
||||
syncobj = ptr_unpack_bits(fences[n], &flags, 2);
|
||||
syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2);
|
||||
if (!(flags & I915_EXEC_FENCE_SIGNAL))
|
||||
continue;
|
||||
|
||||
drm_syncobj_replace_fence(syncobj, fence);
|
||||
if (eb->fences[n].chain_fence) {
|
||||
drm_syncobj_add_point(syncobj,
|
||||
eb->fences[n].chain_fence,
|
||||
fence,
|
||||
eb->fences[n].value);
|
||||
/*
|
||||
* The chain's ownership is transferred to the
|
||||
* timeline.
|
||||
*/
|
||||
eb->fences[n].chain_fence = NULL;
|
||||
} else {
|
||||
drm_syncobj_replace_fence(syncobj, fence);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
parse_timeline_fences(struct i915_user_extension __user *ext, void *data)
|
||||
{
|
||||
struct i915_execbuffer *eb = data;
|
||||
struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
|
||||
|
||||
if (copy_from_user(&timeline_fences, ext, sizeof(timeline_fences)))
|
||||
return -EFAULT;
|
||||
|
||||
return add_timeline_fence_array(eb, &timeline_fences);
|
||||
}
|
||||
|
||||
static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
|
||||
{
|
||||
struct i915_request *rq, *rn;
|
||||
|
@ -2370,12 +2537,37 @@ static void eb_request_add(struct i915_execbuffer *eb)
|
|||
mutex_unlock(&tl->mutex);
|
||||
}
|
||||
|
||||
static const i915_user_extension_fn execbuf_extensions[] = {
|
||||
[DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES] = parse_timeline_fences,
|
||||
};
|
||||
|
||||
static int
|
||||
parse_execbuf2_extensions(struct drm_i915_gem_execbuffer2 *args,
|
||||
struct i915_execbuffer *eb)
|
||||
{
|
||||
if (!(args->flags & I915_EXEC_USE_EXTENSIONS))
|
||||
return 0;
|
||||
|
||||
/* The execbuf2 extension mechanism reuses cliprects_ptr. So we cannot
|
||||
* have another flag also using it at the same time.
|
||||
*/
|
||||
if (eb->args->flags & I915_EXEC_FENCE_ARRAY)
|
||||
return -EINVAL;
|
||||
|
||||
if (args->num_cliprects != 0)
|
||||
return -EINVAL;
|
||||
|
||||
return i915_user_extensions(u64_to_user_ptr(args->cliprects_ptr),
|
||||
execbuf_extensions,
|
||||
ARRAY_SIZE(execbuf_extensions),
|
||||
eb);
|
||||
}
|
||||
|
||||
static int
|
||||
i915_gem_do_execbuffer(struct drm_device *dev,
|
||||
struct drm_file *file,
|
||||
struct drm_i915_gem_execbuffer2 *args,
|
||||
struct drm_i915_gem_exec_object2 *exec,
|
||||
struct drm_syncobj **fences)
|
||||
struct drm_i915_gem_exec_object2 *exec)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(dev);
|
||||
struct i915_execbuffer eb;
|
||||
|
@ -2405,6 +2597,9 @@ i915_gem_do_execbuffer(struct drm_device *dev,
|
|||
eb.batch_len = args->batch_len;
|
||||
eb.trampoline = NULL;
|
||||
|
||||
eb.fences = NULL;
|
||||
eb.num_fences = 0;
|
||||
|
||||
eb.batch_flags = 0;
|
||||
if (args->flags & I915_EXEC_SECURE) {
|
||||
if (INTEL_GEN(i915) >= 11)
|
||||
|
@ -2422,14 +2617,24 @@ i915_gem_do_execbuffer(struct drm_device *dev,
|
|||
if (args->flags & I915_EXEC_IS_PINNED)
|
||||
eb.batch_flags |= I915_DISPATCH_PINNED;
|
||||
|
||||
err = parse_execbuf2_extensions(args, &eb);
|
||||
if (err)
|
||||
goto err_ext;
|
||||
|
||||
err = add_fence_array(&eb);
|
||||
if (err)
|
||||
goto err_ext;
|
||||
|
||||
#define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT)
|
||||
if (args->flags & IN_FENCES) {
|
||||
if ((args->flags & IN_FENCES) == IN_FENCES)
|
||||
return -EINVAL;
|
||||
|
||||
in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
|
||||
if (!in_fence)
|
||||
return -EINVAL;
|
||||
if (!in_fence) {
|
||||
err = -EINVAL;
|
||||
goto err_ext;
|
||||
}
|
||||
}
|
||||
#undef IN_FENCES
|
||||
|
||||
|
@ -2539,8 +2744,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
|
|||
goto err_request;
|
||||
}
|
||||
|
||||
if (fences) {
|
||||
err = await_fence_array(&eb, fences);
|
||||
if (eb.fences) {
|
||||
err = await_fence_array(&eb);
|
||||
if (err)
|
||||
goto err_request;
|
||||
}
|
||||
|
@ -2571,8 +2776,8 @@ err_request:
|
|||
i915_request_get(eb.request);
|
||||
eb_request_add(&eb);
|
||||
|
||||
if (fences)
|
||||
signal_fence_array(&eb, fences);
|
||||
if (eb.fences)
|
||||
signal_fence_array(&eb);
|
||||
|
||||
if (out_fence) {
|
||||
if (err == 0) {
|
||||
|
@ -2605,6 +2810,8 @@ err_out_fence:
|
|||
put_unused_fd(out_fence_fd);
|
||||
err_in_fence:
|
||||
dma_fence_put(in_fence);
|
||||
err_ext:
|
||||
put_fence_array(eb.fences, eb.num_fences);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -2699,7 +2906,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
|
|||
exec2_list[i].flags = 0;
|
||||
}
|
||||
|
||||
err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
|
||||
err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list);
|
||||
if (exec2.flags & __EXEC_HAS_RELOC) {
|
||||
struct drm_i915_gem_exec_object __user *user_exec_list =
|
||||
u64_to_user_ptr(args->buffers_ptr);
|
||||
|
@ -2731,7 +2938,6 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
|
|||
struct drm_i915_private *i915 = to_i915(dev);
|
||||
struct drm_i915_gem_execbuffer2 *args = data;
|
||||
struct drm_i915_gem_exec_object2 *exec2_list;
|
||||
struct drm_syncobj **fences = NULL;
|
||||
const size_t count = args->buffer_count;
|
||||
int err;
|
||||
|
||||
|
@ -2759,15 +2965,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
|
|||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (args->flags & I915_EXEC_FENCE_ARRAY) {
|
||||
fences = get_fence_array(args, file);
|
||||
if (IS_ERR(fences)) {
|
||||
kvfree(exec2_list);
|
||||
return PTR_ERR(fences);
|
||||
}
|
||||
}
|
||||
|
||||
err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);
|
||||
err = i915_gem_do_execbuffer(dev, file, args, exec2_list);
|
||||
|
||||
/*
|
||||
* Now that we have begun execution of the batchbuffer, we ignore
|
||||
|
@ -2808,7 +3006,6 @@ end:;
|
|||
}
|
||||
|
||||
args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
|
||||
put_fence_array(args, fences);
|
||||
kvfree(exec2_list);
|
||||
return err;
|
||||
}
|
||||
|
|
|
@ -4555,7 +4555,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
|
|||
vf_flush_wa = true;
|
||||
|
||||
/* WaForGAMHang:kbl */
|
||||
if (IS_KBL_REVID(request->engine->i915, 0, KBL_REVID_B0))
|
||||
if (IS_KBL_GT_REVID(request->engine->i915, 0, KBL_REVID_B0))
|
||||
dc_flush_wa = true;
|
||||
}
|
||||
|
||||
|
|
|
@ -52,6 +52,24 @@
|
|||
* - Public functions to init or apply the given workaround type.
|
||||
*/
|
||||
|
||||
/*
|
||||
* KBL revision ID ordering is bizarre; higher revision ID's map to lower
|
||||
* steppings in some cases. So rather than test against the revision ID
|
||||
* directly, let's map that into our own range of increasing ID's that we
|
||||
* can test against in a regular manner.
|
||||
*/
|
||||
|
||||
const struct i915_rev_steppings kbl_revids[] = {
|
||||
[0] = { .gt_stepping = KBL_REVID_A0, .disp_stepping = KBL_REVID_A0 },
|
||||
[1] = { .gt_stepping = KBL_REVID_B0, .disp_stepping = KBL_REVID_B0 },
|
||||
[2] = { .gt_stepping = KBL_REVID_C0, .disp_stepping = KBL_REVID_B0 },
|
||||
[3] = { .gt_stepping = KBL_REVID_D0, .disp_stepping = KBL_REVID_B0 },
|
||||
[4] = { .gt_stepping = KBL_REVID_F0, .disp_stepping = KBL_REVID_C0 },
|
||||
[5] = { .gt_stepping = KBL_REVID_C0, .disp_stepping = KBL_REVID_B1 },
|
||||
[6] = { .gt_stepping = KBL_REVID_D1, .disp_stepping = KBL_REVID_B1 },
|
||||
[7] = { .gt_stepping = KBL_REVID_G0, .disp_stepping = KBL_REVID_C0 },
|
||||
};
|
||||
|
||||
static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
|
||||
{
|
||||
wal->name = name;
|
||||
|
@ -470,7 +488,7 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
|
|||
gen9_ctx_workarounds_init(engine, wal);
|
||||
|
||||
/* WaToEnableHwFixForPushConstHWBug:kbl */
|
||||
if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
|
||||
if (IS_KBL_GT_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
|
||||
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
|
||||
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
|
||||
|
||||
|
@ -596,8 +614,8 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
|
|||
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
|
||||
}
|
||||
|
||||
static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
|
||||
struct i915_wa_list *wal)
|
||||
static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
|
||||
struct i915_wa_list *wal)
|
||||
{
|
||||
/*
|
||||
* Wa_1409142259:tgl
|
||||
|
@ -607,12 +625,28 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
|
|||
* Wa_1409207793:tgl
|
||||
* Wa_1409178076:tgl
|
||||
* Wa_1408979724:tgl
|
||||
* Wa_14010443199:rkl
|
||||
* Wa_14010698770:rkl
|
||||
*/
|
||||
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
|
||||
GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
|
||||
|
||||
/* WaDisableGPGPUMidThreadPreemption:gen12 */
|
||||
WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
|
||||
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
|
||||
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
|
||||
}
|
||||
|
||||
static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
|
||||
struct i915_wa_list *wal)
|
||||
{
|
||||
gen12_ctx_workarounds_init(engine, wal);
|
||||
|
||||
/*
|
||||
* Wa_1604555607:gen12 and Wa_1608008084:gen12
|
||||
* Wa_1604555607:tgl,rkl
|
||||
*
|
||||
* Note that the implementation of this workaround is further modified
|
||||
* according to the FF_MODE2 guidance given by Wa_1608008084:gen12.
|
||||
* FF_MODE2 register will return the wrong value when read. The default
|
||||
* value for this register is zero for all fields and there are no bit
|
||||
* masks. So instead of doing a RMW we should just write the GS Timer
|
||||
|
@ -623,11 +657,6 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
|
|||
FF_MODE2_GS_TIMER_MASK | FF_MODE2_TDS_TIMER_MASK,
|
||||
FF_MODE2_GS_TIMER_224 | FF_MODE2_TDS_TIMER_128,
|
||||
0);
|
||||
|
||||
/* WaDisableGPGPUMidThreadPreemption:tgl */
|
||||
WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
|
||||
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
|
||||
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -642,8 +671,10 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
|
|||
|
||||
wa_init_start(wal, name, engine->name);
|
||||
|
||||
if (IS_GEN(i915, 12))
|
||||
if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915))
|
||||
tgl_ctx_workarounds_init(engine, wal);
|
||||
else if (IS_GEN(i915, 12))
|
||||
gen12_ctx_workarounds_init(engine, wal);
|
||||
else if (IS_GEN(i915, 11))
|
||||
icl_ctx_workarounds_init(engine, wal);
|
||||
else if (IS_CANNONLAKE(i915))
|
||||
|
@ -995,7 +1026,7 @@ kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
|||
gen9_gt_workarounds_init(i915, wal);
|
||||
|
||||
/* WaDisableDynamicCreditSharing:kbl */
|
||||
if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
|
||||
if (IS_KBL_GT_REVID(i915, 0, KBL_REVID_B0))
|
||||
wa_write_or(wal,
|
||||
GAMT_CHKN_BIT_REG,
|
||||
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
|
||||
|
@ -1176,9 +1207,16 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
|||
}
|
||||
|
||||
static void
|
||||
tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
||||
gen12_gt_workarounds_init(struct drm_i915_private *i915,
|
||||
struct i915_wa_list *wal)
|
||||
{
|
||||
wa_init_mcr(i915, wal);
|
||||
}
|
||||
|
||||
static void
|
||||
tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
||||
{
|
||||
gen12_gt_workarounds_init(i915, wal);
|
||||
|
||||
/* Wa_1409420604:tgl */
|
||||
if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
|
||||
|
@ -1196,8 +1234,10 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
|||
static void
|
||||
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
||||
{
|
||||
if (IS_GEN(i915, 12))
|
||||
if (IS_TIGERLAKE(i915))
|
||||
tgl_gt_workarounds_init(i915, wal);
|
||||
else if (IS_GEN(i915, 12))
|
||||
gen12_gt_workarounds_init(i915, wal);
|
||||
else if (IS_GEN(i915, 11))
|
||||
icl_gt_workarounds_init(i915, wal);
|
||||
else if (IS_CANNONLAKE(i915))
|
||||
|
@ -1629,18 +1669,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|||
GEN9_CTX_PREEMPT_REG,
|
||||
GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
|
||||
|
||||
/*
|
||||
* Wa_1607030317:tgl
|
||||
* Wa_1607186500:tgl
|
||||
* Wa_1607297627:tgl there is 3 entries for this WA on BSpec, 2
|
||||
* of then says it is fixed on B0 the other one says it is
|
||||
* permanent
|
||||
*/
|
||||
wa_masked_en(wal,
|
||||
GEN6_RC_SLEEP_PSMI_CONTROL,
|
||||
GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
|
||||
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
|
||||
|
||||
/*
|
||||
* Wa_1606679103:tgl
|
||||
* (see also Wa_1606682166:icl)
|
||||
|
@ -1654,22 +1682,17 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|||
VSUNIT_CLKGATE_DIS_TGL);
|
||||
}
|
||||
|
||||
if (IS_TIGERLAKE(i915)) {
|
||||
/* Wa_1606931601:tgl */
|
||||
if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
|
||||
/* Wa_1606931601:tgl,rkl */
|
||||
wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
|
||||
|
||||
/* Wa_1409804808:tgl */
|
||||
/* Wa_1409804808:tgl,rkl */
|
||||
wa_masked_en(wal, GEN7_ROW_CHICKEN2,
|
||||
GEN12_PUSH_CONST_DEREF_HOLD_DIS);
|
||||
|
||||
/* Wa_1606700617:tgl */
|
||||
wa_masked_en(wal,
|
||||
GEN9_CS_DEBUG_MODE1,
|
||||
FF_DOP_CLOCK_GATE_DISABLE);
|
||||
|
||||
/*
|
||||
* Wa_1409085225:tgl
|
||||
* Wa_14010229206:tgl
|
||||
* Wa_14010229206:tgl,rkl
|
||||
*/
|
||||
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
|
||||
|
||||
|
@ -1677,9 +1700,29 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|||
* Wa_1407928979:tgl A*
|
||||
* Wa_18011464164:tgl B0+
|
||||
* Wa_22010931296:tgl B0+
|
||||
* Wa_14010919138:rkl
|
||||
*/
|
||||
wa_write_or(wal, GEN7_FF_THREAD_MODE,
|
||||
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
|
||||
|
||||
/*
|
||||
* Wa_1607030317:tgl
|
||||
* Wa_1607186500:tgl
|
||||
* Wa_1607297627:tgl,rkl there are multiple entries for this
|
||||
* WA in the BSpec; some indicate this is an A0-only WA,
|
||||
* others indicate it applies to all steppings.
|
||||
*/
|
||||
wa_masked_en(wal,
|
||||
GEN6_RC_SLEEP_PSMI_CONTROL,
|
||||
GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
|
||||
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
|
||||
}
|
||||
|
||||
if (IS_TIGERLAKE(i915)) {
|
||||
/* Wa_1606700617:tgl */
|
||||
wa_masked_en(wal,
|
||||
GEN9_CS_DEBUG_MODE1,
|
||||
FF_DOP_CLOCK_GATE_DISABLE);
|
||||
}
|
||||
|
||||
if (IS_GEN(i915, 11)) {
|
||||
|
@ -1898,7 +1941,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|||
struct drm_i915_private *i915 = engine->i915;
|
||||
|
||||
/* WaKBLVECSSemaphoreWaitPoll:kbl */
|
||||
if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
|
||||
if (IS_KBL_GT_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
|
||||
wa_write(wal,
|
||||
RING_SEMA_WAIT_POLL(engine->mmio_base),
|
||||
1);
|
||||
|
|
|
@ -47,7 +47,10 @@ static int pulse_active(struct i915_active *active)
|
|||
|
||||
static void pulse_free(struct kref *kref)
|
||||
{
|
||||
kfree(container_of(kref, struct pulse, kref));
|
||||
struct pulse *p = container_of(kref, typeof(*p), kref);
|
||||
|
||||
i915_active_fini(&p->active);
|
||||
kfree(p);
|
||||
}
|
||||
|
||||
static void pulse_put(struct pulse *p)
|
||||
|
|
|
@ -700,7 +700,7 @@ int live_rps_frequency_cs(void *arg)
|
|||
f = act; /* may skip ahead [pcu granularity] */
|
||||
}
|
||||
|
||||
err = -EINVAL;
|
||||
err = -EINTR; /* ignore error, continue on with test */
|
||||
}
|
||||
|
||||
err_vma:
|
||||
|
@ -841,7 +841,7 @@ int live_rps_frequency_srm(void *arg)
|
|||
f = act; /* may skip ahead [pcu granularity] */
|
||||
}
|
||||
|
||||
err = -EINVAL;
|
||||
err = -EINTR; /* ignore error, continue on with test */
|
||||
}
|
||||
|
||||
err_vma:
|
||||
|
|
|
@ -491,7 +491,7 @@ checked_intel_timeline_create(struct intel_gt *gt)
|
|||
if (IS_ERR(tl))
|
||||
return tl;
|
||||
|
||||
if (*tl->hwsp_seqno != tl->seqno) {
|
||||
if (READ_ONCE(*tl->hwsp_seqno) != tl->seqno) {
|
||||
pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
|
||||
*tl->hwsp_seqno, tl->seqno);
|
||||
intel_timeline_put(tl);
|
||||
|
@ -561,9 +561,9 @@ static int live_hwsp_engine(void *arg)
|
|||
for (n = 0; n < count; n++) {
|
||||
struct intel_timeline *tl = timelines[n];
|
||||
|
||||
if (!err && *tl->hwsp_seqno != n) {
|
||||
pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
|
||||
n, tl->hwsp_offset, *tl->hwsp_seqno);
|
||||
if (!err && READ_ONCE(*tl->hwsp_seqno) != n) {
|
||||
GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n",
|
||||
n, tl->fence_context, tl->hwsp_offset, *tl->hwsp_seqno);
|
||||
GEM_TRACE_DUMP();
|
||||
err = -EINVAL;
|
||||
}
|
||||
|
@ -633,9 +633,9 @@ out:
|
|||
for (n = 0; n < count; n++) {
|
||||
struct intel_timeline *tl = timelines[n];
|
||||
|
||||
if (!err && *tl->hwsp_seqno != n) {
|
||||
pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
|
||||
n, tl->hwsp_offset, *tl->hwsp_seqno);
|
||||
if (!err && READ_ONCE(*tl->hwsp_seqno) != n) {
|
||||
GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n",
|
||||
n, tl->fence_context, tl->hwsp_offset, *tl->hwsp_seqno);
|
||||
GEM_TRACE_DUMP();
|
||||
err = -EINVAL;
|
||||
}
|
||||
|
@ -733,7 +733,8 @@ static int live_hwsp_wrap(void *arg)
|
|||
goto out;
|
||||
}
|
||||
|
||||
if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) {
|
||||
if (READ_ONCE(*hwsp_seqno[0]) != seqno[0] ||
|
||||
READ_ONCE(*hwsp_seqno[1]) != seqno[1]) {
|
||||
pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
|
||||
*hwsp_seqno[0], *hwsp_seqno[1],
|
||||
seqno[0], seqno[1]);
|
||||
|
@ -966,9 +967,10 @@ static int live_hwsp_recycle(void *arg)
|
|||
break;
|
||||
}
|
||||
|
||||
if (*tl->hwsp_seqno != count) {
|
||||
pr_err("Invalid seqno stored in timeline %lu @ tl->hwsp_offset, found 0x%x\n",
|
||||
count, *tl->hwsp_seqno);
|
||||
if (READ_ONCE(*tl->hwsp_seqno) != count) {
|
||||
GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x found 0x%x\n",
|
||||
count, tl->fence_context,
|
||||
tl->hwsp_offset, *tl->hwsp_seqno);
|
||||
GEM_TRACE_DUMP();
|
||||
err = -EINVAL;
|
||||
}
|
||||
|
|
|
@ -51,8 +51,8 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
|
|||
* Note that RKL uses the same firmware as TGL.
|
||||
*/
|
||||
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
|
||||
fw_def(ROCKETLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \
|
||||
fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \
|
||||
fw_def(ROCKETLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 5, 0)) \
|
||||
fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 5, 0)) \
|
||||
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \
|
||||
fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \
|
||||
fw_def(COMETLAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
|
||||
|
|
|
@ -758,7 +758,6 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
|
|||
intel_engine_mask_t tmp, mask = engine->mask;
|
||||
struct llist_node *first = NULL, *last = NULL;
|
||||
struct intel_gt *gt = engine->gt;
|
||||
int err;
|
||||
|
||||
GEM_BUG_ON(i915_active_is_idle(ref));
|
||||
|
||||
|
@ -781,10 +780,8 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
|
|||
node = reuse_idle_barrier(ref, idx);
|
||||
if (!node) {
|
||||
node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
|
||||
if (!node) {
|
||||
err = ENOMEM;
|
||||
if (!node)
|
||||
goto unwind;
|
||||
}
|
||||
|
||||
RCU_INIT_POINTER(node->base.fence, NULL);
|
||||
node->base.cb.func = node_retire;
|
||||
|
@ -832,7 +829,7 @@ unwind:
|
|||
|
||||
kmem_cache_free(global.slab_cache, node);
|
||||
}
|
||||
return err;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
void i915_active_acquire_barrier(struct i915_active *ref)
|
||||
|
|
|
@ -1204,6 +1204,12 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
|
|||
return dst;
|
||||
}
|
||||
|
||||
static inline bool cmd_desc_is(const struct drm_i915_cmd_descriptor * const desc,
|
||||
const u32 cmd)
|
||||
{
|
||||
return desc->cmd.value == (cmd & desc->cmd.mask);
|
||||
}
|
||||
|
||||
static bool check_cmd(const struct intel_engine_cs *engine,
|
||||
const struct drm_i915_cmd_descriptor *desc,
|
||||
const u32 *cmd, u32 length)
|
||||
|
@ -1242,19 +1248,19 @@ static bool check_cmd(const struct intel_engine_cs *engine,
|
|||
* allowed mask/value pair given in the whitelist entry.
|
||||
*/
|
||||
if (reg->mask) {
|
||||
if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
|
||||
if (cmd_desc_is(desc, MI_LOAD_REGISTER_MEM)) {
|
||||
DRM_DEBUG("CMD: Rejected LRM to masked register 0x%08X\n",
|
||||
reg_addr);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
|
||||
if (cmd_desc_is(desc, MI_LOAD_REGISTER_REG)) {
|
||||
DRM_DEBUG("CMD: Rejected LRR to masked register 0x%08X\n",
|
||||
reg_addr);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
|
||||
if (cmd_desc_is(desc, MI_LOAD_REGISTER_IMM(1)) &&
|
||||
(offset + 2 > length ||
|
||||
(cmd[offset + 1] & reg->mask) != reg->value)) {
|
||||
DRM_DEBUG("CMD: Rejected LRI to masked register 0x%08X\n",
|
||||
|
@ -1478,7 +1484,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
|
|||
break;
|
||||
}
|
||||
|
||||
if (desc->cmd.value == MI_BATCH_BUFFER_START) {
|
||||
if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) {
|
||||
ret = check_bbstart(cmd, offset, length, batch_length,
|
||||
batch_addr, shadow_addr,
|
||||
jump_whitelist);
|
||||
|
|
|
@ -392,7 +392,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
|
|||
pre |= IS_HSW_EARLY_SDV(dev_priv);
|
||||
pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
|
||||
pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
|
||||
pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
|
||||
pre |= IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_A0);
|
||||
pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);
|
||||
|
||||
if (pre) {
|
||||
|
@ -1846,7 +1846,8 @@ static struct drm_driver driver = {
|
|||
*/
|
||||
.driver_features =
|
||||
DRIVER_GEM |
|
||||
DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
|
||||
DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
|
||||
DRIVER_SYNCOBJ_TIMELINE,
|
||||
.release = i915_driver_release,
|
||||
.open = i915_driver_open,
|
||||
.lastclose = i915_driver_lastclose,
|
||||
|
|
|
@ -108,8 +108,8 @@
|
|||
|
||||
#define DRIVER_NAME "i915"
|
||||
#define DRIVER_DESC "Intel Graphics"
|
||||
#define DRIVER_DATE "20200715"
|
||||
#define DRIVER_TIMESTAMP 1594811881
|
||||
#define DRIVER_DATE "20200824"
|
||||
#define DRIVER_TIMESTAMP 1598293597
|
||||
|
||||
struct drm_i915_gem_object;
|
||||
|
||||
|
@ -506,6 +506,7 @@ struct i915_psr {
|
|||
bool link_standby;
|
||||
bool colorimetry_support;
|
||||
bool psr2_enabled;
|
||||
bool psr2_sel_fetch_enabled;
|
||||
u8 sink_sync_latency;
|
||||
ktime_t last_entry_attempt;
|
||||
ktime_t last_exit;
|
||||
|
@ -1044,6 +1045,14 @@ struct drm_i915_private {
|
|||
|
||||
struct intel_l3_parity l3_parity;
|
||||
|
||||
/*
|
||||
* HTI (aka HDPORT) state read during initial hw readout. Most
|
||||
* platforms don't have HTI, so this will just stay 0. Those that do
|
||||
* will use this later to figure out which PLLs and PHYs are unavailable
|
||||
* for driver usage.
|
||||
*/
|
||||
u32 hti_state;
|
||||
|
||||
/*
|
||||
* edram size in MB.
|
||||
* Cannot be determined by PCIID. You must always read a register.
|
||||
|
@ -1489,6 +1498,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
|
|||
#define IS_ICL_WITH_PORT_F(dev_priv) \
|
||||
IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
|
||||
|
||||
#define IS_TGL_U(dev_priv) \
|
||||
IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULT)
|
||||
|
||||
#define IS_TGL_Y(dev_priv) \
|
||||
IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX)
|
||||
|
||||
#define SKL_REVID_A0 0x0
|
||||
#define SKL_REVID_B0 0x1
|
||||
#define SKL_REVID_C0 0x2
|
||||
|
@ -1509,14 +1524,34 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
|
|||
#define IS_BXT_REVID(dev_priv, since, until) \
|
||||
(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
|
||||
|
||||
#define KBL_REVID_A0 0x0
|
||||
#define KBL_REVID_B0 0x1
|
||||
#define KBL_REVID_C0 0x2
|
||||
#define KBL_REVID_D0 0x3
|
||||
#define KBL_REVID_E0 0x4
|
||||
enum {
|
||||
KBL_REVID_A0,
|
||||
KBL_REVID_B0,
|
||||
KBL_REVID_B1,
|
||||
KBL_REVID_C0,
|
||||
KBL_REVID_D0,
|
||||
KBL_REVID_D1,
|
||||
KBL_REVID_E0,
|
||||
KBL_REVID_F0,
|
||||
KBL_REVID_G0,
|
||||
};
|
||||
|
||||
#define IS_KBL_REVID(dev_priv, since, until) \
|
||||
(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
|
||||
struct i915_rev_steppings {
|
||||
u8 gt_stepping;
|
||||
u8 disp_stepping;
|
||||
};
|
||||
|
||||
/* Defined in intel_workarounds.c */
|
||||
extern const struct i915_rev_steppings kbl_revids[];
|
||||
|
||||
#define IS_KBL_GT_REVID(dev_priv, since, until) \
|
||||
(IS_KABYLAKE(dev_priv) && \
|
||||
kbl_revids[INTEL_REVID(dev_priv)].gt_stepping >= since && \
|
||||
kbl_revids[INTEL_REVID(dev_priv)].gt_stepping <= until)
|
||||
#define IS_KBL_DISP_REVID(dev_priv, since, until) \
|
||||
(IS_KABYLAKE(dev_priv) && \
|
||||
kbl_revids[INTEL_REVID(dev_priv)].disp_stepping >= since && \
|
||||
kbl_revids[INTEL_REVID(dev_priv)].disp_stepping <= until)
|
||||
|
||||
#define GLK_REVID_A0 0x0
|
||||
#define GLK_REVID_A1 0x1
|
||||
|
@ -1665,6 +1700,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
|
|||
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
|
||||
#define HAS_PSR_HW_TRACKING(dev_priv) \
|
||||
(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
|
||||
#define HAS_PSR2_SEL_FETCH(dev_priv) (INTEL_GEN(dev_priv) >= 12)
|
||||
#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
|
||||
|
||||
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
|
||||
|
|
|
@ -132,6 +132,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
|
|||
case I915_PARAM_HAS_EXEC_BATCH_FIRST:
|
||||
case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
|
||||
case I915_PARAM_HAS_EXEC_SUBMIT_FENCE:
|
||||
case I915_PARAM_HAS_EXEC_TIMELINE_FENCES:
|
||||
/* For the time being all of these are always true;
|
||||
* if some supported hardware does not have one of these
|
||||
* features this value needs to be provided from
|
||||
|
|
|
@ -102,6 +102,11 @@ i915_param_named(psr_safest_params, bool, 0400,
|
|||
"is helpful to detect if PSR issues are related to bad values set in "
|
||||
" VBT. (0=use VBT parameters, 1=use safest parameters)");
|
||||
|
||||
i915_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
|
||||
"Enable PSR2 selective fetch "
|
||||
"(0=disabled, 1=enabled) "
|
||||
"Default: 0");
|
||||
|
||||
i915_param_named_unsafe(force_probe, charp, 0400,
|
||||
"Force probe the driver for specified devices. "
|
||||
"See CONFIG_DRM_I915_FORCE_PROBE for details.");
|
||||
|
|
|
@ -54,6 +54,7 @@ struct drm_printer;
|
|||
param(int, enable_fbc, -1, 0600) \
|
||||
param(int, enable_psr, -1, 0600) \
|
||||
param(bool, psr_safest_params, false, 0600) \
|
||||
param(bool, enable_psr2_sel_fetch, false, 0600) \
|
||||
param(int, disable_power_well, -1, 0400) \
|
||||
param(int, enable_ips, 1, 0600) \
|
||||
param(int, invert_brightness, 0, 0600) \
|
||||
|
|
|
@ -890,6 +890,7 @@ static const struct intel_device_info rkl_info = {
|
|||
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
|
||||
BIT(TRANSCODER_C),
|
||||
.require_force_probe = 1,
|
||||
.display.has_hti = 1,
|
||||
.display.has_psr_hw_tracking = 0,
|
||||
.platform_engine_mask =
|
||||
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0),
|
||||
|
|
|
@ -1898,6 +1898,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
|||
#define PWR_DOWN_LN_3_1_0 (0xb << 4)
|
||||
#define PWR_DOWN_LN_MASK (0xf << 4)
|
||||
#define PWR_DOWN_LN_SHIFT 4
|
||||
#define EDP4K2K_MODE_OVRD_EN (1 << 3)
|
||||
#define EDP4K2K_MODE_OVRD_OPTIMIZED (1 << 2)
|
||||
|
||||
#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy))
|
||||
#define ICL_LANE_ENABLE_AUX (1 << 0)
|
||||
|
@ -2919,6 +2921,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
|||
#define MBUS_BBOX_CTL_S1 _MMIO(0x45040)
|
||||
#define MBUS_BBOX_CTL_S2 _MMIO(0x45044)
|
||||
|
||||
#define HDPORT_STATE _MMIO(0x45050)
|
||||
#define HDPORT_DPLL_USED_MASK REG_GENMASK(14, 12)
|
||||
#define HDPORT_PHY_USED_DP(phy) REG_BIT(2 * (phy) + 2)
|
||||
#define HDPORT_PHY_USED_HDMI(phy) REG_BIT(2 * (phy) + 1)
|
||||
#define HDPORT_ENABLED REG_BIT(0)
|
||||
|
||||
/* Make render/texture TLB fetches lower priorty than associated data
|
||||
* fetches. This is not turned on by default
|
||||
*/
|
||||
|
@ -7870,6 +7878,7 @@ enum {
|
|||
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
|
||||
|
||||
#define CHICKEN_PAR1_1 _MMIO(0x42080)
|
||||
#define DIS_RAM_BYPASS_PSR2_MAN_TRACK (1 << 16)
|
||||
#define SKL_DE_COMPRESSED_HASH_MODE (1 << 15)
|
||||
#define DPA_MASK_VBLANK_SRD (1 << 15)
|
||||
#define FORCE_ARB_IDLE_PLANES (1 << 14)
|
||||
|
@ -8711,6 +8720,7 @@ enum {
|
|||
#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 31)
|
||||
#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1 << 30)
|
||||
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1 << 29)
|
||||
#define PCH_DPMGUNIT_CLOCK_GATE_DISABLE (1 << 15)
|
||||
#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1 << 14)
|
||||
#define CNP_PWM_CGE_GATING_DISABLE (1 << 13)
|
||||
#define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12)
|
||||
|
@ -9217,8 +9227,8 @@ enum {
|
|||
#define DISPLAY_IPS_CONTROL 0x19
|
||||
#define TGL_PCODE_TCCOLD 0x26
|
||||
#define TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED REG_BIT(0)
|
||||
#define TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ 0
|
||||
#define TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ REG_BIT(0)
|
||||
#define TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ 0
|
||||
#define TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ REG_BIT(0)
|
||||
/* See also IPS_CTL */
|
||||
#define IPS_PCODE_CONTROL (1 << 30)
|
||||
#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
|
||||
|
@ -10277,12 +10287,18 @@ enum skl_power_gate {
|
|||
|
||||
#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
|
||||
#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24))
|
||||
#define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10)
|
||||
#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < PORT_TC4 ? \
|
||||
(tc_port) + 12 : \
|
||||
(tc_port) - PORT_TC4 + 21))
|
||||
#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2)
|
||||
#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
|
||||
#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
|
||||
#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) _PICK(phy, 0, 2, 4, 27)
|
||||
#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) \
|
||||
(3 << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
|
||||
#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) \
|
||||
((pll) << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
|
||||
|
||||
/* CNL PLL */
|
||||
#define DPLL0_ENABLE 0x46010
|
||||
|
@ -10503,19 +10519,21 @@ enum skl_power_gate {
|
|||
|
||||
#define _TGL_DPLL0_CFGCR0 0x164284
|
||||
#define _TGL_DPLL1_CFGCR0 0x16428C
|
||||
/* TODO: add DPLL4 */
|
||||
#define _TGL_TBTPLL_CFGCR0 0x16429C
|
||||
#define TGL_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
|
||||
_TGL_DPLL1_CFGCR0, \
|
||||
_TGL_TBTPLL_CFGCR0)
|
||||
#define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \
|
||||
_TGL_DPLL1_CFGCR0)
|
||||
|
||||
#define _TGL_DPLL0_CFGCR1 0x164288
|
||||
#define _TGL_DPLL1_CFGCR1 0x164290
|
||||
/* TODO: add DPLL4 */
|
||||
#define _TGL_TBTPLL_CFGCR1 0x1642A0
|
||||
#define TGL_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
|
||||
_TGL_DPLL1_CFGCR1, \
|
||||
_TGL_TBTPLL_CFGCR1)
|
||||
#define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \
|
||||
_TGL_DPLL1_CFGCR1)
|
||||
|
||||
#define _DKL_PHY1_BASE 0x168000
|
||||
#define _DKL_PHY2_BASE 0x169000
|
||||
|
@ -12336,4 +12354,10 @@ enum skl_power_gate {
|
|||
#define DSB_ENABLE (1 << 31)
|
||||
#define DSB_STATUS (1 << 0)
|
||||
|
||||
#define TGL_ROOT_DEVICE_ID 0x9A00
|
||||
#define TGL_ROOT_DEVICE_MASK 0xFF00
|
||||
#define TGL_ROOT_DEVICE_SKU_MASK 0xF
|
||||
#define TGL_ROOT_DEVICE_SKU_ULX 0x2
|
||||
#define TGL_ROOT_DEVICE_SKU_ULT 0x4
|
||||
|
||||
#endif /* _I915_REG_H_ */
|
||||
|
|
|
@ -1783,11 +1783,8 @@ long i915_request_wait(struct i915_request *rq,
|
|||
* but at a cost of spending more power processing the workload
|
||||
* (bad for battery).
|
||||
*/
|
||||
if (flags & I915_WAIT_PRIORITY) {
|
||||
if (!i915_request_started(rq) &&
|
||||
INTEL_GEN(rq->engine->i915) >= 6)
|
||||
intel_rps_boost(rq);
|
||||
}
|
||||
if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq))
|
||||
intel_rps_boost(rq);
|
||||
|
||||
wait.tsk = current;
|
||||
if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
|
||||
|
|
|
@ -346,6 +346,25 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915)
|
|||
mask = BIT(INTEL_SUBPLATFORM_PORTF);
|
||||
}
|
||||
|
||||
if (IS_TIGERLAKE(i915)) {
|
||||
struct pci_dev *root, *pdev = i915->drm.pdev;
|
||||
|
||||
root = list_first_entry(&pdev->bus->devices, typeof(*root), bus_list);
|
||||
|
||||
drm_WARN_ON(&i915->drm, mask);
|
||||
drm_WARN_ON(&i915->drm, (root->device & TGL_ROOT_DEVICE_MASK) !=
|
||||
TGL_ROOT_DEVICE_ID);
|
||||
|
||||
switch (root->device & TGL_ROOT_DEVICE_SKU_MASK) {
|
||||
case TGL_ROOT_DEVICE_SKU_ULX:
|
||||
mask = BIT(INTEL_SUBPLATFORM_ULX);
|
||||
break;
|
||||
case TGL_ROOT_DEVICE_SKU_ULT:
|
||||
mask = BIT(INTEL_SUBPLATFORM_ULT);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);
|
||||
|
||||
RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
|
||||
|
|
|
@ -146,6 +146,7 @@ enum intel_ppgtt_type {
|
|||
func(has_gmch); \
|
||||
func(has_hdcp); \
|
||||
func(has_hotplug); \
|
||||
func(has_hti); \
|
||||
func(has_ipc); \
|
||||
func(has_modular_fia); \
|
||||
func(has_overlay); \
|
||||
|
|
|
@ -100,12 +100,6 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
|
|||
*/
|
||||
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
|
||||
DISP_FBC_MEMORY_WAKE);
|
||||
|
||||
if (IS_SKYLAKE(dev_priv)) {
|
||||
/* WaDisableDopClockGating */
|
||||
I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
|
||||
& ~GEN7_DOP_CLOCK_GATE_ENABLE);
|
||||
}
|
||||
}
|
||||
|
||||
static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
|
||||
|
@ -7223,12 +7217,12 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
|
|||
gen9_init_clock_gating(dev_priv);
|
||||
|
||||
/* WaDisableSDEUnitClockGating:kbl */
|
||||
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
|
||||
if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
|
||||
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
|
||||
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
|
||||
|
||||
/* WaDisableGamClockGating:kbl */
|
||||
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
|
||||
if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
|
||||
I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
|
||||
GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
|
||||
|
||||
|
@ -7251,6 +7245,10 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
|
|||
{
|
||||
gen9_init_clock_gating(dev_priv);
|
||||
|
||||
/* WaDisableDopClockGating:skl */
|
||||
I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL) &
|
||||
~GEN7_DOP_CLOCK_GATE_ENABLE);
|
||||
|
||||
/* WAC6entrylatency:skl */
|
||||
I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
|
||||
FBC_LLC_FULLY_OPEN);
|
||||
|
|
|
@ -331,7 +331,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
|
|||
if (!wait) {
|
||||
i915_sw_fence_commit(submit);
|
||||
heap_fence_put(submit);
|
||||
err = ENOMEM;
|
||||
err = -ENOMEM;
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
@ -258,9 +258,7 @@
|
|||
INTEL_VGA_DEVICE(0x0f30, info), \
|
||||
INTEL_VGA_DEVICE(0x0f31, info), \
|
||||
INTEL_VGA_DEVICE(0x0f32, info), \
|
||||
INTEL_VGA_DEVICE(0x0f33, info), \
|
||||
INTEL_VGA_DEVICE(0x0157, info), \
|
||||
INTEL_VGA_DEVICE(0x0155, info)
|
||||
INTEL_VGA_DEVICE(0x0f33, info)
|
||||
|
||||
#define INTEL_BDW_ULT_GT1_IDS(info) \
|
||||
INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
|
||||
|
|
|
@ -619,6 +619,12 @@ typedef struct drm_i915_irq_wait {
|
|||
*/
|
||||
#define I915_PARAM_PERF_REVISION 54
|
||||
|
||||
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
|
||||
* timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
|
||||
* I915_EXEC_USE_EXTENSIONS.
|
||||
*/
|
||||
#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55
|
||||
|
||||
/* Must be kept compact -- no holes and well documented */
|
||||
|
||||
typedef struct drm_i915_getparam {
|
||||
|
@ -1046,6 +1052,38 @@ struct drm_i915_gem_exec_fence {
|
|||
__u32 flags;
|
||||
};
|
||||
|
||||
/**
|
||||
* See drm_i915_gem_execbuffer_ext_timeline_fences.
|
||||
*/
|
||||
#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
|
||||
|
||||
/**
|
||||
* This structure describes an array of drm_syncobj and associated points for
|
||||
* timeline variants of drm_syncobj. It is invalid to append this structure to
|
||||
* the execbuf if I915_EXEC_FENCE_ARRAY is set.
|
||||
*/
|
||||
struct drm_i915_gem_execbuffer_ext_timeline_fences {
|
||||
struct i915_user_extension base;
|
||||
|
||||
/**
|
||||
* Number of element in the handles_ptr & value_ptr arrays.
|
||||
*/
|
||||
__u64 fence_count;
|
||||
|
||||
/**
|
||||
* Pointer to an array of struct drm_i915_gem_exec_fence of length
|
||||
* fence_count.
|
||||
*/
|
||||
__u64 handles_ptr;
|
||||
|
||||
/**
|
||||
* Pointer to an array of u64 values of length fence_count. Values
|
||||
* must be 0 for a binary drm_syncobj. A Value of 0 for a timeline
|
||||
* drm_syncobj is invalid as it turns a drm_syncobj into a binary one.
|
||||
*/
|
||||
__u64 values_ptr;
|
||||
};
|
||||
|
||||
struct drm_i915_gem_execbuffer2 {
|
||||
/**
|
||||
* List of gem_exec_object2 structs
|
||||
|
@ -1062,8 +1100,14 @@ struct drm_i915_gem_execbuffer2 {
|
|||
__u32 num_cliprects;
|
||||
/**
|
||||
* This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
|
||||
* is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
|
||||
* struct drm_i915_gem_exec_fence *fences.
|
||||
* & I915_EXEC_USE_EXTENSIONS are not set.
|
||||
*
|
||||
* If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
|
||||
* of struct drm_i915_gem_exec_fence and num_cliprects is the length
|
||||
* of the array.
|
||||
*
|
||||
* If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
|
||||
* single struct i915_user_extension and num_cliprects is 0.
|
||||
*/
|
||||
__u64 cliprects_ptr;
|
||||
#define I915_EXEC_RING_MASK (0x3f)
|
||||
|
@ -1181,7 +1225,16 @@ struct drm_i915_gem_execbuffer2 {
|
|||
*/
|
||||
#define I915_EXEC_FENCE_SUBMIT (1 << 20)
|
||||
|
||||
#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))
|
||||
/*
|
||||
* Setting I915_EXEC_USE_EXTENSIONS implies that
|
||||
* drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to an linked
|
||||
* list of i915_user_extension. Each i915_user_extension node is the base of a
|
||||
* larger structure. The list of supported structures are listed in the
|
||||
* drm_i915_gem_execbuffer_ext enum.
|
||||
*/
|
||||
#define I915_EXEC_USE_EXTENSIONS (1 << 21)
|
||||
|
||||
#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
|
||||
|
||||
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
|
||||
#define i915_execbuffer2_set_context_id(eb2, context) \
|
||||
|
|
Загрузка…
Ссылка в новой задаче