Merge tag 'drm-intel-fixes-2021-09-16' of ssh://git.freedesktop.org/git/drm/drm-intel into drm-fixes

drm/i915 fixes for v5.15-rc2:
- Propagate DP link training error returns
- Use max link params for eDP 1.3 and earlier
- Build warning fixes
- Gem selftest fixes
- Ensure wakeref is held before hardware access

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/8735q4wsu7.fsf@intel.com

@@ -19,7 +19,6 @@ subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
-subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
 # clang warnings
 subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
 subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
 subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
 subdir-ccflags-y += $(call cc-disable-warning, frame-address)
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror

@@ -2445,11 +2445,14 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
         */
        if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
                             intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
-                            sizeof(intel_dp->edp_dpcd))
+                            sizeof(intel_dp->edp_dpcd)) {
                drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
                            (int)sizeof(intel_dp->edp_dpcd),
                            intel_dp->edp_dpcd);
 
+               intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
+       }
+
        /*
         * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
         * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]

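Side note, not part of the patch: edp_dpcd[0] is the eDP DPCD revision byte read from DP_EDP_DPCD_REV, and the DP helpers define the revision codes (DP_EDP_11, DP_EDP_12, DP_EDP_13, DP_EDP_14, ...), so the comparison above selects panels that report eDP 1.3 or earlier. A minimal sketch of the check, with a hypothetical helper name:

/* Illustrative sketch only; edp_prefers_max_link_params() is hypothetical. */
#include <drm/drm_dp_helper.h>  /* DP_EDP_DPCD_REV, DP_EDP_14 */

static bool edp_prefers_max_link_params(const u8 *edp_dpcd)
{
        /* edp_dpcd[0] holds the eDP DPCD revision; for panels below eDP 1.4,
         * start link training from the maximum link rate and lane count. */
        return edp_dpcd[0] < DP_EDP_14;
}
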
@@ -848,7 +848,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
        }
 
        if (ret)
-               intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
+               ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
 
        if (intel_dp->set_idle_link_train)
                intel_dp->set_idle_link_train(intel_dp, crtc_state);

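Editorial note, not part of the patch: before this change the DPRX (sink-side) training pass was called only for its side effects, so its result was discarded and a failed final pass still looked like success to the caller. A simplified sketch of the corrected flow, loosely following the shape of intel_dp_link_train_all_phys() (hypothetical wrapper name, details omitted):

/* Simplified sketch; the real function also trains each LTTPR PHY first. */
static bool train_all_phys_sketch(struct intel_dp *intel_dp,
                                  const struct intel_crtc_state *crtc_state)
{
        bool ret = true;

        /* ... per-LTTPR training updates ret here ... */

        if (ret)
                /* keep the result instead of discarding it */
                ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);

        return ret;     /* a DPRX training failure now reaches the caller */
}
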
@@ -986,6 +986,9 @@ void i915_gem_context_release(struct kref *ref)
        trace_i915_context_free(ctx);
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
+       if (ctx->syncobj)
+               drm_syncobj_put(ctx->syncobj);
+
        mutex_destroy(&ctx->engines_mutex);
        mutex_destroy(&ctx->lut_mutex);
 

@@ -1205,9 +1208,6 @@ static void context_close(struct i915_gem_context *ctx)
        if (vm)
                i915_vm_close(vm);
 
-       if (ctx->syncobj)
-               drm_syncobj_put(ctx->syncobj);
-
        ctx->file_priv = ERR_PTR(-EBADF);
 
        /*

@@ -59,13 +59,13 @@ static int igt_dmabuf_import_self(void *arg)
                err = PTR_ERR(import);
                goto out_dmabuf;
        }
+       import_obj = to_intel_bo(import);
 
        if (import != &obj->base) {
                pr_err("i915_gem_prime_import created a new object!\n");
                err = -EINVAL;
                goto out_import;
        }
-       import_obj = to_intel_bo(import);
 
        i915_gem_object_lock(import_obj, NULL);
        err = __i915_gem_object_get_pages(import_obj);

@@ -128,6 +128,8 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg)
                pr_err("i915_gem_prime_import failed with the wrong err=%ld\n",
                       PTR_ERR(import));
                err = PTR_ERR(import);
+       } else {
+               err = 0;
        }
 
        dma_buf_put(dmabuf);

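For completeness (my gloss, not text from the patch): in this subtest, i915_gem_prime_import() failing with -EOPNOTSUPP is the expected outcome, but that branch previously assigned nothing to err, so the function could return an uninitialized value and clang warned about it. A stripped-down illustration of the shape of the fix, with a hypothetical function name:

/* Illustration only; check_import_result() is hypothetical.
 * IS_ERR()/PTR_ERR() come from <linux/err.h>. */
static int check_import_result(struct drm_gem_object *import)
{
        int err;        /* note: not initialized at declaration */

        if (!IS_ERR(import))
                err = -EINVAL;                  /* import unexpectedly succeeded */
        else if (PTR_ERR(import) != -EOPNOTSUPP)
                err = PTR_ERR(import);          /* failed with the wrong error */
        else
                err = 0;                        /* expected -EOPNOTSUPP: pass (the added branch) */

        return err;     /* without the final else, this could be uninitialized */
}
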
@@ -176,6 +178,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
                err = PTR_ERR(import);
                goto out_dmabuf;
        }
+       import_obj = to_intel_bo(import);
 
        if (import == &obj->base) {
                pr_err("i915_gem_prime_import reused gem object!\n");

@@ -183,8 +186,6 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
                goto out_import;
        }
 
-       import_obj = to_intel_bo(import);
-
        i915_gem_object_lock(import_obj, NULL);
        err = __i915_gem_object_get_pages(import_obj);
        if (err) {

@@ -581,6 +581,20 @@ static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
        return I915_MMAP_TYPE_GTT;
 }
 
+static struct drm_i915_gem_object *
+create_sys_or_internal(struct drm_i915_private *i915,
+                      unsigned long size)
+{
+       if (HAS_LMEM(i915)) {
+               struct intel_memory_region *sys_region =
+                       i915->mm.regions[INTEL_REGION_SMEM];
+
+               return __i915_gem_object_create_user(i915, size, &sys_region, 1);
+       }
+
+       return i915_gem_object_create_internal(i915, size);
+}
+
 static bool assert_mmap_offset(struct drm_i915_private *i915,
                               unsigned long size,
                               int expected)

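A gloss on the new helper (my reading of the diff, not text from the patch): on platforms with local memory (HAS_LMEM), a plain internal object is apparently not suitable for the mmap-offset paths this selftest exercises, so the helper instead creates a user object explicitly placed in the system-memory region; on other platforms it keeps using internal objects. Restated with comments:

/* Annotated restatement of the helper added above (identifiers are the driver's own). */
static struct drm_i915_gem_object *
create_sys_or_internal(struct drm_i915_private *i915, unsigned long size)
{
        if (HAS_LMEM(i915)) {
                /* Discrete parts: a user object pinned to system memory
                 * (INTEL_REGION_SMEM), created via the placement-list API. */
                struct intel_memory_region *sys_region =
                        i915->mm.regions[INTEL_REGION_SMEM];

                return __i915_gem_object_create_user(i915, size, &sys_region, 1);
        }

        /* Integrated parts: the usual internal object is sufficient. */
        return i915_gem_object_create_internal(i915, size);
}
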
@@ -589,7 +603,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
        u64 offset;
        int ret;
 
-       obj = i915_gem_object_create_internal(i915, size);
+       obj = create_sys_or_internal(i915, size);
        if (IS_ERR(obj))
                return expected && expected == PTR_ERR(obj);
 

@@ -633,6 +647,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
        struct drm_mm_node *hole, *next;
        int loop, err = 0;
        u64 offset;
+       int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
 
        /* Disable background reaper */
        disable_retire_worker(i915);

@@ -683,14 +698,14 @@ static int igt_mmap_offset_exhaustion(void *arg)
        }
 
        /* Too large */
-       if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
+       if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
                pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
                err = -EINVAL;
                goto out;
        }
 
        /* Fill the hole, further allocation attempts should then fail */
-       obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+       obj = create_sys_or_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                pr_err("Unable to create object for reclaimed hole\n");

@@ -703,7 +718,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
                goto err_obj;
        }
 
-       if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
+       if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
                pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
                err = -EINVAL;
                goto err_obj;

@@ -839,10 +854,9 @@ static int wc_check(struct drm_i915_gem_object *obj)
 
 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
 {
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        bool no_map;
 
-       if (HAS_LMEM(i915))
+       if (obj->ops->mmap_offset)
                return type == I915_MMAP_TYPE_FIXED;
        else if (type == I915_MMAP_TYPE_FIXED)
                return false;

@@ -1973,8 +1973,14 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
 u32 intel_rps_read_punit_req(struct intel_rps *rps)
 {
        struct intel_uncore *uncore = rps_to_uncore(rps);
+       struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
+       intel_wakeref_t wakeref;
+       u32 freq = 0;
 
-       return intel_uncore_read(uncore, GEN6_RPNSWREQ);
+       with_intel_runtime_pm_if_in_use(rpm, wakeref)
+               freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+
+       return freq;
 }
 
 static u32 intel_rps_get_req(u32 pureq)

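Context for the wakeref fix (the macro is the driver's existing runtime-PM helper; the snippet is only an illustration with hypothetical names): with_intel_runtime_pm_if_in_use() takes a runtime-PM wakeref only if the device is already awake, runs its body, and drops the wakeref again, so the MMIO read can no longer happen while the hardware is powered down; if the device is runtime-suspended, the body is skipped and the function falls back to returning 0.

/* Illustration of the guarded-MMIO-read pattern; read_reg_if_awake() is hypothetical. */
static u32 read_reg_if_awake(struct intel_uncore *uncore, i915_reg_t reg)
{
        struct intel_runtime_pm *rpm = uncore->rpm;
        intel_wakeref_t wakeref;
        u32 val = 0;

        /* The body only runs when a wakeref could be acquired,
         * i.e. the device is not runtime-suspended. */
        with_intel_runtime_pm_if_in_use(rpm, wakeref)
                val = intel_uncore_read(uncore, reg);

        return val;     /* 0 when the hardware was asleep */
}
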
@@ -172,11 +172,6 @@ void intel_uc_driver_remove(struct intel_uc *uc)
        __uc_free_load_err_log(uc);
 }
 
-static inline bool guc_communication_enabled(struct intel_guc *guc)
-{
-       return intel_guc_ct_enabled(&guc->ct);
-}
-
 /*
  * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
  * register using the same bits used in the CT message payload. Since our

@@ -210,7 +205,7 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
 static void guc_handle_mmio_msg(struct intel_guc *guc)
 {
        /* we need communication to be enabled to reply to GuC */
-       GEM_BUG_ON(!guc_communication_enabled(guc));
+       GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));
 
        spin_lock_irq(&guc->irq_lock);
        if (guc->mmio_msg) {

@@ -226,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc)
        struct drm_i915_private *i915 = gt->i915;
        int ret;
 
-       GEM_BUG_ON(guc_communication_enabled(guc));
+       GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));
 
        ret = i915_inject_probe_error(i915, -ENXIO);
        if (ret)

@@ -662,7 +657,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
                return 0;
 
        /* Make sure we enable communication if and only if it's disabled */
-       GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));
+       GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));
 
        if (enable_communication)
                guc_enable_communication(guc);