/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"

static struct drm_driver driver;
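
/*
 * Load-failure injection: with the i915.inject_load_failure=N module
 * parameter set, the Nth checkpoint during driver load reports a failure
 * so the error-unwind paths can be exercised.  A checkpoint is a sketch
 * like the following (via the i915_inject_load_failure() wrapper, assumed
 * to be defined in i915_drv.h):
 *
 *	if (i915_inject_load_failure())
 *		return -ENODEV;
 */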

static unsigned int i915_load_fail_count;

bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915_modparams.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915_modparams.inject_load_failure, func, line);
		return true;
	}

	return false;
}

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"
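
/*
 * Note: printk levels are KERN_SOH followed by an ASCII severity digit
 * ("\001" "3" for KERN_ERR, "\001" "7" for KERN_DEBUG), so comparing
 * level[1] below is enough to rank message severity.
 */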
void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *kdev = dev_priv->drm.dev;
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
		   __builtin_return_address(0), &vaf);

	if (is_error && !shown_bug_once) {
		dev_notice(kdev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}

	va_end(args);
}
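
/*
 * i915_error_injected() and i915_load_error() below demote messages for
 * deliberately injected load failures (see __i915_inject_load_failure()
 * above) from KERN_ERR to KERN_DEBUG, so self-induced errors are not
 * mistaken for real driver bugs in the log.
 */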
static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
	return i915_modparams.inject_load_failure &&
	       i915_load_fail_count == i915_modparams.inject_load_failure;
}

#define i915_load_error(dev_priv, fmt, ...)				     \
	__i915_printk(dev_priv,						     \
		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)

static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev_priv)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		ret = PCH_LPT;
		if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
			dev_priv->pch_id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
		else
			dev_priv->pch_id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	} else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		ret = PCH_CNP;
		DRM_DEBUG_KMS("Assuming CannonPoint PCH\n");
	}

	return ret;
}
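
/*
 * The PCH type cached in dev_priv below is what the rest of the driver
 * keys off (e.g. via the HAS_PCH_*() helpers, assuming the usual layout
 * of i915_drv.h in this era of the driver).
 */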
static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;

			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev_priv));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!IS_GEN6(dev_priv) &&
					!IS_IVYBRIDGE(dev_priv));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!IS_GEN6(dev_priv) &&
					!IS_IVYBRIDGE(dev_priv));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(IS_HSW_ULT(dev_priv) ||
					IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(!IS_HSW_ULT(dev_priv) &&
					!IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
				/* WildcatPoint is LPT compatible */
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(IS_HSW_ULT(dev_priv) ||
					IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
				/* WildcatPoint is LPT compatible */
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(!IS_HSW_ULT(dev_priv) &&
					!IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_KBP;
				DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv) &&
					!IS_COFFEELAKE(dev_priv));
			} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CNP;
				DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
				WARN_ON(!IS_CANNONLAKE(dev_priv) &&
					!IS_COFFEELAKE(dev_priv));
			} else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CNP;
				DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
				WARN_ON(!IS_CANNONLAKE(dev_priv) &&
					!IS_COFFEELAKE(dev_priv));
			} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
				   id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
				   (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
				    pch->subsystem_vendor ==
					    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
				    pch->subsystem_device ==
					    PCI_SUBDEVICE_ID_QEMU)) {
				dev_priv->pch_type =
					intel_virt_detect_pch(dev_priv);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}
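
/*
 * i915_getparam() backs the DRM_IOCTL_I915_GETPARAM ioctl.  A userspace
 * sketch (assuming libdrm's drmIoctl() wrapper) looks like:
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID,
 *				   .value = &value };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("device id 0x%04x\n", value);
 */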
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = pdev->revision;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_BSD:
		value = !!dev_priv->engine[VCS];
		break;
	case I915_PARAM_HAS_BLT:
		value = !!dev_priv->engine[BCS];
		break;
	case I915_PARAM_HAS_VEBOX:
		value = !!dev_priv->engine[VECS];
		break;
	case I915_PARAM_HAS_BSD2:
		value = !!dev_priv->engine[VCS2];
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev_priv);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev_priv);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev_priv);
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_modparams.semaphores;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev_priv)->sseu.eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915_modparams.enable_hangcheck &&
			intel_has_gpu_reset(dev_priv);
		if (value && intel_has_reset_engine(dev_priv))
			value = 2;
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev_priv);
		break;
	case I915_PARAM_HAS_POOLED_EU:
		value = HAS_POOLED_EU(dev_priv);
		break;
	case I915_PARAM_MIN_EU_IN_POOL:
		value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
		break;
	case I915_PARAM_HUC_STATUS:
		intel_runtime_pm_get(dev_priv);
		value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
		intel_runtime_pm_put(dev_priv);
		break;
	case I915_PARAM_MMAP_GTT_VERSION:
		/* Though we've started our numbering from 1, and so class all
		 * earlier versions as 0, in effect their value is undefined as
		 * the ioctl will report EINVAL for the unknown param!
		 */
		value = i915_gem_mmap_gtt_version();
		break;
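	/*
	 * HAS_SCHEDULER reports a capability mask rather than a boolean:
	 * ENABLED when the render engine has a ->schedule hook, PRIORITY
	 * for user-defined priorities, and PREEMPTION when execlists
	 * preemption is available.
	 */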
	case I915_PARAM_HAS_SCHEDULER:
		value = 0;
		if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
			value |= I915_SCHEDULER_CAP_ENABLED;
			value |= I915_SCHEDULER_CAP_PRIORITY;

			if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
			    i915_modparams.enable_execlists)
				value |= I915_SCHEDULER_CAP_PREEMPTION;
		}
		break;
	case I915_PARAM_MMAP_VERSION:
		/* Remember to bump this if the version changes! */
	case I915_PARAM_HAS_GEM:
	case I915_PARAM_HAS_PAGEFLIPPING:
	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
	case I915_PARAM_HAS_RELAXED_FENCING:
	case I915_PARAM_HAS_COHERENT_RINGS:
	case I915_PARAM_HAS_RELAXED_DELTA:
	case I915_PARAM_HAS_GEN7_SOL_RESET:
	case I915_PARAM_HAS_WAIT_TIMEOUT:
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
	case I915_PARAM_HAS_PINNED_BATCHES:
	case I915_PARAM_HAS_EXEC_NO_RELOC:
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
	case I915_PARAM_HAS_EXEC_SOFTPIN:
	case I915_PARAM_HAS_EXEC_ASYNC:
	case I915_PARAM_HAS_EXEC_FENCE:
	case I915_PARAM_HAS_EXEC_CAPTURE:
	case I915_PARAM_HAS_EXEC_BATCH_FIRST:
	case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
		/* For the time being all of these are always true;
		 * if some supported hardware does not have one of these
		 * features this value needs to be provided from
		 * INTEL_INFO(), a feature macro, or similar.
		 */
		value = 1;
		break;
	case I915_PARAM_HAS_CONTEXT_ISOLATION:
		value = intel_engines_has_context_isolation(dev_priv);
		break;
	case I915_PARAM_SLICE_MASK:
		value = INTEL_INFO(dev_priv)->sseu.slice_mask;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_SUBSLICE_MASK:
		value = INTEL_INFO(dev_priv)->sseu.subslice_mask;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
		value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}
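
/*
 * The "bridge" below is the host bridge at 0000:00:00.0 (domain 0 is
 * assumed by pci_get_bus_and_slot()); the MCHBAR helpers that follow
 * program the MCHBAR base through its config space.
 */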
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/*
 * Set up MCHBAR if possible; if we enable it here, note that in
 * dev_priv->mchbar_need_disable so teardown can disable it again.
 */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}
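
/*
 * As the two branches above show, the MCHBAR enable bit lives in different
 * places depending on the chipset: on i915G/i915GM it is DEVEN_MCHBAR_EN in
 * the DEVEN register, elsewhere it is bit 0 of the MCHBAR register itself.
 */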

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_i915_private *dev_priv = cookie;

	intel_modeset_vga_set_state(dev_priv, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
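
/*
 * The flags returned above tell the VGA arbiter which resources this device
 * decodes: with decode enabled we claim the legacy VGA I/O and memory ranges
 * in addition to the normal ones, otherwise only the normal ranges.
 */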

static int i915_resume_switcheroo(struct drm_device *dev);
static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
	i915_gem_cleanup_engines(dev_priv);
	i915_gem_contexts_fini(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_cleanup_userptr(dev_priv);

	i915_gem_drain_freed_objects(dev_priv);

	WARN_ON(!list_empty(&dev_priv->contexts.list));
}

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_bios_init(dev_priv);

	/* If we have more than one VGA card, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev_priv);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	ret = intel_modeset_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_uc_init_fw(dev_priv);

	ret = i915_gem_init(dev_priv);
	if (ret)
		goto cleanup_uc;

	intel_setup_overlay(dev_priv);

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	if (i915_gem_suspend(dev_priv))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
	i915_gem_fini(dev_priv);
cleanup_uc:
	intel_uc_fini_fw(dev_priv);
cleanup_irq:
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev_priv);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini(dev_priv);
	vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
	vga_client_register(pdev, NULL, NULL, NULL);
out:
	return ret;
}

static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->mappable_base;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif
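
/*
 * To summarize the #if ladder above: without CONFIG_VGA_CONSOLE there is no
 * vgacon to kick out, so we succeed trivially; with CONFIG_VGA_CONSOLE but
 * without CONFIG_DUMMY_CONSOLE there is nothing to take its place, so we
 * fail with -ENODEV; only with both do we actually swap vgacon for dummycon.
 */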

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}
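
/*
 * Illustration, not driver code: an ordered workqueue such as those
 * allocated above executes at most one work item at a time, in queueing
 * order, so for two hypothetical items a and b:
 *
 *	queue_work(dev_priv->wq, &a);	runs first
 *	queue_work(dev_priv->wq, &b);	runs only after a completes
 *
 * which is why a single ordered queue suffices when, as the comment above
 * notes, every task takes the dev mutex anyway.
 */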

static void i915_engines_cleanup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		kfree(engine);
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);

	if (pre) {
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv,
				  const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(dev_priv);
	memcpy(device_info, match_info, sizeof(*device_info));
	device_info->device_id = dev_priv->drm.pdev->device;

	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
		     sizeof(device_info->platform_mask) * BITS_PER_BYTE);
	device_info->platform_mask = BIT(device_info->platform);

	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
	device_info->gen_mask = BIT(device_info->gen - 1);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);

	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);

	intel_uc_init_early(dev_priv);
	i915_memcpy_init_early(dev_priv);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		goto err_engines;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	intel_init_dpio(dev_priv);
	intel_power_domains_init(dev_priv);
	intel_irq_init(dev_priv);
	intel_hangcheck_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	ret = i915_gem_load_init(dev_priv);
	if (ret < 0)
		goto err_irq;

	intel_display_crc_init(dev_priv);

	intel_device_info_dump(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	i915_perf_init(dev_priv);

	return 0;

err_irq:
	intel_irq_fini(dev_priv);
	i915_workqueues_cleanup(dev_priv);
err_engines:
	i915_engines_cleanup(dev_priv);
	return ret;
}
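
/*
 * Note that the error labels above unwind in reverse order of setup, onion
 * style: a failure in i915_gem_load_init() releases the irq state and the
 * workqueues before falling through to the engine cleanup.
 */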

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_perf_fini(dev_priv);
	i915_gem_load_cleanup(dev_priv);
	intel_irq_fini(dev_priv);
	i915_workqueues_cleanup(dev_priv);
	i915_engines_cleanup(dev_priv);
}

static int i915_mmio_setup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap so that it
	 * does not clobber the GTT, which we want to map with ioremap_wc
	 * instead. Fortunately, the register BAR remains the same size for
	 * all the earlier generations up to Ironlake.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	return 0;
}

static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_teardown_mchbar(dev_priv);
	pci_iounmap(pdev, dev_priv->regs);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = i915_mmio_setup(dev_priv);
	if (ret < 0)
		goto err_bridge;

	intel_uncore_init(dev_priv);

	intel_uc_init_mmio(dev_priv);

	ret = intel_engines_init_mmio(dev_priv);
	if (ret)
		goto err_uncore;

	i915_gem_init_mmio(dev_priv);

	return 0;

err_uncore:
	intel_uncore_fini(dev_priv);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev_priv);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	i915_modparams.enable_execlists =
		intel_sanitize_enable_execlists(dev_priv,
						i915_modparams.enable_execlists);

	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities. We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915_modparams.enable_ppgtt =
		intel_sanitize_enable_ppgtt(dev_priv,
					    i915_modparams.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);

	i915_modparams.semaphores =
		intel_sanitize_semaphores(dev_priv, i915_modparams.semaphores);
	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n",
			 yesno(i915_modparams.semaphores));

	intel_uc_sanitize_options(dev_priv);

	intel_gvt_sanitize_options(dev_priv);
}
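
/*
 * For illustration: the values sanitized above arrive as i915 module
 * parameters. A user requesting full PPGTT at module load time, e.g.
 *
 *	modprobe i915 enable_ppgtt=2
 *
 * (2 selects full PPGTT per the i915 parameter documentation) has that
 * request validated, and possibly overridden, here before anything acts
 * on it.
 */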

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	intel_sanitize_options(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		return ret;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_ggtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_ggtt;
	}

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		return ret;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto out_ggtt;
	}

	pci_set_master(pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev_priv);

	intel_opregion_setup(dev_priv);

	i915_gem_load_init_fences(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and the capability was defeatured. MSI
	 * interrupts also seem to get lost on g4x, and interrupt delivery
	 * seems to stay properly dead afterwards. So we'll just disable them
	 * for all pre-gen5 chipsets.
	 */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto out_ggtt;

	return 0;

out_ggtt:
	i915_ggtt_cleanup_hw(dev_priv);

	return ret;
}
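
/*
 * A note on the DMA masks set above: DMA_BIT_MASK(30) limits coherent
 * allocations to the first 1GB of address space, matching the gen2 overlay
 * limitation mentioned in the comment, while DMA_BIT_MASK(32) keeps the
 * 965G/GM hardware status page below the 4GB boundary.
 */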

/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
	i915_ggtt_cleanup_hw(dev_priv);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_shrinker_init(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_guc_log_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (INTEL_INFO(dev_priv)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	if (IS_GEN5(dev_priv))
		intel_gpu_ips_init(dev_priv);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	intel_fbdev_unregister(dev_priv);
	intel_audio_deinit(dev_priv);

	intel_gpu_ips_teardown();
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_perf_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	i915_guc_log_unregister(dev_priv);
	drm_dev_unregister(&dev_priv->drm);

	i915_gem_shrinker_cleanup(dev_priv);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *dev_priv;
	int ret;

	/* Enable nuclear pageflip on ILK+ */
	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
		driver.driver_features &= ~DRIVER_ATOMIC;

	ret = -ENOMEM;
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv)
		ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
		goto out_free;
	}

	dev_priv->drm.pdev = pdev;
	dev_priv->drm.dev_private = dev_priv;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	pci_set_drvdata(pdev, &dev_priv->drm);
	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended skipping the driver's suspend handlers
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;

	ret = i915_driver_init_early(dev_priv, ent);
	if (ret < 0)
		goto out_pci_disable;

	intel_runtime_pm_get(dev_priv);

	ret = i915_driver_init_mmio(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_init_hw(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	/*
	 * TODO: move the vblank init and parts of modeset init steps into one
	 * of the i915_driver_init_/i915_driver_register functions according
	 * to the role/effect of the given init step.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes) {
		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
			goto out_cleanup_hw;
	}

	ret = i915_load_modeset_init(&dev_priv->drm);
	if (ret < 0)
		goto out_cleanup_hw;

	i915_driver_register(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	intel_init_ipc(dev_priv);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		DRM_INFO("DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");

	intel_runtime_pm_put(dev_priv);

	return 0;

out_cleanup_hw:
	i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
	i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
	intel_runtime_pm_put(dev_priv);
	i915_driver_cleanup_early(dev_priv);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
	drm_dev_fini(&dev_priv->drm);
out_free:
	kfree(dev_priv);
	return ret;
}
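
/*
 * The probe flow above proceeds in phases: init_early (software-only state),
 * init_mmio (register mapping), init_hw (GGTT and other device state), then
 * vblank/modeset setup and finally i915_driver_register() to expose the
 * device; each out_* label unwinds exactly the phases completed before it.
 */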

void i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	i915_driver_unregister(dev_priv);

	if (i915_gem_suspend(dev_priv))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");

	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	drm_atomic_helper_shutdown(dev);

	intel_gvt_cleanup(dev_priv);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

	vga_switcheroo_unregister_client(pdev);
	vga_client_register(pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_reset_error_state(dev_priv);

	i915_gem_fini(dev_priv);
	intel_uc_fini_fw(dev_priv);
	intel_fbc_cleanup_cfb(dev_priv);

	intel_power_domains_fini(dev_priv);

	i915_driver_cleanup_hw(dev_priv);
	i915_driver_cleanup_mmio(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_driver_cleanup_early(dev_priv);
	drm_dev_fini(&dev_priv->drm);

	kfree(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	kfree(file_priv);
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
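
/*
 * An ACPI target state below S3 here means the system is entering
 * suspend-to-idle (s2idle/"freeze") rather than a full suspend-to-RAM, in
 * which case the callers below pick lighter-weight power states, e.g.
 * PCI_D1 instead of PCI_D3cold for the opregion notification.
 */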
|
2014-08-13 21:37:05 +04:00
|
|
|
|
2014-10-23 20:23:25 +04:00
|
|
|
static int i915_drm_suspend(struct drm_device *dev)
|
2007-11-22 07:14:14 +03:00
|
|
|
{
|
2016-07-04 13:34:36 +03:00
|
|
|
struct drm_i915_private *dev_priv = to_i915(dev);
|
2016-08-22 13:32:44 +03:00
|
|
|
struct pci_dev *pdev = dev_priv->drm.pdev;
|
2014-06-12 19:35:47 +04:00
|
|
|
pci_power_t opregion_target_state;
|
2015-02-23 14:03:26 +03:00
|
|
|
int error;
|
2010-02-19 01:06:27 +03:00
|
|
|
|
i915: ignore lid open event when resuming
i915 driver needs to do modeset when
1. system resumes from sleep
2. lid is opened
In PM_SUSPEND_MEM state, all the GPEs are cleared when system resumes,
thus it is the i915_resume code does the modeset rather than intel_lid_notify().
But in PM_SUSPEND_FREEZE state, this will be broken because
system is still responsive to the lid events.
1. When we close the lid in Freeze state, intel_lid_notify() sets modeset_on_lid.
2. When we reopen the lid, intel_lid_notify() will do a modeset,
before the system is resumed.
here is the error log,
[92146.548074] WARNING: at drivers/gpu/drm/i915/intel_display.c:1028 intel_wait_for_pipe_off+0x184/0x190 [i915]()
[92146.548076] Hardware name: VGN-Z540N
[92146.548078] pipe_off wait timed out
[92146.548167] Modules linked in: hid_generic usbhid hid snd_hda_codec_realtek snd_hda_intel snd_hda_codec parport_pc snd_hwdep ppdev snd_pcm_oss i915 snd_mixer_oss snd_pcm arc4 iwldvm snd_seq_dummy mac80211 snd_seq_oss snd_seq_midi fbcon tileblit font bitblit softcursor drm_kms_helper snd_rawmidi snd_seq_midi_event coretemp drm snd_seq kvm btusb bluetooth snd_timer iwlwifi pcmcia tpm_infineon i2c_algo_bit joydev snd_seq_device intel_agp cfg80211 snd intel_gtt yenta_socket pcmcia_rsrc sony_laptop agpgart microcode psmouse tpm_tis serio_raw mxm_wmi soundcore snd_page_alloc tpm acpi_cpufreq lpc_ich pcmcia_core tpm_bios mperf processor lp parport firewire_ohci firewire_core crc_itu_t sdhci_pci sdhci thermal e1000e
[92146.548173] Pid: 4304, comm: kworker/0:0 Tainted: G W 3.8.0-rc3-s0i3-v3-test+ #9
[92146.548175] Call Trace:
[92146.548189] [<c10378e2>] warn_slowpath_common+0x72/0xa0
[92146.548227] [<f86398b4>] ? intel_wait_for_pipe_off+0x184/0x190 [i915]
[92146.548263] [<f86398b4>] ? intel_wait_for_pipe_off+0x184/0x190 [i915]
[92146.548270] [<c10379b3>] warn_slowpath_fmt+0x33/0x40
[92146.548307] [<f86398b4>] intel_wait_for_pipe_off+0x184/0x190 [i915]
[92146.548344] [<f86399c2>] intel_disable_pipe+0x102/0x190 [i915]
[92146.548380] [<f8639ea4>] ? intel_disable_plane+0x64/0x80 [i915]
[92146.548417] [<f8639f7c>] i9xx_crtc_disable+0xbc/0x150 [i915]
[92146.548456] [<f863ebee>] intel_crtc_update_dpms+0x5e/0x90 [i915]
[92146.548493] [<f86437cf>] intel_modeset_setup_hw_state+0x42f/0x8f0 [i915]
[92146.548535] [<f8645b0b>] intel_lid_notify+0x9b/0xc0 [i915]
[92146.548543] [<c15610d3>] notifier_call_chain+0x43/0x60
[92146.548550] [<c105d1e1>] __blocking_notifier_call_chain+0x41/0x80
[92146.548556] [<c105d23f>] blocking_notifier_call_chain+0x1f/0x30
[92146.548563] [<c131a684>] acpi_lid_send_state+0x78/0xa4
[92146.548569] [<c131aa9e>] acpi_button_notify+0x3b/0xf1
[92146.548577] [<c12df56a>] ? acpi_os_execute+0x17/0x19
[92146.548582] [<c12e591a>] ? acpi_ec_sync_query+0xa5/0xbc
[92146.548589] [<c12e2b82>] acpi_device_notify+0x16/0x18
[92146.548595] [<c12f4904>] acpi_ev_notify_dispatch+0x38/0x4f
[92146.548600] [<c12df0e8>] acpi_os_execute_deferred+0x20/0x2b
[92146.548607] [<c1051208>] process_one_work+0x128/0x3f0
[92146.548613] [<c1564f73>] ? common_interrupt+0x33/0x38
[92146.548618] [<c104f8c0>] ? wake_up_worker+0x30/0x30
[92146.548624] [<c12df0c8>] ? acpi_os_wait_events_complete+0x1e/0x1e
[92146.548629] [<c10524f9>] worker_thread+0x119/0x3b0
[92146.548634] [<c10523e0>] ? manage_workers+0x240/0x240
[92146.548640] [<c1056e84>] kthread+0x94/0xa0
[92146.548647] [<c1060000>] ? ftrace_raw_output_sched_stat_runtime+0x70/0xf0
[92146.548652] [<c15649b7>] ret_from_kernel_thread+0x1b/0x28
[92146.548658] [<c1056df0>] ? kthread_create_on_node+0xc0/0xc0
three different modeset flags are introduced in this patch
MODESET_ON_LID_OPEN: do modeset on next lid open event
MODESET_DONE: modeset already done
MODESET_SUSPENDED: suspended, only do modeset when system is resumed
In this way,
1. when lid is closed, MODESET_ON_LID_OPEN is set so that
we'll do modeset on next lid open event.
2. when lid is opened, MODESET_DONE is set
so that duplicate lid open events will be ignored.
3. when system suspends, MODESET_SUSPENDED is set.
In this case, we will not do modeset on any lid events.
Plus, locking mechanism is also introduced to avoid racing.
Signed-off-by: Zhang Rui <rui.zhang@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-02-05 11:41:53 +04:00
|
|
|
/* ignore lid events during suspend */
|
|
|
|
mutex_lock(&dev_priv->modeset_restore_lock);
|
|
|
|
dev_priv->modeset_restore = MODESET_SUSPENDED;
|
|
|
|
mutex_unlock(&dev_priv->modeset_restore_lock);
|
|
|
|
|
2015-12-16 03:52:19 +03:00
|
|
|
disable_rpm_wakeref_asserts(dev_priv);
|
|
|
|
|
2013-08-19 20:18:09 +04:00
|
|
|
/* We do a lot of poking in a lot of registers, make sure they work
|
|
|
|
* properly. */
|
2014-02-18 02:02:02 +04:00
|
|
|
intel_display_set_init_power(dev_priv, true);
|
2013-01-25 22:59:15 +04:00
|
|
|
|
2010-12-07 02:20:40 +03:00
|
|
|
drm_kms_helper_poll_disable(dev);
|
|
|
|
|
2016-08-22 13:32:44 +03:00
|
|
|
pci_save_state(pdev);
|
2007-11-22 07:14:14 +03:00
|
|
|
|
2016-12-01 17:16:38 +03:00
|
|
|
error = i915_gem_suspend(dev_priv);
|
2015-02-23 14:03:26 +03:00
|
|
|
if (error) {
|
2016-08-22 13:32:44 +03:00
|
|
|
dev_err(&pdev->dev,
|
2015-02-23 14:03:26 +03:00
|
|
|
"GEM idle failed, resume might fail\n");
|
2015-12-16 03:52:19 +03:00
|
|
|
goto out;
|
2015-02-23 14:03:26 +03:00
|
|
|
}
|
2013-07-09 18:51:37 +04:00
|
|
|
|
2015-06-01 13:49:47 +03:00
|
|
|
intel_display_suspend(dev);
|
2014-11-19 16:30:05 +03:00
|
|
|
|
2015-02-23 14:03:26 +03:00
|
|
|
intel_dp_mst_suspend(dev);
|
2013-04-17 15:04:50 +04:00
|
|
|
|
2015-02-23 14:03:26 +03:00
|
|
|
intel_runtime_pm_disable_interrupts(dev_priv);
|
|
|
|
intel_hpd_cancel_work(dev_priv);
|
2014-07-23 08:25:24 +04:00
|
|
|
|
2015-02-23 14:03:26 +03:00
|
|
|
intel_suspend_encoders(dev_priv);
|
2014-05-02 08:02:48 +04:00
|
|
|
|
2016-10-31 23:37:23 +03:00
|
|
|
intel_suspend_hw(dev_priv);
|
2009-02-18 02:13:31 +03:00
|
|
|
|
2016-11-16 11:55:34 +03:00
|
|
|
i915_gem_suspend_gtt_mappings(dev_priv);
|
2013-10-16 20:21:30 +04:00
|
|
|
|
2016-12-01 17:16:44 +03:00
|
|
|
i915_save_state(dev_priv);
|
2009-06-23 05:05:12 +04:00
|
|
|
|
2015-11-18 18:32:30 +03:00
|
|
|
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
|
2016-05-23 17:08:09 +03:00
|
|
|
intel_opregion_notify_adapter(dev_priv, opregion_target_state);
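/*
 * For reference, a minimal sketch of the suspend_to_idle() helper used
 * above. It is assumed to key off the ACPI target sleep state (suspend-
 * to-idle targets a state shallower than S3); treat this as illustrative
 * rather than the authoritative definition.
 */
#if 0	/* illustrative sketch, not part of this file */
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	/* anything shallower than S3 is treated as suspend-to-idle */
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
#endif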
|
2014-06-12 19:35:47 +04:00
|
|
|
|
2017-02-10 13:28:01 +03:00
|
|
|
intel_uncore_suspend(dev_priv);
|
2016-05-23 17:08:10 +03:00
|
|
|
intel_opregion_unregister(dev_priv);
|
2008-08-05 22:37:25 +04:00
|
|
|
|
2014-08-13 16:09:46 +04:00
|
|
|
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
|
2012-03-28 13:48:49 +04:00
|
|
|
|
2014-02-25 19:11:28 +04:00
|
|
|
dev_priv->suspend_count++;
|
|
|
|
|
2016-04-18 14:48:21 +03:00
|
|
|
intel_csr_ucode_suspend(dev_priv);
|
2015-10-29 00:59:06 +03:00
|
|
|
|
2015-12-16 03:52:19 +03:00
|
|
|
out:
|
|
|
|
enable_rpm_wakeref_asserts(dev_priv);
|
|
|
|
|
|
|
|
return error;
|
2010-02-07 23:48:24 +03:00
|
|
|
}
|
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
|
2014-10-23 20:23:15 +04:00
|
|
|
{
|
2016-08-22 13:32:42 +03:00
|
|
|
struct drm_i915_private *dev_priv = to_i915(dev);
|
2016-08-22 13:32:44 +03:00
|
|
|
struct pci_dev *pdev = dev_priv->drm.pdev;
|
2015-11-18 18:32:30 +03:00
|
|
|
bool fw_csr;
|
2014-10-23 20:23:15 +04:00
|
|
|
int ret;
|
|
|
|
|
2015-12-16 03:52:19 +03:00
|
|
|
disable_rpm_wakeref_asserts(dev_priv);
|
|
|
|
|
2016-10-13 14:34:06 +03:00
|
|
|
intel_display_set_init_power(dev_priv, false);
|
|
|
|
|
2017-08-16 17:46:07 +03:00
|
|
|
fw_csr = !IS_GEN9_LP(dev_priv) && !hibernation &&
|
2016-04-01 16:02:38 +03:00
|
|
|
suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
|
2015-11-18 18:32:30 +03:00
|
|
|
/*
|
|
|
|
* In case of firmware assisted context save/restore don't manually
|
|
|
|
* deinit the power domains. This also means the CSR/DMC firmware will
|
|
|
|
* stay active; it will power down any HW resources as required and
|
|
|
|
* also enable deeper system power states that would be blocked if the
|
|
|
|
* firmware was inactive.
|
|
|
|
*/
|
|
|
|
if (!fw_csr)
|
|
|
|
intel_power_domains_suspend(dev_priv);
|
2015-11-17 18:33:53 +03:00
|
|
|
|
2016-04-20 20:27:54 +03:00
|
|
|
ret = 0;
|
2016-12-16 18:42:25 +03:00
|
|
|
if (IS_GEN9_LP(dev_priv))
|
2016-04-20 20:27:54 +03:00
|
|
|
bxt_enable_dc9(dev_priv);
|
2016-04-20 20:27:55 +03:00
|
|
|
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
|
2016-04-20 20:27:54 +03:00
|
|
|
hsw_enable_pc8(dev_priv);
|
|
|
|
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
|
|
|
|
ret = vlv_suspend_complete(dev_priv);
|
2014-10-23 20:23:15 +04:00
|
|
|
|
|
|
|
if (ret) {
|
|
|
|
DRM_ERROR("Suspend complete failed: %d\n", ret);
|
2015-11-18 18:32:30 +03:00
|
|
|
if (!fw_csr)
|
|
|
|
intel_power_domains_init_hw(dev_priv, true);
|
2014-10-23 20:23:15 +04:00
|
|
|
|
2015-12-16 03:52:19 +03:00
|
|
|
goto out;
|
2014-10-23 20:23:15 +04:00
|
|
|
}
|
|
|
|
|
2016-08-22 13:32:44 +03:00
|
|
|
pci_disable_device(pdev);
|
2015-03-02 14:04:41 +03:00
|
|
|
/*
|
2015-06-30 17:06:47 +03:00
|
|
|
* During hibernation on some platforms the BIOS may try to access
|
2015-03-02 14:04:41 +03:00
|
|
|
* the device even though it's already in D3 and hang the machine. So
|
|
|
|
* leave the device in D0 on those platforms and hope the BIOS will
|
2015-06-30 17:06:47 +03:00
|
|
|
* power down the device properly. The issue was seen on multiple old
|
|
|
|
* GENs with different BIOS vendors, so having an explicit blacklist
|
|
|
|
* is impractical; apply the workaround on everything pre-GEN6. The
|
|
|
|
* platforms where the issue was seen:
|
|
|
|
* Lenovo Thinkpad X301, X61s, X60, T60, X41
|
|
|
|
* Fujitsu FSC S7110
|
|
|
|
* Acer Aspire 1830T
|
2015-03-02 14:04:41 +03:00
|
|
|
*/
|
2016-11-04 17:42:48 +03:00
|
|
|
if (!(hibernation && INTEL_GEN(dev_priv) < 6))
|
2016-08-22 13:32:44 +03:00
|
|
|
pci_set_power_state(pdev, PCI_D3hot);
|
2014-10-23 20:23:15 +04:00
|
|
|
|
2015-11-18 18:32:30 +03:00
|
|
|
dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
|
|
|
|
|
2015-12-16 03:52:19 +03:00
|
|
|
out:
|
|
|
|
enable_rpm_wakeref_asserts(dev_priv);
|
|
|
|
|
|
|
|
return ret;
|
2014-10-23 20:23:15 +04:00
|
|
|
}
|
|
|
|
|
2016-12-02 13:24:11 +03:00
|
|
|
static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
|
2010-02-07 23:48:24 +03:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
|
2016-07-05 12:40:22 +03:00
|
|
|
if (!dev) {
|
2010-02-07 23:48:24 +03:00
|
|
|
DRM_ERROR("dev: %p\n", dev);
|
|
|
|
DRM_ERROR("DRM not initialized, aborting suspend.\n");
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
2014-09-10 19:16:55 +04:00
|
|
|
if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
|
|
|
|
state.event != PM_EVENT_FREEZE))
|
|
|
|
return -EINVAL;
|
2010-12-07 02:20:40 +03:00
|
|
|
|
|
|
|
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
|
|
|
return 0;
|
2010-09-08 12:45:11 +04:00
|
|
|
|
2014-10-23 20:23:25 +04:00
|
|
|
error = i915_drm_suspend(dev);
|
2010-02-07 23:48:24 +03:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
2015-03-02 14:04:41 +03:00
|
|
|
return i915_drm_suspend_late(dev, false);
|
2007-11-22 07:14:14 +03:00
|
|
|
}
|
|
|
|
|
2014-10-23 20:23:25 +04:00
|
|
|
static int i915_drm_resume(struct drm_device *dev)
|
2014-04-01 20:55:22 +04:00
|
|
|
{
|
2016-07-04 13:34:36 +03:00
|
|
|
struct drm_i915_private *dev_priv = to_i915(dev);
|
2016-05-06 21:35:55 +03:00
|
|
|
int ret;
|
2013-09-13 01:06:43 +04:00
|
|
|
|
2015-12-16 03:52:19 +03:00
|
|
|
disable_rpm_wakeref_asserts(dev_priv);
|
2016-08-24 12:27:01 +03:00
|
|
|
intel_sanitize_gt_powersave(dev_priv);
|
2015-12-16 03:52:19 +03:00
|
|
|
|
2016-08-04 09:52:22 +03:00
|
|
|
ret = i915_ggtt_enable_hw(dev_priv);
|
2016-05-06 21:35:55 +03:00
|
|
|
if (ret)
|
|
|
|
DRM_ERROR("failed to re-enable GGTT\n");
|
|
|
|
|
2016-04-18 14:48:21 +03:00
|
|
|
intel_csr_ucode_resume(dev_priv);
|
|
|
|
|
2016-12-01 17:16:44 +03:00
|
|
|
i915_restore_state(dev_priv);
|
2016-08-10 14:07:33 +03:00
|
|
|
intel_pps_unlock_regs_wa(dev_priv);
|
2016-05-23 17:08:09 +03:00
|
|
|
intel_opregion_setup(dev_priv);
|
2010-02-19 01:06:27 +03:00
|
|
|
|
2016-11-23 17:21:44 +03:00
|
|
|
intel_init_pch_refclk(dev_priv);
|
2012-05-09 14:56:28 +04:00
|
|
|
|
2015-05-11 10:50:45 +03:00
|
|
|
/*
|
|
|
|
* Interrupts have to be enabled before any batches are run. If not the
|
|
|
|
* GPU will hang. i915_gem_init_hw() will initiate batches to
|
|
|
|
* update/restore the context.
|
|
|
|
*
|
2016-11-29 22:40:29 +03:00
|
|
|
* drm_mode_config_reset() needs AUX interrupts.
|
|
|
|
*
|
2015-05-11 10:50:45 +03:00
|
|
|
* Modeset enabling in intel_modeset_init_hw() also needs working
|
|
|
|
* interrupts.
|
|
|
|
*/
|
|
|
|
intel_runtime_pm_enable_interrupts(dev_priv);
|
|
|
|
|
2016-11-29 22:40:29 +03:00
|
|
|
drm_mode_config_reset(dev);
|
|
|
|
|
2017-11-12 14:27:38 +03:00
|
|
|
i915_gem_resume(dev_priv);
|
2009-02-24 02:41:09 +03:00
|
|
|
|
2015-02-23 14:03:26 +03:00
|
|
|
intel_modeset_init_hw(dev);
|
2013-03-26 20:25:45 +04:00
|
|
|
|
2015-02-23 14:03:26 +03:00
|
|
|
spin_lock_irq(&dev_priv->irq_lock);
|
|
|
|
if (dev_priv->display.hpd_irq_setup)
|
2016-05-06 16:48:28 +03:00
|
|
|
dev_priv->display.hpd_irq_setup(dev_priv);
|
2015-02-23 14:03:26 +03:00
|
|
|
spin_unlock_irq(&dev_priv->irq_lock);
|
2014-05-02 08:02:48 +04:00
|
|
|
|
2015-02-23 14:03:26 +03:00
|
|
|
intel_dp_mst_resume(dev);
|
2014-12-08 06:23:37 +03:00
|
|
|
|
2016-03-11 18:57:01 +03:00
|
|
|
intel_display_resume(dev);
|
|
|
|
|
2016-11-02 04:06:30 +03:00
|
|
|
drm_kms_helper_poll_enable(dev);
|
|
|
|
|
2015-02-23 14:03:26 +03:00
|
|
|
/*
|
|
|
|
* ... but also need to make sure that hotplug processing
|
|
|
|
* doesn't cause havoc. Like in the driver load code we don't
|
|
|
|
* bother with the tiny race here where we might lose hotplug
|
|
|
|
* notifications.
|
|
|
|
*/
|
|
|
|
intel_hpd_init(dev_priv);
|
2011-01-05 23:01:25 +03:00
|
|
|
|
2016-05-23 17:08:10 +03:00
|
|
|
intel_opregion_register(dev_priv);
|
2010-08-19 19:09:23 +04:00
|
|
|
|
2014-08-13 16:09:46 +04:00
|
|
|
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
|
2012-11-02 22:13:59 +04:00
|
|
|
|
i915: ignore lid open event when resuming
The i915 driver needs to do a modeset when
1. the system resumes from sleep
2. the lid is opened
In the PM_SUSPEND_MEM state, all the GPEs are cleared when the system resumes,
thus it is the i915_resume code that does the modeset rather than intel_lid_notify().
But in the PM_SUSPEND_FREEZE state this breaks, because the
system is still responsive to lid events.
1. When we close the lid in the Freeze state, intel_lid_notify() sets modeset_on_lid.
2. When we reopen the lid, intel_lid_notify() will do a modeset
before the system is resumed.
Here is the error log:
[92146.548074] WARNING: at drivers/gpu/drm/i915/intel_display.c:1028 intel_wait_for_pipe_off+0x184/0x190 [i915]()
[92146.548076] Hardware name: VGN-Z540N
[92146.548078] pipe_off wait timed out
[92146.548167] Modules linked in: hid_generic usbhid hid snd_hda_codec_realtek snd_hda_intel snd_hda_codec parport_pc snd_hwdep ppdev snd_pcm_oss i915 snd_mixer_oss snd_pcm arc4 iwldvm snd_seq_dummy mac80211 snd_seq_oss snd_seq_midi fbcon tileblit font bitblit softcursor drm_kms_helper snd_rawmidi snd_seq_midi_event coretemp drm snd_seq kvm btusb bluetooth snd_timer iwlwifi pcmcia tpm_infineon i2c_algo_bit joydev snd_seq_device intel_agp cfg80211 snd intel_gtt yenta_socket pcmcia_rsrc sony_laptop agpgart microcode psmouse tpm_tis serio_raw mxm_wmi soundcore snd_page_alloc tpm acpi_cpufreq lpc_ich pcmcia_core tpm_bios mperf processor lp parport firewire_ohci firewire_core crc_itu_t sdhci_pci sdhci thermal e1000e
[92146.548173] Pid: 4304, comm: kworker/0:0 Tainted: G W 3.8.0-rc3-s0i3-v3-test+ #9
[92146.548175] Call Trace:
[92146.548189] [<c10378e2>] warn_slowpath_common+0x72/0xa0
[92146.548227] [<f86398b4>] ? intel_wait_for_pipe_off+0x184/0x190 [i915]
[92146.548263] [<f86398b4>] ? intel_wait_for_pipe_off+0x184/0x190 [i915]
[92146.548270] [<c10379b3>] warn_slowpath_fmt+0x33/0x40
[92146.548307] [<f86398b4>] intel_wait_for_pipe_off+0x184/0x190 [i915]
[92146.548344] [<f86399c2>] intel_disable_pipe+0x102/0x190 [i915]
[92146.548380] [<f8639ea4>] ? intel_disable_plane+0x64/0x80 [i915]
[92146.548417] [<f8639f7c>] i9xx_crtc_disable+0xbc/0x150 [i915]
[92146.548456] [<f863ebee>] intel_crtc_update_dpms+0x5e/0x90 [i915]
[92146.548493] [<f86437cf>] intel_modeset_setup_hw_state+0x42f/0x8f0 [i915]
[92146.548535] [<f8645b0b>] intel_lid_notify+0x9b/0xc0 [i915]
[92146.548543] [<c15610d3>] notifier_call_chain+0x43/0x60
[92146.548550] [<c105d1e1>] __blocking_notifier_call_chain+0x41/0x80
[92146.548556] [<c105d23f>] blocking_notifier_call_chain+0x1f/0x30
[92146.548563] [<c131a684>] acpi_lid_send_state+0x78/0xa4
[92146.548569] [<c131aa9e>] acpi_button_notify+0x3b/0xf1
[92146.548577] [<c12df56a>] ? acpi_os_execute+0x17/0x19
[92146.548582] [<c12e591a>] ? acpi_ec_sync_query+0xa5/0xbc
[92146.548589] [<c12e2b82>] acpi_device_notify+0x16/0x18
[92146.548595] [<c12f4904>] acpi_ev_notify_dispatch+0x38/0x4f
[92146.548600] [<c12df0e8>] acpi_os_execute_deferred+0x20/0x2b
[92146.548607] [<c1051208>] process_one_work+0x128/0x3f0
[92146.548613] [<c1564f73>] ? common_interrupt+0x33/0x38
[92146.548618] [<c104f8c0>] ? wake_up_worker+0x30/0x30
[92146.548624] [<c12df0c8>] ? acpi_os_wait_events_complete+0x1e/0x1e
[92146.548629] [<c10524f9>] worker_thread+0x119/0x3b0
[92146.548634] [<c10523e0>] ? manage_workers+0x240/0x240
[92146.548640] [<c1056e84>] kthread+0x94/0xa0
[92146.548647] [<c1060000>] ? ftrace_raw_output_sched_stat_runtime+0x70/0xf0
[92146.548652] [<c15649b7>] ret_from_kernel_thread+0x1b/0x28
[92146.548658] [<c1056df0>] ? kthread_create_on_node+0xc0/0xc0
Three different modeset flags are introduced in this patch:
MODESET_ON_LID_OPEN: do a modeset on the next lid-open event
MODESET_DONE: modeset already done
MODESET_SUSPENDED: suspended; only do a modeset when the system is resumed
In this way,
1. when the lid is closed, MODESET_ON_LID_OPEN is set so that
we'll do a modeset on the next lid-open event.
2. when the lid is opened, MODESET_DONE is set
so that duplicate lid-open events will be ignored.
3. when the system suspends, MODESET_SUSPENDED is set.
In this case, we will not do a modeset on any lid events.
In addition, a locking mechanism is introduced to avoid races (a condensed
sketch of the resulting handler follows the code hunk below).
Signed-off-by: Zhang Rui <rui.zhang@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-02-05 11:41:53 +04:00
|
|
|
mutex_lock(&dev_priv->modeset_restore_lock);
|
|
|
|
dev_priv->modeset_restore = MODESET_DONE;
|
|
|
|
mutex_unlock(&dev_priv->modeset_restore_lock);
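/*
 * A condensed sketch of the lid-notifier state machine described in the
 * commit message above. The real handler lives in the LVDS/panel code;
 * the notifier embedding, the acpi_lid_open() check and the exact modeset
 * call shown here are assumptions made for illustration only.
 */
#if 0	/* illustrative sketch, not part of this file */
static int intel_lid_notify(struct notifier_block *nb,
			    unsigned long val, void *unused)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, lid_notifier);

	mutex_lock(&dev_priv->modeset_restore_lock);
	if (dev_priv->modeset_restore == MODESET_SUSPENDED)
		goto exit;	/* the resume path will do the modeset */

	if (!acpi_lid_open()) {
		/* lid closed: defer the modeset to the next open event */
		dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
	} else if (dev_priv->modeset_restore == MODESET_ON_LID_OPEN) {
		/* first open after a close: do the modeset exactly once */
		dev_priv->modeset_restore = MODESET_DONE;
		intel_modeset_setup_hw_state(&dev_priv->drm);
	}
	/* MODESET_DONE: duplicate lid-open events fall through untouched */
exit:
	mutex_unlock(&dev_priv->modeset_restore_lock);
	return NOTIFY_OK;
}
#endif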
|
2013-12-07 02:32:13 +04:00
|
|
|
|
2016-05-23 17:08:09 +03:00
|
|
|
intel_opregion_notify_adapter(dev_priv, PCI_D0);
|
2014-06-12 19:35:47 +04:00
|
|
|
|
2015-12-16 03:52:19 +03:00
|
|
|
enable_rpm_wakeref_asserts(dev_priv);
|
|
|
|
|
2014-04-09 12:19:43 +04:00
|
|
|
return 0;
|
2010-02-07 23:48:24 +03:00
|
|
|
}
|
|
|
|
|
2014-10-23 20:23:25 +04:00
|
|
|
static int i915_drm_resume_early(struct drm_device *dev)
|
2010-02-07 23:48:24 +03:00
|
|
|
{
|
2016-07-04 13:34:36 +03:00
|
|
|
struct drm_i915_private *dev_priv = to_i915(dev);
|
2016-08-22 13:32:44 +03:00
|
|
|
struct pci_dev *pdev = dev_priv->drm.pdev;
|
2016-04-18 14:45:54 +03:00
|
|
|
int ret;
|
2014-10-23 20:23:24 +04:00
|
|
|
|
2014-04-01 20:55:22 +04:00
|
|
|
/*
|
|
|
|
* We have a resume ordering issue with the snd-hda driver also
|
|
|
|
* requiring our device to be powered up. Due to the lack of a
|
|
|
|
* parent/child relationship we currently solve this with an early
|
|
|
|
* resume hook.
|
|
|
|
*
|
|
|
|
* FIXME: This should be solved with a special hdmi sink device or
|
|
|
|
* similar so that power domains can be employed.
|
|
|
|
*/
|
2016-04-18 14:45:54 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Note that we need to set the power state explicitly, since we
|
|
|
|
* powered off the device during freeze and the PCI core won't power
|
|
|
|
* it back up for us during thaw. Powering off the device during
|
|
|
|
* freeze is not a hard requirement though, and during the
|
|
|
|
* suspend/resume phases the PCI core makes sure we get here with the
|
|
|
|
* device powered on. So in case we change our freeze logic and keep
|
|
|
|
* the device powered we can also remove the following set power state
|
|
|
|
* call.
|
|
|
|
*/
|
2016-08-22 13:32:44 +03:00
|
|
|
ret = pci_set_power_state(pdev, PCI_D0);
|
2016-04-18 14:45:54 +03:00
|
|
|
if (ret) {
|
|
|
|
DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note that pci_enable_device() first enables any parent bridge
|
|
|
|
* device and only then sets the power state for this device. The
|
|
|
|
* bridge enabling is a nop though, since bridge devices are resumed
|
|
|
|
* first. The order of enabling power and enabling the device is
|
|
|
|
* imposed by the PCI core as described above, so here we preserve the
|
|
|
|
* same order for the freeze/thaw phases.
|
|
|
|
*
|
|
|
|
* TODO: eventually we should remove pci_disable_device() /
|
|
|
|
* pci_enable_device() from suspend/resume. Due to how they
|
|
|
|
* depend on the device enable refcount we can't anyway depend on them
|
|
|
|
* disabling/enabling the device.
|
|
|
|
*/
|
2016-08-22 13:32:44 +03:00
|
|
|
if (pci_enable_device(pdev)) {
|
2015-11-18 18:32:30 +03:00
|
|
|
ret = -EIO;
|
|
|
|
goto out;
|
|
|
|
}
|
2010-02-07 23:48:24 +03:00
|
|
|
|
2016-08-22 13:32:44 +03:00
|
|
|
pci_set_master(pdev);
|
2010-02-07 23:48:24 +03:00
|
|
|
|
2015-12-16 03:52:19 +03:00
|
|
|
disable_rpm_wakeref_asserts(dev_priv);
|
|
|
|
|
2015-12-09 23:29:35 +03:00
|
|
|
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
|
2014-10-27 22:54:32 +03:00
|
|
|
ret = vlv_resume_prepare(dev_priv, false);
|
2014-10-23 20:23:24 +04:00
|
|
|
if (ret)
|
2015-05-20 16:45:15 +03:00
|
|
|
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
|
|
|
|
ret);
|
2014-10-23 20:23:24 +04:00
|
|
|
|
2017-02-10 13:28:01 +03:00
|
|
|
intel_uncore_resume_early(dev_priv);
|
2014-10-27 22:54:33 +03:00
|
|
|
|
2016-12-16 18:42:25 +03:00
|
|
|
if (IS_GEN9_LP(dev_priv)) {
|
2016-04-20 20:27:56 +03:00
|
|
|
if (!dev_priv->suspended_to_idle)
|
|
|
|
gen9_sanitize_dc_state(dev_priv);
|
2016-04-20 20:27:54 +03:00
|
|
|
bxt_disable_dc9(dev_priv);
|
2016-04-20 20:27:56 +03:00
|
|
|
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
|
2015-05-20 16:45:14 +03:00
|
|
|
hsw_disable_pc8(dev_priv);
|
2016-04-20 20:27:56 +03:00
|
|
|
}
|
2014-10-27 22:54:33 +03:00
|
|
|
|
2016-05-10 16:10:04 +03:00
|
|
|
intel_uncore_sanitize(dev_priv);
|
2015-11-18 18:32:30 +03:00
|
|
|
|
2016-12-16 18:42:25 +03:00
|
|
|
if (IS_GEN9_LP(dev_priv) ||
|
2016-04-01 16:02:38 +03:00
|
|
|
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
|
2015-11-18 18:32:30 +03:00
|
|
|
intel_power_domains_init_hw(dev_priv, true);
|
|
|
|
|
2017-01-24 14:01:35 +03:00
|
|
|
i915_gem_sanitize(dev_priv);
|
|
|
|
|
2016-04-18 10:04:19 +03:00
|
|
|
enable_rpm_wakeref_asserts(dev_priv);
|
|
|
|
|
2015-11-18 18:32:30 +03:00
|
|
|
out:
|
|
|
|
dev_priv->suspended_to_idle = false;
|
2014-10-23 20:23:24 +04:00
|
|
|
|
|
|
|
return ret;
|
2014-04-01 20:55:22 +04:00
|
|
|
}
|
|
|
|
|
2016-12-01 17:16:41 +03:00
|
|
|
static int i915_resume_switcheroo(struct drm_device *dev)
|
2014-04-01 20:55:22 +04:00
|
|
|
{
|
2014-10-23 20:23:17 +04:00
|
|
|
int ret;
|
2014-04-01 20:55:22 +04:00
|
|
|
|
2014-10-23 20:23:19 +04:00
|
|
|
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
|
|
|
return 0;
|
|
|
|
|
2014-10-23 20:23:25 +04:00
|
|
|
ret = i915_drm_resume_early(dev);
|
2014-10-23 20:23:17 +04:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2014-10-23 20:23:18 +04:00
|
|
|
return i915_drm_resume(dev);
|
|
|
|
}
|
|
|
|
|
2009-09-15 01:48:45 +04:00
|
|
|
/**
|
2011-11-28 22:15:17 +04:00
|
|
|
* i915_reset - reset chip after a hang
|
2017-07-21 15:32:37 +03:00
|
|
|
* @i915: #drm_i915_private to reset
|
|
|
|
* @flags: Instructions
|
2009-09-15 01:48:45 +04:00
|
|
|
*
|
2016-09-09 16:11:52 +03:00
|
|
|
* Reset the chip. Useful if a hang is detected. Marks the device as wedged
|
|
|
|
* on failure.
|
2009-09-15 01:48:45 +04:00
|
|
|
*
|
2016-09-09 16:11:51 +03:00
|
|
|
* Caller must hold the struct_mutex.
|
|
|
|
*
|
2009-09-15 01:48:45 +04:00
|
|
|
* Procedure is fairly simple:
|
|
|
|
* - reset the chip using the reset reg
|
|
|
|
* - re-init context state
|
|
|
|
* - re-init hardware status page
|
|
|
|
* - re-init ring buffer
|
|
|
|
* - re-init interrupt state
|
|
|
|
* - re-init display
|
|
|
|
*/
|
2017-07-21 15:32:37 +03:00
|
|
|
void i915_reset(struct drm_i915_private *i915, unsigned int flags)
|
2009-09-15 01:48:45 +04:00
|
|
|
{
|
2017-07-21 15:32:37 +03:00
|
|
|
struct i915_gpu_error *error = &i915->gpu_error;
|
2010-09-11 14:17:19 +04:00
|
|
|
int ret;
|
2009-09-15 01:48:45 +04:00
|
|
|
|
2017-07-21 15:32:37 +03:00
|
|
|
lockdep_assert_held(&i915->drm.struct_mutex);
|
2017-03-16 20:13:02 +03:00
|
|
|
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
|
2016-09-09 16:11:51 +03:00
|
|
|
|
2017-03-16 20:13:02 +03:00
|
|
|
if (!test_bit(I915_RESET_HANDOFF, &error->flags))
|
2016-09-09 16:11:52 +03:00
|
|
|
return;
|
2009-09-15 01:48:45 +04:00
|
|
|
|
2016-04-13 19:35:05 +03:00
|
|
|
/* Clear any previous failed attempts at recovery. Time to try again. */
|
2017-07-21 15:32:37 +03:00
|
|
|
if (!i915_gem_unset_wedged(i915))
|
2017-03-16 20:13:04 +03:00
|
|
|
goto wakeup;
|
|
|
|
|
2017-07-21 15:32:37 +03:00
|
|
|
if (!(flags & I915_RESET_QUIET))
|
|
|
|
dev_notice(i915->drm.dev, "Resetting chip after gpu hang\n");
|
2016-09-09 16:11:47 +03:00
|
|
|
error->reset_count++;
|
2016-04-13 19:35:05 +03:00
|
|
|
|
2017-07-21 15:32:37 +03:00
|
|
|
disable_irq(i915->drm.irq);
|
|
|
|
ret = i915_gem_reset_prepare(i915);
|
2017-01-17 18:59:06 +03:00
|
|
|
if (ret) {
|
|
|
|
DRM_ERROR("GPU recovery failed\n");
|
2017-07-21 15:32:37 +03:00
|
|
|
intel_gpu_reset(i915, ALL_ENGINES);
|
2017-01-17 18:59:06 +03:00
|
|
|
goto error;
|
|
|
|
}
|
2016-10-04 23:11:28 +03:00
|
|
|
|
2017-07-21 15:32:37 +03:00
|
|
|
ret = intel_gpu_reset(i915, ALL_ENGINES);
|
2010-09-11 14:17:19 +04:00
|
|
|
if (ret) {
|
2016-04-13 19:35:09 +03:00
|
|
|
if (ret != -ENODEV)
|
|
|
|
DRM_ERROR("Failed to reset chip: %i\n", ret);
|
|
|
|
else
|
|
|
|
DRM_DEBUG_DRIVER("GPU reset disabled\n");
|
2016-04-13 19:35:05 +03:00
|
|
|
goto error;
|
2009-09-15 01:48:45 +04:00
|
|
|
}
|
|
|
|
|
2017-07-21 15:32:37 +03:00
|
|
|
i915_gem_reset(i915);
|
|
|
|
intel_overlay_reset(i915);
|
2014-11-26 18:07:29 +03:00
|
|
|
|
2009-09-15 01:48:45 +04:00
|
|
|
/* Ok, now get things going again... */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Everything depends on having the GTT running, so we need to start
|
2017-09-06 14:14:05 +03:00
|
|
|
* there.
|
|
|
|
*/
|
|
|
|
ret = i915_ggtt_enable_hw(i915);
|
|
|
|
if (ret) {
|
|
|
|
DRM_ERROR("Failed to re-enable GGTT following reset %d\n", ret);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2009-09-15 01:48:45 +04:00
|
|
|
* Next we need to restore the context, but we don't use those
|
|
|
|
* yet either...
|
|
|
|
*
|
|
|
|
* Ring buffer needs to be re-initialized in the KMS case, or if X
|
|
|
|
* was running at the time of the reset (i.e. we weren't VT
|
|
|
|
* switched away).
|
|
|
|
*/
|
2017-07-21 15:32:37 +03:00
|
|
|
ret = i915_gem_init_hw(i915);
|
2015-02-23 14:03:27 +03:00
|
|
|
if (ret) {
|
|
|
|
DRM_ERROR("Failed hw init on reset %d\n", ret);
|
2016-04-13 19:35:05 +03:00
|
|
|
goto error;
|
2009-09-15 01:48:45 +04:00
|
|
|
}
|
|
|
|
|
2017-07-21 15:32:37 +03:00
|
|
|
i915_queue_hangcheck(i915);
|
2016-11-22 17:41:19 +03:00
|
|
|
|
2017-03-16 20:13:04 +03:00
|
|
|
finish:
|
2017-07-21 15:32:37 +03:00
|
|
|
i915_gem_reset_finish(i915);
|
|
|
|
enable_irq(i915->drm.irq);
|
2017-03-16 20:13:02 +03:00
|
|
|
|
2017-03-16 20:13:04 +03:00
|
|
|
wakeup:
|
2017-03-16 20:13:02 +03:00
|
|
|
clear_bit(I915_RESET_HANDOFF, &error->flags);
|
|
|
|
wake_up_bit(&error->flags, I915_RESET_HANDOFF);
|
2016-09-09 16:11:52 +03:00
|
|
|
return;
|
2016-04-13 19:35:05 +03:00
|
|
|
|
|
|
|
error:
|
2017-07-21 15:32:37 +03:00
|
|
|
i915_gem_set_wedged(i915);
|
|
|
|
i915_gem_retire_requests(i915);
|
2017-03-16 20:13:04 +03:00
|
|
|
goto finish;
|
2009-09-15 01:48:45 +04:00
|
|
|
}
|
|
|
|
|
2017-11-01 01:53:09 +03:00
|
|
|
static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
|
|
|
|
struct intel_engine_cs *engine)
|
|
|
|
{
|
|
|
|
return intel_gpu_reset(dev_priv, intel_engine_flag(engine));
|
|
|
|
}
|
|
|
|
|
drm/i915: Modify error handler for per engine hang recovery
This is a preparatory patch which modifies the error handler to do per-engine
hang recovery. The actual patch which implements this sequence follows
later in the series. The aim is to prepare the existing recovery function to
adapt to this new function where applicable (which fails at this point
because the core implementation is lacking) and continue recovery using the
legacy full GPU reset.
A helper function is also added to query the availability of engine
reset. A subsequent patch will add the capability to query which type
of reset is present (engine -> full -> no-reset) via the get-param
ioctl.
It has been decided that the error events that are used to notify userspace
of a reset will only be sent in the case of a full chip reset. In the case of
single (or multiple) engine resets, userspace won't be notified by these
events.
Note that this implementation of engine reset is for i915 directly
submitting to the ELSP, where the driver manages the hang detection,
recovery and resubmission. With GuC submission these tasks are shared
between driver and firmware; i915 will still be responsible for detecting a
hang, and when it does it will have to request the GuC to reset that engine
and remind the firmware about the outstanding submissions. This will be
added in a different patch.
v2: rebase, advertise engine reset availability in platform definition,
add note about GuC submission.
v3: s/*engine_reset*/*reset_engine*/. (Chris)
Handle reset as two-level resets, by first going to engine-only and falling
back to full/chip reset as needed, i.e. reset_engine will need the
struct_mutex.
v4: Pass the engine mask to i915_reset. (Chris)
v5: Rebase, update selftests.
v6: Rebase, prepare for mutex-less reset engine.
v7: Pass reset_engine mask as a function parameter, and iterate over the
engine mask for reset_engine. (Chris)
v8: Use i915.reset >=2 in has_reset_engine; remove redundant reset
logging; add a reset-engine-in-progress flag to prevent concurrent
resets, and avoid dual purposing of reset-backoff. (Chris)
v9: Support reset of different engines in parallel (Chris)
v10: Handle reset-engine flag locking better (Chris)
v11: Squash in reporting of per-engine-reset availability.
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Ian Lister <ian.lister@intel.com>
Signed-off-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Arun Siluvery <arun.siluvery@linux.intel.com>
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170615201828.23144-4-michel.thierry@intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/20170620095751.13127-5-chris@chris-wilson.co.uk
2017-06-20 12:57:46 +03:00
|
|
|
/**
|
|
|
|
* i915_reset_engine - reset GPU engine to recover from a hang
|
|
|
|
* @engine: engine to reset
|
2017-07-21 15:32:37 +03:00
|
|
|
* @flags: options
|
2017-06-20 12:57:46 +03:00
|
|
|
*
|
|
|
|
* Reset a specific GPU engine. Useful if a hang is detected.
|
|
|
|
* Returns zero on successful reset or otherwise an error code.
|
drm/i915: Add support for per engine reset recovery
This change implements support for per-engine reset as an initial, less
intrusive hang recovery option to be attempted before falling back to the
legacy full GPU reset recovery mode if necessary. This is only supported
from Gen8 onwards.
The hangchecker determines which engines are hung and invokes the error
handler to recover from them. The error handler schedules recovery for each
hung engine. The recovery procedure is as follows:
- identify the request that caused the hang and drop it
- force the engine to idle: this is done by issuing a reset request
- reset the engine
- re-init the engine to resume submissions.
If the engine reset fails then we fall back to the heavyweight full GPU
reset, which resets all engines and reinitializes the complete HW and SW
state.
v2: Rebase.
v3: s/*engine_reset*/*reset_engine*/; freeze engine and irqs before
calling i915_gem_reset_engine (Chris).
v4: Rebase, modify i915_gem_reset_prepare to use a ring mask and
reuse the function for reset_engine.
v5: intel_reset_engine_start/cancel instead of request/unrequest_reset.
v6: Clean up reset_engine function to not require mutex, i.e. no need to call
revoke/restore_fences and _retire_requests (Chris).
v7: Remove leftovers from v5, i.e. no need to disable irq, hold
forcewake or wakeup the handoff bit (Chris).
v8: engine_retire_requests should be (and it was) static; explain that
we have to re-init the engine after reset, which is why the init_hw call
is needed; check reset-in-progress flag (Chris).
v9: Rebase, include code to pass the active request to gem_reset_engine
(as it is already done in full reset). Remove unnecessary
intel_reset_engine_start/cancel, these are executed as part of the
reset.
v10: Rebase, use the right I915_RESET_ENGINE flag.
v11: Fixup to call reset_finish_engine even on error.
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Arun Siluvery <arun.siluvery@linux.intel.com>
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170615201828.23144-6-michel.thierry@intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/20170620095751.13127-6-chris@chris-wilson.co.uk
2017-06-20 12:57:47 +03:00
|
|
|
*
|
|
|
|
* Procedure is:
|
|
|
|
* - identify the request that caused the hang and drop it
|
|
|
|
* - reset engine (which will force the engine to idle)
|
|
|
|
* - re-init/configure engine
|
2017-06-20 12:57:46 +03:00
|
|
|
*/
|
2017-07-21 15:32:37 +03:00
|
|
|
int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
|
2017-06-20 12:57:46 +03:00
|
|
|
{
|
2017-06-20 12:57:47 +03:00
|
|
|
struct i915_gpu_error *error = &engine->i915->gpu_error;
|
|
|
|
struct drm_i915_gem_request *active_request;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
|
|
|
|
|
2017-07-21 15:32:37 +03:00
|
|
|
if (!(flags & I915_RESET_QUIET)) {
|
|
|
|
dev_notice(engine->i915->drm.dev,
|
|
|
|
"Resetting %s after gpu hang\n", engine->name);
|
|
|
|
}
|
2017-07-21 15:32:31 +03:00
|
|
|
error->reset_engine_count[engine->id]++;
|
2017-06-20 12:57:47 +03:00
|
|
|
|
|
|
|
active_request = i915_gem_reset_prepare_engine(engine);
|
|
|
|
if (IS_ERR(active_request)) {
|
|
|
|
DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n");
|
|
|
|
ret = PTR_ERR(active_request);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2017-11-01 01:53:09 +03:00
|
|
|
if (!engine->i915->guc.execbuf_client)
|
|
|
|
ret = intel_gt_reset_engine(engine->i915, engine);
|
|
|
|
else
|
|
|
|
ret = intel_guc_reset_engine(&engine->i915->guc, engine);
|
2017-07-21 15:32:21 +03:00
|
|
|
if (ret) {
|
|
|
|
/* If we fail here, we expect to fall back to a global reset */
|
2017-11-01 01:53:09 +03:00
|
|
|
DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
|
|
|
|
engine->i915->guc.execbuf_client ? "GuC " : "",
|
2017-07-21 15:32:21 +03:00
|
|
|
engine->name, ret);
|
|
|
|
goto out;
|
|
|
|
}
|
2017-07-21 15:32:20 +03:00
|
|
|
|
2017-06-20 12:57:47 +03:00
|
|
|
/*
|
|
|
|
* The request that caused the hang is stuck on the ELSP; we know the
|
|
|
|
* active request and can drop it, adjusting HEAD to skip the offending
|
|
|
|
* request so as to resume executing the remaining requests in the queue.
|
|
|
|
*/
|
|
|
|
i915_gem_reset_engine(engine, active_request);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The engine and its registers (and workarounds in case of render)
|
|
|
|
* have been reset to their default values. Follow the init_ring
|
|
|
|
* process to program RING_MODE, HWSP and re-enable submission.
|
|
|
|
*/
|
|
|
|
ret = engine->init_hw(engine);
|
2017-06-20 12:57:48 +03:00
|
|
|
if (ret)
|
|
|
|
goto out;
|
2017-06-20 12:57:47 +03:00
|
|
|
|
|
|
|
out:
|
2017-07-21 15:32:21 +03:00
|
|
|
i915_gem_reset_finish_engine(engine);
|
2017-06-20 12:57:47 +03:00
|
|
|
return ret;
|
2017-06-20 12:57:46 +03:00
|
|
|
}
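/*
 * A condensed sketch of the two-level recovery policy described in the
 * commit messages above: attempt i915_reset_engine() for each hung
 * engine first and escalate to the full-chip i915_reset() only if an
 * engine reset fails. The real dispatch lives in the error handler and
 * additionally manages the I915_RESET_BACKOFF/I915_RESET_ENGINE flags
 * and a runtime-pm wakeref, which are omitted here for brevity.
 */
#if 0	/* illustrative sketch, not part of this file */
static void i915_recover_hung_engines(struct drm_i915_private *i915,
				      u32 engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, i915, engine_mask, tmp) {
		if (i915_reset_engine(engine, 0) == 0)
			engine_mask &= ~intel_engine_flag(engine);
	}

	if (engine_mask) {
		/* engine reset failed or unsupported: full chip reset */
		mutex_lock(&i915->drm.struct_mutex);
		i915_reset(i915, 0);
		mutex_unlock(&i915->drm.struct_mutex);
	}
}
#endif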
|
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
static int i915_pm_suspend(struct device *kdev)
|
2009-01-05 00:55:33 +03:00
|
|
|
{
|
2016-08-22 13:32:42 +03:00
|
|
|
struct pci_dev *pdev = to_pci_dev(kdev);
|
|
|
|
struct drm_device *dev = pci_get_drvdata(pdev);
|
2009-01-05 00:55:33 +03:00
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
if (!dev) {
|
|
|
|
dev_err(kdev, "DRM not initialized, aborting suspend.\n");
|
2010-02-07 23:48:24 +03:00
|
|
|
return -ENODEV;
|
|
|
|
}
|
2009-01-05 00:55:33 +03:00
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
2010-12-07 02:20:40 +03:00
|
|
|
return 0;
|
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
return i915_drm_suspend(dev);
|
2014-04-01 20:55:22 +04:00
|
|
|
}
|
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
static int i915_pm_suspend_late(struct device *kdev)
|
2014-04-01 20:55:22 +04:00
|
|
|
{
|
2016-08-22 13:32:42 +03:00
|
|
|
struct drm_device *dev = &kdev_to_i915(kdev)->drm;
|
2014-04-01 20:55:22 +04:00
|
|
|
|
|
|
|
/*
|
2015-05-18 21:53:48 +03:00
|
|
|
* We have a suspend ordering issue with the snd-hda driver also
|
2014-04-01 20:55:22 +04:00
|
|
|
* requiring our device to be powered up. Due to the lack of a
|
|
|
|
* parent/child relationship we currently solve this with a late
|
|
|
|
* suspend hook.
|
|
|
|
*
|
|
|
|
* FIXME: This should be solved with a special hdmi sink device or
|
|
|
|
* similar so that power domains can be employed.
|
|
|
|
*/
|
2016-08-22 13:32:42 +03:00
|
|
|
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
2014-04-01 20:55:22 +04:00
|
|
|
return 0;
|
2009-01-05 00:55:33 +03:00
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
return i915_drm_suspend_late(dev, false);
|
2015-03-02 14:04:41 +03:00
|
|
|
}
|
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
static int i915_pm_poweroff_late(struct device *kdev)
|
2015-03-02 14:04:41 +03:00
|
|
|
{
|
2016-08-22 13:32:42 +03:00
|
|
|
struct drm_device *dev = &kdev_to_i915(kdev)->drm;
|
2015-03-02 14:04:41 +03:00
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
2015-03-02 14:04:41 +03:00
|
|
|
return 0;
|
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
return i915_drm_suspend_late(dev, true);
|
2009-12-16 08:36:10 +03:00
|
|
|
}
|
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
static int i915_pm_resume_early(struct device *kdev)
|
2014-04-01 20:55:22 +04:00
|
|
|
{
|
2016-08-22 13:32:42 +03:00
|
|
|
struct drm_device *dev = &kdev_to_i915(kdev)->drm;
|
2014-04-01 20:55:22 +04:00
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
2014-10-23 20:23:19 +04:00
|
|
|
return 0;
|
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
return i915_drm_resume_early(dev);
|
2014-04-01 20:55:22 +04:00
|
|
|
}
|
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
static int i915_pm_resume(struct device *kdev)
|
2009-12-16 08:36:10 +03:00
|
|
|
{
|
2016-08-22 13:32:42 +03:00
|
|
|
struct drm_device *dev = &kdev_to_i915(kdev)->drm;
|
2010-02-07 23:48:24 +03:00
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
2014-10-23 20:23:19 +04:00
|
|
|
return 0;
|
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
return i915_drm_resume(dev);
|
2009-12-16 08:36:10 +03:00
|
|
|
}
|
|
|
|
|
2016-05-14 09:26:32 +03:00
|
|
|
/* freeze: before creating the hibernation_image */
|
2016-08-22 13:32:42 +03:00
|
|
|
static int i915_pm_freeze(struct device *kdev)
|
2016-05-14 09:26:32 +03:00
|
|
|
{
|
2017-08-16 17:46:07 +03:00
|
|
|
struct drm_device *dev = &kdev_to_i915(kdev)->drm;
|
2016-09-21 16:51:07 +03:00
|
|
|
int ret;
|
|
|
|
|
2017-08-16 17:46:07 +03:00
|
|
|
if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
|
|
|
|
ret = i915_drm_suspend(dev);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
2016-09-21 16:51:07 +03:00
|
|
|
|
|
|
|
ret = i915_gem_freeze(kdev_to_i915(kdev));
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
return 0;
|
2016-05-14 09:26:32 +03:00
|
|
|
}
|
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
static int i915_pm_freeze_late(struct device *kdev)
|
2016-05-14 09:26:32 +03:00
|
|
|
{
|
2017-08-16 17:46:07 +03:00
|
|
|
struct drm_device *dev = &kdev_to_i915(kdev)->drm;
|
2016-05-14 09:26:33 +03:00
|
|
|
int ret;
|
|
|
|
|
2017-08-16 17:46:07 +03:00
|
|
|
if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
|
|
|
|
ret = i915_drm_suspend_late(dev, true);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
2016-05-14 09:26:33 +03:00
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
ret = i915_gem_freeze_late(kdev_to_i915(kdev));
|
2016-05-14 09:26:33 +03:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
return 0;
|
2016-05-14 09:26:32 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* thaw: called after creating the hibernation image, but before turning off. */
|
2016-08-22 13:32:42 +03:00
|
|
|
static int i915_pm_thaw_early(struct device *kdev)
|
2016-05-14 09:26:32 +03:00
|
|
|
{
|
2016-08-22 13:32:42 +03:00
|
|
|
return i915_pm_resume_early(kdev);
|
2016-05-14 09:26:32 +03:00
|
|
|
}
|
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
static int i915_pm_thaw(struct device *kdev)
|
2016-05-14 09:26:32 +03:00
|
|
|
{
|
2016-08-22 13:32:42 +03:00
|
|
|
return i915_pm_resume(kdev);
|
2016-05-14 09:26:32 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* restore: called after loading the hibernation image. */
|
2016-08-22 13:32:42 +03:00
|
|
|
static int i915_pm_restore_early(struct device *kdev)
|
2016-05-14 09:26:32 +03:00
|
|
|
{
|
2016-08-22 13:32:42 +03:00
|
|
|
return i915_pm_resume_early(kdev);
|
2016-05-14 09:26:32 +03:00
|
|
|
}
|
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
static int i915_pm_restore(struct device *kdev)
|
2016-05-14 09:26:32 +03:00
|
|
|
{
|
2016-08-22 13:32:42 +03:00
|
|
|
return i915_pm_resume(kdev);
|
2016-05-14 09:26:32 +03:00
|
|
|
}
|
|
|
|
|
2014-05-05 16:19:56 +04:00
|
|
|
/*
|
|
|
|
* Save all Gunit registers that may be lost after a D3 and a subsequent
|
|
|
|
* S0i[R123] transition. The list of registers needing a save/restore is
|
|
|
|
* defined in the VLV2_S0IXRegs document. This document marks all Gunit
|
|
|
|
* registers in the following way:
|
|
|
|
* - Driver: saved/restored by the driver
|
|
|
|
* - Punit : saved/restored by the Punit firmware
|
|
|
|
* - No, w/o marking: no need to save/restore, since the register is R/O or
|
|
|
|
* used internally by the HW in a way that doesn't depend on
|
|
|
|
* keeping the content across a suspend/resume.
|
|
|
|
* - Debug : used for debugging
|
|
|
|
*
|
|
|
|
* We save/restore all registers marked with 'Driver', with the following
|
|
|
|
* exceptions:
|
|
|
|
* - Registers out of use, including also registers marked with 'Debug'.
|
|
|
|
* These have no effect on the driver's operation, so we don't save/restore
|
|
|
|
* them to reduce the overhead.
|
|
|
|
* - Registers that are fully setup by an initialization function called from
|
|
|
|
* the resume path. For example many clock gating and RPS/RC6 registers.
|
|
|
|
* - Registers that provide the right functionality with their reset defaults.
|
|
|
|
*
|
|
|
|
* TODO: Except for registers that, based on the above 3 criteria, can be safely
|
|
|
|
* ignored, we save/restore all others, practically treating the HW context as
|
|
|
|
* a black-box for the driver. Further investigation is needed to reduce the
|
|
|
|
* saved/restored registers even further, by following the same 3 criteria.
|
|
|
|
*/
|
|
|
|
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
|
|
|
|
{
|
|
|
|
struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* GAM 0x4000-0x4770 */
|
|
|
|
s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
|
|
|
|
s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
|
|
|
|
s->arb_mode = I915_READ(ARB_MODE);
|
|
|
|
s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
|
|
|
|
s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
|
2015-09-18 20:03:16 +03:00
|
|
|
s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
|
2014-05-05 16:19:56 +04:00
|
|
|
|
|
|
|
s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
|
2015-04-16 02:52:30 +03:00
|
|
|
s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
|
2014-05-05 16:19:56 +04:00
|
|
|
|
|
|
|
s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
|
|
|
|
s->ecochk = I915_READ(GAM_ECOCHK);
|
|
|
|
s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
|
|
|
|
s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
|
|
|
|
|
|
|
|
s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
|
|
|
|
|
|
|
|
/* MBC 0x9024-0x91D0, 0x8500 */
|
|
|
|
s->g3dctl = I915_READ(VLV_G3DCTL);
|
|
|
|
s->gsckgctl = I915_READ(VLV_GSCKGCTL);
|
|
|
|
s->mbctl = I915_READ(GEN6_MBCTL);
|
|
|
|
|
|
|
|
/* GCP 0x9400-0x9424, 0x8100-0x810C */
|
|
|
|
s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
|
|
|
|
s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
|
|
|
|
s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
|
|
|
|
s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
|
|
|
|
s->rstctl = I915_READ(GEN6_RSTCTL);
|
|
|
|
s->misccpctl = I915_READ(GEN7_MISCCPCTL);
|
|
|
|
|
|
|
|
/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
|
|
|
|
s->gfxpause = I915_READ(GEN6_GFXPAUSE);
|
|
|
|
s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
|
|
|
|
s->rpdeuc = I915_READ(GEN6_RPDEUC);
|
|
|
|
s->ecobus = I915_READ(ECOBUS);
|
|
|
|
s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
|
|
|
|
s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
|
|
|
|
s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
|
|
|
|
s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
|
|
|
|
s->rcedata = I915_READ(VLV_RCEDATA);
|
|
|
|
s->spare2gh = I915_READ(VLV_SPAREG2H);
|
|
|
|
|
|
|
|
/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
|
|
|
|
s->gt_imr = I915_READ(GTIMR);
|
|
|
|
s->gt_ier = I915_READ(GTIER);
|
|
|
|
s->pm_imr = I915_READ(GEN6_PMIMR);
|
|
|
|
s->pm_ier = I915_READ(GEN6_PMIER);
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
|
2015-09-18 20:03:16 +03:00
|
|
|
s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
|
2014-05-05 16:19:56 +04:00
|
|
|
|
|
|
|
/* GT SA CZ domain, 0x100000-0x138124 */
|
|
|
|
s->tilectl = I915_READ(TILECTL);
|
|
|
|
s->gt_fifoctl = I915_READ(GTFIFOCTL);
|
|
|
|
s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
|
|
|
|
s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
|
|
|
|
s->pmwgicz = I915_READ(VLV_PMWGICZ);
|
|
|
|
|
|
|
|
/* Gunit-Display CZ domain, 0x182028-0x1821CF */
|
|
|
|
s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
|
|
|
|
s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
|
2015-04-02 00:22:57 +03:00
|
|
|
s->pcbr = I915_READ(VLV_PCBR);
|
2014-05-05 16:19:56 +04:00
|
|
|
s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Not saving any of:
|
|
|
|
* DFT, 0x9800-0x9EC0
|
|
|
|
* SARB, 0xB000-0xB1FC
|
|
|
|
* GAC, 0x5208-0x524C, 0x14000-0x14C000
|
|
|
|
* PCI CFG
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
|
|
|
|
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
|
|
|
|
{
|
|
|
|
struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
|
|
|
|
u32 val;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* GAM 0x4000-0x4770 */
|
|
|
|
I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
|
|
|
|
I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
|
|
|
|
I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
|
|
|
|
I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
|
|
|
|
I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
|
2015-09-18 20:03:16 +03:00
|
|
|
I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
|
2014-05-05 16:19:56 +04:00
|
|
|
|
|
|
|
I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
|
2015-04-16 02:52:30 +03:00
|
|
|
I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
|
2014-05-05 16:19:56 +04:00
|
|
|
|
|
|
|
I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
|
|
|
|
I915_WRITE(GAM_ECOCHK, s->ecochk);
|
|
|
|
I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
|
|
|
|
I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
|
|
|
|
|
|
|
|
I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
|
|
|
|
|
|
|
|
/* MBC 0x9024-0x91D0, 0x8500 */
|
|
|
|
I915_WRITE(VLV_G3DCTL, s->g3dctl);
|
|
|
|
I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
|
|
|
|
I915_WRITE(GEN6_MBCTL, s->mbctl);
|
|
|
|
|
|
|
|
/* GCP 0x9400-0x9424, 0x8100-0x810C */
|
|
|
|
I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
|
|
|
|
I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
|
|
|
|
I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
|
|
|
|
I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
|
|
|
|
I915_WRITE(GEN6_RSTCTL, s->rstctl);
|
|
|
|
I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
|
|
|
|
|
|
|
|
/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
|
|
|
|
I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
|
|
|
|
I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
|
|
|
|
I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
|
|
|
|
I915_WRITE(ECOBUS, s->ecobus);
|
|
|
|
I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
|
|
|
|
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
|
|
|
|
I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
|
|
|
|
I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
|
|
|
|
I915_WRITE(VLV_RCEDATA, s->rcedata);
|
|
|
|
I915_WRITE(VLV_SPAREG2H, s->spare2gh);
|
|
|
|
|
|
|
|
/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
|
|
|
|
I915_WRITE(GTIMR, s->gt_imr);
|
|
|
|
I915_WRITE(GTIER, s->gt_ier);
|
|
|
|
I915_WRITE(GEN6_PMIMR, s->pm_imr);
|
|
|
|
I915_WRITE(GEN6_PMIER, s->pm_ier);
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
|
2015-09-18 20:03:16 +03:00
|
|
|
I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
|
2014-05-05 16:19:56 +04:00
|
|
|
|
|
|
|
/* GT SA CZ domain, 0x100000-0x138124 */
|
|
|
|
I915_WRITE(TILECTL, s->tilectl);
|
|
|
|
I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
|
|
|
|
/*
|
|
|
|
* Preserve the GT allow wake and GFX force clock bit, they should not
|
|
|
|
* be restored, as they are used to control the s0ix suspend/resume
|
|
|
|
* sequence by the caller.
|
|
|
|
*/
|
|
|
|
val = I915_READ(VLV_GTLC_WAKE_CTRL);
|
|
|
|
val &= VLV_GTLC_ALLOWWAKEREQ;
|
|
|
|
val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
|
|
|
|
I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
|
|
|
|
|
|
|
|
val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
|
|
|
|
val &= VLV_GFX_CLK_FORCE_ON_BIT;
|
|
|
|
val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
|
|
|
|
I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
|
|
|
|
|
|
|
|
I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
|
|
|
|
|
|
|
|
/* Gunit-Display CZ domain, 0x182028-0x1821CF */
|
|
|
|
I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
|
|
|
|
I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
|
2015-04-02 00:22:57 +03:00
|
|
|
I915_WRITE(VLV_PCBR, s->pcbr);
|
2014-05-05 16:19:56 +04:00
|
|
|
I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
|
|
|
|
}
|
|
|
|
|
2017-04-21 16:58:15 +03:00
|
|
|
static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
|
|
|
|
u32 mask, u32 val)
|
|
|
|
{
|
|
|
|
/* The HW does not like us polling for PW_STATUS frequently, so
|
|
|
|
* use the sleeping loop rather than risk the busy spin within
|
|
|
|
* intel_wait_for_register().
|
|
|
|
*
|
|
|
|
* Transitioning between RC6 states should be at most 2ms (see
|
|
|
|
* valleyview_enable_rps) so use a 3ms timeout.
|
|
|
|
*/
|
|
|
|
return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val,
|
|
|
|
3);
|
|
|
|
}
|
|
|
|
|
2014-04-18 17:35:02 +04:00
|
|
|
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
|
|
|
|
{
|
|
|
|
u32 val;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
|
|
|
|
val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
|
|
|
|
if (force_on)
|
|
|
|
val |= VLV_GFX_CLK_FORCE_ON_BIT;
|
|
|
|
I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
|
|
|
|
|
|
|
|
if (!force_on)
|
|
|
|
return 0;
|
|
|
|
|
2016-06-30 17:32:46 +03:00
|
|
|
err = intel_wait_for_register(dev_priv,
|
|
|
|
VLV_GTLC_SURVIVABILITY_REG,
|
|
|
|
VLV_GFX_CLK_STATUS_BIT,
|
|
|
|
VLV_GFX_CLK_STATUS_BIT,
|
|
|
|
20);
|
2014-04-18 17:35:02 +04:00
|
|
|
if (err)
|
|
|
|
DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
|
|
|
|
I915_READ(VLV_GTLC_SURVIVABILITY_REG));
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2014-05-05 16:19:56 +04:00
|
|
|
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
|
|
|
|
{
|
2017-04-21 16:58:15 +03:00
|
|
|
u32 mask;
|
2014-05-05 16:19:56 +04:00
|
|
|
u32 val;
|
2017-04-21 16:58:15 +03:00
|
|
|
int err;
|
2014-05-05 16:19:56 +04:00
|
|
|
|
|
|
|
val = I915_READ(VLV_GTLC_WAKE_CTRL);
|
|
|
|
val &= ~VLV_GTLC_ALLOWWAKEREQ;
|
|
|
|
if (allow)
|
|
|
|
val |= VLV_GTLC_ALLOWWAKEREQ;
|
|
|
|
I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
|
|
|
|
POSTING_READ(VLV_GTLC_WAKE_CTRL);
|
|
|
|
|
2017-04-21 16:58:15 +03:00
|
|
|
mask = VLV_GTLC_ALLOWWAKEACK;
|
|
|
|
val = allow ? mask : 0;
|
|
|
|
|
|
|
|
err = vlv_wait_for_pw_status(dev_priv, mask, val);
|
2014-05-05 16:19:56 +04:00
|
|
|
if (err)
|
|
|
|
DRM_ERROR("timeout disabling GT waking\n");
|
2016-06-30 17:32:47 +03:00
|
|
|
|
2014-05-05 16:19:56 +04:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-04-21 16:58:15 +03:00
|
|
|
static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
|
|
|
|
bool wait_for_on)
|
2014-05-05 16:19:56 +04:00
|
|
|
{
|
|
|
|
u32 mask;
|
|
|
|
u32 val;
|
|
|
|
|
|
|
|
mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
|
|
|
|
val = wait_for_on ? mask : 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* RC6 transitioning can be delayed up to 2 msec (see
|
|
|
|
* valleyview_enable_rps), use 3 msec for safety.
|
|
|
|
*/
|
2017-04-21 16:58:15 +03:00
|
|
|
if (vlv_wait_for_pw_status(dev_priv, mask, val))
|
2014-05-05 16:19:56 +04:00
|
|
|
DRM_ERROR("timeout waiting for GT wells to go %s\n",
|
2016-01-14 13:53:34 +03:00
|
|
|
onoff(wait_for_on));
|
2014-05-05 16:19:56 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
|
|
|
|
{
|
|
|
|
if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
|
|
|
|
return;
|
|
|
|
|
2016-01-19 23:00:56 +03:00
|
|
|
DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
|
2014-05-05 16:19:56 +04:00
|
|
|
I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
|
|
|
|
}
|
|
|
|
|
2014-08-13 21:37:05 +04:00
|
|
|
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
|
2014-05-05 16:19:56 +04:00
|
|
|
{
|
|
|
|
u32 mask;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Bspec defines the following GT power well status flags as debug only, so
|
|
|
|
* don't treat them as hard failures.
|
|
|
|
*/
|
2017-04-21 16:58:15 +03:00
|
|
|
vlv_wait_for_gt_wells(dev_priv, false);
|
2014-05-05 16:19:56 +04:00
|
|
|
|
|
|
|
mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
|
|
|
|
WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
|
|
|
|
|
|
|
|
vlv_check_no_gt_access(dev_priv);
|
|
|
|
|
|
|
|
err = vlv_force_gfx_clock(dev_priv, true);
|
|
|
|
if (err)
|
|
|
|
goto err1;
|
|
|
|
|
|
|
|
err = vlv_allow_gt_wake(dev_priv, false);
|
|
|
|
if (err)
|
|
|
|
goto err2;
|
2014-12-12 11:48:16 +03:00
|
|
|
|
2016-04-07 11:08:05 +03:00
|
|
|
if (!IS_CHERRYVIEW(dev_priv))
|
2014-12-12 11:48:16 +03:00
|
|
|
vlv_save_gunit_s0ix_state(dev_priv);
|
2014-05-05 16:19:56 +04:00
|
|
|
|
|
|
|
err = vlv_force_gfx_clock(dev_priv, false);
|
|
|
|
if (err)
|
|
|
|
goto err2;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err2:
|
|
|
|
/* For safety always re-enable waking and disable gfx clock forcing */
|
|
|
|
vlv_allow_gt_wake(dev_priv, true);
|
|
|
|
err1:
|
|
|
|
vlv_force_gfx_clock(dev_priv, false);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2014-08-13 21:37:06 +04:00
|
|
|
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
|
|
|
|
bool rpm_resume)
|
2014-05-05 16:19:56 +04:00
|
|
|
{
|
|
|
|
int err;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If any of the steps fail just try to continue, that's the best we
|
|
|
|
* can do at this point. Return the first error code (which will also
|
|
|
|
* leave RPM permanently disabled).
|
|
|
|
*/
|
|
|
|
ret = vlv_force_gfx_clock(dev_priv, true);
|
|
|
|
|
2016-04-07 11:08:05 +03:00
|
|
|
if (!IS_CHERRYVIEW(dev_priv))
|
2014-12-12 11:48:16 +03:00
|
|
|
vlv_restore_gunit_s0ix_state(dev_priv);
|
2014-05-05 16:19:56 +04:00
|
|
|
|
|
|
|
err = vlv_allow_gt_wake(dev_priv, true);
|
|
|
|
if (!ret)
|
|
|
|
ret = err;
|
|
|
|
|
|
|
|
err = vlv_force_gfx_clock(dev_priv, false);
|
|
|
|
if (!ret)
|
|
|
|
ret = err;
|
|
|
|
|
|
|
|
vlv_check_no_gt_access(dev_priv);
|
|
|
|
|
2016-10-24 15:42:18 +03:00
|
|
|
if (rpm_resume)
|
2016-10-31 23:37:22 +03:00
|
|
|
intel_init_clock_gating(dev_priv);
|
2014-05-05 16:19:56 +04:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
static int intel_runtime_suspend(struct device *kdev)
|
2013-12-07 02:32:13 +04:00
|
|
|
{
|
2016-08-22 13:32:42 +03:00
|
|
|
struct pci_dev *pdev = to_pci_dev(kdev);
|
2013-12-07 02:32:13 +04:00
|
|
|
struct drm_device *dev = pci_get_drvdata(pdev);
|
2016-07-04 13:34:36 +03:00
|
|
|
struct drm_i915_private *dev_priv = to_i915(dev);
|
2014-04-15 17:39:45 +04:00
|
|
|
int ret;
|
2013-12-07 02:32:13 +04:00
|
|
|
|
2017-10-11 00:30:10 +03:00
|
|
|
if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && intel_rc6_enabled())))
|
2014-04-14 21:24:29 +04:00
|
|
|
return -ENODEV;
|
|
|
|
|
2016-10-13 13:02:55 +03:00
|
|
|
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
|
2014-08-26 14:26:56 +04:00
|
|
|
return -ENODEV;
|
|
|
|
|
2013-12-07 02:32:13 +04:00
|
|
|
DRM_DEBUG_KMS("Suspending device\n");
|
|
|
|
|
2015-12-16 03:52:19 +03:00
|
|
|
disable_rpm_wakeref_asserts(dev_priv);
|
|
|
|
|
2014-05-07 20:57:49 +04:00
|
|
|
/*
|
|
|
|
* We are safe here against re-faults, since the fault handler takes
|
|
|
|
* an RPM reference.
|
|
|
|
*/
|
2016-10-24 15:42:18 +03:00
|
|
|
i915_gem_runtime_suspend(dev_priv);
|
2014-05-07 20:57:49 +04:00
|
|
|
|
2016-12-01 17:16:38 +03:00
|
|
|
intel_guc_suspend(dev_priv);
|
2015-09-30 19:46:37 +03:00
|
|
|
|
2014-11-19 16:30:05 +03:00
|
|
|
intel_runtime_pm_disable_interrupts(dev_priv);
|
2014-04-14 21:24:37 +04:00
|
|
|
|
2017-11-14 16:55:18 +03:00
|
|
|
intel_uncore_suspend(dev_priv);
|
|
|
|
|
2016-04-20 20:27:54 +03:00
|
|
|
ret = 0;
|
2016-12-16 18:42:25 +03:00
|
|
|
if (IS_GEN9_LP(dev_priv)) {
|
2016-04-20 20:27:54 +03:00
|
|
|
bxt_display_core_uninit(dev_priv);
|
|
|
|
bxt_enable_dc9(dev_priv);
|
|
|
|
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
|
|
|
|
hsw_enable_pc8(dev_priv);
|
|
|
|
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
|
|
|
|
ret = vlv_suspend_complete(dev_priv);
|
|
|
|
}
|
|
|
|
|
2014-04-15 17:39:45 +04:00
|
|
|
if (ret) {
|
|
|
|
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
|
2017-11-14 16:55:18 +03:00
|
|
|
intel_uncore_runtime_resume(dev_priv);
|
|
|
|
|
2014-09-30 12:56:44 +04:00
|
|
|
intel_runtime_pm_enable_interrupts(dev_priv);
|
2014-04-15 17:39:45 +04:00
|
|
|
|
2015-12-16 03:52:19 +03:00
|
|
|
enable_rpm_wakeref_asserts(dev_priv);
|
|
|
|
|
2014-04-15 17:39:45 +04:00
|
|
|
return ret;
|
|
|
|
}
|
drm/i915: make PC8 be part of runtime PM suspend/resume
Currently, when our driver becomes idle for i915.pc8_timeout (default:
5s) we enable PC8, so we save some power, but not everything we can.
Then, while PC8 is enabled, if we stay idle for more
autosuspend_delay_ms (default: 10s) we'll enter runtime PM and put the
graphics device in D3 state, saving even more power. The two features
are separate things with increasing levels of power savings, but if we
disable PC8 we'll never get into D3.
While from the modularity point of view it would be nice to keep these
features as separate, we have reasons to merge them:
- We are not aware of anybody wanting a "PC8 without D3" environment.
- If we keep both features as separate, we'll have to test both
PC8 and PC8+D3 code paths. We're already having a major pain to
make QA do automated testing of just one thing, testing both paths
will cost even more.
- Only Haswell+ supports PC8, so if we want to add runtime PM support
to, for example, IVB, we'll have to copy some code from the PC8
feature to runtime PM, so merging both features as a single thing
will make it easier for enabling runtime PM on other platforms.
This patch only does the very basic steps required to have PC8 and
runtime PM merged on a single feature: the next patches will take care
of cleaning up everything.
v2: - Rebase.
v3: - Rebase.
- Fully remove the deprecated i915 params since Daniel doesn't
consider them as part of the ABI.
v4: - Rebase.
- Fix typo in the commit message.
v5: - Rebase, again.
- Add a huge comment explaining the different forcewake usage
(Chris, Daniel).
- Use open-coded forcewake functions (Daniel).
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2014-03-08 03:08:05 +04:00
|
|
|
|
2015-12-16 03:52:19 +03:00
|
|
|
enable_rpm_wakeref_asserts(dev_priv);
|
2017-10-11 00:30:04 +03:00
|
|
|
WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
|
2015-12-15 17:25:08 +03:00
|
|
|
|
2016-01-08 16:51:20 +03:00
|
|
|
if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
|
2015-12-15 17:25:08 +03:00
|
|
|
DRM_ERROR("Unclaimed access detected prior to suspending\n");
|
|
|
|
|
2017-10-11 00:30:04 +03:00
|
|
|
dev_priv->runtime_pm.suspended = true;
|
2014-01-15 03:36:15 +04:00
|
|
|
|
|
|
|
/*
|
2014-08-22 00:09:38 +04:00
|
|
|
* FIXME: We really should find a document that references the arguments
|
|
|
|
* used below!
|
2014-01-15 03:36:15 +04:00
|
|
|
*/
|
2016-05-23 17:08:09 +03:00
|
|
|
if (IS_BROADWELL(dev_priv)) {
|
2015-07-31 00:20:29 +03:00
|
|
|
/*
|
|
|
|
* On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
|
|
|
|
* being detected, and the call we do at intel_runtime_resume()
|
|
|
|
* won't be able to restore them. Since PCI_D3hot matches the
|
|
|
|
* actual specification and appears to be working, use it.
|
|
|
|
*/
|
2016-05-23 17:08:09 +03:00
|
|
|
intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
|
2015-07-31 00:20:29 +03:00
|
|
|
} else {
|
2014-08-22 00:09:38 +04:00
|
|
|
/*
|
|
|
|
* Current versions of firmware which depend on this opregion
|
|
|
|
* notification have repurposed the D1 definition to mean
|
|
|
|
* "runtime suspended" vs. what you would normally expect (D3)
|
|
|
|
* to distinguish it from notifications that might be sent via
|
|
|
|
* the suspend path.
|
|
|
|
*/
|
2016-05-23 17:08:09 +03:00
|
|
|
intel_opregion_notify_adapter(dev_priv, PCI_D1);
|
2014-08-22 00:09:38 +04:00
|
|
|
}
|
2013-12-07 02:32:13 +04:00
|
|
|
|
2015-01-16 12:34:40 +03:00
|
|
|
assert_forcewakes_inactive(dev_priv);
|
2015-01-16 12:34:34 +03:00
|
|
|
|
2017-01-20 17:28:43 +03:00
|
|
|
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
|
2016-06-22 00:03:44 +03:00
|
|
|
intel_hpd_poll_init(dev_priv);
|
|
|
|
|
2014-03-08 03:08:05 +04:00
|
|
|
DRM_DEBUG_KMS("Device suspended\n");
|
2013-12-07 02:32:13 +04:00
|
|
|
return 0;
|
|
|
|
}
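/*
 * Editor's note: for context on the autosuspend_delay_ms knob mentioned in
 * the PC8/runtime-PM commit message above, this is a sketch of the usual
 * runtime-PM wiring (the real setup lives in the driver's power management
 * init code; the value and placement here are illustrative assumptions).
 */
static void sketch_enable_runtime_pm(struct device *kdev)
{
	/* Runtime-suspend after 10s of idleness; user space can tune this
	 * via /sys/.../power/autosuspend_delay_ms. */
	pm_runtime_set_autosuspend_delay(kdev, 10000);
	pm_runtime_use_autosuspend(kdev);

	/* Allow the PM core to runtime-manage the device. */
	pm_runtime_allow(kdev);

	/* Drop the initial reference; the device may now autosuspend,
	 * which eventually invokes intel_runtime_suspend() above. */
	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}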
|
|
|
|
|
2016-08-22 13:32:42 +03:00
|
|
|
static int intel_runtime_resume(struct device *kdev)
|
2013-12-07 02:32:13 +04:00
|
|
|
{
|
2016-08-22 13:32:42 +03:00
|
|
|
struct pci_dev *pdev = to_pci_dev(kdev);
|
2013-12-07 02:32:13 +04:00
|
|
|
struct drm_device *dev = pci_get_drvdata(pdev);
|
2016-07-04 13:34:36 +03:00
|
|
|
struct drm_i915_private *dev_priv = to_i915(dev);
|
2014-10-27 22:54:32 +03:00
|
|
|
int ret = 0;
|
2013-12-07 02:32:13 +04:00
|
|
|
|
2016-10-13 13:02:55 +03:00
|
|
|
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
|
2014-08-26 14:26:56 +04:00
|
|
|
return -ENODEV;
|
2013-12-07 02:32:13 +04:00
|
|
|
|
|
|
|
DRM_DEBUG_KMS("Resuming device\n");
|
|
|
|
|
2017-10-11 00:30:04 +03:00
|
|
|
WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
|
2015-12-16 03:52:19 +03:00
|
|
|
disable_rpm_wakeref_asserts(dev_priv);
|
|
|
|
|
2016-05-23 17:08:09 +03:00
|
|
|
intel_opregion_notify_adapter(dev_priv, PCI_D0);
|
2017-10-11 00:30:04 +03:00
|
|
|
dev_priv->runtime_pm.suspended = false;
|
2015-12-15 17:25:08 +03:00
|
|
|
if (intel_uncore_unclaimed_mmio(dev_priv))
|
|
|
|
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
|
2013-12-07 02:32:13 +04:00
|
|
|
|
2016-12-01 17:16:38 +03:00
|
|
|
intel_guc_resume(dev_priv);
|
2015-09-30 19:46:37 +03:00
|
|
|
|
2016-12-16 18:42:25 +03:00
|
|
|
if (IS_GEN9_LP(dev_priv)) {
|
2016-04-20 20:27:54 +03:00
|
|
|
bxt_disable_dc9(dev_priv);
|
|
|
|
bxt_display_core_init(dev_priv, true);
|
2016-04-20 20:27:57 +03:00
|
|
|
if (dev_priv->csr.dmc_payload &&
|
|
|
|
(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
|
|
|
|
gen9_enable_dc5(dev_priv);
|
2016-04-20 20:27:54 +03:00
|
|
|
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
|
2014-10-27 22:54:32 +03:00
|
|
|
hsw_disable_pc8(dev_priv);
|
2016-04-20 20:27:54 +03:00
|
|
|
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
|
2014-10-27 22:54:32 +03:00
|
|
|
ret = vlv_resume_prepare(dev_priv, true);
|
2016-04-20 20:27:54 +03:00
|
|
|
}
|
2014-10-27 22:54:32 +03:00
|
|
|
|
2017-11-14 16:55:17 +03:00
|
|
|
intel_uncore_runtime_resume(dev_priv);
|
|
|
|
|
2014-04-15 17:39:45 +04:00
|
|
|
/*
|
|
|
|
* No point in rolling things back in case of an error, as the best
|
|
|
|
* we can do is to hope that things will still work (and disable RPM).
|
|
|
|
*/
|
2016-11-16 11:55:31 +03:00
|
|
|
i915_gem_init_swizzling(dev_priv);
|
2017-02-03 15:57:17 +03:00
|
|
|
i915_gem_restore_fences(dev_priv);
|
2014-04-14 21:24:39 +04:00
|
|
|
|
2014-09-30 12:56:44 +04:00
|
|
|
intel_runtime_pm_enable_interrupts(dev_priv);
|
2015-08-27 23:56:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* On VLV/CHV display interrupts are part of the display
|
|
|
|
* power well, so hpd is reinitialized from there. For
|
|
|
|
* everyone else do it here.
|
|
|
|
*/
|
2015-12-09 23:29:35 +03:00
|
|
|
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
|
2015-08-27 23:56:08 +03:00
|
|
|
intel_hpd_init(dev_priv);
|
|
|
|
|
2017-08-17 16:45:28 +03:00
|
|
|
intel_enable_ipc(dev_priv);
|
|
|
|
|
2015-12-16 03:52:19 +03:00
|
|
|
enable_rpm_wakeref_asserts(dev_priv);
|
|
|
|
|
2014-04-15 17:39:45 +04:00
|
|
|
if (ret)
|
|
|
|
DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
|
|
|
|
else
|
|
|
|
DRM_DEBUG_KMS("Device resumed\n");
|
|
|
|
|
|
|
|
return ret;
|
2013-12-07 02:32:13 +04:00
|
|
|
}
|
|
|
|
|
2016-06-24 16:00:26 +03:00
|
|
|
const struct dev_pm_ops i915_pm_ops = {
|
2014-10-23 20:23:28 +04:00
|
|
|
/*
|
|
|
|
* S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
|
|
|
|
* PMSG_RESUME]
|
|
|
|
*/
|
2011-08-16 23:34:10 +04:00
|
|
|
.suspend = i915_pm_suspend,
|
2014-04-01 20:55:22 +04:00
|
|
|
.suspend_late = i915_pm_suspend_late,
|
|
|
|
.resume_early = i915_pm_resume_early,
|
2011-08-16 23:34:10 +04:00
|
|
|
.resume = i915_pm_resume,
|
2014-10-23 20:23:28 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* S4 event handlers
|
|
|
|
* @freeze, @freeze_late : called (1) before creating the
|
|
|
|
* hibernation image [PMSG_FREEZE] and
|
|
|
|
* (2) after rebooting, before restoring
|
|
|
|
* the image [PMSG_QUIESCE]
|
|
|
|
* @thaw, @thaw_early : called (1) after creating the hibernation
|
|
|
|
* image, before writing it [PMSG_THAW]
|
|
|
|
* and (2) after failing to create or
|
|
|
|
* restore the image [PMSG_RECOVER]
|
|
|
|
* @poweroff, @poweroff_late: called after writing the hibernation
|
|
|
|
* image, before rebooting [PMSG_HIBERNATE]
|
|
|
|
* @restore, @restore_early : called after rebooting and restoring the
|
|
|
|
* hibernation image [PMSG_RESTORE]
|
|
|
|
*/
|
2016-05-14 09:26:32 +03:00
|
|
|
.freeze = i915_pm_freeze,
|
|
|
|
.freeze_late = i915_pm_freeze_late,
|
|
|
|
.thaw_early = i915_pm_thaw_early,
|
|
|
|
.thaw = i915_pm_thaw,
|
2014-10-23 20:23:24 +04:00
|
|
|
.poweroff = i915_pm_suspend,
|
2015-03-02 14:04:41 +03:00
|
|
|
.poweroff_late = i915_pm_poweroff_late,
|
2016-05-14 09:26:32 +03:00
|
|
|
.restore_early = i915_pm_restore_early,
|
|
|
|
.restore = i915_pm_restore,
|
2014-10-23 20:23:28 +04:00
|
|
|
|
|
|
|
/* S0ix (via runtime suspend) event handlers */
|
2014-03-08 03:12:33 +04:00
|
|
|
.runtime_suspend = intel_runtime_suspend,
|
|
|
|
.runtime_resume = intel_runtime_resume,
|
2009-12-16 08:36:10 +03:00
|
|
|
};
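/*
 * Editor's note: these handlers take effect once the ops table is
 * referenced from the PCI driver; in i915 that hookup lives in
 * i915_pci.c, roughly as sketched below (field values assumed for
 * illustration).
 */
static struct pci_driver sketch_i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,	/* dispatches the handlers above */
};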
|
|
|
|
|
2012-05-17 15:27:22 +04:00
|
|
|
static const struct vm_operations_struct i915_gem_vm_ops = {
|
2008-11-12 21:03:55 +03:00
|
|
|
.fault = i915_gem_fault,
|
2009-02-12 01:01:46 +03:00
|
|
|
.open = drm_gem_vm_open,
|
|
|
|
.close = drm_gem_vm_close,
|
2008-11-12 21:03:55 +03:00
|
|
|
};
|
|
|
|
|
2011-10-31 18:28:57 +04:00
|
|
|
static const struct file_operations i915_driver_fops = {
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.open = drm_open,
|
|
|
|
.release = drm_release,
|
|
|
|
.unlocked_ioctl = drm_ioctl,
|
|
|
|
.mmap = drm_gem_mmap,
|
|
|
|
.poll = drm_poll,
|
|
|
|
.read = drm_read,
|
|
|
|
.compat_ioctl = i915_compat_ioctl,
|
|
|
|
.llseek = noop_llseek,
|
|
|
|
};
|
|
|
|
|
2016-06-24 16:00:22 +03:00
|
|
|
static int
|
|
|
|
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file)
|
|
|
|
{
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct drm_ioctl_desc i915_ioctls[] = {
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
|
2017-01-27 12:40:08 +03:00
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
|
2016-06-24 16:00:22 +03:00
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
|
2017-01-10 15:10:44 +03:00
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
|
2016-06-24 16:00:22 +03:00
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
|
drm/i915: Add i915 perf infrastructure
Adds base i915 perf infrastructure for Gen performance metrics.
This adds a DRM_IOCTL_I915_PERF_OPEN ioctl that takes an array of uint64
properties to configure a stream of metrics and returns a new fd usable
with standard VFS system calls including read() to read typed and sized
records; ioctl() to enable or disable capture and poll() to wait for
data.
A stream is opened something like:
uint64_t properties[] = {
/* Single context sampling */
DRM_I915_PERF_PROP_CTX_HANDLE, ctx_handle,
/* Include OA reports in samples */
DRM_I915_PERF_PROP_SAMPLE_OA, true,
/* OA unit configuration */
DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
DRM_I915_PERF_PROP_OA_FORMAT, report_format,
DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
};
struct drm_i915_perf_open_param param = {
.flags = I915_PERF_FLAG_FD_CLOEXEC |
I915_PERF_FLAG_FD_NONBLOCK |
I915_PERF_FLAG_DISABLED,
.properties_ptr = (uint64_t)properties,
.num_properties = sizeof(properties) / 16,
};
int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
Records read all start with a common { type, size } header with
DRM_I915_PERF_RECORD_SAMPLE being of most interest. Sample records
contain an extensible number of fields and it's the
DRM_I915_PERF_PROP_SAMPLE_xyz properties given when opening that
determine what's included in every sample.
No specific streams are supported yet so any attempt to open a stream
will return an error.
v2:
use i915_gem_context_get() - Chris Wilson
v3:
update read() interface to avoid passing state struct - Chris Wilson
fix some rebase fallout, with i915-perf init/deinit
v4:
s/DRM_IORW/DRM_IOW/ - Emil Velikov
Signed-off-by: Robert Bragg <robert@sixbynine.org>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Sourab Gupta <sourab.gupta@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20161107194957.3385-2-robert@sixbynine.org
2016-11-07 22:49:47 +03:00
|
|
|
DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
|
2017-08-03 20:05:50 +03:00
|
|
|
DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
|
|
|
|
DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
|
2016-06-24 16:00:22 +03:00
|
|
|
};
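/*
 * Editor's note: to complement the open() example in the i915 perf commit
 * message above, a sketch of consuming the stream from user space. It
 * assumes the drm_i915_perf_record_header layout from i915_drm.h (__u32
 * type, __u16 pad, __u16 size, where size includes the header itself);
 * process_sample() is a hypothetical consumer.
 */
static void sketch_read_perf_records(int fd)
{
	uint8_t buf[64 * 1024];
	ssize_t len = read(fd, buf, sizeof(buf));
	size_t offset = 0;

	while (len > 0 &&
	       offset + sizeof(struct drm_i915_perf_record_header) <= (size_t)len) {
		const struct drm_i915_perf_record_header *header =
			(const void *)(buf + offset);

		if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
			process_sample(header + 1,
				       header->size - sizeof(*header));

		offset += header->size;	/* size includes the header */
	}
}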
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
static struct drm_driver driver = {
|
2011-08-25 21:55:54 +04:00
|
|
|
/* Don't use MTRRs here; the Xserver or userspace app should
|
|
|
|
* deal with them for Intel hardware.
|
2005-11-11 15:30:27 +03:00
|
|
|
*/
|
2008-07-30 23:06:12 +04:00
|
|
|
.driver_features =
|
2013-08-25 20:29:01 +04:00
|
|
|
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
|
2017-08-15 17:57:33 +03:00
|
|
|
DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
|
2017-02-10 19:35:21 +03:00
|
|
|
.release = i915_driver_release,
|
2008-07-30 23:06:12 +04:00
|
|
|
.open = i915_driver_open,
|
2005-11-10 14:16:34 +03:00
|
|
|
.lastclose = i915_driver_lastclose,
|
2008-07-30 23:06:12 +04:00
|
|
|
.postclose = i915_driver_postclose,
|
2010-01-09 02:45:33 +03:00
|
|
|
|
2016-08-04 09:52:45 +03:00
|
|
|
.gem_close_object = i915_gem_close_object,
|
2016-10-28 15:58:43 +03:00
|
|
|
.gem_free_object_unlocked = i915_gem_free_object,
|
2008-11-12 21:03:55 +03:00
|
|
|
.gem_vm_ops = &i915_gem_vm_ops,
|
2012-05-10 17:25:09 +04:00
|
|
|
|
|
|
|
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
|
|
|
|
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
|
|
|
|
.gem_prime_export = i915_gem_prime_export,
|
|
|
|
.gem_prime_import = i915_gem_prime_import,
|
|
|
|
|
2011-02-07 05:16:14 +03:00
|
|
|
.dumb_create = i915_gem_dumb_create,
|
2014-12-24 06:11:17 +03:00
|
|
|
.dumb_map_offset = i915_gem_mmap_gtt,
|
2005-04-17 02:20:36 +04:00
|
|
|
.ioctls = i915_ioctls,
|
2016-06-24 16:00:22 +03:00
|
|
|
.num_ioctls = ARRAY_SIZE(i915_ioctls),
|
2011-10-31 18:28:57 +04:00
|
|
|
.fops = &i915_driver_fops,
|
2005-11-10 14:16:34 +03:00
|
|
|
.name = DRIVER_NAME,
|
|
|
|
.desc = DRIVER_DESC,
|
|
|
|
.date = DRIVER_DATE,
|
|
|
|
.major = DRIVER_MAJOR,
|
|
|
|
.minor = DRIVER_MINOR,
|
|
|
|
.patchlevel = DRIVER_PATCHLEVEL,
|
2005-04-17 02:20:36 +04:00
|
|
|
};
|
2017-02-13 20:15:17 +03:00
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
|
|
|
|
#include "selftests/mock_drm.c"
|
|
|
|
#endif
|