WSL2-Linux-Kernel/drivers/gpu/drm/i915/i915_drv.c

/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
*/
/*
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc_helper.h>
static struct drm_driver driver;
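/*
 * The macros below collect the per-pipe, per-transcoder and palette MMIO
 * offsets (plus cursor register offsets) that the intel_device_info
 * definitions further down share between platforms.
 */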
#define GEN_DEFAULT_PIPEOFFSETS \
.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
#define GEN_CHV_PIPEOFFSETS \
.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
CHV_PIPE_C_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
CHV_TRANSCODER_C_OFFSET, }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
CHV_PALETTE_C_OFFSET }
#define CURSOR_OFFSETS \
.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
#define IVB_CURSOR_OFFSETS \
.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
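/* Degamma/gamma LUT sizes consumed by the display color management code. */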
#define BDW_COLORS \
.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
#define CHV_COLORS \
.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
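/*
 * One intel_device_info per platform variant: GPU generation, available
 * engines (ring_mask) and display feature flags. The PCI ID table below
 * maps each device ID to one of these structs.
 */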
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_845g_info = {
.gen = 2, .num_pipes = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i85x_info = {
.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i865g_info = {
.gen = 2, .num_pipes = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915g_info = {
.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
.gen = 3, .is_mobile = 1, .num_pipes = 2,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i965g_info = {
.gen = 4, .is_broadwater = 1, .num_pipes = 2,
.has_hotplug = 1,
.has_overlay = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i965gm_info = {
.gen = 4, .is_crestline = 1, .num_pipes = 2,
.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
.has_overlay = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_g33_info = {
.gen = 3, .is_g33 = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_g45_info = {
.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_gm45_info = {
.gen = 4, .is_g4x = 1, .num_pipes = 2,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_pineview_info = {
.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_ironlake_m_info = {
.gen = 5, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_sandybridge_d_info = {
.gen = 6, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_sandybridge_m_info = {
.gen = 6, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
#define GEN7_FEATURES \
.gen = 7, .num_pipes = 3, \
.need_gfx_hws = 1, .has_hotplug = 1, \
.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_llc = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS
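/* Shared by the Ivybridge variants below; HSW_FEATURES extends this set. */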
static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
.is_mobile = 1,
};
static const struct intel_device_info intel_ivybridge_q_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
.num_pipes = 0, /* legal, last one wins */
};
#define VLV_FEATURES \
.gen = 7, .num_pipes = 2, \
.need_gfx_hws = 1, .has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.display_mmio_offset = VLV_DISPLAY_BASE, \
GEN_DEFAULT_PIPEOFFSETS, \
CURSOR_OFFSETS
static const struct intel_device_info intel_valleyview_m_info = {
VLV_FEATURES,
.is_valleyview = 1,
.is_mobile = 1,
};
static const struct intel_device_info intel_valleyview_d_info = {
VLV_FEATURES,
.is_valleyview = 1,
};
#define HSW_FEATURES \
GEN7_FEATURES, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
.has_ddi = 1, \
.has_fpga_dbg = 1
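/* Haswell adds the VEBOX engine plus DDI and FPGA debug support to gen7. */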
static const struct intel_device_info intel_haswell_d_info = {
HSW_FEATURES,
.is_haswell = 1,
};
static const struct intel_device_info intel_haswell_m_info = {
HSW_FEATURES,
.is_haswell = 1,
.is_mobile = 1,
};
#define BDW_FEATURES \
HSW_FEATURES, \
BDW_COLORS
static const struct intel_device_info intel_broadwell_d_info = {
BDW_FEATURES,
.gen = 8,
};
static const struct intel_device_info intel_broadwell_m_info = {
BDW_FEATURES,
.gen = 8, .is_mobile = 1,
};
static const struct intel_device_info intel_broadwell_gt3d_info = {
BDW_FEATURES,
.gen = 8,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_broadwell_gt3m_info = {
BDW_FEATURES,
.gen = 8, .is_mobile = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_cherryview_info = {
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.is_cherryview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS,
CHV_COLORS,
};
static const struct intel_device_info intel_skylake_info = {
BDW_FEATURES,
.is_skylake = 1,
.gen = 9,
};
static const struct intel_device_info intel_skylake_gt3_info = {
BDW_FEATURES,
.is_skylake = 1,
.gen = 9,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_broxton_info = {
.is_preliminary = 1,
.is_broxton = 1,
.gen = 9,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.num_pipes = 3,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
BDW_COLORS,
};
static const struct intel_device_info intel_kabylake_info = {
BDW_FEATURES,
.is_kabylake = 1,
.gen = 9,
};
static const struct intel_device_info intel_kabylake_gt3_info = {
BDW_FEATURES,
.is_kabylake = 1,
.gen = 9,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
* and subvendor IDs, we need it to come before the more general IVB
* PCI ID matches, otherwise we'll use the wrong info struct above.
*/
static const struct pci_device_id pciidlist[] = {
INTEL_I830_IDS(&intel_i830_info),
INTEL_I845G_IDS(&intel_845g_info),
INTEL_I85X_IDS(&intel_i85x_info),
INTEL_I865G_IDS(&intel_i865g_info),
INTEL_I915G_IDS(&intel_i915g_info),
INTEL_I915GM_IDS(&intel_i915gm_info),
INTEL_I945G_IDS(&intel_i945g_info),
INTEL_I945GM_IDS(&intel_i945gm_info),
INTEL_I965G_IDS(&intel_i965g_info),
INTEL_G33_IDS(&intel_g33_info),
INTEL_I965GM_IDS(&intel_i965gm_info),
INTEL_GM45_IDS(&intel_gm45_info),
INTEL_G45_IDS(&intel_g45_info),
INTEL_PINEVIEW_IDS(&intel_pineview_info),
INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
INTEL_HSW_D_IDS(&intel_haswell_d_info),
INTEL_HSW_M_IDS(&intel_haswell_m_info),
INTEL_VLV_M_IDS(&intel_valleyview_m_info),
INTEL_VLV_D_IDS(&intel_valleyview_d_info),
INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
INTEL_CHV_IDS(&intel_cherryview_info),
INTEL_SKL_GT1_IDS(&intel_skylake_info),
INTEL_SKL_GT2_IDS(&intel_skylake_info),
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
INTEL_BXT_IDS(&intel_broxton_info),
INTEL_KBL_GT1_IDS(&intel_kabylake_info),
INTEL_KBL_GT2_IDS(&intel_kabylake_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
{0, 0, 0}
};
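/* Expose the PCI ID table so the module can be autoloaded on matching devices. */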
MODULE_DEVICE_TABLE(pci, pciidlist);
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */
	if (IS_GEN5(dev)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for the VMM, which only
	 * needs to expose the ISA bridge to let the driver know the real
	 * hardware underneath. This is a requirement from the virtualization
	 * team.
	 *
	 * In some virtualized environments (e.g. XEN) there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev));
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
				    pch->subsystem_vendor == 0x1af4 &&
				    pch->subsystem_device == 0x1100)) {
				dev_priv->pch_type = intel_virt_detect_pch(dev);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	pci_power_t opregion_target_state;
	int error;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	disable_rpm_wakeref_asserts(dev_priv);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	error = i915_gem_suspend(dev);
	if (error) {
		dev_err(&dev->pdev->dev,
			"GEM idle failed, resume might fail\n");
		goto out;
	}

	intel_guc_suspend(dev);
	intel_suspend_gt_powersave(dev);
	intel_display_suspend(dev);
	intel_dp_mst_suspend(dev);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);
	intel_suspend_hw(dev);

	i915_gem_suspend_gtt_mappings(dev);
	i915_save_state(dev);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_notify_adapter(dev, opregion_target_state);

	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	if (HAS_CSR(dev_priv))
		flush_work(&dev_priv->csr.work);

out:
	enable_rpm_wakeref_asserts(dev_priv);
	return error;
}
2015-03-02 14:04:41 +03:00
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
struct drm_i915_private *dev_priv = drm_dev->dev_private;
bool fw_csr;
int ret;
disable_rpm_wakeref_asserts(dev_priv);
fw_csr = !IS_BROXTON(dev_priv) &&
suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
* In case of firmware-assisted context save/restore, don't manually
* deinit the power domains. This also means the CSR/DMC firmware will
* stay active, it will power down any HW resources as required and
* also enable deeper system power states that would be blocked if the
* firmware was inactive.
*/
if (!fw_csr)
intel_power_domains_suspend(dev_priv);
ret = intel_suspend_complete(dev_priv);
if (ret) {
DRM_ERROR("Suspend complete failed: %d\n", ret);
if (!fw_csr)
intel_power_domains_init_hw(dev_priv, true);
goto out;
}
pci_disable_device(drm_dev->pdev);
/*
* During hibernation on some platforms the BIOS may try to access
* the device even though it's already in D3 and hang the machine. So
* leave the device in D0 on those platforms and hope the BIOS will
* power down the device properly. The issue was seen on multiple old
* GENs with different BIOS vendors, so having an explicit blacklist
* is impractical; apply the workaround on everything pre GEN6. The
* platforms where the issue was seen:
* Lenovo Thinkpad X301, X61s, X60, T60, X41
* Fujitsu FSC S7110
* Acer Aspire 1830T
*/
if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
pci_set_power_state(drm_dev->pdev, PCI_D3hot);
dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
out:
enable_rpm_wakeref_asserts(dev_priv);
return ret;
}
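/*
 * vga_switcheroo suspend entry point: validate the DRM device and PM event,
 * skip the sequence if this GPU has already been switched off, then run
 * i915_drm_suspend() followed by i915_drm_suspend_late().
 */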
int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
int error;
if (!dev || !dev->dev_private) {
DRM_ERROR("dev: %p\n", dev);
DRM_ERROR("DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
state.event != PM_EVENT_FREEZE))
return -EINVAL;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
error = i915_drm_suspend(dev);
if (error)
return error;
return i915_drm_suspend_late(dev, false);
}
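/*
 * Full (late) half of resume: restore GTT mappings and saved register state,
 * re-initialize GEM hardware and interrupts, bring modeset and hotplug back
 * up, notify opregion/fbdev and finally mark the modeset restore as done.
 */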
static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
disable_rpm_wakeref_asserts(dev_priv);
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
i915_restore_state(dev);
intel_opregion_setup(dev);
intel_init_pch_refclk(dev);
drm_mode_config_reset(dev);
/*
* Interrupts have to be enabled before any batches are run. If not, the
* GPU will hang. i915_gem_init_hw() will initiate batches to
* update/restore the context.
*
* Modeset enabling in intel_modeset_init_hw() also needs working
* interrupts.
*/
intel_runtime_pm_enable_interrupts(dev_priv);
mutex_lock(&dev->struct_mutex);
if (i915_gem_init_hw(dev)) {
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
}
mutex_unlock(&dev->struct_mutex);
intel_guc_resume(dev);
intel_modeset_init_hw(dev);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
intel_dp_mst_resume(dev);
intel_display_resume(dev);
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
* bother with the tiny race here where we might lose hotplug
* notifications.
*/
intel_hpd_init(dev_priv);
/* Config may have changed between suspend and resume */
drm_helper_hpd_irq_event(dev);
intel_opregion_init(dev);
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
intel_opregion_notify_adapter(dev, PCI_D0);
drm_kms_helper_poll_enable(dev);
enable_rpm_wakeref_asserts(dev_priv);
return 0;
}
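/*
 * Early half of resume, run before i915_drm_resume(): re-enable the PCI
 * device and undo the platform-specific suspend_complete work (VLV/CHV Gunit
 * restore, BXT DC9, HSW/BDW PC8), re-initializing the power domains unless
 * the DMC firmware kept them alive across suspend-to-idle.
 */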
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
/*
* We have a resume ordering issue with the snd-hda driver also
* requiring our device to be powered up. Due to the lack of a
* parent/child relationship we currently solve this with an early
* resume hook.
*
* FIXME: This should be solved with a special hdmi sink device or
* similar so that power domains can be employed.
*/
if (pci_enable_device(dev->pdev)) {
ret = -EIO;
goto out;
}
pci_set_master(dev->pdev);
disable_rpm_wakeref_asserts(dev_priv);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_resume_prepare(dev_priv, false);
if (ret)
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
ret);
intel_uncore_early_sanitize(dev, true);
if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv);
intel_uncore_sanitize(dev);
if (IS_BROXTON(dev_priv) ||
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
intel_power_domains_init_hw(dev_priv, true);
out:
dev_priv->suspended_to_idle = false;
enable_rpm_wakeref_asserts(dev_priv);
return ret;
}
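/*
 * vga_switcheroo resume counterpart of i915_suspend_switcheroo(): run the
 * early and full resume halves unless this GPU is switched off.
 */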
int i915_resume_switcheroo(struct drm_device *dev)
{
int ret;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
ret = i915_drm_resume_early(dev);
if (ret)
return ret;
return i915_drm_resume(dev);
}
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
*
* Reset the chip. Useful if a hang is detected. Returns zero on successful
* reset or otherwise an error code.
*
* Procedure is fairly simple:
* - reset the chip using the reset reg
* - re-init context state
* - re-init hardware status page
* - re-init ring buffer
* - re-init interrupt state
* - re-init display
*/
int i915_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_gpu_error *error = &dev_priv->gpu_error;
unsigned reset_counter;
int ret;
intel_reset_gt_powersave(dev);
mutex_lock(&dev->struct_mutex);
/* Clear any previous failed attempts at recovery. Time to try again. */
atomic_andnot(I915_WEDGED, &error->reset_counter);
/* Clear the reset-in-progress flag and increment the reset epoch. */
reset_counter = atomic_inc_return(&error->reset_counter);
if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
ret = -EIO;
goto error;
}
i915_gem_reset(dev);
ret = intel_gpu_reset(dev, ALL_ENGINES);
/* Also reset the gpu hangman. */
if (error->stop_rings != 0) {
DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
error->stop_rings = 0;
if (ret == -ENODEV) {
DRM_INFO("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
ret = 0;
}
}
if (i915_stop_ring_allow_warn(dev_priv))
pr_notice("drm/i915: Resetting chip after gpu hang\n");
if (ret) {
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
else
DRM_DEBUG_DRIVER("GPU reset disabled\n");
goto error;
}
intel_overlay_reset(dev_priv);
/* Ok, now get things going again... */
/*
* Everything depends on having the GTT running, so we need to start
* there. Fortunately we don't need to do this unless we reset the
* chip at a PCI level.
*
* Next we need to restore the context, but we don't use those
* yet either...
*
* Ring buffer needs to be re-initialized in the KMS case, or if X
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
ret = i915_gem_init_hw(dev);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
goto error;
}
mutex_unlock(&dev->struct_mutex);
/*
* rps/rc6 re-init is necessary to restore state lost after the
* reset and the re-install of gt irqs. Skip for ironlake per
* previous concerns that it doesn't respond well to some forms
* of re-init after reset.
*/
if (INTEL_INFO(dev)->gen > 5)
intel_enable_gt_powersave(dev);
return 0;
error:
atomic_or(I915_WEDGED, &error->reset_counter);
mutex_unlock(&dev->struct_mutex);
return ret;
}
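/*
 * PCI probe: refuse preliminary hardware unless the preliminary_hw_support
 * modparam is set, bind only to PCI function 0, defer probing on dual-GPU
 * MacBook Pros until apple-gmux can drive the panel, then register the
 * driver via drm_get_pci_dev().
 */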
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
DRM_INFO("This hardware requires preliminary hardware support.\n"
"See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
return -ENODEV;
}
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both
* functions have the same PCI-ID!
*/
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
/*
* apple-gmux is needed on dual GPU MacBook Pro
* to probe the panel if we're the inactive GPU.
*/
if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
apple_gmux_present() && pdev != vga_default_device() &&
!vga_switcheroo_handler_flags())
return -EPROBE_DEFER;
return drm_get_pci_dev(pdev, ent, &driver);
}
static void
i915_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_put_dev(dev);
}
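/*
 * System sleep PM callbacks: thin wrappers that bail out when the GPU has
 * been switched off and otherwise dispatch to the i915_drm_suspend/resume
 * helpers above. i915_pm_poweroff_late() passes hibernation=true so the
 * pre-GEN6 D3hot workaround applies.
 */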
static int i915_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
if (!drm_dev || !drm_dev->dev_private) {
dev_err(dev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
return i915_drm_suspend(drm_dev);
}
static int i915_pm_suspend_late(struct device *dev)
{
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
/*
* We have a suspend ordering issue with the snd-hda driver also
* requiring our device to be powered up. Due to the lack of a
* parent/child relationship we currently solve this with a late
* suspend hook.
*
* FIXME: This should be solved with a special hdmi sink device or
* similar so that power domains can be employed.
*/
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
return i915_drm_suspend_late(drm_dev, false);
}
static int i915_pm_poweroff_late(struct device *dev)
{
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
return i915_drm_suspend_late(drm_dev, true);
}
static int i915_pm_resume_early(struct device *dev)
{
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
return i915_drm_resume_early(drm_dev);
}
static int i915_pm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
return i915_drm_resume(drm_dev);
}
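/*
 * Platform-specific suspend_complete/resume_prepare helpers shared by the
 * system and runtime suspend paths: HSW/BDW toggle PC8, BXT toggles DC9
 * around its display core init/uninit, and VLV/CHV save/restore the Gunit
 * s0ix state below.
 */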
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
hsw_enable_pc8(dev_priv);
return 0;
}
static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
{
bxt_display_core_uninit(dev_priv);
bxt_enable_dc9(dev_priv);
return 0;
}
static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
{
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
return 0;
}
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
* defined in the VLV2_S0IXRegs document. This document marks all Gunit
* registers in the following way:
* - Driver: saved/restored by the driver
* - Punit : saved/restored by the Punit firmware
* - No, w/o marking: no need to save/restore, since the register is R/O or
* used internally by the HW in a way that doesn't depend on
* keeping the content across a suspend/resume.
* - Debug : used for debugging
*
* We save/restore all registers marked with 'Driver', with the following
* exceptions:
* - Registers out of use, including also registers marked with 'Debug'.
* These have no effect on the driver's operation, so we don't save/restore
* them to reduce the overhead.
* - Registers that are fully setup by an initialization function called from
* the resume path. For example many clock gating and RPS/RC6 registers.
* - Registers that provide the right functionality with their reset defaults.
*
* TODO: Except for registers that, based on the above 3 criteria, can be safely
* ignored, we save/restore all others, practically treating the HW context as
* a black-box for the driver. Further investigation is needed to reduce the
* saved/restored registers even further, by following the same 3 criteria.
*/
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
int i;
/* GAM 0x4000-0x4770 */
s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
s->arb_mode = I915_READ(ARB_MODE);
s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
s->ecochk = I915_READ(GAM_ECOCHK);
s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
/* MBC 0x9024-0x91D0, 0x8500 */
s->g3dctl = I915_READ(VLV_G3DCTL);
s->gsckgctl = I915_READ(VLV_GSCKGCTL);
s->mbctl = I915_READ(GEN6_MBCTL);
/* GCP 0x9400-0x9424, 0x8100-0x810C */
s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
s->rstctl = I915_READ(GEN6_RSTCTL);
s->misccpctl = I915_READ(GEN7_MISCCPCTL);
/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
s->gfxpause = I915_READ(GEN6_GFXPAUSE);
s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
s->rpdeuc = I915_READ(GEN6_RPDEUC);
s->ecobus = I915_READ(ECOBUS);
s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
s->rcedata = I915_READ(VLV_RCEDATA);
s->spare2gh = I915_READ(VLV_SPAREG2H);
/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
s->gt_imr = I915_READ(GTIMR);
s->gt_ier = I915_READ(GTIER);
s->pm_imr = I915_READ(GEN6_PMIMR);
s->pm_ier = I915_READ(GEN6_PMIER);
for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
/* GT SA CZ domain, 0x100000-0x138124 */
s->tilectl = I915_READ(TILECTL);
s->gt_fifoctl = I915_READ(GTFIFOCTL);
s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
s->pmwgicz = I915_READ(VLV_PMWGICZ);
/* Gunit-Display CZ domain, 0x182028-0x1821CF */
s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
s->pcbr = I915_READ(VLV_PCBR);
s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
/*
* Not saving any of:
* DFT, 0x9800-0x9EC0
* SARB, 0xB000-0xB1FC
* GAC, 0x5208-0x524C, 0x14000-0x14C000
* PCI CFG
*/
}
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
u32 val;
int i;
/* GAM 0x4000-0x4770 */
I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
I915_WRITE(GAM_ECOCHK, s->ecochk);
I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
/* MBC 0x9024-0x91D0, 0x8500 */
I915_WRITE(VLV_G3DCTL, s->g3dctl);
I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
I915_WRITE(GEN6_MBCTL, s->mbctl);
/* GCP 0x9400-0x9424, 0x8100-0x810C */
I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
I915_WRITE(GEN6_RSTCTL, s->rstctl);
I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
I915_WRITE(ECOBUS, s->ecobus);
I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
I915_WRITE(VLV_RCEDATA, s->rcedata);
I915_WRITE(VLV_SPAREG2H, s->spare2gh);
/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
I915_WRITE(GTIMR, s->gt_imr);
I915_WRITE(GTIER, s->gt_ier);
I915_WRITE(GEN6_PMIMR, s->pm_imr);
I915_WRITE(GEN6_PMIER, s->pm_ier);
for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
/* GT SA CZ domain, 0x100000-0x138124 */
I915_WRITE(TILECTL, s->tilectl);
I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
/*
* Preserve the GT allow wake and GFX force clock bit; they are not to
* be restored, as they are used to control the s0ix suspend/resume
* sequence by the caller.
*/
val = I915_READ(VLV_GTLC_WAKE_CTRL);
val &= VLV_GTLC_ALLOWWAKEREQ;
val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
val &= VLV_GFX_CLK_FORCE_ON_BIT;
val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
/* Gunit-Display CZ domain, 0x182028-0x1821CF */
I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
I915_WRITE(VLV_PCBR, s->pcbr);
I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
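/*
 * Force the Gunit GFX clock on or off via the survivability register; when
 * forcing it on, poll VLV_GFX_CLK_STATUS_BIT for up to 20ms. The s0ix
 * handlers below bracket their Gunit register accesses with it, roughly:
 *
 *	vlv_force_gfx_clock(dev_priv, true);
 *	... save or restore Gunit registers ...
 *	vlv_force_gfx_clock(dev_priv, false);
 */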
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
u32 val;
int err;
#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
if (force_on)
val |= VLV_GFX_CLK_FORCE_ON_BIT;
I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
if (!force_on)
return 0;
err = wait_for(COND, 20);
if (err)
DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
I915_READ(VLV_GTLC_SURVIVABILITY_REG));
return err;
#undef COND
}
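/*
 * Allow or disallow GT wake requests by toggling VLV_GTLC_ALLOWWAKEREQ and
 * waiting (up to 1ms) for VLV_GTLC_ALLOWWAKEACK in the power status register
 * to follow suit.
 */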
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
u32 val;
int err = 0;
val = I915_READ(VLV_GTLC_WAKE_CTRL);
val &= ~VLV_GTLC_ALLOWWAKEREQ;
if (allow)
val |= VLV_GTLC_ALLOWWAKEREQ;
I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
POSTING_READ(VLV_GTLC_WAKE_CTRL);
#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
allow)
err = wait_for(COND, 1);
if (err)
DRM_ERROR("timeout disabling GT waking\n");
return err;
#undef COND
}
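/*
 * Poll the render/media power-well status bits until they report the
 * requested on/off state; RC6 transitions can take up to ~2ms, hence the
 * 3ms timeout.
 */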
static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
bool wait_for_on)
{
u32 mask;
u32 val;
int err;
mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
if (COND)
return 0;
DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
onoff(wait_for_on),
I915_READ(VLV_GTLC_PW_STATUS));
/*
* RC6 transitioning can be delayed up to 2 msec (see
* valleyview_enable_rps), use 3 msec for safety.
*/
err = wait_for(COND, 3);
if (err)
DRM_ERROR("timeout waiting for GT wells to go %s\n",
onoff(wait_for_on));
return err;
#undef COND
}
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
return;
DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
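/*
 * s0ix suspend sequence: best-effort wait for the GT wells to power down,
 * sanity-check the wake-control bits, force the GFX clock on, disallow GT
 * wake, save the Gunit state on VLV (skipped on CHV), then release the
 * forced clock. On failure wake is re-allowed and the clock force dropped
 * before returning the error.
 */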
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
u32 mask;
int err;
/*
* Bspec defines the following GT well-on flags as debug only, so
* don't treat them as hard failures.
*/
(void)vlv_wait_for_gt_wells(dev_priv, false);
mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
vlv_check_no_gt_access(dev_priv);
err = vlv_force_gfx_clock(dev_priv, true);
if (err)
goto err1;
err = vlv_allow_gt_wake(dev_priv, false);
if (err)
goto err2;
if (!IS_CHERRYVIEW(dev_priv))
vlv_save_gunit_s0ix_state(dev_priv);
err = vlv_force_gfx_clock(dev_priv, false);
if (err)
goto err2;
return 0;
err2:
/* For safety always re-enable waking and disable gfx clock forcing */
vlv_allow_gt_wake(dev_priv, true);
err1:
vlv_force_gfx_clock(dev_priv, false);
return err;
}
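/*
 * Undo vlv_suspend_complete(): force the GFX clock on, restore the Gunit
 * state on VLV (skipped on CHV), re-allow GT wake and release the clock
 * force. The first error is recorded and returned while the sequence carries
 * on; on runtime resume clock gating and fences are re-initialized as well.
 */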
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume)
{
struct drm_device *dev = dev_priv->dev;
int err;
int ret;
/*
* If any of the steps fail, just try to continue; that's the best we
* can do at this point. Return the first error code (which will also
* leave RPM permanently disabled).
*/
ret = vlv_force_gfx_clock(dev_priv, true);
if (!IS_CHERRYVIEW(dev_priv))
vlv_restore_gunit_s0ix_state(dev_priv);
err = vlv_allow_gt_wake(dev_priv, true);
if (!ret)
ret = err;
err = vlv_force_gfx_clock(dev_priv, false);
if (!ret)
ret = err;
vlv_check_no_gt_access(dev_priv);
if (rpm_resume) {
intel_init_clock_gating(dev);
i915_gem_restore_fences(dev);
}
return ret;
}
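/*
 * Runtime suspend handler: backs off with -EAGAIN on struct_mutex
 * contention, releases all GTT mmaps, flushes hangcheck, suspends GuC and GT
 * powersave, disables interrupts and runs the platform suspend_complete
 * hook, re-enabling interrupts if that hook fails.
 */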
static int intel_runtime_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
return -ENODEV;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
return -ENODEV;
DRM_DEBUG_KMS("Suspending device\n");
/*
* We could deadlock here in case another thread holding struct_mutex
* calls RPM suspend concurrently, since the RPM suspend will wait
* first for this RPM suspend to finish. In this case the concurrent
* RPM resume will be followed by its RPM suspend counterpart. Still
* for consistency return -EAGAIN, which will reschedule this suspend.
*/
if (!mutex_trylock(&dev->struct_mutex)) {
DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
/*
* Bump the expiration timestamp, otherwise the suspend won't
* be rescheduled.
*/
pm_runtime_mark_last_busy(device);
return -EAGAIN;
}
disable_rpm_wakeref_asserts(dev_priv);
/*
* We are safe here against re-faults, since the fault handler takes
* an RPM reference.
*/
i915_gem_release_all_mmaps(dev_priv);
mutex_unlock(&dev->struct_mutex);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
intel_guc_suspend(dev);
intel_suspend_gt_powersave(dev);
intel_runtime_pm_disable_interrupts(dev_priv);
ret = intel_suspend_complete(dev_priv);
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
intel_runtime_pm_enable_interrupts(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
return ret;
}
intel_uncore_forcewake_reset(dev, false);
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
dev_priv->pm.suspended = true;
/*
* FIXME: We really should find a document that references the arguments
* used below!
*/
if (IS_BROADWELL(dev)) {
/*
* On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
* being detected, and the call we do at intel_runtime_resume()
* won't be able to restore them. Since PCI_D3hot matches the
* actual specification and appears to be working, use it.
*/
intel_opregion_notify_adapter(dev, PCI_D3hot);
} else {
/*
* Current firmware versions that depend on this opregion
* notification have repurposed the D1 definition to mean
* "runtime suspended" rather than the D3 you would normally
* expect, to distinguish it from notifications that might be
* sent via the suspend path.
*/
intel_opregion_notify_adapter(dev, PCI_D1);
}
assert_forcewakes_inactive(dev_priv);
DRM_DEBUG_KMS("Device suspended\n");
return 0;
}
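/*
 * For illustration only (a sketch, not called from here): the
 * wakeref_count warned about above is expected to be held around any
 * hardware access elsewhere in the driver, using the
 * intel_runtime_pm_get()/intel_runtime_pm_put() helpers, roughly:
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... access registers, submit work ...
 *	intel_runtime_pm_put(dev_priv);
 *
 * Only once the last such reference is dropped may the PM core invoke
 * intel_runtime_suspend() above.
 */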
static int intel_runtime_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
return -ENODEV;
DRM_DEBUG_KMS("Resuming device\n");
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
disable_rpm_wakeref_asserts(dev_priv);
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
intel_guc_resume(dev);
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev);
if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_resume_prepare(dev_priv, true);
/*
* No point in rolling things back in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
i915_gem_init_swizzling(dev);
gen6_update_ring_freq(dev);
intel_runtime_pm_enable_interrupts(dev_priv);
/*
* On VLV/CHV display interrupts are part of the display
* power well, so hpd is reinitialized from there. For
* everyone else do it here.
*/
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_init(dev_priv);
intel_enable_gt_powersave(dev);
enable_rpm_wakeref_asserts(dev_priv);
if (ret)
DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
else
DRM_DEBUG_KMS("Device resumed\n");
return ret;
}
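/*
 * Scheduling note (a sketch, assuming the generic runtime PM API from
 * linux/pm_runtime.h): dropping the driver's last runtime PM reference
 * typically ends with
 *
 *	pm_runtime_mark_last_busy(device);
 *	pm_runtime_put_autosuspend(device);
 *
 * which arms the autosuspend timer, so intel_runtime_suspend() runs only
 * after the configured delay elapses without a new reference being taken.
 */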
/*
* This function implements common functionality of runtime and system
* suspend sequence.
*/
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
int ret;
if (IS_BROXTON(dev_priv))
ret = bxt_suspend_complete(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = hsw_suspend_complete(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_suspend_complete(dev_priv);
else
ret = 0;
return ret;
}
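/*
 * The matching platform-specific resume steps are open-coded in
 * intel_runtime_resume() above (bxt_resume_prepare(), hsw_disable_pc8(),
 * vlv_resume_prepare()) rather than funneled through a common helper.
 */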
static const struct dev_pm_ops i915_pm_ops = {
/*
* S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
* PMSG_RESUME]
*/
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
/*
* S4 event handlers
* @freeze, @freeze_late : called (1) before creating the
* hibernation image [PMSG_FREEZE] and
* (2) after rebooting, before restoring
* the image [PMSG_QUIESCE]
* @thaw, @thaw_early : called (1) after creating the hibernation
* image, before writing it [PMSG_THAW]
* and (2) after failing to create or
* restore the image [PMSG_RECOVER]
* @poweroff, @poweroff_late: called after writing the hibernation
* image, before rebooting [PMSG_HIBERNATE]
* @restore, @restore_early : called after rebooting and restoring the
* hibernation image [PMSG_RESTORE]
*/
.freeze = i915_pm_suspend,
.freeze_late = i915_pm_suspend_late,
.thaw_early = i915_pm_resume_early,
.thaw = i915_pm_resume,
.poweroff = i915_pm_suspend,
.poweroff_late = i915_pm_poweroff_late,
.restore_early = i915_pm_resume_early,
.restore = i915_pm_resume,
/* S0ix (via runtime suspend) event handlers */
.runtime_suspend = intel_runtime_suspend,
.runtime_resume = intel_runtime_resume,
};
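/*
 * These callbacks are handed to the PM core through
 * i915_pci_driver.driver.pm below; no further registration is needed once
 * i915_init() registers the PCI driver via drm_pci_init().
 */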
static const struct vm_operations_struct i915_gem_vm_ops = {
.fault = i915_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct file_operations i915_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = i915_compat_ioctl,
#endif
.llseek = noop_llseek,
};
static struct drm_driver driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
*/
.driver_features =
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
DRIVER_RENDER | DRIVER_MODESET,
.load = i915_driver_load,
.unload = i915_driver_unload,
.open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.postclose = i915_driver_postclose,
.set_busid = drm_pci_set_busid,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
#endif
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = i915_gem_prime_export,
.gem_prime_import = i915_gem_prime_import,
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = &i915_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
static struct pci_driver i915_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = i915_pci_probe,
.remove = i915_pci_remove,
.driver.pm = &i915_pm_ops,
};
static int __init i915_init(void)
{
driver.num_ioctls = i915_max_ioctl;
/*
* Enable KMS by default, unless explicitly overridden by
* either the i915.modeset parameter or the
* vga_text_mode_force boot option.
*/
if (i915.modeset == 0)
driver.driver_features &= ~DRIVER_MODESET;
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && i915.modeset == -1)
driver.driver_features &= ~DRIVER_MODESET;
#endif
if (!(driver.driver_features & DRIVER_MODESET)) {
/* Silently fail loading to not upset userspace. */
DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
return 0;
}
if (i915.nuclear_pageflip)
driver.driver_features |= DRIVER_ATOMIC;
return drm_pci_init(&driver, &i915_pci_driver);
}
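/*
 * Usage note (for illustration): KMS can be forced off from the kernel
 * command line, e.g. with "i915.modeset=0", or with "nomodeset" (honoured
 * when i915.modeset is left at its default), in which case the checks
 * above let the module load succeed without registering the PCI driver.
 */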
static void __exit i915_exit(void)
{
if (!(driver.driver_features & DRIVER_MODESET))
return; /* Never loaded a driver. */
drm_pci_exit(&driver, &i915_pci_driver);
}
module_init(i915_init);
module_exit(i915_exit);
MODULE_AUTHOR("Tungsten Graphics, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");