Merge tag 'drm-intel-next-2015-12-04-1' of git://anongit.freedesktop.org/drm-intel into drm-next

This is the "fix igt basic test set issues" edition.

- more PSR fixes from Rodrigo, getting closer
- tons of fifo underrun fixes from Ville
- runtime pm fixes from Imre, Daniel Stone
- fix SDE interrupt handling properly (Jani Nikula)
- hsw/bdw fdi modeset sequence fixes (Ville)
- "don't register bad VGA connectors and fall over" fixes (Ville)
- more fbc fixes from Paulo
- and a grand total of exactly one feature item: Implement dma-buf/fence
  based cross-driver sync in the i915 pageflip path (Alex Goins)

* tag 'drm-intel-next-2015-12-04-1' of git://anongit.freedesktop.org/drm-intel: (70 commits)
  drm/i915: Update DRIVER_DATE to 20151204
  drm/i915/skl: Add SKL GT4 PCI IDs
  Revert "drm/i915: Extend LRC pinning to cover GPU context writeback"
  drm/i915: Correct the Ref clock value for BXT
  drm/i915: Restore skl_gt3 device info
  drm/i915: Fix RPS pointer passed from wait_ioctl to i915_wait_request
  Revert "drm/i915: Remove superfluous NULL check"
  drm/i915: Clean up device info structure definitions
  drm/i915: Remove superfluous NULL check
  drm/i915: Handle cdclk limits on broadwell.
  i915: wait for fence in prepare_plane_fb
  i915: wait for fence in mmio_flip_work_func
  drm/i915: Extend LRC pinning to cover GPU context writeback
  drm/i915/guc: Clean up locks in GuC
  drm/i915: only recompress FBC after flushing a drawing operation
  drm/i915: get rid of FBC {,de}activation messages
  drm/i915: kill fbc.uncompressed_size
  drm/i915: use a single intel_fbc_work struct
  drm/i915: check for FBC planes in the same place as the pipes
  drm/i915: alloc/free the FBC CFB during enable/disable
  ...
Commit 51bce5bc38
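The one feature item in this pull, dma-buf/fence based cross-driver sync in the pageflip path (wired into prepare_plane_fb and the mmio flip worker per the shortlog), is easier to follow with a minimal sketch. This is not the merged implementation; wait_for_dmabuf_fences() and its error handling are invented for the example, while reservation_object_wait_timeout_rcu() is the existing kernel helper the pattern relies on in this era.

```c
#include <linux/dma-buf.h>
#include <linux/reservation.h>
#include <linux/sched.h>

/*
 * Illustrative only: before flipping to a framebuffer whose backing object
 * was imported through dma-buf, wait for the fences another driver attached
 * to the buffer's reservation object, so scanout never races the producer.
 */
static int wait_for_dmabuf_fences(struct dma_buf *dma_buf)
{
	long ret;

	if (!dma_buf)
		return 0;	/* native buffer, nothing to sync against */

	/* wait_all = false: only the exclusive (write) fence matters here */
	ret = reservation_object_wait_timeout_rcu(dma_buf->resv,
						  false, true,
						  MAX_SCHEDULE_TIMEOUT);
	if (ret == -ERESTARTSYS)
		return ret;	/* interrupted, let the caller retry */

	return ret < 0 ? ret : 0;
}
```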
|
@ -1639,7 +1639,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
|
||||||
intel_runtime_pm_get(dev_priv);
|
intel_runtime_pm_get(dev_priv);
|
||||||
mutex_lock(&dev_priv->fbc.lock);
|
mutex_lock(&dev_priv->fbc.lock);
|
||||||
|
|
||||||
if (intel_fbc_enabled(dev_priv))
|
if (intel_fbc_is_active(dev_priv))
|
||||||
seq_puts(m, "FBC enabled\n");
|
seq_puts(m, "FBC enabled\n");
|
||||||
else
|
else
|
||||||
seq_printf(m, "FBC disabled: %s\n",
|
seq_printf(m, "FBC disabled: %s\n",
|
||||||
|
@ -1869,33 +1869,29 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
|
||||||
{
|
{
|
||||||
struct drm_info_node *node = m->private;
|
struct drm_info_node *node = m->private;
|
||||||
struct drm_device *dev = node->minor->dev;
|
struct drm_device *dev = node->minor->dev;
|
||||||
struct intel_fbdev *ifbdev = NULL;
|
struct intel_framebuffer *fbdev_fb = NULL;
|
||||||
struct intel_framebuffer *fb;
|
|
||||||
struct drm_framebuffer *drm_fb;
|
struct drm_framebuffer *drm_fb;
|
||||||
|
|
||||||
#ifdef CONFIG_DRM_FBDEV_EMULATION
|
#ifdef CONFIG_DRM_FBDEV_EMULATION
|
||||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
if (to_i915(dev)->fbdev) {
|
||||||
|
fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
|
||||||
|
|
||||||
ifbdev = dev_priv->fbdev;
|
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
|
||||||
if (ifbdev) {
|
fbdev_fb->base.width,
|
||||||
fb = to_intel_framebuffer(ifbdev->helper.fb);
|
fbdev_fb->base.height,
|
||||||
|
fbdev_fb->base.depth,
|
||||||
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
|
fbdev_fb->base.bits_per_pixel,
|
||||||
fb->base.width,
|
fbdev_fb->base.modifier[0],
|
||||||
fb->base.height,
|
atomic_read(&fbdev_fb->base.refcount.refcount));
|
||||||
fb->base.depth,
|
describe_obj(m, fbdev_fb->obj);
|
||||||
fb->base.bits_per_pixel,
|
seq_putc(m, '\n');
|
||||||
fb->base.modifier[0],
|
}
|
||||||
atomic_read(&fb->base.refcount.refcount));
|
|
||||||
describe_obj(m, fb->obj);
|
|
||||||
seq_putc(m, '\n');
|
|
||||||
}
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
mutex_lock(&dev->mode_config.fb_lock);
|
mutex_lock(&dev->mode_config.fb_lock);
|
||||||
drm_for_each_fb(drm_fb, dev) {
|
drm_for_each_fb(drm_fb, dev) {
|
||||||
fb = to_intel_framebuffer(drm_fb);
|
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
|
||||||
if (ifbdev && &fb->base == ifbdev->helper.fb)
|
if (fb == fbdev_fb)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
|
seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
|
||||||
|
@ -2473,15 +2469,15 @@ static int i915_guc_info(struct seq_file *m, void *data)
|
||||||
if (!HAS_GUC_SCHED(dev_priv->dev))
|
if (!HAS_GUC_SCHED(dev_priv->dev))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
if (mutex_lock_interruptible(&dev->struct_mutex))
|
||||||
|
return 0;
|
||||||
|
|
||||||
/* Take a local copy of the GuC data, so we can dump it at leisure */
|
/* Take a local copy of the GuC data, so we can dump it at leisure */
|
||||||
spin_lock(&dev_priv->guc.host2guc_lock);
|
|
||||||
guc = dev_priv->guc;
|
guc = dev_priv->guc;
|
||||||
if (guc.execbuf_client) {
|
if (guc.execbuf_client)
|
||||||
spin_lock(&guc.execbuf_client->wq_lock);
|
|
||||||
client = *guc.execbuf_client;
|
client = *guc.execbuf_client;
|
||||||
spin_unlock(&guc.execbuf_client->wq_lock);
|
|
||||||
}
|
mutex_unlock(&dev->struct_mutex);
|
||||||
spin_unlock(&dev_priv->guc.host2guc_lock);
|
|
||||||
|
|
||||||
seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
|
seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
|
||||||
seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
|
seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
|
||||||
|
@ -2582,8 +2578,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
|
||||||
}
|
}
|
||||||
seq_puts(m, "\n");
|
seq_puts(m, "\n");
|
||||||
|
|
||||||
/* CHV PSR has no kind of performance counter */
|
/*
|
||||||
if (HAS_DDI(dev)) {
|
* VLV/CHV PSR has no kind of performance counter
|
||||||
|
* SKL+ Perf counter is reset to 0 everytime DC state is entered
|
||||||
|
*/
|
||||||
|
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
|
||||||
psrperf = I915_READ(EDP_PSR_PERF_CNT) &
|
psrperf = I915_READ(EDP_PSR_PERF_CNT) &
|
||||||
EDP_PSR_PERF_CNT_MASK;
|
EDP_PSR_PERF_CNT_MASK;
|
||||||
|
|
||||||
|
@ -2685,71 +2684,6 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static const char *power_domain_str(enum intel_display_power_domain domain)
|
|
||||||
{
|
|
||||||
switch (domain) {
|
|
||||||
case POWER_DOMAIN_PIPE_A:
|
|
||||||
return "PIPE_A";
|
|
||||||
case POWER_DOMAIN_PIPE_B:
|
|
||||||
return "PIPE_B";
|
|
||||||
case POWER_DOMAIN_PIPE_C:
|
|
||||||
return "PIPE_C";
|
|
||||||
case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
|
|
||||||
return "PIPE_A_PANEL_FITTER";
|
|
||||||
case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
|
|
||||||
return "PIPE_B_PANEL_FITTER";
|
|
||||||
case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
|
|
||||||
return "PIPE_C_PANEL_FITTER";
|
|
||||||
case POWER_DOMAIN_TRANSCODER_A:
|
|
||||||
return "TRANSCODER_A";
|
|
||||||
case POWER_DOMAIN_TRANSCODER_B:
|
|
||||||
return "TRANSCODER_B";
|
|
||||||
case POWER_DOMAIN_TRANSCODER_C:
|
|
||||||
return "TRANSCODER_C";
|
|
||||||
case POWER_DOMAIN_TRANSCODER_EDP:
|
|
||||||
return "TRANSCODER_EDP";
|
|
||||||
case POWER_DOMAIN_PORT_DDI_A_LANES:
|
|
||||||
return "PORT_DDI_A_LANES";
|
|
||||||
case POWER_DOMAIN_PORT_DDI_B_LANES:
|
|
||||||
return "PORT_DDI_B_LANES";
|
|
||||||
case POWER_DOMAIN_PORT_DDI_C_LANES:
|
|
||||||
return "PORT_DDI_C_LANES";
|
|
||||||
case POWER_DOMAIN_PORT_DDI_D_LANES:
|
|
||||||
return "PORT_DDI_D_LANES";
|
|
||||||
case POWER_DOMAIN_PORT_DDI_E_LANES:
|
|
||||||
return "PORT_DDI_E_LANES";
|
|
||||||
case POWER_DOMAIN_PORT_DSI:
|
|
||||||
return "PORT_DSI";
|
|
||||||
case POWER_DOMAIN_PORT_CRT:
|
|
||||||
return "PORT_CRT";
|
|
||||||
case POWER_DOMAIN_PORT_OTHER:
|
|
||||||
return "PORT_OTHER";
|
|
||||||
case POWER_DOMAIN_VGA:
|
|
||||||
return "VGA";
|
|
||||||
case POWER_DOMAIN_AUDIO:
|
|
||||||
return "AUDIO";
|
|
||||||
case POWER_DOMAIN_PLLS:
|
|
||||||
return "PLLS";
|
|
||||||
case POWER_DOMAIN_AUX_A:
|
|
||||||
return "AUX_A";
|
|
||||||
case POWER_DOMAIN_AUX_B:
|
|
||||||
return "AUX_B";
|
|
||||||
case POWER_DOMAIN_AUX_C:
|
|
||||||
return "AUX_C";
|
|
||||||
case POWER_DOMAIN_AUX_D:
|
|
||||||
return "AUX_D";
|
|
||||||
case POWER_DOMAIN_GMBUS:
|
|
||||||
return "GMBUS";
|
|
||||||
case POWER_DOMAIN_MODESET:
|
|
||||||
return "MODESET";
|
|
||||||
case POWER_DOMAIN_INIT:
|
|
||||||
return "INIT";
|
|
||||||
default:
|
|
||||||
MISSING_CASE(domain);
|
|
||||||
return "?";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static int i915_power_domain_info(struct seq_file *m, void *unused)
|
static int i915_power_domain_info(struct seq_file *m, void *unused)
|
||||||
{
|
{
|
||||||
struct drm_info_node *node = m->private;
|
struct drm_info_node *node = m->private;
|
||||||
|
@ -2775,7 +2709,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
seq_printf(m, " %-23s %d\n",
|
seq_printf(m, " %-23s %d\n",
|
||||||
power_domain_str(power_domain),
|
intel_display_power_domain_str(power_domain),
|
||||||
power_domains->domain_use_count[power_domain]);
|
power_domains->domain_use_count[power_domain]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -228,121 +228,83 @@ static const struct intel_device_info intel_sandybridge_m_info = {
|
||||||
.need_gfx_hws = 1, .has_hotplug = 1, \
|
.need_gfx_hws = 1, .has_hotplug = 1, \
|
||||||
.has_fbc = 1, \
|
.has_fbc = 1, \
|
||||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
|
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
|
||||||
.has_llc = 1
|
.has_llc = 1, \
|
||||||
|
GEN_DEFAULT_PIPEOFFSETS, \
|
||||||
|
IVB_CURSOR_OFFSETS
|
||||||
|
|
||||||
static const struct intel_device_info intel_ivybridge_d_info = {
|
static const struct intel_device_info intel_ivybridge_d_info = {
|
||||||
GEN7_FEATURES,
|
GEN7_FEATURES,
|
||||||
.is_ivybridge = 1,
|
.is_ivybridge = 1,
|
||||||
GEN_DEFAULT_PIPEOFFSETS,
|
|
||||||
IVB_CURSOR_OFFSETS,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct intel_device_info intel_ivybridge_m_info = {
|
static const struct intel_device_info intel_ivybridge_m_info = {
|
||||||
GEN7_FEATURES,
|
GEN7_FEATURES,
|
||||||
.is_ivybridge = 1,
|
.is_ivybridge = 1,
|
||||||
.is_mobile = 1,
|
.is_mobile = 1,
|
||||||
GEN_DEFAULT_PIPEOFFSETS,
|
|
||||||
IVB_CURSOR_OFFSETS,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct intel_device_info intel_ivybridge_q_info = {
|
static const struct intel_device_info intel_ivybridge_q_info = {
|
||||||
GEN7_FEATURES,
|
GEN7_FEATURES,
|
||||||
.is_ivybridge = 1,
|
.is_ivybridge = 1,
|
||||||
.num_pipes = 0, /* legal, last one wins */
|
.num_pipes = 0, /* legal, last one wins */
|
||||||
GEN_DEFAULT_PIPEOFFSETS,
|
|
||||||
IVB_CURSOR_OFFSETS,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#define VLV_FEATURES \
|
||||||
|
.gen = 7, .num_pipes = 2, \
|
||||||
|
.need_gfx_hws = 1, .has_hotplug = 1, \
|
||||||
|
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
|
||||||
|
.display_mmio_offset = VLV_DISPLAY_BASE, \
|
||||||
|
GEN_DEFAULT_PIPEOFFSETS, \
|
||||||
|
CURSOR_OFFSETS
|
||||||
|
|
||||||
static const struct intel_device_info intel_valleyview_m_info = {
|
static const struct intel_device_info intel_valleyview_m_info = {
|
||||||
GEN7_FEATURES,
|
VLV_FEATURES,
|
||||||
.is_mobile = 1,
|
|
||||||
.num_pipes = 2,
|
|
||||||
.is_valleyview = 1,
|
.is_valleyview = 1,
|
||||||
.display_mmio_offset = VLV_DISPLAY_BASE,
|
.is_mobile = 1,
|
||||||
.has_fbc = 0, /* legal, last one wins */
|
|
||||||
.has_llc = 0, /* legal, last one wins */
|
|
||||||
GEN_DEFAULT_PIPEOFFSETS,
|
|
||||||
CURSOR_OFFSETS,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct intel_device_info intel_valleyview_d_info = {
|
static const struct intel_device_info intel_valleyview_d_info = {
|
||||||
GEN7_FEATURES,
|
VLV_FEATURES,
|
||||||
.num_pipes = 2,
|
|
||||||
.is_valleyview = 1,
|
.is_valleyview = 1,
|
||||||
.display_mmio_offset = VLV_DISPLAY_BASE,
|
|
||||||
.has_fbc = 0, /* legal, last one wins */
|
|
||||||
.has_llc = 0, /* legal, last one wins */
|
|
||||||
GEN_DEFAULT_PIPEOFFSETS,
|
|
||||||
CURSOR_OFFSETS,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#define HSW_FEATURES \
|
||||||
|
GEN7_FEATURES, \
|
||||||
|
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
|
||||||
|
.has_ddi = 1, \
|
||||||
|
.has_fpga_dbg = 1
|
||||||
|
|
||||||
static const struct intel_device_info intel_haswell_d_info = {
|
static const struct intel_device_info intel_haswell_d_info = {
|
||||||
GEN7_FEATURES,
|
HSW_FEATURES,
|
||||||
.is_haswell = 1,
|
.is_haswell = 1,
|
||||||
.has_ddi = 1,
|
|
||||||
.has_fpga_dbg = 1,
|
|
||||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
|
|
||||||
GEN_DEFAULT_PIPEOFFSETS,
|
|
||||||
IVB_CURSOR_OFFSETS,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct intel_device_info intel_haswell_m_info = {
|
static const struct intel_device_info intel_haswell_m_info = {
|
||||||
GEN7_FEATURES,
|
HSW_FEATURES,
|
||||||
.is_haswell = 1,
|
.is_haswell = 1,
|
||||||
.is_mobile = 1,
|
.is_mobile = 1,
|
||||||
.has_ddi = 1,
|
|
||||||
.has_fpga_dbg = 1,
|
|
||||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
|
|
||||||
GEN_DEFAULT_PIPEOFFSETS,
|
|
||||||
IVB_CURSOR_OFFSETS,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct intel_device_info intel_broadwell_d_info = {
|
static const struct intel_device_info intel_broadwell_d_info = {
|
||||||
.gen = 8, .num_pipes = 3,
|
HSW_FEATURES,
|
||||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
.gen = 8,
|
||||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
|
|
||||||
.has_llc = 1,
|
|
||||||
.has_ddi = 1,
|
|
||||||
.has_fpga_dbg = 1,
|
|
||||||
.has_fbc = 1,
|
|
||||||
GEN_DEFAULT_PIPEOFFSETS,
|
|
||||||
IVB_CURSOR_OFFSETS,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct intel_device_info intel_broadwell_m_info = {
|
static const struct intel_device_info intel_broadwell_m_info = {
|
||||||
.gen = 8, .is_mobile = 1, .num_pipes = 3,
|
HSW_FEATURES,
|
||||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
.gen = 8, .is_mobile = 1,
|
||||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
|
|
||||||
.has_llc = 1,
|
|
||||||
.has_ddi = 1,
|
|
||||||
.has_fpga_dbg = 1,
|
|
||||||
.has_fbc = 1,
|
|
||||||
GEN_DEFAULT_PIPEOFFSETS,
|
|
||||||
IVB_CURSOR_OFFSETS,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct intel_device_info intel_broadwell_gt3d_info = {
|
static const struct intel_device_info intel_broadwell_gt3d_info = {
|
||||||
.gen = 8, .num_pipes = 3,
|
HSW_FEATURES,
|
||||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
.gen = 8,
|
||||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
|
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
|
||||||
.has_llc = 1,
|
|
||||||
.has_ddi = 1,
|
|
||||||
.has_fpga_dbg = 1,
|
|
||||||
.has_fbc = 1,
|
|
||||||
GEN_DEFAULT_PIPEOFFSETS,
|
|
||||||
IVB_CURSOR_OFFSETS,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct intel_device_info intel_broadwell_gt3m_info = {
|
static const struct intel_device_info intel_broadwell_gt3m_info = {
|
||||||
.gen = 8, .is_mobile = 1, .num_pipes = 3,
|
HSW_FEATURES,
|
||||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
.gen = 8, .is_mobile = 1,
|
||||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
|
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
|
||||||
.has_llc = 1,
|
|
||||||
.has_ddi = 1,
|
|
||||||
.has_fpga_dbg = 1,
|
|
||||||
.has_fbc = 1,
|
|
||||||
GEN_DEFAULT_PIPEOFFSETS,
|
|
||||||
IVB_CURSOR_OFFSETS,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct intel_device_info intel_cherryview_info = {
|
static const struct intel_device_info intel_cherryview_info = {
|
||||||
|
@ -356,29 +318,16 @@ static const struct intel_device_info intel_cherryview_info = {
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct intel_device_info intel_skylake_info = {
|
static const struct intel_device_info intel_skylake_info = {
|
||||||
|
HSW_FEATURES,
|
||||||
.is_skylake = 1,
|
.is_skylake = 1,
|
||||||
.gen = 9, .num_pipes = 3,
|
.gen = 9,
|
||||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
|
||||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
|
|
||||||
.has_llc = 1,
|
|
||||||
.has_ddi = 1,
|
|
||||||
.has_fpga_dbg = 1,
|
|
||||||
.has_fbc = 1,
|
|
||||||
GEN_DEFAULT_PIPEOFFSETS,
|
|
||||||
IVB_CURSOR_OFFSETS,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct intel_device_info intel_skylake_gt3_info = {
|
static const struct intel_device_info intel_skylake_gt3_info = {
|
||||||
|
HSW_FEATURES,
|
||||||
.is_skylake = 1,
|
.is_skylake = 1,
|
||||||
.gen = 9, .num_pipes = 3,
|
.gen = 9,
|
||||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
|
||||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
|
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
|
||||||
.has_llc = 1,
|
|
||||||
.has_ddi = 1,
|
|
||||||
.has_fpga_dbg = 1,
|
|
||||||
.has_fbc = 1,
|
|
||||||
GEN_DEFAULT_PIPEOFFSETS,
|
|
||||||
IVB_CURSOR_OFFSETS,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct intel_device_info intel_broxton_info = {
|
static const struct intel_device_info intel_broxton_info = {
|
||||||
|
@ -396,33 +345,18 @@ static const struct intel_device_info intel_broxton_info = {
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct intel_device_info intel_kabylake_info = {
|
static const struct intel_device_info intel_kabylake_info = {
|
||||||
|
HSW_FEATURES,
|
||||||
.is_preliminary = 1,
|
.is_preliminary = 1,
|
||||||
.is_kabylake = 1,
|
.is_kabylake = 1,
|
||||||
.gen = 9,
|
.gen = 9,
|
||||||
.num_pipes = 3,
|
|
||||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
|
||||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
|
|
||||||
.has_llc = 1,
|
|
||||||
.has_ddi = 1,
|
|
||||||
.has_fpga_dbg = 1,
|
|
||||||
.has_fbc = 1,
|
|
||||||
GEN_DEFAULT_PIPEOFFSETS,
|
|
||||||
IVB_CURSOR_OFFSETS,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct intel_device_info intel_kabylake_gt3_info = {
|
static const struct intel_device_info intel_kabylake_gt3_info = {
|
||||||
|
HSW_FEATURES,
|
||||||
.is_preliminary = 1,
|
.is_preliminary = 1,
|
||||||
.is_kabylake = 1,
|
.is_kabylake = 1,
|
||||||
.gen = 9,
|
.gen = 9,
|
||||||
.num_pipes = 3,
|
|
||||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
|
||||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
|
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
|
||||||
.has_llc = 1,
|
|
||||||
.has_ddi = 1,
|
|
||||||
.has_fpga_dbg = 1,
|
|
||||||
.has_fbc = 1,
|
|
||||||
GEN_DEFAULT_PIPEOFFSETS,
|
|
||||||
IVB_CURSOR_OFFSETS,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -465,6 +399,7 @@ static const struct pci_device_id pciidlist[] = {
|
||||||
INTEL_SKL_GT1_IDS(&intel_skylake_info),
|
INTEL_SKL_GT1_IDS(&intel_skylake_info),
|
||||||
INTEL_SKL_GT2_IDS(&intel_skylake_info),
|
INTEL_SKL_GT2_IDS(&intel_skylake_info),
|
||||||
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
|
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
|
||||||
|
INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
|
||||||
INTEL_BXT_IDS(&intel_broxton_info),
|
INTEL_BXT_IDS(&intel_broxton_info),
|
||||||
INTEL_KBL_GT1_IDS(&intel_kabylake_info),
|
INTEL_KBL_GT1_IDS(&intel_kabylake_info),
|
||||||
INTEL_KBL_GT2_IDS(&intel_kabylake_info),
|
INTEL_KBL_GT2_IDS(&intel_kabylake_info),
|
||||||
|
@ -565,7 +500,8 @@ void intel_detect_pch(struct drm_device *dev)
|
||||||
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
|
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
|
||||||
WARN_ON(!IS_SKYLAKE(dev) &&
|
WARN_ON(!IS_SKYLAKE(dev) &&
|
||||||
!IS_KABYLAKE(dev));
|
!IS_KABYLAKE(dev));
|
||||||
} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
|
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
|
||||||
|
(id == INTEL_PCH_QEMU_DEVICE_ID_TYPE)) {
|
||||||
dev_priv->pch_type = intel_virt_detect_pch(dev);
|
dev_priv->pch_type = intel_virt_detect_pch(dev);
|
||||||
} else
|
} else
|
||||||
continue;
|
continue;
|
||||||
|
@ -624,6 +560,14 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
|
||||||
bool rpm_resume);
|
bool rpm_resume);
|
||||||
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
|
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
|
||||||
|
|
||||||
|
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
|
||||||
|
{
|
||||||
|
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
|
||||||
|
if (acpi_target_system_state() < ACPI_STATE_S3)
|
||||||
|
return true;
|
||||||
|
#endif
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
static int i915_drm_suspend(struct drm_device *dev)
|
static int i915_drm_suspend(struct drm_device *dev)
|
||||||
{
|
{
|
||||||
|
@ -676,11 +620,7 @@ static int i915_drm_suspend(struct drm_device *dev)
|
||||||
|
|
||||||
i915_save_state(dev);
|
i915_save_state(dev);
|
||||||
|
|
||||||
opregion_target_state = PCI_D3cold;
|
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
|
||||||
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
|
|
||||||
if (acpi_target_system_state() < ACPI_STATE_S3)
|
|
||||||
opregion_target_state = PCI_D1;
|
|
||||||
#endif
|
|
||||||
intel_opregion_notify_adapter(dev, opregion_target_state);
|
intel_opregion_notify_adapter(dev, opregion_target_state);
|
||||||
|
|
||||||
intel_uncore_forcewake_reset(dev, false);
|
intel_uncore_forcewake_reset(dev, false);
|
||||||
|
@ -701,15 +641,26 @@ static int i915_drm_suspend(struct drm_device *dev)
|
||||||
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
|
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
|
||||||
{
|
{
|
||||||
struct drm_i915_private *dev_priv = drm_dev->dev_private;
|
struct drm_i915_private *dev_priv = drm_dev->dev_private;
|
||||||
|
bool fw_csr;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
intel_power_domains_suspend(dev_priv);
|
fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
|
||||||
|
/*
|
||||||
|
* In case of firmware assisted context save/restore don't manually
|
||||||
|
* deinit the power domains. This also means the CSR/DMC firmware will
|
||||||
|
* stay active, it will power down any HW resources as required and
|
||||||
|
* also enable deeper system power states that would be blocked if the
|
||||||
|
* firmware was inactive.
|
||||||
|
*/
|
||||||
|
if (!fw_csr)
|
||||||
|
intel_power_domains_suspend(dev_priv);
|
||||||
|
|
||||||
ret = intel_suspend_complete(dev_priv);
|
ret = intel_suspend_complete(dev_priv);
|
||||||
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
DRM_ERROR("Suspend complete failed: %d\n", ret);
|
DRM_ERROR("Suspend complete failed: %d\n", ret);
|
||||||
intel_power_domains_init_hw(dev_priv, true);
|
if (!fw_csr)
|
||||||
|
intel_power_domains_init_hw(dev_priv, true);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@ -730,6 +681,8 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
|
||||||
if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
|
if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
|
||||||
pci_set_power_state(drm_dev->pdev, PCI_D3hot);
|
pci_set_power_state(drm_dev->pdev, PCI_D3hot);
|
||||||
|
|
||||||
|
dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -842,8 +795,10 @@ static int i915_drm_resume_early(struct drm_device *dev)
|
||||||
* FIXME: This should be solved with a special hdmi sink device or
|
* FIXME: This should be solved with a special hdmi sink device or
|
||||||
* similar so that power domains can be employed.
|
* similar so that power domains can be employed.
|
||||||
*/
|
*/
|
||||||
if (pci_enable_device(dev->pdev))
|
if (pci_enable_device(dev->pdev)) {
|
||||||
return -EIO;
|
ret = -EIO;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
pci_set_master(dev->pdev);
|
pci_set_master(dev->pdev);
|
||||||
|
|
||||||
|
@ -861,7 +816,12 @@ static int i915_drm_resume_early(struct drm_device *dev)
|
||||||
hsw_disable_pc8(dev_priv);
|
hsw_disable_pc8(dev_priv);
|
||||||
|
|
||||||
intel_uncore_sanitize(dev);
|
intel_uncore_sanitize(dev);
|
||||||
intel_power_domains_init_hw(dev_priv, true);
|
|
||||||
|
if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
|
||||||
|
intel_power_domains_init_hw(dev_priv, true);
|
||||||
|
|
||||||
|
out:
|
||||||
|
dev_priv->suspended_to_idle = false;
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
|
@ -57,7 +57,7 @@
|
||||||
|
|
||||||
#define DRIVER_NAME "i915"
|
#define DRIVER_NAME "i915"
|
||||||
#define DRIVER_DESC "Intel Graphics"
|
#define DRIVER_DESC "Intel Graphics"
|
||||||
#define DRIVER_DATE "20151120"
|
#define DRIVER_DATE "20151204"
|
||||||
|
|
||||||
#undef WARN_ON
|
#undef WARN_ON
|
||||||
/* Many gcc seem to no see through this and fall over :( */
|
/* Many gcc seem to no see through this and fall over :( */
|
||||||
|
@ -902,7 +902,6 @@ struct i915_fbc {
|
||||||
/* This is always the inner lock when overlapping with struct_mutex and
|
/* This is always the inner lock when overlapping with struct_mutex and
|
||||||
* it's the outer lock when overlapping with stolen_lock. */
|
* it's the outer lock when overlapping with stolen_lock. */
|
||||||
struct mutex lock;
|
struct mutex lock;
|
||||||
unsigned long uncompressed_size;
|
|
||||||
unsigned threshold;
|
unsigned threshold;
|
||||||
unsigned int fb_id;
|
unsigned int fb_id;
|
||||||
unsigned int possible_framebuffer_bits;
|
unsigned int possible_framebuffer_bits;
|
||||||
|
@ -915,21 +914,21 @@ struct i915_fbc {
|
||||||
|
|
||||||
bool false_color;
|
bool false_color;
|
||||||
|
|
||||||
/* Tracks whether the HW is actually enabled, not whether the feature is
|
|
||||||
* possible. */
|
|
||||||
bool enabled;
|
bool enabled;
|
||||||
|
bool active;
|
||||||
|
|
||||||
struct intel_fbc_work {
|
struct intel_fbc_work {
|
||||||
struct delayed_work work;
|
bool scheduled;
|
||||||
struct intel_crtc *crtc;
|
struct work_struct work;
|
||||||
struct drm_framebuffer *fb;
|
struct drm_framebuffer *fb;
|
||||||
} *fbc_work;
|
unsigned long enable_jiffies;
|
||||||
|
} work;
|
||||||
|
|
||||||
const char *no_fbc_reason;
|
const char *no_fbc_reason;
|
||||||
|
|
||||||
bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
|
bool (*is_active)(struct drm_i915_private *dev_priv);
|
||||||
void (*enable_fbc)(struct intel_crtc *crtc);
|
void (*activate)(struct intel_crtc *crtc);
|
||||||
void (*disable_fbc)(struct drm_i915_private *dev_priv);
|
void (*deactivate)(struct drm_i915_private *dev_priv);
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -1885,6 +1884,7 @@ struct drm_i915_private {
|
||||||
u32 chv_phy_control;
|
u32 chv_phy_control;
|
||||||
|
|
||||||
u32 suspend_count;
|
u32 suspend_count;
|
||||||
|
bool suspended_to_idle;
|
||||||
struct i915_suspend_saved_registers regfile;
|
struct i915_suspend_saved_registers regfile;
|
||||||
struct vlv_s0ix_state vlv_s0ix_state;
|
struct vlv_s0ix_state vlv_s0ix_state;
|
||||||
|
|
||||||
|
@ -2608,11 +2608,13 @@ struct drm_i915_cmd_table {
|
||||||
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
|
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
|
||||||
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
|
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
|
||||||
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
|
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
|
||||||
|
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
|
||||||
|
|
||||||
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
|
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
|
||||||
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
|
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
|
||||||
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
|
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
|
||||||
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
|
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
|
||||||
|
#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
|
||||||
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
|
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
|
||||||
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
|
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
|
||||||
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
|
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
|
||||||
|
@ -2749,17 +2751,47 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
|
||||||
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
|
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
|
||||||
uint32_t mask,
|
uint32_t mask,
|
||||||
uint32_t bits);
|
uint32_t bits);
|
||||||
void
|
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
|
||||||
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
|
uint32_t interrupt_mask,
|
||||||
void
|
uint32_t enabled_irq_mask);
|
||||||
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
|
static inline void
|
||||||
|
ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
|
||||||
|
{
|
||||||
|
ilk_update_display_irq(dev_priv, bits, bits);
|
||||||
|
}
|
||||||
|
static inline void
|
||||||
|
ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
|
||||||
|
{
|
||||||
|
ilk_update_display_irq(dev_priv, bits, 0);
|
||||||
|
}
|
||||||
|
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
|
||||||
|
enum pipe pipe,
|
||||||
|
uint32_t interrupt_mask,
|
||||||
|
uint32_t enabled_irq_mask);
|
||||||
|
static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
|
||||||
|
enum pipe pipe, uint32_t bits)
|
||||||
|
{
|
||||||
|
bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
|
||||||
|
}
|
||||||
|
static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
|
||||||
|
enum pipe pipe, uint32_t bits)
|
||||||
|
{
|
||||||
|
bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
|
||||||
|
}
|
||||||
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
|
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
|
||||||
uint32_t interrupt_mask,
|
uint32_t interrupt_mask,
|
||||||
uint32_t enabled_irq_mask);
|
uint32_t enabled_irq_mask);
|
||||||
#define ibx_enable_display_interrupt(dev_priv, bits) \
|
static inline void
|
||||||
ibx_display_interrupt_update((dev_priv), (bits), (bits))
|
ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
|
||||||
#define ibx_disable_display_interrupt(dev_priv, bits) \
|
{
|
||||||
ibx_display_interrupt_update((dev_priv), (bits), 0)
|
ibx_display_interrupt_update(dev_priv, bits, bits);
|
||||||
|
}
|
||||||
|
static inline void
|
||||||
|
ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
|
||||||
|
{
|
||||||
|
ibx_display_interrupt_update(dev_priv, bits, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
/* i915_gem.c */
|
/* i915_gem.c */
|
||||||
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
|
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
|
||||||
|
|
|
@ -3077,7 +3077,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||||
if (ret == 0)
|
if (ret == 0)
|
||||||
ret = __i915_wait_request(req[i], reset_counter, true,
|
ret = __i915_wait_request(req[i], reset_counter, true,
|
||||||
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
|
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
|
||||||
file->driver_priv);
|
to_rps_client(file));
|
||||||
i915_gem_request_unreference__unlocked(req[i]);
|
i915_gem_request_unreference__unlocked(req[i]);
|
||||||
}
|
}
|
||||||
return ret;
|
return ret;
|
||||||
|
|
|
@ -141,8 +141,6 @@ static void i915_gem_context_clean(struct intel_context *ctx)
|
||||||
if (!ppgtt)
|
if (!ppgtt)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
WARN_ON(!list_empty(&ppgtt->base.active_list));
|
|
||||||
|
|
||||||
list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
|
list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
|
||||||
mm_list) {
|
mm_list) {
|
||||||
if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
|
if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
|
||||||
|
|
|
@ -86,7 +86,6 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
|
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
|
||||||
spin_lock(&dev_priv->guc.host2guc_lock);
|
|
||||||
|
|
||||||
dev_priv->guc.action_count += 1;
|
dev_priv->guc.action_count += 1;
|
||||||
dev_priv->guc.action_cmd = data[0];
|
dev_priv->guc.action_cmd = data[0];
|
||||||
|
@ -119,7 +118,6 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
|
||||||
}
|
}
|
||||||
dev_priv->guc.action_status = status;
|
dev_priv->guc.action_status = status;
|
||||||
|
|
||||||
spin_unlock(&dev_priv->guc.host2guc_lock);
|
|
||||||
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
|
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -292,16 +290,12 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
|
||||||
const uint32_t cacheline_size = cache_line_size();
|
const uint32_t cacheline_size = cache_line_size();
|
||||||
uint32_t offset;
|
uint32_t offset;
|
||||||
|
|
||||||
spin_lock(&guc->host2guc_lock);
|
|
||||||
|
|
||||||
/* Doorbell uses a single cache line within a page */
|
/* Doorbell uses a single cache line within a page */
|
||||||
offset = offset_in_page(guc->db_cacheline);
|
offset = offset_in_page(guc->db_cacheline);
|
||||||
|
|
||||||
/* Moving to next cache line to reduce contention */
|
/* Moving to next cache line to reduce contention */
|
||||||
guc->db_cacheline += cacheline_size;
|
guc->db_cacheline += cacheline_size;
|
||||||
|
|
||||||
spin_unlock(&guc->host2guc_lock);
|
|
||||||
|
|
||||||
DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
|
DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
|
||||||
offset, guc->db_cacheline, cacheline_size);
|
offset, guc->db_cacheline, cacheline_size);
|
||||||
|
|
||||||
|
@ -322,13 +316,11 @@ static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
|
||||||
const uint16_t end = start + half;
|
const uint16_t end = start + half;
|
||||||
uint16_t id;
|
uint16_t id;
|
||||||
|
|
||||||
spin_lock(&guc->host2guc_lock);
|
|
||||||
id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
|
id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
|
||||||
if (id == end)
|
if (id == end)
|
||||||
id = GUC_INVALID_DOORBELL_ID;
|
id = GUC_INVALID_DOORBELL_ID;
|
||||||
else
|
else
|
||||||
bitmap_set(guc->doorbell_bitmap, id, 1);
|
bitmap_set(guc->doorbell_bitmap, id, 1);
|
||||||
spin_unlock(&guc->host2guc_lock);
|
|
||||||
|
|
||||||
DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
|
DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
|
||||||
hi_pri ? "high" : "normal", id);
|
hi_pri ? "high" : "normal", id);
|
||||||
|
@ -338,9 +330,7 @@ static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
|
||||||
|
|
||||||
static void release_doorbell(struct intel_guc *guc, uint16_t id)
|
static void release_doorbell(struct intel_guc *guc, uint16_t id)
|
||||||
{
|
{
|
||||||
spin_lock(&guc->host2guc_lock);
|
|
||||||
bitmap_clear(guc->doorbell_bitmap, id, 1);
|
bitmap_clear(guc->doorbell_bitmap, id, 1);
|
||||||
spin_unlock(&guc->host2guc_lock);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -487,16 +477,13 @@ static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
|
||||||
struct guc_process_desc *desc;
|
struct guc_process_desc *desc;
|
||||||
void *base;
|
void *base;
|
||||||
u32 size = sizeof(struct guc_wq_item);
|
u32 size = sizeof(struct guc_wq_item);
|
||||||
int ret = 0, timeout_counter = 200;
|
int ret = -ETIMEDOUT, timeout_counter = 200;
|
||||||
|
|
||||||
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
|
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
|
||||||
desc = base + gc->proc_desc_offset;
|
desc = base + gc->proc_desc_offset;
|
||||||
|
|
||||||
while (timeout_counter-- > 0) {
|
while (timeout_counter-- > 0) {
|
||||||
ret = wait_for_atomic(CIRC_SPACE(gc->wq_tail, desc->head,
|
if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
|
||||||
gc->wq_size) >= size, 1);
|
|
||||||
|
|
||||||
if (!ret) {
|
|
||||||
*offset = gc->wq_tail;
|
*offset = gc->wq_tail;
|
||||||
|
|
||||||
/* advance the tail for next workqueue item */
|
/* advance the tail for next workqueue item */
|
||||||
|
@ -505,7 +492,11 @@ static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
|
||||||
|
|
||||||
/* this will break the loop */
|
/* this will break the loop */
|
||||||
timeout_counter = 0;
|
timeout_counter = 0;
|
||||||
|
ret = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (timeout_counter)
|
||||||
|
usleep_range(1000, 2000);
|
||||||
};
|
};
|
||||||
|
|
||||||
kunmap_atomic(base);
|
kunmap_atomic(base);
|
||||||
|
@ -597,15 +588,12 @@ int i915_guc_submit(struct i915_guc_client *client,
|
||||||
{
|
{
|
||||||
struct intel_guc *guc = client->guc;
|
struct intel_guc *guc = client->guc;
|
||||||
enum intel_ring_id ring_id = rq->ring->id;
|
enum intel_ring_id ring_id = rq->ring->id;
|
||||||
unsigned long flags;
|
|
||||||
int q_ret, b_ret;
|
int q_ret, b_ret;
|
||||||
|
|
||||||
/* Need this because of the deferred pin ctx and ring */
|
/* Need this because of the deferred pin ctx and ring */
|
||||||
/* Shall we move this right after ring is pinned? */
|
/* Shall we move this right after ring is pinned? */
|
||||||
lr_context_update(rq);
|
lr_context_update(rq);
|
||||||
|
|
||||||
spin_lock_irqsave(&client->wq_lock, flags);
|
|
||||||
|
|
||||||
q_ret = guc_add_workqueue_item(client, rq);
|
q_ret = guc_add_workqueue_item(client, rq);
|
||||||
if (q_ret == 0)
|
if (q_ret == 0)
|
||||||
b_ret = guc_ring_doorbell(client);
|
b_ret = guc_ring_doorbell(client);
|
||||||
|
@ -620,12 +608,8 @@ int i915_guc_submit(struct i915_guc_client *client,
|
||||||
} else {
|
} else {
|
||||||
client->retcode = 0;
|
client->retcode = 0;
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&client->wq_lock, flags);
|
|
||||||
|
|
||||||
spin_lock(&guc->host2guc_lock);
|
|
||||||
guc->submissions[ring_id] += 1;
|
guc->submissions[ring_id] += 1;
|
||||||
guc->last_seqno[ring_id] = rq->seqno;
|
guc->last_seqno[ring_id] = rq->seqno;
|
||||||
spin_unlock(&guc->host2guc_lock);
|
|
||||||
|
|
||||||
return q_ret;
|
return q_ret;
|
||||||
}
|
}
|
||||||
|
@ -677,7 +661,7 @@ static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
|
||||||
/**
|
/**
|
||||||
* gem_release_guc_obj() - Release gem object allocated for GuC usage
|
* gem_release_guc_obj() - Release gem object allocated for GuC usage
|
||||||
* @obj: gem obj to be released
|
* @obj: gem obj to be released
|
||||||
*/
|
*/
|
||||||
static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
|
static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
|
||||||
{
|
{
|
||||||
if (!obj)
|
if (!obj)
|
||||||
|
@ -768,7 +752,6 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
|
||||||
client->client_obj = obj;
|
client->client_obj = obj;
|
||||||
client->wq_offset = GUC_DB_SIZE;
|
client->wq_offset = GUC_DB_SIZE;
|
||||||
client->wq_size = GUC_WQ_SIZE;
|
client->wq_size = GUC_WQ_SIZE;
|
||||||
spin_lock_init(&client->wq_lock);
|
|
||||||
|
|
||||||
client->doorbell_offset = select_doorbell_cacheline(guc);
|
client->doorbell_offset = select_doorbell_cacheline(guc);
|
||||||
|
|
||||||
|
@ -871,8 +854,6 @@ int i915_guc_submission_init(struct drm_device *dev)
|
||||||
if (!guc->ctx_pool_obj)
|
if (!guc->ctx_pool_obj)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
spin_lock_init(&dev_priv->guc.host2guc_lock);
|
|
||||||
|
|
||||||
ida_init(&guc->ctx_ids);
|
ida_init(&guc->ctx_ids);
|
||||||
|
|
||||||
guc_create_log(guc);
|
guc_create_log(guc);
|
||||||
|
|
|
@ -215,9 +215,9 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
|
||||||
* @interrupt_mask: mask of interrupt bits to update
|
* @interrupt_mask: mask of interrupt bits to update
|
||||||
* @enabled_irq_mask: mask of interrupt bits to enable
|
* @enabled_irq_mask: mask of interrupt bits to enable
|
||||||
*/
|
*/
|
||||||
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
|
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
|
||||||
uint32_t interrupt_mask,
|
uint32_t interrupt_mask,
|
||||||
uint32_t enabled_irq_mask)
|
uint32_t enabled_irq_mask)
|
||||||
{
|
{
|
||||||
uint32_t new_val;
|
uint32_t new_val;
|
||||||
|
|
||||||
|
@ -239,18 +239,6 @@ static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
|
||||||
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
|
|
||||||
{
|
|
||||||
ilk_update_display_irq(dev_priv, mask, mask);
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
|
|
||||||
{
|
|
||||||
ilk_update_display_irq(dev_priv, mask, 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ilk_update_gt_irq - update GTIMR
|
* ilk_update_gt_irq - update GTIMR
|
||||||
* @dev_priv: driver private
|
* @dev_priv: driver private
|
||||||
|
@ -300,11 +288,11 @@ static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* snb_update_pm_irq - update GEN6_PMIMR
|
* snb_update_pm_irq - update GEN6_PMIMR
|
||||||
* @dev_priv: driver private
|
* @dev_priv: driver private
|
||||||
* @interrupt_mask: mask of interrupt bits to update
|
* @interrupt_mask: mask of interrupt bits to update
|
||||||
* @enabled_irq_mask: mask of interrupt bits to enable
|
* @enabled_irq_mask: mask of interrupt bits to enable
|
||||||
*/
|
*/
|
||||||
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
|
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
|
||||||
uint32_t interrupt_mask,
|
uint32_t interrupt_mask,
|
||||||
uint32_t enabled_irq_mask)
|
uint32_t enabled_irq_mask)
|
||||||
|
@ -418,11 +406,11 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* bdw_update_port_irq - update DE port interrupt
|
* bdw_update_port_irq - update DE port interrupt
|
||||||
* @dev_priv: driver private
|
* @dev_priv: driver private
|
||||||
* @interrupt_mask: mask of interrupt bits to update
|
* @interrupt_mask: mask of interrupt bits to update
|
||||||
* @enabled_irq_mask: mask of interrupt bits to enable
|
* @enabled_irq_mask: mask of interrupt bits to enable
|
||||||
*/
|
*/
|
||||||
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
|
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
|
||||||
uint32_t interrupt_mask,
|
uint32_t interrupt_mask,
|
||||||
uint32_t enabled_irq_mask)
|
uint32_t enabled_irq_mask)
|
||||||
|
@ -449,6 +437,38 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* bdw_update_pipe_irq - update DE pipe interrupt
|
||||||
|
* @dev_priv: driver private
|
||||||
|
* @pipe: pipe whose interrupt to update
|
||||||
|
* @interrupt_mask: mask of interrupt bits to update
|
||||||
|
* @enabled_irq_mask: mask of interrupt bits to enable
|
||||||
|
*/
|
||||||
|
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
|
||||||
|
enum pipe pipe,
|
||||||
|
uint32_t interrupt_mask,
|
||||||
|
uint32_t enabled_irq_mask)
|
||||||
|
{
|
||||||
|
uint32_t new_val;
|
||||||
|
|
||||||
|
assert_spin_locked(&dev_priv->irq_lock);
|
||||||
|
|
||||||
|
WARN_ON(enabled_irq_mask & ~interrupt_mask);
|
||||||
|
|
||||||
|
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
|
||||||
|
return;
|
||||||
|
|
||||||
|
new_val = dev_priv->de_irq_mask[pipe];
|
||||||
|
new_val &= ~interrupt_mask;
|
||||||
|
new_val |= (~enabled_irq_mask & interrupt_mask);
|
||||||
|
|
||||||
|
if (new_val != dev_priv->de_irq_mask[pipe]) {
|
||||||
|
dev_priv->de_irq_mask[pipe] = new_val;
|
||||||
|
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
|
||||||
|
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ibx_display_interrupt_update - update SDEIMR
|
* ibx_display_interrupt_update - update SDEIMR
|
||||||
* @dev_priv: driver private
|
* @dev_priv: driver private
|
||||||
|
@ -1824,8 +1844,24 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
|
||||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||||
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
|
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Somehow the PCH doesn't seem to really ack the interrupt to the CPU
|
||||||
|
* unless we touch the hotplug register, even if hotplug_trigger is
|
||||||
|
* zero. Not acking leads to "The master control interrupt lied (SDE)!"
|
||||||
|
* errors.
|
||||||
|
*/
|
||||||
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
|
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
|
||||||
|
if (!hotplug_trigger) {
|
||||||
|
u32 mask = PORTA_HOTPLUG_STATUS_MASK |
|
||||||
|
PORTD_HOTPLUG_STATUS_MASK |
|
||||||
|
PORTC_HOTPLUG_STATUS_MASK |
|
||||||
|
PORTB_HOTPLUG_STATUS_MASK;
|
||||||
|
dig_hotplug_reg &= ~mask;
|
||||||
|
}
|
||||||
|
|
||||||
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
|
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
|
||||||
|
if (!hotplug_trigger)
|
||||||
|
return;
|
||||||
|
|
||||||
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
|
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
|
||||||
dig_hotplug_reg, hpd,
|
dig_hotplug_reg, hpd,
|
||||||
|
@ -1840,8 +1876,7 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
|
||||||
int pipe;
|
int pipe;
|
||||||
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
|
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
|
||||||
|
|
||||||
if (hotplug_trigger)
|
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
|
||||||
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
|
|
||||||
|
|
||||||
if (pch_iir & SDE_AUDIO_POWER_MASK) {
|
if (pch_iir & SDE_AUDIO_POWER_MASK) {
|
||||||
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
|
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
|
||||||
|
@ -1934,8 +1969,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
|
||||||
int pipe;
|
int pipe;
|
||||||
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
|
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
|
||||||
|
|
||||||
if (hotplug_trigger)
|
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
|
||||||
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
|
|
||||||
|
|
||||||
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
|
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
|
||||||
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
|
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
|
||||||
|
@ -2351,13 +2385,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
|
||||||
spt_irq_handler(dev, pch_iir);
|
spt_irq_handler(dev, pch_iir);
|
||||||
else
|
else
|
||||||
cpt_irq_handler(dev, pch_iir);
|
cpt_irq_handler(dev, pch_iir);
|
||||||
} else {
|
} else
|
||||||
/*
|
DRM_ERROR("The master control interrupt lied (SDE)!\n");
|
||||||
* Like on previous PCH there seems to be something
|
|
||||||
* fishy going on with forwarding PCH interrupts.
|
|
||||||
*/
|
|
||||||
DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
|
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
|
||||||
|
@ -2645,7 +2675,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
|
||||||
DE_PIPE_VBLANK(pipe);
|
DE_PIPE_VBLANK(pipe);
|
||||||
|
|
||||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||||
ironlake_enable_display_irq(dev_priv, bit);
|
ilk_enable_display_irq(dev_priv, bit);
|
||||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -2670,10 +2700,9 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
|
||||||
unsigned long irqflags;
|
unsigned long irqflags;
|
||||||
|
|
||||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||||
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
|
bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
|
||||||
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
|
|
||||||
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
|
|
||||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2700,7 +2729,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
|
||||||
DE_PIPE_VBLANK(pipe);
|
DE_PIPE_VBLANK(pipe);
|
||||||
|
|
||||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||||
ironlake_disable_display_irq(dev_priv, bit);
|
ilk_disable_display_irq(dev_priv, bit);
|
||||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2721,9 +2750,7 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
|
||||||
unsigned long irqflags;
|
unsigned long irqflags;
|
||||||
|
|
||||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||||
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
|
bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
|
||||||
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
|
|
||||||
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
|
|
||||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3452,7 +3479,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
|
||||||
* setup is guaranteed to run in single-threaded context. But we
|
* setup is guaranteed to run in single-threaded context. But we
|
||||||
* need it to make the assert_spin_locked happy. */
|
* need it to make the assert_spin_locked happy. */
|
||||||
spin_lock_irq(&dev_priv->irq_lock);
|
spin_lock_irq(&dev_priv->irq_lock);
|
||||||
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
|
ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
|
||||||
spin_unlock_irq(&dev_priv->irq_lock);
|
spin_unlock_irq(&dev_priv->irq_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -2972,6 +2972,13 @@ enum skl_disp_power_wells {
|
||||||
#define OGAMC1 _MMIO(0x30020)
|
#define OGAMC1 _MMIO(0x30020)
|
||||||
#define OGAMC0 _MMIO(0x30024)
|
#define OGAMC0 _MMIO(0x30024)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* GEN9 clock gating regs
|
||||||
|
*/
|
||||||
|
#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
|
||||||
|
#define PWM2_GATING_DIS (1 << 14)
|
||||||
|
#define PWM1_GATING_DIS (1 << 13)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Display engine regs
|
* Display engine regs
|
||||||
*/
|
*/
|
||||||
|
@ -7549,6 +7556,7 @@ enum skl_disp_power_wells {
|
||||||
#define SFUSE_STRAP _MMIO(0xc2014)
|
#define SFUSE_STRAP _MMIO(0xc2014)
|
||||||
#define SFUSE_STRAP_FUSE_LOCK (1<<13)
|
#define SFUSE_STRAP_FUSE_LOCK (1<<13)
|
||||||
#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
|
#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
|
||||||
|
#define SFUSE_STRAP_CRT_DISABLED (1<<6)
|
||||||
#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
|
#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
|
||||||
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
|
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
|
||||||
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
|
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
|
||||||
|
@ -7706,7 +7714,7 @@ enum skl_disp_power_wells {
|
||||||
#define BXT_DSI_PLL_RATIO_MAX 0x7D
|
#define BXT_DSI_PLL_RATIO_MAX 0x7D
|
||||||
#define BXT_DSI_PLL_RATIO_MIN 0x22
|
#define BXT_DSI_PLL_RATIO_MIN 0x22
|
||||||
#define BXT_DSI_PLL_RATIO_MASK 0xFF
|
#define BXT_DSI_PLL_RATIO_MASK 0xFF
|
||||||
#define BXT_REF_CLOCK_KHZ 19500
|
#define BXT_REF_CLOCK_KHZ 19200
|
||||||
|
|
||||||
#define BXT_DSI_PLL_ENABLE _MMIO(0x46080)
|
#define BXT_DSI_PLL_ENABLE _MMIO(0x46080)
|
||||||
#define BXT_DSI_PLL_DO_ENABLE (1 << 31)
|
#define BXT_DSI_PLL_DO_ENABLE (1 << 31)
|
||||||
|
|
|
@ -356,7 +356,10 @@ parse_general_features(struct drm_i915_private *dev_priv,
|
||||||
general = find_section(bdb, BDB_GENERAL_FEATURES);
|
general = find_section(bdb, BDB_GENERAL_FEATURES);
|
||||||
if (general) {
|
if (general) {
|
||||||
dev_priv->vbt.int_tv_support = general->int_tv_support;
|
dev_priv->vbt.int_tv_support = general->int_tv_support;
|
||||||
dev_priv->vbt.int_crt_support = general->int_crt_support;
|
/* int_crt_support can't be trusted on earlier platforms */
|
||||||
|
if (bdb->version >= 155 &&
|
||||||
|
(HAS_DDI(dev_priv) || IS_VALLEYVIEW(dev_priv)))
|
||||||
|
dev_priv->vbt.int_crt_support = general->int_crt_support;
|
||||||
dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
|
dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
|
||||||
dev_priv->vbt.lvds_ssc_freq =
|
dev_priv->vbt.lvds_ssc_freq =
|
||||||
intel_bios_ssc_frequency(dev, general->ssc_freq);
|
intel_bios_ssc_frequency(dev, general->ssc_freq);
|
||||||
|
|
|
@@ -777,11 +777,37 @@ void intel_crt_init(struct drm_device *dev)
 	struct intel_crt *crt;
 	struct intel_connector *intel_connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	i915_reg_t adpa_reg;
+	u32 adpa;
 
 	/* Skip machines without VGA that falsely report hotplug events */
 	if (dmi_check_system(intel_no_crt))
 		return;
 
+	if (HAS_PCH_SPLIT(dev))
+		adpa_reg = PCH_ADPA;
+	else if (IS_VALLEYVIEW(dev))
+		adpa_reg = VLV_ADPA;
+	else
+		adpa_reg = ADPA;
+
+	adpa = I915_READ(adpa_reg);
+	if ((adpa & ADPA_DAC_ENABLE) == 0) {
+		/*
+		 * On some machines (some IVB at least) CRT can be
+		 * fused off, but there's no known fuse bit to
+		 * indicate that. On these machine the ADPA register
+		 * works normally, except the DAC enable bit won't
+		 * take. So the only way to tell is attempt to enable
+		 * it and see what happens.
+		 */
+		I915_WRITE(adpa_reg, adpa | ADPA_DAC_ENABLE |
+			   ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+		if ((I915_READ(adpa_reg) & ADPA_DAC_ENABLE) == 0)
+			return;
+		I915_WRITE(adpa_reg, adpa);
+	}
+
 	crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
 	if (!crt)
 		return;

@@ -815,12 +841,7 @@ void intel_crt_init(struct drm_device *dev)
 	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
 
-	if (HAS_PCH_SPLIT(dev))
-		crt->adpa_reg = PCH_ADPA;
-	else if (IS_VALLEYVIEW(dev))
-		crt->adpa_reg = VLV_ADPA;
-	else
-		crt->adpa_reg = ADPA;
+	crt->adpa_reg = adpa_reg;
 
 	crt->base.compute_config = intel_crt_compute_config;
 	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) {

@@ -3151,7 +3151,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 		pipe_config->has_hdmi_sink = true;
 		intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 
-		if (intel_hdmi->infoframe_enabled(&encoder->base))
+		if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
 			pipe_config->has_infoframe = true;
 		break;
 	case TRANS_DDI_MODE_SELECT_DVI:

@@ -44,6 +44,8 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_rect.h>
 #include <linux/dma_remapping.h>
+#include <linux/reservation.h>
+#include <linux/dma-buf.h>
 
 /* Primary plane formats for gen <= 3 */
 static const uint32_t i8xx_primary_formats[] = {

@@ -2130,7 +2132,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
 	 * need the check.
 	 */
 	if (HAS_GMCH_DISPLAY(dev_priv->dev))
-		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+		if (crtc->config->has_dsi_encoder)
 			assert_dsi_pll_enabled(dev_priv);
 		else
 			assert_pll_enabled(dev_priv, pipe);

@@ -3174,8 +3176,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->fbc.disable_fbc)
-		dev_priv->fbc.disable_fbc(dev_priv);
+	if (dev_priv->fbc.deactivate)
+		dev_priv->fbc.deactivate(dev_priv);
 
 	dev_priv->display.update_primary_plane(crtc, fb, x, y);
 

@@ -4137,6 +4139,12 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 	I915_WRITE(FDI_RX_TUSIZE1(pipe),
 		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 
+	/*
+	 * Sometimes spurious CPU pipe underruns happen during FDI
+	 * training, at least with VGA+HDMI cloning. Suppress them.
+	 */
+	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
 	/* For PCH output, training FDI link */
 	dev_priv->display.fdi_link_train(crtc);
 

@@ -4170,6 +4178,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 
 	intel_fdi_normal_train(crtc);
 
+	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
 		const struct drm_display_mode *adjusted_mode =

@@ -4628,7 +4638,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
 		return;
 
 	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
-		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
+		if (intel_crtc->config->has_dsi_encoder)
 			assert_dsi_pll_enabled(dev_priv);
 		else
 			assert_pll_enabled(dev_priv, pipe);

@@ -4784,7 +4794,6 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
 {
 	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (atomic->wait_vblank)
 		intel_wait_for_vblank(dev, crtc->pipe);

@@ -4798,7 +4807,7 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
 	intel_update_watermarks(&crtc->base);
 
 	if (atomic->update_fbc)
-		intel_fbc_update(dev_priv);
+		intel_fbc_update(crtc);
 
 	if (atomic->post_enable_primary)
 		intel_post_enable_primary(&crtc->base);

@@ -4813,7 +4822,7 @@ static void intel_pre_plane_update(struct intel_crtc *crtc)
 	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
 
 	if (atomic->disable_fbc)
-		intel_fbc_disable_crtc(crtc);
+		intel_fbc_deactivate(crtc);
 
 	if (crtc->atomic.disable_ips)
 		hsw_disable_ips(crtc);

@@ -4921,6 +4930,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	if (intel_crtc->config->has_pch_encoder)
 		intel_wait_for_vblank(dev, pipe);
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+
+	intel_fbc_enable(intel_crtc);
 }
 
 /* IPS only exists on ULT machines and is tied to pipe A. */

@@ -4938,7 +4949,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
 	struct intel_crtc_state *pipe_config =
 		to_intel_crtc_state(crtc->state);
-	bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
 	if (WARN_ON(intel_crtc->active))
 		return;
@@ -4971,10 +4981,12 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 
 	intel_crtc->active = true;
 
-	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+	if (intel_crtc->config->has_pch_encoder)
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+	else
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
 	for_each_encoder_on_crtc(dev, crtc, encoder) {
-		if (encoder->pre_pll_enable)
-			encoder->pre_pll_enable(encoder);
 		if (encoder->pre_enable)
 			encoder->pre_enable(encoder);
 	}

@@ -4982,7 +4994,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	if (intel_crtc->config->has_pch_encoder)
 		dev_priv->display.fdi_link_train(crtc);
 
-	if (!is_dsi)
+	if (!intel_crtc->config->has_dsi_encoder)
 		intel_ddi_enable_pipe_clock(intel_crtc);
 
 	if (INTEL_INFO(dev)->gen >= 9)

@@ -4997,7 +5009,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	intel_crtc_load_lut(crtc);
 
 	intel_ddi_set_pipe_settings(crtc);
-	if (!is_dsi)
+	if (!intel_crtc->config->has_dsi_encoder)
 		intel_ddi_enable_transcoder_func(crtc);
 
 	intel_update_watermarks(crtc);

@@ -5006,7 +5018,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	if (intel_crtc->config->has_pch_encoder)
 		lpt_pch_enable(crtc);
 
-	if (intel_crtc->config->dp_encoder_is_mst && !is_dsi)
+	if (intel_crtc->config->dp_encoder_is_mst)
 		intel_ddi_set_vc_payload_alloc(crtc, true);
 
 	assert_vblank_disabled(crtc);

@@ -5017,9 +5029,13 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 		intel_opregion_notify_encoder(encoder, true);
 	}
 
-	if (intel_crtc->config->has_pch_encoder)
+	if (intel_crtc->config->has_pch_encoder) {
+		intel_wait_for_vblank(dev, pipe);
+		intel_wait_for_vblank(dev, pipe);
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
 						      true);
+	}
 
 	/* If we change the relative order between pipe/planes enabling, we need
 	 * to change the workaround. */

@@ -5028,6 +5044,8 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
 	}
+
+	intel_fbc_enable(intel_crtc);
 }
 
 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)

@@ -5062,12 +5080,22 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	drm_crtc_vblank_off(crtc);
 	assert_vblank_disabled(crtc);
 
+	/*
+	 * Sometimes spurious CPU pipe underruns happen when the
+	 * pipe is already disabled, but FDI RX/TX is still enabled.
+	 * Happens at least with VGA+HDMI cloning. Suppress them.
+	 */
+	if (intel_crtc->config->has_pch_encoder)
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
 	intel_disable_pipe(intel_crtc);
 
 	ironlake_pfit_disable(intel_crtc, false);
 
-	if (intel_crtc->config->has_pch_encoder)
+	if (intel_crtc->config->has_pch_encoder) {
 		ironlake_fdi_disable(crtc);
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+	}
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->post_disable)

@@ -5098,6 +5126,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	}
 
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+
+	intel_fbc_disable_crtc(intel_crtc);
 }
 
 static void haswell_crtc_disable(struct drm_crtc *crtc)

@@ -5107,7 +5137,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
-	bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
 	if (intel_crtc->config->has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,

@@ -5126,7 +5155,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	if (intel_crtc->config->dp_encoder_is_mst)
 		intel_ddi_set_vc_payload_alloc(crtc, false);
 
-	if (!is_dsi)
+	if (!intel_crtc->config->has_dsi_encoder)
 		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
 	if (INTEL_INFO(dev)->gen >= 9)

@@ -5134,7 +5163,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	else
 		ironlake_pfit_disable(intel_crtc, false);
 
-	if (!is_dsi)
+	if (!intel_crtc->config->has_dsi_encoder)
 		intel_ddi_disable_pipe_clock(intel_crtc);
 
 	if (intel_crtc->config->has_pch_encoder) {

@@ -5149,6 +5178,8 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	if (intel_crtc->config->has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
 						      true);
+
+	intel_fbc_disable_crtc(intel_crtc);
 }
 
 static void i9xx_pfit_enable(struct intel_crtc *crtc)

@@ -5214,10 +5245,6 @@ static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
 	}
 }
 
-#define for_each_power_domain(domain, mask)				\
-	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
-		if ((1 << (domain)) & (mask))
-
 enum intel_display_power_domain
 intel_display_port_power_domain(struct intel_encoder *intel_encoder)
 {

@@ -6140,13 +6167,10 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	bool is_dsi;
 
 	if (WARN_ON(intel_crtc->active))
 		return;
 
-	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
-
 	if (intel_crtc->config->has_dp_encoder)
 		intel_dp_set_m_n(intel_crtc, M1_N1);
 

@@ -6169,7 +6193,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 		if (encoder->pre_pll_enable)
 			encoder->pre_pll_enable(encoder);
 
-	if (!is_dsi) {
+	if (!intel_crtc->config->has_dsi_encoder) {
 		if (IS_CHERRYVIEW(dev)) {
 			chv_prepare_pll(intel_crtc, intel_crtc->config);
 			chv_enable_pll(intel_crtc, intel_crtc->config);

@@ -6248,6 +6272,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		encoder->enable(encoder);
+
+	intel_fbc_enable(intel_crtc);
 }
 
 static void i9xx_pfit_disable(struct intel_crtc *crtc)

@@ -6295,7 +6321,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 		if (encoder->post_disable)
 			encoder->post_disable(encoder);
 
-	if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
+	if (!intel_crtc->config->has_dsi_encoder) {
 		if (IS_CHERRYVIEW(dev))
 			chv_disable_pll(dev_priv, pipe);
 		else if (IS_VALLEYVIEW(dev))

@@ -6310,6 +6336,8 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 
 	if (!IS_GEN2(dev))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+	intel_fbc_disable_crtc(intel_crtc);
 }
 
 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)

@@ -7908,8 +7936,6 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 	int refclk, num_connectors = 0;
 	intel_clock_t clock;
 	bool ok;
-	bool is_dsi = false;
-	struct intel_encoder *encoder;
 	const intel_limit_t *limit;
 	struct drm_atomic_state *state = crtc_state->base.state;
 	struct drm_connector *connector;

@@ -7919,26 +7945,14 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
 
-	for_each_connector_in_state(state, connector, connector_state, i) {
-		if (connector_state->crtc != &crtc->base)
-			continue;
-
-		encoder = to_intel_encoder(connector_state->best_encoder);
-
-		switch (encoder->type) {
-		case INTEL_OUTPUT_DSI:
-			is_dsi = true;
-			break;
-		default:
-			break;
-		}
-
-		num_connectors++;
-	}
-
-	if (is_dsi)
+	if (crtc_state->has_dsi_encoder)
 		return 0;
 
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc == &crtc->base)
+			num_connectors++;
+	}
+
 	if (!crtc_state->clock_set) {
 		refclk = i9xx_get_refclk(crtc_state, num_connectors);
 

@@ -8931,7 +8945,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
 
-	is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
+	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
 
 	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
 	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

@@ -9705,14 +9719,10 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
 	else
 		cdclk = 337500;
 
-	/*
-	 * FIXME move the cdclk caclulation to
-	 * compute_config() so we can fail gracegully.
-	 */
 	if (cdclk > dev_priv->max_cdclk_freq) {
-		DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
-			  cdclk, dev_priv->max_cdclk_freq);
-		cdclk = dev_priv->max_cdclk_freq;
+		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+			      cdclk, dev_priv->max_cdclk_freq);
+		return -EINVAL;
 	}
 
 	to_intel_atomic_state(state)->cdclk = cdclk;
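The hunk above makes broadwell_modeset_calc_cdclk() fail the modeset with -EINVAL when the requested cdclk exceeds the platform maximum, instead of logging an error and silently clamping. A minimal userspace sketch of that validate-and-reject pattern (illustrative only, not i915 code; the frequencies are made up):

#include <errno.h>
#include <stdio.h>

static int validate_cdclk(int requested_khz, int max_khz)
{
	if (requested_khz > max_khz) {
		fprintf(stderr, "requested cdclk (%d kHz) exceeds max (%d kHz)\n",
			requested_khz, max_khz);
		return -EINVAL;	/* the caller would then fail the modeset */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", validate_cdclk(675000, 540000));	/* prints -22 */
	printf("%d\n", validate_cdclk(450000, 540000));	/* prints 0 */
	return 0;
}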
@@ -9807,6 +9817,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
 		break;
 	case PORT_CLK_SEL_SPLL:
 		pipe_config->shared_dpll = DPLL_ID_SPLL;
+		break;
 	}
 }
 

@@ -11191,6 +11202,10 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
 		return true;
 	else if (i915.enable_execlists)
 		return true;
+	else if (obj->base.dma_buf &&
+		 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
+						       false))
+		return true;
 	else
 		return ring != i915_gem_request_get_ring(obj->last_write_req);
 }

@@ -11305,6 +11320,9 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
 {
 	struct intel_mmio_flip *mmio_flip =
 		container_of(work, struct intel_mmio_flip, work);
+	struct intel_framebuffer *intel_fb =
+		to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
 
 	if (mmio_flip->req) {
 		WARN_ON(__i915_wait_request(mmio_flip->req,

@@ -11314,6 +11332,12 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
 		i915_gem_request_unreference__unlocked(mmio_flip->req);
 	}
 
+	/* For framebuffer backed by dmabuf, wait for fence */
+	if (obj->base.dma_buf)
+		WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
+							    false, false,
+							    MAX_SCHEDULE_TIMEOUT) < 0);
+
 	intel_do_mmio_flip(mmio_flip);
 	kfree(mmio_flip);
 }

@@ -11584,7 +11608,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 					to_intel_plane(primary)->frontbuffer_bit);
 	mutex_unlock(&dev->struct_mutex);
 
-	intel_fbc_disable_crtc(intel_crtc);
+	intel_fbc_deactivate(intel_crtc);
 	intel_frontbuffer_flip_prepare(dev,
 				       to_intel_plane(primary)->frontbuffer_bit);
 

@@ -12587,6 +12611,8 @@ intel_pipe_config_compare(struct drm_device *dev,
 	} else
 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
 
+	PIPE_CONF_CHECK_I(has_dsi_encoder);
+
 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);

@@ -13383,6 +13409,13 @@ static int intel_atomic_commit(struct drm_device *dev,
 			dev_priv->display.crtc_disable(crtc);
 			intel_crtc->active = false;
 			intel_disable_shared_dpll(intel_crtc);
+
+			/*
+			 * Underruns don't always raise
+			 * interrupts, so check manually.
+			 */
+			intel_check_cpu_fifo_underruns(dev_priv);
+			intel_check_pch_fifo_underruns(dev_priv);
 		}
 	}
 

@@ -13652,6 +13685,17 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 			return ret;
 	}
 
+	/* For framebuffer backed by dmabuf, wait for fence */
+	if (obj && obj->base.dma_buf) {
+		ret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
+							  false, true,
+							  MAX_SCHEDULE_TIMEOUT);
+		if (ret == -ERESTARTSYS)
+			return ret;
+
+		WARN_ON(ret < 0);
+	}
+
 	if (!obj) {
 		ret = 0;
 	} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&

@@ -14246,7 +14290,14 @@ static bool intel_crt_present(struct drm_device *dev)
 	if (IS_CHERRYVIEW(dev))
 		return false;
 
-	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
+	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+		return false;
+
+	/* DDI E can't be used if DDI A requires 4 lanes */
+	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+		return false;
+
+	if (!dev_priv->vbt.int_crt_support)
 		return false;
 
 	return true;

@@ -14789,9 +14840,6 @@ static void intel_init_display(struct drm_device *dev)
 	else if (IS_I945GM(dev) || IS_845G(dev))
 		dev_priv->display.get_display_clock_speed =
 			i9xx_misc_get_display_clock_speed;
-	else if (IS_PINEVIEW(dev))
-		dev_priv->display.get_display_clock_speed =
-			pnv_get_display_clock_speed;
 	else if (IS_I915GM(dev))
 		dev_priv->display.get_display_clock_speed =
 			i915gm_get_display_clock_speed;

@@ -681,7 +681,7 @@ static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 	 * The clock divider is based off the hrawclk, and would like to run at
 	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
 	 */
-	return index ? 0 : intel_hrawclk(dev) / 2;
+	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
 }
 
 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)

@@ -694,10 +694,10 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 		return 0;
 
 	if (intel_dig_port->port == PORT_A) {
-		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
+		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
 
 	} else {
-		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
 	}
 }
 

@@ -711,7 +711,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 		if (index)
 			return 0;
 		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
-	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+	} else if (HAS_PCH_LPT_H(dev_priv)) {
 		/* Workaround for non-ULT HSW */
 		switch (index) {
 		case 0: return 63;

@@ -719,7 +719,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 		default: return 0;
 		}
 	} else {
-		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
 	}
 }
 
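The aux clock divider hunks above replace DIV_ROUND_UP with DIV_ROUND_CLOSEST, so the derived divider lands nearest the target AUX frequency instead of always rounding up. A self-contained sketch of the arithmetic difference, with the macros written out in the spirit of the kernel's definitions (the 202500 kHz input is just an illustrative value):

#include <stdio.h>

/* Same arithmetic as the kernel macros for positive operands, reproduced
 * here purely for illustration. */
#define DIV_ROUND_UP(n, d)        (((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(x, d)   (((x) + (d) / 2) / (d))

int main(void)
{
	/* e.g. dividing a 202500 kHz clock down towards 2 MHz */
	printf("up:      %d\n", DIV_ROUND_UP(202500, 2000));      /* 102 */
	printf("closest: %d\n", DIV_ROUND_CLOSEST(202500, 2000)); /* 101 */
	return 0;
}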
@@ -2697,6 +2697,15 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 	if (IS_VALLEYVIEW(dev))
 		vlv_init_panel_power_sequencer(intel_dp);
 
+	/*
+	 * We get an occasional spurious underrun between the port
+	 * enable and vdd enable, when enabling port A eDP.
+	 *
+	 * FIXME: Not sure if this applies to (PCH) port D eDP as well
+	 */
+	if (port == PORT_A)
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
 	intel_dp_enable_port(intel_dp);
 
 	if (port == PORT_A && IS_GEN5(dev_priv)) {

@@ -2714,6 +2723,9 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 	edp_panel_on(intel_dp);
 	edp_panel_vdd_off(intel_dp, true);
 
+	if (port == PORT_A)
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
 	pps_unlock(intel_dp);
 
 	if (IS_VALLEYVIEW(dev)) {

@@ -393,6 +393,9 @@ struct intel_crtc_state {
 	 * accordingly. */
 	bool has_dp_encoder;
 
+	/* DSI has special cases */
+	bool has_dsi_encoder;
+
 	/* Whether we should send NULL infoframes. Required for audio. */
 	bool has_hdmi_sink;
 

@@ -710,7 +713,8 @@ struct intel_hdmi {
 	void (*set_infoframes)(struct drm_encoder *encoder,
 			       bool enable,
 			       const struct drm_display_mode *adjusted_mode);
-	bool (*infoframe_enabled)(struct drm_encoder *encoder);
+	bool (*infoframe_enabled)(struct drm_encoder *encoder,
+				  const struct intel_crtc_state *pipe_config);
 };
 
 struct intel_dp_mst_encoder;

@@ -1316,9 +1320,11 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
 #endif
 
 /* intel_fbc.c */
-bool intel_fbc_enabled(struct drm_i915_private *dev_priv);
-void intel_fbc_update(struct drm_i915_private *dev_priv);
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
+void intel_fbc_deactivate(struct intel_crtc *crtc);
+void intel_fbc_update(struct intel_crtc *crtc);
 void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_enable(struct intel_crtc *crtc);
 void intel_fbc_disable(struct drm_i915_private *dev_priv);
 void intel_fbc_disable_crtc(struct intel_crtc *crtc);
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,

@@ -1410,6 +1416,8 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
 void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv);
 void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain);
 
 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 				    enum intel_display_power_domain domain);
@@ -266,16 +266,18 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
 }
 
 static bool intel_dsi_compute_config(struct intel_encoder *encoder,
-				     struct intel_crtc_state *config)
+				     struct intel_crtc_state *pipe_config)
 {
 	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
 						   base);
 	struct intel_connector *intel_connector = intel_dsi->attached_connector;
 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
-	struct drm_display_mode *adjusted_mode = &config->base.adjusted_mode;
+	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 
 	DRM_DEBUG_KMS("\n");
 
+	pipe_config->has_dsi_encoder = true;
+
 	if (fixed_mode)
 		intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 

@@ -462,6 +464,8 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
 		intel_panel_enable_backlight(intel_dsi->attached_connector);
 }
 
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
+
 static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;

@@ -474,6 +478,9 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 
 	DRM_DEBUG_KMS("\n");
 
+	intel_dsi_prepare(encoder);
+	intel_enable_dsi_pll(encoder);
+
 	/* Panel Enable over CRC PMIC */
 	if (intel_dsi->gpio_panel)
 		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);

@@ -699,6 +706,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
 	u32 pclk = 0;
 	DRM_DEBUG_KMS("\n");
 
+	pipe_config->has_dsi_encoder = true;
+
 	/*
 	 * DPLL_MD is not used in case of DSI, reading will get some default value
 	 * set dpll_md = 0

@@ -1026,15 +1035,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
 	}
 }
 
-static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
-{
-	DRM_DEBUG_KMS("\n");
-
-	intel_dsi_prepare(encoder);
-	intel_enable_dsi_pll(encoder);
-
-}
-
 static enum drm_connector_status
 intel_dsi_detect(struct drm_connector *connector, bool force)
 {

@@ -1155,9 +1155,7 @@ void intel_dsi_init(struct drm_device *dev)
 	drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
 			 NULL);
 
-	/* XXX: very likely not all of these are needed */
 	intel_encoder->compute_config = intel_dsi_compute_config;
-	intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
 	intel_encoder->pre_enable = intel_dsi_pre_enable;
 	intel_encoder->enable = intel_dsi_enable_nop;
 	intel_encoder->disable = intel_dsi_pre_disable;

@@ -43,7 +43,7 @@
 
 static inline bool fbc_supported(struct drm_i915_private *dev_priv)
 {
-	return dev_priv->fbc.enable_fbc != NULL;
+	return dev_priv->fbc.activate != NULL;
 }
 
 static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)

@@ -51,6 +51,11 @@ static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
 	return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
 }
 
+static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
+{
+	return INTEL_INFO(dev_priv)->gen < 4;
+}
+
 /*
  * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
  * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
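The next hunk adds intel_fbc_get_plane_source_size() and intel_fbc_calculate_cfb_size(): the plane's source width and height are swapped for 90/270 rotation, the line count is capped at 2048 on gen7+, and the compressed framebuffer is sized as lines times the full stride. A standalone sketch of the same arithmetic (illustrative only, not i915 code; the struct and the sample values are invented):

#include <stdio.h>

struct plane_src { int width, height, rotation_deg; };

static void plane_source_size(const struct plane_src *src, int *w, int *h)
{
	if (src->rotation_deg == 90 || src->rotation_deg == 270) {
		*w = src->height;	/* rotated: swap the axes */
		*h = src->width;
	} else {
		*w = src->width;
		*h = src->height;
	}
}

static long cfb_size(const struct plane_src *src, long stride_bytes, int gen)
{
	int w, h;

	plane_source_size(src, &w, &h);
	if (gen >= 7 && h > 2048)	/* line cap taken from the hunk */
		h = 2048;
	/* full buffer stride, not just the visible width */
	return (long)h * stride_bytes;
}

int main(void)
{
	struct plane_src src = { 1920, 1080, 270 };

	/* rotated plane: 1920 lines * 7680-byte stride */
	printf("%ld\n", cfb_size(&src, 1920 * 4, 9));
	return 0;
}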
@@ -64,11 +69,51 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
 	return crtc->base.y - crtc->adjusted_y;
 }
 
-static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
+/*
+ * For SKL+, the plane source size used by the hardware is based on the value we
+ * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
+ * we wrote to PIPESRC.
+ */
+static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
+					    int *width, int *height)
+{
+	struct intel_plane_state *plane_state =
+			to_intel_plane_state(crtc->base.primary->state);
+	int w, h;
+
+	if (intel_rotation_90_or_270(plane_state->base.rotation)) {
+		w = drm_rect_height(&plane_state->src) >> 16;
+		h = drm_rect_width(&plane_state->src) >> 16;
+	} else {
+		w = drm_rect_width(&plane_state->src) >> 16;
+		h = drm_rect_height(&plane_state->src) >> 16;
+	}
+
+	if (width)
+		*width = w;
+	if (height)
+		*height = h;
+}
+
+static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc,
+					struct drm_framebuffer *fb)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	int lines;
+
+	intel_fbc_get_plane_source_size(crtc, NULL, &lines);
+	if (INTEL_INFO(dev_priv)->gen >= 7)
+		lines = min(lines, 2048);
+
+	/* Hardware needs the full buffer stride, not just the active area. */
+	return lines * fb->pitches[0];
+}
+
+static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
 	u32 fbc_ctl;
 
-	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.active = false;
 
 	/* Disable compression */
 	fbc_ctl = I915_READ(FBC_CONTROL);

@@ -83,11 +128,9 @@ static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
 			DRM_DEBUG_KMS("FBC idle timed out\n");
 			return;
 		}
-
-	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static void i8xx_fbc_enable(struct intel_crtc *crtc)
+static void i8xx_fbc_activate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct drm_framebuffer *fb = crtc->base.primary->fb;

@@ -96,10 +139,10 @@ static void i8xx_fbc_enable(struct intel_crtc *crtc)
 	int i;
 	u32 fbc_ctl;
 
-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;
 
 	/* Note: fbc.threshold == 1 for i8xx */
-	cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
+	cfb_pitch = intel_fbc_calculate_cfb_size(crtc, fb) / FBC_LL_SIZE;
 	if (fb->pitches[0] < cfb_pitch)
 		cfb_pitch = fb->pitches[0];
 

@@ -132,24 +175,21 @@ static void i8xx_fbc_enable(struct intel_crtc *crtc)
 		fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 	fbc_ctl |= obj->fence_reg;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
-
-	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
-		      cfb_pitch, crtc->base.y, plane_name(crtc->plane));
 }
 
-static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
+static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
 {
 	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
 }
 
-static void g4x_fbc_enable(struct intel_crtc *crtc)
+static void g4x_fbc_activate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct drm_framebuffer *fb = crtc->base.primary->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	u32 dpfc_ctl;
 
-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;
 
 	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)

@@ -162,27 +202,23 @@ static void g4x_fbc_enable(struct intel_crtc *crtc)
 
 	/* enable it... */
 	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
-static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
+static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
 	u32 dpfc_ctl;
 
-	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.active = false;
 
 	/* Disable compression */
 	dpfc_ctl = I915_READ(DPFC_CONTROL);
 	if (dpfc_ctl & DPFC_CTL_EN) {
 		dpfc_ctl &= ~DPFC_CTL_EN;
 		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-
-		DRM_DEBUG_KMS("disabled FBC\n");
 	}
 }
 
-static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
+static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
 {
 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }

@@ -194,7 +230,7 @@ static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
 	POSTING_READ(MSG_FBC_REND_STATE);
 }
 
-static void ilk_fbc_enable(struct intel_crtc *crtc)
+static void ilk_fbc_activate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct drm_framebuffer *fb = crtc->base.primary->fb;

@@ -203,7 +239,7 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
 	int threshold = dev_priv->fbc.threshold;
 	unsigned int y_offset;
 
-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;
 
 	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)

@@ -238,32 +274,28 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
 	}
 
 	intel_fbc_recompress(dev_priv);
-
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
-static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
+static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
 	u32 dpfc_ctl;
 
-	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.active = false;
 
 	/* Disable compression */
 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
 	if (dpfc_ctl & DPFC_CTL_EN) {
 		dpfc_ctl &= ~DPFC_CTL_EN;
 		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-
-		DRM_DEBUG_KMS("disabled FBC\n");
 	}
 }
 
-static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
+static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
 {
 	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
-static void gen7_fbc_enable(struct intel_crtc *crtc)
+static void gen7_fbc_activate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct drm_framebuffer *fb = crtc->base.primary->fb;

@@ -271,7 +303,7 @@ static void gen7_fbc_enable(struct intel_crtc *crtc)
 	u32 dpfc_ctl;
 	int threshold = dev_priv->fbc.threshold;
 
-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;
 
 	dpfc_ctl = 0;
 	if (IS_IVYBRIDGE(dev_priv))

@@ -317,103 +349,41 @@ static void gen7_fbc_enable(struct intel_crtc *crtc)
 	I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
 
 	intel_fbc_recompress(dev_priv);
-
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
 /**
- * intel_fbc_enabled - Is FBC enabled?
+ * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 * FIXME: This should be tracked in the plane config eventually
 *        instead of queried at runtime for most callers.
 */
-bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
 {
-	return dev_priv->fbc.enabled;
+	return dev_priv->fbc.active;
 }
 
-static void intel_fbc_enable(struct intel_crtc *crtc,
-			     const struct drm_framebuffer *fb)
+static void intel_fbc_activate(const struct drm_framebuffer *fb)
 {
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = fb->dev->dev_private;
+	struct intel_crtc *crtc = dev_priv->fbc.crtc;
 
-	dev_priv->fbc.enable_fbc(crtc);
+	dev_priv->fbc.activate(crtc);
 
-	dev_priv->fbc.crtc = crtc;
 	dev_priv->fbc.fb_id = fb->base.id;
 	dev_priv->fbc.y = crtc->base.y;
 }
 
 static void intel_fbc_work_fn(struct work_struct *__work)
 {
-	struct intel_fbc_work *work =
-		container_of(to_delayed_work(__work),
-			     struct intel_fbc_work, work);
-	struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
-	struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;
+	struct drm_i915_private *dev_priv =
+		container_of(__work, struct drm_i915_private, fbc.work.work);
+	struct intel_fbc_work *work = &dev_priv->fbc.work;
+	struct intel_crtc *crtc = dev_priv->fbc.crtc;
+	int delay_ms = 50;
 
-	mutex_lock(&dev_priv->fbc.lock);
-	if (work == dev_priv->fbc.fbc_work) {
-		/* Double check that we haven't switched fb without cancelling
-		 * the prior work.
-		 */
-		if (crtc_fb == work->fb)
-			intel_fbc_enable(work->crtc, work->fb);
-
-		dev_priv->fbc.fbc_work = NULL;
-	}
-	mutex_unlock(&dev_priv->fbc.lock);
-
-	kfree(work);
-}
-
-static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
-{
-	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
-
-	if (dev_priv->fbc.fbc_work == NULL)
-		return;
-
-	/* Synchronisation is provided by struct_mutex and checking of
-	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
-	 * entirely asynchronously.
-	 */
-	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
-		/* tasklet was killed before being run, clean up */
-		kfree(dev_priv->fbc.fbc_work);
-
-	/* Mark the work as no longer wanted so that if it does
-	 * wake-up (because the work was already running and waiting
-	 * for our mutex), it will discover that is no longer
-	 * necessary to run.
-	 */
-	dev_priv->fbc.fbc_work = NULL;
-}
-
-static void intel_fbc_schedule_enable(struct intel_crtc *crtc)
-{
-	struct intel_fbc_work *work;
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-
-	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
-
-	intel_fbc_cancel_work(dev_priv);
-
-	work = kzalloc(sizeof(*work), GFP_KERNEL);
-	if (work == NULL) {
-		DRM_ERROR("Failed to allocate FBC work structure\n");
-		intel_fbc_enable(crtc, crtc->base.primary->fb);
-		return;
-	}
-
-	work->crtc = crtc;
-	work->fb = crtc->base.primary->fb;
-	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
-
-	dev_priv->fbc.fbc_work = work;
-
+retry:
 	/* Delay the actual enabling to let pageflipping cease and the
 	 * display to settle before starting the compression. Note that
 	 * this delay also serves a second purpose: it allows for a

@@ -427,43 +397,71 @@ static void intel_fbc_schedule_enable(struct intel_crtc *crtc)
 	 *
 	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
 	 */
-	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+	wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms);
+
+	mutex_lock(&dev_priv->fbc.lock);
+
+	/* Were we cancelled? */
+	if (!work->scheduled)
+		goto out;
+
+	/* Were we delayed again while this function was sleeping? */
+	if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms),
+		       jiffies)) {
+		mutex_unlock(&dev_priv->fbc.lock);
+		goto retry;
+	}
+
+	if (crtc->base.primary->fb == work->fb)
+		intel_fbc_activate(work->fb);
+
+	work->scheduled = false;
+
+out:
+	mutex_unlock(&dev_priv->fbc.lock);
 }
 
-static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+	dev_priv->fbc.work.scheduled = false;
+}
+
+static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct intel_fbc_work *work = &dev_priv->fbc.work;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
+	/* It is useless to call intel_fbc_cancel_work() in this function since
+	 * we're not releasing fbc.lock, so it won't have an opportunity to grab
+	 * it to discover that it was cancelled. So we just update the expected
+	 * jiffy count. */
+	work->fb = crtc->base.primary->fb;
+	work->scheduled = true;
+	work->enable_jiffies = jiffies;
+
+	schedule_work(&work->work);
+}
+
+static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
 	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
 	intel_fbc_cancel_work(dev_priv);
 
-	if (dev_priv->fbc.enabled)
-		dev_priv->fbc.disable_fbc(dev_priv);
-	dev_priv->fbc.crtc = NULL;
-}
-
-/**
- * intel_fbc_disable - disable FBC
- * @dev_priv: i915 device instance
- *
- * This function disables FBC.
- */
-void intel_fbc_disable(struct drm_i915_private *dev_priv)
-{
-	if (!fbc_supported(dev_priv))
-		return;
-
-	mutex_lock(&dev_priv->fbc.lock);
-	__intel_fbc_disable(dev_priv);
-	mutex_unlock(&dev_priv->fbc.lock);
+	if (dev_priv->fbc.active)
+		dev_priv->fbc.deactivate(dev_priv);
 }
 
 /*
- * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * intel_fbc_deactivate - deactivate FBC if it's associated with crtc
 * @crtc: the CRTC
 *
- * This function disables FBC if it's associated with the provided CRTC.
+ * This function deactivates FBC if it's associated with the provided CRTC.
 */
-void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+void intel_fbc_deactivate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
@ -472,7 +470,7 @@ void intel_fbc_disable_crtc(struct intel_crtc *crtc)
|
||||||
|
|
||||||
mutex_lock(&dev_priv->fbc.lock);
|
mutex_lock(&dev_priv->fbc.lock);
|
||||||
if (dev_priv->fbc.crtc == crtc)
|
if (dev_priv->fbc.crtc == crtc)
|
||||||
__intel_fbc_disable(dev_priv);
|
__intel_fbc_deactivate(dev_priv);
|
||||||
mutex_unlock(&dev_priv->fbc.lock);
|
mutex_unlock(&dev_priv->fbc.lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -486,13 +484,21 @@ static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
|
||||||
DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
|
DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool crtc_is_valid(struct intel_crtc *crtc)
|
static bool crtc_can_fbc(struct intel_crtc *crtc)
|
||||||
{
|
{
|
||||||
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
|
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
|
||||||
|
|
||||||
if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
|
if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
|
if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool crtc_is_valid(struct intel_crtc *crtc)
|
||||||
|
{
|
||||||
if (!intel_crtc_active(&crtc->base))
|
if (!intel_crtc_active(&crtc->base))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
|
@ -502,24 +508,6 @@ static bool crtc_is_valid(struct intel_crtc *crtc)
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
|
|
||||||
{
|
|
||||||
struct drm_crtc *crtc = NULL, *tmp_crtc;
|
|
||||||
enum pipe pipe;
|
|
||||||
|
|
||||||
for_each_pipe(dev_priv, pipe) {
|
|
||||||
tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
|
|
||||||
|
|
||||||
if (crtc_is_valid(to_intel_crtc(tmp_crtc)))
|
|
||||||
crtc = tmp_crtc;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!crtc)
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
return crtc;
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
|
static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
|
||||||
{
|
{
|
||||||
enum pipe pipe;
|
enum pipe pipe;
|
||||||
|
@ -590,11 +578,17 @@ again:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
|
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
|
||||||
int fb_cpp)
|
|
||||||
{
|
{
|
||||||
|
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
|
||||||
|
struct drm_framebuffer *fb = crtc->base.primary->state->fb;
|
||||||
struct drm_mm_node *uninitialized_var(compressed_llb);
|
struct drm_mm_node *uninitialized_var(compressed_llb);
|
||||||
int ret;
|
int size, fb_cpp, ret;
|
||||||
|
|
||||||
|
WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb));
|
||||||
|
|
||||||
|
size = intel_fbc_calculate_cfb_size(crtc, fb);
|
||||||
|
fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0);
|
||||||
|
|
||||||
ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
|
ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
|
||||||
size, fb_cpp);
|
size, fb_cpp);
|
||||||
|
@ -629,8 +623,6 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
|
||||||
dev_priv->mm.stolen_base + compressed_llb->start);
|
dev_priv->mm.stolen_base + compressed_llb->start);
|
||||||
}
|
}
|
||||||
|
|
||||||
dev_priv->fbc.uncompressed_size = size;
|
|
||||||
|
|
||||||
DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
|
DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
|
||||||
dev_priv->fbc.compressed_fb.size,
|
dev_priv->fbc.compressed_fb.size,
|
||||||
dev_priv->fbc.threshold);
|
dev_priv->fbc.threshold);
|
||||||
|
@ -647,18 +639,15 @@ err_llb:
|
||||||
|
|
||||||
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
|
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
|
||||||
{
|
{
|
||||||
if (dev_priv->fbc.uncompressed_size == 0)
|
if (drm_mm_node_allocated(&dev_priv->fbc.compressed_fb))
|
||||||
return;
|
i915_gem_stolen_remove_node(dev_priv,
|
||||||
|
&dev_priv->fbc.compressed_fb);
|
||||||
i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
|
|
||||||
|
|
||||||
if (dev_priv->fbc.compressed_llb) {
|
if (dev_priv->fbc.compressed_llb) {
|
||||||
i915_gem_stolen_remove_node(dev_priv,
|
i915_gem_stolen_remove_node(dev_priv,
|
||||||
dev_priv->fbc.compressed_llb);
|
dev_priv->fbc.compressed_llb);
|
||||||
kfree(dev_priv->fbc.compressed_llb);
|
kfree(dev_priv->fbc.compressed_llb);
|
||||||
}
|
}
|
||||||
|
|
||||||
dev_priv->fbc.uncompressed_size = 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
|
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
|
||||||
|
@ -671,64 +660,6 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
|
||||||
mutex_unlock(&dev_priv->fbc.lock);
|
mutex_unlock(&dev_priv->fbc.lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* For SKL+, the plane source size used by the hardware is based on the value we
|
|
||||||
* write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
|
|
||||||
* we wrote to PIPESRC.
|
|
||||||
*/
|
|
||||||
static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
|
|
||||||
int *width, int *height)
|
|
||||||
{
|
|
||||||
struct intel_plane_state *plane_state =
|
|
||||||
to_intel_plane_state(crtc->base.primary->state);
|
|
||||||
int w, h;
|
|
||||||
|
|
||||||
if (intel_rotation_90_or_270(plane_state->base.rotation)) {
|
|
||||||
w = drm_rect_height(&plane_state->src) >> 16;
|
|
||||||
h = drm_rect_width(&plane_state->src) >> 16;
|
|
||||||
} else {
|
|
||||||
w = drm_rect_width(&plane_state->src) >> 16;
|
|
||||||
h = drm_rect_height(&plane_state->src) >> 16;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (width)
|
|
||||||
*width = w;
|
|
||||||
if (height)
|
|
||||||
*height = h;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc)
|
|
||||||
{
|
|
||||||
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
|
|
||||||
struct drm_framebuffer *fb = crtc->base.primary->fb;
|
|
||||||
int lines;
|
|
||||||
|
|
||||||
intel_fbc_get_plane_source_size(crtc, NULL, &lines);
|
|
||||||
if (INTEL_INFO(dev_priv)->gen >= 7)
|
|
||||||
lines = min(lines, 2048);
|
|
||||||
|
|
||||||
/* Hardware needs the full buffer stride, not just the active area. */
|
|
||||||
return lines * fb->pitches[0];
|
|
||||||
}
|
|
||||||
|
|
||||||
static int intel_fbc_setup_cfb(struct intel_crtc *crtc)
|
|
||||||
{
|
|
||||||
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
|
|
||||||
struct drm_framebuffer *fb = crtc->base.primary->fb;
|
|
||||||
int size, cpp;
|
|
||||||
|
|
||||||
size = intel_fbc_calculate_cfb_size(crtc);
|
|
||||||
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
|
|
||||||
|
|
||||||
if (size <= dev_priv->fbc.uncompressed_size)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
/* Release any current block */
|
|
||||||
__intel_fbc_cleanup_cfb(dev_priv);
|
|
||||||
|
|
||||||
return intel_fbc_alloc_cfb(dev_priv, size, cpp);
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool stride_is_valid(struct drm_i915_private *dev_priv,
|
static bool stride_is_valid(struct drm_i915_private *dev_priv,
|
||||||
unsigned int stride)
|
unsigned int stride)
|
||||||
{
|
{
|
||||||
|
@ -803,47 +734,34 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* __intel_fbc_update - enable/disable FBC as needed, unlocked
|
* __intel_fbc_update - activate/deactivate FBC as needed, unlocked
|
||||||
* @dev_priv: i915 device instance
|
* @crtc: the CRTC that triggered the update
|
||||||
*
|
*
|
||||||
* This function completely reevaluates the status of FBC, then enables,
|
* This function completely reevaluates the status of FBC, then activates,
|
||||||
* disables or maintains it on the same state.
|
* deactivates or maintains it on the same state.
|
||||||
*/
|
*/
|
||||||
static void __intel_fbc_update(struct drm_i915_private *dev_priv)
|
static void __intel_fbc_update(struct intel_crtc *crtc)
|
||||||
{
|
{
|
||||||
struct drm_crtc *drm_crtc = NULL;
|
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
|
||||||
struct intel_crtc *crtc;
|
|
||||||
struct drm_framebuffer *fb;
|
struct drm_framebuffer *fb;
|
||||||
struct drm_i915_gem_object *obj;
|
struct drm_i915_gem_object *obj;
|
||||||
const struct drm_display_mode *adjusted_mode;
|
const struct drm_display_mode *adjusted_mode;
|
||||||
|
|
||||||
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
|
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
|
||||||
|
|
||||||
if (intel_vgpu_active(dev_priv->dev))
|
|
||||||
i915.enable_fbc = 0;
|
|
||||||
|
|
||||||
if (i915.enable_fbc < 0) {
|
|
||||||
set_no_fbc_reason(dev_priv, "disabled per chip default");
|
|
||||||
goto out_disable;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!i915.enable_fbc) {
|
|
||||||
set_no_fbc_reason(dev_priv, "disabled per module param");
|
|
||||||
goto out_disable;
|
|
||||||
}
|
|
||||||
|
|
||||||
drm_crtc = intel_fbc_find_crtc(dev_priv);
|
|
||||||
if (!drm_crtc) {
|
|
||||||
set_no_fbc_reason(dev_priv, "no output");
|
|
||||||
goto out_disable;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!multiple_pipes_ok(dev_priv)) {
|
if (!multiple_pipes_ok(dev_priv)) {
|
||||||
set_no_fbc_reason(dev_priv, "more than one pipe active");
|
set_no_fbc_reason(dev_priv, "more than one pipe active");
|
||||||
goto out_disable;
|
goto out_disable;
|
||||||
}
|
}
|
||||||
|
|
||||||
crtc = to_intel_crtc(drm_crtc);
|
if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc)
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (!crtc_is_valid(crtc)) {
|
||||||
|
set_no_fbc_reason(dev_priv, "no output");
|
||||||
|
goto out_disable;
|
||||||
|
}
|
||||||
|
|
||||||
fb = crtc->base.primary->fb;
|
fb = crtc->base.primary->fb;
|
||||||
obj = intel_fb_obj(fb);
|
obj = intel_fb_obj(fb);
|
||||||
adjusted_mode = &crtc->config->base.adjusted_mode;
|
adjusted_mode = &crtc->config->base.adjusted_mode;
|
||||||
|
@ -859,12 +777,6 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
|
||||||
goto out_disable;
|
goto out_disable;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
|
|
||||||
crtc->plane != PLANE_A) {
|
|
||||||
set_no_fbc_reason(dev_priv, "FBC unsupported on plane");
|
|
||||||
goto out_disable;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* The use of a CPU fence is mandatory in order to detect writes
|
/* The use of a CPU fence is mandatory in order to detect writes
|
||||||
* by the CPU to the scanout and trigger updates to the FBC.
|
* by the CPU to the scanout and trigger updates to the FBC.
|
||||||
*/
|
*/
|
||||||
|
@ -897,8 +809,19 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
|
||||||
goto out_disable;
|
goto out_disable;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (intel_fbc_setup_cfb(crtc)) {
|
/* It is possible for the required CFB size change without a
|
||||||
set_no_fbc_reason(dev_priv, "not enough stolen memory");
|
* crtc->disable + crtc->enable since it is possible to change the
|
||||||
|
* stride without triggering a full modeset. Since we try to
|
||||||
|
* over-allocate the CFB, there's a chance we may keep FBC enabled even
|
||||||
|
* if this happens, but if we exceed the current CFB size we'll have to
|
||||||
|
* disable FBC. Notice that it would be possible to disable FBC, wait
|
||||||
|
* for a frame, free the stolen node, then try to reenable FBC in case
|
||||||
|
* we didn't get any invalidate/deactivate calls, but this would require
|
||||||
|
* a lot of tracking just for a specific case. If we conclude it's an
|
||||||
|
* important case, we can implement it later. */
|
||||||
|
if (intel_fbc_calculate_cfb_size(crtc, fb) >
|
||||||
|
dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) {
|
||||||
|
set_no_fbc_reason(dev_priv, "CFB requirements changed");
|
||||||
goto out_disable;
|
goto out_disable;
|
||||||
}
|
}
|
||||||
|
|
||||||
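The new check above compares the CFB size the current framebuffer would need against what was reserved at enable time, scaled by the compression threshold, instead of reallocating via the removed intel_fbc_setup_cfb(). Below is a standalone sketch of that comparison (not i915 code), reusing the lines-times-stride sizing and the gen7+ 2048-line cap from the removed intel_fbc_calculate_cfb_size(); the concrete numbers are made up for illustration.

/* Standalone sketch of the "CFB requirements changed" test. */
#include <stdbool.h>
#include <stdio.h>

/* Hardware needs the full buffer stride, not just the active area. */
static unsigned int cfb_size(unsigned int lines, unsigned int stride_bytes,
			     bool cap_at_2048)
{
	if (cap_at_2048 && lines > 2048)
		lines = 2048;
	return lines * stride_bytes;
}

int main(void)
{
	/* Illustrative numbers: 1080 lines, 7680-byte stride, threshold 1. */
	unsigned int allocated = cfb_size(1080, 7680, true); /* sized at enable time */
	unsigned int threshold = 1;

	/* Later the stride grows without a full modeset (e.g. a new framebuffer). */
	unsigned int needed = cfb_size(1080, 8192, true);

	if (needed > allocated * threshold)
		printf("CFB requirements changed: need %u, have %u -> deactivate FBC\n",
		       needed, allocated);
	else
		printf("still fits: need %u, have %u\n", needed, allocated);
	return 0;
}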
@@ -909,10 +832,11 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
 */
 if (dev_priv->fbc.crtc == crtc &&
 dev_priv->fbc.fb_id == fb->base.id &&
-dev_priv->fbc.y == crtc->base.y)
+dev_priv->fbc.y == crtc->base.y &&
+dev_priv->fbc.active)
 return;
 
-if (intel_fbc_enabled(dev_priv)) {
+if (intel_fbc_is_active(dev_priv)) {
 /* We update FBC along two paths, after changing fb/crtc
 * configuration (modeswitching) and after page-flipping
 * finishes. For the latter, we know that not only did
@@ -936,36 +860,37 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
 * disabling paths we do need to wait for a vblank at
 * some point. And we wait before enabling FBC anyway.
 */
-DRM_DEBUG_KMS("disabling active FBC for update\n");
-__intel_fbc_disable(dev_priv);
+DRM_DEBUG_KMS("deactivating FBC for update\n");
+__intel_fbc_deactivate(dev_priv);
 }
 
-intel_fbc_schedule_enable(crtc);
+intel_fbc_schedule_activation(crtc);
 dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)";
 return;
 
 out_disable:
 /* Multiple disables should be harmless */
-if (intel_fbc_enabled(dev_priv)) {
-DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
-__intel_fbc_disable(dev_priv);
+if (intel_fbc_is_active(dev_priv)) {
+DRM_DEBUG_KMS("unsupported config, deactivating FBC\n");
+__intel_fbc_deactivate(dev_priv);
 }
-__intel_fbc_cleanup_cfb(dev_priv);
 }
 
 /*
-* intel_fbc_update - enable/disable FBC as needed
-* @dev_priv: i915 device instance
+* intel_fbc_update - activate/deactivate FBC as needed
+* @crtc: the CRTC that triggered the update
 *
-* This function reevaluates the overall state and enables or disables FBC.
+* This function reevaluates the overall state and activates or deactivates FBC.
 */
-void intel_fbc_update(struct drm_i915_private *dev_priv)
+void intel_fbc_update(struct intel_crtc *crtc)
 {
+struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
 if (!fbc_supported(dev_priv))
 return;
 
 mutex_lock(&dev_priv->fbc.lock);
-__intel_fbc_update(dev_priv);
+__intel_fbc_update(crtc);
 mutex_unlock(&dev_priv->fbc.lock);
 }
 
@@ -985,16 +910,13 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
 
 if (dev_priv->fbc.enabled)
 fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
-else if (dev_priv->fbc.fbc_work)
-fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
-dev_priv->fbc.fbc_work->crtc->pipe);
 else
 fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
 
 dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
 
 if (dev_priv->fbc.busy_bits)
-__intel_fbc_disable(dev_priv);
+__intel_fbc_deactivate(dev_priv);
 
 mutex_unlock(&dev_priv->fbc.lock);
 }
@@ -1012,14 +934,139 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
 
 dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
 
-if (!dev_priv->fbc.busy_bits) {
-__intel_fbc_disable(dev_priv);
-__intel_fbc_update(dev_priv);
+if (!dev_priv->fbc.busy_bits && dev_priv->fbc.enabled) {
+if (origin != ORIGIN_FLIP && dev_priv->fbc.active) {
+intel_fbc_recompress(dev_priv);
+} else {
+__intel_fbc_deactivate(dev_priv);
+__intel_fbc_update(dev_priv->fbc.crtc);
+}
 }
 
 mutex_unlock(&dev_priv->fbc.lock);
 }
 
+/**
+* intel_fbc_enable: tries to enable FBC on the CRTC
+* @crtc: the CRTC
+*
+* This function checks if it's possible to enable FBC on the following CRTC,
+* then enables it. Notice that it doesn't activate FBC.
+*/
+void intel_fbc_enable(struct intel_crtc *crtc)
+{
+struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+if (!fbc_supported(dev_priv))
+return;
+
+mutex_lock(&dev_priv->fbc.lock);
+
+if (dev_priv->fbc.enabled) {
+WARN_ON(dev_priv->fbc.crtc == crtc);
+goto out;
+}
+
+WARN_ON(dev_priv->fbc.active);
+WARN_ON(dev_priv->fbc.crtc != NULL);
+
+if (intel_vgpu_active(dev_priv->dev)) {
+set_no_fbc_reason(dev_priv, "VGPU is active");
+goto out;
+}
+
+if (i915.enable_fbc < 0) {
+set_no_fbc_reason(dev_priv, "disabled per chip default");
+goto out;
+}
+
+if (!i915.enable_fbc) {
+set_no_fbc_reason(dev_priv, "disabled per module param");
+goto out;
+}
+
+if (!crtc_can_fbc(crtc)) {
+set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC");
+goto out;
+}
+
+if (intel_fbc_alloc_cfb(crtc)) {
+set_no_fbc_reason(dev_priv, "not enough stolen memory");
+goto out;
+}
+
+DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+dev_priv->fbc.no_fbc_reason = "FBC enabled but not active yet\n";
+
+dev_priv->fbc.enabled = true;
+dev_priv->fbc.crtc = crtc;
+out:
+mutex_unlock(&dev_priv->fbc.lock);
+}
+
+/**
+* __intel_fbc_disable - disable FBC
+* @dev_priv: i915 device instance
+*
+* This is the low level function that actually disables FBC. Callers should
+* grab the FBC lock.
+*/
+static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+struct intel_crtc *crtc = dev_priv->fbc.crtc;
+
+WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+WARN_ON(!dev_priv->fbc.enabled);
+WARN_ON(dev_priv->fbc.active);
+assert_pipe_disabled(dev_priv, crtc->pipe);
+
+DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+
+__intel_fbc_cleanup_cfb(dev_priv);
+
+dev_priv->fbc.enabled = false;
+dev_priv->fbc.crtc = NULL;
+}
+
+/**
+* intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+* @crtc: the CRTC
+*
+* This function disables FBC if it's associated with the provided CRTC.
+*/
+void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+{
+struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+if (!fbc_supported(dev_priv))
+return;
+
+mutex_lock(&dev_priv->fbc.lock);
+if (dev_priv->fbc.crtc == crtc) {
+WARN_ON(!dev_priv->fbc.enabled);
+WARN_ON(dev_priv->fbc.active);
+__intel_fbc_disable(dev_priv);
+}
+mutex_unlock(&dev_priv->fbc.lock);
+}
+
+/**
+* intel_fbc_disable - globally disable FBC
+* @dev_priv: i915 device instance
+*
+* This function disables FBC regardless of which CRTC is associated with it.
+*/
+void intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+if (!fbc_supported(dev_priv))
+return;
+
+mutex_lock(&dev_priv->fbc.lock);
+if (dev_priv->fbc.enabled)
+__intel_fbc_disable(dev_priv);
+mutex_unlock(&dev_priv->fbc.lock);
+}
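The block above introduces the enable/activate split: intel_fbc_enable() only claims the CRTC and allocates the CFB, while activation and deactivation toggle the hardware without touching that allocation, and disable tears everything down. Below is a simplified standalone sketch of the resulting two-level state machine (not i915 code; the printf calls stand in for the hardware vfuncs and the field names are illustrative).

/* Standalone sketch of the FBC enabled-vs-active split. */
#include <stdbool.h>
#include <stdio.h>

struct fbc_state {
	bool enabled;  /* software state: CFB allocated, bound to a CRTC */
	bool active;   /* hardware compression currently running */
};

static void fbc_enable(struct fbc_state *fbc)   { fbc->enabled = true; }

static void fbc_activate(struct fbc_state *fbc)
{
	if (fbc->enabled && !fbc->active) {
		fbc->active = true;
		printf("hw: start compressing\n");
	}
}

static void fbc_deactivate(struct fbc_state *fbc)
{
	if (fbc->active) {
		fbc->active = false;
		printf("hw: stop compressing\n");
	}
}

static void fbc_disable(struct fbc_state *fbc)
{
	fbc_deactivate(fbc);
	fbc->enabled = false;  /* also the point where the CFB would be freed */
}

int main(void)
{
	struct fbc_state fbc = { false, false };

	fbc_enable(&fbc);      /* modeset: pick the CRTC, allocate the CFB */
	fbc_activate(&fbc);    /* delayed work: turn the hardware on */
	fbc_deactivate(&fbc);  /* page flip: stop compression, stay enabled */
	fbc_activate(&fbc);    /* flush: re-activate without reallocating */
	fbc_disable(&fbc);     /* CRTC disable: tear everything down */
	return 0;
}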
 /**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
@@ -1030,8 +1077,11 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
 {
 enum pipe pipe;
 
+INIT_WORK(&dev_priv->fbc.work.work, intel_fbc_work_fn);
 mutex_init(&dev_priv->fbc.lock);
 dev_priv->fbc.enabled = false;
+dev_priv->fbc.active = false;
+dev_priv->fbc.work.scheduled = false;
 
 if (!HAS_FBC(dev_priv)) {
 dev_priv->fbc.no_fbc_reason = "unsupported by this chipset";
@@ -1047,29 +1097,29 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
 }
 
 if (INTEL_INFO(dev_priv)->gen >= 7) {
-dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
-dev_priv->fbc.enable_fbc = gen7_fbc_enable;
-dev_priv->fbc.disable_fbc = ilk_fbc_disable;
+dev_priv->fbc.is_active = ilk_fbc_is_active;
+dev_priv->fbc.activate = gen7_fbc_activate;
+dev_priv->fbc.deactivate = ilk_fbc_deactivate;
 } else if (INTEL_INFO(dev_priv)->gen >= 5) {
-dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
-dev_priv->fbc.enable_fbc = ilk_fbc_enable;
-dev_priv->fbc.disable_fbc = ilk_fbc_disable;
+dev_priv->fbc.is_active = ilk_fbc_is_active;
+dev_priv->fbc.activate = ilk_fbc_activate;
+dev_priv->fbc.deactivate = ilk_fbc_deactivate;
 } else if (IS_GM45(dev_priv)) {
-dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
-dev_priv->fbc.enable_fbc = g4x_fbc_enable;
-dev_priv->fbc.disable_fbc = g4x_fbc_disable;
+dev_priv->fbc.is_active = g4x_fbc_is_active;
+dev_priv->fbc.activate = g4x_fbc_activate;
+dev_priv->fbc.deactivate = g4x_fbc_deactivate;
 } else {
-dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
-dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
-dev_priv->fbc.disable_fbc = i8xx_fbc_disable;
+dev_priv->fbc.is_active = i8xx_fbc_is_active;
+dev_priv->fbc.activate = i8xx_fbc_activate;
+dev_priv->fbc.deactivate = i8xx_fbc_deactivate;
 
 /* This value was pulled out of someone's hat */
 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
 }
 
 /* We still don't have any sort of hardware state readout for FBC, so
-* disable it in case the BIOS enabled it to make sure software matches
-* the hardware state. */
-if (dev_priv->fbc.fbc_enabled(dev_priv))
-dev_priv->fbc.disable_fbc(dev_priv);
+* deactivate it in case the BIOS activated it to make sure software
+* matches the hardware state. */
+if (dev_priv->fbc.is_active(dev_priv))
+dev_priv->fbc.deactivate(dev_priv);
 }
@@ -128,9 +128,9 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
 DE_PIPEB_FIFO_UNDERRUN;
 
 if (enable)
-ironlake_enable_display_irq(dev_priv, bit);
+ilk_enable_display_irq(dev_priv, bit);
 else
-ironlake_disable_display_irq(dev_priv, bit);
+ilk_disable_display_irq(dev_priv, bit);
 }
 
 static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
@@ -161,9 +161,9 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
 if (!ivb_can_enable_err_int(dev))
 return;
 
-ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
 } else {
-ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
 
 if (old &&
 I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
@@ -178,14 +178,10 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
 
-assert_spin_locked(&dev_priv->irq_lock);
-
 if (enable)
-dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
+bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
 else
-dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
-I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
-POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
 }
 
 static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -42,8 +42,6 @@ struct i915_guc_client {
 
 uint32_t wq_offset;
 uint32_t wq_size;
 
-spinlock_t wq_lock; /* Protects all data below */
 uint32_t wq_tail;
 
 /* GuC submission statistics & status */
@@ -95,8 +93,6 @@ struct intel_guc {
 
 struct i915_guc_client *execbuf_client;
 
-spinlock_t host2guc_lock; /* Protects all data below */
-
 DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS);
 uint32_t db_cacheline; /* Cyclic counter mod pagesize */
@@ -169,10 +169,10 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
 POSTING_READ(VIDEO_DIP_CTL);
 }
 
-static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
+static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
+const struct intel_crtc_state *pipe_config)
 {
-struct drm_device *dev = encoder->dev;
-struct drm_i915_private *dev_priv = dev->dev_private;
+struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 u32 val = I915_READ(VIDEO_DIP_CTL);
 
@@ -225,13 +225,13 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
 POSTING_READ(reg);
 }
 
-static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
+static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
+const struct intel_crtc_state *pipe_config)
 {
-struct drm_device *dev = encoder->dev;
-struct drm_i915_private *dev_priv = dev->dev_private;
-struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
 u32 val = I915_READ(reg);
 
 if ((val & VIDEO_DIP_ENABLE) == 0)
@@ -287,12 +287,12 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
 POSTING_READ(reg);
 }
 
-static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
+static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
+const struct intel_crtc_state *pipe_config)
 {
-struct drm_device *dev = encoder->dev;
-struct drm_i915_private *dev_priv = dev->dev_private;
-struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-u32 val = I915_READ(TVIDEO_DIP_CTL(intel_crtc->pipe));
+struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
 
 if ((val & VIDEO_DIP_ENABLE) == 0)
 return false;
 
@@ -341,13 +341,13 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
 POSTING_READ(reg);
 }
 
-static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
+static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
+const struct intel_crtc_state *pipe_config)
 {
-struct drm_device *dev = encoder->dev;
-struct drm_i915_private *dev_priv = dev->dev_private;
-struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(intel_crtc->pipe));
+enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
 
 if ((val & VIDEO_DIP_ENABLE) == 0)
 return false;
 
@@ -398,12 +398,11 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
 POSTING_READ(ctl_reg);
 }
 
-static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
+static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
+const struct intel_crtc_state *pipe_config)
 {
-struct drm_device *dev = encoder->dev;
-struct drm_i915_private *dev_priv = dev->dev_private;
-struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder));
+struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
 
 return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
 VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
@@ -927,7 +926,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 if (tmp & HDMI_MODE_SELECT_HDMI)
 pipe_config->has_hdmi_sink = true;
 
-if (intel_hdmi->infoframe_enabled(&encoder->base))
+if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
 pipe_config->has_infoframe = true;
 
 if (tmp & SDVO_AUDIO_ENABLE)
@@ -472,9 +472,7 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
 }
 
 static int
-gmbus_xfer(struct i2c_adapter *adapter,
-struct i2c_msg *msgs,
-int num)
+do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
 {
 struct intel_gmbus *bus = container_of(adapter,
 struct intel_gmbus,
@@ -483,14 +481,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
 int i = 0, inc, try = 0;
 int ret = 0;
 
-intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
-mutex_lock(&dev_priv->gmbus_mutex);
-
-if (bus->force_bit) {
-ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
-goto out;
-}
-
 retry:
 I915_WRITE(GMBUS0, bus->reg0);
 
@@ -505,17 +495,13 @@ retry:
 ret = gmbus_xfer_write(dev_priv, &msgs[i]);
 }
 
+if (!ret)
+ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
+GMBUS_HW_WAIT_EN);
 if (ret == -ETIMEDOUT)
 goto timeout;
-if (ret == -ENXIO)
+else if (ret)
 goto clear_err;
-
-ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
-GMBUS_HW_WAIT_EN);
-if (ret == -ENXIO)
-goto clear_err;
-if (ret)
-goto timeout;
 }
 
 /* Generate a STOP condition on the bus. Note that gmbus can't generata
@@ -589,13 +575,34 @@ timeout:
 bus->adapter.name, bus->reg0 & 0xff);
 I915_WRITE(GMBUS0, 0);
 
-/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
+/*
+* Hardware may not support GMBUS over these pins? Try GPIO bitbanging
+* instead. Use EAGAIN to have i2c core retry.
+*/
 bus->force_bit = 1;
-ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+ret = -EAGAIN;
 
 out:
-mutex_unlock(&dev_priv->gmbus_mutex);
+return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
+{
+struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
+adapter);
+struct drm_i915_private *dev_priv = bus->dev_priv;
+int ret;
+
+intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+mutex_lock(&dev_priv->gmbus_mutex);
+
+if (bus->force_bit)
+ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+else
+ret = do_gmbus_xfer(adapter, msgs, num);
+
+mutex_unlock(&dev_priv->gmbus_mutex);
 intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
 
 return ret;
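The refactor above turns gmbus_xfer() into a wrapper that takes the power reference and gmbus_mutex and then dispatches either to bit-banging or to the unlocked do_gmbus_xfer(), whose timeout path now arms force_bit and returns -EAGAIN so the I2C core retries. Below is a rough userspace sketch of that wrapper-plus-fallback shape (not i915 code; pthread_mutex stands in for gmbus_mutex and the transfer bodies are stubs).

/* Standalone sketch of the locked wrapper around an unlocked bus worker. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t bus_lock = PTHREAD_MUTEX_INITIALIZER;
static bool force_bit;  /* set once the fast path proved unreliable */

static int do_bus_xfer(void)      /* unlocked "GMBUS" worker */
{
	/* Simulate the timeout path: arm the fallback and ask the core to retry. */
	force_bit = true;
	return -EAGAIN;
}

static int bit_banged_xfer(void)  /* GPIO bit-banging fallback */
{
	return 0;
}

static int bus_xfer(void)         /* the locked wrapper */
{
	int ret;

	pthread_mutex_lock(&bus_lock);
	if (force_bit)
		ret = bit_banged_xfer();
	else
		ret = do_bus_xfer();
	pthread_mutex_unlock(&bus_lock);
	return ret;
}

int main(void)
{
	printf("first attempt:  %d (fast path timed out, retry requested)\n", bus_xfer());
	printf("second attempt: %d (bit-banged)\n", bus_xfer());
	return 0;
}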
@@ -1263,6 +1263,14 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
 }
 #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
 
+/*
+* BXT: PWM clock frequency = 19.2 MHz.
+*/
+static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+return KHz(19200) / pwm_freq_hz;
+}
+
 /*
 * SPT: This value represents the period of the PWM stream in clock periods
 * multiplied by 16 (default increment) or 128 (alternate increment selected in
@@ -1300,7 +1308,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 else
 mul = 128;
 
-if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+if (HAS_PCH_LPT_H(dev_priv))
 clock = MHz(135); /* LPT:H */
 else
 clock = MHz(24); /* LPT:LP */
@@ -1335,22 +1343,28 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 int clock;
 
 if (IS_PINEVIEW(dev))
-clock = intel_hrawclk(dev);
+clock = MHz(intel_hrawclk(dev));
 else
-clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
+clock = 1000 * dev_priv->cdclk_freq;
 
 return clock / (pwm_freq_hz * 32);
 }
 
 /*
 * Gen4: This value represents the period of the PWM stream in display core
-* clocks multiplied by 128.
+* clocks ([DevCTG] HRAW clocks) multiplied by 128.
+*
 */
 static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
 struct drm_device *dev = connector->base.dev;
 struct drm_i915_private *dev_priv = dev->dev_private;
-int clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
+int clock;
+
+if (IS_G4X(dev_priv))
+clock = MHz(intel_hrawclk(dev));
+else
+clock = 1000 * dev_priv->cdclk_freq;
 
 return clock / (pwm_freq_hz * 128);
 }
@@ -1385,14 +1399,18 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
 u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
 u32 pwm;
 
-if (!pwm_freq_hz) {
-DRM_DEBUG_KMS("backlight frequency not specified in VBT\n");
+if (!panel->backlight.hz_to_pwm) {
+DRM_DEBUG_KMS("backlight frequency conversion not supported\n");
 return 0;
 }
 
-if (!panel->backlight.hz_to_pwm) {
-DRM_DEBUG_KMS("backlight frequency setting from VBT currently not supported on this platform\n");
-return 0;
+if (pwm_freq_hz) {
+DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n",
+pwm_freq_hz);
+} else {
+pwm_freq_hz = 200;
+DRM_DEBUG_KMS("default backlight frequency %u Hz\n",
+pwm_freq_hz);
 }
 
 pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
@@ -1401,8 +1419,6 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
 return 0;
 }
 
-DRM_DEBUG_KMS("backlight frequency %u Hz from VBT\n", pwm_freq_hz);
-
 return pwm;
 }
 
@@ -1750,6 +1766,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
 panel->backlight.disable = bxt_disable_backlight;
 panel->backlight.set = bxt_set_backlight;
 panel->backlight.get = bxt_get_backlight;
+panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
 } else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) {
 panel->backlight.setup = lpt_setup_backlight;
 panel->backlight.enable = lpt_enable_backlight;
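For the backlight changes above, the new BXT conversion is plain division of the fixed 19.2 MHz PWM clock by the requested frequency, and 200 Hz is the fallback now used when the VBT gives no frequency. A quick standalone arithmetic check (not i915 code; KHz() is expanded by hand):

#include <stdio.h>

static unsigned int bxt_hz_to_pwm(unsigned int pwm_freq_hz)
{
	return 19200 * 1000 / pwm_freq_hz;  /* KHz(19200) / pwm_freq_hz */
}

int main(void)
{
	printf("200 Hz  -> PWM period %u\n", bxt_hz_to_pwm(200));   /* 96000 */
	printf("1000 Hz -> PWM period %u\n", bxt_hz_to_pwm(1000));  /* 19200 */
	return 0;
}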
@@ -66,6 +66,14 @@ static void bxt_init_clock_gating(struct drm_device *dev)
 */
 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
 GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
+
+/*
+* Wa: Backlight PWM may stop in the asserted state, causing backlight
+* to stay fully on.
+*/
+if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
+I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+PWM1_GATING_DIS | PWM2_GATING_DIS);
 }
 
 static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -2422,7 +2430,7 @@ static void ilk_wm_merge(struct drm_device *dev,
 * enabled sometime later.
 */
 if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
-intel_fbc_enabled(dev_priv)) {
+intel_fbc_is_active(dev_priv)) {
 for (level = 2; level <= max_level; level++) {
 struct intel_wm_level *wm = &merged->wm[level];
 
@@ -191,9 +191,6 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 
 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
-drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
-
 /* Enable AUX frame sync at sink */
 if (dev_priv->psr.aux_frame_sync)
 drm_dp_dpcd_writeb(&intel_dp->aux,
@@ -414,9 +411,14 @@ void intel_psr_enable(struct intel_dp *intel_dp)
 skl_psr_setup_su_vsc(intel_dp);
 }
 
-/* Avoid continuous PSR exit by masking memup and hpd */
+/*
+* Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
+* Also mask LPSP to avoid dependency on other drivers that
+* might block runtime_pm besides preventing other hw tracking
+* issues now we can rely on frontbuffer tracking.
+*/
 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
-EDP_PSR_DEBUG_MASK_HPD);
+EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
 /* Enable PSR on the panel */
 hsw_psr_enable_sink(intel_dp);
@@ -522,11 +524,15 @@ void intel_psr_disable(struct intel_dp *intel_dp)
 return;
 }
 
+/* Disable PSR on Source */
 if (HAS_DDI(dev))
 hsw_psr_disable(intel_dp);
 else
 vlv_psr_disable(intel_dp);
 
+/* Disable PSR on Sink */
+drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+
 dev_priv->psr.enabled = NULL;
 mutex_unlock(&dev_priv->psr.lock);
 
@@ -737,25 +743,9 @@ void intel_psr_flush(struct drm_device *dev,
 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
-if (HAS_DDI(dev)) {
-/*
-* By definition every flush should mean invalidate + flush,
-* however on core platforms let's minimize the
-* disable/re-enable so we can avoid the invalidate when flip
-* originated the flush.
-*/
-if (frontbuffer_bits && origin != ORIGIN_FLIP)
-intel_psr_exit(dev);
-} else {
-/*
-* On Valleyview and Cherryview we don't use hardware tracking
-* so any plane updates or cursor moves don't result in a PSR
-* invalidating. Which means we need to manually fake this in
-* software for all flushes.
-*/
-if (frontbuffer_bits)
-intel_psr_exit(dev);
-}
+/* By definition flush = invalidate + flush */
+if (frontbuffer_bits)
+intel_psr_exit(dev);
 
 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
 if (!work_busy(&dev_priv->psr.work.work))
@@ -65,6 +65,72 @@
 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
 int power_well_id);
 
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain)
+{
+switch (domain) {
+case POWER_DOMAIN_PIPE_A:
+return "PIPE_A";
+case POWER_DOMAIN_PIPE_B:
+return "PIPE_B";
+case POWER_DOMAIN_PIPE_C:
+return "PIPE_C";
+case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+return "PIPE_A_PANEL_FITTER";
+case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+return "PIPE_B_PANEL_FITTER";
+case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+return "PIPE_C_PANEL_FITTER";
+case POWER_DOMAIN_TRANSCODER_A:
+return "TRANSCODER_A";
+case POWER_DOMAIN_TRANSCODER_B:
+return "TRANSCODER_B";
+case POWER_DOMAIN_TRANSCODER_C:
+return "TRANSCODER_C";
+case POWER_DOMAIN_TRANSCODER_EDP:
+return "TRANSCODER_EDP";
+case POWER_DOMAIN_PORT_DDI_A_LANES:
+return "PORT_DDI_A_LANES";
+case POWER_DOMAIN_PORT_DDI_B_LANES:
+return "PORT_DDI_B_LANES";
+case POWER_DOMAIN_PORT_DDI_C_LANES:
+return "PORT_DDI_C_LANES";
+case POWER_DOMAIN_PORT_DDI_D_LANES:
+return "PORT_DDI_D_LANES";
+case POWER_DOMAIN_PORT_DDI_E_LANES:
+return "PORT_DDI_E_LANES";
+case POWER_DOMAIN_PORT_DSI:
+return "PORT_DSI";
+case POWER_DOMAIN_PORT_CRT:
+return "PORT_CRT";
+case POWER_DOMAIN_PORT_OTHER:
+return "PORT_OTHER";
+case POWER_DOMAIN_VGA:
+return "VGA";
+case POWER_DOMAIN_AUDIO:
+return "AUDIO";
+case POWER_DOMAIN_PLLS:
+return "PLLS";
+case POWER_DOMAIN_AUX_A:
+return "AUX_A";
+case POWER_DOMAIN_AUX_B:
+return "AUX_B";
+case POWER_DOMAIN_AUX_C:
+return "AUX_C";
+case POWER_DOMAIN_AUX_D:
+return "AUX_D";
+case POWER_DOMAIN_GMBUS:
+return "GMBUS";
+case POWER_DOMAIN_INIT:
+return "INIT";
+case POWER_DOMAIN_MODESET:
+return "MODESET";
+default:
+MISSING_CASE(domain);
+return "?";
+}
+}
+
 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
 struct i915_power_well *power_well)
 {
@@ -1433,11 +1499,15 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 
 mutex_lock(&power_domains->lock);
 
-WARN_ON(!power_domains->domain_use_count[domain]);
+WARN(!power_domains->domain_use_count[domain],
+"Use count on domain %s is already zero\n",
+intel_display_power_domain_str(domain));
 power_domains->domain_use_count[domain]--;
 
 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
-WARN_ON(!power_well->count);
+WARN(!power_well->count,
+"Use count on power well %s is already zero",
+power_well->name);
 
 if (!--power_well->count)
 intel_power_well_disable(dev_priv, power_well);
@@ -1841,7 +1911,7 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
 if (disable_power_well >= 0)
 return !!disable_power_well;
 
-if (IS_SKYLAKE(dev_priv)) {
+if (IS_BROXTON(dev_priv)) {
 DRM_DEBUG_KMS("Disabling display power well support\n");
 return 0;
 }
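The helper added above exists so the use-count underflow warning can name the offending power domain instead of only tripping a bare WARN_ON. Below is a tiny standalone sketch of the same enum-to-string pattern feeding a warning message (not i915 code; the three-entry enum and the use-count array are illustrative).

/* Standalone sketch of the enum-to-string helper used in the new WARN. */
#include <stdio.h>

enum power_domain { DOMAIN_PIPE_A, DOMAIN_TRANSCODER_A, DOMAIN_AUX_A };

static const char *power_domain_str(enum power_domain d)
{
	switch (d) {
	case DOMAIN_PIPE_A:       return "PIPE_A";
	case DOMAIN_TRANSCODER_A: return "TRANSCODER_A";
	case DOMAIN_AUX_A:        return "AUX_A";
	default:                  return "?";
	}
}

int main(void)
{
	int use_count[3] = { 0, 1, 2 };
	enum power_domain d = DOMAIN_PIPE_A;

	if (!use_count[d])
		fprintf(stderr, "Use count on domain %s is already zero\n",
			power_domain_str(d));
	return 0;
}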
@@ -279,12 +279,19 @@
 #define INTEL_SKL_GT3_IDS(info) \
 INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
 INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
-INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ \
+INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */
 
-#define INTEL_SKL_IDS(info) \
+#define INTEL_SKL_GT4_IDS(info) \
+INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \
+INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \
+INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \
+INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4 */
+
+#define INTEL_SKL_IDS(info) \
 INTEL_SKL_GT1_IDS(info), \
 INTEL_SKL_GT2_IDS(info), \
-INTEL_SKL_GT3_IDS(info)
+INTEL_SKL_GT3_IDS(info), \
+INTEL_SKL_GT4_IDS(info)
 
 #define INTEL_BXT_IDS(info) \
 INTEL_VGA_DEVICE(0x0A84, info), \