Merge tag 'drm-intel-next-2014-05-06' of git://anongit.freedesktop.org/drm-intel into drm-next
- ring init improvements (Chris)
- vebox2 support (Zhao Yakui)
- more prep work for runtime pm on Baytrail (Imre)
- eDRAM support for BDW (Ben)
- prep work for userptr support (Chris)
- first parts of the encoder->mode_set callback removal (Daniel)
- 64b reloc fixes (Ben)
- first part of atomic plane updates (Ville)

* tag 'drm-intel-next-2014-05-06' of git://anongit.freedesktop.org/drm-intel: (75 commits)
  drm/i915: Remove useless checks from primary enable/disable
  drm/i915: Merge LP1+ watermarks in safer way
  drm/i915: Make sure computed watermarks never overflow the registers
  drm/i915: Add pipe update trace points
  drm/i915: Perform primary enable/disable atomically with sprite updates
  drm/i915: Make sprite updates atomic
  drm/i915: Support 64b relocations
  drm/i915: Support 64b execbuf
  drm/i915/sdvo: Remove ->mode_set callback
  drm/i915/crt: Remove ->mode_set callback
  drm/i915/tv: Remove ->mode_set callback
  drm/i915/tv: Rip out pipe-disabling nonsense from ->mode_set
  drm/i915/tv: De-magic device check
  drm/i915/tv: extract set_color_conversion
  drm/i915/tv: extract set_tv_mode_timings
  drm/i915/dvo: Remove ->mode_set callback
  drm/i915: Make encoder->mode_set callbacks optional
  drm/i915: Make primary_enabled match the actual hardware state
  drm/i915: Move ring_begin to signal()
  drm/i915: Virtualize the ringbuffer signal func
  ...
commit e5daa1ddc1
Documentation/DocBook/drm.tmpl

@@ -2942,6 +2942,11 @@ int num_ioctls;</synopsis>
       This section covers all things related to the GEM implementation in the
       i915 driver.
     </para>
+    <sect2>
+      <title>Batchbuffer Parsing</title>
+!Pdrivers/gpu/drm/i915/i915_cmd_parser.c batch buffer command parser
+!Idrivers/gpu/drm/i915/i915_cmd_parser.c
+    </sect2>
   </sect1>
 </chapter>
 </part>
drivers/gpu/drm/i915/i915_cmd_parser.c

@@ -28,7 +28,7 @@
 #include "i915_drv.h"

 /**
- * DOC: i915 batch buffer command parser
+ * DOC: batch buffer command parser
  *
  * Motivation:
  * Certain OpenGL features (e.g. transform feedback, performance monitoring)

@@ -919,7 +919,7 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
 			DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
 					 *cmd,
 					 length,
-					 (unsigned long)(batch_end - cmd));
+					 batch_end - cmd);
 			ret = -EINVAL;
 			break;
 		}
drivers/gpu/drm/i915/i915_debugfs.c

@@ -1239,9 +1239,13 @@ static int vlv_drpc_info(struct seq_file *m)
 	u32 rpmodectl1, rcctl1;
 	unsigned fw_rendercount = 0, fw_mediacount = 0;

+	intel_runtime_pm_get(dev_priv);
+
 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
 	rcctl1 = I915_READ(GEN6_RC_CONTROL);

+	intel_runtime_pm_put(dev_priv);
+
 	seq_printf(m, "Video Turbo Mode: %s\n",
 		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
 	seq_printf(m, "Turbo enabled: %s\n",

@@ -1261,6 +1265,11 @@ static int vlv_drpc_info(struct seq_file *m)
 		   (I915_READ(VLV_GTLC_PW_STATUS) &
 		    VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

+	seq_printf(m, "Render RC6 residency since boot: %u\n",
+		   I915_READ(VLV_GT_RENDER_RC6));
+	seq_printf(m, "Media RC6 residency since boot: %u\n",
+		   I915_READ(VLV_GT_MEDIA_RC6));
+
 	spin_lock_irq(&dev_priv->uncore.lock);
 	fw_rendercount = dev_priv->uncore.fw_rendercount;
 	fw_mediacount = dev_priv->uncore.fw_mediacount;

@@ -1689,6 +1698,9 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	}

 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
+		if (ctx->obj == NULL)
+			continue;
+
 		seq_puts(m, "HW context ");
 		describe_ctx(m, ctx);
 		for_each_ring(ring, dev_priv, i)

@@ -1898,53 +1910,6 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 	return 0;
 }

-static int i915_dpio_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	if (!IS_VALLEYVIEW(dev)) {
-		seq_puts(m, "unsupported\n");
-		return 0;
-	}
-
-	ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
-	if (ret)
-		return ret;
-
-	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
-
-	seq_printf(m, "DPIO PLL DW3 CH0 : 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(0)));
-	seq_printf(m, "DPIO PLL DW3 CH1: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(1)));
-
-	seq_printf(m, "DPIO PLL DW5 CH0: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(0)));
-	seq_printf(m, "DPIO PLL DW5 CH1: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(1)));
-
-	seq_printf(m, "DPIO PLL DW7 CH0: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(0)));
-	seq_printf(m, "DPIO PLL DW7 CH1: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(1)));
-
-	seq_printf(m, "DPIO PLL DW10 CH0: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(0)));
-	seq_printf(m, "DPIO PLL DW10 CH1: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(1)));
-
-	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_CMN_DW0));
-
-	mutex_unlock(&dev_priv->dpio_lock);
-
-	return 0;
-}
-
 static int i915_llc(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;

@@ -3299,9 +3264,15 @@ static int
 i915_wedged_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	intel_runtime_pm_get(dev_priv);

 	i915_handle_error(dev, val,
 			  "Manually setting wedged to %llu", val);
+
+	intel_runtime_pm_put(dev_priv);

 	return 0;
 }

@@ -3803,7 +3774,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
 	{"i915_swizzle_info", i915_swizzle_info, 0},
 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
-	{"i915_dpio", i915_dpio_info, 0},
 	{"i915_llc", i915_llc, 0},
 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
 	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
drivers/gpu/drm/i915/i915_dma.c

@@ -1340,7 +1340,7 @@ static int i915_load_modeset_init(struct drm_device *dev)

 	ret = i915_gem_init(dev);
 	if (ret)
-		goto cleanup_power;
+		goto cleanup_irq;

 	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);

@@ -1349,10 +1349,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
 	dev->vblank_disable_allowed = true;
-	if (INTEL_INFO(dev)->num_pipes == 0) {
-		intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
+	if (INTEL_INFO(dev)->num_pipes == 0)
 		return 0;
-	}

 	ret = intel_fbdev_init(dev);
 	if (ret)

@@ -1387,8 +1385,7 @@ cleanup_gem:
 	mutex_unlock(&dev->struct_mutex);
 	WARN_ON(dev_priv->mm.aliasing_ppgtt);
 	drm_mm_takedown(&dev_priv->gtt.base.mm);
-cleanup_power:
-	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
 cleanup_irq:
 	drm_irq_uninstall(dev);
 cleanup_gem_stolen:
 	i915_gem_cleanup_stolen(dev);

@@ -1573,6 +1570,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->backlight_lock);
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
+	dev_priv->ring_index = 0;
 	mutex_init(&dev_priv->dpio_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);

@@ -1930,6 +1928,8 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;

+	if (file_priv && file_priv->bsd_ring)
+		file_priv->bsd_ring = NULL;
 	kfree(file_priv);
 }
drivers/gpu/drm/i915/i915_drv.c

@@ -279,6 +279,26 @@ static const struct intel_device_info intel_broadwell_m_info = {
 	GEN_DEFAULT_PIPEOFFSETS,
 };

+static const struct intel_device_info intel_broadwell_gt3d_info = {
+	.gen = 8, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+};
+
+static const struct intel_device_info intel_broadwell_gt3m_info = {
+	.gen = 8, .is_mobile = 1, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+};
+
 /*
  * Make sure any device matches here are from most specific to most
  * general. For example, since the Quanta match is based on the subsystem

@@ -311,8 +331,10 @@ static const struct intel_device_info intel_broadwell_m_info = {
 	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
 	INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
 	INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
-	INTEL_BDW_M_IDS(&intel_broadwell_m_info), \
-	INTEL_BDW_D_IDS(&intel_broadwell_d_info)
+	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
+	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
+	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
+	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info)

 static const struct pci_device_id pciidlist[] = { /* aka */
 	INTEL_PCI_IDS,

@@ -551,7 +573,6 @@ static int i915_drm_thaw_early(struct drm_device *dev)
 static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int error = 0;

 	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
 	    restore_gtt_mappings) {

@@ -569,8 +590,10 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 		drm_mode_config_reset(dev);

 		mutex_lock(&dev->struct_mutex);
-		error = i915_gem_init_hw(dev);
+		if (i915_gem_init_hw(dev)) {
+			DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+			atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		}
 		mutex_unlock(&dev->struct_mutex);

 		/* We need working interrupts for modeset enabling ... */

@@ -613,7 +636,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 	mutex_unlock(&dev_priv->modeset_restore_lock);

 	intel_runtime_pm_put(dev_priv);
-	return error;
+	return 0;
 }

 static int i915_drm_thaw(struct drm_device *dev)

@@ -758,11 +781,8 @@ int i915_reset(struct drm_device *dev)
		 * reset and the re-install of drm irq. Skip for ironlake per
		 * previous concerns that it doesn't respond well to some forms
		 * of re-init after reset. */
-		if (INTEL_INFO(dev)->gen > 5) {
-			mutex_lock(&dev->struct_mutex);
-			intel_enable_gt_powersave(dev);
-			mutex_unlock(&dev->struct_mutex);
-		}
+		if (INTEL_INFO(dev)->gen > 5)
+			intel_reset_gt_powersave(dev);

 		intel_hpd_init(dev);
 	} else {

@@ -896,13 +916,6 @@ static int i915_pm_poweroff(struct device *dev)
 	return i915_drm_freeze(drm_dev);
 }

-static void snb_runtime_suspend(struct drm_i915_private *dev_priv)
-{
-	struct drm_device *dev = dev_priv->dev;
-
-	intel_runtime_pm_disable_interrupts(dev);
-}
-
 static void hsw_runtime_suspend(struct drm_i915_private *dev_priv)
 {
 	hsw_enable_pc8(dev_priv);

@@ -912,12 +925,7 @@ static void snb_runtime_resume(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;

-	intel_runtime_pm_restore_interrupts(dev);
 	intel_init_pch_refclk(dev);
-	i915_gem_init_swizzling(dev);
-	mutex_lock(&dev_priv->rps.hw_lock);
-	gen6_update_ring_freq(dev);
-	mutex_unlock(&dev_priv->rps.hw_lock);
 }

 static void hsw_runtime_resume(struct drm_i915_private *dev_priv)

@@ -925,19 +933,67 @@ static void hsw_runtime_resume(struct drm_i915_private *dev_priv)
 	hsw_disable_pc8(dev_priv);
 }

+int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
+{
+	u32 val;
+	int err;
+
+	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+	WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
+
+#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
+	/* Wait for a previous force-off to settle */
+	if (force_on) {
+		err = wait_for(!COND, 20);
+		if (err) {
+			DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
+				  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
+			return err;
+		}
+	}
+
+	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
+	if (force_on)
+		val |= VLV_GFX_CLK_FORCE_ON_BIT;
+	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
+
+	if (!force_on)
+		return 0;
+
+	err = wait_for(COND, 20);
+	if (err)
+		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
+			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
+
+	return err;
+#undef COND
+}
+
 static int intel_runtime_suspend(struct device *device)
 {
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct drm_i915_private *dev_priv = dev->dev_private;

+	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
+		return -ENODEV;
+
 	WARN_ON(!HAS_RUNTIME_PM(dev));
+	assert_force_wake_inactive(dev_priv);

 	DRM_DEBUG_KMS("Suspending device\n");

+	/*
+	 * rps.work can't be rearmed here, since we get here only after making
+	 * sure the GPU is idle and the RPS freq is set to the minimum. See
+	 * intel_mark_idle().
+	 */
+	cancel_work_sync(&dev_priv->rps.work);
+	intel_runtime_pm_disable_interrupts(dev);
+
 	if (IS_GEN6(dev))
-		snb_runtime_suspend(dev_priv);
+		;
 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		hsw_runtime_suspend(dev_priv);
 	else

@@ -981,6 +1037,12 @@ static int intel_runtime_resume(struct device *device)
 	else
 		WARN_ON(1);

+	i915_gem_init_swizzling(dev);
+	gen6_update_ring_freq(dev);
+
+	intel_runtime_pm_restore_interrupts(dev);
+	intel_reset_gt_powersave(dev);
+
 	DRM_DEBUG_KMS("Device resumed\n");
 	return 0;
 }
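The vlv_force_gfx_clock() addition above is built around i915's wait_for() pattern: poll a status register until a condition holds or a millisecond deadline passes, in both the force-off-settle and force-on-ack directions (20 ms each). Below is a minimal standalone sketch of that poll-with-timeout shape; the register read, bit mask and timing are stand-ins for illustration, not the driver's real values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define GFX_CLK_STATUS_BIT (1u << 0)	/* placeholder bit, not the real layout */

/* Stand-in for a hardware status-register read. */
static uint32_t read_status_reg(void)
{
	static int calls;
	/* Pretend the clock reports "forced on" after a few polls. */
	return (++calls > 3) ? GFX_CLK_STATUS_BIT : 0;
}

static uint64_t now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Poll cond() until it is true or timeout_ms elapses.
 * Returns 0 on success, -1 on timeout -- the same contract shape as wait_for(). */
static int wait_for_cond(bool (*cond)(void), unsigned timeout_ms)
{
	uint64_t end = now_ms() + timeout_ms;

	do {
		if (cond())
			return 0;
	} while (now_ms() < end);
	return cond() ? 0 : -1;	/* one final check after the deadline */
}

static bool clk_forced_on(void)
{
	return read_status_reg() & GFX_CLK_STATUS_BIT;
}

int main(void)
{
	if (wait_for_cond(clk_forced_on, 20))
		fprintf(stderr, "timeout waiting for GFX clock force-on\n");
	else
		printf("GFX clock force-on acknowledged\n");
	return 0;
}

The final re-check after the deadline guards against a scheduling stall between the last poll and the timeout test being misreported as a timeout.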
drivers/gpu/drm/i915/i915_drv.h

@@ -325,7 +325,6 @@ struct drm_i915_error_state {
 	u32 gab_ctl;
 	u32 gfx_mode;
 	u32 extra_instdone[I915_NUM_INSTDONE_REG];
-	u32 pipestat[I915_MAX_PIPES];
 	u64 fence[I915_MAX_NUM_FENCES];
 	struct intel_overlay_error_state *overlay;
 	struct intel_display_error_state *display;

@@ -929,6 +928,7 @@ struct i915_power_domains {
	 * time are on. They are kept on until after the first modeset.
	 */
 	bool init_power_on;
+	bool initializing;
 	int power_well_count;

 	struct mutex lock;

@@ -1473,6 +1473,8 @@ struct drm_i915_private {
 	struct i915_dri1_state dri1;
 	/* Old ums support infrastructure, same warning applies. */
 	struct i915_ums_state ums;
+	/* the indicator for dispatch video commands on two BSD rings */
+	int ring_index;
 };

 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)

@@ -1680,6 +1682,7 @@ struct drm_i915_file_private {

 	struct i915_hw_context *private_default_ctx;
 	atomic_t rps_wait_boost;
+	struct intel_ring_buffer *bsd_ring;
 };

 /*

@@ -1834,11 +1837,14 @@ struct drm_i915_cmd_table {
 #define BSD_RING (1<<VCS)
 #define BLT_RING (1<<BCS)
 #define VEBOX_RING (1<<VECS)
-#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
-#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
-#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
-#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
-#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
+#define BSD2_RING (1<<VCS2)
+#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
+#define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
+#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
+#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
+#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
+		     to_i915(dev)->ellc_size)
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)

 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)

@@ -1969,6 +1975,7 @@ extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);

 extern void intel_console_resume(struct work_struct *work);
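The ring_mask scheme extended above encodes each engine as one bit in the device info, so a capability test such as the new HAS_BSD2() is a single AND. A self-contained sketch of that pattern follows; the device entries are illustrative, loosely modelled on the Broadwell GT2/GT3 split in the diff.

#include <stdio.h>

enum ring_id { RCS, VCS, BCS, VECS, VCS2, NUM_RINGS };

#define RENDER_RING (1u << RCS)
#define BSD_RING    (1u << VCS)
#define BLT_RING    (1u << BCS)
#define VEBOX_RING  (1u << VECS)
#define BSD2_RING   (1u << VCS2)

struct device_info {
	const char *name;
	unsigned ring_mask;
};

/* One AND answers "does this device have a second BSD ring?" */
#define HAS_BSD2(info) ((info)->ring_mask & BSD2_RING)

int main(void)
{
	struct device_info bdw_gt2 = { "bdw-gt2",
		RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING };
	struct device_info bdw_gt3 = { "bdw-gt3",
		RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING };

	printf("%s has BSD2: %s\n", bdw_gt2.name, HAS_BSD2(&bdw_gt2) ? "yes" : "no");
	printf("%s has BSD2: %s\n", bdw_gt3.name, HAS_BSD2(&bdw_gt3) ? "yes" : "no");
	return 0;
}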
drivers/gpu/drm/i915/i915_gem.c

@@ -43,6 +43,9 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
+static void
+i915_gem_object_retire(struct drm_i915_gem_object *obj);
+
 static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,

@@ -352,6 +355,8 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 		ret = i915_gem_object_wait_rendering(obj, true);
 		if (ret)
 			return ret;
+
+		i915_gem_object_retire(obj);
 	}

 	ret = i915_gem_object_get_pages(obj);

@@ -767,6 +772,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		ret = i915_gem_object_wait_rendering(obj, false);
 		if (ret)
 			return ret;
+
+		i915_gem_object_retire(obj);
 	}
 	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */

@@ -1154,7 +1161,8 @@ static int
 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *ring)
 {
-	i915_gem_retire_requests_ring(ring);
+	if (!obj->active)
+		return 0;

 	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.

@@ -1164,7 +1172,6 @@ i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
	 * we know we have passed the last write.
	 */
 	obj->last_write_seqno = 0;
-	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;

 	return 0;
 }

@@ -1785,58 +1792,58 @@ static unsigned long
 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
		  bool purgeable_only)
 {
-	struct list_head still_bound_list;
-	struct drm_i915_gem_object *obj, *next;
+	struct list_head still_in_list;
+	struct drm_i915_gem_object *obj;
 	unsigned long count = 0;

-	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.unbound_list,
-				 global_list) {
-		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
-		    i915_gem_object_put_pages(obj) == 0) {
-			count += obj->base.size >> PAGE_SHIFT;
-			if (count >= target)
-				return count;
-		}
-	}
-
 	/*
-	 * As we may completely rewrite the bound list whilst unbinding
+	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at the time, and recheck the list
	 * on every iteration.
+	 *
+	 * In particular, we must hold a reference whilst removing the
+	 * object as we may end up waiting for and/or retiring the objects.
+	 * This might release the final reference (held by the active list)
+	 * and result in the object being freed from under us. This is
+	 * similar to the precautions the eviction code must take whilst
+	 * removing objects.
+	 *
+	 * Also note that although these lists do not hold a reference to
+	 * the object we can safely grab one here: The final object
+	 * unreferencing and the bound_list are both protected by the
+	 * dev->struct_mutex and so we won't ever be able to observe an
+	 * object on the bound_list with a reference count equals 0.
	 */
-	INIT_LIST_HEAD(&still_bound_list);
+	INIT_LIST_HEAD(&still_in_list);
+	while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
+		obj = list_first_entry(&dev_priv->mm.unbound_list,
+				       typeof(*obj), global_list);
+		list_move_tail(&obj->global_list, &still_in_list);
+
+		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+			continue;
+
+		drm_gem_object_reference(&obj->base);
+
+		if (i915_gem_object_put_pages(obj) == 0)
+			count += obj->base.size >> PAGE_SHIFT;
+
+		drm_gem_object_unreference(&obj->base);
+	}
+	list_splice(&still_in_list, &dev_priv->mm.unbound_list);
+
+	INIT_LIST_HEAD(&still_in_list);
 	while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
 		struct i915_vma *vma, *v;

 		obj = list_first_entry(&dev_priv->mm.bound_list,
				       typeof(*obj), global_list);
-		list_move_tail(&obj->global_list, &still_bound_list);
+		list_move_tail(&obj->global_list, &still_in_list);

 		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
 			continue;

 		/*
		 * Hold a reference whilst we unbind this object, as we may
		 * end up waiting for and retiring requests. This might
		 * release the final reference (held by the active list)
-		 * and result in the object being freed from under us.
+		 * and result in this object being freed.
		 *
-		 * Note 1: Shrinking the bound list is special since only active
-		 * (and hence bound objects) can contain such limbo objects, so
-		 * we don't need special tricks for shrinking the unbound list.
-		 * The only other place where we have to be careful with active
-		 * objects suddenly disappearing due to retiring requests is the
-		 * eviction code.
-		 *
-		 * Note 2: Even though the bound list doesn't hold a reference
-		 * to the object we can safely grab one here: The final object
-		 * unreferencing and the bound_list are both protected by the
-		 * dev->struct_mutex and so we won't ever be able to observe an
-		 * object on the bound_list with a reference count equals 0.
		 */
 		drm_gem_object_reference(&obj->base);

 		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)

@@ -1848,7 +1855,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,

 		drm_gem_object_unreference(&obj->base);
 	}
-	list_splice(&still_bound_list, &dev_priv->mm.bound_list);
+	list_splice(&still_in_list, &dev_priv->mm.bound_list);

 	return count;
 }

@@ -1862,17 +1869,8 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 static unsigned long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_gem_object *obj, *next;
-	long freed = 0;
-
 	i915_gem_evict_everything(dev_priv->dev);
-
-	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
				 global_list) {
-		if (i915_gem_object_put_pages(obj) == 0)
-			freed += obj->base.size >> PAGE_SHIFT;
-	}
-	return freed;
+	return __i915_gem_shrink(dev_priv, LONG_MAX, false);
 }

 static int
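The rewritten __i915_gem_shrink() above abandons list_for_each_entry_safe() because processing an object can retire requests and rewrite the list mid-walk. The replacement pattern: detach the first element onto a private still_in_list, process it, repeat, then splice the private list back at the end — so a rescan can never visit an element twice and no iterator is held across the dangerous step. Here is a standalone sketch of that walk over a toy circular list; the node type and the "purgeable" test are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *prev, *next;	/* circular list with a dedicated head */
	int pages;
	bool purgeable;
};

static void list_init(struct node *head) { head->prev = head->next = head; }
static bool list_empty(struct node *head) { return head->next == head; }

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Splice every element of src onto the tail of dst. */
static void list_splice_tail(struct node *src, struct node *dst)
{
	while (!list_empty(src)) {
		struct node *n = src->next;
		list_del(n);
		list_add_tail(n, dst);
	}
}

/* Walk one element at a time: the list may be rewritten behind our back
 * by the "processing" step, so never hold an iterator across it. */
static long shrink(struct node *list, long target)
{
	struct node still_in_list;
	long count = 0;

	list_init(&still_in_list);
	while (count < target && !list_empty(list)) {
		struct node *obj = list->next;

		/* Park it so a rescan cannot visit it twice. */
		list_del(obj);
		list_add_tail(obj, &still_in_list);

		if (!obj->purgeable)
			continue;
		count += obj->pages;	/* "put pages" stand-in */
	}
	list_splice_tail(&still_in_list, list);
	return count;
}

int main(void)
{
	struct node head, a = {0, 0, 4, true}, b = {0, 0, 8, false}, c = {0, 0, 2, true};

	list_init(&head);
	list_add_tail(&a, &head);
	list_add_tail(&b, &head);
	list_add_tail(&c, &head);
	printf("reclaimed %ld pages\n", shrink(&head, 100));	/* prints 6 */
	return 0;
}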
@@ -2089,6 +2087,19 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 	WARN_ON(i915_verify_lists(dev));
 }

+static void
+i915_gem_object_retire(struct drm_i915_gem_object *obj)
+{
+	struct intel_ring_buffer *ring = obj->ring;
+
+	if (ring == NULL)
+		return;
+
+	if (i915_seqno_passed(ring->get_seqno(ring, true),
+			      obj->last_read_seqno))
+		i915_gem_object_move_to_inactive(obj);
+}
+
 static int
 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 {

@@ -2108,8 +2119,8 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 	for_each_ring(ring, dev_priv, i) {
 		intel_ring_init_seqno(ring, seqno);

-		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
-			ring->sync_seqno[j] = 0;
+		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
+			ring->semaphore.sync_seqno[j] = 0;
 	}

 	return 0;

@@ -2384,6 +2395,11 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,

 		i915_gem_free_request(request);
 	}
+
+	/* These may not have been flushed before the reset, do so now */
+	kfree(ring->preallocated_lazy_request);
+	ring->preallocated_lazy_request = NULL;
+	ring->outstanding_lazy_seqno = 0;
 }

 void i915_gem_restore_fences(struct drm_device *dev)

@@ -2424,8 +2440,6 @@ void i915_gem_reset(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, i)
 		i915_gem_reset_ring_cleanup(dev_priv, ring);

-	i915_gem_cleanup_ringbuffer(dev);
-
 	i915_gem_context_reset(dev);

 	i915_gem_restore_fences(dev);

@@ -2678,7 +2692,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	idx = intel_ring_sync_index(from, to);

 	seqno = obj->last_read_seqno;
-	if (seqno <= from->sync_seqno[idx])
+	if (seqno <= from->semaphore.sync_seqno[idx])
 		return 0;

 	ret = i915_gem_check_olr(obj->ring, seqno);

@@ -2686,13 +2700,13 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		return ret;

 	trace_i915_gem_ring_sync_to(from, to, seqno);
-	ret = to->sync_to(to, from, seqno);
+	ret = to->semaphore.sync_to(to, from, seqno);
 	if (!ret)
 		/* We use last_read_seqno because sync_to()
		 * might have just caused seqno wrap under
		 * the radar.
		 */
-		from->sync_seqno[idx] = obj->last_read_seqno;
+		from->semaphore.sync_seqno[idx] = obj->last_read_seqno;

 	return ret;
 }

@@ -3426,6 +3440,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (ret)
 		return ret;

+	i915_gem_object_retire(obj);
 	i915_gem_object_flush_cpu_write_domain(obj, false);

 	/* Serialise direct access to this object with the barriers for

@@ -3524,6 +3539,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
		 * in obj->write_domain and have been skipping the clflushes.
		 * Just set it to the CPU cache for now.
		 */
+		i915_gem_object_retire(obj);
 		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);

 		old_read_domains = obj->base.read_domains;

@@ -3746,6 +3762,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	if (ret)
 		return ret;

+	i915_gem_object_retire(obj);
 	i915_gem_object_flush_gtt_write_domain(obj);

 	old_write_domain = obj->base.write_domain;

@@ -4233,6 +4250,17 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
 	kfree(vma);
 }

+static void
+i915_gem_stop_ringbuffers(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int i;
+
+	for_each_ring(ring, dev_priv, i)
+		intel_stop_ring_buffer(ring);
+}
+
 int
 i915_gem_suspend(struct drm_device *dev)
 {

@@ -4254,7 +4282,7 @@ i915_gem_suspend(struct drm_device *dev)
 		i915_gem_evict_everything(dev);

 	i915_kernel_lost_context(dev);
-	i915_gem_cleanup_ringbuffer(dev);
+	i915_gem_stop_ringbuffers(dev);

 	/* Hack! Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.

@@ -4374,13 +4402,20 @@ static int i915_gem_init_rings(struct drm_device *dev)
 			goto cleanup_blt_ring;
 	}

+	if (HAS_BSD2(dev)) {
+		ret = intel_init_bsd2_ring_buffer(dev);
+		if (ret)
+			goto cleanup_vebox_ring;
+	}
+
 	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
 	if (ret)
-		goto cleanup_vebox_ring;
+		goto cleanup_bsd2_ring;

 	return 0;

+cleanup_bsd2_ring:
+	intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
 cleanup_vebox_ring:
 	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
 cleanup_blt_ring:

@@ -4438,15 +4473,11 @@ i915_gem_init_hw(struct drm_device *dev)
	 * the do_switch), but before enabling PPGTT. So don't move this.
	 */
 	ret = i915_gem_context_enable(dev_priv);
-	if (ret) {
+	if (ret && ret != -EIO) {
 		DRM_ERROR("Context enable failed %d\n", ret);
-		goto err_out;
+		i915_gem_cleanup_ringbuffer(dev);
 	}

-	return 0;
-
-err_out:
-	i915_gem_cleanup_ringbuffer(dev);
 	return ret;
 }

@@ -4459,8 +4490,9 @@ int i915_gem_init(struct drm_device *dev)

 	if (IS_VALLEYVIEW(dev)) {
 		/* VLVA0 (potential hack), BIOS isn't actually waking us */
-		I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
-		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
+		I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
+		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
+			      VLV_GTLC_ALLOWWAKEACK), 10))
 			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
 	}

@@ -4473,18 +4505,21 @@ int i915_gem_init(struct drm_device *dev)
 	}

 	ret = i915_gem_init_hw(dev);
-	mutex_unlock(&dev->struct_mutex);
-	if (ret) {
-		WARN_ON(dev_priv->mm.aliasing_ppgtt);
-		i915_gem_context_fini(dev);
-		drm_mm_takedown(&dev_priv->gtt.base.mm);
-		return ret;
+	if (ret == -EIO) {
+		/* Allow ring initialisation to fail by marking the GPU as
+		 * wedged. But we only want to do this where the GPU is angry,
+		 * for all other failure, such as an allocation failure, bail.
+		 */
+		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		ret = 0;
 	}
+	mutex_unlock(&dev->struct_mutex);

 	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		dev_priv->dri1.allow_batchbuffer = 1;
-	return 0;
+	return ret;
 }

 void
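The i915_gem_init() change above ends with a deliberate error-policy split: -EIO from hardware bring-up is absorbed by marking the GPU wedged, so the driver still loads and modeset keeps working, while any other failure (an allocation error, say) keeps propagating. A minimal sketch of that triage, with the hardware init faked:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool gpu_wedged;

/* Stand-in for ring/context bring-up that may fail two ways. */
static int init_hw(bool simulate_hang)
{
	return simulate_hang ? -EIO : 0;
}

static int gem_init(bool simulate_hang)
{
	int ret = init_hw(simulate_hang);

	if (ret == -EIO) {
		/* GPU is angry: declare it wedged but let the driver load. */
		fprintf(stderr, "failed to initialize GPU, declaring it wedged\n");
		gpu_wedged = true;
		ret = 0;
	}
	/* Any other error (such as -ENOMEM) is fatal and propagates. */
	return ret;
}

int main(void)
{
	printf("healthy init -> %d (wedged=%d)\n", gem_init(false), gpu_wedged);
	printf("hung init    -> %d (wedged=%d)\n", gem_init(true), gpu_wedged);
	return 0;
}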
drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -262,10 +262,12 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)

 static int
 relocate_entry_cpu(struct drm_i915_gem_object *obj,
-		   struct drm_i915_gem_relocation_entry *reloc)
+		   struct drm_i915_gem_relocation_entry *reloc,
+		   uint64_t target_offset)
 {
 	struct drm_device *dev = obj->base.dev;
 	uint32_t page_offset = offset_in_page(reloc->offset);
+	uint64_t delta = reloc->delta + target_offset;
 	char *vaddr;
 	int ret;

@@ -275,7 +277,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,

 	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
-	*(uint32_t *)(vaddr + page_offset) = reloc->delta;
+	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

 	if (INTEL_INFO(dev)->gen >= 8) {
 		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

@@ -286,7 +288,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
				(reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
 		}

-		*(uint32_t *)(vaddr + page_offset) = 0;
+		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
 	}

 	kunmap_atomic(vaddr);

@@ -296,10 +298,12 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,

 static int
 relocate_entry_gtt(struct drm_i915_gem_object *obj,
-		   struct drm_i915_gem_relocation_entry *reloc)
+		   struct drm_i915_gem_relocation_entry *reloc,
+		   uint64_t target_offset)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint64_t delta = reloc->delta + target_offset;
 	uint32_t __iomem *reloc_entry;
 	void __iomem *reloc_page;
 	int ret;

@@ -318,7 +322,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
			reloc->offset & PAGE_MASK);
 	reloc_entry = (uint32_t __iomem *)
		(reloc_page + offset_in_page(reloc->offset));
-	iowrite32(reloc->delta, reloc_entry);
+	iowrite32(lower_32_bits(delta), reloc_entry);

 	if (INTEL_INFO(dev)->gen >= 8) {
 		reloc_entry += 1;

@@ -331,7 +335,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
			reloc_entry = reloc_page;
 		}

-		iowrite32(0, reloc_entry);
+		iowrite32(upper_32_bits(delta), reloc_entry);
 	}

 	io_mapping_unmap_atomic(reloc_page);
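With the hunks above, both relocation writers take the target offset as 64 bits and, on gen8, emit it as two consecutive dwords: lower_32_bits(delta) at the relocation offset and upper_32_bits(delta) right after — relocate_entry_cpu() even re-derives the page offset for the second dword because the pair may straddle a page boundary. A standalone sketch of the split; the two helpers are re-implemented here for the example:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

/* Patch a 64-bit GPU address into a batch buffer at byte offset 'off'.
 * Modelled on relocate_entry_cpu(): two 32-bit writes, low dword first. */
static void write_reloc64(uint8_t *batch, size_t off, uint64_t delta)
{
	uint32_t lo = lower_32_bits(delta);
	uint32_t hi = upper_32_bits(delta);

	memcpy(batch + off, &lo, sizeof(lo));
	memcpy(batch + off + sizeof(lo), &hi, sizeof(hi));
}

int main(void)
{
	uint8_t batch[16] = {0};
	uint64_t delta = 0x0000000123456789ull;	/* above 4 GiB: needs 64 bits */

	write_reloc64(batch, 4, delta);
	printf("lo=0x%08x hi=0x%08x\n",
	       lower_32_bits(delta), upper_32_bits(delta));
	return 0;
}

The example patches a flat buffer; the kernel path maps each backing page separately, which is exactly why the upper dword needs its own page lookup when the pair crosses a page boundary.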
@@ -348,7 +352,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
 	struct i915_vma *target_vma;
-	uint32_t target_offset;
+	uint64_t target_offset;
 	int ret;

 	/* we already hold a reference to all valid objects */

@@ -426,11 +430,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	if (obj->active && in_atomic())
 		return -EFAULT;

-	reloc->delta += target_offset;
 	if (use_cpu_reloc(obj))
-		ret = relocate_entry_cpu(obj, reloc);
+		ret = relocate_entry_cpu(obj, reloc, target_offset);
 	else
-		ret = relocate_entry_gtt(obj, reloc);
+		ret = relocate_entry_gtt(obj, reloc, target_offset);

 	if (ret)
 		return ret;

@@ -955,6 +958,9 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (i915_gem_obj_ggtt_bound(obj) &&
 		    i915_gem_obj_to_ggtt(obj)->pin_count)
 			intel_mark_fb_busy(obj, ring);
+
+		/* update for the implicit flush after a batch */
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
 	}

 	trace_i915_gem_object_change_domain(obj, old_read, old_write);

@@ -981,8 +987,10 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i;

-	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
-		return 0;
+	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
+		DRM_DEBUG("sol reset is gen7/rcs only\n");
+		return -EINVAL;
+	}

 	ret = intel_ring_begin(ring, 4 * 3);
 	if (ret)

@@ -999,6 +1007,37 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 	return 0;
 }

+/**
+ * Find one BSD ring to dispatch the corresponding BSD command.
+ * The Ring ID is returned.
+ */
+static int gen8_dispatch_bsd_ring(struct drm_device *dev,
+				  struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+
+	/* Check whether the file_priv is using one ring */
+	if (file_priv->bsd_ring)
+		return file_priv->bsd_ring->id;
+	else {
+		/* If no, use the ping-pong mechanism to select one ring */
+		int ring_id;
+
+		mutex_lock(&dev->struct_mutex);
+		if (dev_priv->ring_index == 0) {
+			ring_id = VCS;
+			dev_priv->ring_index = 1;
+		} else {
+			ring_id = VCS2;
+			dev_priv->ring_index = 0;
+		}
+		file_priv->bsd_ring = &dev_priv->ring[ring_id];
+		mutex_unlock(&dev->struct_mutex);
+		return ring_id;
+	}
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,

@@ -1013,7 +1052,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct i915_hw_context *ctx;
 	struct i915_address_space *vm;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-	u32 exec_start = args->batch_start_offset, exec_len;
+	u64 exec_start = args->batch_start_offset, exec_len;
 	u32 mask, flags;
 	int ret, mode, i;
 	bool need_relocs;

@@ -1035,7 +1074,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (args->flags & I915_EXEC_IS_PINNED)
 		flags |= I915_DISPATCH_PINNED;

-	if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
+	if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
 		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
 		return -EINVAL;

@@ -1043,7 +1082,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,

 	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
 		ring = &dev_priv->ring[RCS];
-	else
+	else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
+		if (HAS_BSD2(dev)) {
+			int ring_id;
+			ring_id = gen8_dispatch_bsd_ring(dev, file);
+			ring = &dev_priv->ring[ring_id];
+		} else
+			ring = &dev_priv->ring[VCS];
+	} else
 		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];

 	if (!intel_ring_initialized(ring)) {

@@ -1058,14 +1104,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (ring == &dev_priv->ring[RCS] &&
-		    mode != dev_priv->relative_constants_mode) {
-			if (INTEL_INFO(dev)->gen < 4)
+		if (mode != 0 && ring != &dev_priv->ring[RCS]) {
+			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+			return -EINVAL;
+		}
+
+		if (mode != dev_priv->relative_constants_mode) {
+			if (INTEL_INFO(dev)->gen < 4) {
+				DRM_DEBUG("no rel constants on pre-gen4\n");
 				return -EINVAL;
+			}

 			if (INTEL_INFO(dev)->gen > 5 &&
-			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
+			    mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
 				return -EINVAL;
+			}

 			/* The HW changed the meaning on this bit on gen6 */
 			if (INTEL_INFO(dev)->gen >= 6)

@@ -1113,6 +1167,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
			ret = -EFAULT;
			goto pre_mutex_err;
		}
-	}
+	} else {
+		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
+			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
+			return -EINVAL;
+		}
+	}

 	intel_runtime_pm_get(dev_priv);

@@ -1390,6 +1449,11 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}

+	if (args->rsvd2 != 0) {
+		DRM_DEBUG("dirty rvsd2 field\n");
+		return -EINVAL;
+	}
+
 	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 	if (exec2_list == NULL)
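gen8_dispatch_bsd_ring() above gives each file a sticky assignment to one of the two BSD (video) rings, flipping a global index so successive new clients alternate between VCS and VCS2. A small sketch of that placement policy; the client and queue types are invented for the example:

#include <stdio.h>

enum { QUEUE_VCS = 0, QUEUE_VCS2 = 1 };

struct client {
	int assigned;	/* -1 until the first dispatch */
};

static int next_index;	/* global ping-pong state */

/* First dispatch picks a queue round-robin; later ones reuse it so a
 * client's commands stay ordered on a single queue. */
static int dispatch_queue(struct client *c)
{
	if (c->assigned < 0) {
		c->assigned = next_index ? QUEUE_VCS2 : QUEUE_VCS;
		next_index ^= 1;
	}
	return c->assigned;
}

int main(void)
{
	struct client a = { -1 }, b = { -1 };

	printf("a -> %d\n", dispatch_queue(&a));	/* 0 */
	printf("b -> %d\n", dispatch_queue(&b));	/* 1 */
	printf("a -> %d\n", dispatch_queue(&a));	/* still 0 */
	return 0;
}

The design point: a client stays on one ring, preserving its submission order, while load still spreads across clients; the real helper takes struct_mutex around the shared index.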
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -68,10 +68,19 @@ static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
 {
 	gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
 	pte |= addr;
-	if (level != I915_CACHE_NONE)
-		pte |= PPAT_CACHED_INDEX;
-	else
+
+	switch (level) {
+	case I915_CACHE_NONE:
 		pte |= PPAT_UNCACHED_INDEX;
+		break;
+	case I915_CACHE_WT:
+		pte |= PPAT_DISPLAY_ELLC_INDEX;
+		break;
+	default:
+		pte |= PPAT_CACHED_INDEX;
+		break;
+	}
+
 	return pte;
 }

@@ -1368,7 +1377,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
		(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
 	int i = 0;
 	struct sg_page_iter sg_iter;
-	dma_addr_t addr;
+	dma_addr_t addr = 0;

 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
 		addr = sg_dma_address(sg_iter.sg) +
drivers/gpu/drm/i915/i915_gpu_error.c

@@ -42,6 +42,7 @@ static const char *ring_str(int ring)
 	case VCS: return "bsd";
 	case BCS: return "blt";
 	case VECS: return "vebox";
+	case VCS2: return "bsd2";
 	default: return "";
 	}
 }

@@ -756,14 +757,14 @@ static void i915_record_ring_state(struct drm_device *dev,
			= I915_READ(RING_SYNC_0(ring->mmio_base));
 		ering->semaphore_mboxes[1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
-		ering->semaphore_seqno[0] = ring->sync_seqno[0];
-		ering->semaphore_seqno[1] = ring->sync_seqno[1];
+		ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
+		ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
 	}

 	if (HAS_VEBOX(dev)) {
 		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
-		ering->semaphore_seqno[2] = ring->sync_seqno[2];
+		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
 	}

 	if (INTEL_INFO(dev)->gen >= 4) {

@@ -1028,7 +1029,6 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
 {
 	struct drm_device *dev = dev_priv->dev;
-	int pipe;

 	/* General organization
	 * 1. Registers specific to a single generation

@@ -1053,9 +1053,6 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
		error->gfx_mode = I915_READ(GFX_MODE);
 	}

-	if (IS_GEN2(dev))
-		error->ier = I915_READ16(IER);
-
 	/* 2: Registers which belong to multiple generations */
 	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);

@@ -1079,9 +1076,10 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
 	else {
-		error->ier = I915_READ(IER);
-		for_each_pipe(pipe)
-			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+		if (IS_GEN2(dev))
+			error->ier = I915_READ16(IER);
+		else
+			error->ier = I915_READ(IER);
 	}

 	/* 4: Everything else */
drivers/gpu/drm/i915/i915_irq.c

@@ -751,24 +751,32 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

-static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
+static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 {
+	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t status;
-	int reg;
+	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+	enum pipe pipe = crtc->pipe;
+	int vtotal = mode->crtc_vtotal;
+	int position;

-	if (INTEL_INFO(dev)->gen >= 8) {
-		status = GEN8_PIPE_VBLANK;
-		reg = GEN8_DE_PIPE_ISR(pipe);
-	} else if (INTEL_INFO(dev)->gen >= 7) {
-		status = DE_PIPE_VBLANK_IVB(pipe);
-		reg = DEISR;
-	} else {
-		status = DE_PIPE_VBLANK(pipe);
-		reg = DEISR;
-	}
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		vtotal /= 2;

-	return __raw_i915_read32(dev_priv, reg) & status;
+	if (IS_GEN2(dev))
+		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+	else
+		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
+
+	/*
+	 * Scanline counter increments at leading edge of hsync, and
+	 * it starts counting from vtotal-1 on the first active line.
+	 * That means the scanline counter value is always one less
+	 * than what we would expect. Ie. just after start of vblank,
+	 * which also occurs at start of hsync (on the last active line),
+	 * the scanline counter will read vblank_start-1.
+	 */
+	return (position + 1) % vtotal;
 }

 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
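The counter fix-up in __intel_get_crtc_scanline() deserves a worked example: the hardware scanline counter increments at the leading edge of hsync and starts from vtotal-1 on the first active line, so a raw read is always one line behind; (position + 1) % vtotal renormalizes it. With an illustrative vtotal of 1125, a raw 1124 means line 0 and a raw 0 means line 1:

#include <stdio.h>

/* Mirror of the fix-up in __intel_get_crtc_scanline(): the raw counter
 * reads vtotal-1 on the first active line, i.e. one less than the line
 * the pipe is actually on. */
static int normalize_scanline(int raw, int vtotal)
{
	return (raw + 1) % vtotal;
}

int main(void)
{
	int vtotal = 1125;	/* illustrative total-lines value */

	printf("raw 1124 -> line %d\n", normalize_scanline(1124, vtotal));	/* 0 */
	printf("raw    0 -> line %d\n", normalize_scanline(0, vtotal));	/* 1 */
	printf("raw  539 -> line %d\n", normalize_scanline(539, vtotal));	/* 540 */
	return 0;
}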
@@ -780,7 +788,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
 	int position;
-	int vbl_start, vbl_end, htotal, vtotal;
+	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
 	bool in_vbl = true;
 	int ret = 0;
 	unsigned long irqflags;

@@ -792,6 +800,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 	}

 	htotal = mode->crtc_htotal;
+	hsync_start = mode->crtc_hsync_start;
 	vtotal = mode->crtc_vtotal;
 	vbl_start = mode->crtc_vblank_start;
 	vbl_end = mode->crtc_vblank_end;

@@ -810,7 +819,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
	 * following code must not block on uncore.lock.
	 */
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

 	/* Get optional system timestamp before query. */

@@ -821,68 +830,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
-		if (IS_GEN2(dev))
-			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
-		else
-			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
-
-		if (HAS_DDI(dev)) {
-			/*
-			 * On HSW HDMI outputs there seems to be a 2 line
-			 * difference, whereas eDP has the normal 1 line
-			 * difference that earlier platforms have. External
-			 * DP is unknown. For now just check for the 2 line
-			 * difference case on all output types on HSW+.
-			 *
-			 * This might misinterpret the scanline counter being
-			 * one line too far along on eDP, but that's less
-			 * dangerous than the alternative since that would lead
-			 * the vblank timestamp code astray when it sees a
-			 * scanline count before vblank_start during a vblank
-			 * interrupt.
-			 */
-			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
-			if ((in_vbl && (position == vbl_start - 2 ||
-					position == vbl_start - 1)) ||
-			    (!in_vbl && (position == vbl_end - 2 ||
-					 position == vbl_end - 1)))
-				position = (position + 2) % vtotal;
-		} else if (HAS_PCH_SPLIT(dev)) {
-			/*
-			 * The scanline counter increments at the leading edge
-			 * of hsync, ie. it completely misses the active portion
-			 * of the line. Fix up the counter at both edges of vblank
-			 * to get a more accurate picture whether we're in vblank
-			 * or not.
-			 */
-			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
-			if ((in_vbl && position == vbl_start - 1) ||
-			    (!in_vbl && position == vbl_end - 1))
-				position = (position + 1) % vtotal;
-		} else {
-			/*
-			 * ISR vblank status bits don't work the way we'd want
-			 * them to work on non-PCH platforms (for
-			 * ilk_pipe_in_vblank_locked()), and there doesn't
-			 * appear any other way to determine if we're currently
-			 * in vblank.
-			 *
-			 * Instead let's assume that we're already in vblank if
-			 * we got called from the vblank interrupt and the
-			 * scanline counter value indicates that we're on the
-			 * line just prior to vblank start. This should result
-			 * in the correct answer, unless the vblank interrupt
-			 * delivery really got delayed for almost exactly one
-			 * full frame/field.
-			 */
-			if (flags & DRM_CALLED_FROM_VBLIRQ &&
-			    position == vbl_start - 1) {
-				position = (position + 1) % vtotal;
-
-				/* Signal this correction as "applied". */
-				ret |= 0x8;
-			}
-		}
+		position = __intel_get_crtc_scanline(intel_crtc);
 	} else {
 		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal

@@ -894,6 +842,17 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 		vbl_start *= htotal;
 		vbl_end *= htotal;
 		vtotal *= htotal;
+
+		/*
+		 * Start of vblank interrupt is triggered at start of hsync,
+		 * just prior to the first active line of vblank. However we
+		 * consider lines to start at the leading edge of horizontal
+		 * active. So, should we get here before we've crossed into
+		 * the horizontal active of the first line in vblank, we would
+		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
+		 * always add htotal-hsync_start to the current pixel position.
+		 */
+		position = (position + htotal - hsync_start) % vtotal;
 	}

 	/* Get optional system timestamp after query. */

@@ -932,6 +891,19 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 	return ret;
 }

+int intel_get_crtc_scanline(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	unsigned long irqflags;
+	int position;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	position = __intel_get_crtc_scanline(crtc);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+	return position;
+}
+
 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,

@@ -1347,13 +1319,16 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
 	}

-	if (master_ctl & GEN8_GT_VCS1_IRQ) {
+	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
 		tmp = I915_READ(GEN8_GT_IIR(1));
 		if (tmp) {
 			ret = IRQ_HANDLED;
 			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
 			if (vcs & GT_RENDER_USER_INTERRUPT)
 				notify_ring(dev, &dev_priv->ring[VCS]);
+			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
+			if (vcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[VCS2]);
 			I915_WRITE(GEN8_GT_IIR(1), tmp);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT1)!\n");

@@ -1581,6 +1556,19 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 	}
 }

+static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
+{
+	struct intel_crtc *crtc;
+
+	if (!drm_handle_vblank(dev, pipe))
+		return false;
+
+	crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+	wake_up(&crtc->vbl_wait);
+
+	return true;
+}
+
 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;

@@ -1632,7 +1620,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)

 	for_each_pipe(pipe) {
 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
-			drm_handle_vblank(dev, pipe);
+			intel_pipe_handle_vblank(dev, pipe);

 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
 			intel_prepare_page_flip(dev, pipe);

@@ -1875,7 +1863,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)

 	for_each_pipe(pipe) {
 		if (de_iir & DE_PIPE_VBLANK(pipe))
-			drm_handle_vblank(dev, pipe);
+			intel_pipe_handle_vblank(dev, pipe);

 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
 			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))

@@ -1925,7 +1913,7 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)

 	for_each_pipe(pipe) {
 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
-			drm_handle_vblank(dev, pipe);
+			intel_pipe_handle_vblank(dev, pipe);

 		/* plane/pipes map 1:1 on ilk+ */
 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {

@@ -2068,7 +2056,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)

 		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
 		if (pipe_iir & GEN8_PIPE_VBLANK)
-			drm_handle_vblank(dev, pipe);
+			intel_pipe_handle_vblank(dev, pipe);

 		if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
 			intel_prepare_page_flip(dev, pipe);

@@ -2184,6 +2172,14 @@ static void i915_error_work_func(struct work_struct *work)
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

+		/*
+		 * In most cases it's guaranteed that we get here with an RPM
+		 * reference held, for example because there is a pending GPU
+		 * request that won't finish until the reset is done. This
+		 * isn't the case at least when we get here by doing a
+		 * simulated reset via debugfs, so get an RPM reference.
+		 */
+		intel_runtime_pm_get(dev_priv);
 		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset

@@ -2194,6 +2190,8 @@ static void i915_error_work_func(struct work_struct *work)

 		intel_display_handle_reset(dev);

+		intel_runtime_pm_put(dev_priv);
+
 		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset

@@ -2597,8 +2595,7 @@ semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr)
			if(ring == signaller)
				continue;

-			if (sync_bits ==
-			    signaller->semaphore_register[ring->id])
+			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
 	}

@@ -3315,6 +3312,8 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
 	if (!dev_priv)
 		return;

+	I915_WRITE(VLV_MASTER_IER, 0);
+
 	intel_hpd_irq_uninstall(dev_priv);

 	for_each_pipe(pipe)

@@ -3404,7 +3403,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

-	if (!drm_handle_vblank(dev, pipe))
+	if (!intel_pipe_handle_vblank(dev, pipe))
 		return false;

 	if ((iir & flip_pending) == 0)

@@ -3589,7 +3588,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

-	if (!drm_handle_vblank(dev, pipe))
+	if (!intel_pipe_handle_vblank(dev, pipe))
 		return false;

 	if ((iir & flip_pending) == 0)
drivers/gpu/drm/i915/i915_reg.h

@@ -760,6 +760,7 @@ enum punit_power_well {
 #define RENDER_RING_BASE	0x02000
 #define BSD_RING_BASE		0x04000
 #define GEN6_BSD_RING_BASE	0x12000
+#define GEN8_BSD2_RING_BASE	0x1c000
 #define VEBOX_RING_BASE		0x1a000
 #define BLT_RING_BASE		0x22000
 #define RING_TAIL(base)		((base)+0x30)

@@ -4996,9 +4997,15 @@ enum punit_power_well {
 #define FORCEWAKE_ACK_HSW			0x130044
 #define FORCEWAKE_ACK				0x130090
 #define VLV_GTLC_WAKE_CTRL			0x130090
+#define   VLV_GTLC_RENDER_CTX_EXISTS		(1 << 25)
+#define   VLV_GTLC_MEDIA_CTX_EXISTS		(1 << 24)
+#define   VLV_GTLC_ALLOWWAKEREQ			(1 << 0)
+
 #define VLV_GTLC_PW_STATUS			0x130094
-#define VLV_GTLC_PW_RENDER_STATUS_MASK		0x80
-#define VLV_GTLC_PW_MEDIA_STATUS_MASK		0x20
+#define   VLV_GTLC_ALLOWWAKEACK			(1 << 0)
+#define   VLV_GTLC_ALLOWWAKEERR			(1 << 1)
+#define   VLV_GTLC_PW_MEDIA_STATUS_MASK		(1 << 5)
+#define   VLV_GTLC_PW_RENDER_STATUS_MASK	(1 << 7)
 #define FORCEWAKE_MT				0xa188 /* multi-threaded */
 #define FORCEWAKE_KERNEL			0x1
 #define FORCEWAKE_USER				0x2

@@ -5130,6 +5137,9 @@ enum punit_power_well {
 #define VLV_MEDIA_RC6_COUNT_EN			(1<<1)
 #define VLV_RENDER_RC6_COUNT_EN			(1<<0)
 #define GEN6_GT_GFX_RC6				0x138108
+#define VLV_GT_RENDER_RC6			0x138108
+#define VLV_GT_MEDIA_RC6			0x13810C
+
 #define GEN6_GT_GFX_RC6p			0x13810C
 #define GEN6_GT_GFX_RC6pp			0x138110
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -263,6 +263,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
+	intel_runtime_pm_get(dev_priv);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
 		u32 freq;
@@ -273,6 +275,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
+	intel_runtime_pm_put(dev_priv);
+
 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
 }
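The sysfs hunks above (and the matching vlv_drpc_info change earlier in the series) all apply the same idiom: take a runtime-PM reference before touching registers, drop it afterwards. A hedged sketch of that bracketing pattern, using the driver's own helpers but a hypothetical function name, not code from this series:

	/* sketch: RPM bracketing around a register read (hypothetical helper) */
	static u32 read_rp_control(struct drm_i915_private *dev_priv)
	{
		u32 val;

		intel_runtime_pm_get(dev_priv);	/* wake the device if suspended */
		val = I915_READ(GEN6_RP_CONTROL);	/* safe: device is awake */
		intel_runtime_pm_put(dev_priv);	/* allow autosuspend again */

		return val;
	}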
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -7,6 +7,7 @@
 
 #include <drm/drmP.h>
 #include "i915_drv.h"
+#include "intel_drv.h"
 #include "intel_ringbuffer.h"
 
 #undef TRACE_SYSTEM
@@ -14,6 +15,80 @@
 #define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
 #define TRACE_INCLUDE_FILE i915_trace
 
+/* pipe updates */
+
+TRACE_EVENT(i915_pipe_update_start,
+	    TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max),
+	    TP_ARGS(crtc, min, max),
+
+	    TP_STRUCT__entry(
+			     __field(enum pipe, pipe)
+			     __field(u32, frame)
+			     __field(u32, scanline)
+			     __field(u32, min)
+			     __field(u32, max)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pipe = crtc->pipe;
+			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
+										       crtc->pipe);
+			   __entry->scanline = intel_get_crtc_scanline(crtc);
+			   __entry->min = min;
+			   __entry->max = max;
+			   ),
+
+	    TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
+		      pipe_name(__entry->pipe), __entry->frame,
+		      __entry->scanline, __entry->min, __entry->max)
+);
+
+TRACE_EVENT(i915_pipe_update_vblank_evaded,
+	    TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max, u32 frame),
+	    TP_ARGS(crtc, min, max, frame),
+
+	    TP_STRUCT__entry(
+			     __field(enum pipe, pipe)
+			     __field(u32, frame)
+			     __field(u32, scanline)
+			     __field(u32, min)
+			     __field(u32, max)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pipe = crtc->pipe;
+			   __entry->frame = frame;
+			   __entry->scanline = intel_get_crtc_scanline(crtc);
+			   __entry->min = min;
+			   __entry->max = max;
+			   ),
+
+	    TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
+		      pipe_name(__entry->pipe), __entry->frame,
+		      __entry->scanline, __entry->min, __entry->max)
+);
+
+TRACE_EVENT(i915_pipe_update_end,
+	    TP_PROTO(struct intel_crtc *crtc, u32 frame),
+	    TP_ARGS(crtc, frame),
+
+	    TP_STRUCT__entry(
+			     __field(enum pipe, pipe)
+			     __field(u32, frame)
+			     __field(u32, scanline)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pipe = crtc->pipe;
+			   __entry->frame = frame;
+			   __entry->scanline = intel_get_crtc_scanline(crtc);
+			   ),
+
+	    TP_printk("pipe %c, frame=%u, scanline=%u",
+		      pipe_name(__entry->pipe), __entry->frame,
+		      __entry->scanline)
+);
+
 /* object tracking */
 
 TRACE_EVENT(i915_gem_object_create,
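For context, TRACE_EVENT() only defines the tracepoints; the display code is expected to emit them around the vblank-evasion critical section of an atomic plane update. A hedged sketch of the intended call sites (the actual callers land in the sprite-update path elsewhere in this series; the bracketing shown here is an illustration, not the driver's exact code):

	/* sketch: instrumenting an atomic pipe update with the new tracepoints */
	trace_i915_pipe_update_start(crtc, min, max);
	/* ... wait until the scanline leaves the [min, max] danger window ... */
	trace_i915_pipe_update_vblank_evaded(crtc, min, max, frame);
	/* ... write all plane registers within the evaded window ... */
	trace_i915_pipe_update_end(crtc, frame);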
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -49,13 +49,19 @@ find_section(struct bdb_header *bdb, int section_id)
 	total = bdb->bdb_size;
 
 	/* walk the sections looking for section_id */
-	while (index < total) {
+	while (index + 3 < total) {
 		current_id = *(base + index);
 		index++;
 
 		current_size = *((u16 *)(base + index));
 		index += 2;
 
+		if (index + current_size > total)
+			return NULL;
+
 		if (current_id == section_id)
 			return base + index;
 
 		index += current_size;
 	}
 
@@ -1099,6 +1105,46 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
 	{ }
 };
 
+static struct bdb_header *validate_vbt(char *base, size_t size,
+				       struct vbt_header *vbt,
+				       const char *source)
+{
+	size_t offset;
+	struct bdb_header *bdb;
+
+	if (vbt == NULL) {
+		DRM_DEBUG_DRIVER("VBT signature missing\n");
+		return NULL;
+	}
+
+	offset = (char *)vbt - base;
+	if (offset + sizeof(struct vbt_header) > size) {
+		DRM_DEBUG_DRIVER("VBT header incomplete\n");
+		return NULL;
+	}
+
+	if (memcmp(vbt->signature, "$VBT", 4)) {
+		DRM_DEBUG_DRIVER("VBT invalid signature\n");
+		return NULL;
+	}
+
+	offset += vbt->bdb_offset;
+	if (offset + sizeof(struct bdb_header) > size) {
+		DRM_DEBUG_DRIVER("BDB header incomplete\n");
+		return NULL;
+	}
+
+	bdb = (struct bdb_header *)(base + offset);
+	if (offset + bdb->bdb_size > size) {
+		DRM_DEBUG_DRIVER("BDB incomplete\n");
+		return NULL;
+	}
+
+	DRM_DEBUG_KMS("Using VBT from %s: %20s\n",
+		      source, vbt->signature);
+	return bdb;
+}
+
 /**
  * intel_parse_bios - find VBT and initialize settings from the BIOS
  * @dev: DRM device
@@ -1122,20 +1168,13 @@ intel_parse_bios(struct drm_device *dev)
 	init_vbt_defaults(dev_priv);
 
 	/* XXX Should this validation be moved to intel_opregion.c? */
-	if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) {
-		struct vbt_header *vbt = dev_priv->opregion.vbt;
-		if (memcmp(vbt->signature, "$VBT", 4) == 0) {
-			DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
-					 vbt->signature);
-			bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
-		} else
-			dev_priv->opregion.vbt = NULL;
-	}
+	if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt)
+		bdb = validate_vbt((char *)dev_priv->opregion.header, OPREGION_SIZE,
+				   (struct vbt_header *)dev_priv->opregion.vbt,
+				   "OpRegion");
 
 	if (bdb == NULL) {
-		struct vbt_header *vbt = NULL;
-		size_t size;
-		int i;
+		size_t i, size;
 
 		bios = pci_map_rom(pdev, &size);
 		if (!bios)
@@ -1143,19 +1182,18 @@ intel_parse_bios(struct drm_device *dev)
 
 		/* Scour memory looking for the VBT signature */
 		for (i = 0; i + 4 < size; i++) {
-			if (!memcmp(bios + i, "$VBT", 4)) {
-				vbt = (struct vbt_header *)(bios + i);
+			if (memcmp(bios + i, "$VBT", 4) == 0) {
+				bdb = validate_vbt(bios, size,
+						   (struct vbt_header *)(bios + i),
+						   "PCI ROM");
 				break;
 			}
 		}
 
-		if (!vbt) {
-			DRM_DEBUG_DRIVER("VBT signature missing\n");
+		if (!bdb) {
 			pci_unmap_rom(pdev, bios);
 			return -1;
 		}
-
-		bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
 	}
 
 	/* Grab useful general definitions */
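The find_section() change above guards both the 3-byte id/size header read and the payload that follows it, so a corrupt BDB can no longer walk off the end of the table. A standalone sketch of the same bounds discipline, with hypothetical types standing in for the driver's structures:

	#include <stddef.h>
	#include <stdint.h>

	/* Walk id/size-prefixed sections; never read past buf[total - 1]. */
	static const uint8_t *find_section(const uint8_t *buf, size_t total,
					   uint8_t wanted_id)
	{
		size_t index = 0;

		while (index + 3 < total) {	/* room for id + 16-bit size */
			uint8_t id = buf[index++];
			uint16_t size = buf[index] | (uint16_t)(buf[index + 1] << 8);
			index += 2;

			if (index + size > total)	/* truncated payload */
				return NULL;
			if (id == wanted_id)
				return buf + index;
			index += size;
		}
		return NULL;	/* not found, or table ran out of room */
	}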
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -144,28 +144,49 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
-	u32 temp;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+	u32 adpa;
 
-	temp = I915_READ(crt->adpa_reg);
-	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
-	temp &= ~ADPA_DAC_ENABLE;
+	if (INTEL_INFO(dev)->gen >= 5)
+		adpa = ADPA_HOTPLUG_BITS;
+	else
+		adpa = 0;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+	/* For CPT allow 3 pipe config, for others just use A or B */
+	if (HAS_PCH_LPT(dev))
+		; /* Those bits don't exist here */
+	else if (HAS_PCH_CPT(dev))
+		adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
+	else if (crtc->pipe == 0)
+		adpa |= ADPA_PIPE_A_SELECT;
+	else
+		adpa |= ADPA_PIPE_B_SELECT;
+
+	if (!HAS_PCH_SPLIT(dev))
+		I915_WRITE(BCLRPAT(crtc->pipe), 0);
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
-		temp |= ADPA_DAC_ENABLE;
+		adpa |= ADPA_DAC_ENABLE;
 		break;
 	case DRM_MODE_DPMS_STANDBY:
-		temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+		adpa |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
 		break;
 	case DRM_MODE_DPMS_SUSPEND:
-		temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+		adpa |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
 		break;
 	case DRM_MODE_DPMS_OFF:
-		temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+		adpa |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
 		break;
 	}
 
-	I915_WRITE(crt->adpa_reg, temp);
+	I915_WRITE(crt->adpa_reg, adpa);
 }
 
 static void intel_disable_crt(struct intel_encoder *encoder)
@@ -274,42 +295,6 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
 	return true;
 }
 
-static void intel_crt_mode_set(struct intel_encoder *encoder)
-{
-
-	struct drm_device *dev = encoder->base.dev;
-	struct intel_crt *crt = intel_encoder_to_crt(encoder);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
-	u32 adpa;
-
-	if (INTEL_INFO(dev)->gen >= 5)
-		adpa = ADPA_HOTPLUG_BITS;
-	else
-		adpa = 0;
-
-	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
-		adpa |= ADPA_HSYNC_ACTIVE_HIGH;
-	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
-		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
-
-	/* For CPT allow 3 pipe config, for others just use A or B */
-	if (HAS_PCH_LPT(dev))
-		; /* Those bits don't exist here */
-	else if (HAS_PCH_CPT(dev))
-		adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
-	else if (crtc->pipe == 0)
-		adpa |= ADPA_PIPE_A_SELECT;
-	else
-		adpa |= ADPA_PIPE_B_SELECT;
-
-	if (!HAS_PCH_SPLIT(dev))
-		I915_WRITE(BCLRPAT(crtc->pipe), 0);
-
-	I915_WRITE(crt->adpa_reg, adpa);
-}
-
 static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
@@ -867,7 +852,6 @@ void intel_crt_init(struct drm_device *dev)
 	crt->adpa_reg = ADPA;
 
 	crt->base.compute_config = intel_crt_compute_config;
-	crt->base.mode_set = intel_crt_mode_set;
 	crt->base.disable = intel_disable_crt;
 	crt->base.enable = intel_enable_crt;
 	if (I915_HAS_HOTPLUG(dev))
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1880,14 +1880,14 @@ static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
 	/* If the pipe isn't enabled, we can't pump pixels and may hang */
 	assert_pipe_enabled(dev_priv, pipe);
 
-	WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n");
+	if (intel_crtc->primary_enabled)
+		return;
 
+	intel_crtc->primary_enabled = true;
 
 	reg = DSPCNTR(plane);
 	val = I915_READ(reg);
-	if (val & DISPLAY_PLANE_ENABLE)
-		return;
+	WARN_ON(val & DISPLAY_PLANE_ENABLE);
 
 	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
 	intel_flush_primary_plane(dev_priv, plane);
@@ -1910,14 +1910,14 @@ static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
 	int reg;
 	u32 val;
 
-	WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n");
+	if (!intel_crtc->primary_enabled)
+		return;
 
+	intel_crtc->primary_enabled = false;
 
 	reg = DSPCNTR(plane);
 	val = I915_READ(reg);
-	if ((val & DISPLAY_PLANE_ENABLE) == 0)
-		return;
+	WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);
 
 	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
 	intel_flush_primary_plane(dev_priv, plane);
@@ -2599,12 +2599,10 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
 	u32 reg, temp, tries;
 
-	/* FDI needs bits from pipe & plane first */
+	/* FDI needs bits from pipe first */
 	assert_pipe_enabled(dev_priv, pipe);
-	assert_plane_enabled(dev_priv, plane);
 
 	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
@@ -7036,7 +7034,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
 	}
 
 	lpt_disable_clkout_dp(dev);
-	intel_runtime_pm_disable_interrupts(dev);
 	hsw_disable_lcpll(dev_priv, true, true);
 }
 
@@ -7048,7 +7045,6 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 	DRM_DEBUG_KMS("Disabling package C8+\n");
 
 	hsw_restore_lcpll(dev_priv);
-	intel_runtime_pm_restore_interrupts(dev);
 	lpt_init_pch_refclk(dev);
 
 	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
@@ -7058,10 +7054,6 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 	}
 
 	intel_prepare_ddi(dev);
-	i915_gem_init_swizzling(dev);
-	mutex_lock(&dev_priv->rps.hw_lock);
-	gen6_update_ring_freq(dev);
-	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 static void snb_modeset_global_resources(struct drm_device *dev)
@@ -7216,7 +7208,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			encoder->base.base.id,
 			drm_get_encoder_name(&encoder->base),
 			mode->base.id, mode->name);
-		encoder->mode_set(encoder);
+
+		if (encoder->mode_set)
+			encoder->mode_set(encoder);
 	}
 
 	return 0;
@@ -10570,6 +10564,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 		intel_crtc->plane = !pipe;
 	}
 
+	init_waitqueue_head(&intel_crtc->vbl_wait);
+
 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
 	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
@@ -11258,9 +11254,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
 
 	intel_reset_dpio(dev);
 
-	mutex_lock(&dev->struct_mutex);
 	intel_enable_gt_powersave(dev);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 void intel_modeset_suspend_hw(struct drm_device *dev)
@@ -11589,6 +11583,16 @@ void i915_redisable_vga(struct drm_device *dev)
 		i915_redisable_vga_power_on(dev);
 }
 
+static bool primary_get_hw_state(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+	if (!crtc->active)
+		return false;
+
+	return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
+}
+
 static void intel_modeset_readout_hw_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11608,7 +11612,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 				    &crtc->config);
 
 		crtc->base.enabled = crtc->active;
-		crtc->primary_enabled = crtc->active;
+		crtc->primary_enabled = primary_get_hw_state(crtc);
 
 		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
 			      crtc->base.base.id,
@@ -11911,6 +11915,7 @@ struct intel_display_error_state {
 	struct intel_pipe_error_state {
 		bool power_domain_on;
 		u32 source;
+		u32 stat;
 	} pipe[I915_MAX_PIPES];
 
 	struct intel_plane_error_state {
@@ -11992,6 +11997,9 @@ intel_display_capture_error_state(struct drm_device *dev)
 		}
 
 		error->pipe[i].source = I915_READ(PIPESRC(i));
+
+		if (!HAS_PCH_SPLIT(dev))
+			error->pipe[i].stat = I915_READ(PIPESTAT(i));
 	}
 
 	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
@@ -12042,6 +12050,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 		err_printf(m, "  Power: %s\n",
 			   error->pipe[i].power_domain_on ? "on" : "off");
 		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
+		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
 
 		err_printf(m, "Plane [%d]:\n", i);
 		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -313,8 +313,12 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	enum intel_display_power_domain power_domain;
 
-	return !dev_priv->pm.suspended &&
+	power_domain = intel_display_port_power_domain(intel_encoder);
+	return intel_display_power_enabled(dev_priv, power_domain) &&
 	       (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
 }
 
@@ -2779,9 +2783,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 	}
 	POSTING_READ(intel_dp->output_reg);
 
-	/* We don't really know why we're doing this */
-	intel_wait_for_vblank(dev, intel_crtc->pipe);
-
 	if (HAS_PCH_IBX(dev) &&
 	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
 		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
|
|||
/* watermarks currently being used */
|
||||
struct intel_pipe_wm active;
|
||||
} wm;
|
||||
|
||||
wait_queue_head_t vbl_wait;
|
||||
};
|
||||
|
||||
struct intel_plane_wm_parameters {
|
||||
|
@ -653,6 +655,7 @@ void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
|
|||
void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
|
||||
void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
|
||||
void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
|
||||
int intel_get_crtc_scanline(struct intel_crtc *crtc);
|
||||
|
||||
|
||||
/* intel_crt.c */
|
||||
|
@ -925,6 +928,7 @@ void intel_init_gt_powersave(struct drm_device *dev);
|
|||
void intel_cleanup_gt_powersave(struct drm_device *dev);
|
||||
void intel_enable_gt_powersave(struct drm_device *dev);
|
||||
void intel_disable_gt_powersave(struct drm_device *dev);
|
||||
void intel_reset_gt_powersave(struct drm_device *dev);
|
||||
void ironlake_teardown_rc6(struct drm_device *dev);
|
||||
void gen6_update_ring_freq(struct drm_device *dev);
|
||||
void gen6_rps_idle(struct drm_i915_private *dev_priv);
|
||||
|
@ -932,6 +936,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv);
|
|||
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
|
||||
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
|
||||
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
|
||||
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
|
||||
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
|
||||
void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
|
||||
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
|
||||
|
|
|
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -285,7 +285,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
 	return true;
 }
 
-static void intel_dvo_mode_set(struct intel_encoder *encoder)
+static void intel_dvo_pre_enable(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -475,7 +475,7 @@ void intel_dvo_init(struct drm_device *dev)
 	intel_encoder->get_hw_state = intel_dvo_get_hw_state;
 	intel_encoder->get_config = intel_dvo_get_config;
 	intel_encoder->compute_config = intel_dvo_compute_config;
-	intel_encoder->mode_set = intel_dvo_mode_set;
+	intel_encoder->pre_enable = intel_dvo_pre_enable;
 	intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
 	intel_connector->unregister = intel_connector_unregister;
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1931,6 +1931,16 @@ static void ilk_compute_wm_maximums(const struct drm_device *dev,
 	max->fbc = ilk_fbc_wm_reg_max(dev);
 }
 
+static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
+					int level,
+					struct ilk_wm_maximums *max)
+{
+	max->pri = ilk_plane_wm_reg_max(dev, level, false);
+	max->spr = ilk_plane_wm_reg_max(dev, level, true);
+	max->cur = ilk_cursor_wm_reg_max(dev, level);
+	max->fbc = ilk_fbc_wm_reg_max(dev);
+}
+
 static bool ilk_validate_wm_level(int level,
 				  const struct ilk_wm_maximums *max,
 				  struct intel_wm_level *result)
@@ -2188,9 +2198,6 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
 	};
 	struct ilk_wm_maximums max;
 
-	/* LP0 watermarks always use 1/2 DDB partitioning */
-	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
-
 	pipe_wm->pipe_enabled = params->active;
 	pipe_wm->sprites_enabled = params->spr.enabled;
 	pipe_wm->sprites_scaled = params->spr.scaled;
@@ -2203,15 +2210,37 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
 	if (params->spr.scaled)
 		max_level = 0;
 
-	for (level = 0; level <= max_level; level++)
-		ilk_compute_wm_level(dev_priv, level, params,
-				     &pipe_wm->wm[level]);
+	ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
 
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
 
+	/* LP0 watermarks always use 1/2 DDB partitioning */
+	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+
 	/* At least LP0 must be valid */
-	return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
+	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
+		return false;
+
+	ilk_compute_wm_reg_maximums(dev, 1, &max);
+
+	for (level = 1; level <= max_level; level++) {
+		struct intel_wm_level wm = {};
+
+		ilk_compute_wm_level(dev_priv, level, params, &wm);
+
+		/*
+		 * Disable any watermark level that exceeds the
+		 * register maximums since such watermarks are
+		 * always invalid.
+		 */
+		if (!ilk_validate_wm_level(level, &max, &wm))
+			break;
+
+		pipe_wm->wm[level] = wm;
+	}
+
+	return true;
 }
 
 /*
@@ -2223,6 +2252,8 @@ static void ilk_merge_wm_level(struct drm_device *dev,
 {
 	const struct intel_crtc *intel_crtc;
 
+	ret_wm->enable = true;
+
 	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
 		const struct intel_pipe_wm *active = &intel_crtc->wm.active;
 		const struct intel_wm_level *wm = &active->wm[level];
@@ -2230,16 +2261,19 @@ static void ilk_merge_wm_level(struct drm_device *dev,
 		if (!active->pipe_enabled)
 			continue;
 
+		/*
+		 * The watermark values may have been used in the past,
+		 * so we must maintain them in the registers for some
+		 * time even if the level is now disabled.
+		 */
 		if (!wm->enable)
-			return;
+			ret_wm->enable = false;
 
 		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
 		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
 		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
 		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
 	}
-
-	ret_wm->enable = true;
 }
 
 /*
@@ -2251,6 +2285,7 @@ static void ilk_wm_merge(struct drm_device *dev,
 			 struct intel_pipe_wm *merged)
 {
 	int level, max_level = ilk_wm_max_level(dev);
+	int last_enabled_level = max_level;
 
 	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
 	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
@@ -2266,15 +2301,19 @@ static void ilk_wm_merge(struct drm_device *dev,
 
 		ilk_merge_wm_level(dev, level, wm);
 
-		if (!ilk_validate_wm_level(level, max, wm))
-			break;
+		if (level > last_enabled_level)
+			wm->enable = false;
+		else if (!ilk_validate_wm_level(level, max, wm))
+			/* make sure all following levels get disabled */
+			last_enabled_level = level - 1;
 
 		/*
 		 * The spec says it is preferred to disable
 		 * FBC WMs instead of disabling a WM level.
 		 */
 		if (wm->fbc_val > max->fbc) {
-			merged->fbc_wm_enabled = false;
+			if (wm->enable)
+				merged->fbc_wm_enabled = false;
 			wm->fbc_val = 0;
 		}
 	}
@@ -2329,14 +2368,19 @@ static void ilk_compute_wm_results(struct drm_device *dev,
 		level = ilk_wm_lp_to_level(wm_lp, merged);
 
 		r = &merged->wm[level];
-		if (!r->enable)
-			break;
 
-		results->wm_lp[wm_lp - 1] = WM3_LP_EN |
+		/*
+		 * Maintain the watermark values even if the level is
+		 * disabled. Doing otherwise could cause underruns.
+		 */
+		results->wm_lp[wm_lp - 1] =
 			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
 			(r->pri_val << WM1_LP_SR_SHIFT) |
 			r->cur_val;
 
+		if (r->enable)
+			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
+
 		if (INTEL_INFO(dev)->gen >= 8)
 			results->wm_lp[wm_lp - 1] |=
 				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
@@ -2344,6 +2388,10 @@ static void ilk_compute_wm_results(struct drm_device *dev,
 			results->wm_lp[wm_lp - 1] |=
 				r->fbc_val << WM1_LP_FBC_SHIFT;
 
+		/*
+		 * Always set WM1S_LP_EN when spr_val != 0, even if the
+		 * level is disabled. Doing otherwise could cause underruns.
+		 */
 		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
 			WARN_ON(wm_lp != 1);
 			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
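The key change in ilk_compute_wm_results() above is that the watermark fields are now always fully programmed and only the enable bit is conditional on level validity. A standalone sketch of that packing, with illustrative (made-up) shift and bit values rather than the real register layout:

	#include <stdbool.h>
	#include <stdint.h>

	#define WM_LP_EN		(1u << 31)	/* illustrative */
	#define WM_LP_LATENCY_SHIFT	24		/* illustrative */
	#define WM_LP_SR_SHIFT		8		/* illustrative */

	static uint32_t pack_wm_lp(uint32_t latency, uint32_t pri,
				   uint32_t cur, bool enable)
	{
		/* Always keep the watermark values programmed... */
		uint32_t val = (latency << WM_LP_LATENCY_SHIFT) |
			       (pri << WM_LP_SR_SHIFT) | cur;

		/* ...and gate only the enable bit, so stale-but-harmless
		 * values remain in the register instead of zeros that
		 * could cause underruns. */
		if (enable)
			val |= WM_LP_EN;
		return val;
	}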
@@ -3129,16 +3177,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 	/* Mask turbo interrupt so that they will not come in between */
 	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
 
-	/* Bring up the Gfx clock */
-	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
-		I915_READ(VLV_GTLC_SURVIVABILITY_REG) |
-				VLV_GFX_CLK_FORCE_ON_BIT);
-
-	if (wait_for(((VLV_GFX_CLK_STATUS_BIT &
-		I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) {
-			DRM_ERROR("GFX_CLK_ON request timed out\n");
-		return;
-	}
+	vlv_force_gfx_clock(dev_priv, true);
 
 	dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
 
@@ -3149,10 +3188,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 				& GENFREQSTATUS) == 0, 5))
 		DRM_ERROR("timed out waiting for Punit\n");
 
-	/* Release the Gfx clock */
-	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
-		I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
-				~VLV_GFX_CLK_FORCE_ON_BIT);
+	vlv_force_gfx_clock(dev_priv, false);
 
 	I915_WRITE(GEN6_PMINTRMSK,
 		   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
@@ -3250,21 +3286,48 @@ static void valleyview_disable_rps(struct drm_device *dev)
 
 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
 {
+	if (IS_VALLEYVIEW(dev)) {
+		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
+			mode = GEN6_RC_CTL_RC6_ENABLE;
+		else
+			mode = 0;
+	}
 	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
 		 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
 		 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
 		 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
 }
 
-int intel_enable_rc6(const struct drm_device *dev)
+static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
 {
 	/* No RC6 before Ironlake */
 	if (INTEL_INFO(dev)->gen < 5)
 		return 0;
 
 	/* RC6 is only on Ironlake mobile not on desktop */
 	if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
 		return 0;
 
+	/* Disable RC6 on Broadwell for now */
+	if (IS_BROADWELL(dev))
+		return 0;
+
 	/* Respect the kernel parameter if it is set */
-	if (i915.enable_rc6 >= 0)
-		return i915.enable_rc6;
+	if (enable_rc6 >= 0) {
+		int mask;
+
+		if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
+			       INTEL_RC6pp_ENABLE;
+		else
+			mask = INTEL_RC6_ENABLE;
+
+		if ((enable_rc6 & mask) != enable_rc6)
+			DRM_INFO("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
+				 enable_rc6, enable_rc6 & mask, mask);
+
+		return enable_rc6 & mask;
+	}
 
 	/* Disable RC6 on Ironlake */
 	if (INTEL_INFO(dev)->gen == 5)
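sanitize_rc6_option() above clamps the user's enable_rc6 module parameter to the RC6 states the platform actually supports. The INTEL_RC6*_ENABLE values are single-bit flags, so the clamp is a plain bitwise AND. A tiny standalone demo of the arithmetic (flag values mirror the driver's 1/2/4 encoding):

	#include <stdio.h>

	#define RC6_ENABLE	(1 << 0)
	#define RC6p_ENABLE	(1 << 1)
	#define RC6pp_ENABLE	(1 << 2)

	int main(void)
	{
		int requested = RC6_ENABLE | RC6p_ENABLE | RC6pp_ENABLE; /* 7 */
		int mask = RC6_ENABLE;	/* e.g. a platform without RC6p/RC6pp */
		int effective = requested & mask;

		/* Prints: requested 7, valid 1, effective 1 */
		printf("requested %d, valid %d, effective %d\n",
		       requested, mask, effective);
		return 0;
	}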
@@ -3276,6 +3339,11 @@ int intel_enable_rc6(const struct drm_device *dev)
 	return INTEL_RC6_ENABLE;
 }
 
+int intel_enable_rc6(const struct drm_device *dev)
+{
+	return i915.enable_rc6;
+}
+
 static void gen6_enable_rps_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3497,7 +3565,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-void gen6_update_ring_freq(struct drm_device *dev)
+static void __gen6_update_ring_freq(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int min_freq = 15;
@@ -3567,6 +3635,18 @@ void gen6_update_ring_freq(struct drm_device *dev)
 	}
 }
 
+void gen6_update_ring_freq(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
+		return;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	__gen6_update_ring_freq(dev);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
 int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
 {
 	u32 val, rp0;
@@ -3661,6 +3741,45 @@ static void valleyview_cleanup_pctx(struct drm_device *dev)
 	dev_priv->vlv_pctx = NULL;
 }
 
+static void valleyview_init_gt_powersave(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	valleyview_setup_pctx(dev);
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
+	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+			 dev_priv->rps.max_freq);
+
+	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
+	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+			 dev_priv->rps.efficient_freq);
+
+	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
+	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+			 dev_priv->rps.min_freq);
+
+	/* Preserve min/max settings in case of re-init */
+	if (dev_priv->rps.max_freq_softlimit == 0)
+		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+	if (dev_priv->rps.min_freq_softlimit == 0)
+		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
+{
+	valleyview_cleanup_pctx(dev);
+}
+
 static void valleyview_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3727,29 +3846,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
 			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
 			 dev_priv->rps.cur_freq);
 
-	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
-	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
-	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
-			 dev_priv->rps.max_freq);
-
-	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
-	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-			 dev_priv->rps.efficient_freq);
-
-	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
-	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
-			 dev_priv->rps.min_freq);
-
-	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_freq_softlimit == 0)
-		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
-	if (dev_priv->rps.min_freq_softlimit == 0)
-		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
-
 	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
 			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
 			 dev_priv->rps.efficient_freq);
@@ -3876,7 +3972,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 	I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 
-	intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
+	intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
 }
 
 static unsigned long intel_pxfreq(u32 vidfreq)
@@ -4490,14 +4586,16 @@ static void intel_init_emon(struct drm_device *dev)
 
 void intel_init_gt_powersave(struct drm_device *dev)
 {
+	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
+
 	if (IS_VALLEYVIEW(dev))
-		valleyview_setup_pctx(dev);
+		valleyview_init_gt_powersave(dev);
 }
 
 void intel_cleanup_gt_powersave(struct drm_device *dev)
 {
 	if (IS_VALLEYVIEW(dev))
-		valleyview_cleanup_pctx(dev);
+		valleyview_cleanup_gt_powersave(dev);
 }
 
 void intel_disable_gt_powersave(struct drm_device *dev)
@@ -4510,7 +4608,7 @@ void intel_disable_gt_powersave(struct drm_device *dev)
 	if (IS_IRONLAKE_M(dev)) {
 		ironlake_disable_drps(dev);
 		ironlake_disable_rc6(dev);
-	} else if (INTEL_INFO(dev)->gen >= 6) {
+	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
 		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
 		cancel_work_sync(&dev_priv->rps.work);
 		mutex_lock(&dev_priv->rps.hw_lock);
@@ -4536,13 +4634,15 @@ static void intel_gen6_powersave_work(struct work_struct *work)
 		valleyview_enable_rps(dev);
 	} else if (IS_BROADWELL(dev)) {
 		gen8_enable_rps(dev);
-		gen6_update_ring_freq(dev);
+		__gen6_update_ring_freq(dev);
 	} else {
 		gen6_enable_rps(dev);
-		gen6_update_ring_freq(dev);
+		__gen6_update_ring_freq(dev);
 	}
 	dev_priv->rps.enabled = true;
 	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	intel_runtime_pm_put(dev_priv);
 }
 
 void intel_enable_gt_powersave(struct drm_device *dev)
@@ -4550,20 +4650,38 @@ void intel_enable_gt_powersave(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (IS_IRONLAKE_M(dev)) {
+		mutex_lock(&dev->struct_mutex);
 		ironlake_enable_drps(dev);
 		ironlake_enable_rc6(dev);
 		intel_init_emon(dev);
+		mutex_unlock(&dev->struct_mutex);
 	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
 		/*
 		 * PCU communication is slow and this doesn't need to be
 		 * done at any specific time, so do this out of our fast path
 		 * to make resume and init faster.
+		 *
+		 * We depend on the HW RC6 power context save/restore
+		 * mechanism when entering D3 through runtime PM suspend. So
+		 * disable RPM until RPS/RC6 is properly setup. We can only
+		 * get here via the driver load/system resume/runtime resume
+		 * paths, so the _noresume version is enough (and in case of
+		 * runtime resume it's necessary).
 		 */
-		schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
-				      round_jiffies_up_relative(HZ));
+		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
+					  round_jiffies_up_relative(HZ)))
+			intel_runtime_pm_get_noresume(dev_priv);
 	}
 }
 
+void intel_reset_gt_powersave(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->rps.enabled = false;
+	intel_enable_gt_powersave(dev);
+}
+
 static void ibx_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4918,6 +5036,10 @@ static void gen8_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
 		   _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
 
+	/* WaDisableDopClockGating:bdw May not be needed for production */
+	I915_WRITE(GEN7_ROW_CHICKEN2,
+		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
 	/* WaSwitchSolVfFArbitrationPriority:bdw */
 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
@@ -5622,11 +5744,13 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	/*
-	 * During driver initialization we need to defer enabling hotplug
-	 * processing until fbdev is set up.
+	 * During driver initialization/resume we can avoid restoring the
+	 * part of the HW/SW state that will be inited anyway explicitly.
 	 */
-	if (dev_priv->enable_hotplug_processing)
-		intel_hpd_init(dev_priv->dev);
+	if (dev_priv->power_domains.initializing)
+		return;
+
+	intel_hpd_init(dev_priv->dev);
 
 	i915_redisable_vga_power_on(dev_priv->dev);
 }
@@ -5990,9 +6114,13 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
 
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
 {
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+	power_domains->initializing = true;
 	/* For now, we need the power well to be always enabled. */
 	intel_display_set_init_power(dev_priv, true);
 	intel_power_domains_resume(dev_priv);
+	power_domains->initializing = false;
 }
 
 void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
@@ -6017,6 +6145,18 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
 	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
 }
 
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	if (!HAS_RUNTIME_PM(dev))
+		return;
+
+	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
+	pm_runtime_get_noresume(device);
+}
+
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
@@ -6039,6 +6179,15 @@ void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
 
 	pm_runtime_set_active(device);
 
+	/*
+	 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
+	 * requirement.
+	 */
+	if (!intel_enable_rc6(dev)) {
+		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
+		return;
+	}
+
 	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
 	pm_runtime_mark_last_busy(device);
 	pm_runtime_use_autosuspend(device);
@@ -6054,6 +6203,9 @@ void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
 	if (!HAS_RUNTIME_PM(dev))
 		return;
 
+	if (!intel_enable_rc6(dev))
+		return;
+
 	/* Make sure we're not suspended first. */
 	pm_runtime_get_sync(device);
 	pm_runtime_disable(device);
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,6 +33,13 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
+ * but keeps the logic simple. Indeed, the whole purpose of this macro is just
+ * to give some inclination as to some of the magic values used in the various
+ * workarounds!
+ */
+#define CACHELINE_BYTES 64
+
 static inline int ring_space(struct intel_ring_buffer *ring)
 {
 	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
@@ -179,7 +186,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
 static int
 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
 {
-	u32 scratch_addr = ring->scratch.gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 
@@ -216,7 +223,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
 		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	u32 scratch_addr = ring->scratch.gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -310,7 +317,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	u32 scratch_addr = ring->scratch.gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/*
@@ -371,7 +378,7 @@ gen8_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	u32 scratch_addr = ring->scratch.gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -516,12 +523,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
 		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
 		DRM_ERROR("%s initialization failed "
-				"ctl %08x head %08x tail %08x start %08x\n",
-				ring->name,
-				I915_READ_CTL(ring),
-				I915_READ_HEAD(ring),
-				I915_READ_TAIL(ring),
-				I915_READ_START(ring));
+			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
+			  ring->name,
+			  I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
+			  I915_READ_HEAD(ring), I915_READ_TAIL(ring),
+			  I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
 		ret = -EIO;
 		goto out;
 	}
@@ -657,20 +663,44 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 	ring->scratch.obj = NULL;
 }
 
-static void
-update_mboxes(struct intel_ring_buffer *ring,
-	      u32 mmio_offset)
+static int gen6_signal(struct intel_ring_buffer *signaller,
+		       unsigned int num_dwords)
 {
-/* NB: In order to be able to do semaphore MBOX updates for varying number
- * of rings, it's easiest if we round up each individual update to a
- * multiple of 2 (since ring updates must always be a multiple of 2)
- * even though the actual update only requires 3 dwords.
- */
+	struct drm_device *dev = signaller->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *useless;
+	int i, ret;
+
+	/* NB: In order to be able to do semaphore MBOX updates for varying
+	 * number of rings, it's easiest if we round up each individual update
+	 * to a multiple of 2 (since ring updates must always be a multiple of
+	 * 2) even though the actual update only requires 3 dwords.
+	 */
 #define MBOX_UPDATE_DWORDS 4
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit(ring, mmio_offset);
-	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
-	intel_ring_emit(ring, MI_NOOP);
+	if (i915_semaphore_is_enabled(dev))
+		num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
+
+	ret = intel_ring_begin(signaller, num_dwords);
+	if (ret)
+		return ret;
+#undef MBOX_UPDATE_DWORDS
+
+	for_each_ring(useless, dev_priv, i) {
+		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
+		if (mbox_reg != GEN6_NOSYNC) {
+			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
+			intel_ring_emit(signaller, mbox_reg);
+			intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+			intel_ring_emit(signaller, MI_NOOP);
+		} else {
+			intel_ring_emit(signaller, MI_NOOP);
+			intel_ring_emit(signaller, MI_NOOP);
+			intel_ring_emit(signaller, MI_NOOP);
+			intel_ring_emit(signaller, MI_NOOP);
+		}
+	}
+
+	return 0;
 }
 
 /**
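The MBOX_UPDATE_DWORDS accounting in gen6_signal() is worth spelling out: each mailbox update is 3 dwords of real work padded to 4 so every update stays a multiple of 2, and one slot is reserved per ring other than the signaller. A standalone check of the arithmetic, assuming 5 rings as on a machine with VCS2:

	#include <stdio.h>

	int main(void)
	{
		int num_rings = 5;		/* RCS, VCS, BCS, VECS, VCS2 */
		int mbox_update_dwords = 4;	/* 3 used + 1 MI_NOOP of padding */
		int base = 4;			/* the caller's own request dwords */
		int total = base + (num_rings - 1) * mbox_update_dwords;

		/* Prints: ring_begin reserves 20 dwords */
		printf("ring_begin reserves %d dwords\n", total);
		return 0;
	}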
@ -685,27 +715,12 @@ update_mboxes(struct intel_ring_buffer *ring,
|
|||
static int
|
||||
gen6_add_request(struct intel_ring_buffer *ring)
|
||||
{
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_ring_buffer *useless;
|
||||
int i, ret, num_dwords = 4;
|
||||
int ret;
|
||||
|
||||
if (i915_semaphore_is_enabled(dev))
|
||||
num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
|
||||
#undef MBOX_UPDATE_DWORDS
|
||||
|
||||
ret = intel_ring_begin(ring, num_dwords);
|
||||
ret = ring->semaphore.signal(ring, 4);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (i915_semaphore_is_enabled(dev)) {
|
||||
for_each_ring(useless, dev_priv, i) {
|
||||
u32 mbox_reg = ring->signal_mbox[i];
|
||||
if (mbox_reg != GEN6_NOSYNC)
|
||||
update_mboxes(ring, mbox_reg);
|
||||
}
|
||||
}
|
||||
|
||||
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
|
||||
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
|
||||
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
|
||||
|
@ -734,10 +749,11 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
|
|||
struct intel_ring_buffer *signaller,
|
||||
u32 seqno)
|
||||
{
|
||||
int ret;
|
||||
u32 dw1 = MI_SEMAPHORE_MBOX |
|
||||
MI_SEMAPHORE_COMPARE |
|
||||
MI_SEMAPHORE_REGISTER;
|
||||
u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
|
||||
int ret;
|
||||
|
||||
/* Throughout all of the GEM code, seqno passed implies our current
|
||||
* seqno is >= the last seqno executed. However for hardware the
|
||||
|
@ -745,8 +761,7 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
|
|||
*/
|
||||
seqno -= 1;
|
||||
|
||||
WARN_ON(signaller->semaphore_register[waiter->id] ==
|
||||
MI_SEMAPHORE_SYNC_INVALID);
|
||||
WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
|
||||
|
||||
ret = intel_ring_begin(waiter, 4);
|
||||
if (ret)
|
||||
|
@ -754,9 +769,7 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
|
|||
|
||||
/* If seqno wrap happened, omit the wait with no-ops */
|
||||
if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
|
||||
intel_ring_emit(waiter,
|
||||
dw1 |
|
||||
signaller->semaphore_register[waiter->id]);
|
||||
intel_ring_emit(waiter, dw1 | wait_mbox);
|
||||
intel_ring_emit(waiter, seqno);
|
||||
intel_ring_emit(waiter, 0);
|
||||
intel_ring_emit(waiter, MI_NOOP);
|
||||
|
@ -783,7 +796,7 @@ do { \
|
|||
static int
|
||||
pc_render_add_request(struct intel_ring_buffer *ring)
|
||||
{
|
||||
u32 scratch_addr = ring->scratch.gtt_offset + 128;
|
||||
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
|
||||
int ret;
|
||||
|
||||
/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
|
||||
|
@ -805,15 +818,15 @@ pc_render_add_request(struct intel_ring_buffer *ring)
|
|||
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
|
||||
intel_ring_emit(ring, 0);
|
||||
PIPE_CONTROL_FLUSH(ring, scratch_addr);
|
||||
scratch_addr += 128; /* write to separate cachelines */
|
||||
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
|
||||
PIPE_CONTROL_FLUSH(ring, scratch_addr);
|
||||
scratch_addr += 128;
|
||||
scratch_addr += 2 * CACHELINE_BYTES;
|
||||
PIPE_CONTROL_FLUSH(ring, scratch_addr);
|
||||
scratch_addr += 128;
|
||||
scratch_addr += 2 * CACHELINE_BYTES;
|
||||
PIPE_CONTROL_FLUSH(ring, scratch_addr);
|
||||
scratch_addr += 128;
|
||||
scratch_addr += 2 * CACHELINE_BYTES;
|
||||
PIPE_CONTROL_FLUSH(ring, scratch_addr);
|
||||
scratch_addr += 128;
|
||||
scratch_addr += 2 * CACHELINE_BYTES;
|
||||
PIPE_CONTROL_FLUSH(ring, scratch_addr);
|
||||
|
||||
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
|
||||
|
@ -988,6 +1001,11 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
|
|||
case BCS:
|
||||
mmio = BLT_HWS_PGA_GEN7;
|
||||
break;
|
||||
/*
|
||||
* VCS2 actually doesn't exist on Gen7. Only shut up
|
||||
* gcc switch check warning
|
||||
*/
|
||||
case VCS2:
|
||||
case VCS:
|
||||
mmio = BSD_HWS_PGA_GEN7;
|
||||
break;
|
||||
|
@ -1192,7 +1210,7 @@ gen8_ring_put_irq(struct intel_ring_buffer *ring)
|
|||
|
||||
static int
|
||||
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
|
||||
u32 offset, u32 length,
|
||||
u64 offset, u32 length,
|
||||
unsigned flags)
|
||||
{
|
||||
int ret;
|
||||
|
@ -1215,7 +1233,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
|
|||
#define I830_BATCH_LIMIT (256*1024)
|
||||
static int
|
||||
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
|
||||
u32 offset, u32 len,
|
||||
u64 offset, u32 len,
|
||||
unsigned flags)
|
||||
{
|
||||
int ret;
|
||||
|
@ -1266,7 +1284,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
|
|||
|
||||
static int
|
||||
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
|
||||
u32 offset, u32 len,
|
||||
u64 offset, u32 len,
|
||||
unsigned flags)
|
||||
{
|
||||
int ret;
|
||||
|
@ -1298,45 +1316,39 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
|
|||
|
||||
static int init_status_page(struct intel_ring_buffer *ring)
|
||||
{
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_gem_object *obj;
|
||||
int ret;
|
||||
|
||||
obj = i915_gem_alloc_object(dev, 4096);
|
||||
if (obj == NULL) {
|
||||
DRM_ERROR("Failed to allocate status page\n");
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
if ((obj = ring->status_page.obj) == NULL) {
|
||||
int ret;
|
||||
|
||||
obj = i915_gem_alloc_object(ring->dev, 4096);
|
||||
if (obj == NULL) {
|
||||
DRM_ERROR("Failed to allocate status page\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
|
||||
if (ret)
|
||||
goto err_unref;
|
||||
|
||||
ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
|
||||
if (ret) {
|
||||
err_unref:
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ring->status_page.obj = obj;
|
||||
}
|
||||
|
||||
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
|
||||
if (ret)
|
||||
goto err_unref;
|
||||
|
||||
ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
|
||||
if (ret)
|
||||
goto err_unref;
|
||||
|
||||
ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
|
||||
ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
|
||||
if (ring->status_page.page_addr == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto err_unpin;
|
||||
}
|
||||
ring->status_page.obj = obj;
|
||||
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
|
||||
|
||||
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
|
||||
ring->name, ring->status_page.gfx_addr);
|
||||
|
||||
return 0;
|
||||
|
||||
err_unpin:
|
||||
i915_gem_object_ggtt_unpin(obj);
|
||||
err_unref:
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int init_phys_status_page(struct intel_ring_buffer *ring)
|
||||
|
@ -1356,18 +1368,60 @@ static int init_phys_status_page(struct intel_ring_buffer *ring)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int allocate_ring_buffer(struct intel_ring_buffer *ring)
|
||||
{
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_i915_gem_object *obj;
|
||||
int ret;
|
||||
|
||||
if (ring->obj)
|
||||
return 0;
|
||||
|
||||
obj = NULL;
|
||||
if (!HAS_LLC(dev))
|
||||
obj = i915_gem_object_create_stolen(dev, ring->size);
|
||||
if (obj == NULL)
|
||||
obj = i915_gem_alloc_object(dev, ring->size);
|
||||
if (obj == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
|
||||
if (ret)
|
||||
goto err_unref;
|
||||
|
||||
ret = i915_gem_object_set_to_gtt_domain(obj, true);
|
||||
if (ret)
|
||||
goto err_unpin;
|
||||
|
||||
ring->virtual_start =
|
||||
ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
|
||||
ring->size);
|
||||
if (ring->virtual_start == NULL) {
|
||||
ret = -EINVAL;
|
||||
goto err_unpin;
|
||||
}
|
||||
|
||||
ring->obj = obj;
|
||||
return 0;
|
||||
|
||||
err_unpin:
|
||||
i915_gem_object_ggtt_unpin(obj);
|
||||
err_unref:
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int intel_init_ring_buffer(struct drm_device *dev,
|
||||
struct intel_ring_buffer *ring)
|
||||
{
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int ret;
|
||||
|
||||
ring->dev = dev;
|
||||
INIT_LIST_HEAD(&ring->active_list);
|
||||
INIT_LIST_HEAD(&ring->request_list);
|
||||
ring->size = 32 * PAGE_SIZE;
|
||||
memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
|
||||
memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
|
||||
|
||||
init_waitqueue_head(&ring->irq_queue);
|
||||
|
||||
|
@@ -1382,80 +1436,34 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		return ret;
 	}
 
-	obj = NULL;
-	if (!HAS_LLC(dev))
-		obj = i915_gem_object_create_stolen(dev, ring->size);
-	if (obj == NULL)
-		obj = i915_gem_alloc_object(dev, ring->size);
-	if (obj == NULL) {
-		DRM_ERROR("Failed to allocate ringbuffer\n");
-		ret = -ENOMEM;
-		goto err_hws;
+	ret = allocate_ring_buffer(ring);
+	if (ret) {
+		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
+		return ret;
 	}
 
-	ring->obj = obj;
-
-	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
-	if (ret)
-		goto err_unref;
-
-	ret = i915_gem_object_set_to_gtt_domain(obj, true);
-	if (ret)
-		goto err_unpin;
-
-	ring->virtual_start =
-		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-			   ring->size);
-	if (ring->virtual_start == NULL) {
-		DRM_ERROR("Failed to map ringbuffer.\n");
-		ret = -EINVAL;
-		goto err_unpin;
-	}
-
-	ret = ring->init(ring);
-	if (ret)
-		goto err_unmap;
-
 	/* Workaround an erratum on the i830 which causes a hang if
 	 * the TAIL pointer points to within the last 2 cachelines
 	 * of the buffer.
 	 */
 	ring->effective_size = ring->size;
-	if (IS_I830(ring->dev) || IS_845G(ring->dev))
-		ring->effective_size -= 128;
+	if (IS_I830(dev) || IS_845G(dev))
+		ring->effective_size -= 2 * CACHELINE_BYTES;
 
 	i915_cmd_parser_init_ring(ring);
 
-	return 0;
-
-err_unmap:
-	iounmap(ring->virtual_start);
-err_unpin:
-	i915_gem_object_ggtt_unpin(obj);
-err_unref:
-	drm_gem_object_unreference(&obj->base);
-	ring->obj = NULL;
-err_hws:
-	cleanup_status_page(ring);
-	return ret;
+	return ring->init(ring);
 }
 
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 {
-	struct drm_i915_private *dev_priv;
-	int ret;
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
 	if (ring->obj == NULL)
 		return;
 
-	/* Disable the ring buffer. The ring must be idle at this point */
-	dev_priv = ring->dev->dev_private;
-	ret = intel_ring_idle(ring);
-	if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
-		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-			  ring->name, ret);
-
-	I915_WRITE_CTL(ring, 0);
+	intel_stop_ring_buffer(ring);
+	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
 	iounmap(ring->virtual_start);
 
@@ -1683,12 +1691,13 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
 {
-	int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
+	int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
 	if (num_dwords == 0)
 		return 0;
 
+	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
 	ret = intel_ring_begin(ring, num_dwords);
 	if (ret)
 		return ret;
 
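The old and new padding computations agree except when the tail is already cacheline-aligned, where the old form wastefully padded a full cacheline of MI_NOOPs instead of returning early. A standalone arithmetic check of both forms (illustrative only; CACHELINE_BYTES is 64 here):

#include <assert.h>
#include <stdint.h>

#define CACHELINE_BYTES 64

/* Illustration only: compare the old and new padding computations. */
static int pad_old(uint32_t tail) { return (64 - (tail & 63)) / sizeof(uint32_t); }
static int pad_new(uint32_t tail)
{
	int n = (tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	return n ? CACHELINE_BYTES / sizeof(uint32_t) - n : 0;
}

int main(void)
{
	assert(pad_old(0x100) == 16);	/* old: pads a whole cacheline... */
	assert(pad_new(0x100) == 0);	/* ...new: already aligned, nothing to do */
	assert(pad_old(0x108) == 14 && pad_new(0x108) == 14); /* otherwise identical */
	return 0;
}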
@@ -1788,7 +1797,7 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
 
 static int
 gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
-			      u32 offset, u32 len,
+			      u64 offset, u32 len,
 			      unsigned flags)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
@@ -1802,8 +1811,8 @@ gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 
 	/* FIXME(BDW): Address space and security selectors. */
 	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
-	intel_ring_emit(ring, offset);
-	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, lower_32_bits(offset));
+	intel_ring_emit(ring, upper_32_bits(offset));
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
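Gen8's MI_BATCH_BUFFER_START takes the batch address as two dwords, low half first, which is why the dispatch path now carries a u64 offset. The kernel's lower_32_bits()/upper_32_bits() helpers are plain truncations and shifts; a self-contained illustration:

#include <assert.h>
#include <stdint.h>

/* Same shape as the kernel's lower_32_bits()/upper_32_bits(). */
#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
	uint64_t offset = 0x0000000123456000ULL;	/* example 64b GGTT offset */

	assert(lower_32_bits(offset) == 0x23456000u);	/* emitted first */
	assert(upper_32_bits(offset) == 0x1u);		/* emitted second */
	return 0;
}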
@@ -1812,7 +1821,7 @@ gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 
 static int
 hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
-			     u32 offset, u32 len,
+			     u64 offset, u32 len,
 			     unsigned flags)
 {
 	int ret;
 
@@ -1833,7 +1842,7 @@ hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 
 static int
 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
-			      u32 offset, u32 len,
+			      u64 offset, u32 len,
 			      unsigned flags)
 {
 	int ret;
 
@@ -1919,15 +1928,24 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
-		ring->sync_to = gen6_ring_sync;
-		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
-		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
-		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
-		ring->signal_mbox[RCS] = GEN6_NOSYNC;
-		ring->signal_mbox[VCS] = GEN6_VRSYNC;
-		ring->signal_mbox[BCS] = GEN6_BRSYNC;
-		ring->signal_mbox[VECS] = GEN6_VERSYNC;
+		ring->semaphore.sync_to = gen6_ring_sync;
+		ring->semaphore.signal = gen6_signal;
+		/*
+		 * The current semaphore scheme is only used on pre-gen8
+		 * platforms, and there is no VCS2 ring there, so the
+		 * semaphore between RCS and VCS2 is initialized as
+		 * INVALID.  Gen8 will initialize the semaphore between
+		 * VCS2 and RCS later.
+		 */
+		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+		ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+		ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+		ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+		ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->flush = gen4_render_ring_flush;
 
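gen6_signal(), installed here as ring->semaphore.signal, loops over the other rings and writes the freshly assigned seqno into each one's mailbox register, skipping GEN6_NOSYNC slots. A reduced standalone model of that loop — the register offsets below are made-up placeholders, not the real GEN6_*SYNC values:

#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS   5
#define GEN6_NOSYNC 0	/* illustrative sentinel for "no mailbox here" */

/* Reduced model: after a request, the signalling ring writes its new
 * seqno into every other ring's mailbox so they can wait on it. */
static void signal_other_rings(const uint32_t signal_mbox[NUM_RINGS],
			       uint32_t seqno)
{
	for (int i = 0; i < NUM_RINGS; i++) {
		if (signal_mbox[i] == GEN6_NOSYNC)	/* own slot, or no semaphore */
			continue;
		/* stands in for MI_LOAD_REGISTER_IMM(mbox, seqno) on real hw */
		printf("write seqno %u to mbox reg %#x\n", seqno, signal_mbox[i]);
	}
}

int main(void)
{
	/* render ring's table: it signals VCS/BCS/VECS, never itself */
	const uint32_t rcs_signal[NUM_RINGS] = { 0, 0x40, 0x44, 0x48, 0 };

	signal_other_rings(rcs_signal, 42);
	return 0;
}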
@@ -2045,7 +2063,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	ring->size = size;
 	ring->effective_size = ring->size;
 	if (IS_I830(ring->dev) || IS_845G(ring->dev))
-		ring->effective_size -= 128;
+		ring->effective_size -= 2 * CACHELINE_BYTES;
 
 	ring->virtual_start = ioremap_wc(start, size);
 	if (ring->virtual_start == NULL) {
 
@@ -2095,15 +2113,24 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 			ring->dispatch_execbuffer =
 				gen6_ring_dispatch_execbuffer;
 		}
-		ring->sync_to = gen6_ring_sync;
-		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
-		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
-		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
-		ring->signal_mbox[RCS] = GEN6_RVSYNC;
-		ring->signal_mbox[VCS] = GEN6_NOSYNC;
-		ring->signal_mbox[BCS] = GEN6_BVSYNC;
-		ring->signal_mbox[VECS] = GEN6_VEVSYNC;
+		ring->semaphore.sync_to = gen6_ring_sync;
+		ring->semaphore.signal = gen6_signal;
+		/*
+		 * The current semaphore scheme is only used on pre-gen8
+		 * platforms, and there is no VCS2 ring there, so the
+		 * semaphore between VCS and VCS2 is initialized as
+		 * INVALID.  Gen8 will initialize the semaphore between
+		 * VCS2 and VCS later.
+		 */
+		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+		ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+		ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+		ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+		ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	} else {
 		ring->mmio_base = BSD_RING_BASE;
 		ring->flush = bsd_ring_flush;
 
@@ -2126,6 +2153,58 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 	return intel_init_ring_buffer(dev, ring);
 }
 
+/**
+ * Initialize the second BSD ring; note that it only exists on Broadwell GT3.
+ */
+int intel_init_bsd2_ring_buffer(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[VCS2];
+
+	if (INTEL_INFO(dev)->gen != 8) {
+		DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
+		return -EINVAL;
+	}
+
+	ring->name = "bsd2 ring";
+	ring->id = VCS2;
+
+	ring->write_tail = ring_write_tail;
+	ring->mmio_base = GEN8_BSD2_RING_BASE;
+	ring->flush = gen6_bsd_ring_flush;
+	ring->add_request = gen6_add_request;
+	ring->get_seqno = gen6_ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
+	ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
+	ring->irq_get = gen8_ring_get_irq;
+	ring->irq_put = gen8_ring_put_irq;
+	ring->dispatch_execbuffer =
+			gen8_ring_dispatch_execbuffer;
+	ring->semaphore.sync_to = gen6_ring_sync;
+	/*
+	 * The current semaphore scheme is only used on pre-gen8, and
+	 * there is no bsd2 ring there, so the semaphore registers
+	 * between VCS2 and the other rings are initialized as invalid.
+	 * Gen8 will initialize the semaphores between VCS2 and the
+	 * other rings later.
+	 */
+	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+	ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+
+	ring->init = init_ring_common;
+
+	return intel_init_ring_buffer(dev, ring);
+}
+
 int intel_init_blt_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -2152,15 +2231,24 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 		ring->irq_put = gen6_ring_put_irq;
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	}
-	ring->sync_to = gen6_ring_sync;
-	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
-	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
-	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
-	ring->signal_mbox[RCS] = GEN6_RBSYNC;
-	ring->signal_mbox[VCS] = GEN6_VBSYNC;
-	ring->signal_mbox[BCS] = GEN6_NOSYNC;
-	ring->signal_mbox[VECS] = GEN6_VEBSYNC;
+	ring->semaphore.sync_to = gen6_ring_sync;
+	ring->semaphore.signal = gen6_signal;
+	/*
+	 * The current semaphore scheme is only used on pre-gen8
+	 * platforms, and there is no VCS2 ring there, so the semaphore
+	 * between BCS and VCS2 is initialized as INVALID.  Gen8 will
+	 * initialize the semaphore between BCS and VCS2 later.
+	 */
+	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+	ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+	ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	ring->init = init_ring_common;
 
 	return intel_init_ring_buffer(dev, ring);
@@ -2193,15 +2281,18 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 		ring->irq_put = hsw_vebox_put_irq;
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	}
-	ring->sync_to = gen6_ring_sync;
-	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
-	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
-	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
-	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->signal_mbox[RCS] = GEN6_RVESYNC;
-	ring->signal_mbox[VCS] = GEN6_VVESYNC;
-	ring->signal_mbox[BCS] = GEN6_BVESYNC;
-	ring->signal_mbox[VECS] = GEN6_NOSYNC;
+	ring->semaphore.sync_to = gen6_ring_sync;
+	ring->semaphore.signal = gen6_signal;
+	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+	ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+	ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	ring->init = init_ring_common;
 
 	return intel_init_ring_buffer(dev, ring);
@@ -2244,3 +2335,19 @@ intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
 	ring->gpu_caches_dirty = false;
 	return 0;
 }
+
+void
+intel_stop_ring_buffer(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	if (!intel_ring_initialized(ring))
+		return;
+
+	ret = intel_ring_idle(ring);
+	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+			  ring->name, ret);
+
+	stop_ring(ring);
+}
drivers/gpu/drm/i915/intel_ringbuffer.h

@@ -61,8 +61,10 @@ struct intel_ring_buffer {
 		VCS,
 		BCS,
 		VECS,
+		VCS2
 	} id;
-#define I915_NUM_RINGS 4
+#define I915_NUM_RINGS 5
+#define LAST_USER_RING (VECS + 1)
 	u32		mmio_base;
 	void		__iomem *virtual_start;
 	struct		drm_device *dev;
 
@@ -88,7 +90,6 @@ struct intel_ring_buffer {
 	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
 	u32		trace_irq_seqno;
-	u32		sync_seqno[I915_NUM_RINGS-1];
 	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
 	void		(*irq_put)(struct intel_ring_buffer *ring);
 
@@ -111,19 +112,30 @@ struct intel_ring_buffer {
 	void		(*set_seqno)(struct intel_ring_buffer *ring,
 				     u32 seqno);
 	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
-					       u32 offset, u32 length,
+					       u64 offset, u32 length,
 					       unsigned flags);
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
 	void		(*cleanup)(struct intel_ring_buffer *ring);
-	int		(*sync_to)(struct intel_ring_buffer *ring,
-				   struct intel_ring_buffer *to,
-				   u32 seqno);
 
-	/* our mbox written by others */
-	u32		semaphore_register[I915_NUM_RINGS];
-	/* mboxes this ring signals to */
-	u32		signal_mbox[I915_NUM_RINGS];
+	struct {
+		u32	sync_seqno[I915_NUM_RINGS-1];
+
+		struct {
+			/* our mbox written by others */
+			u32	wait[I915_NUM_RINGS];
+			/* mboxes this ring signals to */
+			u32	signal[I915_NUM_RINGS];
+		} mbox;
+
+		/* AKA wait() */
+		int	(*sync_to)(struct intel_ring_buffer *ring,
+				   struct intel_ring_buffer *to,
+				   u32 seqno);
+		int	(*signal)(struct intel_ring_buffer *signaller,
+				  /* num_dwords needed by caller */
+				  unsigned int num_dwords);
+	} semaphore;
 
 	/**
 	 * List of objects currently involved in rendering from the
 
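sync_seqno[] keeps its I915_NUM_RINGS-1 size inside the new struct: a ring only tracks the *other* rings. The slot for a given partner is found with wrap-around arithmetic, in the spirit of this header's intel_ring_sync_index(); a standalone rendition of that mapping:

#include <assert.h>

#define I915_NUM_RINGS 5

/* Slot for 'other' in a ring's sync_seqno[]: ring i uses slots 0..3
 * for the rings after it in id order, wrapping around past the end. */
static int sync_index(int ring, int other)
{
	int idx = (other - ring) - 1;

	if (idx < 0)
		idx += I915_NUM_RINGS;
	return idx;
}

int main(void)
{
	/* RCS(0): VCS(1)->0, BCS(2)->1, VECS(3)->2, VCS2(4)->3 */
	assert(sync_index(0, 1) == 0 && sync_index(0, 4) == 3);
	/* VCS2(4): RCS(0)->0 via wrap-around */
	assert(sync_index(4, 0) == 0);
	return 0;
}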
@@ -263,6 +275,7 @@ intel_write_status_page(struct intel_ring_buffer *ring,
 #define I915_GEM_HWS_SCRATCH_INDEX	0x30
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
+void intel_stop_ring_buffer(struct intel_ring_buffer *ring);
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
 
@@ -286,6 +299,7 @@ int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
+int intel_init_bsd2_ring_buffer(struct drm_device *dev);
 int intel_init_blt_ring_buffer(struct drm_device *dev);
 int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
drivers/gpu/drm/i915/intel_sdvo.c

@@ -1174,7 +1174,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 	return true;
 }
 
-static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
+static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -2999,7 +2999,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
 
 	intel_encoder->compute_config = intel_sdvo_compute_config;
 	intel_encoder->disable = intel_disable_sdvo;
-	intel_encoder->mode_set = intel_sdvo_mode_set;
+	intel_encoder->pre_enable = intel_sdvo_pre_enable;
 	intel_encoder->enable = intel_enable_sdvo;
 	intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
 	intel_encoder->get_config = intel_sdvo_get_config;
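Folding ->mode_set into ->pre_enable works because the CRTC enable sequence runs encoder->pre_enable before the pipe comes up and encoder->enable after, so the register programming keeps its "pipe off" guarantee. A reduced model of that ordering, with simplified callback signatures (not the driver's real types):

#include <stdio.h>

struct encoder {
	void (*pre_enable)(void);	/* pipe still off: safe for full mode programming */
	void (*enable)(void);		/* pipe running: turn the output on */
};

static void tv_pre_enable(void) { printf("program encoder (pipe off)\n"); }
static void tv_enable(void)     { printf("turn output on\n"); }

static void crtc_enable(struct encoder *enc)
{
	if (enc->pre_enable)
		enc->pre_enable();	/* absorbs the work of the old ->mode_set */
	printf("enable pll, pipe, planes\n");
	if (enc->enable)
		enc->enable();
}

int main(void)
{
	struct encoder tv = { tv_pre_enable, tv_enable };

	crtc_enable(&tv);
	return 0;
}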
drivers/gpu/drm/i915/intel_sprite.c

@@ -37,6 +37,106 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
+static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
+{
+	/* paranoia */
+	if (!mode->crtc_htotal)
+		return 1;
+
+	return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal);
+}
+
+static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
+{
+	struct drm_device *dev = crtc->base.dev;
+	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+	enum pipe pipe = crtc->pipe;
+	long timeout = msecs_to_jiffies_timeout(1);
+	int scanline, min, max, vblank_start;
+	DEFINE_WAIT(wait);
+
+	WARN_ON(!mutex_is_locked(&crtc->base.mutex));
+
+	vblank_start = mode->crtc_vblank_start;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		vblank_start = DIV_ROUND_UP(vblank_start, 2);
+
+	/* FIXME needs to be calibrated sensibly */
+	min = vblank_start - usecs_to_scanlines(mode, 100);
+	max = vblank_start - 1;
+
+	if (min <= 0 || max <= 0)
+		return false;
+
+	if (WARN_ON(drm_vblank_get(dev, pipe)))
+		return false;
+
+	local_irq_disable();
+
+	trace_i915_pipe_update_start(crtc, min, max);
+
+	for (;;) {
+		/*
+		 * prepare_to_wait() has a memory barrier, which guarantees
+		 * other CPUs can see the task state update by the time we
+		 * read the scanline.
+		 */
+		prepare_to_wait(&crtc->vbl_wait, &wait, TASK_UNINTERRUPTIBLE);
+
+		scanline = intel_get_crtc_scanline(crtc);
+		if (scanline < min || scanline > max)
+			break;
+
+		if (timeout <= 0) {
+			DRM_ERROR("Potential atomic update failure on pipe %c\n",
+				  pipe_name(crtc->pipe));
+			break;
+		}
+
+		local_irq_enable();
+
+		timeout = schedule_timeout(timeout);
+
+		local_irq_disable();
+	}
+
+	finish_wait(&crtc->vbl_wait, &wait);
+
+	drm_vblank_put(dev, pipe);
+
+	*start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
+
+	trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count);
+
+	return true;
+}
+
+static void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
+{
+	struct drm_device *dev = crtc->base.dev;
+	enum pipe pipe = crtc->pipe;
+	u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
+
+	trace_i915_pipe_update_end(crtc, end_vbl_count);
+
+	local_irq_enable();
+
+	if (start_vbl_count != end_vbl_count)
+		DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
+			  pipe_name(pipe), start_vbl_count, end_vbl_count);
+}
+
+static void intel_update_primary_plane(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	int reg = DSPCNTR(crtc->plane);
+
+	if (crtc->primary_enabled)
+		I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+	else
+		I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+}
+
 static void
 vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
 		 struct drm_framebuffer *fb,
 
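To get a feel for the evasion window usecs_to_scanlines() computes: with 1080p CEA timing (148.5 MHz dot clock, htotal 2200), 100 µs comes to 7 scanlines before vblank start. A standalone check (crtc_clock is in kHz, per drm_display_mode convention):

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int usecs_to_scanlines(int crtc_clock_khz, int crtc_htotal, int usecs)
{
	if (!crtc_htotal)
		return 1;	/* paranoia, as in the driver */
	return DIV_ROUND_UP(usecs * crtc_clock_khz, 1000 * crtc_htotal);
}

int main(void)
{
	/* 1080p@60 CEA timing: 148.5 MHz dotclock, htotal 2200 */
	assert(usecs_to_scanlines(148500, 2200, 100) == 7);
	return 0;
}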
@@ -48,11 +148,14 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
 	struct drm_device *dev = dplane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_plane *intel_plane = to_intel_plane(dplane);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_plane->pipe;
 	int plane = intel_plane->plane;
 	u32 sprctl;
 	unsigned long sprsurf_offset, linear_offset;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+	u32 start_vbl_count;
+	bool atomic_update;
 
 	sprctl = I915_READ(SPCNTR(pipe, plane));
 
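Each vlv/ivb/ilk update and disable path below follows the same shape around its register writes; a condensed sketch of that pattern using the helpers introduced above (illustrative, not a compilable driver function on its own):

/* Condensed pattern shared by the sprite update/disable paths. */
static void plane_update_skeleton(struct intel_crtc *intel_crtc)
{
	u32 start_vbl_count;
	bool atomic_update;

	/* enter the vblank-evasion critical section (irqs off on success) */
	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);

	intel_update_primary_plane(intel_crtc);	/* apply primary_enabled state */

	/* ... write all sprite registers; the SPxSURF write latches them ... */

	intel_flush_primary_plane(dev_priv, intel_crtc->plane);

	if (atomic_update)
		intel_pipe_update_end(intel_crtc, start_vbl_count);	/* irqs on, verify we stayed in one frame */
}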
@@ -131,6 +234,10 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
 							fb->pitches[0]);
 	linear_offset -= sprsurf_offset;
 
+	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+	intel_update_primary_plane(intel_crtc);
+
 	I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
 	I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
 
@@ -143,7 +250,11 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
 	I915_WRITE(SPCNTR(pipe, plane), sprctl);
 	I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
 		   sprsurf_offset);
 	POSTING_READ(SPSURF(pipe, plane));
+
+	intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+	if (atomic_update)
+		intel_pipe_update_end(intel_crtc, start_vbl_count);
 }
 
 static void
 
@@ -152,14 +263,25 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 	struct drm_device *dev = dplane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_plane *intel_plane = to_intel_plane(dplane);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_plane->pipe;
 	int plane = intel_plane->plane;
+	u32 start_vbl_count;
+	bool atomic_update;
+
+	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+	intel_update_primary_plane(intel_crtc);
 
 	I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
 		   ~SP_ENABLE);
 	/* Activate double buffered register update */
 	I915_WRITE(SPSURF(pipe, plane), 0);
 	POSTING_READ(SPSURF(pipe, plane));
 
+	intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+	if (atomic_update)
+		intel_pipe_update_end(intel_crtc, start_vbl_count);
+
 	intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
 }
 
@@ -226,10 +348,13 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_plane->pipe;
 	u32 sprctl, sprscale = 0;
 	unsigned long sprsurf_offset, linear_offset;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+	u32 start_vbl_count;
+	bool atomic_update;
 
 	sprctl = I915_READ(SPRCTL(pipe));
 
@@ -299,6 +424,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 						 pixel_size, fb->pitches[0]);
 	linear_offset -= sprsurf_offset;
 
+	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+	intel_update_primary_plane(intel_crtc);
+
 	I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
 	I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
 
@@ -317,7 +446,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	I915_WRITE(SPRCTL(pipe), sprctl);
 	I915_WRITE(SPRSURF(pipe),
 		   i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
 	POSTING_READ(SPRSURF(pipe));
+
+	intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+	if (atomic_update)
+		intel_pipe_update_end(intel_crtc, start_vbl_count);
 }
 
 static void
 
@@ -326,7 +459,14 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_plane->pipe;
+	u32 start_vbl_count;
+	bool atomic_update;
+
+	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+	intel_update_primary_plane(intel_crtc);
 
 	I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
 	/* Can't leave the scaler enabled... */
 
@@ -334,7 +474,11 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 	I915_WRITE(SPRSCALE(pipe), 0);
 	/* Activate double buffered register update */
 	I915_WRITE(SPRSURF(pipe), 0);
 	POSTING_READ(SPRSURF(pipe));
 
+	intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+	if (atomic_update)
+		intel_pipe_update_end(intel_crtc, start_vbl_count);
+
 	/*
 	 * Avoid underruns when disabling the sprite.
 
@@ -410,10 +554,13 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_plane->pipe;
 	unsigned long dvssurf_offset, linear_offset;
 	u32 dvscntr, dvsscale;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+	u32 start_vbl_count;
+	bool atomic_update;
 
 	dvscntr = I915_READ(DVSCNTR(pipe));
 
@@ -478,6 +625,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 						 pixel_size, fb->pitches[0]);
 	linear_offset -= dvssurf_offset;
 
+	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+	intel_update_primary_plane(intel_crtc);
+
 	I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
 	I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
 
@@ -491,7 +642,11 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	I915_WRITE(DVSCNTR(pipe), dvscntr);
 	I915_WRITE(DVSSURF(pipe),
 		   i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
 	POSTING_READ(DVSSURF(pipe));
+
+	intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+	if (atomic_update)
+		intel_pipe_update_end(intel_crtc, start_vbl_count);
 }
 
 static void
 
@@ -500,14 +655,25 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_plane->pipe;
+	u32 start_vbl_count;
+	bool atomic_update;
+
+	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+	intel_update_primary_plane(intel_crtc);
 
 	I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
 	/* Disable the scaler */
 	I915_WRITE(DVSSCALE(pipe), 0);
 	/* Flush double buffered register updates */
 	I915_WRITE(DVSSURF(pipe), 0);
 	POSTING_READ(DVSSURF(pipe));
 
+	intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+	if (atomic_update)
+		intel_pipe_update_end(intel_crtc, start_vbl_count);
+
 	/*
 	 * Avoid underruns when disabling the sprite.
 
@@ -519,20 +685,10 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 }
 
 static void
-intel_enable_primary(struct drm_crtc *crtc)
+intel_post_enable_primary(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int reg = DSPCNTR(intel_crtc->plane);
-
-	if (intel_crtc->primary_enabled)
-		return;
-
-	intel_crtc->primary_enabled = true;
-
-	I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
-	intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
 	/*
 	 * FIXME IPS should be fine as long as one plane is
 
@@ -551,17 +707,11 @@ intel_enable_primary(struct drm_crtc *crtc)
 }
 
 static void
-intel_disable_primary(struct drm_crtc *crtc)
+intel_pre_disable_primary(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int reg = DSPCNTR(intel_crtc->plane);
-
-	if (!intel_crtc->primary_enabled)
-		return;
-
-	intel_crtc->primary_enabled = false;
 
 	mutex_lock(&dev->struct_mutex);
 	if (dev_priv->fbc.plane == intel_crtc->plane)
 
@@ -575,9 +725,6 @@ intel_disable_primary(struct drm_crtc *crtc)
 	 * versa.
 	 */
 	hsw_disable_ips(intel_crtc);
-
-	I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
-	intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 }
 
 static int
 
@@ -671,7 +818,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	struct drm_i915_gem_object *obj = intel_fb->obj;
 	struct drm_i915_gem_object *old_obj = intel_plane->obj;
 	int ret;
-	bool disable_primary = false;
+	bool primary_enabled;
 	bool visible;
 	int hscale, vscale;
 	int max_scale, min_scale;
 
@@ -842,8 +989,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	 * If the sprite is completely covering the primary plane,
 	 * we can disable the primary and save power.
 	 */
-	disable_primary = drm_rect_equals(&dst, &clip) && !colorkey_enabled(intel_plane);
-	WARN_ON(disable_primary && !visible && intel_crtc->active);
+	primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
+	WARN_ON(!primary_enabled && !visible && intel_crtc->active);
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -870,12 +1017,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	intel_plane->obj = obj;
 
 	if (intel_crtc->active) {
-		/*
-		 * Be sure to re-enable the primary before the sprite is no longer
-		 * covering it fully.
-		 */
-		if (!disable_primary)
-			intel_enable_primary(crtc);
+		bool primary_was_enabled = intel_crtc->primary_enabled;
+
+		intel_crtc->primary_enabled = primary_enabled;
+
+		if (primary_was_enabled && !primary_enabled)
+			intel_pre_disable_primary(crtc);
 
 		if (visible)
 			intel_plane->update_plane(plane, crtc, fb, obj,
 
@@ -884,8 +1031,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		else
 			intel_plane->disable_plane(plane, crtc);
 
-		if (disable_primary)
-			intel_disable_primary(crtc);
+		if (!primary_was_enabled && primary_enabled)
+			intel_post_enable_primary(crtc);
 	}
 
 	/* Unpin old obj after new one is active to avoid ugliness */
 
@@ -923,8 +1070,14 @@ intel_disable_plane(struct drm_plane *plane)
 	intel_crtc = to_intel_crtc(plane->crtc);
 
 	if (intel_crtc->active) {
-		intel_enable_primary(plane->crtc);
+		bool primary_was_enabled = intel_crtc->primary_enabled;
+
+		intel_crtc->primary_enabled = true;
+
 		intel_plane->disable_plane(plane, plane->crtc);
+
+		if (!primary_was_enabled && intel_crtc->primary_enabled)
+			intel_post_enable_primary(plane->crtc);
 	}
 
 	if (intel_plane->obj) {
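The update path now computes the desired primary-plane state and acts only on genuine transitions: pre-disable runs before the sprite update when the sprite is about to fully cover the primary, post-enable runs after it when the sprite stops covering it. The four cases as a small self-contained check:

#include <assert.h>
#include <stdbool.h>

struct transition { bool pre_disable, post_enable; };

/* Mirror of the update-path logic above. */
static struct transition primary_transition(bool was_enabled, bool enabled)
{
	return (struct transition){
		.pre_disable = was_enabled && !enabled,
		.post_enable = !was_enabled && enabled,
	};
}

int main(void)
{
	assert(primary_transition(true, false).pre_disable);	/* sprite now covers all */
	assert(primary_transition(false, true).post_enable);	/* sprite uncovered it */
	assert(!primary_transition(true, true).pre_disable);	/* no change: no work */
	assert(!primary_transition(false, false).post_enable);
	return 0;
}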
drivers/gpu/drm/i915/intel_tv.c

@@ -934,54 +934,14 @@ intel_tv_compute_config(struct intel_encoder *encoder,
 	return true;
 }
 
-static void intel_tv_mode_set(struct intel_encoder *encoder)
+static void
+set_tv_mode_timings(struct drm_i915_private *dev_priv,
+		    const struct tv_mode *tv_mode,
+		    bool burst_ena)
 {
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-	struct intel_tv *intel_tv = enc_to_tv(encoder);
-	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
-	u32 tv_ctl;
 	u32 hctl1, hctl2, hctl3;
 	u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
-	u32 scctl1, scctl2, scctl3;
-	int i, j;
-	const struct video_levels *video_levels;
-	const struct color_conversion *color_conversion;
-	bool burst_ena;
-	int pipe = intel_crtc->pipe;
-
-	if (!tv_mode)
-		return;	/* can't happen (mode_prepare prevents this) */
-
-	tv_ctl = I915_READ(TV_CTL);
-	tv_ctl &= TV_CTL_SAVE;
-
-	switch (intel_tv->type) {
-	default:
-	case DRM_MODE_CONNECTOR_Unknown:
-	case DRM_MODE_CONNECTOR_Composite:
-		tv_ctl |= TV_ENC_OUTPUT_COMPOSITE;
-		video_levels = tv_mode->composite_levels;
-		color_conversion = tv_mode->composite_color;
-		burst_ena = tv_mode->burst_ena;
-		break;
-	case DRM_MODE_CONNECTOR_Component:
-		tv_ctl |= TV_ENC_OUTPUT_COMPONENT;
-		video_levels = &component_levels;
-		if (tv_mode->burst_ena)
-			color_conversion = &sdtv_csc_yprpb;
-		else
-			color_conversion = &hdtv_csc_yprpb;
-		burst_ena = false;
-		break;
-	case DRM_MODE_CONNECTOR_SVIDEO:
-		tv_ctl |= TV_ENC_OUTPUT_SVIDEO;
-		video_levels = tv_mode->svideo_levels;
-		color_conversion = tv_mode->svideo_color;
-		burst_ena = tv_mode->burst_ena;
-		break;
-	}
 	hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
 		(tv_mode->htotal << TV_HTOTAL_SHIFT);
 
@@ -1021,6 +981,86 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
 	vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
 		(tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
 
+	I915_WRITE(TV_H_CTL_1, hctl1);
+	I915_WRITE(TV_H_CTL_2, hctl2);
+	I915_WRITE(TV_H_CTL_3, hctl3);
+	I915_WRITE(TV_V_CTL_1, vctl1);
+	I915_WRITE(TV_V_CTL_2, vctl2);
+	I915_WRITE(TV_V_CTL_3, vctl3);
+	I915_WRITE(TV_V_CTL_4, vctl4);
+	I915_WRITE(TV_V_CTL_5, vctl5);
+	I915_WRITE(TV_V_CTL_6, vctl6);
+	I915_WRITE(TV_V_CTL_7, vctl7);
+}
+
+static void set_color_conversion(struct drm_i915_private *dev_priv,
+				 const struct color_conversion *color_conversion)
+{
+	if (!color_conversion)
+		return;
+
+	I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
+		   color_conversion->gy);
+	I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
+		   color_conversion->ay);
+	I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
+		   color_conversion->gu);
+	I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
+		   color_conversion->au);
+	I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
+		   color_conversion->gv);
+	I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
+		   color_conversion->av);
+}
+
+static void intel_tv_pre_enable(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_tv *intel_tv = enc_to_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+	u32 tv_ctl;
+	u32 scctl1, scctl2, scctl3;
+	int i, j;
+	const struct video_levels *video_levels;
+	const struct color_conversion *color_conversion;
+	bool burst_ena;
+	int xpos = 0x0, ypos = 0x0;
+	unsigned int xsize, ysize;
+
+	if (!tv_mode)
+		return;	/* can't happen (mode_prepare prevents this) */
+
+	tv_ctl = I915_READ(TV_CTL);
+	tv_ctl &= TV_CTL_SAVE;
+
+	switch (intel_tv->type) {
+	default:
+	case DRM_MODE_CONNECTOR_Unknown:
+	case DRM_MODE_CONNECTOR_Composite:
+		tv_ctl |= TV_ENC_OUTPUT_COMPOSITE;
+		video_levels = tv_mode->composite_levels;
+		color_conversion = tv_mode->composite_color;
+		burst_ena = tv_mode->burst_ena;
+		break;
+	case DRM_MODE_CONNECTOR_Component:
+		tv_ctl |= TV_ENC_OUTPUT_COMPONENT;
+		video_levels = &component_levels;
+		if (tv_mode->burst_ena)
+			color_conversion = &sdtv_csc_yprpb;
+		else
+			color_conversion = &hdtv_csc_yprpb;
+		burst_ena = false;
+		break;
+	case DRM_MODE_CONNECTOR_SVIDEO:
+		tv_ctl |= TV_ENC_OUTPUT_SVIDEO;
+		video_levels = tv_mode->svideo_levels;
+		color_conversion = tv_mode->svideo_color;
+		burst_ena = tv_mode->burst_ena;
+		break;
+	}
+
 	if (intel_crtc->pipe == 1)
 		tv_ctl |= TV_ENC_PIPEB_SELECT;
 	tv_ctl |= tv_mode->oversample;
 
@@ -1051,37 +1091,16 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
 		   tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
 
 	/* Enable two fixes for the chips that need them. */
-	if (dev->pdev->device < 0x2772)
+	if (IS_I915GM(dev))
 		tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
 
-	I915_WRITE(TV_H_CTL_1, hctl1);
-	I915_WRITE(TV_H_CTL_2, hctl2);
-	I915_WRITE(TV_H_CTL_3, hctl3);
-	I915_WRITE(TV_V_CTL_1, vctl1);
-	I915_WRITE(TV_V_CTL_2, vctl2);
-	I915_WRITE(TV_V_CTL_3, vctl3);
-	I915_WRITE(TV_V_CTL_4, vctl4);
-	I915_WRITE(TV_V_CTL_5, vctl5);
-	I915_WRITE(TV_V_CTL_6, vctl6);
-	I915_WRITE(TV_V_CTL_7, vctl7);
+	set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
+
 	I915_WRITE(TV_SC_CTL_1, scctl1);
 	I915_WRITE(TV_SC_CTL_2, scctl2);
 	I915_WRITE(TV_SC_CTL_3, scctl3);
 
-	if (color_conversion) {
-		I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
-			   color_conversion->gy);
-		I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
-			   color_conversion->ay);
-		I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
-			   color_conversion->gu);
-		I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
-			   color_conversion->au);
-		I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
-			   color_conversion->gv);
-		I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
-			   color_conversion->av);
-	}
+	set_color_conversion(dev_priv, color_conversion);
 
 	if (INTEL_INFO(dev)->gen >= 4)
 		I915_WRITE(TV_CLR_KNOBS, 0x00404000);
 
@@ -1092,46 +1111,25 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
 	I915_WRITE(TV_CLR_LEVEL,
 		   ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
 		    (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
-	{
-		int pipeconf_reg = PIPECONF(pipe);
-		int dspcntr_reg = DSPCNTR(intel_crtc->plane);
-		int pipeconf = I915_READ(pipeconf_reg);
-		int dspcntr = I915_READ(dspcntr_reg);
-		int xpos = 0x0, ypos = 0x0;
-		unsigned int xsize, ysize;
-		/* Pipe must be off here */
-		I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
-		intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
-		/* Wait for vblank for the disable to take effect */
-		if (IS_GEN2(dev))
-			intel_wait_for_vblank(dev, intel_crtc->pipe);
+	assert_pipe_disabled(dev_priv, intel_crtc->pipe);
 
-		I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
-		/* Wait for vblank for the disable to take effect. */
-		intel_wait_for_pipe_off(dev, intel_crtc->pipe);
-
-		/* Filter ctl must be set before TV_WIN_SIZE */
-		I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
-		xsize = tv_mode->hblank_start - tv_mode->hblank_end;
-		if (tv_mode->progressive)
-			ysize = tv_mode->nbr_end + 1;
-		else
-			ysize = 2*tv_mode->nbr_end + 1;
-
-		xpos += intel_tv->margin[TV_MARGIN_LEFT];
-		ypos += intel_tv->margin[TV_MARGIN_TOP];
-		xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
-			  intel_tv->margin[TV_MARGIN_RIGHT]);
-		ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
-			  intel_tv->margin[TV_MARGIN_BOTTOM]);
-		I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
-		I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
-
-		I915_WRITE(pipeconf_reg, pipeconf);
-		I915_WRITE(dspcntr_reg, dspcntr);
-		intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-	}
+	/* Filter ctl must be set before TV_WIN_SIZE */
+	I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
+	xsize = tv_mode->hblank_start - tv_mode->hblank_end;
+	if (tv_mode->progressive)
+		ysize = tv_mode->nbr_end + 1;
+	else
+		ysize = 2*tv_mode->nbr_end + 1;
+
+	xpos += intel_tv->margin[TV_MARGIN_LEFT];
+	ypos += intel_tv->margin[TV_MARGIN_TOP];
+	xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
+		  intel_tv->margin[TV_MARGIN_RIGHT]);
+	ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
+		  intel_tv->margin[TV_MARGIN_BOTTOM]);
+	I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
+	I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
 
 	j = 0;
 	for (i = 0; i < 60; i++)
 
@@ -1634,7 +1632,7 @@ intel_tv_init(struct drm_device *dev)
 
 	intel_encoder->compute_config = intel_tv_compute_config;
 	intel_encoder->get_config = intel_tv_get_config;
-	intel_encoder->mode_set = intel_tv_mode_set;
+	intel_encoder->pre_enable = intel_tv_pre_enable;
 	intel_encoder->enable = intel_enable_tv;
 	intel_encoder->disable = intel_disable_tv;
 	intel_encoder->get_hw_state = intel_tv_get_hw_state;
 
drivers/gpu/drm/i915/intel_uncore.c

@@ -370,7 +370,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
 	if (HAS_FPGA_DBG_UNCLAIMED(dev))
 		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 
-	if (IS_HASWELL(dev) &&
+	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
 	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
 		/* The docs do not explain exactly how the calculation can be
 		 * made. It is somewhat guessable, but for now, it's always
 
include/drm/i915_pciids.h

@@ -223,14 +223,26 @@
 	_INTEL_BDW_D(gt, 0x160A, info), /* Server */ \
 	_INTEL_BDW_D(gt, 0x160D, info) /* Workstation */
 
-#define INTEL_BDW_M_IDS(info) \
-	_INTEL_BDW_M_IDS(1, info), \
-	_INTEL_BDW_M_IDS(2, info), \
-	_INTEL_BDW_M_IDS(3, info)
-
-#define INTEL_BDW_D_IDS(info) \
-	_INTEL_BDW_D_IDS(1, info), \
-	_INTEL_BDW_D_IDS(2, info), \
-	_INTEL_BDW_D_IDS(3, info)
+#define INTEL_BDW_GT12M_IDS(info) \
+	_INTEL_BDW_M_IDS(1, info), \
+	_INTEL_BDW_M_IDS(2, info)
+
+#define INTEL_BDW_GT12D_IDS(info) \
+	_INTEL_BDW_D_IDS(1, info), \
+	_INTEL_BDW_D_IDS(2, info)
+
+#define INTEL_BDW_GT3M_IDS(info) \
+	_INTEL_BDW_M_IDS(3, info)
+
+#define INTEL_BDW_GT3D_IDS(info) \
+	_INTEL_BDW_D_IDS(3, info)
+
+#define INTEL_BDW_M_IDS(info) \
+	INTEL_BDW_GT12M_IDS(info), \
+	INTEL_BDW_GT3M_IDS(info)
+
+#define INTEL_BDW_D_IDS(info) \
+	INTEL_BDW_GT12D_IDS(info), \
+	INTEL_BDW_GT3D_IDS(info)
 
 #endif /* _I915_PCIIDS_H */
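These macros expand into struct pci_device_id initializers, so the split lets a device table pick up GT1/GT2 and GT3 Broadwell IDs separately (GT3 being the part that grows the second BSD ring above). An illustrative table — the *_info symbols are stand-ins for whatever per-device data the driver attaches:

#include <linux/pci.h>
#include <drm/i915_pciids.h>

static const int bdw_gt3m_info;	/* stand-in for the driver's device info */
static const int bdw_gt3d_info;

static const struct pci_device_id bdw_gt3_ids[] = {
	INTEL_BDW_GT3M_IDS(&bdw_gt3m_info),
	INTEL_BDW_GT3D_IDS(&bdw_gt3d_info),
	{ }	/* terminator */
};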
lib/Kconfig

@@ -331,6 +331,20 @@ config TEXTSEARCH_FSM
 config BTREE
 	boolean
 
+config INTERVAL_TREE
+	boolean
+	help
+	  Simple, embeddable, interval-tree. Can find the start of an
+	  overlapping range in log(n) time and then iterate over all
+	  overlapping nodes. The algorithm is implemented as an
+	  augmented rbtree.
+
+	  See:
+
+		Documentation/rbtree.txt
+
+	  for more information.
+
 config ASSOCIATIVE_ARRAY
 	bool
 	help
 
lib/Kconfig.debug

@@ -1496,6 +1496,7 @@ config RBTREE_TEST
 config INTERVAL_TREE_TEST
 	tristate "Interval tree test"
 	depends on m && DEBUG_KERNEL
+	select INTERVAL_TREE
 	help
 	  A benchmark measuring the performance of the interval tree library
 
lib/Makefile

@@ -50,6 +50,7 @@ CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
 obj-$(CONFIG_BTREE) += btree.o
+obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
 
@@ -156,8 +157,6 @@ lib-$(CONFIG_LIBFDT) += $(libfdt_files)
 obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
 obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
 
-interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
-
 obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
 
 obj-$(CONFIG_ASN1) += asn1_decoder.o
 
lib/interval_tree.c

@@ -1,6 +1,7 @@
 #include <linux/init.h>
 #include <linux/interval_tree.h>
 #include <linux/interval_tree_generic.h>
+#include <linux/module.h>
 
 #define START(node) ((node)->start)
 #define LAST(node)  ((node)->last)
 
@@ -8,3 +9,8 @@
 INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
 		     unsigned long, __subtree_last,
 		     START, LAST,, interval_tree)
+
+EXPORT_SYMBOL_GPL(interval_tree_insert);
+EXPORT_SYMBOL_GPL(interval_tree_remove);
+EXPORT_SYMBOL_GPL(interval_tree_iter_first);
+EXPORT_SYMBOL_GPL(interval_tree_iter_next);
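With the symbols exported, modules such as the i915 userptr work this merge prepares for can use the stock interval tree. A minimal usage sketch against the <linux/interval_tree.h> API (start/last are inclusive bounds):

#include <linux/interval_tree.h>
#include <linux/module.h>

static struct rb_root range_root = RB_ROOT;

static void interval_tree_example(void)
{
	static struct interval_tree_node node;
	struct interval_tree_node *it;

	node.start = 0x1000;
	node.last = 0x1fff;		/* inclusive end */
	interval_tree_insert(&node, &range_root);

	/* walk every range overlapping [0x1800, 0x2800] */
	for (it = interval_tree_iter_first(&range_root, 0x1800, 0x2800);
	     it;
	     it = interval_tree_iter_next(it, 0x1800, 0x2800)) {
		/* ... use it->start / it->last ... */
	}

	interval_tree_remove(&node, &range_root);
}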