Merge tag 'drm-intel-next-2013-05-20-merged' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
Highlights (copy-pasted from my testing cycle mails):
- fbc support for Haswell (Rodrigo)
- streamlined workaround comments, including an igt tool to grep for
  them (Damien)
- sdvo and TV out cleanups, including a fixup for sdvo multifunction devices
- refactor our eDP mess a bit (Imre)
- don't register the hdmi connector on haswell when desktop eDP is present
- vlv support is no longer preliminary!
- more vlv fixes from Jesse for stolen and dpll handling
- more flexible power well checking infrastructure from Paulo
- a few gtt patches from Ben
- a bit of OCD cleanups for transcoder #defines and an assorted pile
  of smaller things.
- fixes for the gmch modeset sequence
- a bit of OCD around plane/pipe usage (Ville)
- vlv turbo support (Jesse)
- tons of vlv modeset fixes (Jesse et al.)
- vlv pte write fixes (Kenneth Graunke)
- hpd filtering to avoid costly probes on unaffected outputs (Egbert Eich)
- intel dev_info cleanups and refactorings (Damien)
- vlv rc6 support (Jesse)
- random pile of fixes around non-24bpp modes handling
- asle/opregion cleanups and locking fixes (Jani)
- dp dpll refactoring
- improvements for reduced_clock computation on g4x/ilk+
- pfit state refactored to use pipe_config (Jesse)
- lots more computed modeset state moved to pipe_config, including readout
  and cross-check support
- fdi auto-dithering for ivb B/C links, using the neat pipe_config
  improvements
- drm_rect helpers plus sprite clipping fixes (Ville)
- hw context refcounting (Mika + Ben)

* tag 'drm-intel-next-2013-05-20-merged' of git://people.freedesktop.org/~danvet/drm-intel: (155 commits)
  drm/i915: add support for dvo Chrontel 7010B
  drm/i915: Use pipe config state to control gmch pfit enable/disable
  drm/i915: Use pipe_config state to disable ilk+ pfit
  drm/i915: panel fitter hw state readout&check support
  drm/i915: implement WADPOClockGatingDisable for LPT
  drm/i915: Add missing platform tags to FBC workaround comments
  drm/i915: rip out an unused lvds_reg variable
  drm/i915: Compute WR PLL dividers dynamically
  drm/i915: HSW FBC WaFbcDisableDpfcClockGating
  drm/i915: HSW FBC WaFbcAsynchFlipDisableFbcQueue
  drm/i915: Enable FBC at Haswell.
  drm/i915: IVB FBC WaFbcDisableDpfcClockGating
  drm/i915: IVB FBC WaFbcAsynchFlipDisableFbcQueue
  drm/i915: Add support for FBC on Ivybridge.
  drm/i915: Organize VBT stuff inside drm_i915_private
  drm/i915: make SDVO TV-out work for multifunction devices
  drm/i915: rip out now unused is_foo tracking from crtc code
  drm/i915: rip out TV-out lore ...
  drm/i915: drop TVclock special casing on ilk+
  drm/i915: move sdvo TV clock computation to intel_sdvo.c
  ...
Dave Airlie 2013-05-31 12:56:05 +10:00
Parents: 970fa986fa e1b73cba13
Commit: e81f3d81e2
35 changed files with 4397 additions and 2158 deletions


@ -1653,6 +1653,8 @@ void intel_crt_init(struct drm_device *dev)
<sect2>
<title>KMS API Functions</title>
!Edrivers/gpu/drm/drm_crtc.c
!Edrivers/gpu/drm/drm_rect.c
!Finclude/drm/drm_rect.h
</sect2>
</sect1>


@ -12,7 +12,8 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o

drivers/gpu/drm/drm_rect.c (new file, 295 lines)

@ -0,0 +1,295 @@
/*
* Copyright (C) 2011-2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <drm/drmP.h>
#include <drm/drm_rect.h>
/**
* drm_rect_intersect - intersect two rectangles
* @r1: first rectangle
* @r2: second rectangle
*
* Calculate the intersection of rectangles @r1 and @r2.
* @r1 will be overwritten with the intersection.
*
* RETURNS:
* %true if rectangle @r1 is still visible after the operation,
* %false otherwise.
*/
bool drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2)
{
r1->x1 = max(r1->x1, r2->x1);
r1->y1 = max(r1->y1, r2->y1);
r1->x2 = min(r1->x2, r2->x2);
r1->y2 = min(r1->y2, r2->y2);
return drm_rect_visible(r1);
}
EXPORT_SYMBOL(drm_rect_intersect);
/**
* drm_rect_clip_scaled - perform a scaled clip operation
* @src: source window rectangle
* @dst: destination window rectangle
* @clip: clip rectangle
* @hscale: horizontal scaling factor
* @vscale: vertical scaling factor
*
* Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
* same amounts multiplied by @hscale and @vscale.
*
* RETURNS:
* %true if rectangle @dst is still visible after being clipped,
* %false otherwise
*/
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip,
int hscale, int vscale)
{
int diff;
diff = clip->x1 - dst->x1;
if (diff > 0) {
int64_t tmp = src->x1 + (int64_t) diff * hscale;
src->x1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
}
diff = clip->y1 - dst->y1;
if (diff > 0) {
int64_t tmp = src->y1 + (int64_t) diff * vscale;
src->y1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
}
diff = dst->x2 - clip->x2;
if (diff > 0) {
int64_t tmp = src->x2 - (int64_t) diff * hscale;
src->x2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
}
diff = dst->y2 - clip->y2;
if (diff > 0) {
int64_t tmp = src->y2 - (int64_t) diff * vscale;
src->y2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
}
return drm_rect_intersect(dst, clip);
}
EXPORT_SYMBOL(drm_rect_clip_scaled);
static int drm_calc_scale(int src, int dst)
{
int scale = 0;
if (src < 0 || dst < 0)
return -EINVAL;
if (dst == 0)
return 0;
scale = src / dst;
return scale;
}
/**
* drm_rect_calc_hscale - calculate the horizontal scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_hscale: minimum allowed horizontal scaling factor
* @max_hscale: maximum allowed horizontal scaling factor
*
* Calculate the horizontal scaling factor as
* (@src width) / (@dst width).
*
* RETURNS:
* The horizontal scaling factor, or errno if out of limits.
*/
int drm_rect_calc_hscale(const struct drm_rect *src,
const struct drm_rect *dst,
int min_hscale, int max_hscale)
{
int src_w = drm_rect_width(src);
int dst_w = drm_rect_width(dst);
int hscale = drm_calc_scale(src_w, dst_w);
if (hscale < 0 || dst_w == 0)
return hscale;
if (hscale < min_hscale || hscale > max_hscale)
return -ERANGE;
return hscale;
}
EXPORT_SYMBOL(drm_rect_calc_hscale);
/**
* drm_rect_calc_vscale - calculate the vertical scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_vscale: minimum allowed vertical scaling factor
* @max_vscale: maximum allowed vertical scaling factor
*
* Calculate the vertical scaling factor as
* (@src height) / (@dst height).
*
* RETURNS:
* The vertical scaling factor, or errno if out of limits.
*/
int drm_rect_calc_vscale(const struct drm_rect *src,
const struct drm_rect *dst,
int min_vscale, int max_vscale)
{
int src_h = drm_rect_height(src);
int dst_h = drm_rect_height(dst);
int vscale = drm_calc_scale(src_h, dst_h);
if (vscale < 0 || dst_h == 0)
return vscale;
if (vscale < min_vscale || vscale > max_vscale)
return -ERANGE;
return vscale;
}
EXPORT_SYMBOL(drm_rect_calc_vscale);
/**
* drm_rect_calc_hscale_relaxed - calculate the horizontal scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_hscale: minimum allowed horizontal scaling factor
* @max_hscale: maximum allowed horizontal scaling factor
*
* Calculate the horizontal scaling factor as
* (@src width) / (@dst width).
*
* If the calculated scaling factor is below @min_hscale,
* decrease the width of rectangle @dst to compensate.
*
* If the calculated scaling factor is above @max_hscale,
* decrease the width of rectangle @src to compensate.
*
* RETURNS:
* The horizontal scaling factor.
*/
int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
struct drm_rect *dst,
int min_hscale, int max_hscale)
{
int src_w = drm_rect_width(src);
int dst_w = drm_rect_width(dst);
int hscale = drm_calc_scale(src_w, dst_w);
if (hscale < 0 || dst_w == 0)
return hscale;
if (hscale < min_hscale) {
int max_dst_w = src_w / min_hscale;
drm_rect_adjust_size(dst, max_dst_w - dst_w, 0);
return min_hscale;
}
if (hscale > max_hscale) {
int max_src_w = dst_w * max_hscale;
drm_rect_adjust_size(src, max_src_w - src_w, 0);
return max_hscale;
}
return hscale;
}
EXPORT_SYMBOL(drm_rect_calc_hscale_relaxed);
/**
* drm_rect_calc_vscale_relaxed - calculate the vertical scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_vscale: minimum allowed vertical scaling factor
* @max_vscale: maximum allowed vertical scaling factor
*
* Calculate the vertical scaling factor as
* (@src height) / (@dst height).
*
* If the calculated scaling factor is below @min_vscale,
* decrease the height of rectangle @dst to compensate.
*
* If the calculated scaling factor is above @max_vscale,
* decrease the height of rectangle @src to compensate.
*
* RETURNS:
* The vertical scaling factor.
*/
int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
struct drm_rect *dst,
int min_vscale, int max_vscale)
{
int src_h = drm_rect_height(src);
int dst_h = drm_rect_height(dst);
int vscale = drm_calc_scale(src_h, dst_h);
if (vscale < 0 || dst_h == 0)
return vscale;
if (vscale < min_vscale) {
int max_dst_h = src_h / min_vscale;
drm_rect_adjust_size(dst, 0, max_dst_h - dst_h);
return min_vscale;
}
if (vscale > max_vscale) {
int max_src_h = dst_h * max_vscale;
drm_rect_adjust_size(src, 0, max_src_h - src_h);
return max_vscale;
}
return vscale;
}
EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed);
/**
* drm_rect_debug_print - print the rectangle information
* @r: rectangle to print
* @fixed_point: rectangle is in 16.16 fixed point format
*/
void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point)
{
int w = drm_rect_width(r);
int h = drm_rect_height(r);
if (fixed_point)
DRM_DEBUG_KMS("%d.%06ux%d.%06u%+d.%06u%+d.%06u\n",
w >> 16, ((w & 0xffff) * 15625) >> 10,
h >> 16, ((h & 0xffff) * 15625) >> 10,
r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
else
DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
}
EXPORT_SYMBOL(drm_rect_debug_print);
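The helpers above are meant to be composed by plane and sprite code: work out the horizontal and vertical scale factors from the source and destination rectangles, then clip both consistently against the visible area. A minimal sketch of that flow, assuming only the drm_rect.h helpers added here; the clip_plane() wrapper itself is hypothetical, and the source rectangle is taken to be in 16.16 fixed point as in the sprite code:

#include <linux/kernel.h>
#include <drm/drm_rect.h>

/* Hypothetical wrapper: src in 16.16 fixed point, dst and clip in pixels. */
static bool clip_plane(struct drm_rect *src, struct drm_rect *dst,
                       const struct drm_rect *clip)
{
        /* 16.16 scale factors; a negative return means out of range. */
        int hscale = drm_rect_calc_hscale(src, dst, 1, INT_MAX);
        int vscale = drm_rect_calc_vscale(src, dst, 1, INT_MAX);

        if (hscale < 0 || vscale < 0)
                return false;

        /* Clip dst by clip and shrink src by the scaled amounts. */
        return drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
}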


@ -32,12 +32,14 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define CH7xxx_REG_DID 0x4b
#define CH7011_VID 0x83 /* 7010 as well */
#define CH7010B_VID 0x05
#define CH7009A_VID 0x84
#define CH7009B_VID 0x85
#define CH7301_VID 0x95
#define CH7xxx_VID 0x84
#define CH7xxx_DID 0x17
#define CH7010_DID 0x16
#define CH7xxx_NUM_REGS 0x4c
@ -87,11 +89,20 @@ static struct ch7xxx_id_struct {
char *name;
} ch7xxx_ids[] = {
{ CH7011_VID, "CH7011" },
{ CH7010B_VID, "CH7010B" },
{ CH7009A_VID, "CH7009A" },
{ CH7009B_VID, "CH7009B" },
{ CH7301_VID, "CH7301" },
};
static struct ch7xxx_did_struct {
uint8_t did;
char *name;
} ch7xxx_dids[] = {
{ CH7xxx_DID, "CH7XXX" },
{ CH7010_DID, "CH7010B" },
};
struct ch7xxx_priv {
bool quiet;
};
@ -108,6 +119,18 @@ static char *ch7xxx_get_id(uint8_t vid)
return NULL;
}
static char *ch7xxx_get_did(uint8_t did)
{
int i;
for (i = 0; i < ARRAY_SIZE(ch7xxx_dids); i++) {
if (ch7xxx_dids[i].did == did)
return ch7xxx_dids[i].name;
}
return NULL;
}
/** Reads an 8 bit register */
static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
@ -179,7 +202,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
/* this will detect the CH7xxx chip on the specified i2c bus */
struct ch7xxx_priv *ch7xxx;
uint8_t vendor, device;
char *name;
char *name, *devid;
ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
if (ch7xxx == NULL)
@ -204,7 +227,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
goto out;
if (device != CH7xxx_DID) {
devid = ch7xxx_get_did(device);
if (!devid) {
DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
"slave %d.\n",
vendor, adapter->name, dvo->slave_addr);


@ -61,11 +61,11 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "gen: %d\n", info->gen);
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define DEV_INFO_SEP ;
DEV_INFO_FLAGS;
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON
return 0;
}
@ -941,7 +941,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
MEMSTAT_VID_SHIFT);
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@ -1009,6 +1009,27 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
seq_printf(m, "Max overclocked frequency: %dMHz\n",
dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
} else if (IS_VALLEYVIEW(dev)) {
u32 freq_sts, val;
mutex_lock(&dev_priv->rps.hw_lock);
valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS,
&freq_sts);
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
valleyview_punit_read(dev_priv, PUNIT_FUSE_BUS1, &val);
seq_printf(m, "max GPU freq: %d MHz\n",
vlv_gpu_freq(dev_priv->mem_freq, val));
valleyview_punit_read(dev_priv, PUNIT_REG_GPU_LFM, &val);
seq_printf(m, "min GPU freq: %d MHz\n",
vlv_gpu_freq(dev_priv->mem_freq, val));
seq_printf(m, "current GPU freq: %d MHz\n",
vlv_gpu_freq(dev_priv->mem_freq,
(freq_sts >> 8) & 0xff));
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
seq_printf(m, "no P-state info available\n");
}
@ -1812,7 +1833,11 @@ i915_max_freq_get(void *data, u64 *val)
if (ret)
return ret;
*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
if (IS_VALLEYVIEW(dev))
*val = vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.max_delay);
else
*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@ -1837,9 +1862,16 @@ i915_max_freq_set(void *data, u64 val)
/*
* Turbo will still be enabled, but won't go above the set value.
*/
do_div(val, GT_FREQUENCY_MULTIPLIER);
dev_priv->rps.max_delay = val;
gen6_set_rps(dev, val);
if (IS_VALLEYVIEW(dev)) {
val = vlv_freq_opcode(dev_priv->mem_freq, val);
dev_priv->rps.max_delay = val;
gen6_set_rps(dev, val);
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
dev_priv->rps.max_delay = val;
gen6_set_rps(dev, val);
}
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@ -1863,7 +1895,11 @@ i915_min_freq_get(void *data, u64 *val)
if (ret)
return ret;
*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
if (IS_VALLEYVIEW(dev))
*val = vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.min_delay);
else
*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@ -1888,9 +1924,15 @@ i915_min_freq_set(void *data, u64 val)
/*
* Turbo will still be enabled, but won't go below the set value.
*/
do_div(val, GT_FREQUENCY_MULTIPLIER);
dev_priv->rps.min_delay = val;
gen6_set_rps(dev, val);
if (IS_VALLEYVIEW(dev)) {
val = vlv_freq_opcode(dev_priv->mem_freq, val);
dev_priv->rps.min_delay = val;
valleyview_set_rps(dev, val);
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
dev_priv->rps.min_delay = val;
gen6_set_rps(dev, val);
}
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;


@ -1445,15 +1445,19 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = dev_priv->info;
#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
#define DEV_INFO_SEP ,
#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
info->gen,
dev_priv->dev->pdev->device,
DEV_INFO_FLAGS);
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
/**
@ -1468,7 +1472,7 @@ static void intel_early_sanitize_regs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_HASWELL(dev))
if (HAS_FPGA_DBG_UNCLAIMED(dev))
I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
@ -1629,6 +1633,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->backlight.lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->rps.hw_lock);
@ -1737,10 +1742,10 @@ int i915_driver_unload(struct drm_device *dev)
* free the memory space allocated for the child device
* config parsed from VBT
*/
if (dev_priv->child_dev && dev_priv->child_dev_num) {
kfree(dev_priv->child_dev);
dev_priv->child_dev = NULL;
dev_priv->child_dev_num = 0;
if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
kfree(dev_priv->vbt.child_dev);
dev_priv->vbt.child_dev = NULL;
dev_priv->vbt.child_dev_num = 0;
}
vga_switcheroo_unregister_client(dev->pdev);


@ -280,6 +280,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
.is_mobile = 1,
.has_fbc = 1,
};
static const struct intel_device_info intel_ivybridge_q_info = {
@ -308,12 +309,17 @@ static const struct intel_device_info intel_valleyview_d_info = {
static const struct intel_device_info intel_haswell_d_info = {
GEN7_FEATURES,
.is_haswell = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
};
static const struct intel_device_info intel_haswell_m_info = {
GEN7_FEATURES,
.is_haswell = 1,
.is_mobile = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@ -549,6 +555,8 @@ static int i915_drm_freeze(struct drm_device *dev)
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
dev_priv->display.crtc_disable(crtc);
intel_modeset_suspend_hw(dev);
}
i915_save_state(dev);
@ -984,12 +992,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
if (intel_info->is_valleyview)
if(!i915_preliminary_hw_support) {
DRM_ERROR("Preliminary hardware support disabled\n");
return -ENODEV;
}
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both
@ -1218,16 +1220,16 @@ MODULE_LICENSE("GPL and additional rights");
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
* chip from rc6 before touching it for real. MI_MODE is masked, hence
* harmless to write 0 into. */
/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
* the chip from rc6 before touching it for real. MI_MODE is masked,
* hence harmless to write 0 into. */
I915_WRITE_NOTRACE(MI_MODE, 0);
}
static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
if (IS_HASWELL(dev_priv->dev) &&
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
(I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unknown unclaimed register before writing to %x\n",
reg);
@ -1238,7 +1240,7 @@ hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
if (IS_HASWELL(dev_priv->dev) &&
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
(I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unclaimed write to %x\n", reg);
I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);


@ -76,6 +76,8 @@ enum plane {
};
#define plane_name(p) ((p) + 'A')
#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
enum port {
PORT_A = 0,
PORT_B,
@ -86,6 +88,24 @@ enum port {
};
#define port_name(p) ((p) + 'A')
enum intel_display_power_domain {
POWER_DOMAIN_PIPE_A,
POWER_DOMAIN_PIPE_B,
POWER_DOMAIN_PIPE_C,
POWER_DOMAIN_PIPE_A_PANEL_FITTER,
POWER_DOMAIN_PIPE_B_PANEL_FITTER,
POWER_DOMAIN_PIPE_C_PANEL_FITTER,
POWER_DOMAIN_TRANSCODER_A,
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
};
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
enum hpd_pin {
HPD_NONE = 0,
HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
@ -331,68 +351,55 @@ struct drm_i915_gt_funcs {
void (*force_wake_put)(struct drm_i915_private *dev_priv);
};
#define DEV_INFO_FLAGS \
DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
DEV_INFO_FLAG(has_llc)
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
func(is_mobile) sep \
func(is_i85x) sep \
func(is_i915g) sep \
func(is_i945gm) sep \
func(is_g33) sep \
func(need_gfx_hws) sep \
func(is_g4x) sep \
func(is_pineview) sep \
func(is_broadwater) sep \
func(is_crestline) sep \
func(is_ivybridge) sep \
func(is_valleyview) sep \
func(is_haswell) sep \
func(has_force_wake) sep \
func(has_fbc) sep \
func(has_pipe_cxsr) sep \
func(has_hotplug) sep \
func(cursor_needs_physical) sep \
func(has_overlay) sep \
func(overlay_needs_physical) sep \
func(supports_tv) sep \
func(has_bsd_ring) sep \
func(has_blt_ring) sep \
func(has_llc) sep \
func(has_ddi) sep \
func(has_fpga_dbg)
#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;
struct intel_device_info {
u32 display_mmio_offset;
u8 num_pipes:3;
u8 gen;
u8 is_mobile:1;
u8 is_i85x:1;
u8 is_i915g:1;
u8 is_i945gm:1;
u8 is_g33:1;
u8 need_gfx_hws:1;
u8 is_g4x:1;
u8 is_pineview:1;
u8 is_broadwater:1;
u8 is_crestline:1;
u8 is_ivybridge:1;
u8 is_valleyview:1;
u8 has_force_wake:1;
u8 is_haswell:1;
u8 has_fbc:1;
u8 has_pipe_cxsr:1;
u8 has_hotplug:1;
u8 cursor_needs_physical:1;
u8 has_overlay:1;
u8 overlay_needs_physical:1;
u8 supports_tv:1;
u8 has_bsd_ring:1;
u8 has_blt_ring:1;
u8 has_llc:1;
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};
#undef DEFINE_FLAG
#undef SEP_SEMICOLON
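The DEV_INFO_FOR_EACH_FLAG() rework keeps the flag list in exactly one place; each consumer supplies its own per-flag macro and separator, as the struct definition above and the debugfs/dmesg printers elsewhere in this series do. As a hedged illustration, a further hypothetical consumer could count the set flags from the same list (COUNT_FLAG, SEP_PLUS and the function below are illustrative names, not part of this series):

/* Hypothetical third consumer of the single flag list. */
#define COUNT_FLAG(name) (info->name ? 1 : 0)
#define SEP_PLUS +
static int intel_device_info_flag_count(const struct intel_device_info *info)
{
        return DEV_INFO_FOR_EACH_FLAG(COUNT_FLAG, SEP_PLUS);
}
#undef COUNT_FLAG
#undef SEP_PLUS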
enum i915_cache_level {
I915_CACHE_NONE = 0,
I915_CACHE_LLC,
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};
typedef uint32_t gen6_gtt_pte_t;
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
@ -428,6 +435,9 @@ struct i915_gtt {
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level);
gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
dma_addr_t addr,
enum i915_cache_level level);
};
#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
@ -449,6 +459,9 @@ struct i915_hw_ppgtt {
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level);
gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
dma_addr_t addr,
enum i915_cache_level level);
int (*enable)(struct drm_device *dev);
void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
};
@ -457,6 +470,7 @@ struct i915_hw_ppgtt {
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
struct kref ref;
int id;
bool is_initialized;
struct drm_i915_file_private *file_priv;
@ -658,6 +672,7 @@ struct i915_suspend_saved_registers {
struct intel_gen6_power_mgmt {
struct work_struct work;
struct delayed_work vlv_work;
u32 pm_iir;
/* lock - irqsave spinlock that protects the work_struct and
* pm_iir. */
@ -668,6 +683,7 @@ struct intel_gen6_power_mgmt {
u8 cur_delay;
u8 min_delay;
u8 max_delay;
u8 rpe_delay;
u8 hw_max;
struct delayed_work delayed_resume_work;
@ -875,6 +891,37 @@ enum modeset_restore {
MODESET_SUSPENDED,
};
struct intel_vbt_data {
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
/* Feature bits */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
/* eDP */
int edp_rate;
int edp_lanes;
int edp_preemphasis;
int edp_vswing;
bool edp_initialized;
bool edp_support;
int edp_bpp;
struct edp_power_seq edp_pps;
int crt_ddc_pin;
int child_dev_num;
struct child_device_config *child_dev;
};
typedef struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@ -941,6 +988,7 @@ typedef struct drm_i915_private {
HPD_MARK_DISABLED = 2
} hpd_mark;
} hpd_stats[HPD_NUM_PINS];
u32 hpd_event_bits;
struct timer_list hotplug_reenable_timer;
int num_pch_pll;
@ -953,6 +1001,7 @@ typedef struct drm_i915_private {
struct intel_fbc_work *fbc_work;
struct intel_opregion opregion;
struct intel_vbt_data vbt;
/* overlay */
struct intel_overlay *overlay;
@ -962,37 +1011,15 @@ typedef struct drm_i915_private {
struct {
int level;
bool enabled;
spinlock_t lock; /* bl registers and the above bl fields */
struct backlight_device *device;
} backlight;
/* LVDS info */
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
/* Feature bits from the VBIOS */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
struct {
int rate;
int lanes;
int preemphasis;
int vswing;
bool initialized;
bool support;
int bpp;
struct edp_power_seq pps;
} edp;
bool no_aux_handshake;
int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@ -1020,10 +1047,6 @@ typedef struct drm_i915_private {
/* Kernel Modesetting */
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
/* Panel fitter placement and size for Ironlake+ */
u32 pch_pf_pos, pch_pf_size;
struct drm_crtc *plane_to_crtc_mapping[3];
struct drm_crtc *pipe_to_crtc_mapping[3];
@ -1038,8 +1061,6 @@ typedef struct drm_i915_private {
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
bool mchbar_need_disable;
@ -1059,6 +1080,8 @@ typedef struct drm_i915_private {
struct i915_gpu_error gpu_error;
struct drm_i915_gem_object *vlv_pctx;
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
@ -1274,6 +1297,9 @@ struct drm_i915_gem_request {
/** Position in the ringbuffer of the end of the request */
u32 tail;
/** Context related to this request */
struct i915_hw_context *ctx;
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
@ -1373,8 +1399,9 @@ struct drm_i915_file_private {
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
#define HAS_DDI(dev) (IS_HASWELL(dev))
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@ -1486,8 +1513,6 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void intel_enable_asle(struct drm_device *dev);
#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
#else
@ -1703,6 +1728,17 @@ void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
struct drm_file *file, int to_id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
kref_get(&ctx->ref);
}
static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
kref_put(&ctx->ref, i915_gem_context_free);
}
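With contexts now kref-counted, every long-lived pointer to one (a request's ctx field, ring->last_context) is expected to take a reference and drop it when the pointer goes away. A minimal sketch of that pattern; the holder struct and function are hypothetical:

/* Hypothetical holder that keeps a context alive for its own lifetime. */
struct example_ctx_holder {
        struct i915_hw_context *ctx;
};

static void example_holder_set_ctx(struct example_ctx_holder *holder,
                                   struct i915_hw_context *ctx)
{
        if (ctx)
                i915_gem_context_reference(ctx);           /* kref_get() */
        if (holder->ctx)
                i915_gem_context_unreference(holder->ctx); /* may free it */
        holder->ctx = ctx;
}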
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
@ -1800,7 +1836,7 @@ void i915_teardown_sysfs(struct drm_device *dev_priv);
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern inline bool intel_gmbus_is_port_valid(unsigned port)
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}
@ -1809,7 +1845,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
@ -1821,14 +1857,10 @@ extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern void intel_opregion_gse_intr(struct drm_device *dev);
extern void intel_opregion_enable_asle(struct drm_device *dev);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
#endif
/* intel_acpi.c */
@ -1842,6 +1874,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
@ -1854,6 +1887,9 @@ extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
@ -1885,6 +1921,10 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val);
int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
int valleyview_nc_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val);
int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);


@ -2042,6 +2042,11 @@ i915_add_request(struct intel_ring_buffer *ring,
request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
request->tail = request_ring_position;
request->ctx = ring->last_context;
if (request->ctx)
i915_gem_context_reference(request->ctx);
request->emitted_jiffies = jiffies;
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
@ -2094,6 +2099,17 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
spin_unlock(&file_priv->mm.lock);
}
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
list_del(&request->list);
i915_gem_request_remove_from_client(request);
if (request->ctx)
i915_gem_context_unreference(request->ctx);
kfree(request);
}
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
@ -2104,9 +2120,7 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct drm_i915_gem_request,
list);
list_del(&request->list);
i915_gem_request_remove_from_client(request);
kfree(request);
i915_gem_free_request(request);
}
while (!list_empty(&ring->active_list)) {
@ -2198,9 +2212,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
*/
ring->last_retired_head = request->tail;
list_del(&request->list);
i915_gem_request_remove_from_client(request);
kfree(request);
i915_gem_free_request(request);
}
/* Move any buffers on the active list that are no longer referenced


@ -124,10 +124,10 @@ static int get_context_size(struct drm_device *dev)
return ret;
}
static void do_destroy(struct i915_hw_context *ctx)
void i915_gem_context_free(struct kref *ctx_ref)
{
if (ctx->file_priv)
idr_remove(&ctx->file_priv->context_idr, ctx->id);
struct i915_hw_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
@ -145,6 +145,7 @@ create_hw_context(struct drm_device *dev,
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
kref_init(&ctx->ref);
ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
if (ctx->obj == NULL) {
kfree(ctx);
@ -169,18 +170,18 @@ create_hw_context(struct drm_device *dev,
if (file_priv == NULL)
return ctx;
ctx->file_priv = file_priv;
ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
GFP_KERNEL);
if (ret < 0)
goto err_out;
ctx->file_priv = file_priv;
ctx->id = ret;
return ctx;
err_out:
do_destroy(ctx);
i915_gem_context_unreference(ctx);
return ERR_PTR(ret);
}
@ -226,7 +227,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
err_unpin:
i915_gem_object_unpin(ctx->obj);
err_destroy:
do_destroy(ctx);
i915_gem_context_unreference(ctx);
return ret;
}
@ -262,6 +263,7 @@ void i915_gem_context_init(struct drm_device *dev)
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
if (dev_priv->hw_contexts_disabled)
return;
@ -271,9 +273,16 @@ void i915_gem_context_fini(struct drm_device *dev)
* other code, leading to spurious errors. */
intel_gpu_reset(dev);
i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj);
i915_gem_object_unpin(dctx->obj);
do_destroy(dev_priv->ring[RCS].default_context);
/* When default context is created and switched to, base object refcount
* will be 2 (+1 from object creation and +1 from do_switch()).
* i915_gem_context_fini() will be called after gpu_idle() has switched
* to default context. So we need to unreference the base object once
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
drm_gem_object_unreference(&dctx->obj->base);
i915_gem_context_unreference(dctx);
}
static int context_idr_cleanup(int id, void *p, void *data)
@ -282,8 +291,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
BUG_ON(id == DEFAULT_CONTEXT_ID);
do_destroy(ctx);
i915_gem_context_unreference(ctx);
return 0;
}
@ -325,6 +333,7 @@ mi_set_context(struct intel_ring_buffer *ring,
if (ret)
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
if (IS_GEN7(ring->dev))
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
else
@ -353,13 +362,13 @@ mi_set_context(struct intel_ring_buffer *ring,
static int do_switch(struct i915_hw_context *to)
{
struct intel_ring_buffer *ring = to->ring;
struct drm_i915_gem_object *from_obj = ring->last_context_obj;
struct i915_hw_context *from = ring->last_context;
u32 hw_flags = 0;
int ret;
BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
if (from_obj == to->obj)
if (from == to)
return 0;
ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
@ -382,7 +391,7 @@ static int do_switch(struct i915_hw_context *to)
if (!to->is_initialized || is_default_context(to))
hw_flags |= MI_RESTORE_INHIBIT;
else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
else if (WARN_ON_ONCE(from == to)) /* not yet expected */
hw_flags |= MI_FORCE_RESTORE;
ret = mi_set_context(ring, to, hw_flags);
@ -397,9 +406,9 @@ static int do_switch(struct i915_hw_context *to)
* is a bit suboptimal because the retiring can occur simply after the
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from_obj != NULL) {
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_gem_object_move_to_active(from_obj, ring);
if (from != NULL) {
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_gem_object_move_to_active(from->obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@ -407,15 +416,26 @@ static int do_switch(struct i915_hw_context *to)
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
from_obj->dirty = 1;
BUG_ON(from_obj->ring != ring);
i915_gem_object_unpin(from_obj);
from->obj->dirty = 1;
BUG_ON(from->obj->ring != ring);
drm_gem_object_unreference(&from_obj->base);
ret = i915_add_request(ring, NULL, NULL);
if (ret) {
/* Too late, we've already scheduled a context switch.
* Try to undo the change so that the hw state is
* consistent with our tracking. In case of emergency,
* scream.
*/
WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
return ret;
}
i915_gem_object_unpin(from->obj);
i915_gem_context_unreference(from);
}
drm_gem_object_reference(&to->obj->base);
ring->last_context_obj = to->obj;
i915_gem_context_reference(to);
ring->last_context = to;
to->is_initialized = true;
return 0;
@ -444,6 +464,8 @@ int i915_switch_context(struct intel_ring_buffer *ring,
if (dev_priv->hw_contexts_disabled)
return 0;
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (ring != &dev_priv->ring[RCS])
return 0;
@ -512,8 +534,8 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
do_destroy(ctx);
idr_remove(&ctx->file_priv->context_idr, ctx->id);
i915_gem_context_unreference(ctx);
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);


@ -28,8 +28,6 @@
#include "i915_trace.h"
#include "intel_drv.h"
typedef uint32_t gen6_gtt_pte_t;
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
@ -44,29 +42,22 @@ typedef uint32_t gen6_gtt_pte_t;
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
dma_addr_t addr,
enum i915_cache_level level)
static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
case I915_CACHE_LLC_MLC:
/* Haswell doesn't set L3 this way */
if (IS_HASWELL(dev))
pte |= GEN6_PTE_CACHE_LLC;
else
pte |= GEN6_PTE_CACHE_LLC_MLC;
pte |= GEN6_PTE_CACHE_LLC_MLC;
break;
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
if (IS_HASWELL(dev))
pte |= HSW_PTE_UNCACHED;
else
pte |= GEN6_PTE_UNCACHED;
pte |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
@ -75,16 +66,48 @@ static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
return pte;
}
static int gen6_ppgtt_enable(struct drm_device *dev)
#define BYT_PTE_WRITEABLE (1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
dma_addr_t addr,
enum i915_cache_level level)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
/* Mark the page as writeable. Other platforms don't have a
* setting for read-only/writable, so this matches that behavior.
*/
pte |= BYT_PTE_WRITEABLE;
if (level != I915_CACHE_NONE)
pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
return pte;
}
static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev,
dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
pte |= GEN6_PTE_CACHE_LLC;
return pte;
}
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
gen6_gtt_pte_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
WARN_ON(ppgtt->pd_offset & 0x3f);
pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
@ -97,6 +120,19 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
writel(pd_entry, pd_addr + i);
}
readl(pd_addr);
}
static int gen6_ppgtt_enable(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i;
BUG_ON(ppgtt->pd_offset & 0x3f);
gen6_write_pdes(ppgtt);
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
@ -154,9 +190,9 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
scratch_pte = gen6_pte_encode(ppgtt->dev,
ppgtt->scratch_page_dma_addr,
I915_CACHE_LLC);
scratch_pte = ppgtt->pte_encode(ppgtt->dev,
ppgtt->scratch_page_dma_addr,
I915_CACHE_LLC);
while (num_entries) {
last_pte = first_pte + num_entries;
@ -191,8 +227,8 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
dma_addr_t page_addr;
page_addr = sg_page_iter_dma_address(&sg_iter);
pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
cache_level);
pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr,
cache_level);
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
act_pt++;
@ -233,8 +269,15 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
if (IS_HASWELL(dev)) {
ppgtt->pte_encode = hsw_pte_encode;
} else if (IS_VALLEYVIEW(dev)) {
ppgtt->pte_encode = byt_pte_encode;
} else {
ppgtt->pte_encode = gen6_pte_encode;
}
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->enable = gen6_ppgtt_enable;
ppgtt->clear_range = gen6_ppgtt_clear_range;
@ -437,7 +480,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
iowrite32(dev_priv->gtt.pte_encode(dev, addr, level),
&gtt_entries[i]);
i++;
}
@ -449,7 +493,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1])
!= gen6_pte_encode(dev, addr, level));
!= dev_priv->gtt.pte_encode(dev, addr, level));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@ -474,8 +518,9 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
first_entry, num_entries, max_entries))
num_entries = max_entries;
scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
I915_CACHE_LLC);
scratch_pte = dev_priv->gtt.pte_encode(dev,
dev_priv->gtt.scratch_page_dma,
I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
@ -809,6 +854,13 @@ int i915_gem_gtt_init(struct drm_device *dev)
} else {
dev_priv->gtt.gtt_probe = gen6_gmch_probe;
dev_priv->gtt.gtt_remove = gen6_gmch_remove;
if (IS_HASWELL(dev)) {
dev_priv->gtt.pte_encode = hsw_pte_encode;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->gtt.pte_encode = byt_pte_encode;
} else {
dev_priv->gtt.pte_encode = gen6_pte_encode;
}
}
ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,

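With pte_encode now a per-platform hook on both the GGTT and PPGTT structures, callers no longer branch on IS_HASWELL()/IS_VALLEYVIEW() at every PTE write. A small sketch of what a caller looks like under this scheme; the helper name is hypothetical:

/* Hypothetical caller: encode and write one global-GTT PTE through the
 * hook selected in i915_gem_gtt_init(). */
static void example_write_one_gtt_pte(struct drm_i915_private *dev_priv,
                                      gen6_gtt_pte_t __iomem *slot,
                                      dma_addr_t addr)
{
        gen6_gtt_pte_t pte = dev_priv->gtt.pte_encode(dev_priv->dev, addr,
                                                      I915_CACHE_LLC);
        iowrite32(pte, slot);
}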

@ -62,7 +62,10 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* its value of TOLUD.
*/
base = 0;
if (INTEL_INFO(dev)->gen >= 6) {
if (IS_VALLEYVIEW(dev)) {
pci_read_config_dword(dev->pdev, 0x5c, &base);
base &= ~((1<<20) - 1);
} else if (INTEL_INFO(dev)->gen >= 6) {
/* Read Base Data of Stolen Memory Register (BDSM) directly.
* Note that there is also a MCHBAR mirror at 0x1080c0 or
* we could use device 2:0x5c instead.
@ -136,6 +139,7 @@ static int i915_setup_compression(struct drm_device *dev, int size)
err_fb:
drm_mm_put_block(compressed_fb);
err:
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
@ -182,6 +186,7 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int bios_reserved = 0;
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
@ -190,8 +195,12 @@ int i915_gem_init_stolen(struct drm_device *dev)
DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
if (IS_VALLEYVIEW(dev))
bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size);
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
bios_reserved);
return 0;
}
@ -330,7 +339,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
/* KISS and expect everything to be page-aligned */
BUG_ON(stolen_offset & 4095);
BUG_ON(gtt_offset & 4095);
BUG_ON(size & 4095);
if (WARN_ON(size == 0))
@ -351,6 +359,10 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
return NULL;
}
/* Some objects just need physical mem from stolen space */
if (gtt_offset == -1)
return obj;
/* To simplify the initialisation sequence between KMS and GTT,
* we allow construction of the stolen object prior to
* setting up the GTT space. The actual reservation will occur


@ -112,6 +112,213 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
}
}
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
enum pipe pipe;
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
if (crtc->cpu_fifo_underrun_disabled)
return false;
}
return true;
}
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
struct intel_crtc *crtc;
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
if (crtc->pch_fifo_underrun_disabled)
return false;
}
return true;
}
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
DE_PIPEB_FIFO_UNDERRUN;
if (enable)
ironlake_enable_display_irq(dev_priv, bit);
else
ironlake_disable_display_irq(dev_priv, bit);
}
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (enable) {
if (!ivb_can_enable_err_int(dev))
return;
I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
ERR_INT_FIFO_UNDERRUN_B |
ERR_INT_FIFO_UNDERRUN_C);
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
} else {
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
}
}
static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
bool enable)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
SDE_TRANSB_FIFO_UNDER;
if (enable)
I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
else
I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);
POSTING_READ(SDEIMR);
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (enable) {
if (!cpt_can_enable_serr_int(dev))
return;
I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
SERR_INT_TRANS_B_FIFO_UNDERRUN |
SERR_INT_TRANS_C_FIFO_UNDERRUN);
I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
} else {
I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
}
POSTING_READ(SDEIMR);
}
/**
* intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
* @dev: drm device
* @pipe: pipe
* @enable: true if we want to report FIFO underrun errors, false otherwise
*
* This function makes us disable or enable CPU fifo underruns for a specific
* pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
* reporting for one pipe may also disable all the other CPU error interrupts for
* the other pipes, due to the fact that there's just one interrupt mask/enable
* bit for all the pipes.
*
* Returns the previous state of underrun reporting.
*/
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool ret;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
ret = !intel_crtc->cpu_fifo_underrun_disabled;
if (enable == ret)
goto done;
intel_crtc->cpu_fifo_underrun_disabled = !enable;
if (IS_GEN5(dev) || IS_GEN6(dev))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
ivybridge_set_fifo_underrun_reporting(dev, enable);
done:
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return ret;
}
/**
* intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
* @dev: drm device
* @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
* @enable: true if we want to report FIFO underrun errors, false otherwise
*
* This function makes us disable or enable PCH fifo underruns for a specific
* PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
* underrun reporting for one transcoder may also disable all the other PCH
* error interrupts for the other transcoders, due to the fact that there's just
* one interrupt mask/enable bit for all the transcoders.
*
* Returns the previous state of underrun reporting.
*/
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe p;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
unsigned long flags;
bool ret;
if (HAS_PCH_LPT(dev)) {
crtc = NULL;
for_each_pipe(p) {
struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
crtc = c;
break;
}
}
if (!crtc) {
DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
return false;
}
} else {
crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
}
intel_crtc = to_intel_crtc(crtc);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
ret = !intel_crtc->pch_fifo_underrun_disabled;
if (enable == ret)
goto done;
intel_crtc->pch_fifo_underrun_disabled = !enable;
if (HAS_PCH_IBX(dev))
ibx_set_fifo_underrun_reporting(intel_crtc, enable);
else
cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
done:
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return ret;
}
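Both entry points return the previous reporting state, so a caller that expects an underrun (for example around a pipe or transcoder disable) can mask the error and restore the old setting afterwards. A hypothetical usage sketch:

/* Hypothetical caller: silence expected CPU FIFO underruns around an
 * operation on @pipe, then restore the previous reporting state. */
static void example_quiet_pipe_op(struct drm_device *dev, enum pipe pipe)
{
        bool was_enabled;

        was_enabled = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);

        /* ... perform the operation that may underrun the FIFO ... */

        intel_set_cpu_fifo_underrun_reporting(dev, pipe, was_enabled);
}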
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
@ -142,28 +349,21 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
}
/**
* intel_enable_asle - enable ASLE interrupt for OpRegion
* i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
*/
void intel_enable_asle(struct drm_device *dev)
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;
/* FIXME: opregion/asle for VLV */
if (IS_VALLEYVIEW(dev))
if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
return;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
else {
i915_enable_pipestat(dev_priv, 1,
PIPE_LEGACY_BLC_EVENT_ENABLE);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, 0,
PIPE_LEGACY_BLC_EVENT_ENABLE);
}
i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@ -184,6 +384,10 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
if (!intel_display_power_enabled(dev,
POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
return false;
return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}
@ -334,6 +538,21 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
crtc);
}
static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
enum drm_connector_status old_status;
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
old_status = connector->status;
connector->status = connector->funcs->detect(connector, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
connector->base.id,
drm_get_connector_name(connector),
old_status, connector->status);
return (old_status != connector->status);
}
/*
* Handle hotplug events outside the interrupt handler proper.
*/
@ -350,6 +569,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_connector *connector;
unsigned long irqflags;
bool hpd_disabled = false;
bool changed = false;
u32 hpd_event_bits;
/* HPD irq before everything is fully set up. */
if (!dev_priv->enable_hotplug_processing)
@ -359,6 +580,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
DRM_DEBUG_KMS("running encoder hotplug functions\n");
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
hpd_event_bits = dev_priv->hpd_event_bits;
dev_priv->hpd_event_bits = 0;
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
intel_encoder = intel_connector->encoder;
@ -373,6 +597,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
| DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
}
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
drm_get_connector_name(connector), intel_encoder->hpd_pin);
}
}
/* if there were no outputs to poll, poll was disabled,
* therefore make sure it's enabled when disabling HPD on
@ -385,14 +613,20 @@ static void i915_hotplug_work_func(struct work_struct *work)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
if (intel_encoder->hot_plug)
intel_encoder->hot_plug(intel_encoder);
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
intel_encoder = intel_connector->encoder;
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
if (intel_encoder->hot_plug)
intel_encoder->hot_plug(intel_encoder);
if (intel_hpd_irq_event(dev, connector))
changed = true;
}
}
mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
if (changed)
drm_kms_helper_hotplug_event(dev);
}
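The work function above snapshots hpd_event_bits under the irq lock and then re-probes only the connectors whose HPD pin actually fired, sending a hotplug uevent only when some connector really changed state. A stand-alone sketch of that filtering, with a made-up detect_changed() in place of connector->funcs->detect():
#include <stdbool.h>
#include <stdio.h>
enum { HPD_PORT_B, HPD_PORT_C, HPD_PORT_D, HPD_NUM_PINS };
/* Hypothetical stand-in for connector->funcs->detect(): pretend only the
 * connector wired to HPD_PORT_C actually changed state. */
static bool detect_changed(int pin)
{
	return pin == HPD_PORT_C;
}
int main(void)
{
	unsigned int hpd_event_bits = 1 << HPD_PORT_C;	/* snapshot taken under the lock */
	bool changed = false;
	int pin;
	for (pin = 0; pin < HPD_NUM_PINS; pin++) {
		if (!(hpd_event_bits & (1 << pin)))
			continue;	/* this pin saw no interrupt: skip the costly probe */
		if (detect_changed(pin))
			changed = true;
	}
	if (changed)
		printf("send hotplug uevent\n");
	return 0;
}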
static void ironlake_handle_rps_change(struct drm_device *dev)
@ -482,7 +716,21 @@ static void gen6_pm_rps_work(struct work_struct *work)
*/
if (!(new_delay > dev_priv->rps.max_delay ||
new_delay < dev_priv->rps.min_delay)) {
gen6_set_rps(dev_priv->dev, new_delay);
if (IS_VALLEYVIEW(dev_priv->dev))
valleyview_set_rps(dev_priv->dev, new_delay);
else
gen6_set_rps(dev_priv->dev, new_delay);
}
if (IS_VALLEYVIEW(dev_priv->dev)) {
/*
* On VLV, when we enter RC6 we may not be at the minimum
* voltage level, so arm a timer to check. It should only
* fire when there's activity or once after we've entered
* RC6, and then won't be re-armed until the next RPS interrupt.
*/
mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
msecs_to_jiffies(100));
}
mutex_unlock(&dev_priv->rps.hw_lock);
@ -636,6 +884,7 @@ static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
continue;
dev_priv->hpd_event_bits |= (1 << i);
if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
dev_priv->hpd_stats[i].hpd_last_jiffies
+ msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
@ -643,6 +892,7 @@ static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
dev_priv->hpd_stats[i].hpd_cnt = 0;
} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
dev_priv->hpd_event_bits &= ~(1 << i);
DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
ret = true;
} else {
@ -763,10 +1013,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
ibx_hpd_irq_setup(dev);
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
}
if (pch_iir & SDE_AUDIO_POWER_MASK)
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
port_name(port));
}
if (pch_iir & SDE_AUX_MASK)
dp_aux_irq_handler(dev);
@ -795,10 +1047,64 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
false))
DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
false))
DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}
static void ivb_err_int_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 err_int = I915_READ(GEN7_ERR_INT);
if (err_int & ERR_INT_POISON)
DRM_ERROR("Poison interrupt\n");
if (err_int & ERR_INT_FIFO_UNDERRUN_A)
if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
if (err_int & ERR_INT_FIFO_UNDERRUN_B)
if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
if (err_int & ERR_INT_FIFO_UNDERRUN_C)
if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
I915_WRITE(GEN7_ERR_INT, err_int);
}
static void cpt_serr_int_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 serr_int = I915_READ(SERR_INT);
if (serr_int & SERR_INT_POISON)
DRM_ERROR("PCH poison interrupt\n");
if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
false))
DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
false))
DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
false))
DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
@ -812,10 +1118,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
ibx_hpd_irq_setup(dev);
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
}
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
port_name(port));
}
if (pch_iir & SDE_AUX_MASK_CPT)
dp_aux_irq_handler(dev);
@ -834,6 +1142,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
I915_READ(FDI_RX_IIR(pipe)));
if (pch_iir & SDE_ERROR_CPT)
cpt_serr_int_handler(dev);
}
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
@ -846,6 +1157,14 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
atomic_inc(&dev_priv->irq_received);
/* We get interrupts on unclaimed registers, so check for this before we
* do any I915_{READ,WRITE}. */
if (IS_HASWELL(dev) &&
(I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unclaimed register before interrupt\n");
I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@ -861,6 +1180,12 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
POSTING_READ(SDEIER);
}
/* On Haswell, also mask ERR_INT because we don't want to risk
* generating "unclaimed register" interrupts from inside the interrupt
* handler. */
if (IS_HASWELL(dev))
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
snb_gt_irq_handler(dev, dev_priv, gt_iir);
@ -870,11 +1195,14 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
de_iir = I915_READ(DEIIR);
if (de_iir) {
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev);
if (de_iir & DE_AUX_CHANNEL_A_IVB)
dp_aux_irq_handler(dev);
if (de_iir & DE_GSE_IVB)
intel_opregion_gse_intr(dev);
intel_opregion_asle_intr(dev);
for (i = 0; i < 3; i++) {
if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
@ -907,6 +1235,9 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
}
if (IS_HASWELL(dev) && ivb_can_enable_err_int(dev))
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
if (!HAS_PCH_NOP(dev)) {
@ -968,7 +1299,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
dp_aux_irq_handler(dev);
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
intel_opregion_asle_intr(dev);
if (de_iir & DE_PIPEA_VBLANK)
drm_handle_vblank(dev, 0);
@ -976,6 +1307,17 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (de_iir & DE_PIPEB_VBLANK)
drm_handle_vblank(dev, 1);
if (de_iir & DE_POISON)
DRM_ERROR("Poison interrupt\n");
if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
if (de_iir & DE_PLANEA_FLIP_DONE) {
intel_prepare_page_flip(dev, 0);
intel_finish_page_flip_plane(dev, 0);
@ -2201,10 +2543,14 @@ static void ibx_irq_postinstall(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 mask;
if (HAS_PCH_IBX(dev))
mask = SDE_GMBUS | SDE_AUX_MASK;
else
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
if (HAS_PCH_IBX(dev)) {
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
SDE_TRANSA_FIFO_UNDER | SDE_POISON;
} else {
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
if (HAS_PCH_NOP(dev))
return;
@ -2219,7 +2565,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
DE_AUX_CHANNEL_A;
DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
u32 render_irqs;
dev_priv->irq_mask = ~display_mask;
@ -2269,12 +2616,14 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
DE_PLANEA_FLIP_DONE_IVB |
DE_AUX_CHANNEL_A_IVB;
DE_AUX_CHANNEL_A_IVB |
DE_ERR_INT_IVB;
u32 render_irqs;
dev_priv->irq_mask = ~display_mask;
/* should always can generate irq */
I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
I915_WRITE(DEIIR, I915_READ(DEIIR));
I915_WRITE(DEIMR, dev_priv->irq_mask);
I915_WRITE(DEIER,
@ -2305,7 +2654,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
u32 enable_mask;
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
u32 render_irqs;
u16 msid;
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@ -2321,13 +2669,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
/* Hack for broken MSIs on VLV */
pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
pci_read_config_word(dev->pdev, 0x98, &msid);
msid &= 0xff; /* mask out delivery bits */
msid |= (1<<14);
pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
@ -2402,6 +2743,8 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
I915_WRITE(DEIMR, 0xffffffff);
I915_WRITE(DEIER, 0x0);
I915_WRITE(DEIIR, I915_READ(DEIIR));
if (IS_GEN7(dev))
I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
@ -2413,6 +2756,8 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
I915_WRITE(SDEIMR, 0xffffffff);
I915_WRITE(SDEIER, 0x0);
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
static void i8xx_irq_preinstall(struct drm_device * dev)
@ -2626,7 +2971,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
intel_opregion_enable_asle(dev);
i915_enable_asle_pipestat(dev);
return 0;
}
@ -2860,7 +3205,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
intel_opregion_enable_asle(dev);
i915_enable_asle_pipestat(dev);
return 0;
}


@ -351,6 +351,8 @@
* 0x8100: fast clock controls
*
* DPIO is VLV only.
*
* Note: digital port B is DDI0, digital port C is DDI1
*/
#define DPIO_PKT (VLV_DISPLAY_BASE + 0x2100)
#define DPIO_RID (0<<24)
@ -367,8 +369,20 @@
#define DPIO_SFR_BYPASS (1<<1)
#define DPIO_RESET (1<<0)
#define _DPIO_TX3_SWING_CTL4_A 0x690
#define _DPIO_TX3_SWING_CTL4_B 0x2a90
#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX_SWING_CTL4_A, \
_DPIO_TX3_SWING_CTL4_B)
/*
* Per pipe/PLL DPIO regs
*/
#define _DPIO_DIV_A 0x800c
#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
#define DPIO_POST_DIV_DAC 0
#define DPIO_POST_DIV_HDMIDP 1 /* DAC 225-400M rate */
#define DPIO_POST_DIV_LVDS1 2
#define DPIO_POST_DIV_LVDS2 3
#define DPIO_K_SHIFT (24) /* 4 bits */
#define DPIO_P1_SHIFT (21) /* 3 bits */
#define DPIO_P2_SHIFT (16) /* 5 bits */
@ -394,14 +408,111 @@
#define _DPIO_CORE_CLK_B 0x803c
#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
#define _DPIO_IREF_CTL_A 0x8040
#define _DPIO_IREF_CTL_B 0x8060
#define DPIO_IREF_CTL(pipe) _PIPE(pipe, _DPIO_IREF_CTL_A, _DPIO_IREF_CTL_B)
#define DPIO_IREF_BCAST 0xc044
#define _DPIO_IREF_A 0x8044
#define _DPIO_IREF_B 0x8064
#define DPIO_IREF(pipe) _PIPE(pipe, _DPIO_IREF_A, _DPIO_IREF_B)
#define _DPIO_PLL_CML_A 0x804c
#define _DPIO_PLL_CML_B 0x806c
#define DPIO_PLL_CML(pipe) _PIPE(pipe, _DPIO_PLL_CML_A, _DPIO_PLL_CML_B)
#define _DPIO_LFP_COEFF_A 0x8048
#define _DPIO_LFP_COEFF_B 0x8068
#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B)
#define DPIO_CALIBRATION 0x80ac
#define DPIO_FASTCLK_DISABLE 0x8100
#define DPIO_DATA_CHANNEL1 0x8220
#define DPIO_DATA_CHANNEL2 0x8420
/*
* Per DDI channel DPIO regs
*/
#define _DPIO_PCS_TX_0 0x8200
#define _DPIO_PCS_TX_1 0x8400
#define DPIO_PCS_TX_LANE2_RESET (1<<16)
#define DPIO_PCS_TX_LANE1_RESET (1<<7)
#define DPIO_PCS_TX(port) _PORT(port, _DPIO_PCS_TX_0, _DPIO_PCS_TX_1)
#define _DPIO_PCS_CLK_0 0x8204
#define _DPIO_PCS_CLK_1 0x8404
#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
#define DPIO_PCS_CLK(port) _PORT(port, _DPIO_PCS_CLK_0, _DPIO_PCS_CLK_1)
#define _DPIO_PCS_CTL_OVR1_A 0x8224
#define _DPIO_PCS_CTL_OVR1_B 0x8424
#define DPIO_PCS_CTL_OVER1(port) _PORT(port, _DPIO_PCS_CTL_OVR1_A, \
_DPIO_PCS_CTL_OVR1_B)
#define _DPIO_PCS_STAGGER0_A 0x822c
#define _DPIO_PCS_STAGGER0_B 0x842c
#define DPIO_PCS_STAGGER0(port) _PORT(port, _DPIO_PCS_STAGGER0_A, \
_DPIO_PCS_STAGGER0_B)
#define _DPIO_PCS_STAGGER1_A 0x8230
#define _DPIO_PCS_STAGGER1_B 0x8430
#define DPIO_PCS_STAGGER1(port) _PORT(port, _DPIO_PCS_STAGGER1_A, \
_DPIO_PCS_STAGGER1_B)
#define _DPIO_PCS_CLOCKBUF0_A 0x8238
#define _DPIO_PCS_CLOCKBUF0_B 0x8438
#define DPIO_PCS_CLOCKBUF0(port) _PORT(port, _DPIO_PCS_CLOCKBUF0_A, \
_DPIO_PCS_CLOCKBUF0_B)
#define _DPIO_PCS_CLOCKBUF8_A 0x825c
#define _DPIO_PCS_CLOCKBUF8_B 0x845c
#define DPIO_PCS_CLOCKBUF8(port) _PORT(port, _DPIO_PCS_CLOCKBUF8_A, \
_DPIO_PCS_CLOCKBUF8_B)
#define _DPIO_TX_SWING_CTL2_A 0x8288
#define _DPIO_TX_SWING_CTL2_B 0x8488
#define DPIO_TX_SWING_CTL2(port) _PORT(port, _DPIO_TX_SWING_CTL2_A, \
_DPIO_TX_SWING_CTL2_B)
#define _DPIO_TX_SWING_CTL3_A 0x828c
#define _DPIO_TX_SWING_CTL3_B 0x848c
#define DPIO_TX_SWING_CTL3(port) _PORT(port, _DPIO_TX_SWING_CTL3_A, \
_DPIO_TX_SWING_CTL3_B)
#define _DPIO_TX_SWING_CTL4_A 0x8290
#define _DPIO_TX_SWING_CTL4_B 0x8490
#define DPIO_TX_SWING_CTL4(port) _PORT(port, _DPIO_TX_SWING_CTL4_A, \
_DPIO_TX_SWING_CTL4_B)
#define _DPIO_TX_OCALINIT_0 0x8294
#define _DPIO_TX_OCALINIT_1 0x8494
#define DPIO_TX_OCALINIT_EN (1<<31)
#define DPIO_TX_OCALINIT(port) _PORT(port, _DPIO_TX_OCALINIT_0, \
_DPIO_TX_OCALINIT_1)
#define _DPIO_TX_CTL_0 0x82ac
#define _DPIO_TX_CTL_1 0x84ac
#define DPIO_TX_CTL(port) _PORT(port, _DPIO_TX_CTL_0, _DPIO_TX_CTL_1)
#define _DPIO_TX_LANE_0 0x82b8
#define _DPIO_TX_LANE_1 0x84b8
#define DPIO_TX_LANE(port) _PORT(port, _DPIO_TX_LANE_0, _DPIO_TX_LANE_1)
#define _DPIO_DATA_CHANNEL1 0x8220
#define _DPIO_DATA_CHANNEL2 0x8420
#define DPIO_DATA_CHANNEL(port) _PORT(port, _DPIO_DATA_CHANNEL1, _DPIO_DATA_CHANNEL2)
#define _DPIO_PORT0_PCS0 0x0220
#define _DPIO_PORT0_PCS1 0x0420
#define _DPIO_PORT1_PCS2 0x2620
#define _DPIO_PORT1_PCS3 0x2820
#define DPIO_DATA_LANE_A(port) _PORT(port, _DPIO_PORT0_PCS0, _DPIO_PORT1_PCS2)
#define DPIO_DATA_LANE_B(port) _PORT(port, _DPIO_PORT0_PCS1, _DPIO_PORT1_PCS3)
#define DPIO_DATA_CHANNEL1 0x8220
#define DPIO_DATA_CHANNEL2 0x8420
/*
* Fence registers
@ -527,7 +638,11 @@
#define ERROR_GEN6 0x040a0
#define GEN7_ERR_INT 0x44040
#define ERR_INT_MMIO_UNCLAIMED (1<<13)
#define ERR_INT_POISON (1<<31)
#define ERR_INT_MMIO_UNCLAIMED (1<<13)
#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
#define FPGA_DBG 0x42300
#define FPGA_DBG_RM_NOCLAIM (1<<31)
@ -583,6 +698,7 @@
#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
@ -807,7 +923,9 @@
#define DPFC_CTL_EN (1<<31)
#define DPFC_CTL_PLANEA (0<<30)
#define DPFC_CTL_PLANEB (1<<30)
#define IVB_DPFC_CTL_PLANE_SHIFT (29)
#define DPFC_CTL_FENCE_EN (1<<29)
#define IVB_DPFC_CTL_FENCE_EN (1<<28)
#define DPFC_CTL_PERSISTENT_MODE (1<<25)
#define DPFC_SR_EN (1<<10)
#define DPFC_CTL_LIMIT_1X (0<<6)
@ -840,6 +958,7 @@
#define ILK_DPFC_CHICKEN 0x43224
#define ILK_FBC_RT_BASE 0x2128
#define ILK_FBC_RT_VALID (1<<0)
#define SNB_FBC_FRONT_BUFFER (1<<1)
#define ILK_DISPLAY_CHICKEN1 0x42000
#define ILK_FBCQ_DIS (1<<22)
@ -855,6 +974,19 @@
#define SNB_CPU_FENCE_ENABLE (1<<29)
#define DPFC_CPU_FENCE_OFFSET 0x100104
/* Framebuffer compression for Ivybridge */
#define IVB_FBC_RT_BASE 0x7020
#define _HSW_PIPE_SLICE_CHICKEN_1_A 0x420B0
#define _HSW_PIPE_SLICE_CHICKEN_1_B 0x420B4
#define HSW_BYPASS_FBC_QUEUE (1<<22)
#define HSW_PIPE_SLICE_CHICKEN_1(pipe) _PIPE(pipe, + \
_HSW_PIPE_SLICE_CHICKEN_1_A, + \
_HSW_PIPE_SLICE_CHICKEN_1_B)
#define HSW_CLKGATE_DISABLE_PART_1 0x46500
#define HSW_DPFC_GATING_DISABLE (1<<23)
/*
* GPIO regs
@ -963,7 +1095,10 @@
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
#define DPLL_PORTC_READY_MASK (0xf << 4)
#define DPLL_PORTB_READY_MASK (0xf)
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
/*
@ -1967,6 +2102,10 @@
#define BLM_PIPE_A (0 << 29)
#define BLM_PIPE_B (1 << 29)
#define BLM_PIPE_C (2 << 29) /* ivb + */
#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */
#define BLM_TRANSCODER_B BLM_PIPE_B
#define BLM_TRANSCODER_C BLM_PIPE_C
#define BLM_TRANSCODER_EDP (3 << 29)
#define BLM_PIPE(pipe) ((pipe) << 29)
#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
@ -2540,9 +2679,7 @@
#define DP_PRE_EMPHASIS_SHIFT 22
/* How many wires to use. I guess 3 was too hard */
#define DP_PORT_WIDTH_1 (0 << 19)
#define DP_PORT_WIDTH_2 (1 << 19)
#define DP_PORT_WIDTH_4 (3 << 19)
#define DP_PORT_WIDTH(width) (((width) - 1) << 19)
#define DP_PORT_WIDTH_MASK (7 << 19)
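The fixed DP_PORT_WIDTH_1/2/4 values are replaced by a parameterized macro that encodes the lane count minus one into bits 21:19. A quick self-check that the new macro reproduces the old encodings:
#include <assert.h>
#define DP_PORT_WIDTH(width)	(((width) - 1) << 19)
#define DP_PORT_WIDTH_MASK	(7 << 19)
int main(void)
{
	/* Same encodings as the old fixed #defines. */
	assert(DP_PORT_WIDTH(1) == (0 << 19));
	assert(DP_PORT_WIDTH(2) == (1 << 19));
	assert(DP_PORT_WIDTH(4) == (3 << 19));
	/* The largest legal lane count still fits the 3-bit field. */
	assert((DP_PORT_WIDTH(4) & ~DP_PORT_WIDTH_MASK) == 0);
	return 0;
}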
/* Mystic DPCD version 1.1 special mode */
@ -2646,18 +2783,20 @@
* which is after the LUTs, so we want the bytes for our color format.
* For our current usage, this is always 3, one byte for R, G and B.
*/
#define _PIPEA_GMCH_DATA_M 0x70050
#define _PIPEB_GMCH_DATA_M 0x71050
#define _PIPEA_DATA_M_G4X 0x70050
#define _PIPEB_DATA_M_G4X 0x71050
/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
#define TU_SIZE_SHIFT 25
#define TU_SIZE_MASK (0x3f << 25)
#define DATA_LINK_M_N_MASK (0xffffff)
#define DATA_LINK_N_MAX (0x800000)
#define _PIPEA_GMCH_DATA_N 0x70054
#define _PIPEB_GMCH_DATA_N 0x71054
#define _PIPEA_DATA_N_G4X 0x70054
#define _PIPEB_DATA_N_G4X 0x71054
#define PIPE_GMCH_DATA_N_MASK (0xffffff)
/*
* Computing Link M and N values for the Display Port link
@ -2670,16 +2809,18 @@
* Attributes and VB-ID.
*/
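The comments above describe the data and link M/N ratios the hardware divides to pace the DP link. Below is a rough stand-alone sketch of how such a ratio can be reduced so both values fit the 24-bit DATA_LINK_M_N_MASK fields; it approximates the driver's helper but is not its exact code:
#include <stdio.h>
#define DATA_LINK_M_N_MASK	0xffffff
#define DATA_LINK_N_MAX		0x800000
/* Reduce the ratio m:n so both values fit the 24-bit M/N fields while
 * keeping the quotient (all the hardware cares about) essentially intact. */
static void compute_m_n(unsigned long long m, unsigned long long n,
			unsigned int *ret_m, unsigned int *ret_n)
{
	unsigned long long scaled_n = n;
	while (scaled_n > DATA_LINK_N_MAX)
		scaled_n >>= 1;
	m = m * scaled_n / n;
	while (m > DATA_LINK_M_N_MASK || scaled_n > DATA_LINK_M_N_MASK) {
		m >>= 1;
		scaled_n >>= 1;
	}
	*ret_m = m;
	*ret_n = scaled_n;
}
int main(void)
{
	/* 1080p60: 148500 kHz pixel clock, 24 bpp, over 4 lanes of 2.7 GHz DP. */
	unsigned long long pixel_clock = 148500, link_clock = 270000;
	unsigned long long bpp = 24, lanes = 4;
	unsigned int data_m, data_n, link_m, link_n;
	compute_m_n(bpp * pixel_clock, link_clock * lanes * 8, &data_m, &data_n);
	compute_m_n(pixel_clock, link_clock, &link_m, &link_n);
	printf("data M/N = %u/%u, link M/N = %u/%u\n",
	       data_m, data_n, link_m, link_n);
	return 0;
}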
#define _PIPEA_DP_LINK_M 0x70060
#define _PIPEB_DP_LINK_M 0x71060
#define _PIPEA_LINK_M_G4X 0x70060
#define _PIPEB_LINK_M_G4X 0x71060
#define PIPEA_DP_LINK_M_MASK (0xffffff)
#define _PIPEA_DP_LINK_N 0x70064
#define _PIPEB_DP_LINK_N 0x71064
#define _PIPEA_LINK_N_G4X 0x70064
#define _PIPEB_LINK_N_G4X 0x71064
#define PIPEA_DP_LINK_N_MASK (0xffffff)
#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
#define PIPE_DATA_M_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
#define PIPE_DATA_N_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
#define PIPE_LINK_M_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
#define PIPE_LINK_N_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
/* Display & cursor control */
@ -2715,6 +2856,7 @@
#define PIPECONF_INTERLACED_ILK (3 << 21)
#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
@ -3502,7 +3644,7 @@
#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
/* More Ivybridge lolz */
#define DE_ERR_DEBUG_IVB (1<<30)
#define DE_ERR_INT_IVB (1<<30)
#define DE_GSE_IVB (1<<29)
#define DE_PCH_EVENT_IVB (1<<28)
#define DE_DP_A_HOTPLUG_IVB (1<<27)
@ -3661,6 +3803,7 @@
SDE_PORTC_HOTPLUG_CPT | \
SDE_PORTB_HOTPLUG_CPT)
#define SDE_GMBUS_CPT (1 << 17)
#define SDE_ERROR_CPT (1 << 16)
#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
#define SDE_FDI_RXC_CPT (1 << 8)
@ -3685,6 +3828,12 @@
#define SDEIIR 0xc4008
#define SDEIER 0xc400c
#define SERR_INT 0xc4040
#define SERR_INT_POISON (1<<31)
#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
/* digital port hotplug */
#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
#define PORTD_HOTPLUG_ENABLE (1 << 20)
@ -3794,34 +3943,34 @@
/* transcoder */
#define _TRANS_HTOTAL_A 0xe0000
#define TRANS_HTOTAL_SHIFT 16
#define TRANS_HACTIVE_SHIFT 0
#define _TRANS_HBLANK_A 0xe0004
#define TRANS_HBLANK_END_SHIFT 16
#define TRANS_HBLANK_START_SHIFT 0
#define _TRANS_HSYNC_A 0xe0008
#define TRANS_HSYNC_END_SHIFT 16
#define TRANS_HSYNC_START_SHIFT 0
#define _TRANS_VTOTAL_A 0xe000c
#define TRANS_VTOTAL_SHIFT 16
#define TRANS_VACTIVE_SHIFT 0
#define _TRANS_VBLANK_A 0xe0010
#define TRANS_VBLANK_END_SHIFT 16
#define TRANS_VBLANK_START_SHIFT 0
#define _TRANS_VSYNC_A 0xe0014
#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
#define _TRANS_VSYNCSHIFT_A 0xe0028
#define _PCH_TRANS_HTOTAL_A 0xe0000
#define TRANS_HTOTAL_SHIFT 16
#define TRANS_HACTIVE_SHIFT 0
#define _PCH_TRANS_HBLANK_A 0xe0004
#define TRANS_HBLANK_END_SHIFT 16
#define TRANS_HBLANK_START_SHIFT 0
#define _PCH_TRANS_HSYNC_A 0xe0008
#define TRANS_HSYNC_END_SHIFT 16
#define TRANS_HSYNC_START_SHIFT 0
#define _PCH_TRANS_VTOTAL_A 0xe000c
#define TRANS_VTOTAL_SHIFT 16
#define TRANS_VACTIVE_SHIFT 0
#define _PCH_TRANS_VBLANK_A 0xe0010
#define TRANS_VBLANK_END_SHIFT 16
#define TRANS_VBLANK_START_SHIFT 0
#define _PCH_TRANS_VSYNC_A 0xe0014
#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
#define _TRANSA_DATA_M1 0xe0030
#define _TRANSA_DATA_N1 0xe0034
#define _TRANSA_DATA_M2 0xe0038
#define _TRANSA_DATA_N2 0xe003c
#define _TRANSA_DP_LINK_M1 0xe0040
#define _TRANSA_DP_LINK_N1 0xe0044
#define _TRANSA_DP_LINK_M2 0xe0048
#define _TRANSA_DP_LINK_N2 0xe004c
#define _PCH_TRANSA_DATA_M1 0xe0030
#define _PCH_TRANSA_DATA_N1 0xe0034
#define _PCH_TRANSA_DATA_M2 0xe0038
#define _PCH_TRANSA_DATA_N2 0xe003c
#define _PCH_TRANSA_LINK_M1 0xe0040
#define _PCH_TRANSA_LINK_N1 0xe0044
#define _PCH_TRANSA_LINK_M2 0xe0048
#define _PCH_TRANSA_LINK_N2 0xe004c
/* Per-transcoder DIP controls */
@ -3890,44 +4039,45 @@
#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
#define _TRANS_HSYNC_B 0xe1008
#define _TRANS_VTOTAL_B 0xe100c
#define _TRANS_VBLANK_B 0xe1010
#define _TRANS_VSYNC_B 0xe1014
#define _TRANS_VSYNCSHIFT_B 0xe1028
#define _PCH_TRANS_HTOTAL_B 0xe1000
#define _PCH_TRANS_HBLANK_B 0xe1004
#define _PCH_TRANS_HSYNC_B 0xe1008
#define _PCH_TRANS_VTOTAL_B 0xe100c
#define _PCH_TRANS_VBLANK_B 0xe1010
#define _PCH_TRANS_VSYNC_B 0xe1014
#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
#define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B)
#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
#define TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _TRANS_VSYNCSHIFT_A, \
_TRANS_VSYNCSHIFT_B)
#define PCH_TRANS_HTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
#define PCH_TRANS_HBLANK(pipe) _PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
#define PCH_TRANS_HSYNC(pipe) _PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
#define PCH_TRANS_VTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
#define PCH_TRANS_VBLANK(pipe) _PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
#define PCH_TRANS_VSYNC(pipe) _PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
#define PCH_TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, \
_PCH_TRANS_VSYNCSHIFT_B)
#define _TRANSB_DATA_M1 0xe1030
#define _TRANSB_DATA_N1 0xe1034
#define _TRANSB_DATA_M2 0xe1038
#define _TRANSB_DATA_N2 0xe103c
#define _TRANSB_DP_LINK_M1 0xe1040
#define _TRANSB_DP_LINK_N1 0xe1044
#define _TRANSB_DP_LINK_M2 0xe1048
#define _TRANSB_DP_LINK_N2 0xe104c
#define _PCH_TRANSB_DATA_M1 0xe1030
#define _PCH_TRANSB_DATA_N1 0xe1034
#define _PCH_TRANSB_DATA_M2 0xe1038
#define _PCH_TRANSB_DATA_N2 0xe103c
#define _PCH_TRANSB_LINK_M1 0xe1040
#define _PCH_TRANSB_LINK_N1 0xe1044
#define _PCH_TRANSB_LINK_M2 0xe1048
#define _PCH_TRANSB_LINK_N2 0xe104c
#define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1)
#define TRANSDATA_N1(pipe) _PIPE(pipe, _TRANSA_DATA_N1, _TRANSB_DATA_N1)
#define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2)
#define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2)
#define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1)
#define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1)
#define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2)
#define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2)
#define PCH_TRANS_DATA_M1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
#define PCH_TRANS_DATA_N1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
#define PCH_TRANS_DATA_M2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
#define PCH_TRANS_DATA_N2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
#define PCH_TRANS_LINK_M1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
#define PCH_TRANS_LINK_N1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
#define PCH_TRANS_LINK_M2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
#define PCH_TRANS_LINK_N2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
#define _TRANSACONF 0xf0008
#define _TRANSBCONF 0xf1008
#define TRANSCONF(plane) _PIPE(plane, _TRANSACONF, _TRANSBCONF)
#define _PCH_TRANSACONF 0xf0008
#define _PCH_TRANSBCONF 0xf1008
#define PCH_TRANSCONF(pipe) _PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
#define LPT_TRANSCONF _PCH_TRANSACONF /* lpt has only one transcoder */
#define TRANS_DISABLE (0<<31)
#define TRANS_ENABLE (1<<31)
#define TRANS_STATE_MASK (1<<30)
@ -4011,10 +4161,9 @@
#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
#define FDI_DP_PORT_WIDTH_X1 (0<<19)
#define FDI_DP_PORT_WIDTH_X2 (1<<19)
#define FDI_DP_PORT_WIDTH_X3 (2<<19)
#define FDI_DP_PORT_WIDTH_X4 (3<<19)
#define FDI_DP_PORT_WIDTH_SHIFT 19
#define FDI_DP_PORT_WIDTH_MASK (7 << FDI_DP_PORT_WIDTH_SHIFT)
#define FDI_DP_PORT_WIDTH(width) (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)
#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
/* Ironlake: hardwired to 1 */
#define FDI_TX_PLL_ENABLE (1<<14)
@ -4039,7 +4188,6 @@
/* train, dp width same as FDI_TX */
#define FDI_FS_ERRC_ENABLE (1<<27)
#define FDI_FE_ERRC_ENABLE (1<<26)
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
#define FDI_8BPC (0<<16)
#define FDI_10BPC (1<<16)
@ -4061,9 +4209,6 @@
#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
/* LPT */
#define FDI_PORT_WIDTH_2X_LPT (1<<19)
#define FDI_PORT_WIDTH_1X_LPT (0<<19)
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
@ -4309,6 +4454,7 @@
#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
#define GEN7_RC_CTL_TO_MODE (1<<28)
#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
#define GEN6_RC_CTL_HW_ENABLE (1<<31)
#define GEN6_RP_DOWN_TIMEOUT 0xA010
@ -4400,12 +4546,32 @@
#define IOSF_BAR_SHIFT 1
#define IOSF_SB_BUSY (1<<0)
#define IOSF_PORT_PUNIT 0x4
#define IOSF_PORT_NC 0x11
#define VLV_IOSF_DATA 0x182104
#define VLV_IOSF_ADDR 0x182108
#define PUNIT_OPCODE_REG_READ 6
#define PUNIT_OPCODE_REG_WRITE 7
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c
#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3
#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8
#define FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT 11
#define FB_GFX_FGUARANTEED_FREQ_FUSE_MASK 0x0007f800
#define IOSF_NC_FB_GFX_FMAX_FUSE_HI 0x34
#define FB_FMAX_VMIN_FREQ_HI_MASK 0x00000007
#define IOSF_NC_FB_GFX_FMAX_FUSE_LO 0x30
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
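The new NC fuse registers expose the maximum and guaranteed GPU frequencies as bit-fields. A quick sketch of extracting them with the shift/mask pairs above (the fuse value here is made up):
#include <stdio.h>
#define FB_GFX_MAX_FREQ_FUSE_SHIFT		3
#define FB_GFX_MAX_FREQ_FUSE_MASK		0x000007f8
#define FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT	11
#define FB_GFX_FGUARANTEED_FREQ_FUSE_MASK	0x0007f800
int main(void)
{
	unsigned int fuse = 0x00012345;	/* made-up IOSF_NC_FB_GFX_FREQ_FUSE value */
	unsigned int max_freq = (fuse & FB_GFX_MAX_FREQ_FUSE_MASK) >>
				FB_GFX_MAX_FREQ_FUSE_SHIFT;
	unsigned int guaranteed = (fuse & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >>
				  FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
	printf("max freq field 0x%02x, guaranteed freq field 0x%02x\n",
	       max_freq, guaranteed);
	return 0;
}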
#define GEN6_GT_CORE_STATUS 0x138060
#define GEN6_CORE_CPD_STATE_MASK (7<<4)
#define GEN6_RCn_MASK 7
@ -4602,9 +4768,6 @@
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
#define TRANS_DDI_BFI_ENABLE (1<<4)
#define TRANS_DDI_PORT_WIDTH_X1 (0<<1)
#define TRANS_DDI_PORT_WIDTH_X2 (1<<1)
#define TRANS_DDI_PORT_WIDTH_X4 (3<<1)
/* DisplayPort Transport Control */
#define DP_TP_CTL_A 0x64040
@ -4648,9 +4811,7 @@
#define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7)
#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH_X1 (0<<1)
#define DDI_PORT_WIDTH_X2 (1<<1)
#define DDI_PORT_WIDTH_X4 (3<<1)
#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
#define DDI_INIT_DISPLAY_DETECTED (1<<0)
/* DDI Buffer Translations */
@ -4787,6 +4948,9 @@
#define _PIPE_A_CSC_COEFF_RV_GV 0x49020
#define _PIPE_A_CSC_COEFF_BV 0x49024
#define _PIPE_A_CSC_MODE 0x49028
#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
#define CSC_MODE_YUV_TO_RGB (1 << 0)
#define _PIPE_A_CSC_PREOFF_HI 0x49030
#define _PIPE_A_CSC_PREOFF_ME 0x49034
#define _PIPE_A_CSC_PREOFF_LO 0x49038
@ -4808,10 +4972,6 @@
#define _PIPE_B_CSC_POSTOFF_ME 0x49144
#define _PIPE_B_CSC_POSTOFF_LO 0x49148
#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
#define CSC_MODE_YUV_TO_RGB (1 << 0)
#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)


@ -192,6 +192,7 @@ static void i915_restore_vga(struct drm_device *dev)
static void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
/* Display arbitration control */
if (INTEL_INFO(dev)->gen <= 4)
@ -202,6 +203,8 @@ static void i915_save_display(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_save_display_reg(dev);
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
/* LVDS state */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
@ -222,6 +225,8 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.saveLVDS = I915_READ(LVDS);
}
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
@ -257,6 +262,7 @@ static void i915_restore_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 mask = 0xffffffff;
unsigned long flags;
/* Display arbitration */
if (INTEL_INFO(dev)->gen <= 4)
@ -265,6 +271,8 @@ static void i915_restore_display(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_restore_display_reg(dev);
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
/* LVDS state */
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
@ -304,6 +312,8 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
}
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
/* only restore FBC info on the platform that supports FBC*/
intel_disable_fbc(dev);
if (I915_HAS_FBC(dev)) {


@ -212,7 +212,13 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
int ret;
mutex_lock(&dev_priv->rps.hw_lock);
ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
if (IS_VALLEYVIEW(dev_priv->dev)) {
u32 freq;
valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS, &freq);
ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
} else {
ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
}
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@ -226,7 +232,10 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
int ret;
mutex_lock(&dev_priv->rps.hw_lock);
ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
if (IS_VALLEYVIEW(dev_priv->dev))
ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
else
ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@ -246,16 +255,25 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
val /= GT_FREQUENCY_MULTIPLIER;
mutex_lock(&dev_priv->rps.hw_lock);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.hw_max;
non_oc_max = (rp_state_cap & 0xff);
hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (IS_VALLEYVIEW(dev_priv->dev)) {
val = vlv_freq_opcode(dev_priv->mem_freq, val);
if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
hw_max = valleyview_rps_max_freq(dev_priv);
hw_min = valleyview_rps_min_freq(dev_priv);
non_oc_max = hw_max;
} else {
val /= GT_FREQUENCY_MULTIPLIER;
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.hw_max;
non_oc_max = (rp_state_cap & 0xff);
hw_min = ((rp_state_cap & 0xff0000) >> 16);
}
if (val < hw_min || val > hw_max ||
val < dev_priv->rps.min_delay) {
mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
@ -264,8 +282,12 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
DRM_DEBUG("User requested overclocking to %d\n",
val * GT_FREQUENCY_MULTIPLIER);
if (dev_priv->rps.cur_delay > val)
gen6_set_rps(dev_priv->dev, val);
if (dev_priv->rps.cur_delay > val) {
if (IS_VALLEYVIEW(dev_priv->dev))
valleyview_set_rps(dev_priv->dev, val);
else
gen6_set_rps(dev_priv->dev, val);
}
dev_priv->rps.max_delay = val;
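On non-Valleyview parts the store path simply divides the user's MHz value by GT_FREQUENCY_MULTIPLIER (50 MHz per RP frequency step elsewhere in the driver) before clamping, and the show path multiplies it back. A tiny worked example of that round-trip:
#include <stdio.h>
#define GT_FREQUENCY_MULTIPLIER 50	/* 50 MHz per RP step, as defined elsewhere in the driver */
int main(void)
{
	unsigned int mhz = 1100;	/* value written to gt_max_freq_mhz */
	unsigned int step = mhz / GT_FREQUENCY_MULTIPLIER;
	printf("%u MHz -> RP step %u -> read back as %u MHz\n",
	       mhz, step, step * GT_FREQUENCY_MULTIPLIER);
	return 0;
}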
@ -282,7 +304,10 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
int ret;
mutex_lock(&dev_priv->rps.hw_lock);
ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
if (IS_VALLEYVIEW(dev_priv->dev))
ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
else
ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@ -302,21 +327,32 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
val /= GT_FREQUENCY_MULTIPLIER;
mutex_lock(&dev_priv->rps.hw_lock);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.hw_max;
hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (IS_VALLEYVIEW(dev)) {
val = vlv_freq_opcode(dev_priv->mem_freq, val);
hw_max = valleyview_rps_max_freq(dev_priv);
hw_min = valleyview_rps_min_freq(dev_priv);
} else {
val /= GT_FREQUENCY_MULTIPLIER;
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.hw_max;
hw_min = ((rp_state_cap & 0xff0000) >> 16);
}
if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
if (dev_priv->rps.cur_delay < val)
gen6_set_rps(dev_priv->dev, val);
if (dev_priv->rps.cur_delay < val) {
if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val);
else
gen6_set_rps(dev_priv->dev, val);
}
dev_priv->rps.min_delay = val;


@ -148,13 +148,13 @@ void i915_save_display_reg(struct drm_device *dev)
dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
dev_priv->regfile.saveTRANSACONF = I915_READ(_PCH_TRANSACONF);
dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_PCH_TRANS_HTOTAL_A);
dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_PCH_TRANS_HBLANK_A);
dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_PCH_TRANS_HSYNC_A);
dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_PCH_TRANS_VTOTAL_A);
dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_PCH_TRANS_VBLANK_A);
dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_PCH_TRANS_VSYNC_A);
}
dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
@ -205,13 +205,13 @@ void i915_save_display_reg(struct drm_device *dev)
dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
dev_priv->regfile.saveTRANSBCONF = I915_READ(_PCH_TRANSBCONF);
dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_PCH_TRANS_HTOTAL_B);
dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_PCH_TRANS_HBLANK_B);
dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_PCH_TRANS_HSYNC_B);
dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_PCH_TRANS_VTOTAL_B);
dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_PCH_TRANS_VBLANK_B);
dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_PCH_TRANS_VSYNC_B);
}
dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
@ -259,14 +259,14 @@ void i915_save_display_reg(struct drm_device *dev)
dev_priv->regfile.saveDP_B = I915_READ(DP_B);
dev_priv->regfile.saveDP_C = I915_READ(DP_C);
dev_priv->regfile.saveDP_D = I915_READ(DP_D);
dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_DATA_M_G4X);
dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_DATA_M_G4X);
dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_DATA_N_G4X);
dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_DATA_N_G4X);
dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_LINK_M_G4X);
dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_LINK_M_G4X);
dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_LINK_N_G4X);
dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_LINK_N_G4X);
}
/* FIXME: regfile.save TV & SDVO state */
@ -282,14 +282,14 @@ void i915_restore_display_reg(struct drm_device *dev)
/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
I915_WRITE(_PIPEB_DATA_M_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
I915_WRITE(_PIPEA_DATA_N_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
I915_WRITE(_PIPEB_DATA_N_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
I915_WRITE(_PIPEA_LINK_M_G4X, dev_priv->regfile.savePIPEA_DP_LINK_M);
I915_WRITE(_PIPEB_LINK_M_G4X, dev_priv->regfile.savePIPEB_DP_LINK_M);
I915_WRITE(_PIPEA_LINK_N_G4X, dev_priv->regfile.savePIPEA_DP_LINK_N);
I915_WRITE(_PIPEB_LINK_N_G4X, dev_priv->regfile.savePIPEB_DP_LINK_N);
}
/* Fences */
@ -379,13 +379,13 @@ void i915_restore_display_reg(struct drm_device *dev)
I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
I915_WRITE(_PCH_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
I915_WRITE(_PCH_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
I915_WRITE(_PCH_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
I915_WRITE(_PCH_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
I915_WRITE(_PCH_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
I915_WRITE(_PCH_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
I915_WRITE(_PCH_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
}
/* Restore plane info */
@ -448,13 +448,13 @@ void i915_restore_display_reg(struct drm_device *dev)
I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
I915_WRITE(_PCH_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
I915_WRITE(_PCH_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
I915_WRITE(_PCH_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
I915_WRITE(_PCH_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
I915_WRITE(_PCH_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
I915_WRITE(_PCH_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
I915_WRITE(_PCH_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
}
/* Restore plane info */


@ -212,7 +212,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
if (!lvds_options)
return;
dev_priv->lvds_dither = lvds_options->pixel_dither;
dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
if (lvds_options->panel_type == 0xff)
return;
@ -226,7 +226,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
if (!lvds_lfp_data_ptrs)
return;
dev_priv->lvds_vbt = 1;
dev_priv->vbt.lvds_vbt = 1;
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
@ -238,7 +238,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
@ -274,9 +274,9 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
/* check the resolution, just to be sure */
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
fp_timing->y_res == panel_fixed_mode->vdisplay) {
dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
dev_priv->bios_lvds_val);
dev_priv->vbt.bios_lvds_val);
}
}
}
@ -316,7 +316,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
@ -345,20 +345,20 @@ parse_general_features(struct drm_i915_private *dev_priv,
general = find_section(bdb, BDB_GENERAL_FEATURES);
if (general) {
dev_priv->int_tv_support = general->int_tv_support;
dev_priv->int_crt_support = general->int_crt_support;
dev_priv->lvds_use_ssc = general->enable_ssc;
dev_priv->lvds_ssc_freq =
dev_priv->vbt.int_tv_support = general->int_tv_support;
dev_priv->vbt.int_crt_support = general->int_crt_support;
dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
dev_priv->vbt.lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq);
dev_priv->display_clock_mode = general->display_clock_mode;
dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
dev_priv->vbt.display_clock_mode = general->display_clock_mode;
dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
dev_priv->int_tv_support,
dev_priv->int_crt_support,
dev_priv->lvds_use_ssc,
dev_priv->lvds_ssc_freq,
dev_priv->display_clock_mode,
dev_priv->fdi_rx_polarity_inverted);
dev_priv->vbt.int_tv_support,
dev_priv->vbt.int_crt_support,
dev_priv->vbt.lvds_use_ssc,
dev_priv->vbt.lvds_ssc_freq,
dev_priv->vbt.display_clock_mode,
dev_priv->vbt.fdi_rx_polarity_inverted);
}
}
@ -375,7 +375,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
if (intel_gmbus_is_port_valid(bus_pin))
dev_priv->crt_ddc_pin = bus_pin;
dev_priv->vbt.crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
block_size);
@ -486,7 +486,7 @@ parse_driver_features(struct drm_i915_private *dev_priv,
if (SUPPORTS_EDP(dev) &&
driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
dev_priv->edp.support = 1;
dev_priv->vbt.edp_support = 1;
if (driver->dual_frequency)
dev_priv->render_reclock_avail = true;
@ -501,20 +501,20 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
edp = find_section(bdb, BDB_EDP);
if (!edp) {
if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support)
DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
return;
}
switch ((edp->color_depth >> (panel_type * 2)) & 3) {
case EDP_18BPP:
dev_priv->edp.bpp = 18;
dev_priv->vbt.edp_bpp = 18;
break;
case EDP_24BPP:
dev_priv->edp.bpp = 24;
dev_priv->vbt.edp_bpp = 24;
break;
case EDP_30BPP:
dev_priv->edp.bpp = 30;
dev_priv->vbt.edp_bpp = 30;
break;
}
@ -522,48 +522,48 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
edp_pps = &edp->power_seqs[panel_type];
edp_link_params = &edp->link_params[panel_type];
dev_priv->edp.pps = *edp_pps;
dev_priv->vbt.edp_pps = *edp_pps;
dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
DP_LINK_BW_1_62;
switch (edp_link_params->lanes) {
case 0:
dev_priv->edp.lanes = 1;
dev_priv->vbt.edp_lanes = 1;
break;
case 1:
dev_priv->edp.lanes = 2;
dev_priv->vbt.edp_lanes = 2;
break;
case 3:
default:
dev_priv->edp.lanes = 4;
dev_priv->vbt.edp_lanes = 4;
break;
}
switch (edp_link_params->preemphasis) {
case 0:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
break;
case 1:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
break;
case 2:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
break;
case 3:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
break;
}
switch (edp_link_params->vswing) {
case 0:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
break;
case 1:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
break;
case 2:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
break;
case 3:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
break;
}
}
@ -611,13 +611,13 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
return;
}
dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
if (!dev_priv->child_dev) {
dev_priv->vbt.child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
if (!dev_priv->vbt.child_dev) {
DRM_DEBUG_KMS("No memory space for child device\n");
return;
}
dev_priv->child_dev_num = count;
dev_priv->vbt.child_dev_num = count;
count = 0;
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
@ -625,7 +625,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
/* skip the device block if device type is invalid */
continue;
}
child_dev_ptr = dev_priv->child_dev + count;
child_dev_ptr = dev_priv->vbt.child_dev + count;
count++;
memcpy((void *)child_dev_ptr, (void *)p_child,
sizeof(*p_child));
@ -638,23 +638,23 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
/* LFP panel data */
dev_priv->lvds_dither = 1;
dev_priv->lvds_vbt = 0;
dev_priv->vbt.lvds_dither = 1;
dev_priv->vbt.lvds_vbt = 0;
/* SDVO panel data */
dev_priv->sdvo_lvds_vbt_mode = NULL;
dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
/* general features */
dev_priv->int_tv_support = 1;
dev_priv->int_crt_support = 1;
dev_priv->vbt.int_tv_support = 1;
dev_priv->vbt.int_crt_support = 1;
/* Default to using SSC */
dev_priv->lvds_use_ssc = 1;
dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
dev_priv->vbt.lvds_use_ssc = 1;
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
}
static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)


@ -207,6 +207,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev))
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev))
pipe_config->pipe_bpp = 24;
return true;
}
@ -431,7 +435,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
edid = intel_crt_get_edid(connector, i2c);
if (edid) {
@ -637,7 +641,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
int ret;
struct i2c_adapter *i2c;
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
return ret;


@ -174,6 +174,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
* mode set "sequence for CRT port" document:
* - TP1 to TP2 time with the default value
* - FDI delay to 90h
*
* WaFDIAutoLinkSetTimingOverrride:hsw
*/
I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
FDI_RX_PWRDN_LANE0_VAL(2) |
@ -181,7 +183,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
/* Enable the PCH Receiver FDI PLL */
rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19);
FDI_RX_PLL_ENABLE |
FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
udelay(220);
@ -209,7 +212,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
* port reversal bit */
I915_WRITE(DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE |
((intel_crtc->fdi_lanes - 1) << 1) |
((intel_crtc->config.fdi_lanes - 1) << 1) |
hsw_ddi_buf_ctl_values[i / 2]);
POSTING_READ(DDI_BUF_CTL(PORT_E));
@ -278,392 +281,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DRM_ERROR("FDI link training failed!\n");
}
/* WRPLL clock dividers */
struct wrpll_tmds_clock {
u32 clock;
u16 p; /* Post divider */
u16 n2; /* Feedback divider */
u16 r2; /* Reference divider */
};
/* Table of matching values for WRPLL clocks programming for each frequency.
* The code assumes this table is sorted. */
static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{19750, 38, 25, 18},
{20000, 48, 32, 18},
{21000, 36, 21, 15},
{21912, 42, 29, 17},
{22000, 36, 22, 15},
{23000, 36, 23, 15},
{23500, 40, 40, 23},
{23750, 26, 16, 14},
{24000, 36, 24, 15},
{25000, 36, 25, 15},
{25175, 26, 40, 33},
{25200, 30, 21, 15},
{26000, 36, 26, 15},
{27000, 30, 21, 14},
{27027, 18, 100, 111},
{27500, 30, 29, 19},
{28000, 34, 30, 17},
{28320, 26, 30, 22},
{28322, 32, 42, 25},
{28750, 24, 23, 18},
{29000, 30, 29, 18},
{29750, 32, 30, 17},
{30000, 30, 25, 15},
{30750, 30, 41, 24},
{31000, 30, 31, 18},
{31500, 30, 28, 16},
{32000, 30, 32, 18},
{32500, 28, 32, 19},
{33000, 24, 22, 15},
{34000, 28, 30, 17},
{35000, 26, 32, 19},
{35500, 24, 30, 19},
{36000, 26, 26, 15},
{36750, 26, 46, 26},
{37000, 24, 23, 14},
{37762, 22, 40, 26},
{37800, 20, 21, 15},
{38000, 24, 27, 16},
{38250, 24, 34, 20},
{39000, 24, 26, 15},
{40000, 24, 32, 18},
{40500, 20, 21, 14},
{40541, 22, 147, 89},
{40750, 18, 19, 14},
{41000, 16, 17, 14},
{41500, 22, 44, 26},
{41540, 22, 44, 26},
{42000, 18, 21, 15},
{42500, 22, 45, 26},
{43000, 20, 43, 27},
{43163, 20, 24, 15},
{44000, 18, 22, 15},
{44900, 20, 108, 65},
{45000, 20, 25, 15},
{45250, 20, 52, 31},
{46000, 18, 23, 15},
{46750, 20, 45, 26},
{47000, 20, 40, 23},
{48000, 18, 24, 15},
{49000, 18, 49, 30},
{49500, 16, 22, 15},
{50000, 18, 25, 15},
{50500, 18, 32, 19},
{51000, 18, 34, 20},
{52000, 18, 26, 15},
{52406, 14, 34, 25},
{53000, 16, 22, 14},
{54000, 16, 24, 15},
{54054, 16, 173, 108},
{54500, 14, 24, 17},
{55000, 12, 22, 18},
{56000, 14, 45, 31},
{56250, 16, 25, 15},
{56750, 14, 25, 17},
{57000, 16, 27, 16},
{58000, 16, 43, 25},
{58250, 16, 38, 22},
{58750, 16, 40, 23},
{59000, 14, 26, 17},
{59341, 14, 40, 26},
{59400, 16, 44, 25},
{60000, 16, 32, 18},
{60500, 12, 39, 29},
{61000, 14, 49, 31},
{62000, 14, 37, 23},
{62250, 14, 42, 26},
{63000, 12, 21, 15},
{63500, 14, 28, 17},
{64000, 12, 27, 19},
{65000, 14, 32, 19},
{65250, 12, 29, 20},
{65500, 12, 32, 22},
{66000, 12, 22, 15},
{66667, 14, 38, 22},
{66750, 10, 21, 17},
{67000, 14, 33, 19},
{67750, 14, 58, 33},
{68000, 14, 30, 17},
{68179, 14, 46, 26},
{68250, 14, 46, 26},
{69000, 12, 23, 15},
{70000, 12, 28, 18},
{71000, 12, 30, 19},
{72000, 12, 24, 15},
{73000, 10, 23, 17},
{74000, 12, 23, 14},
{74176, 8, 100, 91},
{74250, 10, 22, 16},
{74481, 12, 43, 26},
{74500, 10, 29, 21},
{75000, 12, 25, 15},
{75250, 10, 39, 28},
{76000, 12, 27, 16},
{77000, 12, 53, 31},
{78000, 12, 26, 15},
{78750, 12, 28, 16},
{79000, 10, 38, 26},
{79500, 10, 28, 19},
{80000, 12, 32, 18},
{81000, 10, 21, 14},
{81081, 6, 100, 111},
{81624, 8, 29, 24},
{82000, 8, 17, 14},
{83000, 10, 40, 26},
{83950, 10, 28, 18},
{84000, 10, 28, 18},
{84750, 6, 16, 17},
{85000, 6, 17, 18},
{85250, 10, 30, 19},
{85750, 10, 27, 17},
{86000, 10, 43, 27},
{87000, 10, 29, 18},
{88000, 10, 44, 27},
{88500, 10, 41, 25},
{89000, 10, 28, 17},
{89012, 6, 90, 91},
{89100, 10, 33, 20},
{90000, 10, 25, 15},
{91000, 10, 32, 19},
{92000, 10, 46, 27},
{93000, 10, 31, 18},
{94000, 10, 40, 23},
{94500, 10, 28, 16},
{95000, 10, 44, 25},
{95654, 10, 39, 22},
{95750, 10, 39, 22},
{96000, 10, 32, 18},
{97000, 8, 23, 16},
{97750, 8, 42, 29},
{98000, 8, 45, 31},
{99000, 8, 22, 15},
{99750, 8, 34, 23},
{100000, 6, 20, 18},
{100500, 6, 19, 17},
{101000, 6, 37, 33},
{101250, 8, 21, 14},
{102000, 6, 17, 15},
{102250, 6, 25, 22},
{103000, 8, 29, 19},
{104000, 8, 37, 24},
{105000, 8, 28, 18},
{106000, 8, 22, 14},
{107000, 8, 46, 29},
{107214, 8, 27, 17},
{108000, 8, 24, 15},
{108108, 8, 173, 108},
{109000, 6, 23, 19},
{110000, 6, 22, 18},
{110013, 6, 22, 18},
{110250, 8, 49, 30},
{110500, 8, 36, 22},
{111000, 8, 23, 14},
{111264, 8, 150, 91},
{111375, 8, 33, 20},
{112000, 8, 63, 38},
{112500, 8, 25, 15},
{113100, 8, 57, 34},
{113309, 8, 42, 25},
{114000, 8, 27, 16},
{115000, 6, 23, 18},
{116000, 8, 43, 25},
{117000, 8, 26, 15},
{117500, 8, 40, 23},
{118000, 6, 38, 29},
{119000, 8, 30, 17},
{119500, 8, 46, 26},
{119651, 8, 39, 22},
{120000, 8, 32, 18},
{121000, 6, 39, 29},
{121250, 6, 31, 23},
{121750, 6, 23, 17},
{122000, 6, 42, 31},
{122614, 6, 30, 22},
{123000, 6, 41, 30},
{123379, 6, 37, 27},
{124000, 6, 51, 37},
{125000, 6, 25, 18},
{125250, 4, 13, 14},
{125750, 4, 27, 29},
{126000, 6, 21, 15},
{127000, 6, 24, 17},
{127250, 6, 41, 29},
{128000, 6, 27, 19},
{129000, 6, 43, 30},
{129859, 4, 25, 26},
{130000, 6, 26, 18},
{130250, 6, 42, 29},
{131000, 6, 32, 22},
{131500, 6, 38, 26},
{131850, 6, 41, 28},
{132000, 6, 22, 15},
{132750, 6, 28, 19},
{133000, 6, 34, 23},
{133330, 6, 37, 25},
{134000, 6, 61, 41},
{135000, 6, 21, 14},
{135250, 6, 167, 111},
{136000, 6, 62, 41},
{137000, 6, 35, 23},
{138000, 6, 23, 15},
{138500, 6, 40, 26},
{138750, 6, 37, 24},
{139000, 6, 34, 22},
{139050, 6, 34, 22},
{139054, 6, 34, 22},
{140000, 6, 28, 18},
{141000, 6, 36, 23},
{141500, 6, 22, 14},
{142000, 6, 30, 19},
{143000, 6, 27, 17},
{143472, 4, 17, 16},
{144000, 6, 24, 15},
{145000, 6, 29, 18},
{146000, 6, 47, 29},
{146250, 6, 26, 16},
{147000, 6, 49, 30},
{147891, 6, 23, 14},
{148000, 6, 23, 14},
{148250, 6, 28, 17},
{148352, 4, 100, 91},
{148500, 6, 33, 20},
{149000, 6, 48, 29},
{150000, 6, 25, 15},
{151000, 4, 19, 17},
{152000, 6, 27, 16},
{152280, 6, 44, 26},
{153000, 6, 34, 20},
{154000, 6, 53, 31},
{155000, 6, 31, 18},
{155250, 6, 50, 29},
{155750, 6, 45, 26},
{156000, 6, 26, 15},
{157000, 6, 61, 35},
{157500, 6, 28, 16},
{158000, 6, 65, 37},
{158250, 6, 44, 25},
{159000, 6, 53, 30},
{159500, 6, 39, 22},
{160000, 6, 32, 18},
{161000, 4, 31, 26},
{162000, 4, 18, 15},
{162162, 4, 131, 109},
{162500, 4, 53, 44},
{163000, 4, 29, 24},
{164000, 4, 17, 14},
{165000, 4, 22, 18},
{166000, 4, 32, 26},
{167000, 4, 26, 21},
{168000, 4, 46, 37},
{169000, 4, 104, 83},
{169128, 4, 64, 51},
{169500, 4, 39, 31},
{170000, 4, 34, 27},
{171000, 4, 19, 15},
{172000, 4, 51, 40},
{172750, 4, 32, 25},
{172800, 4, 32, 25},
{173000, 4, 41, 32},
{174000, 4, 49, 38},
{174787, 4, 22, 17},
{175000, 4, 35, 27},
{176000, 4, 30, 23},
{177000, 4, 38, 29},
{178000, 4, 29, 22},
{178500, 4, 37, 28},
{179000, 4, 53, 40},
{179500, 4, 73, 55},
{180000, 4, 20, 15},
{181000, 4, 55, 41},
{182000, 4, 31, 23},
{183000, 4, 42, 31},
{184000, 4, 30, 22},
{184750, 4, 26, 19},
{185000, 4, 37, 27},
{186000, 4, 51, 37},
{187000, 4, 36, 26},
{188000, 4, 32, 23},
{189000, 4, 21, 15},
{190000, 4, 38, 27},
{190960, 4, 41, 29},
{191000, 4, 41, 29},
{192000, 4, 27, 19},
{192250, 4, 37, 26},
{193000, 4, 20, 14},
{193250, 4, 53, 37},
{194000, 4, 23, 16},
{194208, 4, 23, 16},
{195000, 4, 26, 18},
{196000, 4, 45, 31},
{197000, 4, 35, 24},
{197750, 4, 41, 28},
{198000, 4, 22, 15},
{198500, 4, 25, 17},
{199000, 4, 28, 19},
{200000, 4, 37, 25},
{201000, 4, 61, 41},
{202000, 4, 112, 75},
{202500, 4, 21, 14},
{203000, 4, 146, 97},
{204000, 4, 62, 41},
{204750, 4, 44, 29},
{205000, 4, 38, 25},
{206000, 4, 29, 19},
{207000, 4, 23, 15},
{207500, 4, 40, 26},
{208000, 4, 37, 24},
{208900, 4, 48, 31},
{209000, 4, 48, 31},
{209250, 4, 31, 20},
{210000, 4, 28, 18},
{211000, 4, 25, 16},
{212000, 4, 22, 14},
{213000, 4, 30, 19},
{213750, 4, 38, 24},
{214000, 4, 46, 29},
{214750, 4, 35, 22},
{215000, 4, 43, 27},
{216000, 4, 24, 15},
{217000, 4, 37, 23},
{218000, 4, 42, 26},
{218250, 4, 42, 26},
{218750, 4, 34, 21},
{219000, 4, 47, 29},
{220000, 4, 44, 27},
{220640, 4, 49, 30},
{220750, 4, 36, 22},
{221000, 4, 36, 22},
{222000, 4, 23, 14},
{222525, 4, 28, 17},
{222750, 4, 33, 20},
{227000, 4, 37, 22},
{230250, 4, 29, 17},
{233500, 4, 38, 22},
{235000, 4, 40, 23},
{238000, 4, 30, 17},
{241500, 2, 17, 19},
{245250, 2, 20, 22},
{247750, 2, 22, 24},
{253250, 2, 15, 16},
{256250, 2, 18, 19},
{262500, 2, 31, 32},
{267250, 2, 66, 67},
{268500, 2, 94, 95},
{270000, 2, 14, 14},
{272500, 2, 77, 76},
{273750, 2, 57, 56},
{280750, 2, 24, 23},
{281250, 2, 23, 22},
{286000, 2, 17, 16},
{291750, 2, 26, 24},
{296703, 2, 56, 51},
{297000, 2, 22, 20},
{298000, 2, 21, 19},
};
static void intel_ddi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@ -675,7 +292,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
int pipe = intel_crtc->pipe;
int type = intel_encoder->type;
DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
intel_crtc->eld_vld = false;
@ -686,22 +303,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
intel_dp->DP = intel_dig_port->port_reversal |
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
switch (intel_dp->lane_count) {
case 1:
intel_dp->DP |= DDI_PORT_WIDTH_X1;
break;
case 2:
intel_dp->DP |= DDI_PORT_WIDTH_X2;
break;
case 4:
intel_dp->DP |= DDI_PORT_WIDTH_X4;
break;
default:
intel_dp->DP |= DDI_PORT_WIDTH_X4;
WARN(1, "Unexpected DP lane count %d\n",
intel_dp->lane_count);
break;
}
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
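	/* Editor's note: DDI_PORT_WIDTH() is not defined in this diff; judging
	 * by the FDI paths above, which still program (lanes - 1) << 1 by hand,
	 * it presumably expands to ((lanes) - 1) << 1, folding the removed
	 * x1/x2/x4 switch into a single expression. */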
if (intel_dp->has_audio) {
DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
@ -748,8 +350,8 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
}
if (num_encoders != 1)
WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
intel_crtc->pipe);
WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders,
pipe_name(intel_crtc->pipe));
BUG_ON(ret == NULL);
return ret;
@ -802,27 +404,224 @@ void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
}
static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
#define LC_FREQ 2700
#define LC_FREQ_2K (LC_FREQ * 2000)
#define P_MIN 2
#define P_MAX 64
#define P_INC 2
/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800
#define ABS_DIFF(a, b) ((a > b) ? (a - b) : (b - a))
struct wrpll_rnp {
unsigned p, n2, r2;
};
static unsigned wrpll_get_budget_for_freq(int clock)
{
u32 i;
unsigned budget;
for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
if (clock <= wrpll_tmds_clock_table[i].clock)
break;
switch (clock) {
case 25175000:
case 25200000:
case 27000000:
case 27027000:
case 37762500:
case 37800000:
case 40500000:
case 40541000:
case 54000000:
case 54054000:
case 59341000:
case 59400000:
case 72000000:
case 74176000:
case 74250000:
case 81000000:
case 81081000:
case 89012000:
case 89100000:
case 108000000:
case 108108000:
case 111264000:
case 111375000:
case 148352000:
case 148500000:
case 162000000:
case 162162000:
case 222525000:
case 222750000:
case 296703000:
case 297000000:
budget = 0;
break;
case 233500000:
case 245250000:
case 247750000:
case 253250000:
case 298000000:
budget = 1500;
break;
case 169128000:
case 169500000:
case 179500000:
case 202000000:
budget = 2000;
break;
case 256250000:
case 262500000:
case 270000000:
case 272500000:
case 273750000:
case 280750000:
case 281250000:
case 286000000:
case 291750000:
budget = 4000;
break;
case 267250000:
case 268500000:
budget = 5000;
break;
default:
budget = 1000;
break;
}
if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
i--;
return budget;
}
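/* Editor's note: the budget returned above is in ppm of the target
 * frequency, as the "PPM-based budget" comment below spells out.  A budget
 * of 0 makes the search purely minimize the relative error for those
 * standard TV/display rates, while the default 1000 ppm (roughly 148.5 kHz
 * on a 148.5 MHz clock) lets it trade a little accuracy for a
 * better-behaved PLL. */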
*p = wrpll_tmds_clock_table[i].p;
*n2 = wrpll_tmds_clock_table[i].n2;
*r2 = wrpll_tmds_clock_table[i].r2;
static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
unsigned r2, unsigned n2, unsigned p,
struct wrpll_rnp *best)
{
uint64_t a, b, c, d, diff, diff_best;
if (wrpll_tmds_clock_table[i].clock != clock)
DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n",
wrpll_tmds_clock_table[i].clock, clock);
/* No best (r,n,p) yet */
if (best->p == 0) {
best->p = p;
best->n2 = n2;
best->r2 = r2;
return;
}
DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
clock, *p, *n2, *r2);
/*
* Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
* freq2k.
*
* delta = 1e6 *
* abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
* freq2k;
*
* and we would like delta <= budget.
*
* If the discrepancy is above the PPM-based budget, always prefer to
* improve upon the previous solution. However, if you're within the
* budget, try to maximize Ref * VCO, that is N / (P * R^2).
*/
a = freq2k * budget * p * r2;
b = freq2k * budget * best->p * best->r2;
diff = ABS_DIFF((freq2k * p * r2), (LC_FREQ_2K * n2));
diff_best = ABS_DIFF((freq2k * best->p * best->r2),
(LC_FREQ_2K * best->n2));
c = 1000000 * diff;
d = 1000000 * diff_best;
if (a < c && b < d) {
/* If both are above the budget, pick the closer */
if (best->p * best->r2 * diff < p * r2 * diff_best) {
best->p = p;
best->n2 = n2;
best->r2 = r2;
}
} else if (a >= c && b < d) {
/* If A is below the threshold but B is above it? Update. */
best->p = p;
best->n2 = n2;
best->r2 = r2;
} else if (a >= c && b >= d) {
/* Both are below the limit, so pick the higher n2/(r2*r2) */
if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
best->p = p;
best->n2 = n2;
best->r2 = r2;
}
}
/* Otherwise a < c && b >= d, do nothing */
}
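/* Editor's note: the four-way comparison above is the budget test with the
 * division removed.  Starting from
 *     1e6 * |freq2k - LC_FREQ_2K * n2 / (p * r2)| / freq2k <= budget
 * and multiplying both sides by freq2k * p * r2 gives
 *     1e6 * |freq2k * p * r2 - LC_FREQ_2K * n2| <= budget * freq2k * p * r2
 * which is exactly c <= a for the candidate (and d <= b for the stored
 * best), so candidates can be classified using only integer multiplies. */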
static void
intel_ddi_calculate_wrpll(int clock /* in Hz */,
unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
uint64_t freq2k;
unsigned p, n2, r2;
struct wrpll_rnp best = { 0, 0, 0 };
unsigned budget;
freq2k = clock / 100;
budget = wrpll_get_budget_for_freq(clock);
/* Special case handling for 540 pixel clock: bypass WR PLL entirely
* and directly pass the LC PLL to it. */
if (freq2k == 5400000) {
*n2_out = 2;
*p_out = 1;
*r2_out = 2;
return;
}
/*
* Ref = LC_FREQ / R, where Ref is the actual reference input seen by
* the WR PLL.
*
* We want R so that REF_MIN <= Ref <= REF_MAX.
* Injecting R2 = 2 * R gives:
* REF_MAX * r2 > LC_FREQ * 2 and
* REF_MIN * r2 < LC_FREQ * 2
*
* Which means the desired boundaries for r2 are:
* LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
*
*/
for (r2 = LC_FREQ * 2 / REF_MAX + 1;
r2 <= LC_FREQ * 2 / REF_MIN;
r2++) {
/*
* VCO = N * Ref, that is: VCO = N * LC_FREQ / R
*
* Once again we want VCO_MIN <= VCO <= VCO_MAX.
* Injecting R2 = 2 * R and N2 = 2 * N, we get:
* VCO_MAX * r2 > n2 * LC_FREQ and
* VCO_MIN * r2 < n2 * LC_FREQ)
*
* Which means the desired boundaries for n2 are:
* VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
*/
for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
n2 <= VCO_MAX * r2 / LC_FREQ;
n2++) {
for (p = P_MIN; p <= P_MAX; p += P_INC)
wrpll_update_rnp(freq2k, budget,
r2, n2, p, &best);
}
}
*n2_out = best.n2;
*p_out = best.p;
*r2_out = best.r2;
DRM_DEBUG_KMS("WRPLL: %dHz refresh rate with p=%d, n2=%d r2=%d\n",
clock, *p_out, *n2_out, *r2_out);
}
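As a cross-check of the new search against the table it replaces, here is a standalone sketch (userspace-style, not driver code); the divider triple and the LC_FREQ values are taken from the hunks above:

#include <assert.h>
#include <stdint.h>

#define LC_FREQ    2700
#define LC_FREQ_2K (LC_FREQ * 2000)

static void wrpll_check_148500(void)
{
	/* removed table entry: 148500 kHz TMDS clock with p = 6, n2 = 33, r2 = 20 */
	const unsigned p = 6, n2 = 33, r2 = 20;
	const uint64_t freq2k = 148500000ULL / 100;	/* clock in Hz / 100 */

	/* the relation wrpll_update_rnp() optimizes; here the match is exact:
	 * 5400000 * 33 / 120 == 1485000 */
	assert((uint64_t)LC_FREQ_2K * n2 / (p * r2) == freq2k);

	/* the search ranges follow from the comments above (integer division):
	 * r2 runs from 2 * 2700 / 400 + 1 = 14 up to 2 * 2700 / 48 = 112 */
}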
bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
@ -863,7 +662,7 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
return true;
} else if (type == INTEL_OUTPUT_HDMI) {
int p, n2, r2;
unsigned p, n2, r2;
if (plls->wrpll1_refcount == 0) {
DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
@ -885,7 +684,7 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
"WRPLL already enabled\n");
intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
@ -995,7 +794,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
/* Can only use the always-on power well for eDP when
* not using the panel fitter, and when not using motion
* blur mitigation (which we don't support). */
if (dev_priv->pch_pf_size)
if (intel_crtc->config.pch_pfit.size)
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
@ -1022,7 +821,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
} else if (type == INTEL_OUTPUT_ANALOG) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
temp |= (intel_crtc->fdi_lanes - 1) << 1;
temp |= (intel_crtc->config.fdi_lanes - 1) << 1;
} else if (type == INTEL_OUTPUT_DISPLAYPORT ||
type == INTEL_OUTPUT_EDP) {
@ -1030,25 +829,10 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
switch (intel_dp->lane_count) {
case 1:
temp |= TRANS_DDI_PORT_WIDTH_X1;
break;
case 2:
temp |= TRANS_DDI_PORT_WIDTH_X2;
break;
case 4:
temp |= TRANS_DDI_PORT_WIDTH_X4;
break;
default:
temp |= TRANS_DDI_PORT_WIDTH_X4;
WARN(1, "Unsupported lane count %d\n",
intel_dp->lane_count);
}
temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
} else {
WARN(1, "Invalid encoder type %d for pipe %d\n",
intel_encoder->type, pipe);
WARN(1, "Invalid encoder type %d for pipe %c\n",
intel_encoder->type, pipe_name(pipe));
}
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
@ -1148,7 +932,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
}
}
DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
return false;
}
@ -1334,7 +1118,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
ironlake_edp_backlight_on(intel_dp);
}
if (intel_crtc->eld_vld) {
if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
@ -1352,9 +1136,12 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
(pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
}
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@ -1518,16 +1305,6 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
return;
}
if (port != PORT_A) {
hdmi_connector = kzalloc(sizeof(struct intel_connector),
GFP_KERNEL);
if (!hdmi_connector) {
kfree(dp_connector);
kfree(intel_dig_port);
return;
}
}
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
@ -1545,8 +1322,6 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_dig_port->port = port;
intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
DDI_BUF_PORT_REVERSAL;
if (hdmi_connector)
intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
@ -1554,7 +1329,16 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->cloneable = false;
intel_encoder->hot_plug = intel_ddi_hot_plug;
if (hdmi_connector)
intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
intel_dp_init_connector(intel_dig_port, dp_connector);
if (intel_encoder->type != INTEL_OUTPUT_EDP) {
hdmi_connector = kzalloc(sizeof(struct intel_connector),
GFP_KERNEL);
if (!hdmi_connector) {
return;
}
intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
}
}

The diff for this file is not shown because of its large size.


@ -52,17 +52,11 @@ static bool is_edp(struct intel_dp *intel_dp)
return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
/**
* is_pch_edp - is the port on the PCH and attached to an eDP panel?
* @intel_dp: DP struct
*
* Returns true if the given DP struct corresponds to a PCH DP port attached
* to an eDP panel, false otherwise. Helpful for determining whether we
* may need FDI resources for a given DP output or not.
*/
static bool is_pch_edp(struct intel_dp *intel_dp)
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
return intel_dp->is_pch_edp;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
return intel_dig_port->base.base.dev;
}
/**
@ -73,14 +67,12 @@ static bool is_pch_edp(struct intel_dp *intel_dp)
*/
static bool is_cpu_edp(struct intel_dp *intel_dp)
{
return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
return intel_dig_port->base.base.dev;
return is_edp(intel_dp) &&
(port == PORT_A || (port == PORT_C && IS_VALLEYVIEW(dev)));
}
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
@ -88,25 +80,6 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
/**
* intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
* @encoder: DRM encoder
*
* Return true if @encoder corresponds to a PCH attached eDP panel. Needed
* by intel_display.c.
*/
bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp;
if (!encoder)
return false;
intel_dp = enc_to_intel_dp(encoder);
return is_pch_edp(intel_dp);
}
static void intel_dp_link_down(struct intel_dp *intel_dp);
static int
@ -660,6 +633,49 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
return ret;
}
static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config, int link_bw)
{
struct drm_device *dev = encoder->base.dev;
if (IS_G4X(dev)) {
if (link_bw == DP_LINK_BW_1_62) {
pipe_config->dpll.p1 = 2;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.n = 2;
pipe_config->dpll.m1 = 23;
pipe_config->dpll.m2 = 8;
} else {
pipe_config->dpll.p1 = 1;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.n = 1;
pipe_config->dpll.m1 = 14;
pipe_config->dpll.m2 = 2;
}
pipe_config->clock_set = true;
} else if (IS_HASWELL(dev)) {
/* Haswell has special-purpose DP DDI clocks. */
} else if (HAS_PCH_SPLIT(dev)) {
if (link_bw == DP_LINK_BW_1_62) {
pipe_config->dpll.n = 1;
pipe_config->dpll.p1 = 2;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.m1 = 12;
pipe_config->dpll.m2 = 9;
} else {
pipe_config->dpll.n = 2;
pipe_config->dpll.p1 = 1;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.m1 = 14;
pipe_config->dpll.m2 = 8;
}
pipe_config->clock_set = true;
} else if (IS_VALLEYVIEW(dev)) {
/* FIXME: Need to figure out optimized DP clocks for vlv. */
}
}
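For reference, the two link-rate codes handled above map directly to the link clocks these fixed divider sets must produce; a hypothetical helper (not part of this patch) spelling that relationship out:

#include <stdint.h>

static int dp_link_bw_to_link_clock_khz(uint8_t link_bw)
{
	/* DPCD encodes the link rate in multiples of 0.27 GHz, so
	 * DP_LINK_BW_1_62 (0x06) -> 162000 kHz and
	 * DP_LINK_BW_2_7 (0x0a) -> 270000 kHz. */
	return link_bw * 27000;
}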
bool
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
@ -667,8 +683,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
struct drm_display_mode *mode = &pipe_config->requested_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_crtc *intel_crtc = encoder->new_crtc;
struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
@ -685,9 +701,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
intel_pch_panel_fitting(dev,
intel_connector->panel.fitting_mode,
mode, adjusted_mode);
if (!HAS_PCH_SPLIT(dev))
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
else
intel_pch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
}
/* We need to take the panel's fixed mode into account. */
target_clock = adjusted_mode->clock;
@ -702,8 +721,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
if (is_edp(intel_dp) && dev_priv->edp.bpp)
bpp = min_t(int, bpp, dev_priv->edp.bpp);
if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
for (; bpp >= 6*3; bpp -= 2*3) {
mode_rate = intel_dp_link_required(target_clock, bpp);
@ -755,6 +774,8 @@ found:
target_clock, adjusted_mode->clock,
&pipe_config->dp_m_n);
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
return true;
}
@ -833,18 +854,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/* Handle DP bits in common between all three register formats */
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
switch (intel_dp->lane_count) {
case 1:
intel_dp->DP |= DP_PORT_WIDTH_1;
break;
case 2:
intel_dp->DP |= DP_PORT_WIDTH_2;
break;
case 4:
intel_dp->DP |= DP_PORT_WIDTH_4;
break;
}
if (intel_dp->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
@ -1381,15 +1392,77 @@ static void intel_enable_dp(struct intel_encoder *encoder)
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
if (IS_VALLEYVIEW(dev)) {
struct intel_digital_port *dport =
enc_to_dig_port(&encoder->base);
int channel = vlv_dport_to_channel(dport);
vlv_wait_port_ready(dev_priv, channel);
}
}
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
ironlake_edp_pll_on(intel_dp);
if (IS_VALLEYVIEW(dev)) {
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
val = intel_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
intel_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
0x00760018);
intel_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
0x00400888);
}
}
static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int port = vlv_dport_to_channel(dport);
if (!IS_VALLEYVIEW(dev))
return;
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
/* Program Tx lane resets to default */
intel_dpio_write(dev_priv, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
intel_dpio_write(dev_priv, DPIO_PCS_CLK(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
intel_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
intel_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
intel_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
}
/*
@ -1452,7 +1525,9 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_1200;
else if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
return DP_TRAIN_VOLTAGE_SWING_800;
else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
return DP_TRAIN_VOLTAGE_SWING_1200;
@ -1477,7 +1552,19 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_9_5;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
@ -1502,6 +1589,103 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
}
}
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
uint8_t train_set = intel_dp->train_set[0];
int port = vlv_dport_to_channel(dport);
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:
preemph_reg_value = 0x0004000;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
demph_reg_value = 0x2B405555;
uniqtranscale_reg_value = 0x552AB83A;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
demph_reg_value = 0x2B404040;
uniqtranscale_reg_value = 0x5548B83A;
break;
case DP_TRAIN_VOLTAGE_SWING_800:
demph_reg_value = 0x2B245555;
uniqtranscale_reg_value = 0x5560B83A;
break;
case DP_TRAIN_VOLTAGE_SWING_1200:
demph_reg_value = 0x2B405555;
uniqtranscale_reg_value = 0x5598DA3A;
break;
default:
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_3_5:
preemph_reg_value = 0x0002000;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
demph_reg_value = 0x2B404040;
uniqtranscale_reg_value = 0x5552B83A;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
demph_reg_value = 0x2B404848;
uniqtranscale_reg_value = 0x5580B83A;
break;
case DP_TRAIN_VOLTAGE_SWING_800:
demph_reg_value = 0x2B404040;
uniqtranscale_reg_value = 0x55ADDA3A;
break;
default:
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_6:
preemph_reg_value = 0x0000000;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
demph_reg_value = 0x2B305555;
uniqtranscale_reg_value = 0x5570B83A;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
demph_reg_value = 0x2B2B4040;
uniqtranscale_reg_value = 0x55ADDA3A;
break;
default:
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_9_5:
preemph_reg_value = 0x0006000;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
demph_reg_value = 0x1B405555;
uniqtranscale_reg_value = 0x55ADDA3A;
break;
default:
return 0;
}
break;
default:
return 0;
}
intel_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
intel_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
intel_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
uniqtranscale_reg_value);
intel_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
intel_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
intel_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
intel_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
return 0;
}
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
@ -1676,7 +1860,10 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
if (HAS_DDI(dev)) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev)) {
signal_levels = intel_vlv_signal_levels(intel_dp);
mask = 0;
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen7_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@ -2588,11 +2775,11 @@ bool intel_dpd_is_edp(struct drm_device *dev)
struct child_device_config *p_child;
int i;
if (!dev_priv->child_dev_num)
if (!dev_priv->vbt.child_dev_num)
return false;
for (i = 0; i < dev_priv->child_dev_num; i++) {
p_child = dev_priv->child_dev + i;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
p_child = dev_priv->vbt.child_dev + i;
if (p_child->dvo_port == PORT_IDPD &&
p_child->device_type == DEVICE_TYPE_eDP)
@ -2670,7 +2857,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
vbt = dev_priv->edp.pps;
vbt = dev_priv->vbt.edp_pps;
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */
@ -2792,28 +2979,39 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
if (HAS_PCH_SPLIT(dev) && port == PORT_D)
if (intel_dpd_is_edp(dev))
intel_dp->is_pch_edp = true;
type = DRM_MODE_CONNECTOR_DisplayPort;
/*
* FIXME : We need to initialize built-in panels before external panels.
* For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
*/
if (IS_VALLEYVIEW(dev) && port == PORT_C) {
switch (port) {
case PORT_A:
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else if (port == PORT_A || is_pch_edp(intel_dp)) {
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else {
/* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
* DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
* rewrite it.
*/
type = DRM_MODE_CONNECTOR_DisplayPort;
break;
case PORT_C:
if (IS_VALLEYVIEW(dev))
type = DRM_MODE_CONNECTOR_eDP;
break;
case PORT_D:
if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
type = DRM_MODE_CONNECTOR_eDP;
break;
default: /* silence GCC warning */
break;
}
/*
* For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
* for DP the encoder type can be set by the caller to
* INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
*/
if (type == DRM_MODE_CONNECTOR_eDP)
intel_encoder->type = INTEL_OUTPUT_EDP;
DRM_DEBUG_KMS("Adding %s connector on port %c\n",
type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
port_name(port));
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
@ -2929,8 +3127,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
}
/* fallback to VBT if available for eDP */
if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
if (fixed_mode)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
@ -2986,6 +3184,8 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->disable = intel_disable_dp;
intel_encoder->post_disable = intel_post_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
if (IS_VALLEYVIEW(dev))
intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
intel_dig_port->port = port;
intel_dig_port->dp.output_reg = output_reg;


@ -120,7 +120,6 @@ struct intel_encoder {
struct intel_crtc *new_crtc;
int type;
bool needs_tv_clock;
/*
* Intel hw has only one MUX where encoders could be clone, hence a
* simple flag is enough to compute the possible_clones mask.
@ -177,6 +176,18 @@ struct intel_connector {
u8 polled;
};
typedef struct dpll {
/* given values */
int n;
int m1, m2;
int p1, p2;
/* derived values */
int dot;
int vco;
int m;
int p;
} intel_clock_t;
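/* Editor's note: the "derived values" get filled in by the platform clock
 * code in intel_display.c.  Roughly, m is combined from m1 and m2,
 * p = p1 * p2, vco = refclk * m / n and dot = vco / p; the exact encoding
 * of m and n differs between platforms, so treat this as a sketch of the
 * relationship rather than the literal formulas. */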
struct intel_crtc_config {
struct drm_display_mode requested_mode;
struct drm_display_mode adjusted_mode;
@ -201,18 +212,30 @@ struct intel_crtc_config {
/* DP has a bunch of special case unfortunately, so mark the pipe
* accordingly. */
bool has_dp_encoder;
/*
* Enable dithering, used when the selected pipe bpp doesn't match the
* plane bpp.
*/
bool dither;
/* Controls for the clock computation, to override various stages. */
bool clock_set;
/* SDVO TV has a bunch of special case. To make multifunction encoders
* work correctly, we need to track this at runtime.*/
bool sdvo_tv_clock;
/*
* crtc bandwidth limit, don't increase pipe bpp or clock if not really
* required. This is set in the 2nd loop of calling encoder's
* ->compute_config if the first pick doesn't work out.
*/
bool bw_constrained;
/* Settings for the intel dpll used on pretty much everything but
* haswell. */
struct dpll {
unsigned n;
unsigned m1, m2;
unsigned p1, p2;
} dpll;
struct dpll dpll;
int pipe_bpp;
struct intel_link_m_n dp_m_n;
@ -224,6 +247,23 @@ struct intel_crtc_config {
int pixel_target_clock;
/* Used by SDVO (and if we ever fix it, HDMI). */
unsigned pixel_multiplier;
/* Panel fitter controls for gen2-gen4 + VLV */
struct {
u32 control;
u32 pgm_ratios;
u32 lvds_border_bits;
} gmch_pfit;
/* Panel fitter placement and size for Ironlake+ */
struct {
u32 pos;
u32 size;
} pch_pfit;
/* FDI configuration, only valid if has_pch_encoder is set. */
int fdi_lanes;
struct intel_link_m_n fdi_m_n;
};
struct intel_crtc {
@ -242,7 +282,6 @@ struct intel_crtc {
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
int fdi_lanes;
atomic_t unpin_work_count;
@ -265,6 +304,10 @@ struct intel_crtc {
/* reset counter value when the last flip was submitted */
unsigned int reset_counter;
/* Access to these should be protected by dev_priv->irq_lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
};
struct intel_plane {
@ -411,7 +454,6 @@ struct intel_dp {
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
@ -431,6 +473,19 @@ struct intel_digital_port {
struct intel_hdmi hdmi;
};
static inline int
vlv_dport_to_channel(struct intel_digital_port *dport)
{
switch (dport->port) {
case PORT_B:
return 0;
case PORT_C:
return 1;
default:
BUG();
}
}
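/* Editor's note: in this code only ports B and C have DPIO lane groups on
 * Valleyview (hence the BUG() for anything else); the returned channel
 * index is what the DPIO_*(port) writes elsewhere in this patch key off. */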
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
@ -474,6 +529,7 @@ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern void intel_attach_force_audio_property(struct drm_connector *connector);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev,
int hdmi_reg, enum port port);
@ -512,7 +568,6 @@ extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
@ -524,12 +579,14 @@ extern void intel_panel_fini(struct intel_panel *panel);
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode);
extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode);
extern void intel_panel_set_backlight(struct drm_device *dev,
u32 level, u32 max);
extern int intel_panel_setup_backlight(struct drm_connector *connector);
extern void intel_panel_enable_backlight(struct drm_device *dev,
enum pipe pipe);
@ -565,19 +622,17 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector
return to_intel_connector(connector)->encoder;
}
static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port =
container_of(encoder, struct intel_digital_port, base.base);
return &intel_dig_port->dp;
}
static inline struct intel_digital_port *
enc_to_dig_port(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_digital_port, base.base);
}
static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
return &enc_to_dig_port(encoder)->dp;
}
static inline struct intel_digital_port *
dp_to_dig_port(struct intel_dp *intel_dp)
{
@ -607,6 +662,7 @@ intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
@ -660,13 +716,9 @@ extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_suspend_hw(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
extern void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
extern void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
extern void intel_prepare_ddi(struct drm_device *dev);
extern void hsw_fdi_link_train(struct drm_crtc *crtc);
extern void intel_ddi_init(struct drm_device *dev, enum port port);
@ -690,6 +742,8 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
extern void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
u32 val);
/* Power-related functions, located in intel_pm.c */
extern void intel_init_pm(struct drm_device *dev);
@ -701,7 +755,8 @@ extern void intel_update_fbc(struct drm_device *dev);
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
extern bool intel_using_power_well(struct drm_device *dev);
extern bool intel_display_power_enabled(struct drm_device *dev,
enum intel_display_power_domain domain);
extern void intel_init_power_well(struct drm_device *dev);
extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_enable_gt_powersave(struct drm_device *dev);
@ -728,5 +783,11 @@ intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
extern void intel_display_handle_reset(struct drm_device *dev);
extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable);
extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
#endif /* __INTEL_DRV_H__ */


@ -53,6 +53,13 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.slave_addr = CH7xxx_ADDR,
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
.dvo_reg = DVOC,
.slave_addr = 0x75, /* For some ch7010 */
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_LVDS,
.name = "ivch",


@ -697,6 +697,14 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
}
if (IS_VALLEYVIEW(dev)) {
struct intel_digital_port *dport =
enc_to_dig_port(&encoder->base);
int channel = vlv_dport_to_channel(dport);
vlv_wait_port_ready(dev_priv, channel);
}
}
static void intel_disable_hdmi(struct intel_encoder *encoder)
@ -775,6 +783,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
int desired_bpp;
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
@ -794,14 +804,31 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
/*
* HDMI is either 12 or 8, so if the display lets 10bpc sneak
* through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
* outputs.
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
if (pipe_config->pipe_bpp > 8*3 && HAS_PCH_SPLIT(dev)) {
DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
pipe_config->pipe_bpp = 12*3;
if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= 225000
&& HAS_PCH_SPLIT(dev)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
/* Need to adjust the port link by 1.5x for 12bpc. */
adjusted_mode->clock = clock_12bpc;
pipe_config->pixel_target_clock =
pipe_config->requested_mode.clock;
} else {
DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
pipe_config->pipe_bpp = 8*3;
DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
desired_bpp = 8*3;
}
if (!pipe_config->bw_constrained) {
DRM_DEBUG_KMS("forcing pipe bpc to %i for HDMI\n", desired_bpp);
pipe_config->pipe_bpp = desired_bpp;
}
if (adjusted_mode->clock > 225000) {
DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
return false;
}
return true;
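A quick arithmetic sketch of the 12bpc path above (hypothetical helper, not in the driver): a 1080p mode at 148500 kHz gives a 1.5x-adjusted clock of 222750 kHz, which still fits under the 225000 kHz limit used here, while pixel clocks above 150000 kHz fall back to 8bpc.

#include <stdbool.h>

static int hdmi_port_clock_khz(int pixel_clock_khz, bool want_12bpc)
{
	/* mirrors the checks above: 12bpc needs 1.5x the port clock, and a
	 * mode whose (possibly adjusted) clock exceeds 225 MHz is rejected */
	int clock = want_12bpc ? pixel_clock_khz * 3 / 2 : pixel_clock_khz;

	return clock <= 225000 ? clock : -1;	/* -1: mode rejected */
}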
@ -955,6 +982,101 @@ done:
return 0;
}
static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
if (!IS_VALLEYVIEW(dev))
return;
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
/* Enable clock channels for this port */
val = intel_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
/* HDMI 1.0V-2dB */
intel_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0);
intel_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port),
0x2b245f5f);
intel_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
0x5578b83a);
intel_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port),
0x0c782040);
intel_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port),
0x2b247878);
intel_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
intel_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
0x00002000);
intel_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
DPIO_TX_OCALINIT_EN);
/* Program lane clock */
intel_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
0x00760018);
intel_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
0x00400888);
}
static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int port = vlv_dport_to_channel(dport);
if (!IS_VALLEYVIEW(dev))
return;
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
/* Program Tx lane resets to default */
intel_dpio_write(dev_priv, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
intel_dpio_write(dev_priv, DPIO_PCS_CLK(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
intel_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
intel_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
intel_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
intel_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
0x00002000);
intel_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
DPIO_TX_OCALINIT_EN);
}
static void intel_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int port = vlv_dport_to_channel(dport);
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
mutex_lock(&dev_priv->dpio_lock);
intel_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000);
intel_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060);
mutex_unlock(&dev_priv->dpio_lock);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
@ -1094,6 +1216,11 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
intel_encoder->enable = intel_enable_hdmi;
intel_encoder->disable = intel_disable_hdmi;
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
intel_encoder->post_disable = intel_hdmi_post_disable;
}
intel_encoder->type = INTEL_OUTPUT_HDMI;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);


@ -49,8 +49,6 @@ struct intel_lvds_connector {
struct intel_lvds_encoder {
struct intel_encoder base;
u32 pfit_control;
u32 pfit_pgm_ratios;
bool is_dual_link;
u32 reg;
@ -118,7 +116,8 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
}
/* set the corresponsding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
temp &= ~LVDS_BORDER_ENABLE;
temp |= intel_crtc->config.gmch_pfit.lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
@ -136,7 +135,10 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the PIPECONF reg. */
if (INTEL_INFO(dev)->gen == 4) {
if (dev_priv->lvds_dither)
/* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels. */
if (intel_crtc->config.dither &&
intel_crtc->config.pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
@ -150,29 +152,6 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
I915_WRITE(lvds_encoder->reg, temp);
}
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *enc = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
if (HAS_PCH_SPLIT(dev) || !enc->pfit_control)
return;
/*
* Enable automatic panel scaling so that non-native modes
* fill the screen. The panel fitter should only be
* adjusted whilst the pipe is disabled, according to
* register description and PRM.
*/
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
enc->pfit_control,
enc->pfit_pgm_ratios);
I915_WRITE(PFIT_PGM_RATIOS, enc->pfit_pgm_ratios);
I915_WRITE(PFIT_CONTROL, enc->pfit_control);
}
/**
* Sets the power state for the panel.
*/
@ -241,62 +220,6 @@ static int intel_lvds_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
static void
centre_horizontally(struct drm_display_mode *mode,
int width)
{
u32 border, sync_pos, blank_width, sync_width;
/* keep the hsync and hblank widths constant */
sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
sync_pos = (blank_width - sync_width + 1) / 2;
border = (mode->hdisplay - width + 1) / 2;
border += border & 1; /* make the border even */
mode->crtc_hdisplay = width;
mode->crtc_hblank_start = width + border;
mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
}
static void
centre_vertically(struct drm_display_mode *mode,
int height)
{
u32 border, sync_pos, blank_width, sync_width;
/* keep the vsync and vblank widths constant */
sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
sync_pos = (blank_width - sync_width + 1) / 2;
border = (mode->vdisplay - height + 1) / 2;
mode->crtc_vdisplay = height;
mode->crtc_vblank_start = height + border;
mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
}
static inline u32 panel_fitter_scaling(u32 source, u32 target)
{
/*
* Floating point operation is not supported. So the FACTOR
* is defined, which can avoid the floating point computation
* when calculating the panel ratio.
*/
#define ACCURACY 12
#define FACTOR (1 << ACCURACY)
u32 ratio = source * FACTOR / target;
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_crtc_config *pipe_config)
{
@ -307,11 +230,8 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
struct drm_display_mode *mode = &pipe_config->requested_mode;
struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
unsigned int lvds_bpp;
int pipe;
/* Should never happen!! */
if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
@ -328,11 +248,12 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
else
lvds_bpp = 6*3;
if (lvds_bpp != pipe_config->pipe_bpp) {
if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
pipe_config->pipe_bpp, lvds_bpp);
pipe_config->pipe_bpp = lvds_bpp;
}
/*
* We have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@ -345,139 +266,17 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
if (HAS_PCH_SPLIT(dev)) {
pipe_config->has_pch_encoder = true;
intel_pch_panel_fitting(dev,
intel_connector->panel.fitting_mode,
mode, adjusted_mode);
intel_pch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
return true;
} else {
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
}
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == mode->hdisplay &&
adjusted_mode->vdisplay == mode->vdisplay)
goto out;
/* 965+ wants fuzzy fitting */
if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
/*
* Enable automatic panel scaling for non-native modes so that they fill
* the screen. Should be enabled before the pipe is enabled, according
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
for_each_pipe(pipe)
I915_WRITE(BCLRPAT(pipe), 0);
drm_mode_set_crtcinfo(adjusted_mode, 0);
pipe_config->timings_set = true;
switch (intel_connector->panel.fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
centre_horizontally(adjusted_mode, mode->hdisplay);
centre_vertically(adjusted_mode, mode->vdisplay);
border = LVDS_BORDER_ENABLE;
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
if (INTEL_INFO(dev)->gen >= 4) {
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
/* 965+ is easy, it does everything in hw */
if (scaled_width > scaled_height)
pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
else if (scaled_width < scaled_height)
pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
else if (adjusted_mode->hdisplay != mode->hdisplay)
pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
} else {
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
/*
* For earlier chips we have to calculate the scaling
* ratio by hand and program it into the
* PFIT_PGM_RATIO register
*/
if (scaled_width > scaled_height) { /* pillar */
centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay);
border = LVDS_BORDER_ENABLE;
if (mode->vdisplay != adjusted_mode->vdisplay) {
u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
bits << PFIT_VERT_SCALE_SHIFT);
pfit_control |= (PFIT_ENABLE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
} else if (scaled_width < scaled_height) { /* letter */
centre_vertically(adjusted_mode, scaled_width / mode->hdisplay);
border = LVDS_BORDER_ENABLE;
if (mode->hdisplay != adjusted_mode->hdisplay) {
u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
bits << PFIT_VERT_SCALE_SHIFT);
pfit_control |= (PFIT_ENABLE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
} else
/* Aspects match, Let hw scale both directions */
pfit_control |= (PFIT_ENABLE |
VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
break;
case DRM_MODE_SCALE_FULLSCREEN:
/*
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
if (mode->vdisplay != adjusted_mode->vdisplay ||
mode->hdisplay != adjusted_mode->hdisplay) {
pfit_control |= PFIT_ENABLE;
if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= PFIT_SCALING_AUTO;
else
pfit_control |= (VERT_AUTO_SCALE |
VERT_INTERP_BILINEAR |
HORIZ_AUTO_SCALE |
HORIZ_INTERP_BILINEAR);
}
break;
default:
break;
}
out:
/* If not enabling scaling, be consistent and always use 0. */
if ((pfit_control & PFIT_ENABLE) == 0) {
pfit_control = 0;
pfit_pgm_ratios = 0;
}
/* Make sure pre-965 set dither correctly */
if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
if (pfit_control != lvds_encoder->pfit_control ||
pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
lvds_encoder->pfit_control = pfit_control;
lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
}
dev_priv->lvds_border_bits = border;
/*
* XXX: It would be nice to support lower refresh rates on the
* panels to reduce power consumption, and perhaps match the
@ -937,11 +736,11 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
if (!dev_priv->child_dev_num)
if (!dev_priv->vbt.child_dev_num)
return true;
for (i = 0; i < dev_priv->child_dev_num; i++) {
struct child_device_config *child = dev_priv->child_dev + i;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
struct child_device_config *child = dev_priv->vbt.child_dev + i;
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
@ -1029,7 +828,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
*/
val = I915_READ(lvds_encoder->reg);
if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
val = dev_priv->bios_lvds_val;
val = dev_priv->vbt.bios_lvds_val;
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
@ -1089,7 +888,7 @@ bool intel_lvds_init(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
return false;
if (dev_priv->edp.support) {
if (dev_priv->vbt.edp_support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
return false;
}
@ -1107,10 +906,6 @@ bool intel_lvds_init(struct drm_device *dev)
lvds_encoder->attached_connector = lvds_connector;
if (!HAS_PCH_SPLIT(dev)) {
lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
}
intel_encoder = &lvds_encoder->base;
encoder = &intel_encoder->base;
intel_connector = &lvds_connector->base;
@ -1122,7 +917,6 @@ bool intel_lvds_init(struct drm_device *dev)
DRM_MODE_ENCODER_LVDS);
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
intel_encoder->compute_config = intel_lvds_compute_config;
intel_encoder->disable = intel_disable_lvds;
@ -1212,11 +1006,11 @@ bool intel_lvds_init(struct drm_device *dev)
}
/* Failed to get EDID, what about VBT? */
if (dev_priv->lfp_lvds_vbt_mode) {
if (dev_priv->vbt.lfp_lvds_vbt_mode) {
DRM_DEBUG_KMS("using mode from VBT: ");
drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
drm_mode_debug_printmodeline(dev_priv->vbt.lfp_lvds_vbt_mode);
fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;


@ -110,6 +110,10 @@ struct opregion_asle {
u8 rsvd[102];
} __attribute__((packed));
/* Driver readiness indicator */
#define ASLE_ARDY_READY (1 << 0)
#define ASLE_ARDY_NOT_READY (0 << 0)
/* ASLE irq request bits */
#define ASLE_SET_ALS_ILLUM (1 << 0)
#define ASLE_SET_BACKLIGHT (1 << 1)
@ -123,6 +127,12 @@ struct opregion_asle {
#define ASLE_PFIT_FAILED (1<<14)
#define ASLE_PWM_FREQ_FAILED (1<<16)
/* Technology enabled indicator */
#define ASLE_TCHE_ALS_EN (1 << 0)
#define ASLE_TCHE_BLC_EN (1 << 1)
#define ASLE_TCHE_PFIT_EN (1 << 2)
#define ASLE_TCHE_PFMB_EN (1 << 3)
/* ASLE backlight brightness to set */
#define ASLE_BCLP_VALID (1<<31)
#define ASLE_BCLP_MSK (~(1<<31))
@ -152,7 +162,6 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 max;
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
@ -163,8 +172,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
if (bclp > 255)
return ASLE_BACKLIGHT_FAILED;
max = intel_panel_get_max_backlight(dev);
intel_panel_set_backlight(dev, bclp * max / 255);
intel_panel_set_backlight(dev, bclp, 255);
iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
return 0;
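/*
 * Worked example for the cblv write-back above: the bclp value
 * (0..255 after the range check) is scaled to 0..100 for the cblv
 * field via (bclp * 0x64) / 0xff, e.g. bclp = 204 gives
 * (204 * 100) / 255 = 80, written together with ASLE_CBLV_VALID.
 */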
@ -174,29 +182,22 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
{
/* alsi is the current ALS reading in lux. 0 indicates below sensor
range, 0xffff indicates above sensor range. 1-0xfffe are valid */
return 0;
DRM_DEBUG_DRIVER("Illum is not supported\n");
return ASLE_ALS_ILLUM_FAILED;
}
static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (pfmb & ASLE_PFMB_PWM_VALID) {
u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
pwm = pwm >> 9;
/* FIXME - what do we do with the PWM? */
}
return 0;
DRM_DEBUG_DRIVER("PWM freq is not supported\n");
return ASLE_PWM_FREQ_FAILED;
}
static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
{
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
if (!(pfit & ASLE_PFIT_VALID))
return ASLE_PFIT_FAILED;
return 0;
DRM_DEBUG_DRIVER("Pfit is not supported\n");
return ASLE_PFIT_FAILED;
}
void intel_opregion_asle_intr(struct drm_device *dev)
@ -231,64 +232,6 @@ void intel_opregion_asle_intr(struct drm_device *dev)
iowrite32(asle_stat, &asle->aslc);
}
void intel_opregion_gse_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 asle_stat = 0;
u32 asle_req;
if (!asle)
return;
asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
if (!asle_req) {
DRM_DEBUG_DRIVER("non asle set request??\n");
return;
}
if (asle_req & ASLE_SET_ALS_ILLUM) {
DRM_DEBUG_DRIVER("Illum is not supported\n");
asle_stat |= ASLE_ALS_ILLUM_FAILED;
}
if (asle_req & ASLE_SET_BACKLIGHT)
asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
if (asle_req & ASLE_SET_PFIT) {
DRM_DEBUG_DRIVER("Pfit is not supported\n");
asle_stat |= ASLE_PFIT_FAILED;
}
if (asle_req & ASLE_SET_PWM_FREQ) {
DRM_DEBUG_DRIVER("PWM freq is not supported\n");
asle_stat |= ASLE_PWM_FREQ_FAILED;
}
iowrite32(asle_stat, &asle->aslc);
}
#define ASLE_ALS_EN (1<<0)
#define ASLE_BLC_EN (1<<1)
#define ASLE_PFIT_EN (1<<2)
#define ASLE_PFMB_EN (1<<3)
void intel_opregion_enable_asle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
if (asle) {
if (IS_MOBILE(dev))
intel_enable_asle(dev);
iowrite32(ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
ASLE_PFMB_EN,
&asle->tche);
iowrite32(1, &asle->ardy);
}
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
#define ACPI_EV_LID (1<<1)
#define ACPI_EV_DOCK (1<<2)
@ -472,8 +415,10 @@ void intel_opregion_init(struct drm_device *dev)
register_acpi_notifier(&intel_opregion_notifier);
}
if (opregion->asle)
intel_opregion_enable_asle(dev);
if (opregion->asle) {
iowrite32(ASLE_TCHE_BLC_EN, &opregion->asle->tche);
iowrite32(ASLE_ARDY_READY, &opregion->asle->ardy);
}
}
void intel_opregion_fini(struct drm_device *dev)
@ -484,6 +429,9 @@ void intel_opregion_fini(struct drm_device *dev)
if (!opregion->header)
return;
if (opregion->asle)
iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
if (opregion->acpi) {
iowrite32(0, &opregion->acpi->drdy);
@ -546,6 +494,8 @@ int intel_opregion_setup(struct drm_device *dev)
if (mboxes & MBOX_ASLE) {
DRM_DEBUG_DRIVER("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
}
return 0;


@ -54,14 +54,16 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
/* adjusted_mode has been preset to be the panel's fixed mode */
void
intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *mode, *adjusted_mode;
int x, y, width, height;
mode = &pipe_config->requested_mode;
adjusted_mode = &pipe_config->adjusted_mode;
x = y = width = height = 0;
/* Native modes don't need fitting */
@ -104,17 +106,209 @@ intel_pch_panel_fitting(struct drm_device *dev,
}
break;
default:
case DRM_MODE_SCALE_FULLSCREEN:
x = y = 0;
width = adjusted_mode->hdisplay;
height = adjusted_mode->vdisplay;
break;
default:
WARN(1, "bad panel fit mode: %d\n", fitting_mode);
return;
}
done:
dev_priv->pch_pf_pos = (x << 16) | y;
dev_priv->pch_pf_size = (width << 16) | height;
pipe_config->pch_pfit.pos = (x << 16) | y;
pipe_config->pch_pfit.size = (width << 16) | height;
}
static void
centre_horizontally(struct drm_display_mode *mode,
int width)
{
u32 border, sync_pos, blank_width, sync_width;
/* keep the hsync and hblank widths constant */
sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
sync_pos = (blank_width - sync_width + 1) / 2;
border = (mode->hdisplay - width + 1) / 2;
border += border & 1; /* make the border even */
mode->crtc_hdisplay = width;
mode->crtc_hblank_start = width + border;
mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
}
static void
centre_vertically(struct drm_display_mode *mode,
int height)
{
u32 border, sync_pos, blank_width, sync_width;
/* keep the vsync and vblank widths constant */
sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
sync_pos = (blank_width - sync_width + 1) / 2;
border = (mode->vdisplay - height + 1) / 2;
mode->crtc_vdisplay = height;
mode->crtc_vblank_start = height + border;
mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
}
static inline u32 panel_fitter_scaling(u32 source, u32 target)
{
/*
 * Floating point operations are not supported, so FACTOR is used
 * to carry out the panel ratio calculation in fixed point and
 * avoid floating point math.
 */
#define ACCURACY 12
#define FACTOR (1 << ACCURACY)
u32 ratio = source * FACTOR / target;
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode)
{
struct drm_device *dev = intel_crtc->base.dev;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *mode, *adjusted_mode;
mode = &pipe_config->requested_mode;
adjusted_mode = &pipe_config->adjusted_mode;
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == mode->hdisplay &&
adjusted_mode->vdisplay == mode->vdisplay)
goto out;
switch (fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
centre_horizontally(adjusted_mode, mode->hdisplay);
centre_vertically(adjusted_mode, mode->vdisplay);
border = LVDS_BORDER_ENABLE;
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
if (INTEL_INFO(dev)->gen >= 4) {
u32 scaled_width = adjusted_mode->hdisplay *
mode->vdisplay;
u32 scaled_height = mode->hdisplay *
adjusted_mode->vdisplay;
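/*
 * Comparing scaled_width and scaled_height is a cross-multiplied
 * aspect-ratio comparison that avoids any division: comparing
 * adjusted->hdisplay * mode->vdisplay with
 * mode->hdisplay * adjusted->vdisplay is the same as comparing the
 * panel aspect ratio with the mode aspect ratio.  E.g. for a 1920x1200
 * panel and a 1024x768 mode: 1920 * 768 = 1474560 > 1024 * 1200 =
 * 1228800, so the mode is relatively narrower and gets pillarboxed.
 */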
/* 965+ is easy, it does everything in hw */
if (scaled_width > scaled_height)
pfit_control |= PFIT_ENABLE |
PFIT_SCALING_PILLAR;
else if (scaled_width < scaled_height)
pfit_control |= PFIT_ENABLE |
PFIT_SCALING_LETTER;
else if (adjusted_mode->hdisplay != mode->hdisplay)
pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
} else {
u32 scaled_width = adjusted_mode->hdisplay *
mode->vdisplay;
u32 scaled_height = mode->hdisplay *
adjusted_mode->vdisplay;
/*
* For earlier chips we have to calculate the scaling
* ratio by hand and program it into the
* PFIT_PGM_RATIO register
*/
if (scaled_width > scaled_height) { /* pillar */
centre_horizontally(adjusted_mode,
scaled_height /
mode->vdisplay);
border = LVDS_BORDER_ENABLE;
if (mode->vdisplay != adjusted_mode->vdisplay) {
u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
bits << PFIT_VERT_SCALE_SHIFT);
pfit_control |= (PFIT_ENABLE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
} else if (scaled_width < scaled_height) { /* letter */
centre_vertically(adjusted_mode,
scaled_width /
mode->hdisplay);
border = LVDS_BORDER_ENABLE;
if (mode->hdisplay != adjusted_mode->hdisplay) {
u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
bits << PFIT_VERT_SCALE_SHIFT);
pfit_control |= (PFIT_ENABLE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
} else {
/* Aspects match, let hw scale both directions */
pfit_control |= (PFIT_ENABLE |
VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
}
break;
case DRM_MODE_SCALE_FULLSCREEN:
/*
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
if (mode->vdisplay != adjusted_mode->vdisplay ||
mode->hdisplay != adjusted_mode->hdisplay) {
pfit_control |= PFIT_ENABLE;
if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= PFIT_SCALING_AUTO;
else
pfit_control |= (VERT_AUTO_SCALE |
VERT_INTERP_BILINEAR |
HORIZ_AUTO_SCALE |
HORIZ_INTERP_BILINEAR);
}
break;
default:
WARN(1, "bad panel fit mode: %d\n", fitting_mode);
return;
}
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
pfit_control = 0;
pfit_pgm_ratios = 0;
}
/* Make sure pre-965 set dither correctly for 18bpp panels. */
if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
pipe_config->gmch_pfit.control = pfit_control;
pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
pipe_config->gmch_pfit.lvds_border_bits = border;
}
static int is_backlight_combination_mode(struct drm_device *dev)
@ -130,11 +324,16 @@ static int is_backlight_combination_mode(struct drm_device *dev)
return 0;
}
/* XXX: query mode clock or hardware clock and program max PWM appropriately
* when it's 0.
*/
static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
WARN_ON(!spin_is_locked(&dev_priv->backlight.lock));
/* Restore the CTL value if it was lost, e.g. after a GPU reset */
if (HAS_PCH_SPLIT(dev_priv->dev)) {
@ -164,7 +363,7 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
return val;
}
static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
static u32 intel_panel_get_max_backlight(struct drm_device *dev)
{
u32 max;
@ -182,23 +381,8 @@ static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
max *= 0xff;
}
return max;
}
u32 intel_panel_get_max_backlight(struct drm_device *dev)
{
u32 max;
max = _intel_panel_get_max_backlight(dev);
if (max == 0) {
/* XXX add code here to query mode clock or hardware clock
* and program max PWM appropriately.
*/
pr_warn_once("fixme: max PWM is zero\n");
return 1;
}
DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
return max;
}
@ -217,8 +401,11 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
return val;
if (i915_panel_invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
return intel_panel_get_max_backlight(dev) - val;
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
u32 max = intel_panel_get_max_backlight(dev);
if (max)
return max - val;
}
return val;
}
@ -227,6 +414,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
unsigned long flags;
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
if (HAS_PCH_SPLIT(dev)) {
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
@ -244,6 +434,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev)
}
val = intel_panel_compute_brightness(dev, val);
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
}
@ -270,6 +463,10 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
u32 max = intel_panel_get_max_backlight(dev);
u8 lbpc;
/* we're screwed, but keep behaviour backwards compatible */
if (!max)
max = 1;
lbpc = level * 0xfe / max + 1;
level /= lbpc;
pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
@ -282,9 +479,23 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
void intel_panel_set_backlight(struct drm_device *dev, u32 level)
/* set backlight brightness to level in range [0..max] */
void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 freq;
unsigned long flags;
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
freq = intel_panel_get_max_backlight(dev);
if (!freq) {
/* we are screwed, bail out */
goto out;
}
/* scale to hardware */
level = level * freq / max;
dev_priv->backlight.level = level;
if (dev_priv->backlight.device)
@ -292,11 +503,16 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
if (dev_priv->backlight.enabled)
intel_panel_actually_set_backlight(dev, level);
out:
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
}
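/*
 * Worked example for the rescale above: a backlight_device with
 * max_brightness 255 asking for level 128, on hardware whose max PWM
 * count (freq) reads back as 8192, programs 128 * 8192 / 255 = 4112
 * into the duty-cycle field, i.e. roughly 50% of the hardware range.
 */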
void intel_panel_disable_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
dev_priv->backlight.enabled = false;
intel_panel_actually_set_backlight(dev, 0);
@ -314,12 +530,19 @@ void intel_panel_disable_backlight(struct drm_device *dev)
I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
}
}
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
}
void intel_panel_enable_backlight(struct drm_device *dev,
enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder =
intel_pipe_to_cpu_transcoder(dev_priv, pipe);
unsigned long flags;
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
if (dev_priv->backlight.level == 0) {
dev_priv->backlight.level = intel_panel_get_max_backlight(dev);
@ -347,7 +570,10 @@ void intel_panel_enable_backlight(struct drm_device *dev,
else
tmp &= ~BLM_PIPE_SELECT;
tmp |= BLM_PIPE(pipe);
if (cpu_transcoder == TRANSCODER_EDP)
tmp |= BLM_TRANSCODER_EDP;
else
tmp |= BLM_PIPE(cpu_transcoder);
tmp &= ~BLM_PWM_ENABLE;
I915_WRITE(reg, tmp);
@ -369,6 +595,8 @@ set_level:
*/
dev_priv->backlight.enabled = true;
intel_panel_actually_set_backlight(dev, dev_priv->backlight.level);
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
}
static void intel_panel_init_backlight(struct drm_device *dev)
@ -405,7 +633,8 @@ intel_panel_detect(struct drm_device *dev)
static int intel_panel_update_status(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
intel_panel_set_backlight(dev, bd->props.brightness);
intel_panel_set_backlight(dev, bd->props.brightness,
bd->props.max_brightness);
return 0;
}
@ -425,6 +654,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct backlight_properties props;
unsigned long flags;
intel_panel_init_backlight(dev);
@ -434,7 +664,11 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.brightness = dev_priv->backlight.level;
props.max_brightness = _intel_panel_get_max_backlight(dev);
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
props.max_brightness = intel_panel_get_max_backlight(dev);
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
if (props.max_brightness == 0) {
DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
return -ENODEV;


@ -113,8 +113,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
cfb_pitch, crtc->y, intel_crtc->plane);
DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c, ",
cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
@ -148,7 +148,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* enable it... */
I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void g4x_disable_fbc(struct drm_device *dev)
@ -228,7 +228,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
sandybridge_blit_fbc_update(dev);
}
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void ironlake_disable_fbc(struct drm_device *dev)
@ -242,6 +242,18 @@ static void ironlake_disable_fbc(struct drm_device *dev)
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
if (IS_IVYBRIDGE(dev))
/* WaFbcDisableDpfcClockGating:ivb */
I915_WRITE(ILK_DSPCLK_GATE_D,
I915_READ(ILK_DSPCLK_GATE_D) &
~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
if (IS_HASWELL(dev))
/* WaFbcDisableDpfcClockGating:hsw */
I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
~HSW_DPFC_GATING_DISABLE);
DRM_DEBUG_KMS("disabled FBC\n");
}
}
@ -253,6 +265,47 @@ static bool ironlake_fbc_enabled(struct drm_device *dev)
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
IVB_DPFC_CTL_FENCE_EN |
intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
if (IS_IVYBRIDGE(dev)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
/* WaFbcDisableDpfcClockGating:ivb */
I915_WRITE(ILK_DSPCLK_GATE_D,
I915_READ(ILK_DSPCLK_GATE_D) |
ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
} else {
/* WaFbcAsynchFlipDisableFbcQueue:hsw */
I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
HSW_BYPASS_FBC_QUEUE);
/* WaFbcDisableDpfcClockGating:hsw */
I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
HSW_DPFC_GATING_DISABLE);
}
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
sandybridge_blit_fbc_update(dev);
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
bool intel_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -439,7 +492,7 @@ void intel_update_fbc(struct drm_device *dev)
if (enable_fbc < 0) {
DRM_DEBUG_KMS("fbc set to per-chip default\n");
enable_fbc = 1;
if (INTEL_INFO(dev)->gen <= 6)
if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
enable_fbc = 0;
}
if (!enable_fbc) {
@ -460,7 +513,8 @@ void intel_update_fbc(struct drm_device *dev)
dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
intel_crtc->plane != 0) {
DRM_DEBUG_KMS("plane not 0, disabling compression\n");
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
@ -481,8 +535,6 @@ void intel_update_fbc(struct drm_device *dev)
goto out_disable;
if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size);
DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
goto out_disable;
@ -1633,6 +1685,10 @@ static bool ironlake_check_srwm(struct drm_device *dev, int level,
I915_WRITE(DISP_ARB_CTL,
I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
return false;
} else if (INTEL_INFO(dev)->gen >= 6) {
/* enable FBC WM (except on ILK, where it must remain off) */
I915_WRITE(DISP_ARB_CTL,
I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
}
if (display_wm > display->max_wm) {
@ -2146,15 +2202,15 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
&sandybridge_display_wm_info,
latency, &sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
pipe);
DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
pipe_name(pipe));
return;
}
val = I915_READ(reg);
val &= ~WM0_PIPE_SPRITE_MASK;
I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
DRM_DEBUG_KMS("sprite watermarks For pipe %c - %d\n", pipe_name(pipe), sprite_wm);
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
@ -2163,8 +2219,8 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
SNB_READ_WM1_LATENCY() * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
pipe);
DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
pipe_name(pipe));
return;
}
I915_WRITE(WM1S_LP_ILK, sprite_wm);
@ -2179,8 +2235,8 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
SNB_READ_WM2_LATENCY() * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
pipe);
DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
pipe_name(pipe));
return;
}
I915_WRITE(WM2S_LP_IVB, sprite_wm);
@ -2191,8 +2247,8 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
SNB_READ_WM3_LATENCY() * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
pipe);
DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
pipe_name(pipe));
return;
}
I915_WRITE(WM3S_LP_IVB, sprite_wm);
@ -2481,6 +2537,52 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(val * 50);
}
void valleyview_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long timeout = jiffies + msecs_to_jiffies(10);
u32 limits = gen6_rps_limits(dev_priv, &val);
u32 pval;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_delay);
WARN_ON(val < dev_priv->rps.min_delay);
DRM_DEBUG_DRIVER("gpu freq request from %d to %d\n",
vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.cur_delay),
vlv_gpu_freq(dev_priv->mem_freq, val));
if (val == dev_priv->rps.cur_delay)
return;
valleyview_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
do {
valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS, &pval);
if (time_after(jiffies, timeout)) {
DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
break;
}
udelay(10);
} while (pval & 1);
valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS, &pval);
if ((pval >> 8) != val)
DRM_DEBUG_DRIVER("punit overrode freq: %d requested, but got %d\n",
val, pval >> 8);
/* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
dev_priv->rps.cur_delay = pval >> 8;
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
}
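/*
 * Note on the PUNIT_REG_GPU_FREQ_STS polling above: as used here, bit 0
 * appears to be a busy flag and bits 15:8 the frequency code the Punit
 * actually settled on, which is why the request is only considered
 * complete once bit 0 clears and why cur_delay is taken from pval >> 8.
 */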
static void gen6_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -2501,6 +2603,30 @@ static void gen6_disable_rps(struct drm_device *dev)
I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
static void valleyview_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMIER, 0);
/* Complete PM interrupt masking here doesn't race with the rps work
* item again unmasking PM interrupts because that is using a different
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
spin_lock_irq(&dev_priv->rps.lock);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->rps.lock);
I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
if (dev_priv->vlv_pctx) {
drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
dev_priv->vlv_pctx = NULL;
}
}
int intel_enable_rc6(const struct drm_device *dev)
{
/* Respect the kernel parameter if it is set */
@ -2742,6 +2868,202 @@ static void gen6_update_ring_freq(struct drm_device *dev)
}
}
int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp0;
valleyview_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE, &val);
rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
/* Clamp to max */
rp0 = min_t(u32, rp0, 0xea);
return rp0;
}
static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
u32 val, rpe;
valleyview_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO, &val);
rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
valleyview_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI, &val);
rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
return rpe;
}
int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
u32 val;
valleyview_punit_read(dev_priv, PUNIT_REG_GPU_LFM, &val);
return val & 0xff;
}
static void vlv_rps_timer_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps.vlv_work.work);
/*
* Timer fired, we must be idle. Drop to min voltage state.
* Note: we use RPe here since it should match the
* Vmin we were shooting for. That should give us better
* perf when we come back out of RC6 than if we used the
* min freq available.
*/
mutex_lock(&dev_priv->rps.hw_lock);
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
mutex_unlock(&dev_priv->rps.hw_lock);
}
static void valleyview_setup_pctx(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *pctx;
unsigned long pctx_paddr;
u32 pcbr;
int pctx_size = 24*1024;
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
/* BIOS set it up already, grab the pre-alloc'd space */
int pcbr_offset;
pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
pcbr_offset,
-1,
pctx_size);
goto out;
}
/*
* From the Gunit register HAS:
* The Gfx driver is expected to program this register and ensure
* proper allocation within Gfx stolen memory. For example, this
* register should be programmed such that the PCBR range does not
* overlap with other ranges, such as the frame buffer, protected
* memory, or any other relevant ranges.
*/
pctx = i915_gem_object_create_stolen(dev, pctx_size);
if (!pctx) {
DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
return;
}
pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
I915_WRITE(VLV_PCBR, pctx_paddr);
out:
dev_priv->vlv_pctx = pctx;
}
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
u32 gtfifodbg, val, rpe;
int i;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
}
valleyview_setup_pctx(dev);
gen6_gt_force_wake_get(dev_priv);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_CONT);
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
/* allows RC6 residency counter to work */
I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
I915_WRITE(GEN6_RC_CONTROL,
GEN7_RC_CTL_TO_MODE);
valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS, &val);
switch ((val >> 6) & 3) {
case 0:
case 1:
dev_priv->mem_freq = 800;
break;
case 2:
dev_priv->mem_freq = 1066;
break;
case 3:
dev_priv->mem_freq = 1333;
break;
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
DRM_DEBUG_DRIVER("current GPU freq: %d\n",
vlv_gpu_freq(dev_priv->mem_freq, (val >> 8) & 0xff));
dev_priv->rps.cur_delay = (val >> 8) & 0xff;
dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.hw_max = dev_priv->rps.max_delay;
DRM_DEBUG_DRIVER("max GPU freq: %d\n", vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.max_delay));
rpe = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d\n",
vlv_gpu_freq(dev_priv->mem_freq, rpe));
dev_priv->rps.rpe_delay = rpe;
val = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d\n", vlv_gpu_freq(dev_priv->mem_freq,
val));
dev_priv->rps.min_delay = val;
DRM_DEBUG_DRIVER("setting GPU freq to %d\n",
vlv_gpu_freq(dev_priv->mem_freq, rpe));
INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
valleyview_set_rps(dev_priv->dev, rpe);
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
spin_lock_irq(&dev_priv->rps.lock);
WARN_ON(dev_priv->rps.pm_iir != 0);
I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps.lock);
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
gen6_gt_force_wake_put(dev_priv);
}
void ironlake_teardown_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -3465,13 +3787,22 @@ void intel_disable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Interrupts should be disabled already to avoid re-arming. */
WARN_ON(dev->irq_enabled);
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
} else if (INTEL_INFO(dev)->gen >= 6) {
cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
cancel_work_sync(&dev_priv->rps.work);
if (IS_VALLEYVIEW(dev))
cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
mutex_lock(&dev_priv->rps.hw_lock);
gen6_disable_rps(dev);
if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
else
gen6_disable_rps(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
}
}
@ -3484,8 +3815,13 @@ static void intel_gen6_powersave_work(struct work_struct *work)
struct drm_device *dev = dev_priv->dev;
mutex_lock(&dev_priv->rps.hw_lock);
gen6_enable_rps(dev);
gen6_update_ring_freq(dev);
if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
} else {
gen6_enable_rps(dev);
gen6_update_ring_freq(dev);
}
mutex_unlock(&dev_priv->rps.hw_lock);
}
@ -3497,7 +3833,7 @@ void intel_enable_gt_powersave(struct drm_device *dev)
ironlake_enable_drps(dev);
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
@ -3579,7 +3915,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
_3D_CHICKEN2_WM_READ_PIPELINED << 16 |
_3D_CHICKEN2_WM_READ_PIPELINED);
/* WaDisableRenderCachePipelinedFlush */
/* WaDisableRenderCachePipelinedFlush:ilk */
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
@ -3607,7 +3943,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
val = I915_READ(TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
if (dev_priv->fdi_rx_polarity_inverted)
if (dev_priv->vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
@ -3646,11 +3982,11 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
/* WaDisableHiZPlanesWhenMSAAEnabled */
/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
I915_WRITE(_3D_CHICKEN,
_MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
/* WaSetupGtModeTdRowDispatch */
/* WaSetupGtModeTdRowDispatch:snb */
if (IS_SNB_GT1(dev))
I915_WRITE(GEN6_GT_MODE,
_MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
@ -3677,8 +4013,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
*
* Also apply WaDisableVDSUnitClockGating and
* WaDisableRCPBUnitClockGating.
* Also apply WaDisableVDSUnitClockGating:snb and
* WaDisableRCPBUnitClockGating:snb.
*/
I915_WRITE(GEN6_UCGCTL2,
GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
@ -3709,7 +4045,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
/* WaMbcDriverBootEnable */
/* WaMbcDriverBootEnable:snb */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
@ -3739,7 +4075,6 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
reg |= GEN7_FF_VS_SCHED_HW;
reg |= GEN7_FF_DS_SCHED_HW;
/* WaVSRefCountFullforceMissDisable */
if (IS_HASWELL(dev_priv->dev))
reg &= ~GEN7_FF_VS_REF_CNT_FFME;
@ -3758,6 +4093,23 @@ static void lpt_init_clock_gating(struct drm_device *dev)
I915_WRITE(SOUTH_DSPCLK_GATE_D,
I915_READ(SOUTH_DSPCLK_GATE_D) |
PCH_LP_PARTITION_LEVEL_DISABLE);
/* WADPOClockGatingDisable:hsw */
I915_WRITE(_TRANSA_CHICKEN1,
I915_READ(_TRANSA_CHICKEN1) |
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
static void lpt_suspend_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
}
}
static void haswell_init_clock_gating(struct drm_device *dev)
@ -3770,21 +4122,21 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(WM1_LP_ILK, 0);
/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating workaround.
* This implements the WaDisableRCZUnitClockGating:hsw workaround.
*/
I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
/* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
/* WaApplyL3ControlAndL3ChickenMode:hsw */
I915_WRITE(GEN7_L3CNTLREG1,
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
GEN7_WA_L3_CHICKEN_MODE);
/* This is required by WaCatErrorRejectionIssue */
/* This is required by WaCatErrorRejectionIssue:hsw */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
@ -3796,17 +4148,18 @@ static void haswell_init_clock_gating(struct drm_device *dev)
intel_flush_display_plane(dev_priv, pipe);
}
/* WaVSRefCountFullforceMissDisable:hsw */
gen7_setup_fixed_func_scheduler(dev_priv);
/* WaDisable4x2SubspanOptimization */
/* WaDisable4x2SubspanOptimization:hsw */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
/* WaMbcDriverBootEnable */
/* WaMbcDriverBootEnable:hsw */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
/* WaSwitchSolVfFArbitrationPriority */
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
/* XXX: This is a workaround for early silicon revisions and should be
@ -3833,16 +4186,16 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
/* WaDisableEarlyCull */
/* WaDisableEarlyCull:ivb */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
/* WaDisableBackToBackFlipFix */
/* WaDisableBackToBackFlipFix:ivb */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
/* WaDisablePSDDualDispatchEnable */
/* WaDisablePSDDualDispatchEnable:ivb */
if (IS_IVB_GT1(dev))
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
@ -3850,11 +4203,11 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
/* WaApplyL3ControlAndL3ChickenMode:ivb */
I915_WRITE(GEN7_L3CNTLREG1,
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
@ -3867,7 +4220,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
/* WaForceL3Serialization */
/* WaForceL3Serialization:ivb */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
@ -3882,13 +4235,13 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
* but we didn't debug actual testcases to find it out.
*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating workaround.
* This implements the WaDisableRCZUnitClockGating:ivb workaround.
*/
I915_WRITE(GEN6_UCGCTL2,
GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
/* This is required by WaCatErrorRejectionIssue */
/* This is required by WaCatErrorRejectionIssue:ivb */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
@ -3900,13 +4253,14 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
intel_flush_display_plane(dev_priv, pipe);
}
/* WaMbcDriverBootEnable */
/* WaMbcDriverBootEnable:ivb */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
/* WaVSRefCountFullforceMissDisable:ivb */
gen7_setup_fixed_func_scheduler(dev_priv);
/* WaDisable4x2SubspanOptimization */
/* WaDisable4x2SubspanOptimization:ivb */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
@ -3932,46 +4286,46 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
/* WaDisableEarlyCull */
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
/* WaDisableBackToBackFlipFix */
/* WaDisableBackToBackFlipFix:vlv */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
/* WaDisablePSDDualDispatchEnable */
/* WaDisablePSDDualDispatchEnable:vlv */
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
/* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
/* WaApplyL3ControlAndL3ChickenMode:vlv */
I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
/* WaForceL3Serialization */
/* WaForceL3Serialization:vlv */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
/* WaDisableDopClockGating */
/* WaDisableDopClockGating:vlv */
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
/* WaForceL3Serialization */
/* WaForceL3Serialization:vlv */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
/* This is required by WaCatErrorRejectionIssue */
/* This is required by WaCatErrorRejectionIssue:vlv */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
/* WaMbcDriverBootEnable */
/* WaMbcDriverBootEnable:vlv */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
@ -3987,10 +4341,10 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
* but we didn't debug actual testcases to find it out.
*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating workaround.
* This implements the WaDisableRCZUnitClockGating:vlv workaround.
*
* Also apply WaDisableVDSUnitClockGating and
* WaDisableRCPBUnitClockGating.
* Also apply WaDisableVDSUnitClockGating:vlv and
* WaDisableRCPBUnitClockGating:vlv.
*/
I915_WRITE(GEN6_UCGCTL2,
GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
@ -4012,7 +4366,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
/*
* WaDisableVLVClockGating_VBIIssue
* WaDisableVLVClockGating_VBIIssue:vlv
* Disable clock gating on the GCFG unit to prevent a delay
* in the reporting of vblank events.
*/
@ -4110,20 +4464,42 @@ void intel_init_clock_gating(struct drm_device *dev)
dev_priv->display.init_clock_gating(dev);
}
void intel_suspend_hw(struct drm_device *dev)
{
if (HAS_PCH_LPT(dev))
lpt_suspend_hw(dev);
}
/**
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
* be enabled.
*/
bool intel_using_power_well(struct drm_device *dev)
bool intel_display_power_enabled(struct drm_device *dev,
enum intel_display_power_domain domain)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_HASWELL(dev))
if (!HAS_POWER_WELL(dev))
return true;
switch (domain) {
case POWER_DOMAIN_PIPE_A:
case POWER_DOMAIN_TRANSCODER_EDP:
return true;
case POWER_DOMAIN_PIPE_B:
case POWER_DOMAIN_PIPE_C:
case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
case POWER_DOMAIN_TRANSCODER_A:
case POWER_DOMAIN_TRANSCODER_B:
case POWER_DOMAIN_TRANSCODER_C:
return I915_READ(HSW_PWR_WELL_DRIVER) ==
(HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
else
return true;
default:
BUG();
}
}
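/*
 * Illustrative use of the helper above (hypothetical call site): code
 * that wants to touch, say, pipe B registers on Haswell would first do
 *
 *	if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE_B))
 *		return;
 *
 * so that the access is skipped while the power well is down, whereas
 * pipe A and the eDP transcoder always report as powered.
 */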
void intel_set_power_well(struct drm_device *dev, bool enable)
@ -4190,7 +4566,12 @@ void intel_init_pm(struct drm_device *dev)
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = ironlake_enable_fbc;
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->display.enable_fbc =
gen7_enable_fbc;
else
dev_priv->display.enable_fbc =
ironlake_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
} else if (IS_GM45(dev)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
@ -4340,6 +4721,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
/* WaRsForcewakeWaitTC0:snb */
__gen6_gt_wait_for_thread_c0(dev_priv);
}
@ -4371,6 +4753,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
/* WaRsForcewakeWaitTC0:ivb,hsw */
__gen6_gt_wait_for_thread_c0(dev_priv);
}
@ -4474,6 +4857,7 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
/* WaRsForcewakeWaitTC0:vlv */
__gen6_gt_wait_for_thread_c0(dev_priv);
}
@ -4568,14 +4952,13 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
return 0;
}
static int vlv_punit_rw(struct drm_i915_private *dev_priv, u8 opcode,
static int vlv_punit_rw(struct drm_i915_private *dev_priv, u32 port, u8 opcode,
u8 addr, u32 *val)
{
u32 cmd, devfn, port, be, bar;
u32 cmd, devfn, be, bar;
bar = 0;
be = 0xf;
port = IOSF_PORT_PUNIT;
devfn = PCI_DEVFN(2, 0);
cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
@ -4597,7 +4980,7 @@ static int vlv_punit_rw(struct drm_i915_private *dev_priv, u8 opcode,
I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0,
500)) {
5)) {
DRM_ERROR("timeout waiting for pcode %s (%d) to finish\n",
opcode == PUNIT_OPCODE_REG_READ ? "read" : "write",
addr);
@ -4613,10 +4996,74 @@ static int vlv_punit_rw(struct drm_i915_private *dev_priv, u8 opcode,
int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val)
{
return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_READ, addr, val);
return vlv_punit_rw(dev_priv, IOSF_PORT_PUNIT, PUNIT_OPCODE_REG_READ,
addr, val);
}
int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
{
return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_WRITE, addr, &val);
return vlv_punit_rw(dev_priv, IOSF_PORT_PUNIT, PUNIT_OPCODE_REG_WRITE,
addr, &val);
}
int valleyview_nc_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val)
{
return vlv_punit_rw(dev_priv, IOSF_PORT_NC, PUNIT_OPCODE_REG_READ,
addr, val);
}
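/*
 * With the port now passed explicitly, the same sideband helper serves
 * both the Punit and the NC unit; e.g. valleyview_rps_max_freq() uses
 * valleyview_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE, &val) to read
 * the RP0 fuse over IOSF_PORT_NC, while the Punit read/write wrappers
 * keep using IOSF_PORT_PUNIT.
 */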
int vlv_gpu_freq(int ddr_freq, int val)
{
int mult, base;
switch (ddr_freq) {
case 800:
mult = 20;
base = 120;
break;
case 1066:
mult = 22;
base = 133;
break;
case 1333:
mult = 21;
base = 125;
break;
default:
return -1;
}
return ((val - 0xbd) * mult) + base;
}
int vlv_freq_opcode(int ddr_freq, int val)
{
int mult, base;
switch (ddr_freq) {
case 800:
mult = 20;
base = 120;
break;
case 1066:
mult = 22;
base = 133;
break;
case 1333:
mult = 21;
base = 125;
break;
default:
return -1;
}
val /= mult;
val -= base / mult;
val += 0xbd;
if (val > 0xea)
val = 0xea;
return val;
}
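/*
 * Worked example for the two conversions above (DDR1066: mult = 22,
 * base = 133): opcode 0xd0 maps to ((0xd0 - 0xbd) * 22) + 133 =
 * 19 * 22 + 133 = 551 MHz, and vlv_freq_opcode() inverts it:
 * 551 / 22 = 25, minus 133 / 22 = 6, plus 0xbd gives 0xd0 again
 * (clamped to 0xea at most).
 */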


@ -515,6 +515,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));


@ -135,7 +135,7 @@ struct intel_ring_buffer {
*/
bool itlb_before_ctx_switch;
struct i915_hw_context *default_context;
struct drm_i915_gem_object *last_context_obj;
struct i915_hw_context *last_context;
void *private;
};


@ -1041,6 +1041,32 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
return true;
}
static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
{
unsigned dotclock = pipe_config->adjusted_mode.clock;
struct dpll *clock = &pipe_config->dpll;
/* SDVO TV has fixed PLL values that depend on its clock range;
this mirrors the VBIOS setting. */
if (dotclock >= 100000 && dotclock < 140500) {
clock->p1 = 2;
clock->p2 = 10;
clock->n = 3;
clock->m1 = 16;
clock->m2 = 8;
} else if (dotclock >= 140500 && dotclock <= 200000) {
clock->p1 = 1;
clock->p2 = 10;
clock->n = 6;
clock->m1 = 12;
clock->m2 = 8;
} else {
WARN(1, "SDVO TV clock out of range: %i\n", dotclock);
}
pipe_config->clock_set = true;
}
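/*
 * pipe_config->clock_set (together with the sdvo_tv_clock flag set in
 * compute_config below) presumably tells the crtc code to use these
 * hand-picked dividers as-is instead of running its normal PLL
 * computation, mirroring what the video BIOS programs for SDVO TV-out.
 */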
static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
@ -1066,6 +1092,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
mode,
adjusted_mode);
pipe_config->sdvo_tv_clock = true;
} else if (intel_sdvo->is_lvds) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
intel_sdvo->sdvo_lvds_fixed_mode))
@ -1097,6 +1124,10 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
if (intel_sdvo->color_range)
pipe_config->limited_color_range = true;
/* Clock computation needs to happen after pixel multiplier. */
if (intel_sdvo->is_tv)
i9xx_adjust_sdvo_tv_clock(pipe_config);
return true;
}
@ -1495,7 +1526,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
return drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
dev_priv->crt_ddc_pin));
dev_priv->vbt.crt_ddc_pin));
}
static enum drm_connector_status
@ -1625,12 +1656,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (ret == connector_status_connected) {
intel_sdvo->is_tv = false;
intel_sdvo->is_lvds = false;
intel_sdvo->base.needs_tv_clock = false;
if (response & SDVO_TV_MASK) {
if (response & SDVO_TV_MASK)
intel_sdvo->is_tv = true;
intel_sdvo->base.needs_tv_clock = true;
}
if (response & SDVO_LVDS_MASK)
intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
}
@ -1781,9 +1809,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
goto end;
/* Fetch modes from VBT */
if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
if (dev_priv->vbt.sdvo_lvds_vbt_mode != NULL) {
newmode = drm_mode_duplicate(connector->dev,
dev_priv->sdvo_lvds_vbt_mode);
dev_priv->vbt.sdvo_lvds_vbt_mode);
if (newmode != NULL) {
/* Guarantee the mode is preferred */
newmode->type = (DRM_MODE_TYPE_PREFERRED |
@ -2327,7 +2355,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
intel_sdvo_connector->output_flag = type;
intel_sdvo->is_tv = true;
intel_sdvo->base.needs_tv_clock = true;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
@ -2415,7 +2442,6 @@ static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
{
intel_sdvo->is_tv = false;
intel_sdvo->base.needs_tv_clock = false;
intel_sdvo->is_lvds = false;
/* SDVO requires that the XXX1 function may not exist unless the XXX0 function does. */


@ -32,6 +32,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
@ -583,6 +584,20 @@ ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
key->flags = I915_SET_COLORKEY_NONE;
}
static bool
format_is_yuv(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_YVYU:
return true;
default:
return false;
}
}
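/*
 * These are the packed 4:2:2 YUV formats, where one macropixel carries
 * the chroma for two horizontal pixels; that is why the clipping code
 * below forces src_x and src_w to even values for them.
 */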
static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
@ -600,9 +615,29 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
int ret = 0;
int x = src_x >> 16, y = src_y >> 16;
int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
bool disable_primary = false;
bool visible;
int hscale, vscale;
int max_scale, min_scale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
struct drm_rect src = {
/* sample coordinates in 16.16 fixed point */
.x1 = src_x,
.x2 = src_x + src_w,
.y1 = src_y,
.y2 = src_y + src_h,
};
struct drm_rect dst = {
/* integer pixels */
.x1 = crtc_x,
.x2 = crtc_x + crtc_w,
.y1 = crtc_y,
.y2 = crtc_y + crtc_h,
};
const struct drm_rect clip = {
.x2 = crtc->mode.hdisplay,
.y2 = crtc->mode.vdisplay,
};
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
@ -618,19 +653,23 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_plane->src_w = src_w;
intel_plane->src_h = src_h;
src_w = src_w >> 16;
src_h = src_h >> 16;
/* Pipe must be running... */
if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
return -EINVAL;
if (crtc_x >= primary_w || crtc_y >= primary_h)
if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) {
DRM_DEBUG_KMS("Pipe disabled\n");
return -EINVAL;
}
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe)
if (intel_plane->pipe != intel_crtc->pipe) {
DRM_DEBUG_KMS("Wrong plane <-> crtc mapping\n");
return -EINVAL;
}
/* FIXME check all gen limits */
if (fb->width < 3 || fb->height < 3 || fb->pitches[0] > 16384) {
DRM_DEBUG_KMS("Unsuitable framebuffer for plane\n");
return -EINVAL;
}
/* Sprite planes can be linear or x-tiled surfaces */
switch (obj->tiling_mode) {
@ -638,55 +677,123 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
case I915_TILING_X:
break;
default:
DRM_DEBUG_KMS("Unsupported tiling mode\n");
return -EINVAL;
}
/*
* Clamp the width & height into the visible area. Note we don't
* try to scale the source if part of the visible region is offscreen.
* The caller must handle that by adjusting source offset and size.
* FIXME the following code does a bunch of fuzzy adjustments to the
* coordinates and sizes. We probably need some way to decide whether
* more strict checking should be done instead.
*/
if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
crtc_w += crtc_x;
crtc_x = 0;
max_scale = intel_plane->max_downscale << 16;
min_scale = intel_plane->can_scale ? 1 : (1 << 16);
hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
BUG_ON(hscale < 0);
vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
BUG_ON(vscale < 0);
visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
crtc_x = dst.x1;
crtc_y = dst.y1;
crtc_w = drm_rect_width(&dst);
crtc_h = drm_rect_height(&dst);
if (visible) {
/* check again in case clipping clamped the results */
hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
if (hscale < 0) {
DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
drm_rect_debug_print(&src, true);
drm_rect_debug_print(&dst, false);
return hscale;
}
vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
if (vscale < 0) {
DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
drm_rect_debug_print(&src, true);
drm_rect_debug_print(&dst, false);
return vscale;
}
/* Make the source viewport size an exact multiple of the scaling factors. */
drm_rect_adjust_size(&src,
drm_rect_width(&dst) * hscale - drm_rect_width(&src),
drm_rect_height(&dst) * vscale - drm_rect_height(&src));
/* sanity check to make sure the src viewport wasn't enlarged */
WARN_ON(src.x1 < (int) src_x ||
src.y1 < (int) src_y ||
src.x2 > (int) (src_x + src_w) ||
src.y2 > (int) (src_y + src_h));
/*
* Hardware doesn't handle subpixel coordinates.
* Adjust to (macro)pixel boundary, but be careful not to
* increase the source viewport size, because that could
* push the downscaling factor out of bounds.
*/
src_x = src.x1 >> 16;
src_w = drm_rect_width(&src) >> 16;
src_y = src.y1 >> 16;
src_h = drm_rect_height(&src) >> 16;
if (format_is_yuv(fb->pixel_format)) {
src_x &= ~1;
src_w &= ~1;
/*
* Must keep src and dst the
* same if we can't scale.
*/
if (!intel_plane->can_scale)
crtc_w &= ~1;
if (crtc_w == 0)
visible = false;
}
}
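Putting the block above together: the destination is clipped against the pipe, the source loses the same amount scaled by hscale/vscale, and drm_rect_adjust_size() then squares up any rounding mismatch. A toy walk-through with invented numbers (assuming min_scale = 1 and max_scale = 2 << 16, i.e. a 2x downscale limit):

	struct drm_rect src = { .x1 = 0, .x2 = 200 << 16, .y1 = 0, .y2 = 100 << 16 };
	struct drm_rect dst = { .x1 = -50, .x2 = 50, .y1 = 0, .y2 = 100 };
	const struct drm_rect clip = { .x2 = 1920, .y2 = 1080 };

	int hscale = drm_rect_calc_hscale_relaxed(&src, &dst, 1, 2 << 16);	/* 2 << 16 */
	int vscale = drm_rect_calc_vscale_relaxed(&src, &dst, 1, 2 << 16);	/* 1 << 16 */

	/*
	 * Clipping drops 50 destination pixels on the left, so 50 * hscale
	 * (100 << 16) is cut from the left of the source as well: src becomes
	 * [100 << 16, 200 << 16) and stays exactly 2x the 50-pixel wide dst,
	 * leaving nothing for drm_rect_adjust_size() to trim.
	 */
	bool visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);	/* true */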
if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
goto out;
if ((crtc_x + crtc_w) > primary_w)
crtc_w = primary_w - crtc_x;
if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
crtc_h += crtc_y;
crtc_y = 0;
/* Check size restrictions when scaling */
if (visible && (src_w != crtc_w || src_h != crtc_h)) {
unsigned int width_bytes;
WARN_ON(!intel_plane->can_scale);
/* FIXME interlacing min height is 6 */
if (crtc_w < 3 || crtc_h < 3)
visible = false;
if (src_w < 3 || src_h < 3)
visible = false;
width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
if (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096) {
DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
return -EINVAL;
}
}
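The width_bytes term adds the byte offset of the first pixel within a 64-byte boundary to the row size, so a source that sits exactly at the 4096-byte limit can still be rejected when it starts mid-cacheline. A numeric illustration (values invented, assuming a 4-byte-per-pixel format such as XRGB8888):

	int pixel_size = 4;
	int src_x = 13, src_w = 1024;

	unsigned int width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
	/* (52 & 63) + 4096 = 4148 > 4096, so this combination is refused even
	 * though 1024 pixels on their own are exactly 4096 bytes wide. */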
if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
goto out;
if (crtc_y + crtc_h > primary_h)
crtc_h = primary_h - crtc_y;
if (!crtc_w || !crtc_h) /* Again, nothing to display */
goto out;
/*
* We may not have a scaler, eg. HSW does not have it any more
*/
if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
return -EINVAL;
/*
* We can take a larger source and scale it down, but
* only so much... 16x is the max on SNB.
*/
if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
return -EINVAL;
dst.x1 = crtc_x;
dst.x2 = crtc_x + crtc_w;
dst.y1 = crtc_y;
dst.y2 = crtc_y + crtc_h;
/*
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
if ((crtc_x == 0) && (crtc_y == 0) &&
(crtc_w == primary_w) && (crtc_h == primary_h))
disable_primary = true;
disable_primary = drm_rect_equals(&dst, &clip);
WARN_ON(disable_primary && !visible);
mutex_lock(&dev->struct_mutex);
@ -708,8 +815,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (!disable_primary)
intel_enable_primary(crtc);
intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
crtc_w, crtc_h, x, y, src_w, src_h);
if (visible)
intel_plane->update_plane(plane, fb, obj,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
else
intel_plane->disable_plane(plane);
if (disable_primary)
intel_disable_primary(crtc);
@ -732,7 +843,6 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
out_unlock:
mutex_unlock(&dev->struct_mutex);
out:
return ret;
}
@ -918,13 +1028,15 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
break;
case 7:
if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
intel_plane->can_scale = false;
else
if (IS_IVYBRIDGE(dev)) {
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
} else {
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
}
if (IS_VALLEYVIEW(dev)) {
intel_plane->max_downscale = 1;
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
intel_plane->update_colorkey = vlv_update_colorkey;
@ -933,7 +1045,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
} else {
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
intel_plane->update_colorkey = ivb_update_colorkey;

drivers/gpu/drm/i915/intel_tv.c

@ -1521,12 +1521,12 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
struct child_device_config *p_child;
int i, ret;
if (!dev_priv->child_dev_num)
if (!dev_priv->vbt.child_dev_num)
return 1;
ret = 0;
for (i = 0; i < dev_priv->child_dev_num; i++) {
p_child = dev_priv->child_dev + i;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
p_child = dev_priv->vbt.child_dev + i;
/*
* If the device type is not TV, continue.
*/
@ -1564,7 +1564,7 @@ intel_tv_init(struct drm_device *dev)
return;
}
/* Even if we have an encoder we may not have a connector */
if (!dev_priv->int_tv_support)
if (!dev_priv->vbt.int_tv_support)
return;
/*

include/drm/drm_rect.h (new file, 160 lines)

@ -0,0 +1,160 @@
/*
* Copyright (C) 2011-2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef DRM_RECT_H
#define DRM_RECT_H
/**
* drm_rect - two dimensional rectangle
* @x1: horizontal starting coordinate (inclusive)
* @x2: horizontal ending coordinate (exclusive)
* @y1: vertical starting coordinate (inclusive)
* @y2: vertical ending coordinate (exclusive)
*/
struct drm_rect {
int x1, y1, x2, y2;
};
/**
* drm_rect_adjust_size - adjust the size of the rectangle
* @r: rectangle to be adjusted
* @dw: horizontal adjustment
* @dh: vertical adjustment
*
* Change the size of rectangle @r by @dw in the horizontal direction,
* and by @dh in the vertical direction, while keeping the center
* of @r stationary.
*
* Positive @dw and @dh increase the size, negative values decrease it.
*/
static inline void drm_rect_adjust_size(struct drm_rect *r, int dw, int dh)
{
r->x1 -= dw >> 1;
r->y1 -= dh >> 1;
r->x2 += (dw + 1) >> 1;
r->y2 += (dh + 1) >> 1;
}
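A quick worked example showing that the center stays put and that an odd adjustment gives the extra pixel to the x2/y2 side (values arbitrary):

	struct drm_rect r = { .x1 = 0, .y1 = 0, .x2 = 100, .y2 = 100 };

	drm_rect_adjust_size(&r, -10, 4);
	/* width shrinks by 10, height grows by 4, center remains at (50, 50):
	 * r is now { .x1 = 5, .y1 = -2, .x2 = 95, .y2 = 102 } */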
/**
* drm_rect_translate - translate the rectangle
* @r: rectangle to be translated
* @dx: horizontal translation
* @dy: vertical translation
*
* Move rectangle @r by @dx in the horizontal direction,
* and by @dy in the vertical direction.
*/
static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy)
{
r->x1 += dx;
r->y1 += dy;
r->x2 += dx;
r->y2 += dy;
}
/**
* drm_rect_downscale - downscale a rectangle
* @r: rectangle to be downscaled
* @horz: horizontal downscale factor
* @vert: vertical downscale factor
*
* Divide the coordinates of rectangle @r by @horz and @vert.
*/
static inline void drm_rect_downscale(struct drm_rect *r, int horz, int vert)
{
r->x1 /= horz;
r->y1 /= vert;
r->x2 /= horz;
r->y2 /= vert;
}
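Each coordinate is divided with C integer truncation, so the downscaled width is not always the original width divided by the factor. A small example (numbers arbitrary):

	struct drm_rect r = { .x1 = 1, .y1 = 1, .x2 = 4, .y2 = 4 };

	drm_rect_downscale(&r, 2, 2);
	/* r is now { 0, 0, 2, 2 }: each coordinate truncated toward zero,
	 * turning a 3x3 rectangle into 2x2 rather than exactly half the size. */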
/**
* drm_rect_width - determine the rectangle width
* @r: rectangle whose width is returned
*
* RETURNS:
* The width of the rectangle.
*/
static inline int drm_rect_width(const struct drm_rect *r)
{
return r->x2 - r->x1;
}
/**
* drm_rect_height - determine the rectangle height
* @r: rectangle whose height is returned
*
* RETURNS:
* The height of the rectangle.
*/
static inline int drm_rect_height(const struct drm_rect *r)
{
return r->y2 - r->y1;
}
/**
* drm_rect_visible - determine if the rectangle is visible
* @r: rectangle whose visibility is returned
*
* RETURNS:
* %true if the rectangle is visible, %false otherwise.
*/
static inline bool drm_rect_visible(const struct drm_rect *r)
{
return drm_rect_width(r) > 0 && drm_rect_height(r) > 0;
}
/**
* drm_rect_equals - determine if two rectangles are equal
* @r1: first rectangle
* @r2: second rectangle
*
* RETURNS:
* %true if the rectangles are equal, %false otherwise.
*/
static inline bool drm_rect_equals(const struct drm_rect *r1,
const struct drm_rect *r2)
{
return r1->x1 == r2->x1 && r1->x2 == r2->x2 &&
r1->y1 == r2->y1 && r1->y2 == r2->y2;
}
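These accessors are what the sprite code above builds on; for instance the "sprite covers the whole pipe" decision is just an equality test against the clip rectangle. A tiny sketch (the 1920x1080 mode is invented):

	const struct drm_rect clip = { .x2 = 1920, .y2 = 1080 };	/* whole pipe */
	struct drm_rect dst = { .x1 = 0, .y1 = 0, .x2 = 1920, .y2 = 1080 };

	bool covers_pipe = drm_rect_equals(&dst, &clip);	/* true */
	bool on_screen = drm_rect_visible(&dst);		/* width and height > 0 */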
bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip,
int hscale, int vscale);
int drm_rect_calc_hscale(const struct drm_rect *src,
const struct drm_rect *dst,
int min_hscale, int max_hscale);
int drm_rect_calc_vscale(const struct drm_rect *src,
const struct drm_rect *dst,
int min_vscale, int max_vscale);
int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
struct drm_rect *dst,
int min_hscale, int max_hscale);
int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
struct drm_rect *dst,
int min_vscale, int max_vscale);
void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point);
#endif
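The helpers declared above (intersection, scaled clipping, scale-factor calculation and debug printing) are implemented out of line in drivers/gpu/drm/drm_rect.c. As a rough illustration of the simplest one, drm_rect_intersect() shrinks its first argument to the overlap and reports whether anything remains (numbers invented):

	struct drm_rect r = { .x1 = -100, .y1 = 50, .x2 = 300, .y2 = 400 };
	const struct drm_rect clip = { .x1 = 0, .y1 = 0, .x2 = 256, .y2 = 256 };

	bool visible = drm_rect_intersect(&r, &clip);
	/* visible is true and r has been clipped to { 0, 50, 256, 256 },
	 * i.e. a 256x206 region remains on screen. */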