Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (31 commits)
  drm: integer overflow in drm_mode_dirtyfb_ioctl()
  drivers/gpu/vga/vgaarb.c: add missing kfree
  drm/radeon/kms/atom: unify i2c gpio table handling
  drm/radeon/kms: fix up gpio i2c mask bits for r4xx for real
  ttm: Don't return the bo reserved on error path
  drm/radeon/kms: add a CS ioctl flag not to rewrite tiling flags in the CS
  drm/i915: Fix inconsistent backlight level during disabled
  drm, i915: Fix memory leak in i915_gem_busy_ioctl().
  drm/i915: Use DPCD value for max DP lanes.
  drm/i915: Initiate DP link training only on the lanes we'll be using
  drm/i915: Remove trailing white space
  drm/i915: Try harder during dp pattern 1 link training
  drm/i915: Make DP prepare/commit consistent with DP dpms
  drm/i915: Let panel power sequencing hardware do its job
  drm/i915: Treat PCH eDP like DP in most places
  drm/i915: Remove link_status field from intel_dp structure
  drm/i915: Move common PCH_PP_CONTROL setup to ironlake_get_pp_control
  drm/i915: Module parameters using '-1' as default must be signed type
  drm/i915: Turn on another required clock gating bit on gen6.
  drm/i915: Turn on a required 3D clock gating bit on Sandybridge.
  ...
This commit is contained in:
Linus Torvalds 2011-11-23 09:47:25 -08:00
Parents b4bbb02934 a5cd335165
Commit 2d0f2400a4
22 changed files with 601 additions and 433 deletions

View file

@ -1873,6 +1873,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
}
if (num_clips && clips_ptr) {
if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
ret = -EINVAL;
goto out_err1;
}
clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
if (!clips) {
ret = -ENOMEM;
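
The fix above bounds the userspace-supplied clip count at DRM_MODE_FB_DIRTY_MAX_CLIPS before the allocation, so the product num_clips * sizeof(*clips) can no longer wrap and yield an undersized buffer. A standalone sketch of the same validate-before-multiply pattern; the names below are illustrative, not the kernel's:

#include <stdint.h>
#include <stdlib.h>

#define MAX_CLIPS 256                       /* mirrors DRM_MODE_FB_DIRTY_MAX_CLIPS */

struct clip { int32_t x1, y1, x2, y2; };

static struct clip *alloc_clips(uint32_t num_clips)
{
        /* Bound the count before any size arithmetic so the
         * element-count * element-size product cannot wrap. */
        if (num_clips == 0 || num_clips > MAX_CLIPS)
                return NULL;
        return calloc(num_clips, sizeof(struct clip));
}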

View file

@ -636,11 +636,16 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int ret;
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
if (ring->size == 0)
return 0;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
seq_printf(m, "Ring %s:\n", ring->name);
seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
@ -654,6 +659,8 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
mutex_unlock(&dev->struct_mutex);
return 0;
}
@ -842,7 +849,16 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u16 crstanddelay = I915_READ16(CRSTANDVID);
u16 crstanddelay;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
crstanddelay = I915_READ16(CRSTANDVID);
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
@ -940,7 +956,11 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 delayfreq;
int i;
int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
for (i = 0; i < 16; i++) {
delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@ -948,6 +968,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
(delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
@ -962,13 +984,19 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 inttoext;
int i;
int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
for (i = 1; i <= 32; i++) {
inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
@ -977,9 +1005,19 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
u32 rstdbyctl = I915_READ(RSTDBYCTL);
u16 crstandvid = I915_READ16(CRSTANDVID);
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
rgvmodectl = I915_READ(MEMMODECTL);
rstdbyctl = I915_READ(RSTDBYCTL);
crstandvid = I915_READ16(CRSTANDVID);
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
"yes" : "no");
@ -1167,9 +1205,16 @@ static int i915_gfxec(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
mutex_unlock(&dev->struct_mutex);
return 0;
}

View file

@ -68,7 +68,7 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
"Enable power-saving render C-state 6 (default: true)");
unsigned int i915_enable_fbc __read_mostly = -1;
int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
"Enable frame buffer compression for power savings "
@ -80,7 +80,7 @@ MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");
unsigned int i915_panel_use_ssc __read_mostly = -1;
int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
@ -107,7 +107,7 @@ static struct drm_driver driver;
extern int intel_agp_enabled;
#define INTEL_VGA_DEVICE(id, info) { \
.class = PCI_CLASS_DISPLAY_VGA << 8, \
.class = PCI_BASE_CLASS_DISPLAY << 16, \
.class_mask = 0xff0000, \
.vendor = 0x8086, \
.device = id, \
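
The module-parameter hunks at the top of this file's diff exist because a "-1 means auto" default only works with a signed type: stored in an unsigned int, -1 becomes UINT_MAX and any downstream "param < 0" default check can never fire. A standalone illustration (variable names invented for the example):

#include <stdio.h>

static unsigned int fbc_unsigned = -1;      /* wraps to UINT_MAX */
static int fbc_signed = -1;

int main(void)
{
        /* The unsigned comparison is always false (compilers warn about it);
         * the signed one detects the "-1 = use per-chip default" sentinel. */
        printf("unsigned < 0: %d\n", fbc_unsigned < 0);
        printf("signed   < 0: %d\n", fbc_signed < 0);
        return 0;
}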

View file

@ -126,6 +126,9 @@ struct drm_i915_master_private {
struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 16
/* 16 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 5
struct drm_i915_fence_reg {
struct list_head lru_list;
@ -168,7 +171,7 @@ struct drm_i915_error_state {
u32 instdone1;
u32 seqno;
u64 bbaddr;
u64 fence[16];
u64 fence[I915_MAX_NUM_FENCES];
struct timeval time;
struct drm_i915_error_object {
int page_count;
@ -182,7 +185,7 @@ struct drm_i915_error_state {
u32 gtt_offset;
u32 read_domains;
u32 write_domain;
s32 fence_reg:5;
s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
s32 pinned:2;
u32 tiling:2;
u32 dirty:1;
@ -375,7 +378,7 @@ typedef struct drm_i915_private {
struct notifier_block lid_notifier;
int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@ -506,7 +509,7 @@ typedef struct drm_i915_private {
u8 saveAR[21];
u8 saveDACMASK;
u8 saveCR[37];
uint64_t saveFENCE[16];
uint64_t saveFENCE[I915_MAX_NUM_FENCES];
u32 saveCURACNTR;
u32 saveCURAPOS;
u32 saveCURABASE;
@ -777,10 +780,8 @@ struct drm_i915_gem_object {
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
*
* Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
*/
signed int fence_reg:5;
signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
/**
* Advice: are the backing pages purgeable?
@ -999,10 +1000,10 @@ extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern unsigned int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern unsigned int i915_panel_use_ssc __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern unsigned int i915_enable_rc6 __read_mostly;
extern unsigned int i915_enable_fbc __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
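
I915_MAX_NUM_FENCE_BITS is 5 because the field must hold the 16 fence indices 0..15 plus the -1 sentinel I915_FENCE_REG_NONE, and a 5-bit signed bitfield spans -16..15. A quick standalone check of that range, purely illustrative:

#include <assert.h>
#include <stdio.h>

struct obj {
        signed int fence_reg : 5;   /* 16 fences + sign bit, as in the comment above */
};

int main(void)
{
        struct obj o;

        o.fence_reg = -1;           /* I915_FENCE_REG_NONE */
        assert(o.fence_reg == -1);
        o.fence_reg = 15;           /* last of the 16 fence registers */
        assert(o.fence_reg == 15);
        printf("5-bit signed field spans %d..%d\n", -(1 << 4), (1 << 4) - 1);
        return 0;
}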

View file

@ -1745,7 +1745,7 @@ static void i915_gem_reset_fences(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
for (i = 0; i < 16; i++) {
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
struct drm_i915_gem_object *obj = reg->obj;
@ -3512,9 +3512,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* so emit a request to do so.
*/
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request)
if (request) {
ret = i915_add_request(obj->ring, NULL, request);
else
if (ret)
kfree(request);
} else
ret = -ENOMEM;
}
@ -3613,7 +3615,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
if (IS_GEN6(dev)) {
if (IS_GEN6(dev) || IS_GEN7(dev)) {
/* On Gen6, we can have the GPU use the LLC (the CPU
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
@ -3877,7 +3879,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < 16; i++)
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);

View file

@ -824,6 +824,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));

View file

@ -1553,12 +1553,21 @@
*/
#define PP_READY (1 << 30)
#define PP_SEQUENCE_NONE (0 << 28)
#define PP_SEQUENCE_ON (1 << 28)
#define PP_SEQUENCE_OFF (2 << 28)
#define PP_SEQUENCE_MASK 0x30000000
#define PP_SEQUENCE_POWER_UP (1 << 28)
#define PP_SEQUENCE_POWER_DOWN (2 << 28)
#define PP_SEQUENCE_MASK (3 << 28)
#define PP_SEQUENCE_SHIFT 28
#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
#define PP_SEQUENCE_STATE_MASK 0x0000000f
#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
#define PP_SEQUENCE_STATE_RESET (0xf << 0)
#define PP_CONTROL 0x61204
#define POWER_TARGET_ON (1 << 0)
#define PP_ON_DELAYS 0x61208
@ -3444,6 +3453,10 @@
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
#define GEN6_UCGCTL2 0x9404
# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
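
The new PCH_PP_STATUS definitions split the register into a 2-bit power-up/power-down sequence field at bits 29:28 and a 4-bit sequencer state at bits 3:0; the eDP changes later in this commit wait on combinations of these. A minimal standalone decode using the values defined above (the sample register value is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PP_SEQUENCE_MASK        (3u << 28)
#define PP_SEQUENCE_SHIFT       28
#define PP_SEQUENCE_STATE_MASK  0x0000000f

int main(void)
{
        uint32_t pp_status = 0x80000008;    /* hypothetical: panel on, state ON_IDLE */

        printf("sequence       = %u\n",
               (unsigned)((pp_status & PP_SEQUENCE_MASK) >> PP_SEQUENCE_SHIFT));
        printf("sequence state = 0x%x\n",
               (unsigned)(pp_status & PP_SEQUENCE_STATE_MASK));
        return 0;
}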

View file

@ -370,6 +370,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
@ -404,6 +405,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);

View file

@ -2933,7 +2933,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
@ -4711,7 +4712,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
lvds_bpc = 6;
if (lvds_bpc < display_bpc) {
DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
display_bpc = lvds_bpc;
}
continue;
@ -4722,7 +4723,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
unsigned int edp_bpc = dev_priv->edp.bpp / 3;
if (edp_bpc < display_bpc) {
DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
display_bpc = edp_bpc;
}
continue;
@ -4737,7 +4738,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
/* Don't use an invalid EDID bpc value */
if (connector->display_info.bpc &&
connector->display_info.bpc < display_bpc) {
DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
display_bpc = connector->display_info.bpc;
}
}
@ -4748,10 +4749,10 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
*/
if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
if (display_bpc > 8 && display_bpc < 12) {
DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
display_bpc = 12;
} else {
DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
display_bpc = 8;
}
}
@ -4789,8 +4790,8 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
display_bpc = min(display_bpc, bpc);
DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
bpc, display_bpc);
DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
bpc, display_bpc);
*pipe_bpp = display_bpc * 3;
@ -5671,7 +5672,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
if ((is_lvds && dev_priv->lvds_dither) || dither) {
pipeconf |= PIPECONF_DITHER_EN;
pipeconf |= PIPECONF_DITHER_TYPE_ST1;
pipeconf |= PIPECONF_DITHER_TYPE_SP;
}
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
@ -8148,6 +8149,20 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
* flickering pixels due to Z write ordering failures after
* some amount of runtime in the Mesa "fire" demo, and Unigine
* Sanctuary and Tropics, and apparently anything else with
* alpha test or pixel discard.
*
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
*/
I915_WRITE(GEN6_UCGCTL2,
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
/*
* According to the spec the following bits should be
* set in order to enable memory self-refresh and fbc:

View file

@ -59,7 +59,6 @@ struct intel_dp {
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
uint8_t train_set[4];
uint8_t link_status[DP_LINK_STATUS_SIZE];
int panel_power_up_delay;
int panel_power_down_delay;
int panel_power_cycle_delay;
@ -68,7 +67,6 @@ struct intel_dp {
struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
unsigned long panel_off_jiffies;
};
/**
@ -157,16 +155,12 @@ intel_edp_link_config(struct intel_encoder *intel_encoder,
static int
intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
int max_lane_count = 4;
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
switch (max_lane_count) {
case 1: case 2: case 4:
break;
default:
max_lane_count = 4;
}
int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
switch (max_lane_count) {
case 1: case 2: case 4:
break;
default:
max_lane_count = 4;
}
return max_lane_count;
}
@ -768,12 +762,11 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
continue;
intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
intel_dp->base.type == INTEL_OUTPUT_EDP)
{
lane_count = intel_dp->lane_count;
break;
} else if (is_edp(intel_dp)) {
lane_count = dev_priv->edp.lanes;
break;
}
}
@ -810,6 +803,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@ -822,18 +816,31 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
ironlake_edp_pll_off(encoder);
}
intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= intel_dp->color_range;
/*
* There are three kinds of DP registers:
*
* IBX PCH
* CPU
* CPT PCH
*
* IBX PCH and CPU are the same for almost everything,
* except that the CPU DP PLL is configured in this
* register
*
* CPT PCH is quite different, having many bits moved
* to the TRANS_DP_CTL register instead. That
* configuration happens (oddly) in ironlake_pch_enable
*/
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
intel_dp->DP |= DP_SYNC_VS_HIGH;
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
*/
intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
else
intel_dp->DP |= DP_LINK_TRAIN_OFF;
/* Handle DP bits in common between all three register formats */
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
switch (intel_dp->lane_count) {
case 1:
@ -852,59 +859,106 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_write_eld(encoder, adjusted_mode);
}
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
intel_dp->link_configuration[0] = intel_dp->link_bw;
intel_dp->link_configuration[1] = intel_dp->lane_count;
intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
(intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
intel_dp->DP |= DP_ENHANCED_FRAMING;
}
/* CPT DP's pipe select is decided in TRANS_DP_CTL */
if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
intel_dp->DP |= DP_PIPEB_SELECT;
/* Split out the IBX/CPU vs CPT settings */
if (is_cpu_edp(intel_dp)) {
/* don't miss out required setting for eDP */
intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
intel_dp->DP |= DP_SYNC_VS_HIGH;
intel_dp->DP |= DP_LINK_TRAIN_OFF;
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
intel_dp->DP |= DP_ENHANCED_FRAMING;
if (intel_crtc->pipe == 1)
intel_dp->DP |= DP_PIPEB_SELECT;
if (is_cpu_edp(intel_dp)) {
/* don't miss out required setting for eDP */
intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
}
} else {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
}
#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
mask, value,
I915_READ(PCH_PP_STATUS),
I915_READ(PCH_PP_CONTROL));
if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
DRM_ERROR("Panel status timeout: status %08x control %08x\n",
I915_READ(PCH_PP_STATUS),
I915_READ(PCH_PP_CONTROL));
}
}
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power on\n");
ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
unsigned long off_time;
unsigned long delay;
DRM_DEBUG_KMS("Wait for panel power off time\n");
ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
if (ironlake_edp_have_panel_power(intel_dp) ||
ironlake_edp_have_panel_vdd(intel_dp))
{
DRM_DEBUG_KMS("Panel still on, no delay needed\n");
return;
}
static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power cycle\n");
ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay);
if (time_after(jiffies, off_time)) {
DRM_DEBUG_KMS("Time already passed");
return;
}
delay = jiffies_to_msecs(off_time - jiffies);
if (delay > intel_dp->panel_power_down_delay)
delay = intel_dp->panel_power_down_delay;
DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay);
msleep(delay);
/* Read the current pp_control value, unlocking the register if it
* is locked
*/
static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
{
u32 control = I915_READ(PCH_PP_CONTROL);
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
return control;
}
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
@ -921,15 +975,16 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
"eDP VDD already requested on\n");
intel_dp->want_panel_vdd = true;
if (ironlake_edp_have_panel_vdd(intel_dp)) {
DRM_DEBUG_KMS("eDP VDD already on\n");
return;
}
ironlake_wait_panel_off(intel_dp);
pp = I915_READ(PCH_PP_CONTROL);
pp &= ~PANEL_UNLOCK_MASK;
pp |= PANEL_UNLOCK_REGS;
if (!ironlake_edp_have_panel_power(intel_dp))
ironlake_wait_panel_power_cycle(intel_dp);
pp = ironlake_get_pp_control(dev_priv);
pp |= EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@ -952,9 +1007,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
u32 pp;
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
pp = I915_READ(PCH_PP_CONTROL);
pp &= ~PANEL_UNLOCK_MASK;
pp |= PANEL_UNLOCK_REGS;
pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@ -962,7 +1015,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
/* Make sure sequencer is idle before allowing subsequent activity */
DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
intel_dp->panel_off_jiffies = jiffies;
msleep(intel_dp->panel_power_down_delay);
}
}
@ -972,9 +1026,9 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
struct intel_dp, panel_vdd_work);
struct drm_device *dev = intel_dp->base.base.dev;
mutex_lock(&dev->struct_mutex);
mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
}
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
@ -984,7 +1038,7 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
intel_dp->want_panel_vdd = false;
if (sync) {
@ -1000,23 +1054,25 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
}
}
/* Returns true if the panel was already on when called */
static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
u32 pp;
if (!is_edp(intel_dp))
return;
if (ironlake_edp_have_panel_power(intel_dp))
DRM_DEBUG_KMS("Turn eDP power on\n");
if (ironlake_edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP power already on\n");
return;
}
ironlake_wait_panel_off(intel_dp);
pp = I915_READ(PCH_PP_CONTROL);
pp &= ~PANEL_UNLOCK_MASK;
pp |= PANEL_UNLOCK_REGS;
ironlake_wait_panel_power_cycle(intel_dp);
pp = ironlake_get_pp_control(dev_priv);
if (IS_GEN5(dev)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
@ -1025,13 +1081,13 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
}
pp |= POWER_TARGET_ON;
if (!IS_GEN5(dev))
pp |= PANEL_POWER_RESET;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
5000))
DRM_ERROR("panel on wait timed out: 0x%08x\n",
I915_READ(PCH_PP_STATUS));
ironlake_wait_panel_on(intel_dp);
if (IS_GEN5(dev)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
@ -1040,46 +1096,25 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
}
}
static void ironlake_edp_panel_off(struct drm_encoder *encoder)
static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
u32 pp;
if (!is_edp(intel_dp))
return;
pp = I915_READ(PCH_PP_CONTROL);
pp &= ~PANEL_UNLOCK_MASK;
pp |= PANEL_UNLOCK_REGS;
if (IS_GEN5(dev)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
}
DRM_DEBUG_KMS("Turn eDP power off\n");
intel_dp->panel_off_jiffies = jiffies;
WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
if (IS_GEN5(dev)) {
pp &= ~POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
pp &= ~POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
msleep(intel_dp->panel_power_cycle_delay);
pp = ironlake_get_pp_control(dev_priv);
pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
DRM_ERROR("panel off wait timed out: 0x%08x\n",
I915_READ(PCH_PP_STATUS));
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
}
ironlake_wait_panel_off(intel_dp);
}
static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
@ -1099,9 +1134,7 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
* allowing it to appear.
*/
msleep(intel_dp->backlight_on_delay);
pp = I915_READ(PCH_PP_CONTROL);
pp &= ~PANEL_UNLOCK_MASK;
pp |= PANEL_UNLOCK_REGS;
pp = ironlake_get_pp_control(dev_priv);
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@ -1117,9 +1150,7 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("\n");
pp = I915_READ(PCH_PP_CONTROL);
pp &= ~PANEL_UNLOCK_MASK;
pp |= PANEL_UNLOCK_REGS;
pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@ -1187,17 +1218,18 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_backlight_off(intel_dp);
ironlake_edp_panel_off(intel_dp);
/* Wake up the sink first */
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_link_down(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
/* Make sure the panel is off before trying to
* change the mode
*/
ironlake_edp_backlight_off(intel_dp);
intel_dp_link_down(intel_dp);
ironlake_edp_panel_off(encoder);
}
static void intel_dp_commit(struct drm_encoder *encoder)
@ -1211,7 +1243,6 @@ static void intel_dp_commit(struct drm_encoder *encoder)
intel_dp_start_link_train(intel_dp);
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
@ -1230,16 +1261,20 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
ironlake_edp_backlight_off(intel_dp);
ironlake_edp_panel_off(intel_dp);
ironlake_edp_panel_vdd_on(intel_dp);
if (is_edp(intel_dp))
ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
intel_dp_link_down(intel_dp);
ironlake_edp_panel_off(encoder);
if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
ironlake_edp_pll_off(encoder);
ironlake_edp_panel_vdd_off(intel_dp, false);
if (is_cpu_edp(intel_dp))
ironlake_edp_pll_off(encoder);
} else {
if (is_cpu_edp(intel_dp))
ironlake_edp_pll_on(encoder);
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
if (!(dp_reg & DP_PORT_EN)) {
@ -1247,7 +1282,6 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
} else
ironlake_edp_panel_vdd_off(intel_dp, false);
ironlake_edp_backlight_on(intel_dp);
@ -1285,11 +1319,11 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
* link status information
*/
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp)
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
return intel_dp_aux_native_read_retry(intel_dp,
DP_LANE0_1_STATUS,
intel_dp->link_status,
link_status,
DP_LINK_STATUS_SIZE);
}
@ -1301,27 +1335,25 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
}
static uint8_t
intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
intel_get_adjust_request_voltage(uint8_t adjust_request[2],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
uint8_t l = intel_dp_link_status(link_status, i);
uint8_t l = adjust_request[lane>>1];
return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}
static uint8_t
intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
uint8_t l = intel_dp_link_status(link_status, i);
uint8_t l = adjust_request[lane>>1];
return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
@ -1344,6 +1376,7 @@ static char *link_train_names[] = {
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
#define I830_DP_VOLTAGE_MAX_CPT DP_TRAIN_VOLTAGE_SWING_1200
static uint8_t
intel_dp_pre_emphasis_max(uint8_t voltage_swing)
@ -1362,15 +1395,18 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
}
static void
intel_get_adjust_train(struct intel_dp *intel_dp)
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
struct drm_device *dev = intel_dp->base.base.dev;
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
int voltage_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
if (this_v > v)
v = this_v;
@ -1378,8 +1414,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
p = this_p;
}
if (v >= I830_DP_VOLTAGE_MAX)
v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
voltage_max = I830_DP_VOLTAGE_MAX_CPT;
else
voltage_max = I830_DP_VOLTAGE_MAX;
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
if (p >= intel_dp_pre_emphasis_max(v))
p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
@ -1389,7 +1429,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
}
static uint32_t
intel_dp_signal_levels(uint8_t train_set, int lane_count)
intel_dp_signal_levels(uint8_t train_set)
{
uint32_t signal_levels = 0;
@ -1458,9 +1498,8 @@ static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_LANE0_1_STATUS + (lane >> 1);
int s = (lane & 1) * 4;
uint8_t l = intel_dp_link_status(link_status, i);
uint8_t l = link_status[lane>>1];
return (l >> s) & 0xf;
}
@ -1485,18 +1524,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
DP_LANE_CHANNEL_EQ_DONE|\
DP_LANE_SYMBOL_LOCKED)
static bool
intel_channel_eq_ok(struct intel_dp *intel_dp)
intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t lane_align;
uint8_t lane_status;
int lane;
lane_align = intel_dp_link_status(intel_dp->link_status,
lane_align = intel_dp_link_status(link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
lane_status = intel_get_lane_status(intel_dp->link_status, lane);
lane_status = intel_get_lane_status(link_status, lane);
if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
return false;
}
@ -1521,8 +1560,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
ret = intel_dp_aux_native_write(intel_dp,
DP_TRAINING_LANE0_SET,
intel_dp->train_set, 4);
if (ret != 4)
intel_dp->train_set,
intel_dp->lane_count);
if (ret != intel_dp->lane_count)
return false;
return true;
@ -1538,7 +1578,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
int i;
uint8_t voltage;
bool clock_recovery = false;
int tries;
int voltage_tries, loop_tries;
u32 reg;
uint32_t DP = intel_dp->DP;
@ -1565,16 +1605,20 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP &= ~DP_LINK_TRAIN_MASK;
memset(intel_dp->train_set, 0, 4);
voltage = 0xff;
tries = 0;
voltage_tries = 0;
loop_tries = 0;
clock_recovery = false;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
if (IS_GEN6(dev) && is_edp(intel_dp)) {
if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
@ -1590,10 +1634,13 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
/* Set training pattern 1 */
udelay(100);
if (!intel_dp_get_link_status(intel_dp))
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
clock_recovery = true;
break;
}
@ -1602,20 +1649,30 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count)
break;
if (i == intel_dp->lane_count) {
++loop_tries;
if (loop_tries == 5) {
DRM_DEBUG_KMS("too many full retries, give up\n");
break;
}
memset(intel_dp->train_set, 0, 4);
voltage_tries = 0;
continue;
}
/* Check to see if we've tried the same voltage 5 times */
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++tries;
if (tries == 5)
++voltage_tries;
if (voltage_tries == 5) {
DRM_DEBUG_KMS("too many voltage retries, give up\n");
break;
}
} else
tries = 0;
voltage_tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new intel_dp->train_set as requested by target */
intel_get_adjust_train(intel_dp);
intel_get_adjust_train(intel_dp, link_status);
}
intel_dp->DP = DP;
@ -1638,6 +1695,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
uint8_t link_status[DP_LINK_STATUS_SIZE];
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
@ -1645,11 +1703,11 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
if (IS_GEN6(dev) && is_edp(intel_dp)) {
if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
@ -1665,17 +1723,17 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
udelay(400);
if (!intel_dp_get_link_status(intel_dp))
if (!intel_dp_get_link_status(intel_dp, link_status))
break;
/* Make sure clock is still ok */
if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
cr_tries++;
continue;
}
if (intel_channel_eq_ok(intel_dp)) {
if (intel_channel_eq_ok(intel_dp, link_status)) {
channel_eq = true;
break;
}
@ -1690,7 +1748,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
}
/* Compute new intel_dp->train_set as requested by target */
intel_get_adjust_train(intel_dp);
intel_get_adjust_train(intel_dp, link_status);
++tries;
}
@ -1735,8 +1793,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
if (is_edp(intel_dp))
DP |= DP_LINK_TRAIN_OFF;
if (is_edp(intel_dp)) {
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
DP |= DP_LINK_TRAIN_OFF_CPT;
else
DP |= DP_LINK_TRAIN_OFF;
}
if (!HAS_PCH_CPT(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@ -1822,6 +1884,7 @@ static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
@ -1830,7 +1893,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
return;
/* Try to read receiver status if the link appears to be up */
if (!intel_dp_get_link_status(intel_dp)) {
if (!intel_dp_get_link_status(intel_dp, link_status)) {
intel_dp_link_down(intel_dp);
return;
}
@ -1855,7 +1918,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
if (!intel_channel_eq_ok(intel_dp)) {
if (!intel_channel_eq_ok(intel_dp, link_status)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
drm_get_encoder_name(&intel_dp->base.base));
intel_dp_start_link_train(intel_dp);
@ -2179,7 +2242,8 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
continue;
intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
intel_dp->base.type == INTEL_OUTPUT_EDP)
return intel_dp->output_reg;
}
@ -2321,7 +2385,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
PANEL_LIGHT_ON_DELAY_SHIFT;
cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
PANEL_LIGHT_OFF_DELAY_SHIFT;
@ -2354,11 +2418,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay;
ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_dp_get_dpcd(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
if (ret) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake =
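
The reworked clock-recovery loop above ("Try harder during dp pattern 1 link training") keeps two counters: voltage_tries aborts after the sink requests the same voltage swing five times in a row, while loop_tries allows up to five full restarts from a zeroed train_set once every lane reports max swing. A standalone sketch of just that retry bookkeeping, with DPCD access stubbed out behind callbacks; all names here are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define VOLTAGE_SWING_MASK  0x03    /* mirrors DP_TRAIN_VOLTAGE_SWING_MASK */
#define MAX_SWING_REACHED   0x04    /* mirrors DP_TRAIN_MAX_SWING_REACHED */

bool clock_recovery(uint8_t train_set[4], int lane_count,
                    bool (*status_ok)(const uint8_t train_set[4], int lanes),
                    void (*adjust)(uint8_t train_set[4]))
{
        int voltage_tries = 0, loop_tries = 0;
        uint8_t voltage = 0xff;

        memset(train_set, 0, 4);
        for (;;) {
                int i;

                if (status_ok(train_set, lane_count))
                        return true;            /* clock recovery OK */

                /* Every lane at max swing: restart from scratch, at most 5 times. */
                for (i = 0; i < lane_count; i++)
                        if (!(train_set[i] & MAX_SWING_REACHED))
                                break;
                if (i == lane_count) {
                        if (++loop_tries == 5)
                                return false;   /* too many full retries */
                        memset(train_set, 0, 4);
                        voltage_tries = 0;
                        continue;
                }

                /* Same voltage requested 5 times in a row: give up. */
                if ((train_set[0] & VOLTAGE_SWING_MASK) == voltage) {
                        if (++voltage_tries == 5)
                                return false;
                } else {
                        voltage_tries = 0;
                }
                voltage = train_set[0] & VOLTAGE_SWING_MASK;

                adjust(train_set);              /* stands in for intel_get_adjust_train */
        }
}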

View file

@ -326,7 +326,8 @@ static int intel_panel_update_status(struct backlight_device *bd)
static int intel_panel_get_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
return intel_panel_get_backlight(dev);
struct drm_i915_private *dev_priv = dev->dev_private;
return dev_priv->backlight_level;
}
static const struct backlight_ops intel_panel_bl_ops = {

View file

@ -480,21 +480,23 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
break;
case DB_Z_INFO:
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
track->db_z_info = radeon_get_ib_value(p, idx);
ib[idx] &= ~Z_ARRAY_MODE(0xf);
track->db_z_info &= ~Z_ARRAY_MODE(0xf);
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
} else {
ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
if (!p->keep_tiling_flags) {
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
ib[idx] &= ~Z_ARRAY_MODE(0xf);
track->db_z_info &= ~Z_ARRAY_MODE(0xf);
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
} else {
ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
}
}
break;
case DB_STENCIL_INFO:
@ -607,40 +609,44 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_INFO:
case CB_COLOR6_INFO:
case CB_COLOR7_INFO:
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
tmp = (reg - CB_COLOR0_INFO) / 0x3c;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
if (!p->keep_tiling_flags) {
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
}
}
break;
case CB_COLOR8_INFO:
case CB_COLOR9_INFO:
case CB_COLOR10_INFO:
case CB_COLOR11_INFO:
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
if (!p->keep_tiling_flags) {
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
}
}
break;
case CB_COLOR0_PITCH:
@ -1311,10 +1317,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
if (!p->keep_tiling_flags) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
}
texture = reloc->robj;
/* tex mip base */
r = evergreen_cs_packet_next_reloc(p, &reloc);

View file

@ -701,16 +701,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r;
}
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_TXO_MICRO_TILE;
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
if (p->keep_tiling_flags) {
ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
} else {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_TXO_MICRO_TILE;
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
tmp |= tile_flags;
ib[idx] = tmp;
tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
tmp |= tile_flags;
ib[idx] = tmp;
}
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@ -760,24 +765,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLORPITCH1 */
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
if (!p->keep_tiling_flags) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_COLOR_MICROTILE_ENABLE;
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
}
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_COLOR_MICROTILE_ENABLE;
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
i = (reg - 0x4E38) >> 2;
track->cb[i].pitch = idx_value & 0x3FFE;
switch (((idx_value >> 21) & 0xF)) {
@ -843,25 +850,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 0x4F24:
/* ZB_DEPTHPITCH */
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
if (!p->keep_tiling_flags) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_DEPTHMACROTILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_DEPTHMICROTILE_TILED;
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
}
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_DEPTHMACROTILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_DEPTHMICROTILE_TILED;
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
track->zb.pitch = idx_value & 0x3FFC;
track->zb_dirty = true;
break;

View file

@ -941,7 +941,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_depth_control = radeon_get_ib_value(p, idx);
break;
case R_028010_DB_DEPTH_INFO:
if (r600_cs_packet_next_is_pkt3_nop(p)) {
if (!p->keep_tiling_flags &&
r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
@ -992,7 +993,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280B4_CB_COLOR5_INFO:
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
if (r600_cs_packet_next_is_pkt3_nop(p)) {
if (!p->keep_tiling_flags &&
r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
@ -1291,10 +1293,12 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
mip_offset <<= 8;
word0 = radeon_get_ib_value(p, idx + 0);
if (tiling_flags & RADEON_TILING_MACRO)
word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
else if (tiling_flags & RADEON_TILING_MICRO)
word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
if (!p->keep_tiling_flags) {
if (tiling_flags & RADEON_TILING_MACRO)
word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
else if (tiling_flags & RADEON_TILING_MICRO)
word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
}
word1 = radeon_get_ib_value(p, idx + 1);
w0 = G_038000_TEX_WIDTH(word0) + 1;
h0 = G_038004_TEX_HEIGHT(word1) + 1;
@ -1621,10 +1625,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
if (!p->keep_tiling_flags) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
}
texture = reloc->robj;
/* tex mip base */
r = r600_cs_packet_next_reloc(p, &reloc);

View file

@ -611,7 +611,8 @@ struct radeon_cs_parser {
struct radeon_ib *ib;
void *track;
unsigned family;
int parser_error;
int parser_error;
bool keep_tiling_flags;
};
extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);

View file

@ -62,6 +62,87 @@ union atom_supported_devices {
struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
};
static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
ATOM_GPIO_I2C_ASSIGMENT *gpio,
u8 index)
{
/* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
if ((rdev->family == CHIP_R420) ||
(rdev->family == CHIP_R423) ||
(rdev->family == CHIP_RV410)) {
if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
gpio->ucClkMaskShift = 0x19;
gpio->ucDataMaskShift = 0x18;
}
}
/* some evergreen boards have bad data for this entry */
if (ASIC_IS_DCE4(rdev)) {
if ((index == 7) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
(gpio->sucI2cId.ucAccess == 0)) {
gpio->sucI2cId.ucAccess = 0x97;
gpio->ucDataMaskShift = 8;
gpio->ucDataEnShift = 8;
gpio->ucDataY_Shift = 8;
gpio->ucDataA_Shift = 8;
}
}
/* some DCE3 boards have bad data for this entry */
if (ASIC_IS_DCE3(rdev)) {
if ((index == 4) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
(gpio->sucI2cId.ucAccess == 0x94))
gpio->sucI2cId.ucAccess = 0x14;
}
}
static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
{
struct radeon_i2c_bus_rec i2c;
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
i2c.en_data_mask = (1 << gpio->ucDataEnShift);
i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
i2c.hw_capable = true;
else
i2c.hw_capable = false;
if (gpio->sucI2cId.ucAccess == 0xa0)
i2c.mm_i2c = true;
else
i2c.mm_i2c = false;
i2c.i2c_id = gpio->sucI2cId.ucAccess;
if (i2c.mask_clk_reg)
i2c.valid = true;
else
i2c.valid = false;
return i2c;
}
static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
uint8_t id)
{
@ -85,71 +166,10 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
/* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
if ((rdev->family == CHIP_R420) ||
(rdev->family == CHIP_R423) ||
(rdev->family == CHIP_RV410)) {
if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
gpio->ucClkMaskShift = 0x19;
gpio->ucDataMaskShift = 0x18;
}
}
/* some evergreen boards have bad data for this entry */
if (ASIC_IS_DCE4(rdev)) {
if ((i == 7) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
(gpio->sucI2cId.ucAccess == 0)) {
gpio->sucI2cId.ucAccess = 0x97;
gpio->ucDataMaskShift = 8;
gpio->ucDataEnShift = 8;
gpio->ucDataY_Shift = 8;
gpio->ucDataA_Shift = 8;
}
}
/* some DCE3 boards have bad data for this entry */
if (ASIC_IS_DCE3(rdev)) {
if ((i == 4) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
(gpio->sucI2cId.ucAccess == 0x94))
gpio->sucI2cId.ucAccess = 0x14;
}
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
if (gpio->sucI2cId.ucAccess == id) {
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
i2c.en_data_mask = (1 << gpio->ucDataEnShift);
i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
i2c.hw_capable = true;
else
i2c.hw_capable = false;
if (gpio->sucI2cId.ucAccess == 0xa0)
i2c.mm_i2c = true;
else
i2c.mm_i2c = false;
i2c.i2c_id = gpio->sucI2cId.ucAccess;
if (i2c.mask_clk_reg)
i2c.valid = true;
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
break;
}
}
@ -169,8 +189,6 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
int i, num_indices;
char stmp[32];
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
@ -179,60 +197,12 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
i2c.valid = false;
/* some evergreen boards have bad data for this entry */
if (ASIC_IS_DCE4(rdev)) {
if ((i == 7) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
(gpio->sucI2cId.ucAccess == 0)) {
gpio->sucI2cId.ucAccess = 0x97;
gpio->ucDataMaskShift = 8;
gpio->ucDataEnShift = 8;
gpio->ucDataY_Shift = 8;
gpio->ucDataA_Shift = 8;
}
}
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
/* some DCE3 boards have bad data for this entry */
if (ASIC_IS_DCE3(rdev)) {
if ((i == 4) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
(gpio->sucI2cId.ucAccess == 0x94))
gpio->sucI2cId.ucAccess = 0x14;
}
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
i2c.en_data_mask = (1 << gpio->ucDataEnShift);
i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
i2c.hw_capable = true;
else
i2c.hw_capable = false;
if (gpio->sucI2cId.ucAccess == 0xa0)
i2c.mm_i2c = true;
else
i2c.mm_i2c = false;
i2c.i2c_id = gpio->sucI2cId.ucAccess;
if (i2c.mask_clk_reg) {
i2c.valid = true;
if (i2c.valid) {
sprintf(stmp, "0x%x", i2c.i2c_id);
rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
}

View file

@ -93,7 +93,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
uint64_t *chunk_array_ptr;
unsigned size, i;
unsigned size, i, flags = 0;
if (!cs->num_chunks) {
return 0;
@ -140,6 +140,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
!p->chunks[i].length_dw) {
return -EINVAL;
}
p->chunks[i].length_dw = user_chunk.length_dw;
p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
@ -155,6 +159,9 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[i].user_ptr, size)) {
return -EFAULT;
}
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
flags = p->chunks[i].kdata[0];
}
} else {
p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
@ -174,6 +181,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[p->chunk_ib_idx].length_dw);
return -EINVAL;
}
p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
return 0;
}
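
The parser above reads an optional RADEON_CHUNK_ID_FLAGS chunk (rejecting a zero-length one) and latches RADEON_CS_KEEP_TILING_FLAGS into p->keep_tiling_flags. From userspace, requesting the new behaviour amounts to appending one extra chunk to the CS submission; a hedged sketch using the UAPI names from radeon_drm.h, with the surrounding submission code assumed and the header path possibly varying between drm/ and libdrm/:

#include <stdint.h>
#include <drm/radeon_drm.h>

static void add_flags_chunk(struct drm_radeon_cs_chunk *chunk, uint32_t *flags_dw)
{
        *flags_dw = RADEON_CS_KEEP_TILING_FLAGS;    /* first dword of the chunk */

        chunk->chunk_id   = RADEON_CHUNK_ID_FLAGS;
        chunk->length_dw  = 1;                      /* must be non-zero, see the parser check */
        chunk->chunk_data = (uintptr_t)flags_dw;    /* user pointer to the flag dword */
}
/* The chunk is then referenced from the chunk-pointer array passed in
 * struct drm_radeon_cs before calling the CS ioctl. */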

View file

@ -53,9 +53,10 @@
* 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
* 2.10.0 - fusion 2D tiling
* 2.11.0 - backend map, initial compute support for the CS checker
* 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
*/
#define KMS_DRIVER_MAJOR 2
#define KMS_DRIVER_MINOR 11
#define KMS_DRIVER_MINOR 12
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
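
Since the flag is only honoured from KMS driver version 2.12 on, userspace is expected to gate its use on the reported version. A small sketch using libdrm's drmGetVersion(), assuming a libdrm build environment:

#include <stdbool.h>
#include <xf86drm.h>

static bool kernel_keeps_tiling_flags(int fd)
{
        drmVersionPtr v = drmGetVersion(fd);
        bool ok;

        if (!v)
                return false;
        ok = v->version_major > 2 ||
             (v->version_major == 2 && v->version_minor >= 12);
        drmFreeVersion(v);
        return ok;
}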

View file

@ -574,10 +574,16 @@ retry:
return ret;
spin_lock(&glob->lru_lock);
if (unlikely(list_empty(&bo->ddestroy))) {
spin_unlock(&glob->lru_lock);
return 0;
}
ret = ttm_bo_reserve_locked(bo, interruptible,
no_wait_reserve, false, 0);
if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
if (unlikely(ret != 0)) {
spin_unlock(&glob->lru_lock);
return ret;
}

View file

@ -991,14 +991,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
uc = &priv->cards[i];
}
if (!uc)
return -EINVAL;
if (!uc) {
ret_val = -EINVAL;
goto done;
}
if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
return -EINVAL;
if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
ret_val = -EINVAL;
goto done;
}
if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
return -EINVAL;
if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
ret_val = -EINVAL;
goto done;
}
vga_put(pdev, io_state);
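
The vgaarb change converts three early "return -EINVAL" statements into "ret_val = -EINVAL; goto done;" so the buffer allocated earlier in vga_arb_write() is freed on every path (the "missing kfree" from the commit subject). A standalone sketch of that goto-based cleanup pattern, with names invented for illustration:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int handle_command(const char *buf, size_t count)
{
        int ret_val = (int)count;
        char *kbuf = malloc(count + 1);

        if (!kbuf)
                return -ENOMEM;
        memcpy(kbuf, buf, count);
        kbuf[count] = '\0';

        if (strncmp(kbuf, "lock ", 5) != 0) {
                ret_val = -EINVAL;      /* a bare "return -EINVAL" here would leak kbuf */
                goto done;
        }
        /* ... act on the parsed command ... */

done:
        free(kbuf);
        return ret_val;
}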

View file

@ -235,6 +235,8 @@ struct drm_mode_fb_cmd {
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
#define DRM_MODE_FB_DIRTY_FLAGS 0x03
#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
/*
* Mark a region of a framebuffer as dirty.
*

View file

@ -874,6 +874,10 @@ struct drm_radeon_gem_pwrite {
#define RADEON_CHUNK_ID_RELOCS 0x01
#define RADEON_CHUNK_ID_IB 0x02
#define RADEON_CHUNK_ID_FLAGS 0x03
/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
#define RADEON_CS_KEEP_TILING_FLAGS 0x01
struct drm_radeon_cs_chunk {
uint32_t chunk_id;