Merge tag 'drm-intel-next-2013-09-21-merged' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
drm-intel-next-2013-09-21:
- clock state handling rework from Ville
- l3 parity handling fixes for hsw from Ben
- some more watermark improvements from Ville
- ban badly behaved contexts from Mika
- a few vlv improvements from Jesse
- VGA power domain handling from Ville

drm-intel-next-2013-09-06:
- Basic mipi dsi support from Jani. Not yet converted over to drm_bridge
  since that was too fresh, but the porting is in progress already.
- More vma patches from Ben, this time the code to convert the execbuffer
  code. Now that the shrinker recursion bug is tracked down we can move
  ahead here again. Yay!
- Optimize hw context switching to not generate needless interrupts (Chris
  Wilson). Also some shuffling for the outstanding request allocation.
- Opregion support for SWSCI, although not yet fully wired up (we need a
  bit of runtime D3 support for that apparently, due to Windows design
  deficiencies), from Jani Nikula.
- A few smaller changes all over.

[airlied: merge conflict fix in i9xx_set_pipeconf]

* tag 'drm-intel-next-2013-09-21-merged' of git://people.freedesktop.org/~danvet/drm-intel: (119 commits)
  drm/i915: assume all GM45 Acer laptops use inverted backlight PWM
  drm/i915: cleanup a min_t() cast
  drm/i915: Pull intel_init_power_well() out of intel_modeset_init_hw()
  drm/i915: Add POWER_DOMAIN_VGA
  drm/i915: Refactor power well refcount inc/dec operations
  drm/i915: Add intel_display_power_{get, put} to request power for specific domains
  drm/i915: Change i915_request power well handling
  drm/i915: POSTING_READ IPS_CTL before waiting for the vblank
  drm/i915: don't disable ERR_INT on the IRQ handler
  drm/i915/vlv: disable rc6p and rc6pp residency reporting on BYT
  drm/i915/vlv: honor i915_enable_rc6 boot param on VLV
  drm/i915: s/HAS_L3_GPU_CACHE/HAS_L3_DPF
  drm/i915: Do remaps for all contexts
  drm/i915: Keep a list of all contexts
  drm/i915: Make l3 remapping use the ring
  drm/i915: Add second slice l3 remapping
  drm/i915: Fix HSW parity test
  drm/i915: dump crtc timings from the pipe config
  drm/i915: register backlight device also when backlight class is a module
  drm/i915: write D_COMP using the mailbox
  ...

Conflicts:
	drivers/gpu/drm/i915/intel_display.c
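The per-context ban referenced above boils down to a fixed time window derived from the hangcheck period: a context blamed for a second hang within the window gets banned. Below is a minimal user-space sketch of that arithmetic, mirroring the DRM_I915_CTX_BAN_PERIOD macro and the i915_context_is_banned() check added in the diff; the struct and function names here are illustrative stand-ins, not the kernel's own.

```c
#include <stdio.h>
#include <stdbool.h>

/* Mirrors the macros added in i915_drv.h (see diff below). */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
/* Hang the GPU twice within this many seconds and the context is banned. */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8 * DRM_I915_HANGCHECK_PERIOD, 1000)

/* Illustrative stand-in for struct i915_ctx_hang_stats. */
struct ctx_hang_stats {
	unsigned long guilty_ts; /* seconds timestamp of the last guilty hang */
	bool banned;
};

/* Same decision logic as i915_context_is_banned() in the diff. */
static bool context_is_banned(const struct ctx_hang_stats *hs, unsigned long now)
{
	const unsigned long elapsed = now - hs->guilty_ts;

	if (hs->banned)
		return true;
	return elapsed <= DRM_I915_CTX_BAN_PERIOD; /* second hang inside window */
}

int main(void)
{
	struct ctx_hang_stats hs = { .guilty_ts = 100, .banned = false };

	printf("ban window: %d s\n", DRM_I915_CTX_BAN_PERIOD);              /* 12 */
	printf("hang at t=105: banned=%d\n", context_is_banned(&hs, 105)); /* 1 */
	printf("hang at t=120: banned=%d\n", context_is_banned(&hs, 120)); /* 0 */
	return 0;
}
```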
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
@@ -202,6 +202,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
 	{ DRM_MODE_CONNECTOR_TV, "TV" },
 	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
 	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+	{ DRM_MODE_CONNECTOR_DSI, "DSI" },
 };
 
 static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -211,6 +212,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
 	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
 	{ DRM_MODE_ENCODER_TVDAC, "TV" },
 	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+	{ DRM_MODE_ENCODER_DSI, "DSI" },
 };
 
 void drm_connector_ida_init(void)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
@@ -21,6 +21,9 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  intel_display.o \
 	  intel_crt.o \
 	  intel_lvds.o \
+	  intel_dsi.o \
+	  intel_dsi_cmd.o \
+	  intel_dsi_pll.o \
 	  intel_bios.o \
 	  intel_ddi.o \
 	  intel_dp.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
@@ -76,17 +76,6 @@ struct intel_dvo_dev_ops {
 	int (*mode_valid)(struct intel_dvo_device *dvo,
 			  struct drm_display_mode *mode);
 
-	/*
-	 * Callback to adjust the mode to be set in the CRTC.
-	 *
-	 * This allows an output to adjust the clock or even the entire set of
-	 * timings, which is used for panels with fixed timings or for
-	 * buses with clock limitations.
-	 */
-	bool (*mode_fixup)(struct intel_dvo_device *dvo,
-			   const struct drm_display_mode *mode,
-			   struct drm_display_mode *adjusted_mode);
-
 	/*
 	 * Callback for preparing mode changes on an output
 	 */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -145,6 +145,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (%s)", obj->ring->name);
 }
 
+static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
+{
+	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
+	seq_putc(m, ' ');
+}
+
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1442,6 +1449,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
+	struct i915_hw_context *ctx;
 	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
@@ -1460,12 +1468,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		seq_putc(m, '\n');
 	}
 
-	for_each_ring(ring, dev_priv, i) {
-		if (ring->default_context) {
-			seq_printf(m, "HW default context %s ring ", ring->name);
-			describe_obj(m, ring->default_context->obj);
-			seq_putc(m, '\n');
-		}
+	list_for_each_entry(ctx, &dev_priv->context_list, link) {
+		seq_puts(m, "HW context ");
+		describe_ctx(m, ctx);
+		for_each_ring(ring, dev_priv, i)
+			if (ring->default_context == ctx)
+				seq_printf(m, "(default context %s) ", ring->name);
+
+		describe_obj(m, ctx->obj);
+		seq_putc(m, '\n');
 	}
 
 	mutex_unlock(&dev->mode_config.mutex);
@@ -1610,27 +1621,27 @@ static int i915_dpio_info(struct seq_file *m, void *data)
 	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
 
 	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_DIV_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
 	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_DIV_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
 
 	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
 	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
 
 	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
 	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
 
 	seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
 	seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
 
 	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+		   vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
 
 	mutex_unlock(&dev_priv->dpio_lock);
 
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
@@ -52,7 +52,7 @@
 	intel_ring_emit(LP_RING(dev_priv), x)
 
 #define ADVANCE_LP_RING() \
-	intel_ring_advance(LP_RING(dev_priv))
+	__intel_ring_advance(LP_RING(dev_priv))
 
 /**
  * Lock test for when it's just for synchronization of ring access.
@@ -1324,6 +1324,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
 
+	intel_init_power_well(dev);
+
 	intel_modeset_gem_init(dev);
 
 	/* Always safe in the mode setting case. */
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
@@ -576,11 +576,20 @@ static void intel_resume_hotplug(struct drm_device *dev)
 	drm_helper_hpd_irq_event(dev);
 }
 
-static int __i915_drm_thaw(struct drm_device *dev)
+static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int error = 0;
 
+	intel_uncore_sanitize(dev);
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+	    restore_gtt_mappings) {
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_restore_gtt_mappings(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
 	i915_restore_state(dev);
 	intel_opregion_setup(dev);
 
@@ -596,6 +605,8 @@ static int __i915_drm_thaw(struct drm_device *dev)
 		/* We need working interrupts for modeset enabling ... */
 		drm_irq_install(dev);
 
+		intel_init_power_well(dev);
+
 		intel_modeset_init_hw(dev);
 
 		drm_modeset_lock_all(dev);
@@ -640,19 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev)
 
 static int i915_drm_thaw(struct drm_device *dev)
 {
-	int error = 0;
-
-	intel_uncore_sanitize(dev);
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		mutex_lock(&dev->struct_mutex);
-		i915_gem_restore_gtt_mappings(dev);
-		mutex_unlock(&dev->struct_mutex);
-	}
-
-	__i915_drm_thaw(dev);
-
-	return error;
+	return __i915_drm_thaw(dev, true);
 }
 
 int i915_resume(struct drm_device *dev)
@@ -668,20 +667,12 @@ int i915_resume(struct drm_device *dev)
 
 	pci_set_master(dev->pdev);
 
-	intel_uncore_sanitize(dev);
-
 	/*
 	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
-	 * earlier) need this since the BIOS might clear all our scratch PTEs.
+	 * earlier) need to restore the GTT mappings since the BIOS might clear
+	 * all our scratch PTEs.
 	 */
-	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
-	    !dev_priv->opregion.header) {
-		mutex_lock(&dev->struct_mutex);
-		i915_gem_restore_gtt_mappings(dev);
-		mutex_unlock(&dev->struct_mutex);
-	}
-
-	ret = __i915_drm_thaw(dev);
+	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
 	if (ret)
 		return ret;
 
@@ -719,24 +710,19 @@ int i915_reset(struct drm_device *dev)
 
 	simulated = dev_priv->gpu_error.stop_rings != 0;
 
-	if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
-		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
-		ret = -ENODEV;
-	} else {
-		ret = intel_gpu_reset(dev);
-
-		/* Also reset the gpu hangman. */
-		if (simulated) {
-			DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
-			dev_priv->gpu_error.stop_rings = 0;
-			if (ret == -ENODEV) {
-				DRM_ERROR("Reset not implemented, but ignoring "
-					  "error for simulated gpu hangs\n");
-				ret = 0;
-			}
-		} else
-			dev_priv->gpu_error.last_reset = get_seconds();
+	ret = intel_gpu_reset(dev);
+
+	/* Also reset the gpu hangman. */
+	if (simulated) {
+		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
+		dev_priv->gpu_error.stop_rings = 0;
+		if (ret == -ENODEV) {
+			DRM_ERROR("Reset not implemented, but ignoring "
+				  "error for simulated gpu hangs\n");
+			ret = 0;
+		}
 	}
 
 	if (ret) {
 		DRM_ERROR("Failed to reset chip.\n");
 		mutex_unlock(&dev->struct_mutex);
@@ -799,6 +785,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct intel_device_info *intel_info =
 		(struct intel_device_info *) ent->driver_data;
 
+	if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
+		DRM_INFO("This hardware requires preliminary hardware support.\n"
+			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+		return -ENODEV;
+	}
+
 	/* Only bind to function 0 of the device. Early generations
 	 * used function 1 as a placeholder for multi-head. This causes
 	 * us confusion instead, especially on the systems where both
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
@@ -99,6 +99,7 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_TRANSCODER_B,
 	POWER_DOMAIN_TRANSCODER_C,
 	POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
+	POWER_DOMAIN_VGA,
 };
 
 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
@@ -225,6 +226,8 @@ struct intel_opregion {
 	struct opregion_header __iomem *header;
 	struct opregion_acpi __iomem *acpi;
 	struct opregion_swsci __iomem *swsci;
+	u32 swsci_gbda_sub_functions;
+	u32 swsci_sbcb_sub_functions;
 	struct opregion_asle __iomem *asle;
 	void __iomem *vbt;
 	u32 __iomem *lid_state;
@@ -326,6 +329,8 @@ struct drm_i915_error_state {
 	u32 *active_bo_count, *pinned_bo_count;
 	struct intel_overlay_error_state *overlay;
 	struct intel_display_error_state *display;
+	int hangcheck_score[I915_NUM_RINGS];
+	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
 };
 
 struct intel_crtc_config;
@@ -357,7 +362,7 @@ struct drm_i915_display_funcs {
 			  int target, int refclk,
 			  struct dpll *match_clock,
 			  struct dpll *best_clock);
-	void (*update_wm)(struct drm_device *dev);
+	void (*update_wm)(struct drm_crtc *crtc);
 	void (*update_sprite_wm)(struct drm_plane *plane,
 				 struct drm_crtc *crtc,
 				 uint32_t sprite_width, int pixel_size,
@@ -367,7 +372,6 @@ struct drm_i915_display_funcs {
 	 * fills out the pipe-config with the hw state. */
 	bool (*get_pipe_config)(struct intel_crtc *,
 				struct intel_crtc_config *);
-	void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
@@ -420,6 +424,7 @@ struct intel_uncore {
 	func(is_ivybridge) sep \
 	func(is_valleyview) sep \
 	func(is_haswell) sep \
+	func(is_preliminary) sep \
 	func(has_force_wake) sep \
 	func(has_fbc) sep \
 	func(has_pipe_cxsr) sep \
@@ -568,6 +573,13 @@ struct i915_vma {
 	/** This vma's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
 
+	/**
+	 * Used for performing relocations during execbuffer insertion.
+	 */
+	struct hlist_node exec_node;
+	unsigned long exec_handle;
+	struct drm_i915_gem_exec_object2 *exec_entry;
+
 };
 
 struct i915_ctx_hang_stats {
@@ -576,6 +588,12 @@ struct i915_ctx_hang_stats {
 
 	/* This context had batch active when hang was declared */
 	unsigned batch_active;
+
+	/* Time when this context was last blamed for a GPU reset */
+	unsigned long guilty_ts;
+
+	/* This context is banned to submit more work */
+	bool banned;
 };
 
 /* This must match up with the value previously used for execbuf2.rsvd1. */
@@ -584,10 +602,13 @@ struct i915_hw_context {
 	struct kref ref;
 	int id;
 	bool is_initialized;
+	uint8_t remap_slice;
 	struct drm_i915_file_private *file_priv;
 	struct intel_ring_buffer *ring;
 	struct drm_i915_gem_object *obj;
 	struct i915_ctx_hang_stats hang_stats;
+
+	struct list_head link;
 };
 
 struct i915_fbc {
@@ -900,9 +921,11 @@ struct i915_ums_state {
 	int mm_suspended;
 };
 
+#define MAX_L3_SLICES 2
 struct intel_l3_parity {
-	u32 *remap_info;
+	u32 *remap_info[MAX_L3_SLICES];
 	struct work_struct error_work;
+	int which_slice;
 };
 
 struct i915_gem_mm {
@@ -977,6 +1000,9 @@ struct i915_gpu_error {
 	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+	/* Hang gpu twice in this window and your context gets banned */
+#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
+
 	struct timer_list hangcheck_timer;
 
 	/* For reset and error_state handling. */
@@ -985,8 +1011,6 @@ struct i915_gpu_error {
 	struct drm_i915_error_state *first_error;
 	struct work_struct work;
 
-	unsigned long last_reset;
-
 	/**
 	 * State variable and reset counter controlling the reset flow
 	 *
@@ -1058,6 +1082,11 @@ struct intel_vbt_data {
 	int edp_bpp;
 	struct edp_power_seq edp_pps;
 
+	/* MIPI DSI */
+	struct {
+		u16 panel_id;
+	} dsi;
+
 	int crt_ddc_pin;
 
 	int child_dev_num;
@@ -1318,6 +1347,7 @@ typedef struct drm_i915_private {
 
 	bool hw_contexts_disabled;
 	uint32_t hw_context_size;
+	struct list_head context_list;
 
 	u32 fdi_rx_config;
 
@@ -1398,8 +1428,6 @@ struct drm_i915_gem_object {
 	struct list_head ring_list;
 	/** Used in execbuf to temporarily hold a ref */
 	struct list_head obj_exec_link;
-	/** This object's place in the batchbuffer or on the eviction list */
-	struct list_head exec_list;
 
 	/**
	 * This is set if the object is on the active lists (has pending
@@ -1485,13 +1513,6 @@ struct drm_i915_gem_object {
 	void *dma_buf_vmapping;
 	int vmapping_count;
 
-	/**
-	 * Used for performing relocations during execbuffer insertion.
-	 */
-	struct hlist_node exec_node;
-	unsigned long exec_handle;
-	struct drm_i915_gem_exec_object2 *exec_entry;
-
 	struct intel_ring_buffer *ring;
 
 	/** Breadcrumb of last rendering to the buffer. */
@@ -1600,6 +1621,9 @@ struct drm_i915_file_private {
 				 ((dev)->pci_device & 0xFF00) == 0x0C00)
 #define IS_ULT(dev)		(IS_HASWELL(dev) && \
 				 ((dev)->pci_device & 0xFF00) == 0x0A00)
+#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
+				 ((dev)->pci_device & 0x00F0) == 0x0020)
+#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 /*
  * The genX designation typically refers to the render engine, so render
@@ -1668,7 +1692,9 @@ struct drm_i915_file_private {
 
 #define HAS_FORCE_WAKE(dev)	(INTEL_INFO(dev)->has_force_wake)
 
-#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+/* DPF == dynamic parity feature */
+#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
 
 #define GT_FREQUENCY_MULTIPLIER 50
 
@@ -1828,8 +1854,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
@@ -1931,7 +1955,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_l3_remap(struct drm_device *dev);
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
@@ -2090,6 +2114,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
 					  unsigned cache_level,
 					  bool mappable,
 					  bool nonblock);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
 
 /* i915_gem_stolen.c */
@@ -2182,15 +2207,30 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
 extern void intel_i2c_reset(struct drm_device *dev);
 
 /* intel_opregion.c */
+struct intel_encoder;
 extern int intel_opregion_setup(struct drm_device *dev);
 #ifdef CONFIG_ACPI
 extern void intel_opregion_init(struct drm_device *dev);
 extern void intel_opregion_fini(struct drm_device *dev);
 extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+					 bool enable);
+extern int intel_opregion_notify_adapter(struct drm_device *dev,
+					 pci_power_t state);
 #else
 static inline void intel_opregion_init(struct drm_device *dev) { return; }
 static inline void intel_opregion_fini(struct drm_device *dev) { return; }
 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+	return 0;
+}
+static inline int
+intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+	return 0;
+}
 #endif
 
 /* intel_acpi.c */
@@ -2252,8 +2292,16 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
 void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
 		   enum intel_sbi_destination destination);
 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
@@ -41,6 +41,9 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *o
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
 						   bool force);
+static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly);
 static __must_check int
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   unsigned alignment,
@@ -432,11 +435,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		 * optimizes for the case when the gpu will dirty the data
 		 * anyway again before the next pread happens. */
 		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
-		if (i915_gem_obj_bound_any(obj)) {
-			ret = i915_gem_object_set_to_gtt_domain(obj, false);
-			if (ret)
-				return ret;
-		}
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
 	}
 
 	ret = i915_gem_object_get_pages(obj);
@@ -748,11 +749,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		 * optimizes for the case when the gpu will use the data
 		 * right away and we therefore have to clflush anyway. */
 		needs_clflush_after = cpu_write_needs_clflush(obj);
-		if (i915_gem_obj_bound_any(obj)) {
-			ret = i915_gem_object_set_to_gtt_domain(obj, true);
-			if (ret)
-				return ret;
-		}
+		ret = i915_gem_object_wait_rendering(obj, false);
+		if (ret)
+			return ret;
 	}
 	/* Same trick applies to invalidate partially written cachelines read
 	 * before writing. */
@@ -966,7 +965,7 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
 	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
 	ret = 0;
-	if (seqno == ring->outstanding_lazy_request)
+	if (seqno == ring->outstanding_lazy_seqno)
 		ret = i915_add_request(ring, NULL);
 
 	return ret;
@@ -2078,11 +2077,10 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
-	request = kmalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL)
+	request = ring->preallocated_lazy_request;
+	if (WARN_ON(request == NULL))
 		return -ENOMEM;
 
-
 	/* Record the position of the start of the request so that
 	 * should we detect the updated seqno part-way through the
 	 * GPU processing the request, we never over-estimate the
@@ -2091,17 +2089,13 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 	request_ring_position = intel_ring_get_tail(ring);
 
 	ret = ring->add_request(ring);
-	if (ret) {
-		kfree(request);
+	if (ret)
 		return ret;
-	}
 
 	request->seqno = intel_ring_get_seqno(ring);
 	request->ring = ring;
 	request->head = request_start;
 	request->tail = request_ring_position;
-	request->ctx = ring->last_context;
-	request->batch_obj = obj;
 
 	/* Whilst this request exists, batch_obj will be on the
 	 * active_list, and so will hold the active reference. Only when this
@@ -2109,7 +2103,12 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 	 * inactive_list and lose its active reference. Hence we do not need
 	 * to explicitly hold another reference here.
 	 */
+	request->batch_obj = obj;
 
+	/* Hold a reference to the current context so that we can inspect
+	 * it later in case a hangcheck error event fires.
+	 */
+	request->ctx = ring->last_context;
 	if (request->ctx)
 		i915_gem_context_reference(request->ctx);
 
@@ -2129,7 +2128,8 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 	}
 
 	trace_i915_gem_request_add(ring, request->seqno);
-	ring->outstanding_lazy_request = 0;
+	ring->outstanding_lazy_seqno = 0;
+	ring->preallocated_lazy_request = NULL;
 
 	if (!dev_priv->ums.mm_suspended) {
 		i915_queue_hangcheck(ring->dev);
@@ -2224,6 +2224,21 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
 	return false;
 }
 
+static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
+{
+	const unsigned long elapsed = get_seconds() - hs->guilty_ts;
+
+	if (hs->banned)
+		return true;
+
+	if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+		DRM_ERROR("context hanging too fast, declaring banned!\n");
+		return true;
+	}
+
+	return false;
+}
+
 static void i915_set_reset_status(struct intel_ring_buffer *ring,
 				  struct drm_i915_gem_request *request,
 				  u32 acthd)
@@ -2260,10 +2275,13 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
 		hs = &request->file_priv->hang_stats;
 
 	if (hs) {
-		if (guilty)
+		if (guilty) {
+			hs->banned = i915_context_is_banned(hs);
 			hs->batch_active++;
-		else
+			hs->guilty_ts = get_seconds();
+		} else {
 			hs->batch_pending++;
+		}
 	}
 }
 
@@ -2641,11 +2659,17 @@ int i915_vma_unbind(struct i915_vma *vma)
 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 	int ret;
 
+	/* For now we only ever use 1 vma per object */
+	WARN_ON(!list_is_singular(&obj->vma_list));
+
 	if (list_empty(&vma->vma_link))
 		return 0;
 
-	if (!drm_mm_node_allocated(&vma->node))
-		goto destroy;
+	if (!drm_mm_node_allocated(&vma->node)) {
+		i915_gem_vma_destroy(vma);
+
+		return 0;
+	}
 
 	if (obj->pin_count)
 		return -EBUSY;
@@ -2685,13 +2709,10 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	drm_mm_remove_node(&vma->node);
 
-destroy:
 	i915_gem_vma_destroy(vma);
 
 	/* Since the unbound list is global, only move to that list if
-	 * no more VMAs exist.
-	 * NB: Until we have real VMAs there will only ever be one */
-	WARN_ON(!list_empty(&obj->vma_list));
+	 * no more VMAs exist. */
 	if (list_empty(&obj->vma_list))
 		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
@@ -4015,7 +4036,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 {
 	INIT_LIST_HEAD(&obj->global_list);
 	INIT_LIST_HEAD(&obj->ring_list);
-	INIT_LIST_HEAD(&obj->exec_list);
 	INIT_LIST_HEAD(&obj->obj_exec_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 
@@ -4147,8 +4167,19 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	i915_gem_object_free(obj);
 }
 
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm)
 {
+	struct i915_vma *vma;
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->vm == vm)
+			return vma;
+
+	return NULL;
+}
+
+static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+					      struct i915_address_space *vm)
+{
 	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 	if (vma == NULL)
@@ -4169,10 +4200,29 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	return vma;
 }
 
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm)
+{
+	struct i915_vma *vma;
+
+	vma = i915_gem_obj_to_vma(obj, vm);
+	if (!vma)
+		vma = __i915_gem_vma_create(obj, vm);
+
+	return vma;
+}
+
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
 	WARN_ON(vma->node.allocated);
+
+	/* Keep the vma as a placeholder in the execbuffer reservation lists */
+	if (!list_empty(&vma->exec_list))
+		return;
+
 	list_del(&vma->vma_link);
+
 	kfree(vma);
 }
 
@@ -4209,36 +4259,35 @@ i915_gem_idle(struct drm_device *dev)
 	return 0;
 }
 
-void i915_gem_l3_remap(struct drm_device *dev)
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 misccpctl;
-	int i;
+	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
+	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
+	int i, ret;
 
-	if (!HAS_L3_GPU_CACHE(dev))
-		return;
+	if (!HAS_L3_DPF(dev) || !remap_info)
+		return 0;
 
-	if (!dev_priv->l3_parity.remap_info)
-		return;
-
-	misccpctl = I915_READ(GEN7_MISCCPCTL);
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-	POSTING_READ(GEN7_MISCCPCTL);
+	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+	if (ret)
+		return ret;
 
+	/*
+	 * Note: We do not worry about the concurrent register cacheline hang
+	 * here because no other code should access these registers other than
+	 * at initialization time.
+	 */
 	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
-		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
-		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
-			DRM_DEBUG("0x%x was already programmed to %x\n",
-				  GEN7_L3LOG_BASE + i, remap);
-		if (remap && !dev_priv->l3_parity.remap_info[i/4])
-			DRM_DEBUG_DRIVER("Clearing remapped register\n");
-		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, reg_base + i);
+		intel_ring_emit(ring, remap_info[i/4]);
 	}
 
-	/* Make sure all the writes land before disabling dop clock gating */
-	POSTING_READ(GEN7_L3LOG_BASE);
+	intel_ring_advance(ring);
 
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+	return ret;
 }
 
 void i915_gem_init_swizzling(struct drm_device *dev)
@@ -4330,7 +4379,7 @@ int
 i915_gem_init_hw(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;
 
 	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
 		return -EIO;
@@ -4338,20 +4387,26 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (dev_priv->ellc_size)
 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
+	if (IS_HSW_GT3(dev))
+		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
+	else
+		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+
 	if (HAS_PCH_NOP(dev)) {
 		u32 temp = I915_READ(GEN7_MSG_CTL);
 		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
 		I915_WRITE(GEN7_MSG_CTL, temp);
 	}
 
-	i915_gem_l3_remap(dev);
-
 	i915_gem_init_swizzling(dev);
 
 	ret = i915_gem_init_rings(dev);
 	if (ret)
 		return ret;
 
+	for (i = 0; i < NUM_L3_SLICES(dev); i++)
+		i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+
 	/*
 	 * XXX: There was some w/a described somewhere suggesting loading
 	 * contexts before PPGTT.
@@ -4523,6 +4578,7 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->vm_list);
 	i915_init_vm(dev_priv, &dev_priv->gtt.base);
 
+	INIT_LIST_HEAD(&dev_priv->context_list);
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4859,11 +4915,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 
 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
 {
-	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
-	struct i915_address_space *vm;
+	struct i915_vma *vma;
 
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-		if (i915_gem_obj_bound(o, vm))
+	list_for_each_entry(vma, &o->vma_list, vma_link)
+		if (drm_mm_node_allocated(&vma->node))
 			return true;
 
 	return false;
@@ -4920,27 +4975,3 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	mutex_unlock(&dev->struct_mutex);
 	return freed;
 }
-
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm)
-{
-	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		if (vma->vm == vm)
-			return vma;
-
-	return NULL;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-				  struct i915_address_space *vm)
-{
-	struct i915_vma *vma;
-
-	vma = i915_gem_obj_to_vma(obj, vm);
-	if (!vma)
-		vma = i915_gem_vma_create(obj, vm);
-
-	return vma;
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -73,7 +73,7 @@
  *
  * There are two confusing terms used above:
  * The "current context" means the context which is currently running on the
- * GPU. The GPU has loaded it's state already and has stored away the gtt
+ * GPU. The GPU has loaded its state already and has stored away the gtt
  * offset of the BO. The GPU is not actively referencing the data at this
  * offset, but it will on the next context switch. The only way to avoid this
  * is to do a GPU reset.
@@ -129,6 +129,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 	struct i915_hw_context *ctx = container_of(ctx_ref,
 						   typeof(*ctx), ref);
 
+	list_del(&ctx->link);
 	drm_gem_object_unreference(&ctx->obj->base);
 	kfree(ctx);
 }
@@ -147,6 +148,7 @@ create_hw_context(struct drm_device *dev,
 
 	kref_init(&ctx->ref);
 	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+	INIT_LIST_HEAD(&ctx->link);
 	if (ctx->obj == NULL) {
 		kfree(ctx);
 		DRM_DEBUG_DRIVER("Context object allocated failed\n");
@@ -166,6 +168,7 @@ create_hw_context(struct drm_device *dev,
 	 * assertion in the context switch code.
 	 */
 	ctx->ring = &dev_priv->ring[RCS];
+	list_add_tail(&ctx->link, &dev_priv->context_list);
 
 	/* Default context will never have a file_priv */
 	if (file_priv == NULL)
@@ -178,6 +181,10 @@ create_hw_context(struct drm_device *dev,
 
 	ctx->file_priv = file_priv;
 	ctx->id = ret;
+	/* NB: Mark all slices as needing a remap so that when the context first
+	 * loads it will restore whatever remap state already exists. If there
+	 * is no remap info, it will be a NOP. */
+	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
 
 	return ctx;
 
@@ -393,11 +400,11 @@ static int do_switch(struct i915_hw_context *to)
 	struct intel_ring_buffer *ring = to->ring;
 	struct i915_hw_context *from = ring->last_context;
 	u32 hw_flags = 0;
-	int ret;
+	int ret, i;
 
 	BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
 
-	if (from == to)
+	if (from == to && !to->remap_slice)
 		return 0;
 
 	ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
@@ -420,8 +427,6 @@ static int do_switch(struct i915_hw_context *to)
 
 	if (!to->is_initialized || is_default_context(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
-	else if (WARN_ON_ONCE(from == to)) /* not yet expected */
-		hw_flags |= MI_FORCE_RESTORE;
 
 	ret = mi_set_context(ring, to, hw_flags);
 	if (ret) {
@@ -429,6 +434,18 @@ static int do_switch(struct i915_hw_context *to)
 		return ret;
 	}
 
+	for (i = 0; i < MAX_L3_SLICES; i++) {
+		if (!(to->remap_slice & (1<<i)))
+			continue;
+
+		ret = i915_gem_l3_remap(ring, i);
+		/* If it failed, try again next round */
+		if (ret)
+			DRM_DEBUG_DRIVER("L3 remapping failed\n");
+		else
+			to->remap_slice &= ~(1<<i);
+	}
+
 	/* The backing object for the context is done after switching to the
 	 * *next* context. Therefore we cannot retire the previous context until
 	 * the next context has already started running. In fact, the below code
@@ -451,17 +468,7 @@ static int do_switch(struct i915_hw_context *to)
 		from->obj->dirty = 1;
 		BUG_ON(from->obj->ring != ring);
 
-		ret = i915_add_request(ring, NULL);
-		if (ret) {
-			/* Too late, we've already scheduled a context switch.
-			 * Try to undo the change so that the hw state is
-			 * consistent with out tracking. In case of emergency,
-			 * scream.
-			 */
-			WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
-			return ret;
-		}
-
+		/* obj is kept alive until the next request by its active ref */
 		i915_gem_object_unpin(from->obj);
 		i915_gem_context_unreference(from);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -37,6 +37,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 	if (vma->obj->pin_count)
 		return false;
 
+	if (WARN_ON(!list_empty(&vma->exec_list)))
+		return false;
+
 	list_add(&vma->exec_list, unwind);
 	return drm_mm_scan_add_block(&vma->node);
 }
@@ -113,7 +116,7 @@ none:
 	}
 
 	/* We expect the caller to unpin, evict all and try again, or give up.
-	 * So calling i915_gem_evict_everything() is unnecessary.
+	 * So calling i915_gem_evict_vm() is unnecessary.
 	 */
 	return -ENOSPC;
 
@@ -152,12 +155,46 @@ found:
 	return ret;
 }
 
+/**
+ * i915_gem_evict_vm - Try to free up VM space
+ *
+ * @vm: Address space to evict from
+ * @do_idle: Boolean directing whether to idle first.
+ *
+ * VM eviction is about freeing up virtual address space. If one wants fine
+ * grained eviction, they should see evict something for more details. In terms
+ * of freeing up actual system memory, this function may not accomplish the
+ * desired result. An object may be shared in multiple address space, and this
+ * function will not assert those objects be freed.
+ *
+ * Using do_idle will result in a more complete eviction because it retires, and
+ * inactivates current BOs.
+ */
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
+{
+	struct i915_vma *vma, *next;
+	int ret;
+
+	if (do_idle) {
+		ret = i915_gpu_idle(vm->dev);
+		if (ret)
+			return ret;
+
+		i915_gem_retire_requests(vm->dev);
+	}
+
+	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+		if (vma->obj->pin_count == 0)
+			WARN_ON(i915_vma_unbind(vma));
+
+	return 0;
+}
+
 int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct i915_address_space *vm;
 	struct i915_vma *vma, *next;
 	bool lists_empty = true;
 	int ret;
 
@@ -184,11 +221,8 @@ i915_gem_evict_everything(struct drm_device *dev)
 	i915_gem_retire_requests(dev);
 
 	/* Having flushed everything, unbind() should never raise an error */
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-		list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
-			if (vma->obj->pin_count == 0)
-				WARN_ON(i915_vma_unbind(vma));
-	}
+	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+		WARN_ON(i915_gem_evict_vm(vm, false));
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,24 +33,24 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
-struct eb_objects {
-	struct list_head objects;
+struct eb_vmas {
+	struct list_head vmas;
 	int and;
 	union {
-		struct drm_i915_gem_object *lut[0];
+		struct i915_vma *lut[0];
 		struct hlist_head buckets[0];
 	};
 };
 
-static struct eb_objects *
-eb_create(struct drm_i915_gem_execbuffer2 *args)
+static struct eb_vmas *
+eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
 {
-	struct eb_objects *eb = NULL;
+	struct eb_vmas *eb = NULL;
 
 	if (args->flags & I915_EXEC_HANDLE_LUT) {
 		int size = args->buffer_count;
-		size *= sizeof(struct drm_i915_gem_object *);
-		size += sizeof(struct eb_objects);
+		size *= sizeof(struct i915_vma *);
+		size += sizeof(struct eb_vmas);
 		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 	}
 
@@ -61,7 +61,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
 		while (count > 2*size)
 			count >>= 1;
 		eb = kzalloc(count*sizeof(struct hlist_head) +
-			     sizeof(struct eb_objects),
+			     sizeof(struct eb_vmas),
 			     GFP_TEMPORARY);
 		if (eb == NULL)
 			return eb;
@@ -70,64 +70,102 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
 	} else
 		eb->and = -args->buffer_count;
 
-	INIT_LIST_HEAD(&eb->objects);
+	INIT_LIST_HEAD(&eb->vmas);
 	return eb;
 }
 
 static void
-eb_reset(struct eb_objects *eb)
+eb_reset(struct eb_vmas *eb)
 {
 	if (eb->and >= 0)
 		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
 }
 
 static int
-eb_lookup_objects(struct eb_objects *eb,
-		  struct drm_i915_gem_exec_object2 *exec,
-		  const struct drm_i915_gem_execbuffer2 *args,
-		  struct drm_file *file)
+eb_lookup_vmas(struct eb_vmas *eb,
+	       struct drm_i915_gem_exec_object2 *exec,
+	       const struct drm_i915_gem_execbuffer2 *args,
+	       struct i915_address_space *vm,
+	       struct drm_file *file)
 {
-	int i;
+	struct drm_i915_gem_object *obj;
+	struct list_head objects;
+	int i, ret = 0;
 
+	INIT_LIST_HEAD(&objects);
 	spin_lock(&file->table_lock);
+	/* Grab a reference to the object and release the lock so we can lookup
+	 * or create the VMA without using GFP_ATOMIC */
 	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_i915_gem_object *obj;
-
 		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
 		if (obj == NULL) {
 			spin_unlock(&file->table_lock);
 			DRM_DEBUG("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
-			return -ENOENT;
+			ret = -ENOENT;
+			goto out;
 		}
 
-		if (!list_empty(&obj->exec_list)) {
+		if (!list_empty(&obj->obj_exec_link)) {
 			spin_unlock(&file->table_lock);
 			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 				   obj, exec[i].handle, i);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto out;
 		}
 
 		drm_gem_object_reference(&obj->base);
-		list_add_tail(&obj->exec_list, &eb->objects);
-
-		obj->exec_entry = &exec[i];
-		if (eb->and < 0) {
-			eb->lut[i] = obj;
-		} else {
-			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
-			obj->exec_handle = handle;
-			hlist_add_head(&obj->exec_node,
-				       &eb->buckets[handle & eb->and]);
-		}
+		list_add_tail(&obj->obj_exec_link, &objects);
 	}
 	spin_unlock(&file->table_lock);
 
-	return 0;
+	i = 0;
+	list_for_each_entry(obj, &objects, obj_exec_link) {
+		struct i915_vma *vma;
+
+		/*
+		 * NOTE: We can leak any vmas created here when something fails
+		 * later on. But that's no issue since vma_unbind can deal with
+		 * vmas which are not actually bound. And since only
+		 * lookup_or_create exists as an interface to get at the vma
+		 * from the (obj, vm) we don't run the risk of creating
+		 * duplicated vmas for the same vm.
+		 */
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+		if (IS_ERR(vma)) {
+			DRM_DEBUG("Failed to lookup VMA\n");
+			ret = PTR_ERR(vma);
+			goto out;
+		}
+
+		list_add_tail(&vma->exec_list, &eb->vmas);
+
+		vma->exec_entry = &exec[i];
+		if (eb->and < 0) {
+			eb->lut[i] = vma;
+		} else {
+			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
+			vma->exec_handle = handle;
+			hlist_add_head(&vma->exec_node,
+				       &eb->buckets[handle & eb->and]);
+		}
+		++i;
+	}
+
+
+out:
+	while (!list_empty(&objects)) {
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       obj_exec_link);
+		list_del_init(&obj->obj_exec_link);
+		if (ret)
+			drm_gem_object_unreference(&obj->base);
+	}
+	return ret;
 }
 
-static struct drm_i915_gem_object *
-eb_get_object(struct eb_objects *eb, unsigned long handle)
+static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 {
 	if (eb->and < 0) {
 		if (handle >= -eb->and)
@@ -139,34 +177,33 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
 
 		head = &eb->buckets[handle & eb->and];
 		hlist_for_each(node, head) {
-			struct drm_i915_gem_object *obj;
+			struct i915_vma *vma;
 
-			obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
-			if (obj->exec_handle == handle)
-				return obj;
+			vma = hlist_entry(node, struct i915_vma, exec_node);
+			if (vma->exec_handle == handle)
+				return vma;
 		}
 		return NULL;
 	}
 }
 
-static void
-eb_destroy(struct eb_objects *eb)
-{
-	while (!list_empty(&eb->objects)) {
-		struct drm_i915_gem_object *obj;
+static void eb_destroy(struct eb_vmas *eb) {
+	while (!list_empty(&eb->vmas)) {
+		struct i915_vma *vma;
 
-		obj = list_first_entry(&eb->objects,
-				       struct drm_i915_gem_object,
+		vma = list_first_entry(&eb->vmas,
+				       struct i915_vma,
 				       exec_list);
-		list_del_init(&obj->exec_list);
-		drm_gem_object_unreference(&obj->base);
+		list_del_init(&vma->exec_list);
+		drm_gem_object_unreference(&vma->obj->base);
 	}
 	kfree(eb);
 }
 
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
-	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+	return (HAS_LLC(obj->base.dev) ||
+		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
 		!obj->map_and_fenceable ||
 		obj->cache_level != I915_CACHE_NONE);
 }
@@ -179,7 +216,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
 	char *vaddr;
 	int ret = -EINVAL;
 
-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 	if (ret)
 		return ret;
 
@@ -223,22 +260,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
-				   struct eb_objects *eb,
+				   struct eb_vmas *eb,
 				   struct drm_i915_gem_relocation_entry *reloc,
 				   struct i915_address_space *vm)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
+	struct i915_vma *target_vma;
 	uint32_t target_offset;
 	int ret = -EINVAL;
 
 	/* we've already hold a reference to all valid objects */
-	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
-	if (unlikely(target_obj == NULL))
+	target_vma = eb_get_vma(eb, reloc->target_handle);
+	if (unlikely(target_vma == NULL))
 		return -ENOENT;
+	target_i915_obj = target_vma->obj;
+	target_obj = &target_vma->obj->base;
 
-	target_i915_obj = to_intel_bo(target_obj);
 	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
 
 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
@@ -320,14 +359,13 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-				    struct eb_objects *eb,
-				    struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
+				 struct eb_vmas *eb)
 {
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
 	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
 	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	int remain, ret;
 
 	user_relocs = to_user_ptr(entry->relocs_ptr);
@@ -346,8 +384,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 	do {
 		u64 offset = r->presumed_offset;
 
-		ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
-							 vm);
+		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
+							 vma->vm);
 		if (ret)
 			return ret;
 
@@ -368,17 +406,16 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
-					 struct eb_objects *eb,
-					 struct drm_i915_gem_relocation_entry *relocs,
-					 struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
+				      struct eb_vmas *eb,
+				      struct drm_i915_gem_relocation_entry *relocs)
 {
-	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	int i, ret;
 
 	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
-							 vm);
+		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
+							 vma->vm);
 		if (ret)
 			return ret;
 	}
@@ -387,10 +424,10 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb,
+i915_gem_execbuffer_relocate(struct eb_vmas *eb,
 			     struct i915_address_space *vm)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	int ret = 0;
 
 	/* This is the fast path and we cannot handle a pagefault whilst
@@ -401,8 +438,8 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
 	 * lockdep complains vehemently.
 	 */
 	pagefault_disable();
-	list_for_each_entry(obj, &eb->objects, exec_list) {
-		ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
+	list_for_each_entry(vma, &eb->vmas, exec_list) {
+		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
 		if (ret)
 			break;
 	}
@@ -415,31 +452,32 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 
 static int
-need_reloc_mappable(struct drm_i915_gem_object *obj)
+need_reloc_mappable(struct i915_vma *vma)
 {
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
-	return entry->relocation_count && !use_cpu_reloc(obj);
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
+		i915_is_ggtt(vma->vm);
 }
 
 static int
-i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
-				   struct intel_ring_buffer *ring,
-				   struct i915_address_space *vm,
-				   bool *need_reloc)
+i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
+				struct intel_ring_buffer *ring,
+				bool *need_reloc)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
+	struct drm_i915_gem_object *obj = vma->obj;
 	int ret;
 
 	need_fence =
 		has_fenced_gpu_access &&
 		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 		obj->tiling_mode != I915_TILING_NONE;
-	need_mappable = need_fence || need_reloc_mappable(obj);
+	need_mappable = need_fence || need_reloc_mappable(vma);
 
-	ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
+	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
 				  false);
 	if (ret)
 		return ret;
 
@@ -467,8 +505,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
 		obj->has_aliasing_ppgtt_mapping = 1;
 	}
 
-	if (entry->offset != i915_gem_obj_offset(obj, vm)) {
-		entry->offset = i915_gem_obj_offset(obj, vm);
+	if (entry->offset != vma->node.start) {
+		entry->offset = vma->node.start;
 		*need_reloc = true;
 	}
 
@@ -485,14 +523,15 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
 }
 
 static void
-i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 {
 	struct drm_i915_gem_exec_object2 *entry;
+	struct drm_i915_gem_object *obj = vma->obj;
 
-	if (!i915_gem_obj_bound_any(obj))
+	if (!drm_mm_node_allocated(&vma->node))
 		return;
 
-	entry = obj->exec_entry;
+	entry = vma->exec_entry;
 
 	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
 		i915_gem_object_unpin_fence(obj);
|
|||
|
||||
static int
|
||||
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
|
||||
struct list_head *objects,
|
||||
struct i915_address_space *vm,
|
||||
struct list_head *vmas,
|
||||
bool *need_relocs)
|
||||
{
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct list_head ordered_objects;
|
||||
struct i915_vma *vma;
|
||||
struct i915_address_space *vm;
|
||||
struct list_head ordered_vmas;
|
||||
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
|
||||
int retry;
|
||||
|
||||
INIT_LIST_HEAD(&ordered_objects);
|
||||
while (!list_empty(objects)) {
|
||||
if (list_empty(vmas))
|
||||
return 0;
|
||||
|
||||
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
|
||||
|
||||
INIT_LIST_HEAD(&ordered_vmas);
|
||||
while (!list_empty(vmas)) {
|
||||
struct drm_i915_gem_exec_object2 *entry;
|
||||
bool need_fence, need_mappable;
|
||||
|
||||
obj = list_first_entry(objects,
|
||||
struct drm_i915_gem_object,
|
||||
exec_list);
|
||||
entry = obj->exec_entry;
|
||||
vma = list_first_entry(vmas, struct i915_vma, exec_list);
|
||||
obj = vma->obj;
|
||||
entry = vma->exec_entry;
|
||||
|
||||
need_fence =
|
||||
has_fenced_gpu_access &&
|
||||
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
|
||||
obj->tiling_mode != I915_TILING_NONE;
|
||||
need_mappable = need_fence || need_reloc_mappable(obj);
|
||||
need_mappable = need_fence || need_reloc_mappable(vma);
|
||||
|
||||
if (need_mappable)
|
||||
list_move(&obj->exec_list, &ordered_objects);
|
||||
list_move(&vma->exec_list, &ordered_vmas);
|
||||
else
|
||||
list_move_tail(&obj->exec_list, &ordered_objects);
|
||||
list_move_tail(&vma->exec_list, &ordered_vmas);
|
||||
|
||||
obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
|
||||
obj->base.pending_write_domain = 0;
|
||||
obj->pending_fenced_gpu_access = false;
|
||||
}
|
||||
list_splice(&ordered_objects, objects);
|
||||
list_splice(&ordered_vmas, vmas);
|
||||
|
||||
/* Attempt to pin all of the buffers into the GTT.
|
||||
* This is done in 3 phases:
|
||||
|
@ -558,52 +602,52 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
|
|||
int ret = 0;
|
||||
|
||||
/* Unbind any ill-fitting objects or pin. */
|
||||
list_for_each_entry(obj, objects, exec_list) {
|
||||
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
|
||||
list_for_each_entry(vma, vmas, exec_list) {
|
||||
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
|
||||
bool need_fence, need_mappable;
|
||||
u32 obj_offset;
|
||||
|
||||
if (!i915_gem_obj_bound(obj, vm))
|
||||
obj = vma->obj;
|
||||
|
||||
if (!drm_mm_node_allocated(&vma->node))
|
||||
continue;
|
||||
|
||||
obj_offset = i915_gem_obj_offset(obj, vm);
|
||||
need_fence =
|
||||
has_fenced_gpu_access &&
|
||||
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
|
||||
obj->tiling_mode != I915_TILING_NONE;
|
||||
need_mappable = need_fence || need_reloc_mappable(obj);
|
||||
need_mappable = need_fence || need_reloc_mappable(vma);
|
||||
|
||||
WARN_ON((need_mappable || need_fence) &&
|
||||
!i915_is_ggtt(vm));
|
||||
!i915_is_ggtt(vma->vm));
|
||||
|
||||
if ((entry->alignment &&
|
||||
obj_offset & (entry->alignment - 1)) ||
|
||||
vma->node.start & (entry->alignment - 1)) ||
|
||||
(need_mappable && !obj->map_and_fenceable))
|
||||
ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
|
||||
ret = i915_vma_unbind(vma);
|
||||
else
|
||||
ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
|
||||
ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
||||
/* Bind fresh objects */
|
||||
list_for_each_entry(obj, objects, exec_list) {
|
||||
if (i915_gem_obj_bound(obj, vm))
|
||||
list_for_each_entry(vma, vmas, exec_list) {
|
||||
if (drm_mm_node_allocated(&vma->node))
|
||||
continue;
|
||||
|
||||
ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
|
||||
ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
||||
err: /* Decrement pin count for bound objects */
|
||||
list_for_each_entry(obj, objects, exec_list)
|
||||
i915_gem_execbuffer_unreserve_object(obj);
|
||||
list_for_each_entry(vma, vmas, exec_list)
|
||||
i915_gem_execbuffer_unreserve_vma(vma);
|
||||
|
||||
if (ret != -ENOSPC || retry++)
|
||||
return ret;
|
||||
|
||||
ret = i915_gem_evict_everything(ring->dev);
|
||||
ret = i915_gem_evict_vm(vm, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
} while (1);
|
||||
|
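/*
 * The reserve path above is a pin-or-evict retry loop: pin every vma,
 * and only if that fails with -ENOSPC evict the target address space
 * and try exactly once more. A minimal sketch of the same control
 * flow, with hypothetical pin_all()/evict_all() helpers standing in
 * for i915_gem_execbuffer_reserve_vma() and i915_gem_evict_vm():
 */
static int reserve_with_retry(struct list_head *vmas,
			      struct i915_address_space *vm)
{
	int retry = 0;

	do {
		int ret = pin_all(vmas);		/* hypothetical */

		/* success, a hard error, or a second -ENOSPC all end here */
		if (ret != -ENOSPC || retry++)
			return ret;

		ret = evict_all(vm);			/* hypothetical */
		if (ret)
			return ret;
	} while (1);
}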
@@ -614,24 +658,27 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
struct i915_address_space *vm)
struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
struct i915_vma *vma;
bool need_relocs;
int *reloc_offset;
int i, total, ret;
int count = args->buffer_count;

if (WARN_ON(list_empty(&eb->vmas)))
return 0;

vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

/* We may process another execbuffer during the unlock... */
while (!list_empty(&eb->objects)) {
obj = list_first_entry(&eb->objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
while (!list_empty(&eb->vmas)) {
vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
list_del_init(&vma->exec_list);
drm_gem_object_unreference(&vma->obj->base);
}

mutex_unlock(&dev->struct_mutex);

@@ -695,20 +742,19 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,

/* reacquire the objects */
eb_reset(eb);
ret = eb_lookup_objects(eb, exec, args, file);
ret = eb_lookup_vmas(eb, exec, args, vm, file);
if (ret)
goto err;

need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
if (ret)
goto err;

list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset],
vm);
list_for_each_entry(vma, &eb->vmas, exec_list) {
int offset = vma->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
reloc + reloc_offset[offset]);
if (ret)
goto err;
}

@@ -727,14 +773,15 @@ err:

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
struct list_head *vmas)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
int ret;

list_for_each_entry(obj, objects, exec_list) {
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;

@@ -809,13 +856,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct i915_address_space *vm,
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;

list_for_each_entry(obj, objects, exec_list) {
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;

@@ -825,8 +872,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

/* FIXME: This lookup gets fixed later <-- danvet */
list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
list_move_tail(&vma->mm_list, &vma->vm->active_list);
i915_gem_object_move_to_active(obj, ring);
if (obj->base.write_domain) {
obj->dirty = 1;

@@ -885,10 +931,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct eb_objects *eb;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
struct i915_ctx_hang_stats *hs;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 mask, flags;

@@ -1025,7 +1072,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}

eb = eb_create(args);
eb = eb_create(args, vm);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;

@@ -1033,18 +1080,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}

/* Look up object handles */
ret = eb_lookup_objects(eb, exec, args, file);
ret = eb_lookup_vmas(eb, exec, args, vm, file);
if (ret)
goto err;

/* take note of the batch buffer before we might reorder the lists */
batch_obj = list_entry(eb->objects.prev,
struct drm_i915_gem_object,
exec_list);
batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;

/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
if (ret)
goto err;

@@ -1054,7 +1099,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
eb, exec, vm);
eb, exec);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)

@@ -1076,10 +1121,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
if (ret)
goto err;

hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
if (IS_ERR(hs)) {
ret = PTR_ERR(hs);
goto err;
}

if (hs->banned) {
ret = -EIO;
goto err;
}

ret = i915_switch_context(ring, file, ctx_id);
if (ret)
goto err;

@@ -1131,7 +1187,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,

trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
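/*
 * The conversion in this patch swaps the execbuf bookkeeping from gem
 * objects to vmas, so each exec entry carries its (object, address
 * space) binding directly and per-vm lookups drop out of the hot path.
 * A rough sketch of the resulting iteration shape:
 */
static void walk_exec_list_sketch(struct eb_vmas *eb)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		/* the backing object comes straight off the vma */
		struct drm_i915_gem_object *obj = vma->obj;

		obj->base.pending_write_domain = 0;
		/* and the offset is just the drm_mm node start */
		vma->exec_entry->offset = vma->node.start;
	}
}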
@@ -395,7 +395,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;

vma = i915_gem_vma_create(obj, ggtt);
vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_out;

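/*
 * i915_gem_obj_lookup_or_create_vma() makes the preallocated-stolen
 * path idempotent: a vma that already exists for this (obj, vm) pair
 * is reused rather than shadowed by a duplicate. A sketch of the
 * semantics, with a hypothetical find_vma() lookup:
 */
static struct i915_vma *
lookup_or_create_vma_sketch(struct drm_i915_gem_object *obj,
			    struct i915_address_space *vm)
{
	struct i915_vma *vma = find_vma(obj, vm);	/* hypothetical */

	if (!vma)
		vma = i915_gem_vma_create(obj, vm);	/* may be ERR_PTR() */

	return vma;
}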
@@ -215,6 +215,24 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
}
}

static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
switch (a) {
case HANGCHECK_IDLE:
return "idle";
case HANGCHECK_WAIT:
return "wait";
case HANGCHECK_ACTIVE:
return "active";
case HANGCHECK_KICK:
return "kick";
case HANGCHECK_HUNG:
return "hung";
}

return "unknown";
}

static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
struct drm_i915_error_state *error,

@@ -255,6 +273,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
err_printf(m, " hangcheck: %s [%d]\n",
hangcheck_action_to_str(error->hangcheck_action[ring]),
error->hangcheck_score[ring]);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)

@@ -720,6 +741,9 @@ static void i915_record_ring_state(struct drm_device *dev,

error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail;

error->hangcheck_score[ring->id] = ring->hangcheck.score;
error->hangcheck_action[ring->id] = ring->hangcheck.action;
}

@@ -665,7 +665,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
crtc);
}

static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
static bool intel_hpd_irq_event(struct drm_device *dev,
struct drm_connector *connector)
{
enum drm_connector_status old_status;

@@ -673,11 +674,16 @@ static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *con
old_status = connector->status;

connector->status = connector->funcs->detect(connector, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
if (old_status == connector->status)
return false;

DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
drm_get_connector_name(connector),
old_status, connector->status);
return (old_status != connector->status);
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));

return true;
}
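/*
 * Returning bool ("did the connector status really change?") lets the
 * hotplug work function coalesce a storm of identical detections into
 * a single event. A hedged sketch of such a caller; the loop body is
 * illustrative, drm_kms_helper_hotplug_event() is the stock DRM helper:
 */
static void hotplug_rescan_sketch(struct drm_device *dev)
{
	struct drm_connector *connector;
	bool changed = false;

	list_for_each_entry(connector,
			    &dev->mode_config.connector_list, head)
		changed |= intel_hpd_irq_event(dev, connector);

	if (changed)
		drm_kms_helper_hotplug_event(dev);	/* one uevent per burst */
}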

/*
@@ -882,9 +888,10 @@ static void ivybridge_parity_work(struct work_struct *work)
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
l3_parity.error_work);
u32 error_status, row, bank, subbank;
char *parity_event[5];
char *parity_event[6];
uint32_t misccpctl;
unsigned long flags;
uint8_t slice = 0;

/* We must turn off DOP level clock gating to access the L3 registers.
* In order to prevent a get/put style interface, acquire struct mutex

@@ -892,55 +899,81 @@ static void ivybridge_parity_work(struct work_struct *work)
*/
mutex_lock(&dev_priv->dev->struct_mutex);

/* If we've screwed up tracking, just let the interrupt fire again */
if (WARN_ON(!dev_priv->l3_parity.which_slice))
goto out;

misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
POSTING_READ(GEN7_MISCCPCTL);

error_status = I915_READ(GEN7_L3CDERRST1);
row = GEN7_PARITY_ERROR_ROW(error_status);
bank = GEN7_PARITY_ERROR_BANK(error_status);
subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
u32 reg;

I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
GEN7_L3CDERRST1_ENABLE);
POSTING_READ(GEN7_L3CDERRST1);
slice--;
if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
break;

dev_priv->l3_parity.which_slice &= ~(1<<slice);

reg = GEN7_L3CDERRST1 + (slice * 0x200);

error_status = I915_READ(reg);
row = GEN7_PARITY_ERROR_ROW(error_status);
bank = GEN7_PARITY_ERROR_BANK(error_status);
subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
POSTING_READ(reg);

parity_event[0] = I915_L3_PARITY_UEVENT "=1";
parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
parity_event[5] = NULL;

kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
KOBJ_CHANGE, parity_event);

DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
slice, row, bank, subbank);

kfree(parity_event[4]);
kfree(parity_event[3]);
kfree(parity_event[2]);
kfree(parity_event[1]);
}

I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
WARN_ON(dev_priv->l3_parity.which_slice);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

mutex_unlock(&dev_priv->dev->struct_mutex);

parity_event[0] = I915_L3_PARITY_UEVENT "=1";
parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
parity_event[4] = NULL;

kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
KOBJ_CHANGE, parity_event);

DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
row, bank, subbank);

kfree(parity_event[3]);
kfree(parity_event[2]);
kfree(parity_event[1]);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

if (!HAS_L3_GPU_CACHE(dev))
if (!HAS_L3_DPF(dev))
return;

spin_lock(&dev_priv->irq_lock);
ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
spin_unlock(&dev_priv->irq_lock);

iir &= GT_PARITY_ERROR(dev);
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
dev_priv->l3_parity.which_slice |= 1 << 1;

if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
dev_priv->l3_parity.which_slice |= 1 << 0;

queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
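/*
 * Producer/consumer protocol of the rework above: the irq handler sets
 * one bit per faulting slice in l3_parity.which_slice (under irq_lock,
 * with the parity interrupt masked), and the work item drains the mask
 * with ffs() before re-enabling the interrupt. Condensed sketch:
 */
static void drain_parity_mask_sketch(struct drm_i915_private *dev_priv)
{
	u32 slice;

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		slice--;	/* ffs() returns a 1-based bit index */
		dev_priv->l3_parity.which_slice &= ~(1 << slice);
		/* per slice: read GEN7_L3CDERRST1 + slice * 0x200,
		 * clear the valid bit, emit a SLICE=n uevent */
	}
}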
@@ -975,8 +1008,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
i915_handle_error(dev, false);
}

if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
ivybridge_parity_error_irq_handler(dev);
if (gt_iir & GT_PARITY_ERROR(dev))
ivybridge_parity_error_irq_handler(dev, gt_iir);
}

#define HPD_STORM_DETECT_PERIOD 1000

@@ -1388,7 +1421,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
bool err_int_reenable = false;

atomic_inc(&dev_priv->irq_received);

@@ -1412,17 +1444,6 @@
POSTING_READ(SDEIER);
}

/* On Haswell, also mask ERR_INT because we don't want to risk
* generating "unclaimed register" interrupts from inside the interrupt
* handler. */
if (IS_HASWELL(dev)) {
spin_lock(&dev_priv->irq_lock);
err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
if (err_int_reenable)
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
spin_unlock(&dev_priv->irq_lock);
}

gt_iir = I915_READ(GTIIR);
if (gt_iir) {
if (INTEL_INFO(dev)->gen >= 6)

@@ -1452,13 +1473,6 @@
}
}

if (err_int_reenable) {
spin_lock(&dev_priv->irq_lock);
if (ivb_can_enable_err_int(dev))
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
spin_unlock(&dev_priv->irq_lock);
}

I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
if (!HAS_PCH_NOP(dev)) {

@@ -2021,6 +2035,8 @@ static void i915_hangcheck_elapsed(unsigned long data)

if (ring->hangcheck.seqno == seqno) {
if (ring_idle(ring, seqno)) {
ring->hangcheck.action = HANGCHECK_IDLE;

if (waitqueue_active(&ring->irq_queue)) {
/* Issue a wake-up to catch stuck h/w. */
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",

@@ -2049,6 +2065,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
acthd);

switch (ring->hangcheck.action) {
case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
break;
case HANGCHECK_ACTIVE:

@@ -2064,6 +2081,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
}
}
} else {
ring->hangcheck.action = HANGCHECK_ACTIVE;

/* Gradually reduce the count so that we catch DoS
* attempts across multiple batches.
*/

@@ -2254,10 +2273,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
pm_irqs = gt_irqs = 0;

dev_priv->gt_irq_mask = ~0;
if (HAS_L3_GPU_CACHE(dev)) {
if (HAS_L3_DPF(dev)) {
/* L3 parity interrupt is always unmasked. */
dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
gt_irqs |= GT_PARITY_ERROR(dev);
}

gt_irqs |= GT_RENDER_USER_INTERRUPT;

@@ -264,6 +264,11 @@
#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
#define MI_SEMAPHORE_SYNC_INVALID (3<<16)

#define MI_PREDICATE_RESULT_2 (0x2214)
#define LOWER_SLICE_ENABLED (1<<0)
#define LOWER_SLICE_DISABLED (0<<0)

/*
* 3D instructions used by the kernel
*/

@@ -346,6 +351,10 @@
#define IOSF_PORT_PUNIT 0x4
#define IOSF_PORT_NC 0x11
#define IOSF_PORT_DPIO 0x12
#define IOSF_PORT_GPIO_NC 0x13
#define IOSF_PORT_CCK 0x14
#define IOSF_PORT_CCU 0xA9
#define IOSF_PORT_GPS_CORE 0x48
#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)

@@ -372,6 +381,38 @@
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000

/* vlv2 north clock registers */
#define CCK_REG_DSI_PLL_FUSE 0x44
#define CCK_REG_DSI_PLL_CONTROL 0x48
#define DSI_PLL_VCO_EN (1 << 31)
#define DSI_PLL_LDO_GATE (1 << 30)
#define DSI_PLL_P1_POST_DIV_SHIFT 17
#define DSI_PLL_P1_POST_DIV_MASK (0x1ff << 17)
#define DSI_PLL_P2_MUX_DSI0_DIV2 (1 << 13)
#define DSI_PLL_P3_MUX_DSI1_DIV2 (1 << 12)
#define DSI_PLL_MUX_MASK (3 << 9)
#define DSI_PLL_MUX_DSI0_DSIPLL (0 << 10)
#define DSI_PLL_MUX_DSI0_CCK (1 << 10)
#define DSI_PLL_MUX_DSI1_DSIPLL (0 << 9)
#define DSI_PLL_MUX_DSI1_CCK (1 << 9)
#define DSI_PLL_CLK_GATE_MASK (0xf << 5)
#define DSI_PLL_CLK_GATE_DSI0_DSIPLL (1 << 8)
#define DSI_PLL_CLK_GATE_DSI1_DSIPLL (1 << 7)
#define DSI_PLL_CLK_GATE_DSI0_CCK (1 << 6)
#define DSI_PLL_CLK_GATE_DSI1_CCK (1 << 5)
#define DSI_PLL_LOCK (1 << 0)
#define CCK_REG_DSI_PLL_DIVIDER 0x4c
#define DSI_PLL_LFSR (1 << 31)
#define DSI_PLL_FRACTION_EN (1 << 30)
#define DSI_PLL_FRAC_COUNTER_SHIFT 27
#define DSI_PLL_FRAC_COUNTER_MASK (7 << 27)
#define DSI_PLL_USYNC_CNT_SHIFT 18
#define DSI_PLL_USYNC_CNT_MASK (0x1ff << 18)
#define DSI_PLL_N1_DIV_SHIFT 16
#define DSI_PLL_N1_DIV_MASK (3 << 16)
#define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)

/*
* DPIO - a special bus for various display related registers to hide behind
*
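/*
 * The CCK DSI PLL fields above are conventional shift/mask pairs; a
 * control word is built by masking each field into place. A sketch
 * with a made-up post divider, not a validated PLL setting:
 */
static u32 dsi_pll_ctrl_sketch(void)
{
	u32 p1 = 2;	/* placeholder divider value */

	return DSI_PLL_VCO_EN |
	       DSI_PLL_CLK_GATE_DSI0_DSIPLL |
	       ((p1 << DSI_PLL_P1_POST_DIV_SHIFT) & DSI_PLL_P1_POST_DIV_MASK);
}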
@@ -886,6 +927,7 @@
#define GT_BLT_USER_INTERRUPT (1 << 22)
#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
#define GT_BSD_USER_INTERRUPT (1 << 12)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)

@@ -896,6 +938,10 @@
#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */

#define GT_PARITY_ERROR(dev) \
(GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
(IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))

/* These are all the "old" interrupts */
#define ILK_BSD_USER_INTERRUPT (1<<5)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
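/*
 * GT_PARITY_ERROR() folds both per-slice parity bits into a single
 * mask; the conditional needs its own parentheses because bitwise OR
 * binds tighter than ?:. Usage mirrors the irq-handler hunk earlier
 * in this patch:
 */
static void check_parity_sketch(struct drm_device *dev, u32 gt_iir)
{
	/* both slices on HSW, just the one bit elsewhere */
	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}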
@@ -1400,6 +1446,8 @@
* device 0 function 0's pci config register 0x44 or 0x48 and matches it in
* every way. It is not accessible from the CP register read instructions.
*
* Starting from Haswell, you can't write registers using the MCHBAR mirror,
* just read.
*/
#define MCHBAR_MIRROR_BASE 0x10000

@@ -2030,6 +2078,7 @@

/* Gen 4 SDVO/HDMI bits: */
#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
#define SDVO_COLOR_FORMAT_MASK (7 << 26)
#define SDVO_ENCODING_SDVO (0 << 10)
#define SDVO_ENCODING_HDMI (2 << 10)
#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */

@@ -2982,6 +3031,7 @@
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
#define I965_PIPECONF_ACTIVE (1<<30)
#define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */
#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0

@@ -4407,6 +4457,8 @@
#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
#define PANEL_PORT_SELECT_DPB_VLV (1 << 30)
#define PANEL_PORT_SELECT_DPC_VLV (2 << 30)
#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)

@@ -4438,7 +4490,6 @@
#define PANEL_PORT_SELECT_MASK (3 << 30)
#define PANEL_PORT_SELECT_LVDS (0 << 30)
#define PANEL_PORT_SELECT_DPA (1 << 30)
#define EDP_PANEL (1 << 30)
#define PANEL_PORT_SELECT_DPC (2 << 30)
#define PANEL_PORT_SELECT_DPD (3 << 30)
#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)

@@ -4447,11 +4498,6 @@
#define PANEL_LIGHT_ON_DELAY_SHIFT 0

#define PCH_PP_OFF_DELAYS 0xc720c
#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
#define PANEL_POWER_PORT_LVDS (0 << 30)
#define PANEL_POWER_PORT_DP_A (1 << 30)
#define PANEL_POWER_PORT_DP_C (2 << 30)
#define PANEL_POWER_PORT_DP_D (3 << 30)
#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_DOWN_DELAY_SHIFT 16
#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)

@@ -4685,6 +4731,8 @@
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
#define GEN6_PCODE_READ_D_COMP 0x10
#define GEN6_PCODE_WRITE_D_COMP 0x11
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define GEN6_PCODE_DATA 0x138128

@@ -4704,6 +4752,7 @@

/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
#define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */
#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
#define GEN7_PARITY_ERROR_VALID (1<<13)
#define GEN7_L3CDERRST1_BANK_MASK (3<<11)

@@ -4717,6 +4766,7 @@
#define GEN7_L3CDERRST1_ENABLE (1<<7)

#define GEN7_L3LOG_BASE 0xB070
#define HSW_L3LOG_BASE_SLICE1 0xB270
#define GEN7_L3LOG_SIZE 0x80

#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
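/*
 * HSW_L3LOG_BASE_SLICE1 (0xB270) sits exactly 0x200 above
 * GEN7_L3LOG_BASE (0xB070), the same stride as the per-slice error
 * status registers, so a slice's remap registers can be addressed
 * arithmetically rather than through a lookup table. A sketch:
 */
static u32 l3log_reg_sketch(int slice, int byte_offset)
{
	/* slice 0 -> 0xB070 + offset, slice 1 -> 0xB270 + offset */
	return GEN7_L3LOG_BASE + slice * 0x200 + byte_offset;
}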
@@ -5116,4 +5166,414 @@
#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)

/* VLV MIPI registers */

#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
#define DPI_ENABLE (1 << 31) /* A + B */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
#define DUAL_LINK_MODE_MASK (1 << 26)
#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
#define DITHERING_ENABLE (1 << 25) /* A + B */
#define FLOPPED_HSTX (1 << 23)
#define DE_INVERT (1 << 19) /* XXX */
#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
#define AFE_LATCHOUT (1 << 17)
#define LP_OUTPUT_HOLD (1 << 16)
#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
#define MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT 11
#define MIPIB_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
#define CSB_SHIFT 9
#define CSB_MASK (3 << 9)
#define CSB_20MHZ (0 << 9)
#define CSB_10MHZ (1 << 9)
#define CSB_40MHZ (2 << 9)
#define BANDGAP_MASK (1 << 8)
#define BANDGAP_PNW_CIRCUIT (0 << 8)
#define BANDGAP_LNC_CIRCUIT (1 << 8)
#define MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
#define MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
#define TEARING_EFFECT_DELAY (1 << 4) /* A + B */
#define TEARING_EFFECT_SHIFT 2 /* A + B */
#define TEARING_EFFECT_MASK (3 << 2)
#define TEARING_EFFECT_OFF (0 << 2)
#define TEARING_EFFECT_DSI (1 << 2)
#define TEARING_EFFECT_GPIO (2 << 2)
#define LANE_CONFIGURATION_SHIFT 0
#define LANE_CONFIGURATION_MASK (3 << 0)
#define LANE_CONFIGURATION_4LANE (0 << 0)
#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0)
#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)

#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)

/* XXX: all bits reserved */
#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)

/* MIPI DSI Controller and D-PHY registers */

#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000)
#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800)
#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
#define ULPS_STATE_EXIT (1 << 1)
#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
#define DEVICE_READY (1 << 0)

#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004)
#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804)
#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008)
#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808)
#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
#define RX_PROT_VIOLATION (1 << 26)
#define RX_INVALID_TX_LENGTH (1 << 25)
#define ACK_WITH_NO_ERROR (1 << 24)
#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
#define LP_RX_TIMEOUT (1 << 22)
#define HS_TX_TIMEOUT (1 << 21)
#define DPI_FIFO_UNDERRUN (1 << 20)
#define LOW_CONTENTION (1 << 19)
#define HIGH_CONTENTION (1 << 18)
#define TXDSI_VC_ID_INVALID (1 << 17)
#define TXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 16)
#define TXCHECKSUM_ERROR (1 << 15)
#define TXECC_MULTIBIT_ERROR (1 << 14)
#define TXECC_SINGLE_BIT_ERROR (1 << 13)
#define TXFALSE_CONTROL_ERROR (1 << 12)
#define RXDSI_VC_ID_INVALID (1 << 11)
#define RXDSI_DATA_TYPE_NOT_REGOGNISED (1 << 10)
#define RXCHECKSUM_ERROR (1 << 9)
#define RXECC_MULTIBIT_ERROR (1 << 8)
#define RXECC_SINGLE_BIT_ERROR (1 << 7)
#define RXFALSE_CONTROL_ERROR (1 << 6)
#define RXHS_RECEIVE_TIMEOUT_ERROR (1 << 5)
#define RX_LP_TX_SYNC_ERROR (1 << 4)
#define RXEXCAPE_MODE_ENTRY_ERROR (1 << 3)
#define RXEOT_SYNC_ERROR (1 << 2)
#define RXSOT_SYNC_ERROR (1 << 1)
#define RXSOT_ERROR (1 << 0)
#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c)
#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c)
#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
#define CMD_MODE_DATA_WIDTH_9_BIT (2 << 13)
#define CMD_MODE_DATA_WIDTH_8_BIT (3 << 13)
#define CMD_MODE_DATA_WIDTH_OPTION1 (4 << 13)
#define CMD_MODE_DATA_WIDTH_OPTION2 (5 << 13)
#define VID_MODE_FORMAT_MASK (0xf << 7)
#define VID_MODE_NOT_SUPPORTED (0 << 7)
#define VID_MODE_FORMAT_RGB565 (1 << 7)
#define VID_MODE_FORMAT_RGB666 (2 << 7)
#define VID_MODE_FORMAT_RGB666_LOOSE (3 << 7)
#define VID_MODE_FORMAT_RGB888 (4 << 7)
#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5
#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5)
#define VID_MODE_CHANNEL_NUMBER_SHIFT 3
#define VID_MODE_CHANNEL_NUMBER_MASK (3 << 3)
#define DATA_LANES_PRG_REG_SHIFT 0
#define DATA_LANES_PRG_REG_MASK (7 << 0)

#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010)
#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810)
#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff

#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014)
#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814)
#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff

#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018)
#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818)
#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f

#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c)
#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c)
#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff

#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020)
#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820)
#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff

#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024)
#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824)
#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)

/* regs below are bits 15:0 */
#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028)
#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828)
#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)

#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c)
#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c)
#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)

#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030)
#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830)
#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)

#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034)
#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834)
#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)

#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038)
#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838)
#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)

#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c)
#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c)
#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)

#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040)
#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840)
#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)

#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044)
#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844)
#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
/* regs above are bits 15:0 */

#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048)
#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848)
#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
#define COLOR_MODE_OFF (1 << 3)
#define COLOR_MODE_ON (1 << 2)
#define TURN_ON (1 << 1)
#define SHUTDOWN (1 << 0)

#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c)
#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c)
#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)

#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050)
#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850)
#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)

#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054)
#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854)
#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)

#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058)
#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858)
#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
#define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0)
#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
#define VIDEO_MODE_BURST (3 << 0)
#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c)
#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c)
#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
#define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4)
#define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
#define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2)
#define CLOCKSTOP (1 << 1)
#define EOT_DISABLE (1 << 0)

#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060)
#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860)
#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)

/* bits 31:0 */
#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064)
#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864)
#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)

/* bits 31:0 */
#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068)
#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868)
#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)

#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c)
#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c)
#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070)
#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870)
#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
#define SHORT_PACKET_PARAM_MASK (0xffff << 8)
#define VIRTUAL_CHANNEL_SHIFT 6
#define VIRTUAL_CHANNEL_MASK (3 << 6)
#define DATA_TYPE_SHIFT 0
#define DATA_TYPE_MASK (0x3f << 0)
/* data type values, see include/video/mipi_display.h */

#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074)
#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874)
#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
#define LP_CTRL_FIFO_FULL (1 << 24)
#define HS_CTRL_FIFO_EMPTY (1 << 18)
#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
#define HS_CTRL_FIFO_FULL (1 << 16)
#define LP_DATA_FIFO_EMPTY (1 << 10)
#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
#define LP_DATA_FIFO_FULL (1 << 8)
#define HS_DATA_FIFO_EMPTY (1 << 2)
#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
#define HS_DATA_FIFO_FULL (1 << 0)

#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078)
#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878)
#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)

#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080)
#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880)
#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
#define TRAIL_COUNT_MASK (0x1f << 16)
#define CLK_ZERO_COUNT_SHIFT 8
#define CLK_ZERO_COUNT_MASK (0xff << 8)
#define PREPARE_COUNT_SHIFT 0
#define PREPARE_COUNT_MASK (0x3f << 0)

/* bits 31:0 */
#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084)
#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884)
#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)

#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088)
#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888)
#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c)
#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c)
#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)

#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090)
#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890)
#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094)
#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894)
#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)

/* XXX: only pipe A ?!? */
#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100)
#define DBI_TYPEC_ENABLE (1 << 31)
#define DBI_TYPEC_WIP (1 << 30)
#define DBI_TYPEC_OPTION_SHIFT 28
#define DBI_TYPEC_OPTION_MASK (3 << 28)
#define DBI_TYPEC_FREQ_SHIFT 24
#define DBI_TYPEC_FREQ_MASK (0xf << 24)
#define DBI_TYPEC_OVERRIDE (1 << 8)
#define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0
#define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0)

/* MIPI adapter registers */

#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104)
#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904)
#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
#define ESCAPE_CLOCK_DIVIDER_2 (1 << 5)
#define ESCAPE_CLOCK_DIVIDER_4 (2 << 5)
#define READ_REQUEST_PRIORITY_SHIFT 3
#define READ_REQUEST_PRIORITY_MASK (3 << 3)
#define READ_REQUEST_PRIORITY_LOW (0 << 3)
#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
#define RGB_FLIP_TO_BGR (1 << 2)

#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108)
#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908)
#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)

#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c)
#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c)
#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)

#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110)
#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910)
#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
#define COMMAND_VALID (1 << 0)

#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114)
#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914)
#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))

#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118)
#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918)
#define MIPI_READ_DATA_RETURN(pipe, n) \
(_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */

#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138)
#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938)
#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))

#endif /* _I915_REG_H_ */
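/*
 * Every MIPI register above comes as an A/B pair selected through
 * _PIPE(), which in i915_reg.h is roughly linear interpolation between
 * the two addresses:
 *
 *   #define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))
 *
 * so MIPI_PORT_CTRL(0) resolves to _MIPIA_PORT_CTRL and
 * MIPI_PORT_CTRL(1) to _MIPIB_PORT_CTRL. Array-style registers stack
 * an index on top, e.g. the read-return dwords step 4 bytes per n:
 */
static u32 mipi_read_return_reg_sketch(int pipe, int n)
{
	return MIPI_READ_DATA_RETURN(pipe, n);	/* n: 0...7 */
}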
@@ -340,7 +340,9 @@ int i915_save_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;

pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
if (INTEL_INFO(dev)->gen <= 4)
pci_read_config_byte(dev->pdev, LBB,
&dev_priv->regfile.saveLBB);

mutex_lock(&dev->struct_mutex);

@@ -390,7 +392,9 @@ int i915_restore_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;

pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
if (INTEL_INFO(dev)->gen <= 4)
pci_write_config_byte(dev->pdev, LBB,
dev_priv->regfile.saveLBB);

mutex_lock(&dev->struct_mutex);

@@ -65,6 +65,8 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
if (IS_VALLEYVIEW(dminor->dev))
rc6p_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

@@ -73,6 +75,8 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
if (IS_VALLEYVIEW(dminor->dev))
rc6pp_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

@@ -97,7 +101,7 @@ static struct attribute_group rc6_attr_group = {

static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
if (!HAS_L3_GPU_CACHE(dev))
if (!HAS_L3_DPF(dev))
return -EPERM;

if (offset % 4 != 0)

@@ -118,28 +122,31 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
uint32_t misccpctl;
int i, ret;
int slice = (int)(uintptr_t)attr->private;
int ret;

count = round_down(count, 4);

ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;

count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;

misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
*((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i);

I915_WRITE(GEN7_MISCCPCTL, misccpctl);
if (dev_priv->l3_parity.remap_info[slice])
memcpy(buf,
dev_priv->l3_parity.remap_info[slice] + (offset/4),
count);
else
memset(buf, 0, count);

mutex_unlock(&drm_dev->struct_mutex);

return i - offset;
return count;
}

static ssize_t
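/*
 * With the read side now serving the cached remap_info instead of
 * touching hardware, userspace can snapshot a slice's remap table
 * directly from sysfs. A hedged userspace sketch; the card0 path is an
 * assumption about where the drm minor registers:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[0x80];	/* GEN7_L3LOG_SIZE bytes */
	int fd = open("/sys/class/drm/card0/l3_parity_slice_1", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, buf, sizeof(buf), 0) < 0)
		perror("pread");
	close(fd);
	return 0;
}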
@@ -151,18 +158,23 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
struct i915_hw_context *ctx;
u32 *temp = NULL; /* Just here to make handling failures easy */
int slice = (int)(uintptr_t)attr->private;
int ret;

ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;

if (dev_priv->hw_contexts_disabled)
return -ENXIO;

ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;

if (!dev_priv->l3_parity.remap_info) {
if (!dev_priv->l3_parity.remap_info[slice]) {
temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
if (!temp) {
mutex_unlock(&drm_dev->struct_mutex);

@@ -182,13 +194,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
* at this point it is left as a TODO.
*/
if (temp)
dev_priv->l3_parity.remap_info = temp;
dev_priv->l3_parity.remap_info[slice] = temp;

memcpy(dev_priv->l3_parity.remap_info + (offset/4),
buf + (offset/4),
count);
memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

i915_gem_l3_remap(drm_dev);
/* NB: We defer the remapping until we switch to the context */
list_for_each_entry(ctx, &dev_priv->context_list, link)
ctx->remap_slice |= (1<<slice);

mutex_unlock(&drm_dev->struct_mutex);
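/*
 * The write path above only records, per context, which slice has a
 * dirty remap table; the register writes happen on the next switch to
 * that context. A sketch of the consuming side, with a hypothetical
 * remap_one_slice() in place of the ring-based l3 remap:
 */
static void consume_remaps_sketch(struct i915_hw_context *ctx, int num_slices)
{
	int i;

	for (i = 0; i < num_slices; i++) {
		if (!(ctx->remap_slice & (1 << i)))
			continue;

		remap_one_slice(ctx, i);		/* hypothetical */
		ctx->remap_slice &= ~(1 << i);		/* clean until rewritten */
	}
}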
@ -200,7 +212,17 @@ static struct bin_attribute dpf_attrs = {
|
|||
.size = GEN7_L3LOG_SIZE,
|
||||
.read = i915_l3_read,
|
||||
.write = i915_l3_write,
|
||||
.mmap = NULL
|
||||
.mmap = NULL,
|
||||
.private = (void *)0
|
||||
};
|
||||
|
||||
static struct bin_attribute dpf_attrs_1 = {
|
||||
.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
|
||||
.size = GEN7_L3LOG_SIZE,
|
||||
.read = i915_l3_read,
|
||||
.write = i915_l3_write,
|
||||
.mmap = NULL,
|
||||
.private = (void *)1
|
||||
};
|
||||
|
||||
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
|
||||
|
@ -507,10 +529,17 @@ void i915_setup_sysfs(struct drm_device *dev)
|
|||
DRM_ERROR("RC6 residency sysfs setup failed\n");
|
||||
}
|
||||
#endif
|
||||
if (HAS_L3_GPU_CACHE(dev)) {
|
||||
if (HAS_L3_DPF(dev)) {
|
||||
ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
|
||||
if (ret)
|
||||
DRM_ERROR("l3 parity sysfs setup failed\n");
|
||||
|
||||
if (NUM_L3_SLICES(dev) > 1) {
|
||||
ret = device_create_bin_file(&dev->primary->kdev,
|
||||
&dpf_attrs_1);
|
||||
if (ret)
|
||||
DRM_ERROR("l3 parity slice 1 setup failed\n");
|
||||
}
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
|
@ -534,6 +563,7 @@ void i915_teardown_sysfs(struct drm_device *dev)
|
|||
sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
|
||||
else
|
||||
sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
|
||||
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs_1);
|
||||
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
|
||||
#ifdef CONFIG_PM
|
||||
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
|
||||
|
|
|
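The two bin_attribute instances above share one read and one write handler and differ only in the .private cookie, which i915_l3_read()/i915_l3_write() decode back into a slice index. A self-contained sketch of the same dispatch trick (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

struct bin_attr {
	const char *name;
	void *private;			/* carries the slice index */
};

static void l3_read(const struct bin_attr *attr)
{
	int slice = (int)(uintptr_t)attr->private;
	printf("%s -> remap_info[%d]\n", attr->name, slice);
}

int main(void)
{
	struct bin_attr slice0 = { "l3_parity",         (void *)0 };
	struct bin_attr slice1 = { "l3_parity_slice_1", (void *)1 };
	l3_read(&slice0);		/* slice 0 */
	l3_read(&slice1);		/* slice 1 */
	return 0;
}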
@@ -568,6 +568,21 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
}
}

static void
parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
struct bdb_mipi *mipi;

mipi = find_section(bdb, BDB_MIPI);
if (!mipi) {
DRM_DEBUG_KMS("No MIPI BDB found");
return;
}

/* XXX: add more info */
dev_priv->vbt.dsi.panel_id = mipi->panel_id;
}

static void
parse_device_mapping(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)

@@ -745,6 +760,7 @@ intel_parse_bios(struct drm_device *dev)
parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
parse_mipi(dev_priv, bdb);

if (bios)
pci_unmap_rom(pdev, bios);
@@ -104,6 +104,7 @@ struct vbios_data {
#define BDB_LVDS_LFP_DATA 42
#define BDB_LVDS_BACKLIGHT 43
#define BDB_LVDS_POWER 44
#define BDB_MIPI 50
#define BDB_SKIP 254 /* VBIOS private block, ignore */

struct bdb_general_features {

@@ -618,4 +619,44 @@ int intel_parse_bios(struct drm_device *dev);
#define PORT_IDPC 8
#define PORT_IDPD 9

/* MIPI DSI panel info */
struct bdb_mipi {
u16 panel_id;
u16 bridge_revision;

/* General params */
u32 dithering:1;
u32 bpp_pixel_format:1;
u32 rsvd1:1;
u32 dphy_valid:1;
u32 resvd2:28;

u16 port_info;
u16 rsvd3:2;
u16 num_lanes:2;
u16 rsvd4:12;

/* DSI config */
u16 virt_ch_num:2;
u16 vtm:2;
u16 rsvd5:12;

u32 dsi_clock;
u32 bridge_ref_clk;
u16 rsvd_pwr;

/* Dphy Params */
u32 prepare_cnt:5;
u32 rsvd6:3;
u32 clk_zero_cnt:8;
u32 trail_cnt:5;
u32 rsvd7:3;
u32 exit_zero_cnt:6;
u32 rsvd8:2;

u32 hl_switch_cnt;
u32 lp_byte_clk;
u32 clk_lane_switch_cnt;
} __attribute__((packed));

#endif /* _I830_BIOS_H_ */
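parse_mipi() above relies on find_section() to locate block 50 in the BDB before casting it to the packed struct. A hedged sketch of such a walk, assuming the usual { u8 id; u16 size; payload } block layout that follows the BDB header; bounds handling is simplified and this is not the driver's exact code:

#include <stddef.h>
#include <stdint.h>

static const void *find_bdb_block(const uint8_t *bdb, size_t bdb_size,
				  size_t header_size, uint8_t wanted_id)
{
	size_t off = header_size;

	while (off + 3 <= bdb_size) {
		uint8_t id = bdb[off];
		/* block size is little-endian u16 right after the id */
		uint16_t len = bdb[off + 1] | (uint16_t)bdb[off + 2] << 8;

		if (off + 3 + len > bdb_size)
			return NULL;		/* truncated block */
		if (id == wanted_id)
			return bdb + off + 3;	/* payload, e.g. struct bdb_mipi */
		off += 3 + len;			/* skip to the next block */
	}
	return NULL;
}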
@@ -89,6 +89,7 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 tmp, flags = 0;
int dotclock;

tmp = I915_READ(crt->adpa_reg);

@@ -103,6 +104,13 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;

pipe_config->adjusted_mode.flags |= flags;

dotclock = pipe_config->port_clock;

if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);

pipe_config->adjusted_mode.clock = dotclock;
}

/* Note: The caller is required to filter out dpms modes not supported by the

@@ -349,9 +357,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)

DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);

/* FIXME: debug force function and remove */
ret = true;

return ret;
}
@@ -58,7 +58,7 @@ static const u32 hsw_ddi_translations_fdi[] = {
0x00FFFFFF, 0x00040006 /* HDMI parameters */
};

static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;

@@ -767,9 +767,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
BUG();
}

if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;

if (cpu_transcoder == TRANSCODER_EDP) {

@@ -1268,6 +1268,37 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;

pipe_config->adjusted_mode.flags |= flags;

switch (temp & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
pipe_config->pipe_bpp = 18;
break;
case TRANS_DDI_BPC_8:
pipe_config->pipe_bpp = 24;
break;
case TRANS_DDI_BPC_10:
pipe_config->pipe_bpp = 30;
break;
case TRANS_DDI_BPC_12:
pipe_config->pipe_bpp = 36;
break;
default:
break;
}

switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
case TRANS_DDI_MODE_SELECT_DVI:
case TRANS_DDI_MODE_SELECT_FDI:
break;
case TRANS_DDI_MODE_SELECT_DP_SST:
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->has_dp_encoder = true;
intel_dp_get_m_n(intel_crtc, pipe_config);
break;
default:
break;
}
}

static void intel_ddi_destroy(struct drm_encoder *encoder)
[diff for this file not shown because of its size]
@@ -38,6 +38,32 @@

#define DP_LINK_CHECK_TIMEOUT (10 * 1000)

struct dp_link_dpll {
int link_bw;
struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
{ DP_LINK_BW_1_62,
{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
{ DP_LINK_BW_2_7,
{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
{ DP_LINK_BW_1_62,
{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
{ DP_LINK_BW_2_7,
{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
{ DP_LINK_BW_1_62,
{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 5, .m2 = 3 } },
{ DP_LINK_BW_2_7,
{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct

@@ -211,24 +237,77 @@ intel_hrawclk(struct drm_device *dev)
}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct intel_dp *intel_dp,
struct edp_power_seq *out);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
struct intel_dp *intel_dp,
struct edp_power_seq *out);

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
enum pipe pipe;

/* modeset should have pipe */
if (crtc)
return to_intel_crtc(crtc)->pipe;

/* init time, try to find a pipe with this port selected */
for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
PANEL_PORT_SELECT_MASK;
if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
return pipe;
if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
return pipe;
}

/* shrug */
return PIPE_A;
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);

if (HAS_PCH_SPLIT(dev))
return PCH_PP_CONTROL;
else
return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);

if (HAS_PCH_SPLIT(dev))
return PCH_PP_STATUS;
else
return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_stat_reg;

pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
return (I915_READ(pp_stat_reg) & PP_ON) != 0;
return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_ctrl_reg;

pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
}

static void

@@ -236,19 +315,15 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_stat_reg, pp_ctrl_reg;

if (!is_edp(intel_dp))
return;

pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
WARN(1, "eDP powered off while attempting aux channel communication.\n");
DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
I915_READ(_pp_stat_reg(intel_dp)),
I915_READ(_pp_ctrl_reg(intel_dp)));
}
}

@@ -361,6 +436,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
goto out;
}

/* Only 5 data registers! */
if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
ret = -E2BIG;
goto out;
}

while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {

@@ -451,9 +532,10 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
int msg_bytes;
uint8_t ack;

if (WARN_ON(send_bytes > 16))
return -E2BIG;

intel_dp_check_edp(intel_dp);
if (send_bytes > 16)
return -1;
msg[0] = AUX_NATIVE_WRITE << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;

@@ -494,6 +576,9 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
uint8_t ack;
int ret;

if (WARN_ON(recv_bytes > 19))
return -E2BIG;

intel_dp_check_edp(intel_dp);
msg[0] = AUX_NATIVE_READ << 4;
msg[1] = address >> 8;

@@ -660,41 +745,30 @@ intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config, int link_bw)
{
struct drm_device *dev = encoder->base.dev;
const struct dp_link_dpll *divisor = NULL;
int i, count = 0;

if (IS_G4X(dev)) {
if (link_bw == DP_LINK_BW_1_62) {
pipe_config->dpll.p1 = 2;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.n = 2;
pipe_config->dpll.m1 = 23;
pipe_config->dpll.m2 = 8;
} else {
pipe_config->dpll.p1 = 1;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.n = 1;
pipe_config->dpll.m1 = 14;
pipe_config->dpll.m2 = 2;
}
pipe_config->clock_set = true;
divisor = gen4_dpll;
count = ARRAY_SIZE(gen4_dpll);
} else if (IS_HASWELL(dev)) {
/* Haswell has special-purpose DP DDI clocks. */
} else if (HAS_PCH_SPLIT(dev)) {
if (link_bw == DP_LINK_BW_1_62) {
pipe_config->dpll.n = 1;
pipe_config->dpll.p1 = 2;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.m1 = 12;
pipe_config->dpll.m2 = 9;
} else {
pipe_config->dpll.n = 2;
pipe_config->dpll.p1 = 1;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.m1 = 14;
pipe_config->dpll.m2 = 8;
}
pipe_config->clock_set = true;
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
} else if (IS_VALLEYVIEW(dev)) {
/* FIXME: Need to figure out optimized DP clocks for vlv. */
divisor = vlv_dpll;
count = ARRAY_SIZE(vlv_dpll);
}

if (divisor && count) {
for (i = 0; i < count; i++) {
if (link_bw == divisor[i].link_bw) {
pipe_config->dpll = divisor[i].dpll;
pipe_config->clock_set = true;
break;
}
}
}
}
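The rewritten intel_dp_set_clock() replaces per-platform if/else chains with a table search keyed on link bandwidth. A compilable sketch of the same lookup, reusing the gen4 values from the tables above; the raw link-bw codes are the DPCD encodings (0x06 = 1.62 GHz, 0x0a = 2.7 GHz):

#include <stddef.h>
#include <stdio.h>

struct dpll { int p1, p2, n, m1, m2; };
struct dp_link_dpll { int link_bw; struct dpll dpll; };

/* values copied from the gen4_dpll[] table in the hunk above */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 0x06, { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 0x0a, { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } },
};

static const struct dpll *pick_divisor(const struct dp_link_dpll *tab,
				       size_t count, int link_bw)
{
	for (size_t i = 0; i < count; i++)
		if (tab[i].link_bw == link_bw)
			return &tab[i].dpll;
	return NULL;			/* no match: clock_set stays false */
}

int main(void)
{
	const struct dpll *d = pick_divisor(gen4_dpll, 2, 0x06);
	if (d)
		printf("n=%d m1=%d m2=%d p1=%d p2=%d\n",
		       d->n, d->m1, d->m2, d->p1, d->p2);
	return 0;
}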
@@ -944,8 +1018,8 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_stat_reg, pp_ctrl_reg;

pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
mask, value,

@@ -987,11 +1061,8 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 control;
u32 pp_ctrl_reg;

pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
control = I915_READ(pp_ctrl_reg);

control = I915_READ(_pp_ctrl_reg(intel_dp));
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
return control;

@@ -1024,8 +1095,8 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;

pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);

@@ -1053,8 +1124,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;

pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
pp_stat_reg = _pp_ctrl_reg(intel_dp);
pp_ctrl_reg = _pp_stat_reg(intel_dp);

I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);

@@ -1119,20 +1190,19 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)

ironlake_wait_panel_power_cycle(intel_dp);

pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
if (IS_GEN5(dev)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
}

pp |= POWER_TARGET_ON;
if (!IS_GEN5(dev))
pp |= PANEL_POWER_RESET;

pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);

@@ -1140,8 +1210,8 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)

if (IS_GEN5(dev)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
}
}

@@ -1164,7 +1234,7 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
* panels get very unhappy and cease to work. */
pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);

pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);

@@ -1197,7 +1267,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;

pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);

@@ -1221,7 +1291,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;

pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);

@@ -1368,6 +1438,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
int dotclock;

if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
tmp = I915_READ(intel_dp->output_reg);

@@ -1395,12 +1466,24 @@ static void intel_dp_get_config(struct intel_encoder *encoder,

pipe_config->adjusted_mode.flags |= flags;

if (dp_to_dig_port(intel_dp)->port == PORT_A) {
pipe_config->has_dp_encoder = true;

intel_dp_get_m_n(crtc, pipe_config);

if (port == PORT_A) {
if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
}

dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);

if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
ironlake_check_encoder_dotclock(pipe_config, dotclock);

pipe_config->adjusted_mode.clock = dotclock;
}

static bool is_edp_psr(struct intel_dp *intel_dp)

@@ -1566,7 +1649,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
}

intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
if (!intel_crtc_active(crtc)) {
DRM_DEBUG_KMS("crtc not active for PSR\n");
dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
return false;

@@ -1593,7 +1676,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}

if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
return false;

@@ -1713,14 +1796,24 @@ static void intel_enable_dp(struct intel_encoder *encoder)
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}

static void g4x_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

intel_enable_dp(encoder);
ironlake_edp_backlight_on(intel_dp);
}

static void vlv_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

ironlake_edp_backlight_on(intel_dp);
}

static void intel_pre_enable_dp(struct intel_encoder *encoder)
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);

@@ -1738,53 +1831,59 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
struct edp_power_seq power_seq;
u32 val;

mutex_lock(&dev_priv->dpio_lock);

val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);

mutex_unlock(&dev_priv->dpio_lock);

/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);

intel_enable_dp(encoder);

vlv_wait_port_ready(dev_priv, port);
}

static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);

if (!IS_VALLEYVIEW(dev))
return;
int pipe = intel_crtc->pipe;

/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);

/* Fix up inter-pair skew failure */
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
mutex_unlock(&dev_priv->dpio_lock);
}

@@ -1919,10 +2018,13 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct intel_crtc *intel_crtc =
to_intel_crtc(dport->base.base.crtc);
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
uint8_t train_set = intel_dp->train_set[0];
int port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;

switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:

@@ -1998,14 +2100,14 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
}

mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
uniqtranscale_reg_value);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
mutex_unlock(&dev_priv->dpio_lock);

return 0;

@@ -3144,24 +3246,26 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_power_seq cur, vbt, spec, final;
u32 pp_on, pp_off, pp_div, pp;
int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

if (HAS_PCH_SPLIT(dev)) {
pp_control_reg = PCH_PP_CONTROL;
pp_ctrl_reg = PCH_PP_CONTROL;
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_div_reg = PCH_PP_DIVISOR;
} else {
pp_control_reg = PIPEA_PP_CONTROL;
pp_on_reg = PIPEA_PP_ON_DELAYS;
pp_off_reg = PIPEA_PP_OFF_DELAYS;
pp_div_reg = PIPEA_PP_DIVISOR;
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
}

/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
pp = ironlake_get_pp_control(intel_dp);
I915_WRITE(pp_control_reg, pp);
I915_WRITE(pp_ctrl_reg, pp);

pp_on = I915_READ(pp_on_reg);
pp_off = I915_READ(pp_off_reg);

@@ -3249,9 +3353,11 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_div_reg = PCH_PP_DIVISOR;
} else {
pp_on_reg = PIPEA_PP_ON_DELAYS;
pp_off_reg = PIPEA_PP_OFF_DELAYS;
pp_div_reg = PIPEA_PP_DIVISOR;
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
}

/* And finally store the new values in the power sequencer. */

@@ -3268,12 +3374,15 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
if (IS_VALLEYVIEW(dev)) {
port_sel = I915_READ(pp_on_reg) & 0xc0000000;
if (dp_to_dig_port(intel_dp)->port == PORT_B)
port_sel = PANEL_PORT_SELECT_DPB_VLV;
else
port_sel = PANEL_PORT_SELECT_DPC_VLV;
} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
if (dp_to_dig_port(intel_dp)->port == PORT_A)
port_sel = PANEL_POWER_PORT_DP_A;
port_sel = PANEL_PORT_SELECT_DPA;
else
port_sel = PANEL_POWER_PORT_DP_D;
port_sel = PANEL_PORT_SELECT_DPD;
}

pp_on |= port_sel;

@@ -3539,12 +3648,12 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
} else {
intel_encoder->pre_enable = intel_pre_enable_dp;
intel_encoder->enable = intel_enable_dp;
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
}

intel_dig_port->port = port;
@@ -93,13 +93,17 @@
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
#define INTEL_OUTPUT_UNKNOWN 9
#define INTEL_OUTPUT_DSI 9
#define INTEL_OUTPUT_UNKNOWN 10

#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4

#define INTEL_DSI_COMMAND_MODE 0
#define INTEL_DSI_VIDEO_MODE 1

struct intel_framebuffer {
struct drm_framebuffer base;
struct drm_i915_gem_object *obj;

@@ -207,8 +211,21 @@ struct intel_crtc_config {
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
unsigned long quirks;

/* User requested mode, only valid as a starting point to
* compute adjusted_mode, except in the case of (S)DVO where
* it's also for the output timings of the (S)DVO chip.
* adjusted_mode will then correspond to the S(DVO) chip's
* preferred input timings. */
struct drm_display_mode requested_mode;
/* Actual pipe timings ie. what we program into the pipe timing
* registers. adjusted_mode.clock is the pipe pixel clock. */
struct drm_display_mode adjusted_mode;

/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
* and get clipped at the edges. */
int pipe_src_w, pipe_src_h;

/* Whether to set up the PCH/FDI. Note that we never allow sharing
* between pch encoders and cpu encoders. */
bool has_pch_encoder;

@@ -262,7 +279,8 @@ struct intel_crtc_config {

/*
* Frequence the dpll for the port should run at. Differs from the
* adjusted dotclock e.g. for DP or 12bpc hdmi mode.
* adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
* already multiplied by pixel_multiplier.
*/
int port_clock;

@@ -288,6 +306,8 @@ struct intel_crtc_config {
struct intel_link_m_n fdi_m_n;

bool ips_enabled;

bool double_wide;
};

struct intel_crtc {

@@ -522,6 +542,7 @@ extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring);
extern void intel_mark_idle(struct drm_device *dev);
extern void intel_lvds_init(struct drm_device *dev);
extern bool intel_dsi_init(struct drm_device *dev);
extern bool intel_is_dual_link_lvds(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);

@@ -708,9 +729,10 @@ extern void intel_write_eld(struct drm_encoder *encoder,
extern void intel_prepare_ddi(struct drm_device *dev);
extern void hsw_fdi_link_train(struct drm_crtc *crtc);
extern void intel_ddi_init(struct drm_device *dev, enum port port);
extern enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);

/* For use by IVB LP watermark workaround in intel_sprite.c */
extern void intel_update_watermarks(struct drm_device *dev);
extern void intel_update_watermarks(struct drm_crtc *crtc);
extern void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,

@@ -741,8 +763,13 @@ extern void i915_remove_power_well(struct drm_device *dev);

extern bool intel_display_power_enabled(struct drm_device *dev,
enum intel_display_power_domain domain);
extern void intel_display_power_get(struct drm_device *dev,
enum intel_display_power_domain domain);
extern void intel_display_power_put(struct drm_device *dev,
enum intel_display_power_domain domain);
extern void intel_init_power_well(struct drm_device *dev);
extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_resume_power_well(struct drm_device *dev);
extern void intel_enable_gt_powersave(struct drm_device *dev);
extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void ironlake_teardown_rc6(struct drm_device *dev);

@@ -793,6 +820,14 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
extern void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
extern int intel_dotclock_calculate(int link_freq,
const struct intel_link_m_n *m_n);
extern void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
int dotclock);

extern bool intel_crtc_active(struct drm_crtc *crtc);
extern void i915_disable_vga_mem(struct drm_device *dev);

#endif /* __INTEL_DRV_H__ */
@@ -0,0 +1,621 @@
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Jani Nikula <jani.nikula@intel.com>
*/

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
#include <linux/slab.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dsi_cmd.h"

/* the sub-encoders aka panel drivers */
static const struct intel_dsi_device intel_dsi_devices[] = {
};

static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val,
u32 mask)
{
u32 tmp = vlv_cck_read(dev_priv, reg);
tmp &= ~mask;
tmp |= val;
vlv_cck_write(dev_priv, reg, tmp);
}

static void band_gap_wa(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->dpio_lock);

/* Enable bandgap fix in GOP driver */
vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000);
msleep(20);
vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000);
msleep(20);
vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000);
msleep(20);
vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000);
msleep(20);
vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000);
msleep(20);

/* Turn Display Trunk on */
vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000);
msleep(20);

vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000);
msleep(20);

vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000);
msleep(20);
vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000);
msleep(20);
vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000);

mutex_unlock(&dev_priv->dpio_lock);

/* Need huge delay, otherwise clock is not stable */
msleep(100);
}

static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_dsi, base);
}

static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
{
return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE;
}

static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
{
return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE;
}

static void intel_dsi_hot_plug(struct intel_encoder *encoder)
{
DRM_DEBUG_KMS("\n");
}

static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *config)
{
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
struct drm_display_mode *mode = &config->requested_mode;

DRM_DEBUG_KMS("\n");

if (fixed_mode)
intel_fixed_panel_mode(fixed_mode, adjusted_mode);

if (intel_dsi->dev.dev_ops->mode_fixup)
return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
mode, adjusted_mode);

return true;
}

static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
{
DRM_DEBUG_KMS("\n");

vlv_enable_dsi_pll(encoder);
}

static void intel_dsi_pre_enable(struct intel_encoder *encoder)
{
DRM_DEBUG_KMS("\n");
}

static void intel_dsi_enable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int pipe = intel_crtc->pipe;
u32 temp;

DRM_DEBUG_KMS("\n");

temp = I915_READ(MIPI_DEVICE_READY(pipe));
if ((temp & DEVICE_READY) == 0) {
temp &= ~ULPS_STATE_MASK;
I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY);
} else if (temp & ULPS_STATE_MASK) {
temp &= ~ULPS_STATE_MASK;
I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT);
/*
* We need to ensure that there is a minimum of 1 ms time
* available before clearing the UPLS exit state.
*/
msleep(2);
I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
}

if (is_cmd_mode(intel_dsi))
I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);

if (is_vid_mode(intel_dsi)) {
msleep(20); /* XXX */
dpi_send_cmd(intel_dsi, TURN_ON);
msleep(100);

/* assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(pipe));
}

intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
}

static void intel_dsi_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int pipe = intel_crtc->pipe;
u32 temp;

DRM_DEBUG_KMS("\n");

intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);

if (is_vid_mode(intel_dsi)) {
dpi_send_cmd(intel_dsi, SHUTDOWN);
msleep(10);

/* de-assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(pipe));

msleep(2);
}

temp = I915_READ(MIPI_DEVICE_READY(pipe));
if (temp & DEVICE_READY) {
temp &= ~DEVICE_READY;
temp &= ~ULPS_STATE_MASK;
I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
}
}

static void intel_dsi_post_disable(struct intel_encoder *encoder)
{
DRM_DEBUG_KMS("\n");

vlv_disable_dsi_pll(encoder);
}

static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 port, func;
enum pipe p;

DRM_DEBUG_KMS("\n");

/* XXX: this only works for one DSI output */
for (p = PIPE_A; p <= PIPE_B; p++) {
port = I915_READ(MIPI_PORT_CTRL(p));
func = I915_READ(MIPI_DSI_FUNC_PRG(p));

if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
*pipe = p;
return true;
}
}
}

return false;
}

static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
DRM_DEBUG_KMS("\n");

/* XXX: read flags, set to adjusted_mode */
}

static int intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct intel_dsi *intel_dsi = intel_attached_dsi(connector);

DRM_DEBUG_KMS("\n");

if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
DRM_DEBUG_KMS("MODE_NO_DBLESCAN\n");
return MODE_NO_DBLESCAN;
}

if (fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
}

return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
}

/* return txclkesc cycles in terms of divider and duration in us */
static u16 txclkesc(u32 divider, unsigned int us)
{
switch (divider) {
case ESCAPE_CLOCK_DIVIDER_1:
default:
return 20 * us;
case ESCAPE_CLOCK_DIVIDER_2:
return 10 * us;
case ESCAPE_CLOCK_DIVIDER_4:
return 5 * us;
}
}

/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count)
{
return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
}
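txbyteclkhs() rounds pixels up to bytes and then up to per-lane byte clocks, which is how set_dsi_timings() below converts the horizontal mode values. A small worked example, assuming 1920 active pixels at 24 bpp over 4 lanes (5760 bytes, then 1440 byte clocks):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned txbyteclkhs(unsigned pixels, unsigned bpp, unsigned lane_count)
{
	/* pixels -> bytes on the wire -> byte clocks per lane, rounding up */
	return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
}

int main(void)
{
	printf("%u\n", txbyteclkhs(1920, 24, 4));	/* prints 1440 */
	return 0;
}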
static void set_dsi_timings(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int pipe = intel_crtc->pipe;
unsigned int bpp = intel_crtc->config.pipe_bpp;
unsigned int lane_count = intel_dsi->lane_count;

u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;

hactive = mode->hdisplay;
hfp = mode->hsync_start - mode->hdisplay;
hsync = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;

vfp = mode->vsync_start - mode->vdisplay;
vsync = mode->vsync_end - mode->vsync_start;
vbp = mode->vtotal - mode->vsync_end;

/* horizontal values are in terms of high speed byte clock */
hactive = txbyteclkhs(hactive, bpp, lane_count);
hfp = txbyteclkhs(hfp, bpp, lane_count);
hsync = txbyteclkhs(hsync, bpp, lane_count);
hbp = txbyteclkhs(hbp, bpp, lane_count);

I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);

/* meaningful for video mode non-burst sync pulse mode only, can be zero
* for non-burst sync events and burst modes */
I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);

/* vertical values are in terms of lines */
I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
}

static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
int pipe = intel_crtc->pipe;
unsigned int bpp = intel_crtc->config.pipe_bpp;
u32 val, tmp;

DRM_DEBUG_KMS("pipe %d\n", pipe);

/* Update the DSI PLL */
vlv_enable_dsi_pll(intel_encoder);

/* XXX: Location of the call */
band_gap_wa(dev_priv);

/* escape clock divider, 20MHz, shared for A and C. device ready must be
* off when doing this! txclkesc? */
tmp = I915_READ(MIPI_CTRL(0));
tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);

/* read request priority is per pipe */
tmp = I915_READ(MIPI_CTRL(pipe));
tmp &= ~READ_REQUEST_PRIORITY_MASK;
I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);

/* XXX: why here, why like this? handling in irq handler?! */
I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);

I915_WRITE(MIPI_DPHY_PARAM(pipe),
0x3c << EXIT_ZERO_COUNT_SHIFT |
0x1f << TRAIL_COUNT_SHIFT |
0xc5 << CLK_ZERO_COUNT_SHIFT |
0x1f << PREPARE_COUNT_SHIFT);

I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);

set_dsi_timings(encoder, adjusted_mode);

val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
if (is_cmd_mode(intel_dsi)) {
val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
} else {
val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;

/* XXX: cross-check bpp vs. pixel format? */
val |= intel_dsi->pixel_format;
}
I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);

/* timeouts for recovery. one frame IIUC. if counter expires, EOT and
* stop state. */

/*
* In burst mode, value greater than one DPI line Time in byte clock
* (txbyteclkhs) To timeout this timer 1+ of the above said value is
* recommended.
*
* In non-burst mode, Value greater than one DPI frame time in byte
* clock(txbyteclkhs) To timeout this timer 1+ of the above said value
* is recommended.
*
* In DBI only mode, value greater than one DBI frame time in byte
* clock(txbyteclkhs) To timeout this timer 1+ of the above said value
* is recommended.
*/

if (is_vid_mode(intel_dsi) &&
intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
txbyteclkhs(adjusted_mode->htotal, bpp,
intel_dsi->lane_count) + 1);
} else {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
txbyteclkhs(adjusted_mode->vtotal *
adjusted_mode->htotal,
bpp, intel_dsi->lane_count) + 1);
}
I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */
I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */
I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */

/* dphy stuff */

/* in terms of low power clock */
I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100));

/* recovery disables */
I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable);

/* in terms of txbyteclkhs. actual high to low switch +
* MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
*
* XXX: write MIPI_STOP_STATE_STALL?
*/
I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46);

/* XXX: low power clock equivalence in terms of byte clock. the number
* of byte clocks occupied in one low power clock. based on txbyteclkhs
* and txclkesc. txclkesc time / txbyteclk time * (105 +
* MIPI_STOP_STATE_STALL) / 105.???
*/
I915_WRITE(MIPI_LP_BYTECLK(pipe), 4);

/* the bw essential for transmitting 16 long packets containing 252
* bytes meant for dcs write memory command is programmed in this
* register in terms of byte clocks. based on dsi transfer rate and the
* number of lanes configured the time taken to transmit 16 long packets
* in a dsi stream varies. */
I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820);

I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
0xa << LP_HS_SSW_CNT_SHIFT |
0x14 << HS_LP_PWR_SW_CNT_SHIFT);

if (is_vid_mode(intel_dsi))
I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
intel_dsi->video_mode_format);
}

static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
DRM_DEBUG_KMS("\n");
return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
}

static int intel_dsi_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *mode;

DRM_DEBUG_KMS("\n");

if (!intel_connector->panel.fixed_mode) {
DRM_DEBUG_KMS("no fixed mode\n");
return 0;
}

mode = drm_mode_duplicate(connector->dev,
intel_connector->panel.fixed_mode);
if (!mode) {
DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
return 0;
}

drm_mode_probed_add(connector, mode);
return 1;
}

static void intel_dsi_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);

DRM_DEBUG_KMS("\n");
intel_panel_fini(&intel_connector->panel);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}

static const struct drm_encoder_funcs intel_dsi_funcs = {
.destroy = intel_encoder_destroy,
};

static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
.get_modes = intel_dsi_get_modes,
.mode_valid = intel_dsi_mode_valid,
.best_encoder = intel_best_encoder,
};

static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dsi_detect,
.destroy = intel_dsi_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
};

bool intel_dsi_init(struct drm_device *dev)
{
struct intel_dsi *intel_dsi;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_display_mode *fixed_mode = NULL;
const struct intel_dsi_device *dsi;
unsigned int i;

DRM_DEBUG_KMS("\n");

intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
return false;

intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dsi);
return false;
}

intel_encoder = &intel_dsi->base;
encoder = &intel_encoder->base;
intel_dsi->attached_connector = intel_connector;

connector = &intel_connector->base;

drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);

/* XXX: very likely not all of these are needed */
intel_encoder->hot_plug = intel_dsi_hot_plug;
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
intel_encoder->pre_enable = intel_dsi_pre_enable;
intel_encoder->enable = intel_dsi_enable;
intel_encoder->mode_set = intel_dsi_mode_set;
intel_encoder->disable = intel_dsi_disable;
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
intel_encoder->get_config = intel_dsi_get_config;

intel_connector->get_hw_state = intel_connector_get_hw_state;

for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
dsi = &intel_dsi_devices[i];
intel_dsi->dev = *dsi;

if (dsi->dev_ops->init(&intel_dsi->dev))
break;
}

if (i == ARRAY_SIZE(intel_dsi_devices)) {
DRM_DEBUG_KMS("no device found\n");
goto err;
}

intel_encoder->type = INTEL_OUTPUT_DSI;
intel_encoder->crtc_mask = (1 << 0); /* XXX */

intel_encoder->cloneable = false;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);

drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);

connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
connector->interlace_allowed = false;
connector->doublescan_allowed = false;

intel_connector_attach_encoder(intel_connector, intel_encoder);

drm_sysfs_connector_add(connector);

fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
if (!fixed_mode) {
DRM_DEBUG_KMS("no fixed mode\n");
goto err;
}

fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
intel_panel_init(&intel_connector->panel, fixed_mode);

return true;

err:
drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dsi);
kfree(intel_connector);

return false;
}

@@ -0,0 +1,102 @@
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _INTEL_DSI_H
#define _INTEL_DSI_H

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"

struct intel_dsi_device {
	unsigned int panel_id;
	const char *name;
	int type;
	const struct intel_dsi_dev_ops *dev_ops;
	void *dev_priv;
};

struct intel_dsi_dev_ops {
	bool (*init)(struct intel_dsi_device *dsi);

	/* This callback must be able to assume DSI commands can be sent */
	void (*enable)(struct intel_dsi_device *dsi);

	/* This callback must be able to assume DSI commands can be sent */
	void (*disable)(struct intel_dsi_device *dsi);

	int (*mode_valid)(struct intel_dsi_device *dsi,
			  struct drm_display_mode *mode);

	bool (*mode_fixup)(struct intel_dsi_device *dsi,
			   const struct drm_display_mode *mode,
			   struct drm_display_mode *adjusted_mode);

	void (*mode_set)(struct intel_dsi_device *dsi,
			 struct drm_display_mode *mode,
			 struct drm_display_mode *adjusted_mode);

	enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);

	bool (*get_hw_state)(struct intel_dsi_device *dev);

	struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);

	void (*destroy) (struct intel_dsi_device *dsi);
};

struct intel_dsi {
	struct intel_encoder base;

	struct intel_dsi_device dev;

	struct intel_connector *attached_connector;

	/* if true, use HS mode, otherwise LP */
	bool hs;

	/* virtual channel */
	int channel;

	/* number of DSI lanes */
	unsigned int lane_count;

	/* video mode pixel format for MIPI_DSI_FUNC_PRG register */
	u32 pixel_format;

	/* video mode format for MIPI_VIDEO_MODE_FORMAT register */
	u32 video_mode_format;

	/* eot for MIPI_EOT_DISABLE register */
	u32 eot_disable;
};

static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{
	return container_of(encoder, struct intel_dsi, base.base);
}

extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);

#endif /* _INTEL_DSI_H */
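
/*
 * Illustrative sketch only, not part of this commit: a minimal panel
 * sub-driver plugging into the intel_dsi_dev_ops hooks above. All names here
 * (example_panel_*) are hypothetical. intel_dsi_init() copies each
 * intel_dsi_devices[] entry into intel_dsi->dev and keeps the first entry
 * whose init() hook returns true, so init() doubles as the probe.
 */
static bool example_panel_init(struct intel_dsi_device *dsi)
{
	/* a real driver would match a panel id from the VBT here */
	return true;
}

static struct drm_display_mode *
example_panel_get_modes(struct intel_dsi_device *dsi)
{
	/* a real driver returns the panel's fixed mode; NULL makes
	 * intel_dsi_init() bail out with "no fixed mode" */
	return NULL;
}

static const struct intel_dsi_dev_ops example_panel_ops = {
	.init = example_panel_init,
	.get_modes = example_panel_get_modes,
};

/* candidate entry for the intel_dsi_devices[] table in intel_dsi.c */
static const struct intel_dsi_device example_panel_device = {
	.panel_id = 0,			/* hypothetical */
	.name = "example panel",
	.dev_ops = &example_panel_ops,
};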

@@ -0,0 +1,427 @@
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Jani Nikula <jani.nikula@intel.com>
 */

#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <video/mipi_display.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dsi_cmd.h"

/*
 * XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
 * MIPI_COMMAND_ADDRESS registers.
 *
 * Apparently these registers provide a MIPI adapter level way to send (lots of)
 * commands and data to the receiver, without having to write the commands and
 * data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
 *
 * Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
 * MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
 * framebuffer in command mode displays) these are just an optimization that can
 * come later.
 *
 * For memory writes, these should probably be used for performance.
 */

static void print_stat(struct intel_dsi *intel_dsi)
{
	struct drm_encoder *encoder = &intel_dsi->base.base;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	val = I915_READ(MIPI_INTR_STAT(pipe));

#define STAT_BIT(val, bit) (val) & (bit) ? " " #bit : ""
	DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
		      "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
		      "\n", pipe, val,
		      STAT_BIT(val, TEARING_EFFECT),
		      STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
		      STAT_BIT(val, GEN_READ_DATA_AVAIL),
		      STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
		      STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
		      STAT_BIT(val, RX_PROT_VIOLATION),
		      STAT_BIT(val, RX_INVALID_TX_LENGTH),
		      STAT_BIT(val, ACK_WITH_NO_ERROR),
		      STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
		      STAT_BIT(val, LP_RX_TIMEOUT),
		      STAT_BIT(val, HS_TX_TIMEOUT),
		      STAT_BIT(val, DPI_FIFO_UNDERRUN),
		      STAT_BIT(val, LOW_CONTENTION),
		      STAT_BIT(val, HIGH_CONTENTION),
		      STAT_BIT(val, TXDSI_VC_ID_INVALID),
		      STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
		      STAT_BIT(val, TXCHECKSUM_ERROR),
		      STAT_BIT(val, TXECC_MULTIBIT_ERROR),
		      STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
		      STAT_BIT(val, TXFALSE_CONTROL_ERROR),
		      STAT_BIT(val, RXDSI_VC_ID_INVALID),
		      STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
		      STAT_BIT(val, RXCHECKSUM_ERROR),
		      STAT_BIT(val, RXECC_MULTIBIT_ERROR),
		      STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
		      STAT_BIT(val, RXFALSE_CONTROL_ERROR),
		      STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
		      STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
		      STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
		      STAT_BIT(val, RXEOT_SYNC_ERROR),
		      STAT_BIT(val, RXSOT_SYNC_ERROR),
		      STAT_BIT(val, RXSOT_ERROR));
#undef STAT_BIT
}
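
/*
 * For illustration: STAT_BIT(val, LP_RX_TIMEOUT) above expands to
 *
 *	(val) & (LP_RX_TIMEOUT) ? " LP_RX_TIMEOUT" : ""
 *
 * so every set status bit contributes " <BIT_NAME>" to the debug line and
 * every clear bit contributes an empty string, which is why the format
 * string carries one "%s" per interrupt status bit.
 */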

enum dsi_type {
	DSI_DCS,
	DSI_GENERIC,
};

/* enable or disable command mode hs transmissions */
void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
{
	struct drm_encoder *encoder = &intel_dsi->base.base;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	enum pipe pipe = intel_crtc->pipe;
	u32 temp;
	u32 mask = DBI_FIFO_EMPTY;

	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
		DRM_ERROR("Timeout waiting for DBI FIFO empty\n");

	temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
	temp &= DBI_HS_LP_MODE_MASK;
	I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), enable ? DBI_HS_MODE : DBI_LP_MODE);

	intel_dsi->hs = enable;
}

static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
			     u8 data_type, u16 data)
{
	struct drm_encoder *encoder = &intel_dsi->base.base;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	enum pipe pipe = intel_crtc->pipe;
	u32 ctrl_reg;
	u32 ctrl;
	u32 mask;

	DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
		      channel, data_type, data);

	if (intel_dsi->hs) {
		ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
		mask = HS_CTRL_FIFO_FULL;
	} else {
		ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
		mask = LP_CTRL_FIFO_FULL;
	}

	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
		DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
		print_stat(intel_dsi);
	}

	/*
	 * Note: This function is also used for long packets, with length passed
	 * as data, since SHORT_PACKET_PARAM_SHIFT ==
	 * LONG_PACKET_WORD_COUNT_SHIFT.
	 */
	ctrl = data << SHORT_PACKET_PARAM_SHIFT |
		channel << VIRTUAL_CHANNEL_SHIFT |
		data_type << DATA_TYPE_SHIFT;

	I915_WRITE(ctrl_reg, ctrl);

	return 0;
}

static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
			    u8 data_type, const u8 *data, int len)
{
	struct drm_encoder *encoder = &intel_dsi->base.base;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	enum pipe pipe = intel_crtc->pipe;
	u32 data_reg;
	int i, j, n;
	u32 mask;

	DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
		      channel, data_type, len);

	if (intel_dsi->hs) {
		data_reg = MIPI_HS_GEN_DATA(pipe);
		mask = HS_DATA_FIFO_FULL;
	} else {
		data_reg = MIPI_LP_GEN_DATA(pipe);
		mask = LP_DATA_FIFO_FULL;
	}

	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
		DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");

	for (i = 0; i < len; i += n) {
		u32 val = 0;
		n = min_t(int, len - i, 4);

		for (j = 0; j < n; j++)
			val |= *data++ << 8 * j;

		I915_WRITE(data_reg, val);
		/* XXX: check for data fifo full, once that is set, write 4
		 * dwords, then wait for not set, then continue. */
	}

	return dsi_vc_send_short(intel_dsi, channel, data_type, len);
}

static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
			       int channel, const u8 *data, int len,
			       enum dsi_type type)
{
	int ret;

	if (len == 0) {
		BUG_ON(type == DSI_GENERIC);
		ret = dsi_vc_send_short(intel_dsi, channel,
					MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
					0);
	} else if (len == 1) {
		ret = dsi_vc_send_short(intel_dsi, channel,
					type == DSI_GENERIC ?
					MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
					MIPI_DSI_DCS_SHORT_WRITE, data[0]);
	} else if (len == 2) {
		ret = dsi_vc_send_short(intel_dsi, channel,
					type == DSI_GENERIC ?
					MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
					MIPI_DSI_DCS_SHORT_WRITE_PARAM,
					(data[1] << 8) | data[0]);
	} else {
		ret = dsi_vc_send_long(intel_dsi, channel,
				       type == DSI_GENERIC ?
				       MIPI_DSI_GENERIC_LONG_WRITE :
				       MIPI_DSI_DCS_LONG_WRITE, data, len);
	}

	return ret;
}

int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
		     const u8 *data, int len)
{
	return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
}

int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
			 const u8 *data, int len)
{
	return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
}
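
/*
 * Usage sketch, for illustration only: turning the panel's display on with
 * the standard one byte DCS command from <video/mipi_display.h>. A short
 * packet carries the command in its parameter field; dsi_vc_write_common()
 * picks the packet type from the payload length.
 */
static inline int example_dsi_display_on(struct intel_dsi *intel_dsi)
{
	u8 cmd = MIPI_DCS_SET_DISPLAY_ON;

	return dsi_vc_dcs_write(intel_dsi, intel_dsi->channel, &cmd, 1);
}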

static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
					int channel, u8 dcs_cmd)
{
	return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
				 dcs_cmd);
}

static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
					    int channel, u8 *reqdata,
					    int reqlen)
{
	u16 data;
	u8 data_type;

	switch (reqlen) {
	case 0:
		data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
		data = 0;
		break;
	case 1:
		data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
		data = reqdata[0];
		break;
	case 2:
		data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
		data = (reqdata[1] << 8) | reqdata[0];
		break;
	default:
		BUG();
	}

	return dsi_vc_send_short(intel_dsi, channel, data_type, data);
}

static int dsi_read_data_return(struct intel_dsi *intel_dsi,
				u8 *buf, int buflen)
{
	struct drm_encoder *encoder = &intel_dsi->base.base;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	enum pipe pipe = intel_crtc->pipe;
	int i, len = 0;
	u32 data_reg, val;

	if (intel_dsi->hs) {
		data_reg = MIPI_HS_GEN_DATA(pipe);
	} else {
		data_reg = MIPI_LP_GEN_DATA(pipe);
	}

	while (len < buflen) {
		val = I915_READ(data_reg);
		for (i = 0; i < 4 && len < buflen; i++, len++)
			buf[len] = val >> 8 * i;
	}

	return len;
}
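
/*
 * For illustration: the generic read FIFO hands back 32-bit words with the
 * first received byte in the least significant position, so a five byte
 * response b0..b4 arrives as
 *
 *	word0 = b3 << 24 | b2 << 16 | b1 << 8 | b0
 *	word1 = b4			(upper bytes undefined)
 *
 * and the loop above unpacks it with buf[len] = val >> 8 * i.
 */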

int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
		    u8 *buf, int buflen)
{
	struct drm_encoder *encoder = &intel_dsi->base.base;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	enum pipe pipe = intel_crtc->pipe;
	u32 mask;
	int ret;

	/*
	 * XXX: should issue multiple read requests and reads if request is
	 * longer than MIPI_MAX_RETURN_PKT_SIZE
	 */

	I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);

	ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
	if (ret)
		return ret;

	mask = GEN_READ_DATA_AVAIL;
	if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
		DRM_ERROR("Timeout waiting for read data.\n");

	ret = dsi_read_data_return(intel_dsi, buf, buflen);
	if (ret < 0)
		return ret;

	if (ret != buflen)
		return -EIO;

	return 0;
}

int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
			u8 *reqdata, int reqlen, u8 *buf, int buflen)
{
	struct drm_encoder *encoder = &intel_dsi->base.base;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	enum pipe pipe = intel_crtc->pipe;
	u32 mask;
	int ret;

	/*
	 * XXX: should issue multiple read requests and reads if request is
	 * longer than MIPI_MAX_RETURN_PKT_SIZE
	 */

	I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);

	ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
					       reqlen);
	if (ret)
		return ret;

	mask = GEN_READ_DATA_AVAIL;
	if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
		DRM_ERROR("Timeout waiting for read data.\n");

	ret = dsi_read_data_return(intel_dsi, buf, buflen);
	if (ret < 0)
		return ret;

	if (ret != buflen)
		return -EIO;

	return 0;
}

/*
 * send a video mode command
 *
 * XXX: commands with data in MIPI_DPI_DATA?
 */
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
{
	struct drm_encoder *encoder = &intel_dsi->base.base;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	enum pipe pipe = intel_crtc->pipe;
	u32 mask;

	/* XXX: pipe, hs */
	if (intel_dsi->hs)
		cmd &= ~DPI_LP_MODE;
	else
		cmd |= DPI_LP_MODE;

	/* DPI virtual channel?! */

	mask = DPI_FIFO_EMPTY;
	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
		DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");

	/* clear bit */
	I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);

	/* XXX: old code skips write if control unchanged */
	if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
		DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);

	I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);

	mask = SPL_PKT_SENT_INTERRUPT;
	if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
		DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);

	return 0;
}
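
/*
 * Usage sketch, for illustration only: video mode special packets such as
 * SHUTDOWN and TURN_ON (MIPI_DPI_CONTROL bits) are meant to be sent through
 * dpi_send_cmd() from the encoder enable/disable paths:
 *
 *	dpi_send_cmd(intel_dsi, TURN_ON);	-- un-blank the panel
 *	dpi_send_cmd(intel_dsi, SHUTDOWN);	-- blank the panel
 */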

@@ -0,0 +1,109 @@
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Jani Nikula <jani.nikula@intel.com>
 */

#ifndef _INTEL_DSI_DSI_H
#define _INTEL_DSI_DSI_H

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <video/mipi_display.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"

void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);

int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
		     const u8 *data, int len);

int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
			 const u8 *data, int len);

int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
		    u8 *buf, int buflen);

int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
			u8 *reqdata, int reqlen, u8 *buf, int buflen);

int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd);

/* XXX: questionable write helpers */
static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
				     int channel, u8 dcs_cmd)
{
	return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
}

static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
				     int channel, u8 dcs_cmd, u8 param)
{
	u8 buf[2] = { dcs_cmd, param };
	return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
}

static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
					 int channel)
{
	return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
}

static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
					 int channel, u8 param)
{
	return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
}

static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
					 int channel, u8 param1, u8 param2)
{
	u8 buf[2] = { param1, param2 };
	return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
}

/* XXX: questionable read helpers */
static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
					int channel, u8 *buf, int buflen)
{
	return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
}

static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
					int channel, u8 param, u8 *buf,
					int buflen)
{
	return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
}

static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
					int channel, u8 param1, u8 param2,
					u8 *buf, int buflen)
{
	u8 req[2] = { param1, param2 };

	return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
}

#endif /* _INTEL_DSI_DSI_H */
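
/*
 * Usage sketch, for illustration only: reading the one byte DCS power mode
 * status (MIPI_DCS_GET_POWER_MODE from <video/mipi_display.h>) through the
 * read helpers above:
 *
 *	u8 mode;
 *	int ret = dsi_vc_dcs_read(intel_dsi, 0, MIPI_DCS_GET_POWER_MODE,
 *				  &mode, sizeof(mode));
 */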

@@ -0,0 +1,317 @@
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Shobhit Kumar <shobhit.kumar@intel.com>
 *	Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
 */

#include <linux/kernel.h>
#include "intel_drv.h"
#include "i915_drv.h"
#include "intel_dsi.h"

#define DSI_HSS_PACKET_SIZE		4
#define DSI_HSE_PACKET_SIZE		4
#define DSI_HSA_PACKET_EXTRA_SIZE	6
#define DSI_HBP_PACKET_EXTRA_SIZE	6
#define DSI_HACTIVE_PACKET_EXTRA_SIZE	6
#define DSI_HFP_PACKET_EXTRA_SIZE	6
#define DSI_EOTP_PACKET_SIZE		4

struct dsi_mnp {
	u32 dsi_pll_ctrl;
	u32 dsi_pll_div;
};

static const u32 lfsr_converts[] = {
	426, 469, 234, 373, 442, 221, 110, 311, 411,		/* 62 - 70 */
	461, 486, 243, 377, 188, 350, 175, 343, 427, 213,	/* 71 - 80 */
	106, 53, 282, 397, 354, 227, 113, 56, 284, 142,		/* 81 - 90 */
	71, 35							/* 91 - 92 */
};
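
/*
 * For illustration: the PLL dividers are programmed as LFSR seeds rather
 * than plain binary values, and lfsr_converts[] maps a divider m in the
 * supported 62..92 range to its seed by indexing with m - 62; e.g. m = 80
 * gives lfsr_converts[18] = 213.
 */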

static u32 dsi_rr_formula(const struct drm_display_mode *mode,
			  int pixel_format, int video_mode_format,
			  int lane_count, bool eotp)
{
	u32 bpp;
	u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
	u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
	u32 bytes_per_line, bytes_per_frame;
	u32 num_frames;
	u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
	u32 dsi_bit_clock_hz;
	u32 dsi_clk;

	switch (pixel_format) {
	default:
	case VID_MODE_FORMAT_RGB888:
	case VID_MODE_FORMAT_RGB666_LOOSE:
		bpp = 24;
		break;
	case VID_MODE_FORMAT_RGB666:
		bpp = 18;
		break;
	case VID_MODE_FORMAT_RGB565:
		bpp = 16;
		break;
	}

	hactive = mode->hdisplay;
	vactive = mode->vdisplay;
	hfp = mode->hsync_start - mode->hdisplay;
	hsync = mode->hsync_end - mode->hsync_start;
	hbp = mode->htotal - mode->hsync_end;

	vfp = mode->vsync_start - mode->vdisplay;
	vsync = mode->vsync_end - mode->vsync_start;
	vbp = mode->vtotal - mode->vsync_end;

	hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
	hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
	hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
	hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);

	bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
		DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
		hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
		hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
		hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;

	/*
	 * XXX: Need to accurately calculate LP to HS transition timeout and add
	 * it to bytes_per_line/bytes_per_frame.
	 */

	if (eotp && video_mode_format == VIDEO_MODE_BURST)
		bytes_per_line += DSI_EOTP_PACKET_SIZE;

	bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
		vactive * bytes_per_line + vfp * bytes_per_line;

	if (eotp &&
	    (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
	     video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
		bytes_per_frame += DSI_EOTP_PACKET_SIZE;

	num_frames = drm_mode_vrefresh(mode);
	bytes_per_x_frames = num_frames * bytes_per_frame;

	bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;

	/* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
	dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
	dsi_clk = dsi_bit_clock_hz / (1000 * 1000);

	if (eotp && video_mode_format == VIDEO_MODE_BURST)
		dsi_clk *= 2;

	return dsi_clk;
}
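
/*
 * For illustration: stripping the per-packet overheads, the computation
 * above boils down to
 *
 *	dsi_clk ~= pixels * vrefresh * bpp / (lane_count * 1e6)  [MHz]
 *
 * so a hypothetical 1280x800@60 RGB888 panel on four lanes needs roughly
 * 1280 * 800 * 60 * 24 / 4 = ~369 Mbps per lane, before the blanking and
 * protocol overheads accounted for above are added back in.
 */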

#ifdef MNP_FROM_TABLE

struct dsi_clock_table {
	u32 freq;
	u8 m;
	u8 p;
};

static const struct dsi_clock_table dsi_clk_tbl[] = {
	{300, 72, 6}, {313, 75, 6}, {323, 78, 6}, {333, 80, 6},
	{343, 82, 6}, {353, 85, 6}, {363, 87, 6}, {373, 90, 6},
	{383, 92, 6}, {390, 78, 5}, {393, 79, 5}, {400, 80, 5},
	{401, 80, 5}, {402, 80, 5}, {403, 81, 5}, {404, 81, 5},
	{405, 81, 5}, {406, 81, 5}, {407, 81, 5}, {408, 82, 5},
	{409, 82, 5}, {410, 82, 5}, {411, 82, 5}, {412, 82, 5},
	{413, 83, 5}, {414, 83, 5}, {415, 83, 5}, {416, 83, 5},
	{417, 83, 5}, {418, 84, 5}, {419, 84, 5}, {420, 84, 5},
	{430, 86, 5}, {440, 88, 5}, {450, 90, 5}, {460, 92, 5},
	{470, 75, 4}, {480, 77, 4}, {490, 78, 4}, {500, 80, 4},
	{510, 82, 4}, {520, 83, 4}, {530, 85, 4}, {540, 86, 4},
	{550, 88, 4}, {560, 90, 4}, {570, 91, 4}, {580, 70, 3},
	{590, 71, 3}, {600, 72, 3}, {610, 73, 3}, {620, 74, 3},
	{630, 76, 3}, {640, 77, 3}, {650, 78, 3}, {660, 79, 3},
	{670, 80, 3}, {680, 82, 3}, {690, 83, 3}, {700, 84, 3},
	{710, 85, 3}, {720, 86, 3}, {730, 88, 3}, {740, 89, 3},
	{750, 90, 3}, {760, 91, 3}, {770, 92, 3}, {780, 62, 2},
	{790, 63, 2}, {800, 64, 2}, {880, 70, 2}, {900, 72, 2},
	{1000, 80, 2}, /* dsi clock frequency in Mhz*/
};

static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
{
	unsigned int i;
	u8 m;
	u8 n;
	u8 p;
	u32 m_seed;

	if (dsi_clk < 300 || dsi_clk > 1000)
		return -ECHRNG;
	/* pick the first entry at or above the target frequency; stop at the
	 * last entry rather than walking past the end of the table */
	for (i = 0; i < ARRAY_SIZE(dsi_clk_tbl) - 1; i++) {
		if (dsi_clk_tbl[i].freq > dsi_clk)
			break;
	}

	m = dsi_clk_tbl[i].m;
	p = dsi_clk_tbl[i].p;
	m_seed = lfsr_converts[m - 62];
	n = 1;
	dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + p - 2);
	dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
		m_seed << DSI_PLL_M1_DIV_SHIFT;

	return 0;
}

#else

static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
{
	u32 m, n, p;
	u32 ref_clk;
	u32 error;
	u32 tmp_error;
	u32 target_dsi_clk;
	u32 calc_dsi_clk;
	u32 calc_m;
	u32 calc_p;
	u32 m_seed;

	if (dsi_clk < 300 || dsi_clk > 1150) {
		DRM_ERROR("DSI CLK Out of Range\n");
		return -ECHRNG;
	}

	ref_clk = 25000;
	target_dsi_clk = dsi_clk * 1000;
	error = 0xFFFFFFFF;
	calc_m = 0;
	calc_p = 0;

	for (m = 62; m <= 92; m++) {
		for (p = 2; p <= 6; p++) {

			calc_dsi_clk = (m * ref_clk) / p;
			if (calc_dsi_clk >= target_dsi_clk) {
				tmp_error = calc_dsi_clk - target_dsi_clk;
				if (tmp_error < error) {
					error = tmp_error;
					calc_m = m;
					calc_p = p;
				}
			}
		}
	}

	m_seed = lfsr_converts[calc_m - 62];
	n = 1;
	dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
	dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
		m_seed << DSI_PLL_M1_DIV_SHIFT;

	return 0;
}
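
/*
 * For illustration: with the 25 MHz (25000 kHz) reference above, the
 * exhaustive m/p search keeps the combination with the smallest overshoot;
 * a 500 MHz target is met exactly by m = 80, p = 4, since
 * 80 * 25000 / 4 = 500000 kHz, giving error = 0.
 */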

#endif

/*
 * XXX: The muxing and gating is hard coded for now. Need to add support for
 * sharing PLLs with two DSI outputs.
 */
static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	int ret;
	struct dsi_mnp dsi_mnp;
	u32 dsi_clk;

	dsi_clk = dsi_rr_formula(mode, intel_dsi->pixel_format,
				 intel_dsi->video_mode_format,
				 intel_dsi->lane_count, !intel_dsi->eot_disable);

	ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
	if (ret) {
		DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
		return;
	}

	dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;

	DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
		      dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);

	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
}

void vlv_enable_dsi_pll(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	u32 tmp;

	DRM_DEBUG_KMS("\n");

	mutex_lock(&dev_priv->dpio_lock);

	vlv_configure_dsi_pll(encoder);

	/* wait at least 0.5 us after ungating before enabling VCO */
	usleep_range(1, 10);

	tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	tmp |= DSI_PLL_VCO_EN;
	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);

	mutex_unlock(&dev_priv->dpio_lock);

	if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
		DRM_ERROR("DSI PLL lock failed\n");
		return;
	}

	DRM_DEBUG_KMS("DSI PLL locked\n");
}

void vlv_disable_dsi_pll(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	u32 tmp;

	DRM_DEBUG_KMS("\n");

	mutex_lock(&dev_priv->dpio_lock);

	tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	tmp &= ~DSI_PLL_VCO_EN;
	tmp |= DSI_PLL_LDO_GATE;
	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);

	mutex_unlock(&dev_priv->dpio_lock);
}

@@ -153,6 +153,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
		flags |= DRM_MODE_FLAG_NVSYNC;

	pipe_config->adjusted_mode.flags |= flags;

	pipe_config->adjusted_mode.clock = pipe_config->port_clock;
}

static void intel_disable_dvo(struct intel_encoder *encoder)

@@ -267,11 +269,6 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
		drm_mode_set_crtcinfo(adjusted_mode, 0);
	}

	if (intel_dvo->dev.dev_ops->mode_fixup)
		return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
							  &pipe_config->requested_mode,
							  adjusted_mode);

	return true;
}

@@ -713,6 +713,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	u32 tmp, flags = 0;
	int dotclock;

	tmp = I915_READ(intel_hdmi->hdmi_reg);

@@ -727,6 +728,16 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
		flags |= DRM_MODE_FLAG_NVSYNC;

	pipe_config->adjusted_mode.flags |= flags;

	if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
		dotclock = pipe_config->port_clock * 2 / 3;
	else
		dotclock = pipe_config->port_clock;

	if (HAS_PCH_SPLIT(dev_priv->dev))
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->adjusted_mode.clock = dotclock;
}

static void intel_enable_hdmi(struct intel_encoder *encoder)

@@ -862,7 +873,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
	int clock_12bpc = pipe_config->adjusted_mode.clock * 3 / 2;
	int portclock_limit = hdmi_portclock_limit(intel_hdmi);
	int desired_bpp;

@@ -1079,35 +1090,35 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)

	/* Enable clock channels for this port */
	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
	val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
	vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);

	/* HDMI 1.0V-2dB */
	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0);
	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port),
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0);
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port),
		       0x2b245f5f);
	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
		       0x5578b83a);
	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port),
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port),
		       0x0c782040);
	vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port),
	vlv_dpio_write(dev_priv, pipe, DPIO_TX3_SWING_CTL4(port),
		       0x2b247878);
	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
		       0x00002000);
	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
		       DPIO_TX_OCALINIT_EN);

	/* Program lane clock */
	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port),
		       0x00760018);
	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port),
		       0x00400888);
	mutex_unlock(&dev_priv->dpio_lock);

@@ -1121,30 +1132,33 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	int port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	if (!IS_VALLEYVIEW(dev))
		return;

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
		       DPIO_PCS_TX_LANE2_RESET |
		       DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
		       DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
		       DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
		       (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
		       DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
	vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
	vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);

	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
		       0x00002000);
	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
		       DPIO_TX_OCALINIT_EN);
	mutex_unlock(&dev_priv->dpio_lock);
}

@@ -1153,12 +1167,15 @@ static void intel_hdmi_post_disable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	int port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Reset lanes to avoid HDMI flicker (VLV w/a) */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000);
	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060);
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 0x00e00060);
	mutex_unlock(&dev_priv->dpio_lock);
}

@@ -92,6 +92,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lvds_reg, tmp, flags = 0;
	int dotclock;

	if (HAS_PCH_SPLIT(dev))
		lvds_reg = PCH_LVDS;

@@ -116,6 +117,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,

		pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
	}

	dotclock = pipe_config->port_clock;

	if (HAS_PCH_SPLIT(dev_priv->dev))
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->adjusted_mode.clock = dotclock;
}

/* The LVDS pin pair needs to be on before the DPLLs are enabled.

@@ -36,8 +36,11 @@
 #include "i915_drv.h"
 #include "intel_drv.h"

#define PCI_ASLE 0xe4
#define PCI_ASLS 0xfc
#define PCI_ASLE		0xe4
#define PCI_ASLS		0xfc
#define PCI_SWSCI		0xe8
#define PCI_SWSCI_SCISEL	(1 << 15)
#define PCI_SWSCI_GSSCIE	(1 << 0)

#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET   0x100

@@ -107,25 +110,38 @@ struct opregion_asle {
	u32 epfm;	/* enabled panel fitting modes */
	u8 plut[74];	/* panel LUT and identifier */
	u32 pfmb;	/* PWM freq and min brightness */
	u8 rsvd[102];
	u32 cddv;	/* color correction default values */
	u32 pcft;	/* power conservation features */
	u32 srot;	/* supported rotation angles */
	u32 iuer;	/* IUER events */
	u8 rsvd[86];
} __attribute__((packed));

/* Driver readiness indicator */
#define ASLE_ARDY_READY		(1 << 0)
#define ASLE_ARDY_NOT_READY	(0 << 0)

/* ASLE irq request bits */
#define ASLE_SET_ALS_ILLUM	(1 << 0)
#define ASLE_SET_BACKLIGHT	(1 << 1)
#define ASLE_SET_PFIT		(1 << 2)
#define ASLE_SET_PWM_FREQ	(1 << 3)
#define ASLE_REQ_MSK		0xf

/* response bits of ASLE irq request */
#define ASLE_ALS_ILLUM_FAILED	(1<<10)
#define ASLE_BACKLIGHT_FAILED	(1<<12)
#define ASLE_PFIT_FAILED	(1<<14)
#define ASLE_PWM_FREQ_FAILED	(1<<16)
/* ASLE Interrupt Command (ASLC) bits */
#define ASLC_SET_ALS_ILLUM		(1 << 0)
#define ASLC_SET_BACKLIGHT		(1 << 1)
#define ASLC_SET_PFIT			(1 << 2)
#define ASLC_SET_PWM_FREQ		(1 << 3)
#define ASLC_SUPPORTED_ROTATION_ANGLES	(1 << 4)
#define ASLC_BUTTON_ARRAY		(1 << 5)
#define ASLC_CONVERTIBLE_INDICATOR	(1 << 6)
#define ASLC_DOCKING_INDICATOR		(1 << 7)
#define ASLC_ISCT_STATE_CHANGE		(1 << 8)
#define ASLC_REQ_MSK			0x1ff
/* response bits */
#define ASLC_ALS_ILLUM_FAILED		(1 << 10)
#define ASLC_BACKLIGHT_FAILED		(1 << 12)
#define ASLC_PFIT_FAILED		(1 << 14)
#define ASLC_PWM_FREQ_FAILED		(1 << 16)
#define ASLC_ROTATION_ANGLES_FAILED	(1 << 18)
#define ASLC_BUTTON_ARRAY_FAILED	(1 << 20)
#define ASLC_CONVERTIBLE_FAILED		(1 << 22)
#define ASLC_DOCKING_FAILED		(1 << 24)
#define ASLC_ISCT_STATE_FAILED		(1 << 26)

/* Technology enabled indicator */
#define ASLE_TCHE_ALS_EN	(1 << 0)

@@ -151,6 +167,60 @@ struct opregion_asle {

#define ASLE_CBLV_VALID         (1<<31)

/* IUER */
#define ASLE_IUER_DOCKING		(1 << 7)
#define ASLE_IUER_CONVERTIBLE		(1 << 6)
#define ASLE_IUER_ROTATION_LOCK_BTN	(1 << 4)
#define ASLE_IUER_VOLUME_DOWN_BTN	(1 << 3)
#define ASLE_IUER_VOLUME_UP_BTN		(1 << 2)
#define ASLE_IUER_WINDOWS_BTN		(1 << 1)
#define ASLE_IUER_POWER_BTN		(1 << 0)

/* Software System Control Interrupt (SWSCI) */
#define SWSCI_SCIC_INDICATOR		(1 << 0)
#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT	1
#define SWSCI_SCIC_MAIN_FUNCTION_MASK	(0xf << 1)
#define SWSCI_SCIC_SUB_FUNCTION_SHIFT	8
#define SWSCI_SCIC_SUB_FUNCTION_MASK	(0xff << 8)
#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT	8
#define SWSCI_SCIC_EXIT_PARAMETER_MASK	(0xff << 8)
#define SWSCI_SCIC_EXIT_STATUS_SHIFT	5
#define SWSCI_SCIC_EXIT_STATUS_MASK	(7 << 5)
#define SWSCI_SCIC_EXIT_STATUS_SUCCESS	1

#define SWSCI_FUNCTION_CODE(main, sub) \
	((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
	 (sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
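
/*
 * For illustration: SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5), the "get panel
 * details" call below, encodes to (4 << 1) | (5 << 8) = 0x508; swsci()
 * later recovers the main and sub function with the shift/mask pairs above.
 */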

/* SWSCI: Get BIOS Data (GBDA) */
#define SWSCI_GBDA			4
#define SWSCI_GBDA_SUPPORTED_CALLS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
#define SWSCI_GBDA_REQUESTED_CALLBACKS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
#define SWSCI_GBDA_BOOT_DISPLAY_PREF	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
#define SWSCI_GBDA_PANEL_DETAILS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
#define SWSCI_GBDA_TV_STANDARD		SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
#define SWSCI_GBDA_INTERNAL_GRAPHICS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
#define SWSCI_GBDA_SPREAD_SPECTRUM	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)

/* SWSCI: System BIOS Callbacks (SBCB) */
#define SWSCI_SBCB			6
#define SWSCI_SBCB_SUPPORTED_CALLBACKS	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
#define SWSCI_SBCB_INIT_COMPLETION	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
#define SWSCI_SBCB_PRE_HIRES_SET_MODE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
#define SWSCI_SBCB_POST_HIRES_SET_MODE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
#define SWSCI_SBCB_DISPLAY_SWITCH	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
#define SWSCI_SBCB_SET_TV_FORMAT	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
#define SWSCI_SBCB_ADAPTER_POWER_STATE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
#define SWSCI_SBCB_DISPLAY_POWER_STATE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
#define SWSCI_SBCB_SET_BOOT_DISPLAY	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
#define SWSCI_SBCB_SET_PANEL_DETAILS	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
#define SWSCI_SBCB_SET_INTERNAL_GFX	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
#define SWSCI_SBCB_SUSPEND_RESUME	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
#define SWSCI_SBCB_SET_SPREAD_SPECTRUM	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
#define SWSCI_SBCB_POST_VBE_PM		SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)

#define ACPI_OTHER_OUTPUT (0<<8)
#define ACPI_VGA_OUTPUT (1<<8)
#define ACPI_TV_OUTPUT (2<<8)

@@ -158,6 +228,169 @@ struct opregion_asle {
 #define ACPI_LVDS_OUTPUT (4<<8)

 #ifdef CONFIG_ACPI
static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci;
	u32 main_function, sub_function, scic;
	u16 pci_swsci;
	u32 dslp;

	if (!swsci)
		return -ENODEV;

	main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
		SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
	sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
		SWSCI_SCIC_SUB_FUNCTION_SHIFT;

	/* Check if we can call the function. See swsci_setup for details. */
	if (main_function == SWSCI_SBCB) {
		if ((dev_priv->opregion.swsci_sbcb_sub_functions &
		     (1 << sub_function)) == 0)
			return -EINVAL;
	} else if (main_function == SWSCI_GBDA) {
		if ((dev_priv->opregion.swsci_gbda_sub_functions &
		     (1 << sub_function)) == 0)
			return -EINVAL;
	}

	/* Driver sleep timeout in ms. */
	dslp = ioread32(&swsci->dslp);
	if (!dslp) {
		dslp = 2;
	} else if (dslp > 500) {
		/* Hey bios, trust must be earned. */
		WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp);
		dslp = 500;
	}

	/* The spec tells us to do this, but we are the only user... */
	scic = ioread32(&swsci->scic);
	if (scic & SWSCI_SCIC_INDICATOR) {
		DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
		return -EBUSY;
	}

	scic = function | SWSCI_SCIC_INDICATOR;

	iowrite32(parm, &swsci->parm);
	iowrite32(scic, &swsci->scic);

	/* Ensure SCI event is selected and event trigger is cleared. */
	pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
	if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
		pci_swsci |= PCI_SWSCI_SCISEL;
		pci_swsci &= ~PCI_SWSCI_GSSCIE;
		pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
	}

	/* Use event trigger to tell bios to check the mail. */
	pci_swsci |= PCI_SWSCI_GSSCIE;
	pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);

	/* Poll for the result. */
#define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0)
	if (wait_for(C, dslp)) {
		DRM_DEBUG_DRIVER("SWSCI request timed out\n");
		return -ETIMEDOUT;
	}

	scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
		SWSCI_SCIC_EXIT_STATUS_SHIFT;

	/* Note: scic == 0 is an error! */
	if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
		DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
		return -EIO;
	}

	if (parm_out)
		*parm_out = ioread32(&swsci->parm);

	return 0;

#undef C
}

#define DISPLAY_TYPE_CRT			0
#define DISPLAY_TYPE_TV				1
#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL	2
#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL	3

int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
				  bool enable)
{
	struct drm_device *dev = intel_encoder->base.dev;
	u32 parm = 0;
	u32 type = 0;
	u32 port;

	/* don't care about old stuff for now */
	if (!HAS_DDI(dev))
		return 0;

	port = intel_ddi_get_encoder_port(intel_encoder);
	if (port == PORT_E) {
		port = 0;
	} else {
		parm |= 1 << port;
		port++;
	}

	if (!enable)
		parm |= 4 << 8;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_ANALOG:
		type = DISPLAY_TYPE_CRT;
		break;
	case INTEL_OUTPUT_UNKNOWN:
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
		type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
		break;
	case INTEL_OUTPUT_EDP:
		type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
		break;
	default:
		WARN_ONCE(1, "unsupported intel_encoder type %d\n",
			  intel_encoder->type);
		return -EINVAL;
	}

	parm |= type << (16 + port * 3);

	return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
}
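
/*
 * For illustration: enabling a DisplayPort output on DDI port D yields
 * parm = 1 << 3 for the port bit, port is then bumped to 4, and
 * DISPLAY_TYPE_EXTERNAL_FLAT_PANEL (2) lands at bit 16 + 4 * 3 = 28, so
 * parm = 0x20000008 is handed to SWSCI_SBCB_DISPLAY_POWER_STATE.
 */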

static const struct {
	pci_power_t pci_power_state;
	u32 parm;
} power_state_map[] = {
	{ PCI_D0,	0x00 },
	{ PCI_D1,	0x01 },
	{ PCI_D2,	0x02 },
	{ PCI_D3hot,	0x04 },
	{ PCI_D3cold,	0x04 },
};

int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
	int i;

	if (!HAS_DDI(dev))
		return 0;

	for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
		if (state == power_state_map[i].pci_power_state)
			return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
				     power_state_map[i].parm, NULL);
	}

	return -EINVAL;
}
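
/*
 * Usage sketch, for illustration only: a runtime PM path would notify the
 * BIOS before cutting power with
 *
 *	intel_opregion_notify_adapter(dev, PCI_D3hot);
 *
 * which power_state_map[] above translates to SWSCI adapter power state
 * parameter 0x04.
 */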

static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

@@ -166,11 +399,11 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);

	if (!(bclp & ASLE_BCLP_VALID))
		return ASLE_BACKLIGHT_FAILED;
		return ASLC_BACKLIGHT_FAILED;

	bclp &= ASLE_BCLP_MSK;
	if (bclp > 255)
		return ASLE_BACKLIGHT_FAILED;
		return ASLC_BACKLIGHT_FAILED;

	intel_panel_set_backlight(dev, bclp, 255);
	iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);

@@ -183,13 +416,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
	/* alsi is the current ALS reading in lux. 0 indicates below sensor
	   range, 0xffff indicates above sensor range. 1-0xfffe are valid */
	DRM_DEBUG_DRIVER("Illum is not supported\n");
	return ASLE_ALS_ILLUM_FAILED;
	return ASLC_ALS_ILLUM_FAILED;
}

static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
{
	DRM_DEBUG_DRIVER("PWM freq is not supported\n");
	return ASLE_PWM_FREQ_FAILED;
	return ASLC_PWM_FREQ_FAILED;
}

static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)

@@ -197,39 +430,106 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
	/* Panel fitting is currently controlled by the X code, so this is a
	   noop until modesetting support works fully */
	DRM_DEBUG_DRIVER("Pfit is not supported\n");
	return ASLE_PFIT_FAILED;
	return ASLC_PFIT_FAILED;
}

static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
{
	DRM_DEBUG_DRIVER("SROT is not supported\n");
	return ASLC_ROTATION_ANGLES_FAILED;
}

static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
{
	if (!iuer)
		DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
	if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
		DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
	if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
		DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
	if (iuer & ASLE_IUER_VOLUME_UP_BTN)
		DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
	if (iuer & ASLE_IUER_WINDOWS_BTN)
		DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
	if (iuer & ASLE_IUER_POWER_BTN)
		DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");

	return ASLC_BUTTON_ARRAY_FAILED;
}

static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
{
	if (iuer & ASLE_IUER_CONVERTIBLE)
		DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
	else
		DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");

	return ASLC_CONVERTIBLE_FAILED;
}

static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
{
	if (iuer & ASLE_IUER_DOCKING)
		DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
	else
		DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");

	return ASLC_DOCKING_FAILED;
}

static u32 asle_isct_state(struct drm_device *dev)
{
	DRM_DEBUG_DRIVER("ISCT is not supported\n");
	return ASLC_ISCT_STATE_FAILED;
}

void intel_opregion_asle_intr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
	u32 asle_stat = 0;
	u32 asle_req;
	u32 aslc_stat = 0;
	u32 aslc_req;

	if (!asle)
		return;

	asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
	aslc_req = ioread32(&asle->aslc);

	if (!asle_req) {
		DRM_DEBUG_DRIVER("non asle set request??\n");
	if (!(aslc_req & ASLC_REQ_MSK)) {
		DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
				 aslc_req);
		return;
	}

	if (asle_req & ASLE_SET_ALS_ILLUM)
		asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
	if (aslc_req & ASLC_SET_ALS_ILLUM)
		aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));

	if (asle_req & ASLE_SET_BACKLIGHT)
		asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
	if (aslc_req & ASLC_SET_BACKLIGHT)
		aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));

	if (asle_req & ASLE_SET_PFIT)
		asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
	if (aslc_req & ASLC_SET_PFIT)
		aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));

	if (asle_req & ASLE_SET_PWM_FREQ)
		asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
	if (aslc_req & ASLC_SET_PWM_FREQ)
		aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));

	iowrite32(asle_stat, &asle->aslc);
	if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
		aslc_stat |= asle_set_supported_rotation_angles(dev,
							ioread32(&asle->srot));

	if (aslc_req & ASLC_BUTTON_ARRAY)
		aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer));

	if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
		aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer));

	if (aslc_req & ASLC_DOCKING_INDICATOR)
		aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer));

	if (aslc_req & ASLC_ISCT_STATE_CHANGE)
		aslc_stat |= asle_isct_state(dev);

	iowrite32(aslc_stat, &asle->aslc);
}

#define ACPI_EV_DISPLAY_SWITCH (1<<0)

@@ -446,8 +746,68 @@ void intel_opregion_fini(struct drm_device *dev)
	opregion->swsci = NULL;
	opregion->asle = NULL;
	opregion->vbt = NULL;
	opregion->lid_state = NULL;
}
#endif

static void swsci_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	bool requested_callbacks = false;
	u32 tmp;

	/* Sub-function code 0 is okay, let's allow them. */
	opregion->swsci_gbda_sub_functions = 1;
	opregion->swsci_sbcb_sub_functions = 1;

	/* We use GBDA to ask for supported GBDA calls. */
	if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
		/* make the bits match the sub-function codes */
		tmp <<= 1;
		opregion->swsci_gbda_sub_functions |= tmp;
	}

	/*
	 * We also use GBDA to ask for _requested_ SBCB callbacks. The driver
	 * must not call interfaces that are not specifically requested by the
	 * bios.
	 */
	if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
		/* here, the bits already match sub-function codes */
|
||||
opregion->swsci_sbcb_sub_functions |= tmp;
|
||||
requested_callbacks = true;
|
||||
}
|
||||
|
||||
/*
|
||||
* But we use SBCB to ask for _supported_ SBCB calls. This does not mean
|
||||
* the callback is _requested_. But we still can't call interfaces that
|
||||
* are not requested.
|
||||
*/
|
||||
if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
|
||||
/* make the bits match the sub-function codes */
|
||||
u32 low = tmp & 0x7ff;
|
||||
u32 high = tmp & ~0xfff; /* bit 11 is reserved */
|
||||
tmp = (high << 4) | (low << 1) | 1;
|
||||
|
||||
/* best guess what to do with supported wrt requested */
|
||||
if (requested_callbacks) {
|
||||
u32 req = opregion->swsci_sbcb_sub_functions;
|
||||
if ((req & tmp) != req)
|
||||
DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
|
||||
/* XXX: for now, trust the requested callbacks */
|
||||
/* opregion->swsci_sbcb_sub_functions &= tmp; */
|
||||
} else {
|
||||
opregion->swsci_sbcb_sub_functions |= tmp;
|
||||
}
|
||||
}
|
||||
|
||||
DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
|
||||
opregion->swsci_gbda_sub_functions,
|
||||
opregion->swsci_sbcb_sub_functions);
|
||||
}
|
||||
#else /* CONFIG_ACPI */
|
||||
static inline void swsci_setup(struct drm_device *dev) {}
|
||||
#endif /* CONFIG_ACPI */
|
||||
|
||||
int intel_opregion_setup(struct drm_device *dev)
|
||||
{
|
||||
|
@ -490,6 +850,7 @@ int intel_opregion_setup(struct drm_device *dev)
|
|||
if (mboxes & MBOX_SWSCI) {
|
||||
DRM_DEBUG_DRIVER("SWSCI supported\n");
|
||||
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
|
||||
swsci_setup(dev);
|
||||
}
|
||||
if (mboxes & MBOX_ASLE) {
|
||||
DRM_DEBUG_DRIVER("ASLE supported\n");
|
||||
|
|
|
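
The interesting detail in swsci_setup() above is the SBCB remap: the BIOS reports supported SBCB calls as a bitmask whose bit 11 is reserved, and the driver spreads the low and high halves apart so that bit N of the stored mask lines up with sub-function code N, with bit 0 forced on because sub-function 0 is always allowed. A minimal standalone sketch of just that remapping; the main() harness and the sample value are illustrative, not part of the driver:

    #include <stdint.h>
    #include <stdio.h>

    /* Remap a BIOS-reported SBCB capability word so that bit N of the
     * result corresponds to sub-function code N: bits 0-10 move up by
     * one (code 0 occupies bit 0), bit 11 is reserved and skipped, and
     * the bits above it move up by four in total. */
    static uint32_t sbcb_remap(uint32_t tmp)
    {
        uint32_t low = tmp & 0x7ff;
        uint32_t high = tmp & ~0xfff;   /* bit 11 is reserved */

        return (high << 4) | (low << 1) | 1;
    }

    int main(void)
    {
        /* bit 0 (code 1) and bit 12 set: remaps to 0x00010003 */
        printf("%08x\n", (unsigned)sbcb_remap(0x00001001));
        return 0;
    }
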
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -821,14 +821,11 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
 static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
 					  struct intel_crtc *crtc)
 {
-	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
-
 	if (!crtc->active)
 		return -EINVAL;
 
 	/* can't use the overlay with double wide pipe */
-	if (INTEL_INFO(overlay->dev)->gen < 4 &&
-	    (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
+	if (crtc->config.double_wide)
 		return -EINVAL;
 
 	return 0;
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -50,23 +50,22 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
 			struct intel_crtc_config *pipe_config,
 			int fitting_mode)
 {
-	struct drm_display_mode *mode, *adjusted_mode;
+	struct drm_display_mode *adjusted_mode;
 	int x, y, width, height;
 
-	mode = &pipe_config->requested_mode;
 	adjusted_mode = &pipe_config->adjusted_mode;
 
 	x = y = width = height = 0;
 
 	/* Native modes don't need fitting */
-	if (adjusted_mode->hdisplay == mode->hdisplay &&
-	    adjusted_mode->vdisplay == mode->vdisplay)
+	if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
+	    adjusted_mode->vdisplay == pipe_config->pipe_src_h)
 		goto done;
 
 	switch (fitting_mode) {
 	case DRM_MODE_SCALE_CENTER:
-		width = mode->hdisplay;
-		height = mode->vdisplay;
+		width = pipe_config->pipe_src_w;
+		height = pipe_config->pipe_src_h;
 		x = (adjusted_mode->hdisplay - width + 1)/2;
 		y = (adjusted_mode->vdisplay - height + 1)/2;
 		break;
@@ -74,17 +73,19 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
 	case DRM_MODE_SCALE_ASPECT:
 		/* Scale but preserve the aspect ratio */
 		{
-			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
-			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+			u32 scaled_width = adjusted_mode->hdisplay
+				* pipe_config->pipe_src_h;
+			u32 scaled_height = pipe_config->pipe_src_w
+				* adjusted_mode->vdisplay;
+
 			if (scaled_width > scaled_height) { /* pillar */
-				width = scaled_height / mode->vdisplay;
+				width = scaled_height / pipe_config->pipe_src_h;
 				if (width & 1)
 					width++;
 				x = (adjusted_mode->hdisplay - width + 1) / 2;
 				y = 0;
 				height = adjusted_mode->vdisplay;
 			} else if (scaled_width < scaled_height) { /* letter */
-				height = scaled_width / mode->hdisplay;
+				height = scaled_width / pipe_config->pipe_src_w;
 				if (height & 1)
 					height++;
 				y = (adjusted_mode->vdisplay - height + 1) / 2;
@@ -171,20 +172,96 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
 	return (FACTOR * ratio + FACTOR/2) / FACTOR;
 }
 
+static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
+			      u32 *pfit_control)
+{
+	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+	u32 scaled_width = adjusted_mode->hdisplay *
+		pipe_config->pipe_src_h;
+	u32 scaled_height = pipe_config->pipe_src_w *
+		adjusted_mode->vdisplay;
+
+	/* 965+ is easy, it does everything in hw */
+	if (scaled_width > scaled_height)
+		*pfit_control |= PFIT_ENABLE |
+			PFIT_SCALING_PILLAR;
+	else if (scaled_width < scaled_height)
+		*pfit_control |= PFIT_ENABLE |
+			PFIT_SCALING_LETTER;
+	else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w)
+		*pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
+}
+
+static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
+			      u32 *pfit_control, u32 *pfit_pgm_ratios,
+			      u32 *border)
+{
+	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+	u32 scaled_width = adjusted_mode->hdisplay *
+		pipe_config->pipe_src_h;
+	u32 scaled_height = pipe_config->pipe_src_w *
+		adjusted_mode->vdisplay;
+	u32 bits;
+
+	/*
+	 * For earlier chips we have to calculate the scaling
+	 * ratio by hand and program it into the
+	 * PFIT_PGM_RATIO register
+	 */
+	if (scaled_width > scaled_height) { /* pillar */
+		centre_horizontally(adjusted_mode,
+				    scaled_height /
+				    pipe_config->pipe_src_h);
+
+		*border = LVDS_BORDER_ENABLE;
+		if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) {
+			bits = panel_fitter_scaling(pipe_config->pipe_src_h,
+						    adjusted_mode->vdisplay);
+
+			*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+					     bits << PFIT_VERT_SCALE_SHIFT);
+			*pfit_control |= (PFIT_ENABLE |
+					  VERT_INTERP_BILINEAR |
+					  HORIZ_INTERP_BILINEAR);
+		}
+	} else if (scaled_width < scaled_height) { /* letter */
+		centre_vertically(adjusted_mode,
+				  scaled_width /
+				  pipe_config->pipe_src_w);
+
+		*border = LVDS_BORDER_ENABLE;
+		if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
+			bits = panel_fitter_scaling(pipe_config->pipe_src_w,
+						    adjusted_mode->hdisplay);
+
+			*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+					     bits << PFIT_VERT_SCALE_SHIFT);
+			*pfit_control |= (PFIT_ENABLE |
+					  VERT_INTERP_BILINEAR |
+					  HORIZ_INTERP_BILINEAR);
+		}
+	} else {
+		/* Aspects match, Let hw scale both directions */
+		*pfit_control |= (PFIT_ENABLE |
+				  VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+				  VERT_INTERP_BILINEAR |
+				  HORIZ_INTERP_BILINEAR);
+	}
+}
+
 void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
 			      struct intel_crtc_config *pipe_config,
 			      int fitting_mode)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
 	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
-	struct drm_display_mode *mode, *adjusted_mode;
+	struct drm_display_mode *adjusted_mode;
 
-	mode = &pipe_config->requested_mode;
 	adjusted_mode = &pipe_config->adjusted_mode;
 
 	/* Native modes don't need fitting */
-	if (adjusted_mode->hdisplay == mode->hdisplay &&
-	    adjusted_mode->vdisplay == mode->vdisplay)
+	if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
+	    adjusted_mode->vdisplay == pipe_config->pipe_src_h)
 		goto out;
 
 	switch (fitting_mode) {
@@ -193,81 +270,25 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
 		 * For centered modes, we have to calculate border widths &
 		 * heights and modify the values programmed into the CRTC.
 		 */
-		centre_horizontally(adjusted_mode, mode->hdisplay);
-		centre_vertically(adjusted_mode, mode->vdisplay);
+		centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
+		centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
 		border = LVDS_BORDER_ENABLE;
 		break;
 	case DRM_MODE_SCALE_ASPECT:
 		/* Scale but preserve the aspect ratio */
-		if (INTEL_INFO(dev)->gen >= 4) {
-			u32 scaled_width = adjusted_mode->hdisplay *
-				mode->vdisplay;
-			u32 scaled_height = mode->hdisplay *
-				adjusted_mode->vdisplay;
-
-			/* 965+ is easy, it does everything in hw */
-			if (scaled_width > scaled_height)
-				pfit_control |= PFIT_ENABLE |
-					PFIT_SCALING_PILLAR;
-			else if (scaled_width < scaled_height)
-				pfit_control |= PFIT_ENABLE |
-					PFIT_SCALING_LETTER;
-			else if (adjusted_mode->hdisplay != mode->hdisplay)
-				pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
-		} else {
-			u32 scaled_width = adjusted_mode->hdisplay *
-				mode->vdisplay;
-			u32 scaled_height = mode->hdisplay *
-				adjusted_mode->vdisplay;
-			/*
-			 * For earlier chips we have to calculate the scaling
-			 * ratio by hand and program it into the
-			 * PFIT_PGM_RATIO register
-			 */
-			if (scaled_width > scaled_height) { /* pillar */
-				centre_horizontally(adjusted_mode,
-						    scaled_height /
-						    mode->vdisplay);
-
-				border = LVDS_BORDER_ENABLE;
-				if (mode->vdisplay != adjusted_mode->vdisplay) {
-					u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
-					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-							    bits << PFIT_VERT_SCALE_SHIFT);
-					pfit_control |= (PFIT_ENABLE |
-							 VERT_INTERP_BILINEAR |
-							 HORIZ_INTERP_BILINEAR);
-				}
-			} else if (scaled_width < scaled_height) { /* letter */
-				centre_vertically(adjusted_mode,
-						  scaled_width /
-						  mode->hdisplay);
-
-				border = LVDS_BORDER_ENABLE;
-				if (mode->hdisplay != adjusted_mode->hdisplay) {
-					u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
-					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-							    bits << PFIT_VERT_SCALE_SHIFT);
-					pfit_control |= (PFIT_ENABLE |
-							 VERT_INTERP_BILINEAR |
-							 HORIZ_INTERP_BILINEAR);
-				}
-			} else {
-				/* Aspects match, Let hw scale both directions */
-				pfit_control |= (PFIT_ENABLE |
-						 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
-						 VERT_INTERP_BILINEAR |
-						 HORIZ_INTERP_BILINEAR);
-			}
-		}
+		if (INTEL_INFO(dev)->gen >= 4)
+			i965_scale_aspect(pipe_config, &pfit_control);
+		else
+			i9xx_scale_aspect(pipe_config, &pfit_control,
+					  &pfit_pgm_ratios, &border);
 		break;
 	case DRM_MODE_SCALE_FULLSCREEN:
 		/*
 		 * Full scaling, even if it changes the aspect ratio.
 		 * Fortunately this is all done for us in hw.
 		 */
-		if (mode->vdisplay != adjusted_mode->vdisplay ||
-		    mode->hdisplay != adjusted_mode->hdisplay) {
+		if (pipe_config->pipe_src_h != adjusted_mode->vdisplay ||
+		    pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
 			pfit_control |= PFIT_ENABLE;
 			if (INTEL_INFO(dev)->gen >= 4)
 				pfit_control |= PFIT_SCALING_AUTO;
@@ -441,7 +462,8 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
 	I915_WRITE(BLC_PWM_CPU_CTL, val | level);
 }
 
-static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
+static void intel_panel_actually_set_backlight(struct drm_device *dev,
+					       u32 level)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 tmp;
@@ -637,7 +659,7 @@ intel_panel_detect(struct drm_device *dev)
 	}
 }
 
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
 static int intel_panel_update_status(struct backlight_device *bd)
 {
 	struct drm_device *dev = bl_get_data(bd);
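
Both fitting paths here make the pillarbox/letterbox decision the same way: rather than comparing the ratios src_w/src_h and dst_w/dst_h with a division, they cross-multiply, which stays in integer arithmetic and loses nothing to rounding. A self-contained sketch of the decision, with local names standing in for the driver's (dst plays the role of the adjusted mode, src of pipe_src_w/h):

    #include <stdio.h>

    /* src_w/src_h vs dst_w/dst_h  <=>  dst_w*src_h vs src_w*dst_h */
    static void fit_preserve_aspect(unsigned src_w, unsigned src_h,
                                    unsigned dst_w, unsigned dst_h)
    {
        unsigned scaled_width = dst_w * src_h;
        unsigned scaled_height = src_w * dst_h;

        if (scaled_width > scaled_height) {         /* pillarbox */
            unsigned width = scaled_height / src_h;
            if (width & 1)                          /* keep it even */
                width++;
            printf("pillar: %ux%u centred in %ux%u\n",
                   width, dst_h, dst_w, dst_h);
        } else if (scaled_width < scaled_height) {  /* letterbox */
            unsigned height = scaled_width / src_w;
            if (height & 1)
                height++;
            printf("letter: %ux%u centred in %ux%u\n",
                   dst_w, height, dst_w, dst_h);
        } else {
            printf("aspects match, scale both directions\n");
        }
    }

    int main(void)
    {
        fit_preserve_aspect(1024, 768, 1920, 1080);
        return 0;
    }

A 4:3 source on a 16:9 panel takes the pillarbox branch and yields the familiar centred 1440x1080 rectangle.
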
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -43,14 +43,6 @@
  * i915.i915_enable_fbc parameter
  */
 
-static bool intel_crtc_active(struct drm_crtc *crtc)
-{
-	/* Be paranoid as we can arrive here with only partial
-	 * state retrieved from the hardware during setup.
-	 */
-	return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
-}
-
 static void i8xx_disable_fbc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -458,7 +450,8 @@ void intel_update_fbc(struct drm_device *dev)
 	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj;
-	unsigned int max_hdisplay, max_vdisplay;
+	const struct drm_display_mode *adjusted_mode;
+	unsigned int max_width, max_height;
 
 	if (!I915_HAS_FBC(dev)) {
 		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
@@ -502,6 +495,7 @@ void intel_update_fbc(struct drm_device *dev)
 	fb = crtc->fb;
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
+	adjusted_mode = &intel_crtc->config.adjusted_mode;
 
 	if (i915_enable_fbc < 0 &&
 	    INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
@@ -514,8 +508,8 @@ void intel_update_fbc(struct drm_device *dev)
 		DRM_DEBUG_KMS("fbc disabled per module param\n");
 		goto out_disable;
 	}
-	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
-	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
 		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
 			DRM_DEBUG_KMS("mode incompatible with compression, "
 				      "disabling\n");
@@ -523,14 +517,14 @@ void intel_update_fbc(struct drm_device *dev)
 	}
 
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
-		max_hdisplay = 4096;
-		max_vdisplay = 2048;
+		max_width = 4096;
+		max_height = 2048;
 	} else {
-		max_hdisplay = 2048;
-		max_vdisplay = 1536;
+		max_width = 2048;
+		max_height = 1536;
 	}
-	if ((crtc->mode.hdisplay > max_hdisplay) ||
-	    (crtc->mode.vdisplay > max_vdisplay)) {
+	if (intel_crtc->config.pipe_src_w > max_width ||
+	    intel_crtc->config.pipe_src_h > max_height) {
 		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
 			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
 		goto out_disable;
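
The FBC hunks swap the mode's hdisplay/vdisplay for the pipe source size and rename the limits, but the gate itself is unchanged: a per-generation maximum surface size. A tiny sketch of the same check, with the generation test reduced to a boolean; the 4096x2048 and 2048x1536 limits are the ones in the hunk above:

    #include <stdbool.h>

    /* G4X and gen5+ allow up to 4096x2048, older parts 2048x1536. */
    static bool fbc_size_ok(bool g4x_or_gen5_plus,
                            unsigned src_w, unsigned src_h)
    {
        unsigned max_width = g4x_or_gen5_plus ? 4096 : 2048;
        unsigned max_height = g4x_or_gen5_plus ? 2048 : 1536;

        return src_w <= max_width && src_h <= max_height;
    }

    int main(void)
    {
        return fbc_size_ok(true, 1920, 1080) ? 0 : 1;
    }
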
@@ -1087,8 +1081,9 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
 	return enabled;
 }
 
-static void pineview_update_wm(struct drm_device *dev)
+static void pineview_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	const struct cxsr_latency *latency;
@@ -1105,7 +1100,7 @@ static void pineview_update_wm(struct drm_device *dev)
 
 	crtc = single_enabled_crtc(dev);
 	if (crtc) {
-		int clock = crtc->mode.clock;
+		int clock = to_intel_crtc(crtc)->config.adjusted_mode.clock;
 		int pixel_size = crtc->fb->bits_per_pixel / 8;
 
 		/* Display SR */
@@ -1166,6 +1161,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
 			    int *cursor_wm)
 {
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	int htotal, hdisplay, clock, pixel_size;
 	int line_time_us, line_count;
 	int entries, tlb_miss;
@@ -1177,9 +1173,10 @@ static bool g4x_compute_wm0(struct drm_device *dev,
 		return false;
 	}
 
-	htotal = crtc->mode.htotal;
-	hdisplay = crtc->mode.hdisplay;
-	clock = crtc->mode.clock;
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	clock = adjusted_mode->clock;
+	htotal = adjusted_mode->htotal;
+	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	/* Use the small buffer method to calculate plane watermark */
@@ -1250,6 +1247,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
 			     int *display_wm, int *cursor_wm)
 {
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	int hdisplay, htotal, pixel_size, clock;
 	unsigned long line_time_us;
 	int line_count, line_size;
@@ -1262,9 +1260,10 @@ static bool g4x_compute_srwm(struct drm_device *dev,
 	}
 
 	crtc = intel_get_crtc_for_plane(dev, plane);
-	hdisplay = crtc->mode.hdisplay;
-	htotal = crtc->mode.htotal;
-	clock = crtc->mode.clock;
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	clock = adjusted_mode->clock;
+	htotal = adjusted_mode->htotal;
+	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	line_time_us = (htotal * 1000) / clock;
@@ -1303,7 +1302,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
 	if (!intel_crtc_active(crtc))
 		return false;
 
-	clock = crtc->mode.clock;	/* VESA DOT Clock */
+	clock = to_intel_crtc(crtc)->config.adjusted_mode.clock;
 	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */
 
 	entries = (clock / 1000) * pixel_size;
@@ -1365,8 +1364,9 @@ static void vlv_update_drain_latency(struct drm_device *dev)
 
 #define single_plane_enabled(mask) is_power_of_2(mask)
 
-static void valleyview_update_wm(struct drm_device *dev)
+static void valleyview_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	static const int sr_latency_ns = 12000;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1424,8 +1424,9 @@ static void valleyview_update_wm(struct drm_device *dev)
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void g4x_update_wm(struct drm_device *dev)
+static void g4x_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	static const int sr_latency_ns = 12000;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1476,8 +1477,9 @@ static void g4x_update_wm(struct drm_device *dev)
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i965_update_wm(struct drm_device *dev)
+static void i965_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	int srwm = 1;
@@ -1488,9 +1490,11 @@ static void i965_update_wm(struct drm_device *dev)
 	if (crtc) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 12000;
-		int clock = crtc->mode.clock;
-		int htotal = crtc->mode.htotal;
-		int hdisplay = crtc->mode.hdisplay;
+		const struct drm_display_mode *adjusted_mode =
+			&to_intel_crtc(crtc)->config.adjusted_mode;
+		int clock = adjusted_mode->clock;
+		int htotal = adjusted_mode->htotal;
+		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 		int pixel_size = crtc->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
 		int entries;
@@ -1541,8 +1545,9 @@ static void i965_update_wm(struct drm_device *dev)
 	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i9xx_update_wm(struct drm_device *dev)
+static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const struct intel_watermark_params *wm_info;
 	uint32_t fwater_lo;
@@ -1566,7 +1571,7 @@ static void i9xx_update_wm(struct drm_device *dev)
 		if (IS_GEN2(dev))
 			cpp = 4;
 
-		planea_wm = intel_calculate_wm(crtc->mode.clock,
+		planea_wm = intel_calculate_wm(to_intel_crtc(crtc)->config.adjusted_mode.clock,
 					       wm_info, fifo_size, cpp,
 					       latency_ns);
 		enabled = crtc;
@@ -1580,7 +1585,7 @@ static void i9xx_update_wm(struct drm_device *dev)
 		if (IS_GEN2(dev))
 			cpp = 4;
 
-		planeb_wm = intel_calculate_wm(crtc->mode.clock,
+		planeb_wm = intel_calculate_wm(to_intel_crtc(crtc)->config.adjusted_mode.clock,
 					       wm_info, fifo_size, cpp,
 					       latency_ns);
 		if (enabled == NULL)
@@ -1607,9 +1612,11 @@ static void i9xx_update_wm(struct drm_device *dev)
 	if (HAS_FW_BLC(dev) && enabled) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 6000;
-		int clock = enabled->mode.clock;
-		int htotal = enabled->mode.htotal;
-		int hdisplay = enabled->mode.hdisplay;
+		const struct drm_display_mode *adjusted_mode =
+			&to_intel_crtc(enabled)->config.adjusted_mode;
+		int clock = adjusted_mode->clock;
+		int htotal = adjusted_mode->htotal;
+		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 		int pixel_size = enabled->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
 		int entries;
@@ -1658,8 +1665,9 @@ static void i9xx_update_wm(struct drm_device *dev)
 	}
 }
 
-static void i830_update_wm(struct drm_device *dev)
+static void i830_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	uint32_t fwater_lo;
@@ -1669,7 +1677,8 @@ static void i830_update_wm(struct drm_device *dev)
 	if (crtc == NULL)
 		return;
 
-	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+	planea_wm = intel_calculate_wm(to_intel_crtc(crtc)->config.adjusted_mode.clock,
+				       &i830_wm_info,
 				       dev_priv->display.get_fifo_size(dev, 0),
 				       4, latency_ns);
 	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1741,6 +1750,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
 				  int *fbc_wm, int *display_wm, int *cursor_wm)
 {
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	unsigned long line_time_us;
 	int hdisplay, htotal, pixel_size, clock;
 	int line_count, line_size;
@@ -1753,9 +1763,10 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
 	}
 
 	crtc = intel_get_crtc_for_plane(dev, plane);
-	hdisplay = crtc->mode.hdisplay;
-	htotal = crtc->mode.htotal;
-	clock = crtc->mode.clock;
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	clock = adjusted_mode->clock;
+	htotal = adjusted_mode->htotal;
+	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	line_time_us = (htotal * 1000) / clock;
@@ -1785,8 +1796,9 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
 			    display, cursor);
 }
 
-static void ironlake_update_wm(struct drm_device *dev)
+static void ironlake_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int fbc_wm, plane_wm, cursor_wm;
 	unsigned int enabled;
@@ -1868,8 +1880,9 @@ static void ironlake_update_wm(struct drm_device *dev)
 	 */
 }
 
-static void sandybridge_update_wm(struct drm_device *dev)
+static void sandybridge_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
 	u32 val;
@@ -1970,8 +1983,9 @@ static void sandybridge_update_wm(struct drm_device *dev)
 			   cursor_wm);
 }
 
-static void ivybridge_update_wm(struct drm_device *dev)
+static void ivybridge_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
 	u32 val;
@@ -2107,8 +2121,8 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
 	uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
 	uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
 
-	pipe_w = intel_crtc->config.requested_mode.hdisplay;
-	pipe_h = intel_crtc->config.requested_mode.vdisplay;
+	pipe_w = intel_crtc->config.pipe_src_w;
+	pipe_h = intel_crtc->config.pipe_src_h;
 	pfit_w = (pfit_size >> 16) & 0xFFFF;
 	pfit_h = pfit_size & 0xFFFF;
 	if (pipe_w < pfit_w)
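
All the watermark routines above now take clock and htotal from the adjusted mode and the width from pipe_src_w; the arithmetic consuming those values is untouched. For reference, a sketch of the "small buffer" method these functions use, with the unit conversions spelled out; the numbers in main() are illustrative, not any platform's real latency or FIFO table:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    /* Bytes fetched while one memory request is in flight, rounded up
     * to cachelines, plus a guard band.  clock is in kHz and latency in
     * ns; kHz/1000 gives pixels per microsecond, ns/1000 microseconds. */
    static int small_buffer_wm(int clock_khz, int bytes_per_pixel,
                               int latency_ns, int cacheline_size,
                               int guard_size)
    {
        int bytes_per_us = clock_khz * bytes_per_pixel / 1000;
        int entries = bytes_per_us * latency_ns / 1000;

        return DIV_ROUND_UP(entries, cacheline_size) + guard_size;
    }

    int main(void)
    {
        /* 148.5 MHz dotclock, 32bpp, 12us latency, 64-byte cachelines */
        printf("wm = %d\n", small_buffer_wm(148500, 4, 12000, 64, 2));
        return 0;
    }
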
@@ -2196,7 +2210,7 @@ struct intel_wm_config {
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value,
 				   bool is_lp)
 {
@@ -2225,7 +2239,7 @@ static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
 	uint32_t method1, method2;
@@ -2248,7 +2262,7 @@ static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
 */
-static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
 	if (!params->active || !params->cur.enabled)
@@ -2262,7 +2276,7 @@ static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
 }
 
 /* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t pri_val)
 {
 	if (!params->active || !params->pri.enabled)
@@ -2413,7 +2427,7 @@ static bool ilk_check_wm(int level,
 
 static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
 				 int level,
-				 struct hsw_pipe_wm_parameters *p,
+				 const struct hsw_pipe_wm_parameters *p,
 				 struct intel_wm_level *result)
 {
 	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2435,8 +2449,8 @@ static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
 }
 
 static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
-			      int level, struct hsw_wm_maximums *max,
-			      struct hsw_pipe_wm_parameters *params,
+			      int level, const struct hsw_wm_maximums *max,
+			      const struct hsw_pipe_wm_parameters *params,
 			      struct intel_wm_level *result)
 {
 	enum pipe pipe;
@@ -2454,33 +2468,31 @@ static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
 	return ilk_check_wm(level, max, result);
 }
 
-static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
-				    enum pipe pipe,
-				    struct hsw_pipe_wm_parameters *params)
+
+static uint32_t hsw_compute_wm_pipe(struct drm_device *dev,
+				    const struct hsw_pipe_wm_parameters *params)
 {
-	uint32_t pri_val, cur_val, spr_val;
-	/* WM0 latency values stored in 0.1us units */
-	uint16_t pri_latency = dev_priv->wm.pri_latency[0];
-	uint16_t spr_latency = dev_priv->wm.spr_latency[0];
-	uint16_t cur_latency = dev_priv->wm.cur_latency[0];
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_wm_config config = {
+		.num_pipes_active = 1,
+		.sprites_enabled = params->spr.enabled,
+		.sprites_scaled = params->spr.scaled,
+	};
+	struct hsw_wm_maximums max;
+	struct intel_wm_level res;
+
+	if (!params->active)
+		return 0;
 
-	pri_val = ilk_compute_pri_wm(params, pri_latency, false);
-	spr_val = ilk_compute_spr_wm(params, spr_latency);
-	cur_val = ilk_compute_cur_wm(params, cur_latency);
+	ilk_wm_max(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
 
-	WARN(pri_val > 127,
-	     "Primary WM error, mode not supported for pipe %c\n",
-	     pipe_name(pipe));
-	WARN(spr_val > 127,
-	     "Sprite WM error, mode not supported for pipe %c\n",
-	     pipe_name(pipe));
-	WARN(cur_val > 63,
-	     "Cursor WM error, mode not supported for pipe %c\n",
-	     pipe_name(pipe));
+	ilk_compute_wm_level(dev_priv, 0, params, &res);
 
-	return (pri_val << WM0_PIPE_PLANE_SHIFT) |
-	       (spr_val << WM0_PIPE_SPRITE_SHIFT) |
-	       cur_val;
+	ilk_check_wm(0, &max, &res);
+
+	return (res.pri_val << WM0_PIPE_PLANE_SHIFT) |
+	       (res.spr_val << WM0_PIPE_SPRITE_SHIFT) |
+	       res.cur_val;
 }
 
 static uint32_t
@@ -2554,19 +2566,22 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
 	wm[3] *= 2;
 }
 
+static int ilk_wm_max_level(const struct drm_device *dev)
+{
+	/* how many WM levels are we expecting */
+	if (IS_HASWELL(dev))
+		return 4;
+	else if (INTEL_INFO(dev)->gen >= 6)
+		return 3;
+	else
+		return 2;
+}
+
 static void intel_print_wm_latency(struct drm_device *dev,
 				   const char *name,
 				   const uint16_t wm[5])
 {
-	int level, max_level;
-
-	/* how many WM levels are we expecting */
-	if (IS_HASWELL(dev))
-		max_level = 4;
-	else if (INTEL_INFO(dev)->gen >= 6)
-		max_level = 3;
-	else
-		max_level = 2;
+	int level, max_level = ilk_wm_max_level(dev);
 
 	for (level = 0; level <= max_level; level++) {
 		unsigned int latency = wm[level];
@@ -2633,8 +2648,7 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
 		p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
 		p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
 		p->cur.bytes_per_pixel = 4;
-		p->pri.horiz_pixels =
-			intel_crtc->config.requested_mode.hdisplay;
+		p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
 		p->cur.horiz_pixels = 64;
 		/* TODO: for now, assume primary and cursor planes are always enabled. */
 		p->pri.enabled = true;
@@ -2664,8 +2678,8 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
 }
 
 static void hsw_compute_wm_results(struct drm_device *dev,
-				   struct hsw_pipe_wm_parameters *params,
-				   struct hsw_wm_maximums *lp_maximums,
+				   const struct hsw_pipe_wm_parameters *params,
+				   const struct hsw_wm_maximums *lp_maximums,
 				   struct hsw_wm_values *results)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2709,7 +2723,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
 	}
 
 	for_each_pipe(pipe)
-		results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
+		results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev,
 							     &params[pipe]);
 
 	for_each_pipe(pipe) {
@@ -2841,8 +2855,9 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
 		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
 }
 
-static void haswell_update_wm(struct drm_device *dev)
+static void haswell_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
 	struct hsw_pipe_wm_parameters params[3];
@@ -2879,7 +2894,7 @@ static void haswell_update_sprite_wm(struct drm_plane *plane,
 	intel_plane->wm.horiz_pixels = sprite_width;
 	intel_plane->wm.bytes_per_pixel = pixel_size;
 
-	haswell_update_wm(plane->dev);
+	haswell_update_wm(crtc);
 }
 
 static bool
@@ -2898,7 +2913,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
 		return false;
 	}
 
-	clock = crtc->mode.clock;
+	clock = to_intel_crtc(crtc)->config.adjusted_mode.clock;
 
 	/* Use the small buffer method to calculate the sprite watermark */
 	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -2933,7 +2948,7 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
 	}
 
 	crtc = intel_get_crtc_for_plane(dev, plane);
-	clock = crtc->mode.clock;
+	clock = to_intel_crtc(crtc)->config.adjusted_mode.clock;
 	if (!clock) {
 		*sprite_wm = 0;
 		return false;
@@ -3076,12 +3091,12 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane,
  * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
-void intel_update_watermarks(struct drm_device *dev)
+void intel_update_watermarks(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 
 	if (dev_priv->display.update_wm)
-		dev_priv->display.update_wm(dev);
+		dev_priv->display.update_wm(crtc);
 }
 
 void intel_update_sprite_watermarks(struct drm_plane *plane,
@@ -3773,7 +3788,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
-	u32 gtfifodbg, val;
+	u32 gtfifodbg, val, rc6_mode = 0;
 	int i;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -3813,8 +3828,9 @@ static void valleyview_enable_rps(struct drm_device *dev)
 
 	/* allows RC6 residency counter to work */
 	I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
-	I915_WRITE(GEN6_RC_CONTROL,
-		   GEN7_RC_CTL_TO_MODE);
+	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+		rc6_mode = GEN7_RC_CTL_TO_MODE;
+	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
 	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 	switch ((val >> 6) & 3) {
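
The rewritten hsw_compute_wm_pipe() drops its hand-rolled level-0 math and WARNs in favour of the shared ilk_compute_wm_level()/ilk_check_wm() pair, and all that remains local is packing the three results into the WM0 register word. A sketch of that packing; the shift values are assumptions standing in for the WM0_PIPE_* definitions in i915_reg.h:

    #include <stdint.h>

    /* Assumed field offsets; the authoritative shifts live in i915_reg.h. */
    #define WM0_PIPE_PLANE_SHIFT    16
    #define WM0_PIPE_SPRITE_SHIFT   8

    struct wm_level {
        uint32_t pri_val;   /* primary plane watermark */
        uint32_t spr_val;   /* sprite watermark */
        uint32_t cur_val;   /* cursor watermark */
    };

    static uint32_t pack_wm0(const struct wm_level *res)
    {
        return (res->pri_val << WM0_PIPE_PLANE_SHIFT) |
               (res->spr_val << WM0_PIPE_SPRITE_SHIFT) |
               res->cur_val;
    }

    int main(void)
    {
        struct wm_level res = { 0x20, 0x10, 0x8 };

        return pack_wm0(&res) == 0x00201008 ? 0 : 1;
    }
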
@@ -5267,6 +5283,7 @@ bool intel_display_power_enabled(struct drm_device *dev,
 	case POWER_DOMAIN_PIPE_A:
 	case POWER_DOMAIN_TRANSCODER_EDP:
 		return true;
+	case POWER_DOMAIN_VGA:
 	case POWER_DOMAIN_PIPE_B:
 	case POWER_DOMAIN_PIPE_C:
 	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
@@ -5329,6 +5346,81 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 	}
 }
 
+static void __intel_power_well_get(struct i915_power_well *power_well)
+{
+	if (!power_well->count++)
+		__intel_set_power_well(power_well->device, true);
+}
+
+static void __intel_power_well_put(struct i915_power_well *power_well)
+{
+	WARN_ON(!power_well->count);
+	if (!--power_well->count)
+		__intel_set_power_well(power_well->device, false);
+}
+
+void intel_display_power_get(struct drm_device *dev,
+			     enum intel_display_power_domain domain)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_well *power_well = &dev_priv->power_well;
+
+	if (!HAS_POWER_WELL(dev))
+		return;
+
+	switch (domain) {
+	case POWER_DOMAIN_PIPE_A:
+	case POWER_DOMAIN_TRANSCODER_EDP:
+		return;
+	case POWER_DOMAIN_VGA:
+	case POWER_DOMAIN_PIPE_B:
+	case POWER_DOMAIN_PIPE_C:
+	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+	case POWER_DOMAIN_TRANSCODER_A:
+	case POWER_DOMAIN_TRANSCODER_B:
+	case POWER_DOMAIN_TRANSCODER_C:
+		spin_lock_irq(&power_well->lock);
+		__intel_power_well_get(power_well);
+		spin_unlock_irq(&power_well->lock);
+		return;
+	default:
+		BUG();
+	}
+}
+
+void intel_display_power_put(struct drm_device *dev,
+			     enum intel_display_power_domain domain)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_well *power_well = &dev_priv->power_well;
+
+	if (!HAS_POWER_WELL(dev))
+		return;
+
+	switch (domain) {
+	case POWER_DOMAIN_PIPE_A:
+	case POWER_DOMAIN_TRANSCODER_EDP:
+		return;
+	case POWER_DOMAIN_VGA:
+	case POWER_DOMAIN_PIPE_B:
+	case POWER_DOMAIN_PIPE_C:
+	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+	case POWER_DOMAIN_TRANSCODER_A:
+	case POWER_DOMAIN_TRANSCODER_B:
+	case POWER_DOMAIN_TRANSCODER_C:
+		spin_lock_irq(&power_well->lock);
+		__intel_power_well_put(power_well);
+		spin_unlock_irq(&power_well->lock);
+		return;
+	default:
+		BUG();
+	}
+}
+
 static struct i915_power_well *hsw_pwr;
 
 /* Display audio driver power well request */
@@ -5338,9 +5430,7 @@ void i915_request_power_well(void)
 		return;
 
 	spin_lock_irq(&hsw_pwr->lock);
-	if (!hsw_pwr->count++ &&
-			!hsw_pwr->i915_request)
-		__intel_set_power_well(hsw_pwr->device, true);
+	__intel_power_well_get(hsw_pwr);
 	spin_unlock_irq(&hsw_pwr->lock);
 }
 EXPORT_SYMBOL_GPL(i915_request_power_well);
@@ -5352,10 +5442,7 @@ void i915_release_power_well(void)
 		return;
 
 	spin_lock_irq(&hsw_pwr->lock);
-	WARN_ON(!hsw_pwr->count);
-	if (!--hsw_pwr->count &&
-		       !hsw_pwr->i915_request)
-		__intel_set_power_well(hsw_pwr->device, false);
+	__intel_power_well_put(hsw_pwr);
 	spin_unlock_irq(&hsw_pwr->lock);
 }
 EXPORT_SYMBOL_GPL(i915_release_power_well);
@@ -5390,15 +5477,37 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
 		return;
 
 	spin_lock_irq(&power_well->lock);
+
+	/*
+	 * This function will only ever contribute one
+	 * to the power well reference count. i915_request
+	 * is what tracks whether we have or have not
+	 * added the one to the reference count.
+	 */
+	if (power_well->i915_request == enable)
+		goto out;
+
 	power_well->i915_request = enable;
 
-	/* only reject "disable" power well request */
-	if (power_well->count && !enable) {
-		spin_unlock_irq(&power_well->lock);
-		return;
-	}
+	if (enable)
+		__intel_power_well_get(power_well);
+	else
+		__intel_power_well_put(power_well);
 
-	__intel_set_power_well(dev, enable);
+out:
 	spin_unlock_irq(&power_well->lock);
 }
 
+void intel_resume_power_well(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_well *power_well = &dev_priv->power_well;
+
+	if (!HAS_POWER_WELL(dev))
+		return;
+
+	spin_lock_irq(&power_well->lock);
+	__intel_set_power_well(dev, power_well->count > 0);
+	spin_unlock_irq(&power_well->lock);
+}
+
@@ -5417,6 +5526,7 @@ void intel_init_power_well(struct drm_device *dev)
 
 	/* For now, we need the power well to be always enabled. */
 	intel_set_power_well(dev, true);
+	intel_resume_power_well(dev);
 
 	/* We're taking over the BIOS, so clear any requests made by it since
 	 * the driver is in charge now. */
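
The power well refactor funnels every caller, the domain get/put, the audio driver hooks and intel_set_power_well(), through one refcount idiom: the hardware is touched only on the 0 to 1 and 1 to 0 transitions, always under the well's lock, and intel_set_power_well() contributes at most a single reference tracked by i915_request. A standalone sketch of the idiom, with a pthread mutex standing in for the spinlock and a flag for the register write:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct power_well {
        pthread_mutex_t lock;
        int count;
        bool hw_enabled;    /* stands in for the real register write */
    };

    static void __power_well_set(struct power_well *pw, bool enable)
    {
        pw->hw_enabled = enable;
        printf("hw power well -> %s\n", enable ? "on" : "off");
    }

    /* Touch the hardware only on the 0 -> 1 transition... */
    static void power_well_get(struct power_well *pw)
    {
        pthread_mutex_lock(&pw->lock);
        if (!pw->count++)
            __power_well_set(pw, true);
        pthread_mutex_unlock(&pw->lock);
    }

    /* ...and only on the 1 -> 0 transition. */
    static void power_well_put(struct power_well *pw)
    {
        pthread_mutex_lock(&pw->lock);
        if (!--pw->count)
            __power_well_set(pw, false);
        pthread_mutex_unlock(&pw->lock);
    }

    int main(void)
    {
        struct power_well pw = { PTHREAD_MUTEX_INITIALIZER, 0, false };

        power_well_get(&pw);    /* hw -> on */
        power_well_get(&pw);    /* no hw access */
        power_well_put(&pw);    /* no hw access */
        power_well_put(&pw);    /* hw -> off */
        return 0;
    }
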
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -41,6 +41,16 @@ static inline int ring_space(struct intel_ring_buffer *ring)
 	return space;
 }
 
+void __intel_ring_advance(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	ring->tail &= ring->size - 1;
+	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+		return;
+	ring->write_tail(ring, ring->tail);
+}
+
 static int
 gen2_render_ring_flush(struct intel_ring_buffer *ring,
 		       u32	invalidate_domains,
@@ -559,8 +569,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 	if (INTEL_INFO(dev)->gen >= 6)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	if (HAS_L3_GPU_CACHE(dev))
-		I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	if (HAS_L3_DPF(dev))
+		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 
 	return ret;
 }
@@ -593,7 +603,7 @@ update_mboxes(struct intel_ring_buffer *ring,
 #define MBOX_UPDATE_DWORDS 4
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, mmio_offset);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_NOOP);
 }
 
@@ -629,9 +639,9 @@ gen6_add_request(struct intel_ring_buffer *ring)
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 
 	return 0;
 }
@@ -723,7 +733,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 128; /* write to separate cachelines */
@@ -742,9 +752,9 @@ pc_render_add_request(struct intel_ring_buffer *ring)
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 
 	return 0;
 }
@@ -963,9 +973,9 @@ i9xx_add_request(struct intel_ring_buffer *ring)
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 
 	return 0;
 }
@@ -987,10 +997,10 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
-		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+		if (HAS_L3_DPF(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring,
 				       ~(ring->irq_enable_mask |
-					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+					 GT_PARITY_ERROR(dev)));
 		else
 			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1009,9 +1019,8 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
-		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring,
-				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+		if (HAS_L3_DPF(dev) && ring->id == RCS)
+			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 		else
 			I915_WRITE_IMR(ring, ~0);
 		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1414,6 +1423,9 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
 		if (ret != -ENOSPC)
 			return ret;
 
+	/* force the tail write in case we have been skipping them */
+	__intel_ring_advance(ring);
+
 	trace_i915_ring_wait_begin(ring);
 	/* With GEM the hangcheck timer should kick us out of the loop,
 	 * leaving it early runs the risk of corrupting GEM state (due
@@ -1475,7 +1487,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
 	int ret;
 
 	/* We need to add any requests required to flush the objects and ring */
-	if (ring->outstanding_lazy_request) {
+	if (ring->outstanding_lazy_seqno) {
 		ret = i915_add_request(ring, NULL);
 		if (ret)
 			return ret;
@@ -1495,10 +1507,20 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
 static int
 intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
 {
-	if (ring->outstanding_lazy_request)
+	if (ring->outstanding_lazy_seqno)
 		return 0;
 
-	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+	if (ring->preallocated_lazy_request == NULL) {
+		struct drm_i915_gem_request *request;
+
+		request = kmalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+
+		ring->preallocated_lazy_request = request;
+	}
+
+	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
 }
 
 static int __intel_ring_begin(struct intel_ring_buffer *ring,
@@ -1545,7 +1567,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
-	BUG_ON(ring->outstanding_lazy_request);
+	BUG_ON(ring->outstanding_lazy_seqno);
 
 	if (INTEL_INFO(ring->dev)->gen >= 6) {
 		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@@ -1558,17 +1580,6 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
 	ring->hangcheck.seqno = seqno;
 }
 
-void intel_ring_advance(struct intel_ring_buffer *ring)
-{
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
-	ring->tail &= ring->size - 1;
-	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
-		return;
-	ring->write_tail(ring, ring->tail);
-}
-
 
 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 				     u32 value)
 {
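
The outstanding_lazy_request split above follows a reserve-before-commit pattern: intel_ring_alloc_seqno() kmallocs the request struct before any commands are emitted, so by the time i915_add_request() commits the request nothing on that path can fail with -ENOMEM. The same pattern outside the driver, as a sketch with names and types local to the example:

    #include <errno.h>
    #include <stdlib.h>

    struct request { unsigned int seqno; };

    struct ring {
        struct request *prealloc;   /* reserved but not yet committed */
        unsigned int next_seqno;
    };

    /* Do everything that can fail *before* touching shared state. */
    static int ring_reserve_request(struct ring *ring)
    {
        if (!ring->prealloc) {
            ring->prealloc = malloc(sizeof(*ring->prealloc));
            if (!ring->prealloc)
                return -ENOMEM;
        }
        return 0;
    }

    /* The commit path cannot fail: it only consumes the reservation. */
    static struct request *ring_commit_request(struct ring *ring)
    {
        struct request *req = ring->prealloc;

        req->seqno = ++ring->next_seqno;
        ring->prealloc = NULL;
        return req;
    }

    int main(void)
    {
        struct ring ring = { 0 };

        if (ring_reserve_request(&ring))
            return 1;
        free(ring_commit_request(&ring));
        return 0;
    }
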
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -34,6 +34,7 @@ struct intel_hw_status_page {
 #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
 enum intel_ring_hangcheck_action {
+	HANGCHECK_IDLE = 0,
 	HANGCHECK_WAIT,
 	HANGCHECK_ACTIVE,
 	HANGCHECK_KICK,
@@ -140,7 +141,8 @@ struct intel_ring_buffer {
 	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
-	u32 outstanding_lazy_request;
+	struct drm_i915_gem_request *preallocated_lazy_request;
+	u32 outstanding_lazy_seqno;
 	bool gpu_caches_dirty;
 	bool fbc_dirty;
 
@@ -237,7 +239,12 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 	iowrite32(data, ring->virtual_start + ring->tail);
 	ring->tail += 4;
 }
-void intel_ring_advance(struct intel_ring_buffer *ring);
+static inline void intel_ring_advance(struct intel_ring_buffer *ring)
+{
+	ring->tail &= ring->size - 1;
+}
+void __intel_ring_advance(struct intel_ring_buffer *ring);
+
 int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
 int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
@@ -258,8 +265,8 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
 
 static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
 {
-	BUG_ON(ring->outstanding_lazy_request == 0);
-	return ring->outstanding_lazy_request;
+	BUG_ON(ring->outstanding_lazy_seqno == 0);
+	return ring->outstanding_lazy_seqno;
 }
 
 static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1068,7 +1068,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
 
 static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
 {
-	unsigned dotclock = pipe_config->adjusted_mode.clock;
+	unsigned dotclock = pipe_config->port_clock;
 	struct dpll *clock = &pipe_config->dpll;
 
 	/* SDVO TV has fixed PLL values depend on its clock range,
@@ -1133,7 +1133,6 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 	 */
 	pipe_config->pixel_multiplier =
 		intel_sdvo_get_pixel_multiplier(adjusted_mode);
-	adjusted_mode->clock *= pipe_config->pixel_multiplier;
 
 	if (intel_sdvo->color_range_auto) {
 		/* See CEA-861-E - 5.1 Default Encoding Parameters */
@@ -1217,11 +1216,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
 	    !intel_sdvo_set_tv_format(intel_sdvo))
 		return;
 
-	/* We have tried to get input timing in mode_fixup, and filled into
-	 * adjusted_mode.
-	 */
 	intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+	input_dtd.part1.clock /= crtc->config.pixel_multiplier;
 
 	if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
 		input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
@@ -1330,6 +1325,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 	struct intel_sdvo_dtd dtd;
 	int encoder_pixel_multiplier = 0;
+	int dotclock;
 	u32 flags = 0, sdvox;
 	u8 val;
 	bool ret;
@@ -1368,6 +1364,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 			 >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
 	}
 
+	dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+	if (HAS_PCH_SPLIT(dev))
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->adjusted_mode.clock = dotclock;
+
 	/* Cross check the port pixel multiplier with the sdvo encoder state. */
 	if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
 				 &val, 1)) {
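
The readout change makes the direction of the SDVO clock math explicit: configuration multiplies the dot clock up to the port clock, and get_config() divides it back down, so a 25 MHz TV mode with pixel multiplier 4 reads back as 100000 kHz / 4 = 25000 kHz. A sketch of both directions; the multiplier thresholds follow the SDVO rule, as described in the SDVO code, that the link has to run at 100-200 MHz:

    /* Slow modes are sent with each pixel repeated so the link stays
     * in its 100-200 MHz operating range.  Units are kHz throughout. */
    static int sdvo_pixel_multiplier(int dotclock)
    {
        if (dotclock >= 100000)
            return 1;
        else if (dotclock >= 50000)
            return 2;
        else
            return 4;
    }

    /* Readout inverts it: adjusted_mode.clock = port_clock / multiplier. */
    static int sdvo_dotclock(int port_clock, int pixel_multiplier)
    {
        return port_clock / pixel_multiplier;
    }

    int main(void)
    {
        int mult = sdvo_pixel_multiplier(25000);            /* -> 4 */

        return sdvo_dotclock(25000 * mult, mult) == 25000 ? 0 : 1;
    }
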
@@ -101,19 +101,83 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
 	return val;
 }
 
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg)
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val = 0;
-
-	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
-			DPIO_OPCODE_REG_READ, reg, &val);
-
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+			PUNIT_OPCODE_REG_READ, reg, &val);
 	return val;
 }
 
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
-	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
 			PUNIT_OPCODE_REG_WRITE, reg, &val);
 }
 
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+			PUNIT_OPCODE_REG_READ, reg, &val);
+	return val;
+}
+
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+			PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+			PUNIT_OPCODE_REG_READ, reg, &val);
+	return val;
+}
+
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+			PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+			PUNIT_OPCODE_REG_READ, reg, &val);
+	return val;
+}
+
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+			PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+static u32 vlv_get_phy_port(enum pipe pipe)
+{
+	u32 port = IOSF_PORT_DPIO;
+
+	WARN_ON((pipe != PIPE_A) && (pipe != PIPE_B));
+
+	return port;
+}
+
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
+{
+	u32 val = 0;
+
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+			DPIO_OPCODE_REG_READ, reg, &val);
+	return val;
+}
+
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+			DPIO_OPCODE_REG_WRITE, reg, &val);
+}
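Net effect: each IOSF sideband unit (GPIO NC, CCK, CCU, GPS core) gets a typed helper with the right device/function and opcode baked in, and DPIO accesses now name the pipe whose PHY they target. vlv_get_phy_port() still always returns IOSF_PORT_DPIO and merely WARNs on pipes other than A/B, preparing for a second PHY later. A hedged usage sketch, assuming the caller serializes sideband traffic with dpio_lock as the existing users do:

	u32 val;

	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_gpio_nc_read(dev_priv, reg);		/* GPIO north-complex */
	vlv_cck_write(dev_priv, reg, val);		/* clock control unit */
	val = vlv_dpio_read(dev_priv, PIPE_A, reg);	/* display PHY for pipe A */
	mutex_unlock(&dev_priv->dpio_lock);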
@@ -288,7 +288,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		dev_priv->sprite_scaling_enabled |= 1 << pipe;
 
 		if (!scaling_was_enabled) {
-			intel_update_watermarks(dev);
+			intel_update_watermarks(crtc);
 			intel_wait_for_vblank(dev, pipe);
 		}
 		sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
@@ -323,7 +323,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 
 	/* potentially re-enable LP watermarks */
 	if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
-		intel_update_watermarks(dev);
+		intel_update_watermarks(crtc);
 }
 
 static void
@@ -349,7 +349,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 
 	/* potentially re-enable LP watermarks */
 	if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
-		intel_update_watermarks(dev);
+		intel_update_watermarks(crtc);
 }
 
 static int
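All three sprite call sites switch from the device-wide watermark update to a per-crtc one, part of Ville's watermark work in this merge: only the pipe whose sprite configuration changed gets its watermarks recomputed. The shape of the change, sketched (the exact prototypes live in intel_drv.h):

	/* before: recompute watermarks for every pipe on the device */
	void intel_update_watermarks(struct drm_device *dev);

	/* after: recompute only for the crtc that changed */
	void intel_update_watermarks(struct drm_crtc *crtc);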
@@ -652,8 +652,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		.y2 = crtc_y + crtc_h,
 	};
 	const struct drm_rect clip = {
-		.x2 = crtc->mode.hdisplay,
-		.y2 = crtc->mode.vdisplay,
+		.x2 = intel_crtc->config.pipe_src_w,
+		.y2 = intel_crtc->config.pipe_src_h,
 	};
 
 	intel_fb = to_intel_framebuffer(fb);
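Sprite clipping now uses the pipe source size instead of the CRTC mode, which matters when the panel fitter makes the two differ. Further down, this function clips the source and destination rectangles together against that clip with the stock helper from drm_rect.h; a hedged sketch of that step, with variable names assumed to match the surrounding code:

	/* clips src and dst in place, scaling-aware, and reports visibility */
	visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);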
@@ -165,6 +165,7 @@ struct drm_mode_get_plane_res {
 #define DRM_MODE_ENCODER_LVDS	3
 #define DRM_MODE_ENCODER_TVDAC	4
 #define DRM_MODE_ENCODER_VIRTUAL 5
+#define DRM_MODE_ENCODER_DSI	6
 
 struct drm_mode_get_encoder {
 	__u32 encoder_id;
@@ -203,6 +204,7 @@ struct drm_mode_get_encoder {
 #define DRM_MODE_CONNECTOR_TV		13
 #define DRM_MODE_CONNECTOR_eDP		14
 #define DRM_MODE_CONNECTOR_VIRTUAL	15
+#define DRM_MODE_CONNECTOR_DSI		16
 
 struct drm_mode_get_connector {
 	__u32 encoders_ptr;
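With both UAPI constants in place, a driver advertises DSI outputs the same way as any other type; the new intel_dsi code in this merge does essentially the following (sketch; the funcs names are assumptions for illustration, not quoted from the patch):

	drm_encoder_init(dev, encoder, &intel_dsi_funcs,
			 DRM_MODE_ENCODER_DSI);
	drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
			   DRM_MODE_CONNECTOR_DSI);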
@@ -38,10 +38,10 @@
  *
  * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
  * event from the gpu l3 cache. Additional information supplied is ROW,
- * BANK, SUBBANK of the affected cacheline. Userspace should keep track of
- * these events and if a specific cache-line seems to have a persistent
- * error remap it with the l3 remapping tool supplied in intel-gpu-tools.
- * The value supplied with the event is always 1.
+ * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
+ * track of these events and if a specific cache-line seems to have a
+ * persistent error remap it with the l3 remapping tool supplied in
+ * intel-gpu-tools. The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 * hangcheck. The error detection event is a good indicator of when things
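For reference, the kernel side builds this uevent roughly as below, patterned on the parity-error work handler in i915_irq.c (hedged sketch; SLICE is the field added by the second-slice l3 remapping series in this merge):

	char *parity_event[6];

	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
	parity_event[5] = NULL;

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, parity_event);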