Merge tag 'drm-intel-next-2017-06-19' of git://anongit.freedesktop.org/git/drm-intel into drm-next

Final pile of features for 4.13

New uabi:
- batch bo in first slot, for faster execbuf assembly in userspace
  (Chris Wilson)
- (sub)slice getparam, needed for mesa perf support (Robert Bragg)
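
A minimal userspace sketch of the two additions above (hedged: it
assumes the uapi names introduced in this series, I915_EXEC_BATCH_FIRST,
I915_PARAM_HAS_EXEC_BATCH_FIRST and I915_PARAM_SLICE_MASK, with error
handling elided):

  #include <sys/ioctl.h>
  #include <drm/i915_drm.h>

  /* (sub)slice getparam; the kernel returns -ENODEV when it has no data */
  static int query_slice_mask(int drm_fd, int *mask)
  {
          struct drm_i915_getparam gp = {
                  .param = I915_PARAM_SLICE_MASK,
                  .value = mask,
          };

          return ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
  }

  /* batch-first execbuf: the batch bo sits in exec slot 0 instead of last */
  static void use_batch_first(struct drm_i915_gem_execbuffer2 *execbuf)
  {
          execbuf->flags |= I915_EXEC_BATCH_FIRST;
  }

Userspace should gate the flag on the I915_PARAM_HAS_EXEC_BATCH_FIRST
getparam before setting it.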

First pile of patches for cnl/cfl support, maintained by Rodrigo but
with lots of contributions from others. Still incomplete, since the
public review is still ongoing.

Features/refactoring:
- Make execbuf faster (Chris Wilson): a pile of series that gives execbuf
  buffer handling fewer passes and less list walking, postpones more
  work to async workers and shuffles buffers less, all to make the
  common case much faster (in some cases at least).
- cold boot support for glk dsi (Madhav Chauhan)
- Clean up pipe A quirk and related old platform hacks (Ville)
- perf sampling support for kbl/glk (Lionel)
- perf cleanups (Robert Bragg)
- wire atomic state to backlight code, to avoid pipe lookup hacks
  (Maarten)
- reduce request waiting latency/overhead to remove the spinning and
  associated cpu cycle wasting (Chris)
- fix 90/270 rotation wm computation (Ville)
- new ddb allocation algo for skl (Kumar Mahesh)
- fix regression due to system suspend optimization (Imre)
- the usual pile of small cleanups and refactors all over

GVT updates contained in this tag:
- optimization for per-VM mmio save/restore (Changbin)
- optimization for mmio hash table (Changbin)
- scheduler optimization with event (Ping)
- vGPU reset refinement (Fred)
- other misc refactor and cleanups, etc.

* tag 'drm-intel-next-2017-06-19' of git://anongit.freedesktop.org/git/drm-intel: (170 commits)
  drm/i915: Update DRIVER_DATE to 20170619
  drm/i915/cfl: Introduce Coffee Lake workarounds.
  drm/i915: Store 9 bits of PCI Device ID for platforms with a LP PCH
  drm/i915: Stash a pointer to the obj's resv in the vma
  drm/i915: Async GPU relocation processing
  drm/i915: Allow execbuffer to use the first object as the batch
  drm/i915: Wait upon userptr get-user-pages within execbuffer
  drm/i915: First try the previous execbuffer location
  drm/i915: Store a persistent reference for an object in the execbuffer cache
  drm/i915: Eliminate lots of iterations over the execobjects array
  drm/i915: Disable EXEC_OBJECT_ASYNC when doing relocations
  drm/i915: Pass vma to relocate entry
  drm/i915: Store a direct lookup from object handle to vma
  drm/i915: Fix retrieval of hangcheck stats
  drm/i915: Store i915_gem_object_is_coherent() as a bit next to cache-dirty
  drm/i915: Mark CPU cache as dirty on every transition for CPU writes
  drm/i915: Make i915_vma_destroy() static
  drm/i915: Actually attach the tv_format property to the SDVO connector
  Revert "drm/i915/skl: New ddb allocation algorithm"
  drm/i915/glk: Add cold boot sequence for GLK DSI
  ...
Dave Airlie 2017-06-21 08:55:22 +10:00
Parents: eafae133e4 9ddb8e1743
Commit: 305b9eddee
108 changed files with 36877 additions and 3339 deletions

@@ -129,7 +129,16 @@ i915-y += i915_vgpu.o
# perf code
i915-y += i915_perf.o \
i915_oa_hsw.o
i915_oa_hsw.o \
i915_oa_bdw.o \
i915_oa_chv.o \
i915_oa_sklgt2.o \
i915_oa_sklgt3.o \
i915_oa_sklgt4.o \
i915_oa_bxt.o \
i915_oa_kblgt2.o \
i915_oa_kblgt3.o \
i915_oa_glk.o
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o


@@ -217,9 +217,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
name = ch7xxx_get_id(vendor);
if (!name) {
DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
"slave %d.\n",
vendor, adapter->name, dvo->slave_addr);
DRM_DEBUG_KMS("ch7xxx not detected; got VID 0x%02x from %s slave %d.\n",
vendor, adapter->name, dvo->slave_addr);
goto out;
}
@@ -229,9 +228,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
devid = ch7xxx_get_did(device);
if (!devid) {
DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
"slave %d.\n",
vendor, adapter->name, dvo->slave_addr);
DRM_DEBUG_KMS("ch7xxx not detected; got DID 0x%02x from %s slave %d.\n",
device, adapter->name, dvo->slave_addr);
goto out;
}


@@ -3,6 +3,6 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o


@@ -2414,53 +2414,13 @@ static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
}
#define GVT_MAX_CMD_LENGTH 20 /* In Dword */
static void trace_cs_command(struct parser_exec_state *s,
cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
{
/* This buffer is used by ftrace to store all commands copied from
* guest gma space. Sometimes commands can cross pages, this should
* not be handled in ftrace logic. So this is just used as a
* 'bounce buffer'
*/
u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
int i;
u32 cmd_len = cmd_length(s);
/* The chosen value of GVT_MAX_CMD_LENGTH is just based on the
* following two considerations:
* 1) From observation, most common ring commands are not that long.
* But there are exceptions. So it indeed makes sense to observe
* longer commands.
* 2) From the performance and debugging point of view, dumping all
* contents of every command is not necessary.
* We might shrink GVT_MAX_CMD_LENGTH or remove this trace event in
* the future for performance considerations.
*/
if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
gvt_dbg_cmd("cmd length exceed tracing limitation!\n");
cmd_len = GVT_MAX_CMD_LENGTH;
}
for (i = 0; i < cmd_len; i++)
cmd_trace_buf[i] = cmd_val(s, i);
trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
cost_pre_cmd_handler, cost_cmd_handler);
}
/* call the cmd handler, and advance ip */
static int cmd_parser_exec(struct parser_exec_state *s)
{
struct intel_vgpu *vgpu = s->vgpu;
struct cmd_info *info;
u32 cmd;
int ret = 0;
cycles_t t0, t1, t2;
struct parser_exec_state s_before_advance_custom;
struct intel_vgpu *vgpu = s->vgpu;
t0 = get_cycles();
cmd = cmd_val(s, 0);
@@ -2471,13 +2431,10 @@ static int cmd_parser_exec(struct parser_exec_state *s)
return -EINVAL;
}
gvt_dbg_cmd("%s\n", info->name);
s->info = info;
t1 = get_cycles();
s_before_advance_custom = *s;
trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
cmd_length(s), s->buf_type);
if (info->handler) {
ret = info->handler(s);
@@ -2486,9 +2443,6 @@ static int cmd_parser_exec(struct parser_exec_state *s)
return ret;
}
}
t2 = get_cycles();
trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);
if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
ret = cmd_advance_default(s);
@@ -2522,8 +2476,6 @@ static int command_scan(struct parser_exec_state *s,
gma_tail = rb_start + rb_tail;
gma_bottom = rb_start + rb_len;
gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);
while (s->ip_gma != gma_tail) {
if (s->buf_type == RING_BUFFER_INSTRUCTION) {
if (!(s->ip_gma >= rb_start) ||
@@ -2552,8 +2504,6 @@
}
}
gvt_dbg_cmd("scan_end\n");
return ret;
}


@@ -708,53 +708,43 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
{
struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct execlist_ctx_descriptor_format *desc[2], valid_desc[2];
unsigned long valid_desc_bitmap = 0;
bool emulate_schedule_in = true;
int ret;
int i;
struct execlist_ctx_descriptor_format desc[2];
int i, ret;
memset(valid_desc, 0, sizeof(valid_desc));
desc[0] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
desc[1] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
if (!desc[0].valid) {
gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n");
goto inv_desc;
}
for (i = 0; i < 2; i++) {
if (!desc[i]->valid)
for (i = 0; i < ARRAY_SIZE(desc); i++) {
if (!desc[i].valid)
continue;
if (!desc[i]->privilege_access) {
if (!desc[i].privilege_access) {
gvt_vgpu_err("unexpected GGTT elsp submission\n");
return -EINVAL;
goto inv_desc;
}
/* TODO: add another guest context checks here. */
set_bit(i, &valid_desc_bitmap);
valid_desc[i] = *desc[i];
}
if (!valid_desc_bitmap) {
gvt_vgpu_err("no valid desc in a elsp submission\n");
return -EINVAL;
}
if (!test_bit(0, (void *)&valid_desc_bitmap) &&
test_bit(1, (void *)&valid_desc_bitmap)) {
gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
return -EINVAL;
}
/* submit workload */
for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) {
ret = submit_context(vgpu, ring_id, &valid_desc[i],
emulate_schedule_in);
for (i = 0; i < ARRAY_SIZE(desc); i++) {
if (!desc[i].valid)
continue;
ret = submit_context(vgpu, ring_id, &desc[i], i == 0);
if (ret) {
gvt_vgpu_err("fail to schedule workload\n");
gvt_vgpu_err("failed to submit desc %d\n", i);
return ret;
}
emulate_schedule_in = false;
}
return 0;
inv_desc:
gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n",
desc[0].udw, desc[0].ldw, desc[1].udw, desc[1].ldw);
return -EINVAL;
}
static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)


@@ -102,13 +102,8 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
p = firmware + h->mmio_offset;
hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
int j;
for (j = 0; j < e->length; j += 4)
*(u32 *)(p + e->offset + j) =
I915_READ_NOTRACE(_MMIO(e->offset + j));
}
hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
memcpy(gvt->firmware.mmio, p, info->mmio_size);


@@ -244,15 +244,19 @@ static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
return readq(addr);
}
static void gtt_invalidate(struct drm_i915_private *dev_priv)
{
mmio_hw_access_pre(dev_priv);
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
mmio_hw_access_post(dev_priv);
}
static void write_pte64(struct drm_i915_private *dev_priv,
unsigned long index, u64 pte)
{
void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
writeq(pte, addr);
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
@@ -1849,6 +1853,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
}
ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
gtt_invalidate(gvt->dev_priv);
ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
return 0;
}
@@ -2301,8 +2306,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
u32 num_entries;
struct intel_gvt_gtt_entry e;
intel_runtime_pm_get(dev_priv);
memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
e.type = GTT_TYPE_GGTT_PTE;
ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
@@ -2318,7 +2321,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
for (offset = 0; offset < num_entries; offset++)
ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
intel_runtime_pm_put(dev_priv);
gtt_invalidate(dev_priv);
}
/**


@@ -147,7 +147,9 @@ static int gvt_service_thread(void *data)
mutex_unlock(&gvt->lock);
}
if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
if (test_bit(INTEL_GVT_REQUEST_SCHED,
(void *)&gvt->service_request) ||
test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
(void *)&gvt->service_request)) {
intel_gvt_schedule(gvt);
}
@@ -244,7 +246,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
gvt_dbg_core("init gvt device\n");
idr_init(&gvt->vgpu_idr);
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
gvt->dev_priv = dev_priv;


@@ -165,7 +165,6 @@ struct intel_vgpu {
struct list_head workload_q_head[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
ktime_t last_ctx_submit_time;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
@@ -196,11 +195,27 @@ struct intel_gvt_fence {
unsigned long vgpu_allocated_fence_num;
};
#define INTEL_GVT_MMIO_HASH_BITS 9
#define INTEL_GVT_MMIO_HASH_BITS 11
struct intel_gvt_mmio {
u32 *mmio_attribute;
u8 *mmio_attribute;
/* Register contains RO bits */
#define F_RO (1 << 0)
/* Register contains graphics address */
#define F_GMADR (1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK (1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS (1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED (1 << 4)
/* This reg has been accessed through GPU commands */
#define F_CMD_ACCESSED (1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
unsigned int num_tracked_mmio;
};
struct intel_gvt_firmware {
@@ -257,7 +272,12 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
enum {
INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
/* Scheduling trigger by timer */
INTEL_GVT_REQUEST_SCHED = 1,
/* Scheduling trigger by event */
INTEL_GVT_REQUEST_EVENT_SCHED = 2,
};
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
@@ -473,6 +493,80 @@ enum {
GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
};
static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
{
intel_runtime_pm_get(dev_priv);
}
static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
intel_runtime_pm_put(dev_priv);
}
/**
* intel_gvt_mmio_set_accessed - mark an MMIO as accessed
* @gvt: a GVT device
* @offset: register offset
*
*/
static inline void intel_gvt_mmio_set_accessed(
struct intel_gvt *gvt, unsigned int offset)
{
gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}
/**
* intel_gvt_mmio_is_cmd_access - check if an MMIO can be accessed by command
* @gvt: a GVT device
* @offset: register offset
*
*/
static inline bool intel_gvt_mmio_is_cmd_access(
struct intel_gvt *gvt, unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}
/**
* intel_gvt_mmio_is_unalign - check if an MMIO can be accessed unaligned
* @gvt: a GVT device
* @offset: register offset
*
*/
static inline bool intel_gvt_mmio_is_unalign(
struct intel_gvt *gvt, unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}
/**
* intel_gvt_mmio_set_cmd_accessed - mark an MMIO as accessed by command
* @gvt: a GVT device
* @offset: register offset
*
*/
static inline void intel_gvt_mmio_set_cmd_accessed(
struct intel_gvt *gvt, unsigned int offset)
{
gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
}
/**
* intel_gvt_mmio_has_mode_mask - if a MMIO has a mode mask
* @gvt: a GVT device
* @offset: register offset
*
* Returns:
* True if the MMIO has a mode mask in its higher 16 bits, false otherwise.
*
*/
static inline bool intel_gvt_mmio_has_mode_mask(
struct intel_gvt *gvt, unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
#include "trace.h"
#include "mpt.h"
#endif
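
As the hunk above shows, the attribute table now stores one u8 of F_*
flag bits per 4-byte register, which is why every helper indexes it
with offset >> 2. A hedged illustration of the flag arithmetic (the
offset is made up, not taken from the patch):

  /* offset 0x2168 lands in attribute slot 0x2168 >> 2 == 0x85a */
  u8 attr = gvt->mmio.mmio_attribute[0x2168 >> 2];

  if (attr & F_CMD_ACCESS)        /* reg may appear in GPU commands */
          intel_gvt_mmio_set_cmd_accessed(gvt, 0x2168);

Shrinking the entries from u32 to u8 cuts the table to a quarter of its
former size; only seven F_* bits are defined, so they still fit.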


@@ -47,21 +47,6 @@
#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
#define PCH_PP_DIVISOR _MMIO(0xc7210)
/* Register contains RO bits */
#define F_RO (1 << 0)
/* Register contains graphics address */
#define F_GMADR (1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK (1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS (1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED (1 << 4)
/* This reg has been accessed through GPU commands */
#define F_CMD_ACCESSED (1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
if (IS_BROADWELL(gvt->dev_priv))
@@ -92,11 +77,22 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
}
static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
unsigned int offset)
{
struct intel_gvt_mmio_info *e;
hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
if (e->offset == offset)
return e;
}
return NULL;
}
static int new_mmio_info(struct intel_gvt *gvt,
u32 offset, u32 flags, u32 size,
u32 offset, u8 flags, u32 size,
u32 addr_mask, u32 ro_mask, u32 device,
int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
gvt_mmio_func read, gvt_mmio_func write)
{
struct intel_gvt_mmio_info *info, *p;
u32 start, end, i;
@@ -116,13 +112,11 @@ static int new_mmio_info(struct intel_gvt *gvt,
return -ENOMEM;
info->offset = i;
p = intel_gvt_find_mmio_info(gvt, info->offset);
p = find_mmio_info(gvt, info->offset);
if (p)
gvt_err("dup mmio definition offset %x\n",
info->offset);
info->size = size;
info->length = (i + 4) < end ? 4 : (end - i);
info->addr_mask = addr_mask;
info->ro_mask = ro_mask;
info->device = device;
info->read = read ? read : intel_vgpu_default_mmio_read;
@@ -130,6 +124,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
gvt->mmio.mmio_attribute[info->offset / 4] = flags;
INIT_HLIST_NODE(&info->node);
hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
gvt->mmio.num_tracked_mmio++;
}
return 0;
}
@@ -209,6 +204,7 @@ static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
unsigned int fence_num = offset_to_fence_num(off);
int ret;
@@ -217,8 +213,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return ret;
write_vreg(vgpu, off, p_data, bytes);
mmio_hw_access_pre(dev_priv);
intel_vgpu_write_fence(vgpu, fence_num,
vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
mmio_hw_access_post(dev_priv);
return 0;
}
@@ -300,6 +298,9 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
/* sw will wait for the device to ack the reset request */
vgpu_vreg(vgpu, offset) = 0;
return 0;
}
@@ -1265,7 +1266,10 @@ static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
}
write_vreg(vgpu, offset, p_data, bytes);
/* TRTTE is not per-context */
mmio_hw_access_pre(dev_priv);
I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
mmio_hw_access_post(dev_priv);
return 0;
}
@@ -1278,7 +1282,9 @@ static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
if (val & 1) {
/* unblock hw logic */
mmio_hw_access_pre(dev_priv);
I915_WRITE(_MMIO(offset), val);
mmio_hw_access_post(dev_priv);
}
write_vreg(vgpu, offset, p_data, bytes);
return 0;
@@ -1415,7 +1421,20 @@ static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
mmio_hw_access_pre(dev_priv);
vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
mmio_hw_access_post(dev_priv);
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}
static int instdone_mmio_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
mmio_hw_access_pre(dev_priv);
vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
mmio_hw_access_post(dev_priv);
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}
@@ -1434,7 +1453,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3) {
vgpu->last_ctx_submit_time = ktime_get();
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
if(ret)
gvt_vgpu_err("fail submit workload on ring %d\n",
@@ -1603,6 +1621,12 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x6c)
MMIO_RING_DFH(RING_REG, D_ALL, 0, instdone_mmio_read, NULL);
MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_ALL, instdone_mmio_read, NULL);
#undef RING_REG
MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, instdone_mmio_read, NULL);
MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
@@ -1779,10 +1803,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPRSCALE(PIPE_C), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
MMIO_F(LGC_PALETTE(PIPE_A, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(LGC_PALETTE(PIPE_B, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(LGC_PALETTE(PIPE_C, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
@@ -2187,7 +2207,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
MMIO_D(ECOBUS, D_ALL);
MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
@@ -2219,22 +2239,19 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
MMIO_D(GEN6_PMINTRMSK, D_ALL);
MMIO_DH(HSW_PWR_WELL_BIOS, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_DRIVER, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_KVMR, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_DEBUG, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL5, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL6, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_BIOS, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_DRIVER, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_DEBUG, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_D(RSTDBYCTL, D_ALL);
MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
MMIO_F(VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, F_UNALIGN, 0, 0, D_ALL, pvinfo_mmio_read, pvinfo_mmio_write);
MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
MMIO_F(MCHBAR_MIRROR_BASE_SNB, 0x40000, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(TILECTL, D_ALL);
MMIO_D(GEN6_UCGCTL1, D_ALL);
@@ -2242,7 +2259,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW);
MMIO_D(GEN6_PCODE_DATA, D_ALL);
MMIO_D(0x13812c, D_ALL);
MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
@@ -2321,14 +2337,13 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x1a054, D_ALL);
MMIO_D(0x44070, D_ALL);
MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x215c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL);
MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL);
MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
MMIO_D(0x2b00, D_BDW_PLUS);
MMIO_D(0x2360, D_BDW_PLUS);
MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
@@ -2766,7 +2781,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x72380, D_SKL_PLUS);
MMIO_D(0x7039c, D_SKL_PLUS);
MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
MMIO_D(0x8f074, D_SKL | D_KBL);
MMIO_D(0x8f004, D_SKL | D_KBL);
MMIO_D(0x8f034, D_SKL | D_KBL);
@@ -2840,26 +2854,36 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
return 0;
}
/**
* intel_gvt_find_mmio_info - find MMIO information entry by aligned offset
* @gvt: GVT device
* @offset: register offset
*
* This function is used to find the MMIO information entry from hash table
*
* Returns:
* pointer to MMIO information entry, NULL if not exists
*/
struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
unsigned int offset)
/* Special MMIO blocks. */
static struct gvt_mmio_block {
unsigned int device;
i915_reg_t offset;
unsigned int size;
gvt_mmio_func read;
gvt_mmio_func write;
} gvt_mmio_blocks[] = {
{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
pvinfo_mmio_read, pvinfo_mmio_write},
{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
};
static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
unsigned int offset)
{
struct intel_gvt_mmio_info *e;
unsigned long device = intel_gvt_get_device_type(gvt);
struct gvt_mmio_block *block = gvt_mmio_blocks;
int i;
WARN_ON(!IS_ALIGNED(offset, 4));
hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
if (e->offset == offset)
return e;
for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
if (!(device & block->device))
continue;
if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
offset < INTEL_GVT_MMIO_OFFSET(block->offset) + block->size)
return block;
}
return NULL;
}
@@ -2899,9 +2923,10 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
struct drm_i915_private *dev_priv = gvt->dev_priv;
int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
int ret;
gvt->mmio.mmio_attribute = vzalloc(info->mmio_size);
gvt->mmio.mmio_attribute = vzalloc(size);
if (!gvt->mmio.mmio_attribute)
return -ENOMEM;
@@ -2922,77 +2947,15 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
if (ret)
goto err;
}
gvt_dbg_mmio("traced %u virtual mmio registers\n",
gvt->mmio.num_tracked_mmio);
return 0;
err:
intel_gvt_clean_mmio_info(gvt);
return ret;
}
/**
* intel_gvt_mmio_set_accessed - mark a MMIO has been accessed
* @gvt: a GVT device
* @offset: register offset
*
*/
void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset)
{
gvt->mmio.mmio_attribute[offset >> 2] |=
F_ACCESSED;
}
/**
* intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
* @gvt: a GVT device
* @offset: register offset
*
*/
bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] &
F_CMD_ACCESS;
}
/**
* intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned
* @gvt: a GVT device
* @offset: register offset
*
*/
bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt,
unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] &
F_UNALIGN;
}
/**
* intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
* @gvt: a GVT device
* @offset: register offset
*
*/
void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
unsigned int offset)
{
gvt->mmio.mmio_attribute[offset >> 2] |=
F_CMD_ACCESSED;
}
/**
* intel_gvt_mmio_has_mode_mask - if a MMIO has a mode mask
* @gvt: a GVT device
* @offset: register offset
*
* Returns:
* True if a MMIO has a mode mask in its higher 16 bits, false if it isn't.
*
*/
bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] &
F_MODE_MASK;
}
/**
* intel_vgpu_default_mmio_read - default MMIO read handler
@@ -3044,3 +3007,91 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
{
return in_whitelist(offset);
}
/**
* intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
* @vgpu: a vGPU
* @offset: register offset
* @pdata: data buffer
* @bytes: data length
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
void *pdata, unsigned int bytes, bool is_read)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio_info;
struct gvt_mmio_block *mmio_block;
gvt_mmio_func func;
int ret;
if (WARN_ON(bytes > 4))
return -EINVAL;
/*
* Handle special MMIO blocks.
*/
mmio_block = find_mmio_block(gvt, offset);
if (mmio_block) {
func = is_read ? mmio_block->read : mmio_block->write;
if (func)
return func(vgpu, offset, pdata, bytes);
goto default_rw;
}
/*
* Normal tracked MMIOs.
*/
mmio_info = find_mmio_info(gvt, offset);
if (!mmio_info) {
if (!vgpu->mmio.disable_warn_untrack)
gvt_vgpu_err("untracked MMIO %08x len %d\n",
offset, bytes);
goto default_rw;
}
if (is_read)
return mmio_info->read(vgpu, offset, pdata, bytes);
else {
u64 ro_mask = mmio_info->ro_mask;
u32 old_vreg = 0, old_sreg = 0;
u64 data = 0;
if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
old_vreg = vgpu_vreg(vgpu, offset);
old_sreg = vgpu_sreg(vgpu, offset);
}
if (likely(!ro_mask))
ret = mmio_info->write(vgpu, offset, pdata, bytes);
else if (!~ro_mask) {
gvt_vgpu_err("try to write RO reg %x\n", offset);
return 0;
} else {
/* keep the RO bits in the virtual register */
memcpy(&data, pdata, bytes);
data &= ~ro_mask;
data |= vgpu_vreg(vgpu, offset) & ro_mask;
ret = mmio_info->write(vgpu, offset, &data, bytes);
}
/* higher 16bits of mode ctl regs are mask bits for change */
if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
u32 mask = vgpu_vreg(vgpu, offset) >> 16;
vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
| (vgpu_vreg(vgpu, offset) & mask);
vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
| (vgpu_sreg(vgpu, offset) & mask);
}
}
return ret;
default_rw:
return is_read ?
intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
}
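
Two pieces of bit arithmetic in intel_vgpu_mmio_reg_rw() above deserve
a worked example. This is a standalone sketch with made-up values, not
the driver's code:

  /* RO bits: keep the current vreg value wherever ro_mask is set */
  static u32 apply_ro_mask(u32 guest_write, u32 vreg, u32 ro_mask)
  {
          return (guest_write & ~ro_mask) | (vreg & ro_mask);
  }
  /* apply_ro_mask(0x0000f00f, 0x000000aa, 0x000000ff) == 0x0000f0aa */

  /* mode-mask regs: the high 16 bits of a write select which of the
   * low 16 bits are allowed to change */
  static u32 apply_mode_mask(u32 old_vreg, u32 new_val)
  {
          u32 mask = new_val >> 16;

          return (old_vreg & ~mask) | (new_val & mask);
  }
  /* writing 0x00010001 sets bit 0; writing 0x00010000 clears it */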


@@ -31,6 +31,7 @@
#include "i915_drv.h"
#include "gvt.h"
#include "trace.h"
/* common offset among interrupt control registers */
#define regbase_to_isr(base) (base)
@@ -178,8 +179,8 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
u32 imr = *(u32 *)p_data;
gvt_dbg_irq("write IMR %x, new %08x, old %08x, changed %08x\n",
reg, imr, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ imr);
trace_write_ir(vgpu->id, "IMR", reg, imr, vgpu_vreg(vgpu, reg),
(vgpu_vreg(vgpu, reg) ^ imr));
vgpu_vreg(vgpu, reg) = imr;
@@ -209,8 +210,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
u32 ier = *(u32 *)p_data;
u32 virtual_ier = vgpu_vreg(vgpu, reg);
gvt_dbg_irq("write MASTER_IRQ %x, new %08x, old %08x, changed %08x\n",
reg, ier, virtual_ier, virtual_ier ^ ier);
trace_write_ir(vgpu->id, "MASTER_IRQ", reg, ier, virtual_ier,
(virtual_ier ^ ier));
/*
* GEN8_MASTER_IRQ is a special irq register,
@@ -248,8 +249,8 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
struct intel_gvt_irq_info *info;
u32 ier = *(u32 *)p_data;
gvt_dbg_irq("write IER %x, new %08x, old %08x, changed %08x\n",
reg, ier, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ ier);
trace_write_ir(vgpu->id, "IER", reg, ier, vgpu_vreg(vgpu, reg),
(vgpu_vreg(vgpu, reg) ^ ier));
vgpu_vreg(vgpu, reg) = ier;
@@ -285,8 +286,8 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
iir_to_regbase(reg));
u32 iir = *(u32 *)p_data;
gvt_dbg_irq("write IIR %x, new %08x, old %08x, changed %08x\n",
reg, iir, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ iir);
trace_write_ir(vgpu->id, "IIR", reg, iir, vgpu_vreg(vgpu, reg),
(vgpu_vreg(vgpu, reg) ^ iir));
if (WARN_ON(!info))
return -EINVAL;
@@ -411,8 +412,7 @@ static void propagate_event(struct intel_gvt_irq *irq,
if (!test_bit(bit, (void *)&vgpu_vreg(vgpu,
regbase_to_imr(reg_base)))) {
gvt_dbg_irq("set bit (%d) for (%s) for vgpu (%d)\n",
bit, irq_name[event], vgpu->id);
trace_propagate_event(vgpu->id, irq_name[event], bit);
set_bit(bit, (void *)&vgpu_vreg(vgpu,
regbase_to_iir(reg_base)));
}


@@ -123,7 +123,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio;
unsigned int offset = 0;
int ret = -EINVAL;
@@ -187,32 +186,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
goto err;
}
mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (mmio) {
if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
goto err;
if (WARN_ON(mmio->offset != offset))
goto err;
}
ret = mmio->read(vgpu, offset, p_data, bytes);
} else {
ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
if (!vgpu->mmio.disable_warn_untrack) {
gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
offset, bytes, *(u32 *)p_data);
if (offset == 0x206c) {
gvt_vgpu_err("------------------------------------------\n");
gvt_vgpu_err("likely triggers a gfx reset\n");
gvt_vgpu_err("------------------------------------------\n");
vgpu->mmio.disable_warn_untrack = true;
}
}
}
if (ret)
ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, true);
if (ret < 0)
goto err;
intel_gvt_mmio_set_accessed(gvt, offset);
@@ -239,9 +214,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio;
unsigned int offset = 0;
u32 old_vreg = 0, old_sreg = 0;
int ret = -EINVAL;
if (vgpu->failsafe) {
@@ -296,66 +269,10 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
return ret;
}
mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (!mmio && !vgpu->mmio.disable_warn_untrack)
gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
vgpu->id, offset, bytes, *(u32 *)p_data);
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
if (WARN_ON(!IS_ALIGNED(offset, bytes)))
goto err;
}
if (mmio) {
u64 ro_mask = mmio->ro_mask;
if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
goto err;
if (WARN_ON(mmio->offset != offset))
goto err;
}
if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
old_vreg = vgpu_vreg(vgpu, offset);
old_sreg = vgpu_sreg(vgpu, offset);
}
if (!ro_mask) {
ret = mmio->write(vgpu, offset, p_data, bytes);
} else {
/* Protect RO bits like HW */
u64 data = 0;
/* all register bits are RO. */
if (ro_mask == ~(u64)0) {
gvt_vgpu_err("try to write RO reg %x\n",
offset);
ret = 0;
goto out;
}
/* keep the RO bits in the virtual register */
memcpy(&data, p_data, bytes);
data &= ~mmio->ro_mask;
data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
ret = mmio->write(vgpu, offset, &data, bytes);
}
/* higher 16bits of mode ctl regs are mask bits for change */
if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
u32 mask = vgpu_vreg(vgpu, offset) >> 16;
vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
| (vgpu_vreg(vgpu, offset) & mask);
vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
| (vgpu_sreg(vgpu, offset) & mask);
}
} else
ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
bytes);
if (ret)
ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
if (ret < 0)
goto err;
out:
intel_gvt_mmio_set_accessed(gvt, offset);
mutex_unlock(&gvt->lock);
return 0;
@@ -372,20 +289,32 @@ err:
* @vgpu: a vGPU
*
*/
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
void *mmio = gvt->firmware.mmio;
memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
if (dmlr) {
memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
vgpu->mmio.disable_warn_untrack = false;
} else {
#define GVT_GEN8_MMIO_RESET_OFFSET (0x44200)
/* only reset the engine-related mmio; registers from 0x44200
* upward (interrupt, DE and other display-related mmio) will
* not be touched
*/
memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
}
vgpu->mmio.disable_warn_untrack = false;
}
/**
@@ -405,7 +334,7 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
intel_vgpu_reset_mmio(vgpu);
intel_vgpu_reset_mmio(vgpu, true);
return 0;
}


@@ -39,36 +39,28 @@
struct intel_gvt;
struct intel_vgpu;
#define D_SNB (1 << 0)
#define D_IVB (1 << 1)
#define D_HSW (1 << 2)
#define D_BDW (1 << 3)
#define D_SKL (1 << 4)
#define D_KBL (1 << 5)
#define D_BDW (1 << 0)
#define D_SKL (1 << 1)
#define D_KBL (1 << 2)
#define D_GEN9PLUS (D_SKL | D_KBL)
#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL)
#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL | D_KBL)
#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
#define D_SKL_PLUS (D_SKL | D_KBL)
#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL)
#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL | D_KBL)
#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
#define D_PRE_BDW (D_SNB | D_IVB | D_HSW)
#define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW)
#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
#define D_PRE_SKL (D_BDW)
#define D_ALL (D_BDW | D_SKL | D_KBL)
typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
struct intel_gvt_mmio_info {
u32 offset;
u32 size;
u32 length;
u32 addr_mask;
u64 ro_mask;
u32 device;
int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int);
int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int);
gvt_mmio_func read;
gvt_mmio_func write;
u32 addr_range;
struct hlist_node node;
};
@@ -79,8 +71,6 @@ bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
unsigned int offset);
#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
typeof(reg) __reg = reg; \
u32 *offset = (u32 *)&__reg; \
@@ -88,7 +78,7 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
})
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
@@ -97,13 +87,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes);
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes);
bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
unsigned int offset);
bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset);
void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
unsigned int offset);
bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset);
int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
@@ -111,4 +95,8 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
unsigned int offset);
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
void *pdata, unsigned int bytes, bool is_read);
#endif


@@ -133,8 +133,7 @@ static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
return -EINVAL;
gvt_dbg_irq("vgpu%d: inject msi address %x data%x\n", vgpu->id, addr,
data);
trace_inject_msi(vgpu->id, addr, data);
ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
if (ret)


@@ -35,6 +35,7 @@
#include "i915_drv.h"
#include "gvt.h"
#include "trace.h"
struct render_mmio {
int ring_id;
@@ -260,7 +261,8 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
#define CTX_CONTEXT_CONTROL_VAL 0x03
void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
/* Switch ring mmio values (context) from host to a vgpu. */
static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct render_mmio *mmio;
@@ -305,14 +307,15 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
I915_WRITE(mmio->reg, v);
POSTING_READ(mmio->reg);
gvt_dbg_render("load reg %x old %x new %x\n",
i915_mmio_reg_offset(mmio->reg),
mmio->value, v);
trace_render_mmio(vgpu->id, "load",
i915_mmio_reg_offset(mmio->reg),
mmio->value, v);
}
handle_tlb_pending_event(vgpu, ring_id);
}
void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
/* Switch ring mmio values (context) from vgpu to host. */
static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct render_mmio *mmio;
@@ -346,8 +349,37 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
I915_WRITE(mmio->reg, v);
POSTING_READ(mmio->reg);
gvt_dbg_render("restore reg %x old %x new %x\n",
i915_mmio_reg_offset(mmio->reg),
mmio->value, v);
trace_render_mmio(vgpu->id, "restore",
i915_mmio_reg_offset(mmio->reg),
mmio->value, v);
}
}
/**
* intel_gvt_switch_render_mmio - switch mmio context of specific engine
* @pre: the last vGPU that own the engine
* @next: the vGPU to switch to
* @ring_id: specify the engine
*
* A NULL pre indicates that the host owns the engine. A NULL next
* indicates that we are switching to a host workload.
*/
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next, int ring_id)
{
if (WARN_ON(!pre && !next))
return;
gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
/**
* TODO: Optimize for vGPU to vGPU switch by merging
* switch_mmio_to_host() and switch_mmio_to_vgpu().
*/
if (pre)
switch_mmio_to_host(pre, ring_id);
if (next)
switch_mmio_to_vgpu(next, ring_id);
}
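
A hedged usage sketch of the new entry point (vgpu_a and vgpu_b are
hypothetical vGPUs; RCS is the render engine id from the i915 enum):

  intel_gvt_switch_mmio(vgpu_a, NULL, RCS);   /* save vGPU, restore host */
  intel_gvt_switch_mmio(NULL, vgpu_b, RCS);   /* load vgpu_b's render mmio */

A vgpu_a to vgpu_b switch currently passes through the host state; the
TODO above notes that merging the two halves is a planned optimization.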


@@ -36,8 +36,8 @@
#ifndef __GVT_RENDER_H__
#define __GVT_RENDER_H__
void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id);
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next, int ring_id);
void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id);
#endif


@@ -202,11 +202,6 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct vgpu_sched_data *vgpu_data;
struct intel_vgpu *vgpu = NULL;
static uint64_t timer_check;
if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
gvt_balance_timeslice(sched_data);
/* no active vgpu or has already had a target */
if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
goto out;
@@ -231,9 +226,19 @@ out:
void intel_gvt_schedule(struct intel_gvt *gvt)
{
struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
static uint64_t timer_check;
mutex_lock(&gvt->lock);
if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
(void *)&gvt->service_request)) {
if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
gvt_balance_timeslice(sched_data);
}
clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);
tbs_sched_func(sched_data);
mutex_unlock(&gvt->lock);
}
@@ -303,8 +308,20 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
int ring_id;
kfree(vgpu->sched_data);
vgpu->sched_data = NULL;
spin_lock_bh(&scheduler->mmio_context_lock);
for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
if (scheduler->engine_owner[ring_id] == vgpu) {
intel_gvt_switch_mmio(vgpu, NULL, ring_id);
scheduler->engine_owner[ring_id] = NULL;
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
}
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)


@@ -138,21 +138,42 @@ static int shadow_context_status_change(struct notifier_block *nb,
struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
shadow_ctx_notifier_block[req->engine->id]);
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload =
scheduler->current_workload[req->engine->id];
enum intel_engine_id ring_id = req->engine->id;
struct intel_vgpu_workload *workload;
if (!is_gvt_request(req) || unlikely(!workload))
if (!is_gvt_request(req)) {
spin_lock_bh(&scheduler->mmio_context_lock);
if (action == INTEL_CONTEXT_SCHEDULE_IN &&
scheduler->engine_owner[ring_id]) {
/* Switch ring from vGPU to host. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
NULL, ring_id);
scheduler->engine_owner[ring_id] = NULL;
}
spin_unlock_bh(&scheduler->mmio_context_lock);
return NOTIFY_OK;
}
workload = scheduler->current_workload[ring_id];
if (unlikely(!workload))
return NOTIFY_OK;
switch (action) {
case INTEL_CONTEXT_SCHEDULE_IN:
intel_gvt_load_render_mmio(workload->vgpu,
workload->ring_id);
spin_lock_bh(&scheduler->mmio_context_lock);
if (workload->vgpu != scheduler->engine_owner[ring_id]) {
/* Switch ring from host to vGPU or vGPU to vGPU. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
workload->vgpu, ring_id);
scheduler->engine_owner[ring_id] = workload->vgpu;
} else
gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
ring_id, workload->vgpu->id);
spin_unlock_bh(&scheduler->mmio_context_lock);
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
intel_gvt_restore_render_mmio(workload->vgpu,
workload->ring_id);
/* If the status is -EINPROGRESS means this workload
* doesn't meet any issue during dispatching so when
* get the SCHEDULE_OUT set the status to be zero for
@@ -431,6 +452,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
atomic_dec(&vgpu->running_workload_num);
wake_up(&scheduler->workload_complete_wq);
if (gvt->scheduler.need_reschedule)
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
mutex_unlock(&gvt->lock);
}


@@ -42,6 +42,10 @@ struct intel_gvt_workload_scheduler {
struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
bool need_reschedule;
spinlock_t mmio_context_lock;
/* can be null when owner is host */
struct intel_vgpu *engine_owner[I915_NUM_ENGINES];
wait_queue_head_t workload_complete_wq;
struct task_struct *thread[I915_NUM_ENGINES];
wait_queue_head_t waitq[I915_NUM_ENGINES];


@@ -224,58 +224,138 @@ TRACE_EVENT(oos_sync,
TP_printk("%s", __entry->buf)
);
#define MAX_CMD_STR_LEN 256
TRACE_EVENT(gvt_command,
TP_PROTO(u8 vm_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len, bool ring_buffer_cmd, cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler),
TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len,
u32 buf_type),
TP_ARGS(vm_id, ring_id, ip_gma, cmd_va, cmd_len, ring_buffer_cmd, cost_pre_cmd_handler, cost_cmd_handler),
TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type),
TP_STRUCT__entry(
__field(u8, vm_id)
__field(u8, ring_id)
__field(int, i)
__array(char, tmp_buf, MAX_CMD_STR_LEN)
__array(char, cmd_str, MAX_CMD_STR_LEN)
),
TP_STRUCT__entry(
__field(u8, vgpu_id)
__field(u8, ring_id)
__field(u32, ip_gma)
__field(u32, buf_type)
__field(u32, cmd_len)
__dynamic_array(u32, raw_cmd, cmd_len)
),
TP_fast_assign(
__entry->vm_id = vm_id;
__entry->ring_id = ring_id;
__entry->cmd_str[0] = '\0';
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "VM(%d) Ring(%d): %s ip(%08x) pre handler cost (%llu), handler cost (%llu) ", vm_id, ring_id, ring_buffer_cmd ? "RB":"BB", ip_gma, cost_pre_cmd_handler, cost_cmd_handler);
strcat(__entry->cmd_str, __entry->tmp_buf);
entry->i = 0;
while (cmd_len > 0) {
if (cmd_len >= 8) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x %08x %08x %08x %08x ",
cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3],
cmd_va[__entry->i+4], cmd_va[__entry->i+5], cmd_va[__entry->i+6], cmd_va[__entry->i+7]);
__entry->i += 8;
cmd_len -= 8;
strcat(__entry->cmd_str, __entry->tmp_buf);
} else if (cmd_len >= 4) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x ",
cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3]);
__entry->i += 4;
cmd_len -= 4;
strcat(__entry->cmd_str, __entry->tmp_buf);
} else if (cmd_len >= 2) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x ", cmd_va[__entry->i], cmd_va[__entry->i+1]);
__entry->i += 2;
cmd_len -= 2;
strcat(__entry->cmd_str, __entry->tmp_buf);
} else if (cmd_len == 1) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x ", cmd_va[__entry->i]);
__entry->i += 1;
cmd_len -= 1;
strcat(__entry->cmd_str, __entry->tmp_buf);
}
}
strcat(__entry->cmd_str, "\n");
),
TP_fast_assign(
__entry->vgpu_id = vgpu_id;
__entry->ring_id = ring_id;
__entry->ip_gma = ip_gma;
__entry->buf_type = buf_type;
__entry->cmd_len = cmd_len;
memcpy(__get_dynamic_array(raw_cmd), cmd_va, cmd_len * sizeof(*cmd_va));
),
TP_printk("%s", __entry->cmd_str)
TP_printk("vgpu%d ring %d: buf_type %u, ip_gma %08x, raw cmd %s",
__entry->vgpu_id,
__entry->ring_id,
__entry->buf_type,
__entry->ip_gma,
__print_array(__get_dynamic_array(raw_cmd), __entry->cmd_len, 4))
);
#define GVT_TEMP_STR_LEN 10
TRACE_EVENT(write_ir,
TP_PROTO(int id, char *reg_name, unsigned int reg, unsigned int new_val,
unsigned int old_val, bool changed),
TP_ARGS(id, reg_name, reg, new_val, old_val, changed),
TP_STRUCT__entry(
__field(int, id)
__array(char, buf, GVT_TEMP_STR_LEN)
__field(unsigned int, reg)
__field(unsigned int, new_val)
__field(unsigned int, old_val)
__field(bool, changed)
),
TP_fast_assign(
__entry->id = id;
snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", reg_name);
__entry->reg = reg;
__entry->new_val = new_val;
__entry->old_val = old_val;
__entry->changed = changed;
),
TP_printk("VM%u write [%s] %x, new %08x, old %08x, changed %08x\n",
__entry->id, __entry->buf, __entry->reg, __entry->new_val,
__entry->old_val, __entry->changed)
);
TRACE_EVENT(propagate_event,
TP_PROTO(int id, const char *irq_name, int bit),
TP_ARGS(id, irq_name, bit),
TP_STRUCT__entry(
__field(int, id)
__array(char, buf, GVT_TEMP_STR_LEN)
__field(int, bit)
),
TP_fast_assign(
__entry->id = id;
snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", irq_name);
__entry->bit = bit;
),
TP_printk("Set bit (%d) for (%s) for vgpu (%d)\n",
__entry->bit, __entry->buf, __entry->id)
);
TRACE_EVENT(inject_msi,
TP_PROTO(int id, unsigned int address, unsigned int data),
TP_ARGS(id, address, data),
TP_STRUCT__entry(
__field(int, id)
__field(unsigned int, address)
__field(unsigned int, data)
),
TP_fast_assign(
__entry->id = id;
__entry->address = address;
__entry->data = data;
),
TP_printk("vgpu%d:inject msi address %x data %x\n",
__entry->id, __entry->address, __entry->data)
);
TRACE_EVENT(render_mmio,
TP_PROTO(int id, char *action, unsigned int reg,
unsigned int old_val, unsigned int new_val),
TP_ARGS(id, action, reg, new_val, old_val),
TP_STRUCT__entry(
__field(int, id)
__array(char, buf, GVT_TEMP_STR_LEN)
__field(unsigned int, reg)
__field(unsigned int, old_val)
__field(unsigned int, new_val)
),
TP_fast_assign(
__entry->id = id;
snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", action);
__entry->reg = reg;
__entry->old_val = old_val;
__entry->new_val = new_val;
),
TP_printk("VM%u %s reg %x, old %08x new %08x\n",
__entry->id, __entry->buf, __entry->reg,
__entry->old_val, __entry->new_val)
);
#endif /* _GVT_TRACE_H_ */
/* This part must be out of protection */
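
The reworked gvt_command event above leans on ftrace's variable-length
payloads instead of the old fixed char buffer and strcat loop. A minimal
sketch of the pattern (hypothetical event name, not part of this series):

  TRACE_EVENT(example_blob,
          TP_PROTO(u32 *data, u32 len),
          TP_ARGS(data, len),

          TP_STRUCT__entry(
                  __field(u32, len)
                  __dynamic_array(u32, blob, len) /* sized per event */
          ),

          TP_fast_assign(
                  __entry->len = len;
                  memcpy(__get_dynamic_array(blob), data,
                         len * sizeof(u32));
          ),

          /* decoding happens at read time via __print_array, so the
           * hot path only pays for one memcpy */
          TP_printk("%s", __print_array(__get_dynamic_array(blob),
                                        __entry->len, 4))
  );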


@@ -501,9 +501,14 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
intel_vgpu_reset_gtt(vgpu, dmlr);
intel_vgpu_reset_resource(vgpu);
intel_vgpu_reset_mmio(vgpu);
/*fence will not be reset during virtual reset */
if (dmlr)
intel_vgpu_reset_resource(vgpu);
intel_vgpu_reset_mmio(vgpu, dmlr);
populate_pvinfo_page(vgpu);
intel_vgpu_reset_display(vgpu);


@@ -1670,12 +1670,22 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
seq_printf(m, "FBC disabled: %s\n",
dev_priv->fbc.no_fbc_reason);
if (intel_fbc_is_active(dev_priv) && INTEL_GEN(dev_priv) >= 7) {
uint32_t mask = INTEL_GEN(dev_priv) >= 8 ?
BDW_FBC_COMPRESSION_MASK :
IVB_FBC_COMPRESSION_MASK;
seq_printf(m, "Compressing: %s\n",
yesno(I915_READ(FBC_STATUS2) & mask));
if (intel_fbc_is_active(dev_priv)) {
u32 mask;
if (INTEL_GEN(dev_priv) >= 8)
mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
else if (INTEL_GEN(dev_priv) >= 7)
mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
else if (INTEL_GEN(dev_priv) >= 5)
mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
else if (IS_G4X(dev_priv))
mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
else
mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
FBC_STAT_COMPRESSED);
seq_printf(m, "Compressing: %s\n", yesno(mask));
}
mutex_unlock(&dev_priv->fbc.lock);
@@ -1684,7 +1694,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
return 0;
}
static int i915_fbc_fc_get(void *data, u64 *val)
static int i915_fbc_false_color_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
@@ -1696,7 +1706,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
return 0;
}
static int i915_fbc_fc_set(void *data, u64 val)
static int i915_fbc_false_color_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
u32 reg;
@@ -1717,8 +1727,8 @@ static int i915_fbc_fc_set(void *data, u64 val)
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
i915_fbc_fc_get, i915_fbc_fc_set,
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
i915_fbc_false_color_get, i915_fbc_false_color_set,
"%llu\n");
static int i915_ips_status(struct seq_file *m, void *unused)
@@ -1988,6 +1998,12 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, '\n');
}
seq_printf(m,
"\tvma hashtable size=%u (actual %lu), count=%u\n",
ctx->vma_lut.ht_size,
BIT(ctx->vma_lut.ht_bits),
ctx->vma_lut.ht_count);
seq_putc(m, '\n');
}
@@ -4289,26 +4305,27 @@ i915_drop_caches_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
struct drm_device *dev = &dev_priv->drm;
int ret;
int ret = 0;
DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
if (val & DROP_ACTIVE) {
ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (val & (DROP_ACTIVE | DROP_RETIRE)) {
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
goto unlock;
}
return ret;
if (val & DROP_RETIRE)
i915_gem_retire_requests(dev_priv);
if (val & DROP_ACTIVE)
ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (val & DROP_RETIRE)
i915_gem_retire_requests(dev_priv);
mutex_unlock(&dev->struct_mutex);
}
lockdep_set_current_reclaim_state(GFP_KERNEL);
if (val & DROP_BOUND)
@@ -4321,9 +4338,6 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_shrink_all(dev_priv);
lockdep_clear_current_reclaim_state();
unlock:
mutex_unlock(&dev->struct_mutex);
if (val & DROP_FREED) {
synchronize_rcu();
i915_gem_drain_freed_objects(dev_priv);
@@ -4861,7 +4875,7 @@ static const struct i915_debugfs_files {
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
{"i915_fbc_false_color", &i915_fbc_fc_fops},
{"i915_fbc_false_color", &i915_fbc_false_color_fops},
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops},


@@ -139,6 +139,9 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ret = PCH_SPT;
DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
} else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
ret = PCH_CNP;
DRM_DEBUG_KMS("Assuming CannonPoint PCH\n");
}
return ret;
@@ -170,24 +173,29 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
dev_priv->pch_id = id;
unsigned short id_ext = pch->device &
INTEL_PCH_DEVICE_ID_MASK_EXT;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_id = id;
dev_priv->pch_type = PCH_IBX;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
WARN_ON(!IS_GEN5(dev_priv));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_id = id;
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
WARN_ON(!(IS_GEN6(dev_priv) ||
IS_IVYBRIDGE(dev_priv)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_id = id;
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
WARN_ON(!(IS_GEN6(dev_priv) ||
IS_IVYBRIDGE(dev_priv)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_id = id;
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) &&
@ -195,6 +203,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
WARN_ON(IS_HSW_ULT(dev_priv) ||
IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_id = id;
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) &&
@ -202,20 +211,35 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
WARN_ON(!IS_HSW_ULT(dev_priv) &&
!IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_id = id;
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
} else if (id_ext == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_id = id_ext;
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
dev_priv->pch_id = id;
dev_priv->pch_type = PCH_KBP;
DRM_DEBUG_KMS("Found KabyPoint PCH\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
dev_priv->pch_id = id;
dev_priv->pch_type = PCH_CNP;
DRM_DEBUG_KMS("Found CannonPoint PCH\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
} else if (id_ext == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
dev_priv->pch_id = id_ext;
dev_priv->pch_type = PCH_CNP;
DRM_DEBUG_KMS("Found CannonPoint LP PCH\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@ -223,6 +247,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
pch->subsystem_device ==
PCI_SUBDEVICE_ID_QEMU)) {
dev_priv->pch_id = id;
dev_priv->pch_type =
intel_virt_detect_pch(dev_priv);
} else
@ -351,6 +376,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_ASYNC:
case I915_PARAM_HAS_EXEC_FENCE:
case I915_PARAM_HAS_EXEC_CAPTURE:
case I915_PARAM_HAS_EXEC_BATCH_FIRST:
/* For the time being all of these are always true;
* if some supported hardware does not have one of these
* features this value needs to be provided from
@ -358,6 +384,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
*/
value = 1;
break;
case I915_PARAM_SLICE_MASK:
value = INTEL_INFO(dev_priv)->sseu.slice_mask;
if (!value)
return -ENODEV;
break;
case I915_PARAM_SUBSLICE_MASK:
value = INTEL_INFO(dev_priv)->sseu.subslice_mask;
if (!value)
return -ENODEV;
break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
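
For the two new getparams above, userspace queries them through the standard GETPARAM ioctl. A minimal sketch, assuming a libdrm-style environment (the header path varies by setup, and drmIoctl() from libdrm would additionally handle EINTR restarts):

    #include <sys/ioctl.h>
    #include <i915_drm.h>   /* from libdrm; sometimes <libdrm/i915_drm.h> */

    static int query_mask(int fd, int param, int *mask)
    {
            struct drm_i915_getparam gp = { .param = param, .value = mask };

            /* Kernels without sseu info report -ENODEV via errno. */
            return ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
    }

    /* usage: query_mask(fd, I915_PARAM_SLICE_MASK, &slice_mask); */
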
@ -553,6 +589,7 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
intel_uc_fini_hw(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_context_fini(dev_priv);
i915_gem_cleanup_userptr(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_drain_freed_objects(dev_priv);
@ -997,6 +1034,8 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
intel_uc_sanitize_options(dev_priv);
intel_gvt_sanitize_options(dev_priv);
}
/**
@ -2459,9 +2498,6 @@ static int intel_runtime_resume(struct device *kdev)
intel_guc_resume(dev_priv);
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev_priv);
if (IS_GEN9_LP(dev_priv)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);


@ -37,7 +37,7 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
@ -80,8 +80,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20170529"
#define DRIVER_TIMESTAMP 1496041258
#define DRIVER_DATE "20170619"
#define DRIVER_TIMESTAMP 1497857498
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@ -752,7 +752,6 @@ struct intel_csr {
func(has_aliasing_ppgtt); \
func(has_csr); \
func(has_ddi); \
func(has_decoupled_mmio); \
func(has_dp_mst); \
func(has_fbc); \
func(has_fpga_dbg); \
@ -827,6 +826,8 @@ enum intel_platform {
INTEL_BROXTON,
INTEL_KABYLAKE,
INTEL_GEMINILAKE,
INTEL_COFFEELAKE,
INTEL_CANNONLAKE,
INTEL_MAX_PLATFORMS
};
@ -1152,6 +1153,7 @@ enum intel_pch {
PCH_LPT, /* Lynxpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
PCH_KBP, /* Kabypoint PCH */
PCH_CNP, /* Cannonpoint PCH */
PCH_NOP,
};
@ -1160,11 +1162,9 @@ enum intel_sbi_destination {
SBI_MPHY,
};
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
struct intel_fbdev;
@ -1454,6 +1454,13 @@ struct i915_gem_mm {
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
/**
* Workqueue to fault in userptr pages, flushed by the execbuf
* when required but otherwise left to userspace to try again
* on EAGAIN.
*/
struct workqueue_struct *userptr_wq;
u64 unordered_timeline;
/* the indicator for dispatch video commands on two BSD rings */
@ -2017,9 +2024,17 @@ struct i915_oa_ops {
void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
/**
* @enable_metric_set: Applies any MUX configuration to set up the
* Boolean and Custom (B/C) counters that are part of the counter
* reports being sampled. May apply system constraints such as
* @select_metric_set: The auto generated code that checks whether a
* requested OA config is applicable to the system and if so sets up
* the mux, oa and flex eu register config pointers according to the
* current dev_priv->perf.oa.metrics_set.
*/
int (*select_metric_set)(struct drm_i915_private *dev_priv);
/**
* @enable_metric_set: Selects and applies any MUX configuration to set
* up the Boolean and Custom (B/C) counters that are part of the
* counter reports being sampled. May apply system constraints such as
* disabling EU clock gating as required.
*/
int (*enable_metric_set)(struct drm_i915_private *dev_priv);
@ -2050,20 +2065,13 @@ struct i915_oa_ops {
size_t *offset);
/**
* @oa_buffer_check: Check for OA buffer data + update tail
* @oa_hw_tail_read: read the OA tail pointer register
*
* This is either called via fops or the poll check hrtimer (atomic
* ctx) without any locks taken.
*
* It's safe to read OA config state here unlocked, assuming that this
* is only called while the stream is enabled, while the global OA
* configuration can't be modified.
*
* Efficiency is more important than avoiding some false positives
* here, which will be handled gracefully - likely resulting in an
* %EAGAIN error for userspace.
* In particular this enables us to share all the fiddly code for
* handling the OA unit tail pointer race that affects multiple
* generations.
*/
bool (*oa_buffer_check)(struct drm_i915_private *dev_priv);
u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
};
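
The tail-pointer race the comment refers to is handled by aging the hardware tail before exposing it: a tail value is only trusted once it has stayed put long enough for the reports behind it to be visible in memory. A simplified sketch of the idea; the state and margin names below are illustrative stand-ins for fields the driver keeps in dev_priv->perf.oa:

    static u32 aging_tail, visible_tail, head;  /* illustrative state */
    static u64 aging_timestamp;
    #define TAIL_MARGIN_NSEC 100000ULL          /* assumed margin */

    static bool oa_buffer_check_sketch(struct drm_i915_private *dev_priv)
    {
            u32 hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);
            u64 now = ktime_get_mono_fast_ns(); /* safe in atomic context */

            if (hw_tail != aging_tail) {
                    aging_tail = hw_tail;       /* tail moved: restart aging */
                    aging_timestamp = now;
            } else if (now - aging_timestamp > TAIL_MARGIN_NSEC) {
                    visible_tail = hw_tail;     /* stable: safe to expose */
            }

            return visible_tail != head;        /* data ready for poll()? */
    }
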
struct intel_cdclk_state {
@ -2394,8 +2402,6 @@ struct drm_i915_private {
struct mutex lock;
struct list_head streams;
spinlock_t hook_lock;
struct {
struct i915_perf_stream *exclusive_stream;
@ -2413,17 +2419,23 @@ struct drm_i915_private {
bool periodic;
int period_exponent;
int timestamp_frequency;
int metrics_set;
const struct i915_oa_reg *mux_regs;
int mux_regs_len;
const struct i915_oa_reg *mux_regs[6];
int mux_regs_lens[6];
int n_mux_configs;
const struct i915_oa_reg *b_counter_regs;
int b_counter_regs_len;
const struct i915_oa_reg *flex_regs;
int flex_regs_len;
struct {
struct i915_vma *vma;
u8 *vaddr;
u32 last_ctx_id;
int format;
int format_size;
@ -2493,6 +2505,15 @@ struct drm_i915_private {
} oa_buffer;
u32 gen7_latched_oastatus1;
u32 ctx_oactxctrl_offset;
u32 ctx_flexeu0_offset;
/**
* The RPT_ID/reason field for Gen8+ includes a bit
* to determine if the CTX ID in the report is valid
* but the specific bit differs between Gen 8 and 9
*/
u32 gen8_valid_ctx_bit;
struct i915_oa_ops ops;
const struct i915_oa_format *oa_formats;
@ -2768,6 +2789,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_BROXTON(dev_priv) ((dev_priv)->info.platform == INTEL_BROXTON)
#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.platform == INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv) ((dev_priv)->info.platform == INTEL_COFFEELAKE)
#define IS_CANNONLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_CANNONLAKE)
#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
@ -2803,10 +2826,18 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
#define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
@ -2845,6 +2876,12 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_GLK_REVID(dev_priv, since, until) \
(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
#define CNL_REVID_A0 0x0
#define CNL_REVID_B0 0x1
#define IS_CNL_REVID(p, since, until) \
(IS_CANNONLAKE(p) && IS_REVID(p, since, until))
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
@ -2859,6 +2896,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6)))
#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
#define IS_GEN10(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(9)))
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv))
@ -2959,6 +2997,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_DEVICE_ID_MASK_EXT 0xff80
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
@ -2967,11 +3006,16 @@ intel_info(const struct drm_i915_private *dev_priv)
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200
#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
#define HAS_PCH_CNP_LP(dev_priv) \
((dev_priv)->pch_id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
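
A worked example of why the extended 0xff80 mask (and the 9-bit pch_id stored above) is needed: SunrisePoint-LP (0x9d00) and CannonPoint-LP (0x9d80) share the same top byte, so the old 8-bit mask maps both to 0x9d00. A standalone sketch with a made-up device id:

    #include <stdio.h>

    #define PCH_ID_MASK     0xff00  /* INTEL_PCH_DEVICE_ID_MASK */
    #define PCH_ID_MASK_EXT 0xff80  /* INTEL_PCH_DEVICE_ID_MASK_EXT */

    int main(void)
    {
            unsigned short device = 0x9d84; /* hypothetical CNP-LP part */

            /* 0x9d00: indistinguishable from SPT-LP with 8 bits */
            printf("8-bit mask: 0x%04x\n", device & PCH_ID_MASK);
            /* 0x9d80: bit 7 survives, correctly CNP-LP */
            printf("9-bit mask: 0x%04x\n", device & PCH_ID_MASK_EXT);
            return 0;
    }
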
@ -2986,7 +3030,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
#define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv))
#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
@ -2996,8 +3040,6 @@ intel_info(const struct drm_i915_private *dev_priv)
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
#define HAS_DECOUPLED_MMIO(dev_priv) (INTEL_INFO(dev_priv)->has_decoupled_mmio)
#include "i915_trace.h"
static inline bool intel_vtd_active(void)
@ -3194,7 +3236,8 @@ int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
@ -3534,6 +3577,9 @@ i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
struct i915_gem_context *ctx,
uint32_t *reg_state);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
@ -3544,7 +3590,7 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm,
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
struct drm_mm_node *node,
unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_vm(struct i915_address_space *vm);
/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)


@ -49,10 +49,10 @@ static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
if (obj->cache_dirty)
return false;
if (!i915_gem_object_is_coherent(obj))
if (!obj->cache_coherent)
return true;
return obj->pin_display;
@ -143,9 +143,9 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_get_aperture *args = data;
struct i915_vma *vma;
size_t pinned;
u64 pinned;
pinned = 0;
pinned = ggtt->base.reserved;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
if (i915_vma_is_pinned(vma))
@ -233,6 +233,14 @@ err_phys:
return st;
}
static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
if (cpu_write_needs_clflush(obj))
obj->cache_dirty = true;
}
static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages,
@ -245,11 +253,10 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
if (needs_clflush &&
(obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
!i915_gem_object_is_coherent(obj))
!obj->cache_coherent)
drm_clflush_sg(pages);
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
__start_cpu_write(obj);
}
static void
@ -684,6 +691,12 @@ i915_gem_dumb_create(struct drm_file *file,
args->size, &args->handle);
}
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
return !(obj->cache_level == I915_CACHE_NONE ||
obj->cache_level == I915_CACHE_WT);
}
/**
* Creates a new mm object and returns a handle to it.
* @dev: drm device pointer
@ -753,6 +766,11 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
case I915_GEM_DOMAIN_CPU:
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
break;
case I915_GEM_DOMAIN_RENDER:
if (gpu_write_needs_clflush(obj))
obj->cache_dirty = true;
break;
}
obj->base.write_domain = 0;
@ -838,8 +856,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
if (ret)
return ret;
if (i915_gem_object_is_coherent(obj) ||
!static_cpu_has(X86_FEATURE_CLFLUSH)) {
if (obj->cache_coherent || !static_cpu_has(X86_FEATURE_CLFLUSH)) {
ret = i915_gem_object_set_to_cpu_domain(obj, false);
if (ret)
goto err_unpin;
@ -854,7 +871,8 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
* optimizes for the case when the gpu will dirty the data
* anyway again before the next pread happens.
*/
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
if (!obj->cache_dirty &&
!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush = CLFLUSH_BEFORE;
out:
@ -890,8 +908,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
if (ret)
return ret;
if (i915_gem_object_is_coherent(obj) ||
!static_cpu_has(X86_FEATURE_CLFLUSH)) {
if (obj->cache_coherent || !static_cpu_has(X86_FEATURE_CLFLUSH)) {
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret)
goto err_unpin;
@ -906,14 +923,16 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
* This optimizes for the case when the gpu will use the data
* right away and we therefore have to clflush anyway.
*/
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
if (!obj->cache_dirty) {
*needs_clflush |= CLFLUSH_AFTER;
/* Same trick applies to invalidate partially written cachelines read
* before writing.
*/
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush |= CLFLUSH_BEFORE;
/*
* Same trick applies to invalidate partially written
* cachelines read before writing.
*/
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush |= CLFLUSH_BEFORE;
}
out:
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
@ -2337,8 +2356,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
unsigned int max_segment;
gfp_t noreclaim;
int ret;
gfp_t gfp;
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
@ -2367,22 +2386,30 @@ rebuild_st:
* Fail silently without starting the shrinker
*/
mapping = obj->base.filp->f_mapping;
gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
gfp |= __GFP_NORETRY | __GFP_NOWARN;
noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
sg = st->sgl;
st->nents = 0;
for (i = 0; i < page_count; i++) {
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (unlikely(IS_ERR(page))) {
i915_gem_shrink(dev_priv,
page_count,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_PURGEABLE);
const unsigned int shrink[] = {
I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
0,
}, *s = shrink;
gfp_t gfp = noreclaim;
do {
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
}
if (unlikely(IS_ERR(page))) {
gfp_t reclaim;
if (likely(!IS_ERR(page)))
break;
if (!*s) {
ret = PTR_ERR(page);
goto err_sg;
}
i915_gem_shrink(dev_priv, 2 * page_count, *s++);
cond_resched();
/* We've tried hard to allocate the memory by reaping
* our own buffer, now let the real VM do its job and
@ -2392,15 +2419,26 @@ rebuild_st:
* defer the oom here by reporting the ENOMEM back
* to userspace.
*/
reclaim = mapping_gfp_mask(mapping);
reclaim |= __GFP_NORETRY; /* reclaim, but no oom */
if (!*s) {
/* reclaim and warn, but no oom */
gfp = mapping_gfp_mask(mapping);
page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto err_sg;
/* Our bo are always dirty and so we require
* kswapd to reclaim our pages (direct reclaim
* does not effectively begin pageout of our
* buffers on its own). However, direct reclaim
* only waits for kswapd when under allocation
* congestion. So as a result __GFP_RECLAIM is
* unreliable and fails to actually reclaim our
* dirty pages -- unless you try over and over
* again with !__GFP_NORETRY. However, we still
* want to fail this allocation rather than
* trigger the out-of-memory killer and for
* this we want the future __GFP_MAYFAIL.
*/
}
}
} while (1);
if (!i ||
sg->length >= max_segment ||
page_to_pfn(page) != last_pfn + 1) {
@ -3223,6 +3261,10 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
if (vma->vm->file == fpriv)
i915_vma_close(vma);
vma = obj->vma_hashed;
if (vma && vma->ctx->file_priv == fpriv)
i915_vma_unlink_ctx(vma);
if (i915_gem_object_is_active(obj) &&
!i915_gem_object_has_active_reference(obj)) {
i915_gem_object_set_active_reference(obj);
@ -3376,10 +3418,13 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU && !obj->cache_dirty)
return;
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
/*
* We manually flush the CPU domain so that we can override and
* force the flush for the display, and perform it asynchronously.
*/
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
if (obj->cache_dirty)
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
obj->base.write_domain = 0;
}
@ -3638,13 +3683,11 @@ restart:
}
}
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
i915_gem_object_is_coherent(obj))
obj->cache_dirty = true;
list_for_each_entry(vma, &obj->vma_list, obj_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;
obj->cache_coherent = i915_gem_object_is_coherent(obj);
obj->cache_dirty = true; /* Always invalidate stale cachelines */
return 0;
}
@ -3866,9 +3909,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
return 0;
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* Flush the CPU cache if it's still invalid. */
@ -3880,15 +3920,13 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
*/
if (write) {
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
if (write)
__start_cpu_write(obj);
return 0;
}
@ -4220,7 +4258,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->global_link);
INIT_LIST_HEAD(&obj->userfault_link);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
@ -4285,6 +4322,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
mapping = obj->base.filp->f_mapping;
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
i915_gem_object_init(obj, &i915_gem_object_ops);
@ -4308,6 +4346,9 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
} else
obj->cache_level = I915_CACHE_NONE;
obj->cache_coherent = i915_gem_object_is_coherent(obj);
obj->cache_dirty = !obj->cache_coherent;
trace_i915_gem_object_create(obj);
return obj;
@ -4356,7 +4397,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(i915_gem_object_is_active(obj));
list_for_each_entry_safe(vma, vn,
&obj->vma_list, obj_link) {
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(i915_vma_is_active(vma));
vma->flags &= ~I915_VMA_PIN_MASK;
i915_vma_close(vma);
@ -4763,7 +4803,9 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
i915_gem_init_userptr(dev_priv);
ret = i915_gem_init_userptr(dev_priv);
if (ret)
goto out_unlock;
ret = i915_gem_init_ggtt(dev_priv);
if (ret)
@ -4974,10 +5016,8 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
for (p = phases; *p; p++) {
list_for_each_entry(obj, *p, global_link) {
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
list_for_each_entry(obj, *p, global_link)
__start_cpu_write(obj);
}
mutex_unlock(&dev_priv->drm.struct_mutex);


@ -114,12 +114,27 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
list_for_each_entry(obj, list, batch_pool_link) {
/* The batches are strictly LRU ordered */
if (i915_gem_object_is_active(obj)) {
if (!reservation_object_test_signaled_rcu(obj->resv,
true))
struct reservation_object *resv = obj->resv;
if (!reservation_object_test_signaled_rcu(resv, true))
break;
i915_gem_retire_requests(pool->engine->i915);
GEM_BUG_ON(i915_gem_object_is_active(obj));
/*
* The object is now idle, clear the array of shared
* fences before we add a new request. Although, we
* remain on the same engine, we may be on a different
* timeline and so may continually grow the array,
* trapping a reference to all the old fences, rather
* than replace the existing fence.
*/
if (rcu_access_pointer(resv->fence)) {
reservation_object_lock(resv, NULL);
reservation_object_add_excl_fence(resv, NULL);
reservation_object_unlock(resv);
}
}
GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,


@ -71,8 +71,6 @@ static const struct dma_fence_ops i915_clflush_ops = {
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
drm_clflush_sg(obj->mm.pages);
obj->cache_dirty = false;
intel_fb_obj_flush(obj, ORIGIN_CPU);
}
@ -81,9 +79,6 @@ static void i915_clflush_work(struct work_struct *work)
struct clflush *clflush = container_of(work, typeof(*clflush), work);
struct drm_i915_gem_object *obj = clflush->obj;
if (!obj->cache_dirty)
goto out;
if (i915_gem_object_pin_pages(obj)) {
DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
goto out;
@ -131,10 +126,10 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* anything not backed by physical memory we consider to be always
* coherent and not need clflushing.
*/
if (!i915_gem_object_has_struct_page(obj))
if (!i915_gem_object_has_struct_page(obj)) {
obj->cache_dirty = false;
return;
obj->cache_dirty = true;
}
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
@ -144,7 +139,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* snooping behaviour occurs naturally as the result of our domain
* tracking.
*/
if (!(flags & I915_CLFLUSH_FORCE) && i915_gem_object_is_coherent(obj))
if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
return;
trace_i915_gem_object_clflush(obj);
@ -153,6 +148,8 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
if (!(flags & I915_CLFLUSH_SYNC))
clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
if (clflush) {
GEM_BUG_ON(!obj->cache_dirty);
dma_fence_init(&clflush->dma,
&i915_clflush_ops,
&clflush_lock,
@ -180,4 +177,6 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
} else {
GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
}
obj->cache_dirty = false;
}


@ -85,6 +85,7 @@
*
*/
#include <linux/log2.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@ -92,6 +93,71 @@
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
/* Initial size (as log2) to preallocate the handle->object hashtable */
#define VMA_HT_BITS 2u /* 4 x 2 pointers, 64 bytes minimum */
static void resize_vma_ht(struct work_struct *work)
{
struct i915_gem_context_vma_lut *lut =
container_of(work, typeof(*lut), resize);
unsigned int bits, new_bits, size, i;
struct hlist_head *new_ht;
GEM_BUG_ON(!(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS));
bits = 1 + ilog2(4*lut->ht_count/3 + 1);
new_bits = min_t(unsigned int,
max(bits, VMA_HT_BITS),
sizeof(unsigned int) * BITS_PER_BYTE - 1);
if (new_bits == lut->ht_bits)
goto out;
new_ht = kzalloc(sizeof(*new_ht)<<new_bits, GFP_KERNEL | __GFP_NOWARN);
if (!new_ht)
new_ht = vzalloc(sizeof(*new_ht)<<new_bits);
if (!new_ht)
/* Pretend resize succeeded and stop calling us for a bit! */
goto out;
size = BIT(lut->ht_bits);
for (i = 0; i < size; i++) {
struct i915_vma *vma;
struct hlist_node *tmp;
hlist_for_each_entry_safe(vma, tmp, &lut->ht[i], ctx_node)
hlist_add_head(&vma->ctx_node,
&new_ht[hash_32(vma->ctx_handle,
new_bits)]);
}
kvfree(lut->ht);
lut->ht = new_ht;
lut->ht_bits = new_bits;
out:
smp_store_release(&lut->ht_size, BIT(bits));
GEM_BUG_ON(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS);
}
static void vma_lut_free(struct i915_gem_context *ctx)
{
struct i915_gem_context_vma_lut *lut = &ctx->vma_lut;
unsigned int i, size;
if (lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS)
cancel_work_sync(&lut->resize);
size = BIT(lut->ht_bits);
for (i = 0; i < size; i++) {
struct i915_vma *vma;
hlist_for_each_entry(vma, &lut->ht[i], ctx_node) {
vma->obj->vma_hashed = NULL;
vma->ctx = NULL;
i915_vma_put(vma);
}
}
kvfree(lut->ht);
}
void i915_gem_context_free(struct kref *ctx_ref)
{
struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
@ -101,6 +167,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
trace_i915_context_free(ctx);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
vma_lut_free(ctx);
i915_ppgtt_put(ctx->ppgtt);
for (i = 0; i < I915_NUM_ENGINES; i++) {
@ -118,6 +185,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
kfree(ctx->name);
put_pid(ctx->pid);
list_del(&ctx->link);
ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
@ -201,13 +269,24 @@ __create_hw_context(struct drm_i915_private *dev_priv,
ctx->i915 = dev_priv;
ctx->priority = I915_PRIORITY_NORMAL;
ctx->vma_lut.ht_bits = VMA_HT_BITS;
ctx->vma_lut.ht_size = BIT(VMA_HT_BITS);
BUILD_BUG_ON(BIT(VMA_HT_BITS) == I915_CTX_RESIZE_IN_PROGRESS);
ctx->vma_lut.ht = kcalloc(ctx->vma_lut.ht_size,
sizeof(*ctx->vma_lut.ht),
GFP_KERNEL);
if (!ctx->vma_lut.ht)
goto err_out;
INIT_WORK(&ctx->vma_lut.resize, resize_vma_ht);
/* Default context will never have a file_priv */
ret = DEFAULT_CONTEXT_HANDLE;
if (file_priv) {
ret = idr_alloc(&file_priv->context_idr, ctx,
DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
if (ret < 0)
goto err_out;
goto err_lut;
}
ctx->user_handle = ret;
@ -248,6 +327,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
err_pid:
put_pid(ctx->pid);
idr_remove(&file_priv->context_idr, ctx->user_handle);
err_lut:
kvfree(ctx->vma_lut.ht);
err_out:
context_close(ctx);
return ERR_PTR(ret);
@ -1034,9 +1115,6 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
if (args->flags || args->pad)
return -EINVAL;
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
return -EPERM;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;


@ -143,6 +143,32 @@ struct i915_gem_context {
/** ggtt_offset_bias: placement restriction for context objects */
u32 ggtt_offset_bias;
struct i915_gem_context_vma_lut {
/** ht_size: last request size to allocate the hashtable for. */
unsigned int ht_size;
#define I915_CTX_RESIZE_IN_PROGRESS BIT(0)
/** ht_bits: real log2(size) of hashtable. */
unsigned int ht_bits;
/** ht_count: current number of entries inside the hashtable */
unsigned int ht_count;
/** ht: the array of buckets comprising the simple hashtable */
struct hlist_head *ht;
/**
* resize: After an execbuf completes, we check the load factor
* of the hashtable. If the hashtable is too full, or too empty,
* we schedule a task to resize the hashtable. During the
* resize, the entries are moved between different buckets and
* so we cannot simultaneously read the hashtable as it is
* being resized (unlike rhashtable). Therefore we treat the
* active work as a strong barrier, pausing a subsequent
* execbuf to wait for the resize worker to complete, if
* required.
*/
struct work_struct resize;
} vma_lut;
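
Concretely, a reader treats ht_size as that barrier. A minimal lookup sketch under the assumptions above (the helper name is made up; the real lookup lives in the execbuf path):

    static struct i915_vma *
    lookup_vma_sketch(struct i915_gem_context *ctx, u32 handle)
    {
            struct i915_gem_context_vma_lut *lut = &ctx->vma_lut;
            struct i915_vma *vma;

            /* Pairs with the smp_store_release() in the resize worker. */
            if (smp_load_acquire(&lut->ht_size) & I915_CTX_RESIZE_IN_PROGRESS)
                    flush_work(&lut->resize);

            hlist_for_each_entry(vma,
                                 &lut->ht[hash_32(handle, lut->ht_bits)],
                                 ctx_node)
                    if (vma->ctx_handle == handle)
                            return vma;

            return NULL;
    }
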
/** engine: per-engine logical HW state */
struct intel_context {
struct i915_vma *state;


@ -50,6 +50,29 @@ static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
return true;
}
static int ggtt_flush(struct drm_i915_private *i915)
{
int err;
/* Not everything in the GGTT is tracked via vma (otherwise we
* could evict as required with minimal stalling) so we are forced
* to idle the GPU and explicitly retire outstanding requests in
* the hopes that we can then remove contexts and the like only
* bound by their active reference.
*/
err = i915_gem_switch_to_kernel_context(i915);
if (err)
return err;
err = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (err)
return err;
return 0;
}
static bool
mark_free(struct drm_mm_scan *scan,
struct i915_vma *vma,
@ -59,13 +82,10 @@ mark_free(struct drm_mm_scan *scan,
if (i915_vma_is_pinned(vma))
return false;
if (WARN_ON(!list_empty(&vma->exec_list)))
return false;
if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
return false;
list_add(&vma->exec_list, unwind);
list_add(&vma->evict_link, unwind);
return drm_mm_scan_add_block(scan, &vma->node);
}
@ -157,11 +177,9 @@ search_again:
} while (*++phase);
/* Nothing found, clean up and bail out! */
list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
ret = drm_mm_scan_remove_block(&scan, &vma->node);
BUG_ON(ret);
INIT_LIST_HEAD(&vma->exec_list);
}
/* Can we unpin some objects such as idle hw contents,
@ -180,19 +198,7 @@ search_again:
return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
}
/* Not everything in the GGTT is tracked via vma (otherwise we
* could evict as required with minimal stalling) so we are forced
* to idle the GPU and explicitly retire outstanding requests in
* the hopes that we can then remove contexts and the like only
* bound by their active reference.
*/
ret = i915_gem_switch_to_kernel_context(dev_priv);
if (ret)
return ret;
ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
ret = ggtt_flush(dev_priv);
if (ret)
return ret;
@ -205,21 +211,16 @@ found:
* calling unbind (which may remove the active reference
* of any of our objects, thus corrupting the list).
*/
list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
if (drm_mm_scan_remove_block(&scan, &vma->node))
__i915_vma_pin(vma);
else
list_del_init(&vma->exec_list);
list_del(&vma->evict_link);
}
/* Unbinding will emit any required flushes */
ret = 0;
while (!list_empty(&eviction_list)) {
vma = list_first_entry(&eviction_list,
struct i915_vma,
exec_list);
list_del_init(&vma->exec_list);
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
ret = i915_vma_unbind(vma);
@ -315,7 +316,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
}
/* Overlap of objects in the same batch? */
if (i915_vma_is_pinned(vma) || !list_empty(&vma->exec_list)) {
if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
if (vma->exec_entry &&
vma->exec_entry->flags & EXEC_OBJECT_PINNED)
@ -332,11 +333,10 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* reference) another in our eviction list.
*/
__i915_vma_pin(vma);
list_add(&vma->exec_list, &eviction_list);
list_add(&vma->evict_link, &eviction_list);
}
list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
list_del_init(&vma->exec_list);
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
ret = i915_vma_unbind(vma);
@ -348,10 +348,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
/**
* i915_gem_evict_vm - Evict all idle vmas from a vm
* @vm: Address space to cleanse
* @do_idle: Boolean directing whether to idle first.
*
* This function evicts all idles vmas from a vm. If all unpinned vmas should be
* evicted the @do_idle needs to be set to true.
* This function evicts all vmas from a vm.
*
* This is used by the execbuf code as a last-ditch effort to defragment the
* address space.
@ -359,37 +357,50 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
int i915_gem_evict_vm(struct i915_address_space *vm)
{
struct list_head *phases[] = {
&vm->inactive_list,
&vm->active_list,
NULL
}, **phase;
struct list_head eviction_list;
struct i915_vma *vma, *next;
int ret;
lockdep_assert_held(&vm->i915->drm.struct_mutex);
trace_i915_gem_evict_vm(vm);
if (do_idle) {
struct drm_i915_private *dev_priv = vm->i915;
if (i915_is_ggtt(vm)) {
ret = i915_gem_switch_to_kernel_context(dev_priv);
if (ret)
return ret;
}
ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
/* Switch back to the default context in order to unpin
* the existing context objects. However, such objects only
* pin themselves inside the global GTT and performing the
* switch otherwise is ineffective.
*/
if (i915_is_ggtt(vm)) {
ret = ggtt_flush(vm->i915);
if (ret)
return ret;
WARN_ON(!list_empty(&vm->active_list));
}
list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
if (!i915_vma_is_pinned(vma))
WARN_ON(i915_vma_unbind(vma));
INIT_LIST_HEAD(&eviction_list);
phase = phases;
do {
list_for_each_entry(vma, *phase, vm_link) {
if (i915_vma_is_pinned(vma))
continue;
return 0;
__i915_vma_pin(vma);
list_add(&vma->evict_link, &eviction_list);
}
} while (*++phase);
ret = 0;
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
ret = i915_vma_unbind(vma);
}
return ret;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)

(Diff for this file not shown because it is too large.)


@ -1884,7 +1884,7 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
*/
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk */
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */
if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
else if (IS_CHERRYVIEW(dev_priv))
@ -3095,13 +3095,17 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
void i915_ggtt_enable_guc(struct drm_i915_private *i915)
{
GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
i915->ggtt.invalidate = guc_ggtt_invalidate;
}
void i915_ggtt_disable_guc(struct drm_i915_private *i915)
{
if (i915->ggtt.invalidate == guc_ggtt_invalidate)
i915->ggtt.invalidate = gen6_ggtt_invalidate;
/* We should only be called after i915_ggtt_enable_guc() */
GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
i915->ggtt.invalidate = gen6_ggtt_invalidate;
}
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
@ -3398,6 +3402,9 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
if (err != -ENOSPC)
return err;
if (flags & PIN_NOEVICT)
return -ENOSPC;
err = i915_gem_evict_for_node(vm, node, flags);
if (err == 0)
err = drm_mm_reserve_node(&vm->mm, node);
@ -3512,6 +3519,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
if (err != -ENOSPC)
return err;
if (flags & PIN_NOEVICT)
return -ENOSPC;
/* No free space, pick a slot at random.
*
* There is a pathological case here using a GTT shared between


@ -255,6 +255,7 @@ struct i915_address_space {
struct drm_i915_file_private *file;
struct list_head global_link;
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
u64 reserved; /* size addr space reserved */
bool closed;
@ -588,6 +589,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_MAPPABLE BIT(1)
#define PIN_ZONE_4G BIT(2)
#define PIN_NONFAULT BIT(3)
#define PIN_NOEVICT BIT(4)
#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */
#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */


@ -188,9 +188,11 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_object_internal_ops);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
obj->cache_coherent = i915_gem_object_is_coherent(obj);
obj->cache_dirty = !obj->cache_coherent;
return obj;
}


@ -68,9 +68,25 @@ struct drm_i915_gem_object {
const struct drm_i915_gem_object_ops *ops;
/** List of VMAs backed by this object */
/**
* @vma_list: List of VMAs backed by this object
*
* The VMA on this list are ordered by type, all GGTT vma are placed
* at the head and all ppGTT vma are placed at the tail. The different
* types of GGTT vma are unordered between themselves, use the
* @vma_tree (which has a defined order between all VMA) to find an
* exact match.
*/
struct list_head vma_list;
/**
* @vma_tree: Ordered tree of VMAs backed by this object
*
* All VMA created for this object are placed in the @vma_tree for
* fast retrieval via a binary search in i915_vma_instance().
* They are also added to @vma_list for easy iteration.
*/
struct rb_root vma_tree;
struct i915_vma *vma_hashed;
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
@ -85,9 +101,6 @@ struct drm_i915_gem_object {
*/
struct list_head userfault_link;
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
struct list_head batch_pool_link;
I915_SELFTEST_DECLARE(struct list_head st_link);
@ -106,6 +119,7 @@ struct drm_i915_gem_object {
unsigned long gt_ro:1;
unsigned int cache_level:3;
unsigned int cache_dirty:1;
unsigned int cache_coherent:1;
atomic_t frontbuffer_bits;
unsigned int frontbuffer_ggtt_origin; /* write once */


@ -62,7 +62,7 @@ static bool i915_fence_enable_signaling(struct dma_fence *fence)
return false;
intel_engine_enable_signaling(to_request(fence), true);
return true;
return !i915_fence_signaled(fence);
}
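
Returning the live state rather than an unconditional true matters to the dma-fence core: a false return from .enable_signaling means the fence already signaled, which callers see as -ENOENT from dma_fence_add_callback(). A sketch of that caller-side contract:

    static void on_signaled(struct dma_fence *fence, struct dma_fence_cb *cb)
    {
            pr_info("fence signaled\n");
    }

    static void watch_fence_sketch(struct dma_fence *fence,
                                   struct dma_fence_cb *cb)
    {
            if (dma_fence_add_callback(fence, cb, on_signaled) == -ENOENT)
                    on_signaled(fence, cb); /* already signaled: run inline */
    }
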
static signed long i915_fence_wait(struct dma_fence *fence,
@ -683,7 +683,6 @@ static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
struct drm_i915_gem_request *from)
{
u32 seqno;
int ret;
GEM_BUG_ON(to == from);
@ -707,19 +706,15 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
return ret < 0 ? ret : 0;
}
seqno = i915_gem_request_global_seqno(from);
if (!seqno)
goto await_dma_fence;
if (to->engine->semaphore.sync_to) {
u32 seqno;
if (!to->engine->semaphore.sync_to) {
if (!__i915_gem_request_started(from, seqno))
goto await_dma_fence;
if (!__i915_spin_request(from, seqno, TASK_INTERRUPTIBLE, 2))
goto await_dma_fence;
} else {
GEM_BUG_ON(!from->engine->semaphore.signal);
seqno = i915_gem_request_global_seqno(from);
if (!seqno)
goto await_dma_fence;
if (seqno <= to->timeline->global_sync[from->engine->id])
return 0;
@ -729,10 +724,9 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
return ret;
to->timeline->global_sync[from->engine->id] = seqno;
return 0;
}
return 0;
await_dma_fence:
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,


@ -38,16 +38,21 @@
static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
{
switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
case MUTEX_TRYLOCK_FAILED:
return false;
case MUTEX_TRYLOCK_SUCCESS:
*unlock = true;
return true;
case MUTEX_TRYLOCK_RECURSIVE:
*unlock = false;
return true;
case MUTEX_TRYLOCK_FAILED:
do {
cpu_relax();
if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
case MUTEX_TRYLOCK_SUCCESS:
*unlock = true;
return true;
}
} while (!need_resched());
return false;
}
BUG();
@ -332,6 +337,15 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
sc->nr_to_scan - freed,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
if (freed < sc->nr_to_scan && current_is_kswapd()) {
intel_runtime_pm_get(dev_priv);
freed += i915_gem_shrink(dev_priv,
sc->nr_to_scan - freed,
I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
intel_runtime_pm_put(dev_priv);
}
shrinker_unlock(dev_priv, unlock);


@ -590,6 +590,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
obj->stolen = stolen;
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
obj->cache_coherent = true; /* assumptions! more like cache_oblivious */
if (i915_gem_object_pin_pages(obj))
goto cleanup;


@ -378,7 +378,7 @@ __i915_mm_struct_free(struct kref *kref)
mutex_unlock(&mm->i915->mm_lock);
INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
schedule_work(&mm->work);
queue_work(mm->i915->mm.userptr_wq, &mm->work);
}
static void
@ -598,7 +598,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
get_task_struct(work->task);
INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
schedule_work(&work->work);
queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);
return ERR_PTR(-EAGAIN);
}
@ -802,9 +802,11 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
drm_gem_private_object_init(dev, &obj->base, args->user_size);
i915_gem_object_init(obj, &i915_gem_userptr_ops);
obj->cache_level = I915_CACHE_LLC;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_LLC;
obj->cache_coherent = i915_gem_object_is_coherent(obj);
obj->cache_dirty = !obj->cache_coherent;
obj->userptr.ptr = args->user_ptr;
obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
@ -828,8 +830,20 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
return 0;
}
void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
mutex_init(&dev_priv->mm_lock);
hash_init(dev_priv->mm_structs);
dev_priv->mm.userptr_wq =
alloc_workqueue("i915-userptr-acquire", WQ_HIGHPRI, 0);
if (!dev_priv->mm.userptr_wq)
return -ENOMEM;
return 0;
}
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
destroy_workqueue(dev_priv->mm.userptr_wq);
}


@ -105,7 +105,7 @@ static int __reserve_doorbell(struct i915_guc_client *client)
end += offset;
}
id = find_next_zero_bit(client->guc->doorbell_bitmap, offset, end);
id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
if (id == end)
return -ENOSPC;
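
The hunk above swaps the last two arguments into the order the kernel API actually takes: find_next_zero_bit(addr, size, offset) scans [offset, size) and returns size when no zero bit is found. A minimal usage sketch:

    static int first_free_doorbell_sketch(const unsigned long *bitmap,
                                          unsigned int offset,
                                          unsigned int end)
    {
            unsigned int id = find_next_zero_bit(bitmap, end, offset);

            return id == end ? -ENOSPC : id; /* 'end' means bitmap full */
    }
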


@ -2548,7 +2548,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
HAS_PCH_CNP(dev_priv))
spt_irq_handler(dev_priv, iir);
else
cpt_irq_handler(dev_priv, iir);
@ -4289,7 +4290,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
HAS_PCH_CNP(dev_priv))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;

(Diff for this file not shown because it is too large.)


@ -0,0 +1,40 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_BDW_H__
#define __I915_OA_BDW_H__
extern int i915_oa_n_builtin_metric_sets_bdw;
extern int i915_oa_select_metric_set_bdw(struct drm_i915_private *dev_priv);
extern int i915_perf_register_sysfs_bdw(struct drm_i915_private *dev_priv);
extern void i915_perf_unregister_sysfs_bdw(struct drm_i915_private *dev_priv);
#endif

(Diff for this file not shown because it is too large.)


@ -0,0 +1,40 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_BXT_H__
#define __I915_OA_BXT_H__
extern int i915_oa_n_builtin_metric_sets_bxt;
extern int i915_oa_select_metric_set_bxt(struct drm_i915_private *dev_priv);
extern int i915_perf_register_sysfs_bxt(struct drm_i915_private *dev_priv);
extern void i915_perf_unregister_sysfs_bxt(struct drm_i915_private *dev_priv);
#endif

(Diff for this file not shown because it is too large.)


@ -0,0 +1,40 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_CHV_H__
#define __I915_OA_CHV_H__
extern int i915_oa_n_builtin_metric_sets_chv;
extern int i915_oa_select_metric_set_chv(struct drm_i915_private *dev_priv);
extern int i915_perf_register_sysfs_chv(struct drm_i915_private *dev_priv);
extern void i915_perf_unregister_sysfs_chv(struct drm_i915_private *dev_priv);
#endif

(Diff for this file not shown because it is too large.)


@ -0,0 +1,40 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_GLK_H__
#define __I915_OA_GLK_H__
extern int i915_oa_n_builtin_metric_sets_glk;
extern int i915_oa_select_metric_set_glk(struct drm_i915_private *dev_priv);
extern int i915_perf_register_sysfs_glk(struct drm_i915_private *dev_priv);
extern void i915_perf_unregister_sysfs_glk(struct drm_i915_private *dev_priv);
#endif


@ -1,5 +1,7 @@
/*
* Autogenerated file, DO NOT EDIT manually!
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
@ -47,6 +49,9 @@ static const struct i915_oa_reg b_counter_config_render_basic[] = {
{ _MMIO(0x2710), 0x00000000 },
};
static const struct i915_oa_reg flex_eu_config_render_basic[] = {
};
static const struct i915_oa_reg mux_config_render_basic[] = {
{ _MMIO(0x253a4), 0x01600000 },
{ _MMIO(0x25440), 0x00100000 },
@ -109,12 +114,21 @@ static const struct i915_oa_reg mux_config_render_basic[] = {
{ _MMIO(0x25428), 0x00042049 },
};
static const struct i915_oa_reg *
static int
get_render_basic_mux_config(struct drm_i915_private *dev_priv,
int *len)
const struct i915_oa_reg **regs,
int *lens)
{
*len = ARRAY_SIZE(mux_config_render_basic);
return mux_config_render_basic;
int n = 0;
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
regs[n] = mux_config_render_basic;
lens[n] = ARRAY_SIZE(mux_config_render_basic);
n++;
return n;
}
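
A hypothetical caller sketch for the new getter shape (the real consumer is the autogenerated select-metric-set code): the return value feeds n_mux_configs, and zero means the config does not apply to the running system:

    static int apply_render_basic_sketch(struct drm_i915_private *dev_priv)
    {
            dev_priv->perf.oa.n_mux_configs =
                    get_render_basic_mux_config(dev_priv,
                                                dev_priv->perf.oa.mux_regs,
                                                dev_priv->perf.oa.mux_regs_lens);

            return dev_priv->perf.oa.n_mux_configs ? 0 : -ENODEV;
    }
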
static const struct i915_oa_reg b_counter_config_compute_basic[] = {
@ -137,6 +151,9 @@ static const struct i915_oa_reg b_counter_config_compute_basic[] = {
{ _MMIO(0x236c), 0x00000000 },
};
static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
};
static const struct i915_oa_reg mux_config_compute_basic[] = {
{ _MMIO(0x253a4), 0x00000000 },
{ _MMIO(0x2681c), 0x01f00800 },
@ -172,12 +189,21 @@ static const struct i915_oa_reg mux_config_compute_basic[] = {
{ _MMIO(0x25428), 0x00000c03 },
};
static const struct i915_oa_reg *
static int
get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
int *len)
const struct i915_oa_reg **regs,
int *lens)
{
*len = ARRAY_SIZE(mux_config_compute_basic);
return mux_config_compute_basic;
int n = 0;
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
regs[n] = mux_config_compute_basic;
lens[n] = ARRAY_SIZE(mux_config_compute_basic);
n++;
return n;
}
static const struct i915_oa_reg b_counter_config_compute_extended[] = {
@ -203,6 +229,9 @@ static const struct i915_oa_reg b_counter_config_compute_extended[] = {
{ _MMIO(0x27ac), 0x0000fffe },
};
static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
};
static const struct i915_oa_reg mux_config_compute_extended[] = {
{ _MMIO(0x2681c), 0x3eb00800 },
{ _MMIO(0x26820), 0x00900000 },
@ -221,12 +250,21 @@ static const struct i915_oa_reg mux_config_compute_extended[] = {
{ _MMIO(0x25428), 0x00000000 },
};
static const struct i915_oa_reg *
static int
get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
int *len)
const struct i915_oa_reg **regs,
int *lens)
{
*len = ARRAY_SIZE(mux_config_compute_extended);
return mux_config_compute_extended;
int n = 0;
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
regs[n] = mux_config_compute_extended;
lens[n] = ARRAY_SIZE(mux_config_compute_extended);
n++;
return n;
}
static const struct i915_oa_reg b_counter_config_memory_reads[] = {
@ -260,6 +298,9 @@ static const struct i915_oa_reg b_counter_config_memory_reads[] = {
{ _MMIO(0x27ac), 0x0000fc00 },
};
static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
};
static const struct i915_oa_reg mux_config_memory_reads[] = {
{ _MMIO(0x253a4), 0x34300000 },
{ _MMIO(0x25440), 0x2d800000 },
@ -281,12 +322,21 @@ static const struct i915_oa_reg mux_config_memory_reads[] = {
{ _MMIO(0x25428), 0x00000000 },
};
static const struct i915_oa_reg *
static int
get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
int *len)
const struct i915_oa_reg **regs,
int *lens)
{
*len = ARRAY_SIZE(mux_config_memory_reads);
return mux_config_memory_reads;
int n = 0;
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
regs[n] = mux_config_memory_reads;
lens[n] = ARRAY_SIZE(mux_config_memory_reads);
n++;
return n;
}
static const struct i915_oa_reg b_counter_config_memory_writes[] = {
@ -320,6 +370,9 @@ static const struct i915_oa_reg b_counter_config_memory_writes[] = {
{ _MMIO(0x27ac), 0x0000fc00 },
};
static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
};
static const struct i915_oa_reg mux_config_memory_writes[] = {
{ _MMIO(0x253a4), 0x34300000 },
{ _MMIO(0x25440), 0x01500000 },
@ -341,12 +394,21 @@ static const struct i915_oa_reg mux_config_memory_writes[] = {
{ _MMIO(0x25428), 0x00000000 },
};
static const struct i915_oa_reg *
static int
get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
int *len)
const struct i915_oa_reg **regs,
int *lens)
{
*len = ARRAY_SIZE(mux_config_memory_writes);
return mux_config_memory_writes;
int n = 0;
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
regs[n] = mux_config_memory_writes;
lens[n] = ARRAY_SIZE(mux_config_memory_writes);
n++;
return n;
}
static const struct i915_oa_reg b_counter_config_sampler_balance[] = {
@ -358,6 +420,9 @@ static const struct i915_oa_reg b_counter_config_sampler_balance[] = {
{ _MMIO(0x2724), 0x00800000 },
};
static const struct i915_oa_reg flex_eu_config_sampler_balance[] = {
};
static const struct i915_oa_reg mux_config_sampler_balance[] = {
{ _MMIO(0x2eb9c), 0x01906400 },
{ _MMIO(0x2fb9c), 0x01906400 },
@ -401,31 +466,40 @@ static const struct i915_oa_reg mux_config_sampler_balance[] = {
{ _MMIO(0x25428), 0x0004a54a },
};
static const struct i915_oa_reg *
static int
get_sampler_balance_mux_config(struct drm_i915_private *dev_priv,
int *len)
const struct i915_oa_reg **regs,
int *lens)
{
*len = ARRAY_SIZE(mux_config_sampler_balance);
return mux_config_sampler_balance;
int n = 0;
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
regs[n] = mux_config_sampler_balance;
lens[n] = ARRAY_SIZE(mux_config_sampler_balance);
n++;
return n;
}
int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
{
dev_priv->perf.oa.mux_regs = NULL;
dev_priv->perf.oa.mux_regs_len = 0;
dev_priv->perf.oa.n_mux_configs = 0;
dev_priv->perf.oa.b_counter_regs = NULL;
dev_priv->perf.oa.b_counter_regs_len = 0;
switch (dev_priv->perf.oa.metrics_set) {
case METRIC_SET_ID_RENDER_BASIC:
dev_priv->perf.oa.mux_regs =
dev_priv->perf.oa.n_mux_configs =
get_render_basic_mux_config(dev_priv,
&dev_priv->perf.oa.mux_regs_len);
if (!dev_priv->perf.oa.mux_regs) {
DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set");
dev_priv->perf.oa.mux_regs,
dev_priv->perf.oa.mux_regs_lens);
if (dev_priv->perf.oa.n_mux_configs == 0) {
DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
/* EINVAL because *_register_sysfs already checked this
* and so it wouldn't have been advertised so userspace and
* and so it wouldn't have been advertised to userspace and
* so shouldn't have been requested
*/
return -EINVAL;
@ -436,16 +510,22 @@ int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.b_counter_regs_len =
ARRAY_SIZE(b_counter_config_render_basic);
dev_priv->perf.oa.flex_regs =
flex_eu_config_render_basic;
dev_priv->perf.oa.flex_regs_len =
ARRAY_SIZE(flex_eu_config_render_basic);
return 0;
case METRIC_SET_ID_COMPUTE_BASIC:
dev_priv->perf.oa.mux_regs =
dev_priv->perf.oa.n_mux_configs =
get_compute_basic_mux_config(dev_priv,
&dev_priv->perf.oa.mux_regs_len);
if (!dev_priv->perf.oa.mux_regs) {
DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set");
dev_priv->perf.oa.mux_regs,
dev_priv->perf.oa.mux_regs_lens);
if (dev_priv->perf.oa.n_mux_configs == 0) {
DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
/* EINVAL because *_register_sysfs already checked this
* and so it wouldn't have been advertised so userspace and
* and so it wouldn't have been advertised to userspace and
* so shouldn't have been requested
*/
return -EINVAL;
@ -456,16 +536,22 @@ int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.b_counter_regs_len =
ARRAY_SIZE(b_counter_config_compute_basic);
dev_priv->perf.oa.flex_regs =
flex_eu_config_compute_basic;
dev_priv->perf.oa.flex_regs_len =
ARRAY_SIZE(flex_eu_config_compute_basic);
return 0;
case METRIC_SET_ID_COMPUTE_EXTENDED:
dev_priv->perf.oa.mux_regs =
dev_priv->perf.oa.n_mux_configs =
get_compute_extended_mux_config(dev_priv,
&dev_priv->perf.oa.mux_regs_len);
if (!dev_priv->perf.oa.mux_regs) {
DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set");
dev_priv->perf.oa.mux_regs,
dev_priv->perf.oa.mux_regs_lens);
if (dev_priv->perf.oa.n_mux_configs == 0) {
DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
/* EINVAL because *_register_sysfs already checked this
* and so it wouldn't have been advertised so userspace and
* and so it wouldn't have been advertised to userspace and
* so shouldn't have been requested
*/
return -EINVAL;
@ -476,16 +562,22 @@ int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.b_counter_regs_len =
ARRAY_SIZE(b_counter_config_compute_extended);
dev_priv->perf.oa.flex_regs =
flex_eu_config_compute_extended;
dev_priv->perf.oa.flex_regs_len =
ARRAY_SIZE(flex_eu_config_compute_extended);
return 0;
case METRIC_SET_ID_MEMORY_READS:
dev_priv->perf.oa.mux_regs =
dev_priv->perf.oa.n_mux_configs =
get_memory_reads_mux_config(dev_priv,
&dev_priv->perf.oa.mux_regs_len);
if (!dev_priv->perf.oa.mux_regs) {
DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set");
dev_priv->perf.oa.mux_regs,
dev_priv->perf.oa.mux_regs_lens);
if (dev_priv->perf.oa.n_mux_configs == 0) {
DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
/* EINVAL because *_register_sysfs already checked this
* and so it wouldn't have been advertised so userspace and
* and so it wouldn't have been advertised to userspace and
* so shouldn't have been requested
*/
return -EINVAL;
@ -496,16 +588,22 @@ int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.b_counter_regs_len =
ARRAY_SIZE(b_counter_config_memory_reads);
dev_priv->perf.oa.flex_regs =
flex_eu_config_memory_reads;
dev_priv->perf.oa.flex_regs_len =
ARRAY_SIZE(flex_eu_config_memory_reads);
return 0;
case METRIC_SET_ID_MEMORY_WRITES:
dev_priv->perf.oa.mux_regs =
dev_priv->perf.oa.n_mux_configs =
get_memory_writes_mux_config(dev_priv,
&dev_priv->perf.oa.mux_regs_len);
if (!dev_priv->perf.oa.mux_regs) {
DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set");
dev_priv->perf.oa.mux_regs,
dev_priv->perf.oa.mux_regs_lens);
if (dev_priv->perf.oa.n_mux_configs == 0) {
DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
/* EINVAL because *_register_sysfs already checked this
* and so it wouldn't have been advertised so userspace and
* and so it wouldn't have been advertised to userspace and
* so shouldn't have been requested
*/
return -EINVAL;
@ -516,16 +614,22 @@ int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.b_counter_regs_len =
ARRAY_SIZE(b_counter_config_memory_writes);
dev_priv->perf.oa.flex_regs =
flex_eu_config_memory_writes;
dev_priv->perf.oa.flex_regs_len =
ARRAY_SIZE(flex_eu_config_memory_writes);
return 0;
case METRIC_SET_ID_SAMPLER_BALANCE:
dev_priv->perf.oa.mux_regs =
dev_priv->perf.oa.n_mux_configs =
get_sampler_balance_mux_config(dev_priv,
&dev_priv->perf.oa.mux_regs_len);
if (!dev_priv->perf.oa.mux_regs) {
DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_BALANCE\" metric set");
dev_priv->perf.oa.mux_regs,
dev_priv->perf.oa.mux_regs_lens);
if (dev_priv->perf.oa.n_mux_configs == 0) {
DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_BALANCE\" metric set\n");
/* EINVAL because *_register_sysfs already checked this
* and so it wouldn't have been advertised so userspace and
* and so it wouldn't have been advertised to userspace and
* so shouldn't have been requested
*/
return -EINVAL;
@ -536,6 +640,11 @@ int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.b_counter_regs_len =
ARRAY_SIZE(b_counter_config_sampler_balance);
dev_priv->perf.oa.flex_regs =
flex_eu_config_sampler_balance;
dev_priv->perf.oa.flex_regs_len =
ARRAY_SIZE(flex_eu_config_sampler_balance);
return 0;
default:
return -ENODEV;
@ -677,35 +786,36 @@ static struct attribute_group group_sampler_balance = {
int
i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv)
{
int mux_len;
const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
int ret = 0;
if (get_render_basic_mux_config(dev_priv, &mux_len)) {
if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
if (ret)
goto error_render_basic;
}
if (get_compute_basic_mux_config(dev_priv, &mux_len)) {
if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
if (ret)
goto error_compute_basic;
}
if (get_compute_extended_mux_config(dev_priv, &mux_len)) {
if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
if (ret)
goto error_compute_extended;
}
if (get_memory_reads_mux_config(dev_priv, &mux_len)) {
if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
if (ret)
goto error_memory_reads;
}
if (get_memory_writes_mux_config(dev_priv, &mux_len)) {
if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
if (ret)
goto error_memory_writes;
}
if (get_sampler_balance_mux_config(dev_priv, &mux_len)) {
if (get_sampler_balance_mux_config(dev_priv, mux_regs, mux_lens)) {
ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_balance);
if (ret)
goto error_sampler_balance;
@ -714,19 +824,19 @@ i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv)
return 0;
error_sampler_balance:
if (get_sampler_balance_mux_config(dev_priv, &mux_len))
if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
error_memory_writes:
if (get_sampler_balance_mux_config(dev_priv, &mux_len))
if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
error_memory_reads:
if (get_sampler_balance_mux_config(dev_priv, &mux_len))
if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
error_compute_extended:
if (get_sampler_balance_mux_config(dev_priv, &mux_len))
if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
error_compute_basic:
if (get_sampler_balance_mux_config(dev_priv, &mux_len))
if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
error_render_basic:
return ret;
@ -735,18 +845,19 @@ error_render_basic:
void
i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv)
{
int mux_len;
const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
if (get_render_basic_mux_config(dev_priv, &mux_len))
if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
if (get_compute_basic_mux_config(dev_priv, &mux_len))
if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
if (get_compute_extended_mux_config(dev_priv, &mux_len))
if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
if (get_memory_reads_mux_config(dev_priv, &mux_len))
if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
if (get_memory_writes_mux_config(dev_priv, &mux_len))
if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
if (get_sampler_balance_mux_config(dev_priv, &mux_len))
if (get_sampler_balance_mux_config(dev_priv, mux_regs, mux_lens))
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_balance);
}
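
The sysfs register path unwinds through goto labels, and the fix in this hunk makes each error label re-test the metric set it actually tears down (previously every label re-checked sampler_balance). A standalone sketch of the unwind idiom, with hypothetical create_group()/remove_group() stand-ins for sysfs_create_group()/sysfs_remove_group():

#include <stdio.h>

static int create_group(const char *name) { printf("create %s\n", name); return 0; }
static void remove_group(const char *name) { printf("remove %s\n", name); }

static int register_all(void)
{
	int ret;

	ret = create_group("render_basic");
	if (ret)
		goto error_render_basic;

	ret = create_group("compute_basic");
	if (ret)
		goto error_compute_basic;

	return 0;

	/* Unwind in reverse order; each label undoes the step before it. */
error_compute_basic:
	remove_group("render_basic");
error_render_basic:
	return ret;
}

int main(void) { return register_all(); }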

View file

@ -1,5 +1,7 @@
/*
* Autogenerated file, DO NOT EDIT manually!
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*

Diff not shown due to its large size. Load diff

View file

@ -0,0 +1,40 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_KBLGT2_H__
#define __I915_OA_KBLGT2_H__
extern int i915_oa_n_builtin_metric_sets_kblgt2;
extern int i915_oa_select_metric_set_kblgt2(struct drm_i915_private *dev_priv);
extern int i915_perf_register_sysfs_kblgt2(struct drm_i915_private *dev_priv);
extern void i915_perf_unregister_sysfs_kblgt2(struct drm_i915_private *dev_priv);
#endif

Diff not shown due to its large size. Load diff

View file

@ -0,0 +1,40 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_KBLGT3_H__
#define __I915_OA_KBLGT3_H__
extern int i915_oa_n_builtin_metric_sets_kblgt3;
extern int i915_oa_select_metric_set_kblgt3(struct drm_i915_private *dev_priv);
extern int i915_perf_register_sysfs_kblgt3(struct drm_i915_private *dev_priv);
extern void i915_perf_unregister_sysfs_kblgt3(struct drm_i915_private *dev_priv);
#endif

Diff not shown due to its large size. Load diff

View file

@ -0,0 +1,40 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_SKLGT2_H__
#define __I915_OA_SKLGT2_H__
extern int i915_oa_n_builtin_metric_sets_sklgt2;
extern int i915_oa_select_metric_set_sklgt2(struct drm_i915_private *dev_priv);
extern int i915_perf_register_sysfs_sklgt2(struct drm_i915_private *dev_priv);
extern void i915_perf_unregister_sysfs_sklgt2(struct drm_i915_private *dev_priv);
#endif

Diff not shown due to its large size. Load diff

View file

@ -0,0 +1,40 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_SKLGT3_H__
#define __I915_OA_SKLGT3_H__
extern int i915_oa_n_builtin_metric_sets_sklgt3;
extern int i915_oa_select_metric_set_sklgt3(struct drm_i915_private *dev_priv);
extern int i915_perf_register_sysfs_sklgt3(struct drm_i915_private *dev_priv);
extern void i915_perf_unregister_sysfs_sklgt3(struct drm_i915_private *dev_priv);
#endif

Diff not shown due to its large size. Load diff

View file

@ -0,0 +1,40 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_SKLGT4_H__
#define __I915_OA_SKLGT4_H__
extern int i915_oa_n_builtin_metric_sets_sklgt4;
extern int i915_oa_select_metric_set_sklgt4(struct drm_i915_private *dev_priv);
extern int i915_perf_register_sysfs_sklgt4(struct drm_i915_private *dev_priv);
extern void i915_perf_unregister_sysfs_sklgt4(struct drm_i915_private *dev_priv);
#endif

View file

@ -312,16 +312,17 @@ static const struct intel_device_info intel_haswell_info = {
.has_full_48bit_ppgtt = 1, \
.has_64bit_reloc = 1
#define BDW_PLATFORM \
BDW_FEATURES, \
.gen = 8, \
.platform = INTEL_BROADWELL
static const struct intel_device_info intel_broadwell_info = {
BDW_FEATURES,
.gen = 8,
.platform = INTEL_BROADWELL,
BDW_PLATFORM,
};
static const struct intel_device_info intel_broadwell_gt3_info = {
BDW_FEATURES,
.gen = 8,
.platform = INTEL_BROADWELL,
BDW_PLATFORM,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
@ -347,22 +348,20 @@ static const struct intel_device_info intel_cherryview_info = {
CHV_COLORS,
};
#define SKL_PLATFORM \
BDW_FEATURES, \
.gen = 9, \
.platform = INTEL_SKYLAKE, \
.has_csr = 1, \
.has_guc = 1, \
.ddb_size = 896
static const struct intel_device_info intel_skylake_info = {
BDW_FEATURES,
.platform = INTEL_SKYLAKE,
.gen = 9,
.has_csr = 1,
.has_guc = 1,
.ddb_size = 896,
SKL_PLATFORM,
};
static const struct intel_device_info intel_skylake_gt3_info = {
BDW_FEATURES,
.platform = INTEL_SKYLAKE,
.gen = 9,
.has_csr = 1,
.has_guc = 1,
.ddb_size = 896,
SKL_PLATFORM,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
@ -401,30 +400,54 @@ static const struct intel_device_info intel_broxton_info = {
static const struct intel_device_info intel_geminilake_info = {
GEN9_LP_FEATURES,
.platform = INTEL_GEMINILAKE,
.is_alpha_support = 1,
.ddb_size = 1024,
.color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
};
#define KBL_PLATFORM \
BDW_FEATURES, \
.gen = 9, \
.platform = INTEL_KABYLAKE, \
.has_csr = 1, \
.has_guc = 1, \
.ddb_size = 896
static const struct intel_device_info intel_kabylake_info = {
BDW_FEATURES,
.platform = INTEL_KABYLAKE,
.gen = 9,
.has_csr = 1,
.has_guc = 1,
.ddb_size = 896,
KBL_PLATFORM,
};
static const struct intel_device_info intel_kabylake_gt3_info = {
BDW_FEATURES,
.platform = INTEL_KABYLAKE,
.gen = 9,
.has_csr = 1,
.has_guc = 1,
.ddb_size = 896,
KBL_PLATFORM,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
#define CFL_PLATFORM \
.is_alpha_support = 1, \
BDW_FEATURES, \
.gen = 9, \
.platform = INTEL_COFFEELAKE, \
.has_csr = 1, \
.has_guc = 1, \
.ddb_size = 896
static const struct intel_device_info intel_coffeelake_info = {
CFL_PLATFORM,
};
static const struct intel_device_info intel_coffeelake_gt3_info = {
CFL_PLATFORM,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_cannonlake_info = {
BDW_FEATURES,
.is_alpha_support = 1,
.platform = INTEL_CANNONLAKE,
.gen = 10,
.ddb_size = 1024,
.has_csr = 1,
};
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
@ -469,6 +492,10 @@ static const struct pci_device_id pciidlist[] = {
INTEL_KBL_GT2_IDS(&intel_kabylake_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
INTEL_CFL_S_IDS(&intel_coffeelake_info),
INTEL_CFL_H_IDS(&intel_coffeelake_info),
INTEL_CFL_U_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
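
The PCI-ID table refactor folds the fields shared by GT variants into per-platform macros (SKL_PLATFORM, KBL_PLATFORM, CFL_PLATFORM), so a GT3 entry only states its extra ring mask. A minimal sketch of the macro-plus-designated-initializer pattern, with illustrative values:

#include <stdio.h>

struct device_info {
	int gen;
	const char *platform;
	int has_csr, has_guc, ddb_size;
	unsigned int ring_mask;
};

/* Shared fields expand inside each initializer, as SKL_PLATFORM does above. */
#define SKL_PLATFORM \
	.gen = 9, \
	.platform = "SKYLAKE", \
	.has_csr = 1, \
	.has_guc = 1, \
	.ddb_size = 896

static const struct device_info skylake_info = {
	SKL_PLATFORM,
};

static const struct device_info skylake_gt3_info = {
	SKL_PLATFORM,
	.ring_mask = 0x1f,	/* GT3 adds the second BSD ring */
};

int main(void)
{
	printf("%s gen%d ddb=%d rings=0x%x\n", skylake_gt3_info.platform,
	       skylake_gt3_info.gen, skylake_gt3_info.ddb_size,
	       skylake_gt3_info.ring_mask);
	return 0;
}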

Diff not shown due to its large size. Load diff

View file

@ -36,10 +36,6 @@
#define VGT_VERSION_MAJOR 1
#define VGT_VERSION_MINOR 0
#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
#define INTEL_VGT_IF_VERSION \
INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
/*
* notifications from guest to vgpu device model
*/
@ -55,8 +51,8 @@ enum vgt_g2v_type {
struct vgt_if {
u64 magic; /* VGT_MAGIC */
uint16_t version_major;
uint16_t version_minor;
u16 version_major;
u16 version_minor;
u32 vgt_id; /* ID of vGT instance */
u32 rsv1[12]; /* pad to offset 0x40 */
/*

View file

@ -58,10 +58,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
#define _PIPE3(pipe, ...) _PICK(pipe, __VA_ARGS__)
#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PIPE3(pipe, a, b, c))
#define _PORT3(port, ...) _PICK(port, __VA_ARGS__)
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PORT3(pipe, a, b, c))
#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
#define _PLL(pll, a, b) ((a) + (pll)*((b)-(a)))
#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
#define _MMIO_PORT6(port, a, b, c, d, e, f) _MMIO(_PICK(port, a, b, c, d, e, f))
#define _MMIO_PORT6_LN(port, ln, a0, a1, b, c, d, e, f) \
_MMIO(_PICK(port, a0, b, c, d, e, f) + (ln * (a1 - a0)))
#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
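
_MMIO_PIPE3/_MMIO_PORT3 now route through _PICK, which indexes a variadic argument list, and _MMIO_PORT6/_MMIO_PORT6_LN extend the same trick to six ports. A standalone sketch of a _PICK-style macro; the kernel's version indexes a const u32 array of register offsets the same way:

#include <stdio.h>

/* Index a literal argument list: _PICK(1, a, b, c) == b. */
#define _PICK(idx, ...) (((const unsigned int []){ __VA_ARGS__ })[idx])

#define PORT_REG(port, a, b, c) _PICK(port, a, b, c)

int main(void)
{
	int port;

	for (port = 0; port < 3; port++)
		printf("port %d -> 0x%x\n", port,
		       PORT_REG(port, 0x60400u, 0x61400u, 0x62400u));
	return 0;
}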
@ -653,6 +656,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_OACTXID _MMIO(0x2364)
#define GEN8_OA_DEBUG _MMIO(0x2B04)
#define GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1<<5)
#define GEN9_OA_DEBUG_INCLUDE_CLK_RATIO (1<<6)
#define GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1<<2)
#define GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1<<1)
#define GEN8_OACONTROL _MMIO(0x2B00)
#define GEN8_OA_REPORT_FORMAT_A12 (0<<2)
#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2<<2)
@ -674,6 +683,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1<<1)
#define GEN7_OABUFFER_RESUME (1<<0)
#define GEN8_OABUFFER_UDW _MMIO(0x23b4)
#define GEN8_OABUFFER _MMIO(0x2b14)
#define GEN7_OASTATUS1 _MMIO(0x2364)
@ -692,7 +702,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_OASTATUS_REPORT_LOST (1<<0)
#define GEN8_OAHEADPTR _MMIO(0x2B0C)
#define GEN8_OAHEADPTR_MASK 0xffffffc0
#define GEN8_OATAILPTR _MMIO(0x2B10)
#define GEN8_OATAILPTR_MASK 0xffffffc0
#define OABUFFER_SIZE_128K (0<<3)
#define OABUFFER_SIZE_256K (1<<3)
@ -705,7 +717,17 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OA_MEM_SELECT_GGTT (1<<0)
/*
* Flexible, Aggregate EU Counter Registers.
* Note: these aren't contiguous
*/
#define EU_PERF_CNTL0 _MMIO(0xe458)
#define EU_PERF_CNTL1 _MMIO(0xe558)
#define EU_PERF_CNTL2 _MMIO(0xe658)
#define EU_PERF_CNTL3 _MMIO(0xe758)
#define EU_PERF_CNTL4 _MMIO(0xe45c)
#define EU_PERF_CNTL5 _MMIO(0xe55c)
#define EU_PERF_CNTL6 _MMIO(0xe65c)
#define GDT_CHICKEN_BITS _MMIO(0x9840)
#define GT_NOA_ENABLE 0x00000080
@ -1065,6 +1087,7 @@ enum skl_disp_power_wells {
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_DDI_A_E,
GLK_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E,
CNL_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E,
SKL_DISP_PW_DDI_B,
SKL_DISP_PW_DDI_C,
SKL_DISP_PW_DDI_D,
@ -1072,6 +1095,10 @@ enum skl_disp_power_wells {
GLK_DISP_PW_AUX_A = 8,
GLK_DISP_PW_AUX_B,
GLK_DISP_PW_AUX_C,
CNL_DISP_PW_AUX_A = GLK_DISP_PW_AUX_A,
CNL_DISP_PW_AUX_B = GLK_DISP_PW_AUX_B,
CNL_DISP_PW_AUX_C = GLK_DISP_PW_AUX_C,
CNL_DISP_PW_AUX_D,
SKL_DISP_PW_1 = 14,
SKL_DISP_PW_2,
@ -1658,6 +1685,10 @@ enum skl_disp_power_wells {
#define PHY_RESERVED (1 << 7)
#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
#define CNL_PORT_CL1CM_DW5 _MMIO(0x162014)
#define CL_POWER_DOWN_ENABLE (1 << 4)
#define SUS_CLOCK_CONFIG (3 << 0)
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
@ -1682,6 +1713,146 @@ enum skl_disp_power_wells {
#define OCL2_LDOFUSE_PWR_DIS (1 << 6)
#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
#define _CNL_PORT_PCS_DW1_GRP_AE 0x162304
#define _CNL_PORT_PCS_DW1_GRP_B 0x162384
#define _CNL_PORT_PCS_DW1_GRP_C 0x162B04
#define _CNL_PORT_PCS_DW1_GRP_D 0x162B84
#define _CNL_PORT_PCS_DW1_GRP_F 0x162A04
#define _CNL_PORT_PCS_DW1_LN0_AE 0x162404
#define _CNL_PORT_PCS_DW1_LN0_B 0x162604
#define _CNL_PORT_PCS_DW1_LN0_C 0x162C04
#define _CNL_PORT_PCS_DW1_LN0_D 0x162E04
#define _CNL_PORT_PCS_DW1_LN0_F 0x162804
#define CNL_PORT_PCS_DW1_GRP(port) _MMIO_PORT6(port, \
_CNL_PORT_PCS_DW1_GRP_AE, \
_CNL_PORT_PCS_DW1_GRP_B, \
_CNL_PORT_PCS_DW1_GRP_C, \
_CNL_PORT_PCS_DW1_GRP_D, \
_CNL_PORT_PCS_DW1_GRP_AE, \
_CNL_PORT_PCS_DW1_GRP_F)
#define CNL_PORT_PCS_DW1_LN0(port) _MMIO_PORT6(port, \
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_B, \
_CNL_PORT_PCS_DW1_LN0_C, \
_CNL_PORT_PCS_DW1_LN0_D, \
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_F)
#define COMMON_KEEPER_EN (1 << 26)
#define _CNL_PORT_TX_DW2_GRP_AE 0x162348
#define _CNL_PORT_TX_DW2_GRP_B 0x1623C8
#define _CNL_PORT_TX_DW2_GRP_C 0x162B48
#define _CNL_PORT_TX_DW2_GRP_D 0x162BC8
#define _CNL_PORT_TX_DW2_GRP_F 0x162A48
#define _CNL_PORT_TX_DW2_LN0_AE 0x162448
#define _CNL_PORT_TX_DW2_LN0_B 0x162648
#define _CNL_PORT_TX_DW2_LN0_C 0x162C48
#define _CNL_PORT_TX_DW2_LN0_D 0x162E48
#define _CNL_PORT_TX_DW2_LN0_F 0x162A48
#define CNL_PORT_TX_DW2_GRP(port) _MMIO_PORT6(port, \
_CNL_PORT_TX_DW2_GRP_AE, \
_CNL_PORT_TX_DW2_GRP_B, \
_CNL_PORT_TX_DW2_GRP_C, \
_CNL_PORT_TX_DW2_GRP_D, \
_CNL_PORT_TX_DW2_GRP_AE, \
_CNL_PORT_TX_DW2_GRP_F)
#define CNL_PORT_TX_DW2_LN0(port) _MMIO_PORT6(port, \
_CNL_PORT_TX_DW2_LN0_AE, \
_CNL_PORT_TX_DW2_LN0_B, \
_CNL_PORT_TX_DW2_LN0_C, \
_CNL_PORT_TX_DW2_LN0_D, \
_CNL_PORT_TX_DW2_LN0_AE, \
_CNL_PORT_TX_DW2_LN0_F)
#define SWING_SEL_UPPER(x) ((x >> 3) << 15)
#define SWING_SEL_LOWER(x) ((x & 0x7) << 11)
#define RCOMP_SCALAR(x) ((x) << 0)
#define _CNL_PORT_TX_DW4_GRP_AE 0x162350
#define _CNL_PORT_TX_DW4_GRP_B 0x1623D0
#define _CNL_PORT_TX_DW4_GRP_C 0x162B50
#define _CNL_PORT_TX_DW4_GRP_D 0x162BD0
#define _CNL_PORT_TX_DW4_GRP_F 0x162A50
#define _CNL_PORT_TX_DW4_LN0_AE 0x162450
#define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0
#define _CNL_PORT_TX_DW4_LN0_B 0x162650
#define _CNL_PORT_TX_DW4_LN0_C 0x162C50
#define _CNL_PORT_TX_DW4_LN0_D 0x162E50
#define _CNL_PORT_TX_DW4_LN0_F 0x162850
#define CNL_PORT_TX_DW4_GRP(port) _MMIO_PORT6(port, \
_CNL_PORT_TX_DW4_GRP_AE, \
_CNL_PORT_TX_DW4_GRP_B, \
_CNL_PORT_TX_DW4_GRP_C, \
_CNL_PORT_TX_DW4_GRP_D, \
_CNL_PORT_TX_DW4_GRP_AE, \
_CNL_PORT_TX_DW4_GRP_F)
#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO_PORT6_LN(port, ln, \
_CNL_PORT_TX_DW4_LN0_AE, \
_CNL_PORT_TX_DW4_LN1_AE, \
_CNL_PORT_TX_DW4_LN0_B, \
_CNL_PORT_TX_DW4_LN0_C, \
_CNL_PORT_TX_DW4_LN0_D, \
_CNL_PORT_TX_DW4_LN0_AE, \
_CNL_PORT_TX_DW4_LN0_F)
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_2(x) ((x) << 6)
#define CURSOR_COEFF(x) ((x) << 0)
#define _CNL_PORT_TX_DW5_GRP_AE 0x162354
#define _CNL_PORT_TX_DW5_GRP_B 0x1623D4
#define _CNL_PORT_TX_DW5_GRP_C 0x162B54
#define _CNL_PORT_TX_DW5_GRP_D 0x162BD4
#define _CNL_PORT_TX_DW5_GRP_F 0x162A54
#define _CNL_PORT_TX_DW5_LN0_AE 0x162454
#define _CNL_PORT_TX_DW5_LN0_B 0x162654
#define _CNL_PORT_TX_DW5_LN0_C 0x162C54
#define _CNL_PORT_TX_DW5_LN0_D 0x162ED4
#define _CNL_PORT_TX_DW5_LN0_F 0x162854
#define CNL_PORT_TX_DW5_GRP(port) _MMIO_PORT6(port, \
_CNL_PORT_TX_DW5_GRP_AE, \
_CNL_PORT_TX_DW5_GRP_B, \
_CNL_PORT_TX_DW5_GRP_C, \
_CNL_PORT_TX_DW5_GRP_D, \
_CNL_PORT_TX_DW5_GRP_AE, \
_CNL_PORT_TX_DW5_GRP_F)
#define CNL_PORT_TX_DW5_LN0(port) _MMIO_PORT6(port, \
_CNL_PORT_TX_DW5_LN0_AE, \
_CNL_PORT_TX_DW5_LN0_B, \
_CNL_PORT_TX_DW5_LN0_C, \
_CNL_PORT_TX_DW5_LN0_D, \
_CNL_PORT_TX_DW5_LN0_AE, \
_CNL_PORT_TX_DW5_LN0_F)
#define TX_TRAINING_EN (1 << 31)
#define TAP3_DISABLE (1 << 29)
#define SCALING_MODE_SEL(x) ((x) << 18)
#define RTERM_SELECT(x) ((x) << 3)
#define _CNL_PORT_TX_DW7_GRP_AE 0x16235C
#define _CNL_PORT_TX_DW7_GRP_B 0x1623DC
#define _CNL_PORT_TX_DW7_GRP_C 0x162B5C
#define _CNL_PORT_TX_DW7_GRP_D 0x162BDC
#define _CNL_PORT_TX_DW7_GRP_F 0x162A5C
#define _CNL_PORT_TX_DW7_LN0_AE 0x16245C
#define _CNL_PORT_TX_DW7_LN0_B 0x16265C
#define _CNL_PORT_TX_DW7_LN0_C 0x162C5C
#define _CNL_PORT_TX_DW7_LN0_D 0x162EDC
#define _CNL_PORT_TX_DW7_LN0_F 0x16285C
#define CNL_PORT_TX_DW7_GRP(port) _MMIO_PORT6(port, \
_CNL_PORT_TX_DW7_GRP_AE, \
_CNL_PORT_TX_DW7_GRP_B, \
_CNL_PORT_TX_DW7_GRP_C, \
_CNL_PORT_TX_DW7_GRP_D, \
_CNL_PORT_TX_DW7_GRP_AE, \
_CNL_PORT_TX_DW7_GRP_F)
#define CNL_PORT_TX_DW7_LN0(port) _MMIO_PORT6(port, \
_CNL_PORT_TX_DW7_LN0_AE, \
_CNL_PORT_TX_DW7_LN0_B, \
_CNL_PORT_TX_DW7_LN0_C, \
_CNL_PORT_TX_DW7_LN0_D, \
_CNL_PORT_TX_DW7_LN0_AE, \
_CNL_PORT_TX_DW7_LN0_F)
#define N_SCALAR(x) ((x) << 24)
/* The spec defines this only for BXT PHY0, but lets assume that this
* would exist for PHY1 too if it had a second channel.
*/
@ -1690,6 +1861,23 @@ enum skl_disp_power_wells {
#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
#define CNL_PORT_COMP_DW0 _MMIO(0x162100)
#define COMP_INIT (1 << 31)
#define CNL_PORT_COMP_DW1 _MMIO(0x162104)
#define CNL_PORT_COMP_DW3 _MMIO(0x16210c)
#define PROCESS_INFO_DOT_0 (0 << 26)
#define PROCESS_INFO_DOT_1 (1 << 26)
#define PROCESS_INFO_DOT_4 (2 << 26)
#define PROCESS_INFO_MASK (7 << 26)
#define PROCESS_INFO_SHIFT 26
#define VOLTAGE_INFO_0_85V (0 << 24)
#define VOLTAGE_INFO_0_95V (1 << 24)
#define VOLTAGE_INFO_1_05V (2 << 24)
#define VOLTAGE_INFO_MASK (3 << 24)
#define VOLTAGE_INFO_SHIFT 24
#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
#define _PORT_REF_DW3_BC 0x6C18C
@ -2325,6 +2513,9 @@ enum skl_disp_power_wells {
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
#define GEN6_RCS_PWR_FSM _MMIO(0x22ac)
#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
/* Fuse readout registers for GT */
#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168)
#define CHV_FGT_DISABLE_SS0 (1 << 10)
@ -2507,10 +2698,6 @@ enum skl_disp_power_wells {
#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
#define FBC_STATUS2 _MMIO(0x43214)
#define IVB_FBC_COMPRESSION_MASK 0x7ff
#define BDW_FBC_COMPRESSION_MASK 0xfff
#define FBC_LL_SIZE (1536)
#define FBC_LLC_READ_CTRL _MMIO(0x9044)
@ -2539,7 +2726,7 @@ enum skl_disp_power_wells {
#define DPFC_INVAL_SEG_SHIFT (16)
#define DPFC_INVAL_SEG_MASK (0x07ff0000)
#define DPFC_COMP_SEG_SHIFT (0)
#define DPFC_COMP_SEG_MASK (0x000003ff)
#define DPFC_COMP_SEG_MASK (0x000007ff)
#define DPFC_STATUS2 _MMIO(0x3214)
#define DPFC_FENCE_YOFF _MMIO(0x3218)
#define DPFC_CHICKEN _MMIO(0x3224)
@ -2553,6 +2740,10 @@ enum skl_disp_power_wells {
#define DPFC_RESERVED (0x1FFFFF00)
#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c)
#define ILK_DPFC_STATUS _MMIO(0x43210)
#define ILK_DPFC_COMP_SEG_MASK 0x7ff
#define IVB_FBC_STATUS2 _MMIO(0x43214)
#define IVB_FBC_COMP_SEG_MASK 0x7ff
#define BDW_FBC_COMP_SEG_MASK 0xfff
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
@ -2626,9 +2817,10 @@ enum skl_disp_power_wells {
#define GMBUS_PIN_DPB 5 /* SDVO, HDMIB */
#define GMBUS_PIN_DPD 6 /* HDMID */
#define GMBUS_PIN_RESERVED 7 /* 7 reserved */
#define GMBUS_PIN_1_BXT 1
#define GMBUS_PIN_1_BXT 1 /* BXT+ (atom) and CNP+ (big core) */
#define GMBUS_PIN_2_BXT 2
#define GMBUS_PIN_3_BXT 3
#define GMBUS_PIN_4_CNP 4
#define GMBUS_NUM_PINS 7 /* including 0 */
#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
#define GMBUS_SW_CLR_INT (1<<31)
@ -6506,6 +6698,9 @@ enum {
#define GLK_CL1_PWR_DOWN (1 << 11)
#define GLK_CL2_PWR_DOWN (1 << 12)
#define CHICKEN_MISC_2 _MMIO(0x42084)
#define COMP_PWR_DOWN (1 << 23)
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define HSW_FBCQ_DIS (1 << 22)
@ -6546,6 +6741,9 @@ enum {
#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
#define SKL_DSSM _MMIO(0x51004)
#define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31)
#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14)
@ -6838,6 +7036,10 @@ enum {
#define FDL_TP2_TIMER_SHIFT 10
#define FDL_TP2_TIMER_MASK (3<<10)
#define RAWCLK_FREQ_MASK 0x3ff
#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
#define CNP_RAWCLK_DIV(div) ((div) << 16)
#define CNP_RAWCLK_FRAC_MASK (0xf << 26)
#define CNP_RAWCLK_FRAC(frac) ((frac) << 26)
#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
@ -7792,13 +7994,6 @@ enum {
#define SKL_FUSE_PG1_DIST_STATUS (1<<26)
#define SKL_FUSE_PG2_DIST_STATUS (1<<25)
/* Decoupled MMIO register pair for kernel driver */
#define GEN9_DECOUPLED_REG0_DW0 _MMIO(0xF00)
#define GEN9_DECOUPLED_REG0_DW1 _MMIO(0xF04)
#define GEN9_DECOUPLED_DW1_GO (1<<31)
#define GEN9_DECOUPLED_PD_SHIFT 28
#define GEN9_DECOUPLED_OP_SHIFT 24
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
@ -8107,6 +8302,61 @@ enum {
#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
/*
* CNL Clocks
*/
#define DPCLKA_CFGCR0 _MMIO(0x6C200)
#define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port)+10))
#define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << ((port)*2))
#define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port)*2)
#define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << ((port)*2))
/* CNL PLL */
#define DPLL0_ENABLE 0x46010
#define DPLL1_ENABLE 0x46014
#define PLL_ENABLE (1 << 31)
#define PLL_LOCK (1 << 30)
#define PLL_POWER_ENABLE (1 << 27)
#define PLL_POWER_STATE (1 << 26)
#define CNL_DPLL_ENABLE(pll) _MMIO_PLL(pll, DPLL0_ENABLE, DPLL1_ENABLE)
#define _CNL_DPLL0_CFGCR0 0x6C000
#define _CNL_DPLL1_CFGCR0 0x6C080
#define DPLL_CFGCR0_HDMI_MODE (1 << 30)
#define DPLL_CFGCR0_SSC_ENABLE (1 << 29)
#define DPLL_CFGCR0_LINK_RATE_MASK (0xf << 25)
#define DPLL_CFGCR0_LINK_RATE_2700 (0 << 25)
#define DPLL_CFGCR0_LINK_RATE_1350 (1 << 25)
#define DPLL_CFGCR0_LINK_RATE_810 (2 << 25)
#define DPLL_CFGCR0_LINK_RATE_1620 (3 << 25)
#define DPLL_CFGCR0_LINK_RATE_1080 (4 << 25)
#define DPLL_CFGCR0_LINK_RATE_2160 (5 << 25)
#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
#define CNL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR0, _CNL_DPLL1_CFGCR0)
#define _CNL_DPLL0_CFGCR1 0x6C004
#define _CNL_DPLL1_CFGCR1 0x6C084
#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
#define DPLL_CFGCR1_KDIV_1 (1 << 6)
#define DPLL_CFGCR1_KDIV_2 (2 << 6)
#define DPLL_CFGCR1_KDIV_4 (4 << 6)
#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
#define DPLL_CFGCR1_PDIV_2 (1 << 2)
#define DPLL_CFGCR1_PDIV_3 (2 << 2)
#define DPLL_CFGCR1_PDIV_5 (4 << 2)
#define DPLL_CFGCR1_PDIV_7 (8 << 2)
#define DPLL_CFGCR1_CENTRAL_FREQ (3 << 0)
#define CNL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR1, _CNL_DPLL1_CFGCR1)
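
DPLL_CFGCR1 packs the QDIV, KDIV and PDIV dividers into one register as shift-and-mask fields, and the KDIV/PDIV encodings are not the divider values themselves (PDIV_2, for instance, is encoding 1). A pack-and-extract sketch reusing the field definitions from this hunk:

#include <stdio.h>

#define DPLL_CFGCR1_QDIV_RATIO_MASK	(0xff << 10)
#define DPLL_CFGCR1_QDIV_RATIO(x)	((x) << 10)
#define DPLL_CFGCR1_KDIV(x)		((x) << 6)
#define DPLL_CFGCR1_PDIV(x)		((x) << 2)

int main(void)
{
	/* qdiv ratio 2, kdiv encoding 2 (divide by 2), pdiv encoding 1
	 * (divide by 2), matching the *_KDIV_2/_PDIV_2 defines above. */
	unsigned int val = DPLL_CFGCR1_QDIV_RATIO(2) |
			   DPLL_CFGCR1_KDIV(2) |
			   DPLL_CFGCR1_PDIV(1);

	printf("CFGCR1 = 0x%08x\n", val);
	printf("qdiv ratio = %u\n",
	       (val & DPLL_CFGCR1_QDIV_RATIO_MASK) >> 10);
	return 0;
}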
/* BXT display engine PLL */
#define BXT_DE_PLL_CTL _MMIO(0x6d000)
#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
@ -8115,6 +8365,8 @@ enum {
#define BXT_DE_PLL_ENABLE _MMIO(0x46070)
#define BXT_DE_PLL_PLL_ENABLE (1 << 31)
#define BXT_DE_PLL_LOCK (1 << 30)
#define CNL_CDCLK_PLL_RATIO(x) (x)
#define CNL_CDCLK_PLL_RATIO_MASK 0xff
/* GEN9 DC */
#define DC_STATE_EN _MMIO(0x45504)
@ -8148,6 +8400,7 @@ enum {
/* SFUSE_STRAP */
#define SFUSE_STRAP _MMIO(0xc2014)
#define SFUSE_STRAP_FUSE_LOCK (1<<13)
#define SFUSE_STRAP_RAW_FREQUENCY (1<<8)
#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
#define SFUSE_STRAP_CRT_DISABLED (1<<6)
#define SFUSE_STRAP_DDIB_DETECTED (1<<2)

View file

@ -99,6 +99,11 @@
__T; \
})
#define u64_to_ptr(T, x) ({ \
typecheck(u64, x); \
(T *)(uintptr_t)(x); \
})
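
The new u64_to_ptr() type-checks its argument at compile time and casts back to a pointer through uintptr_t, so a pointer stashed in a 64-bit handle round-trips cleanly even on 32-bit builds. A small GNU C sketch (statement expressions and typeof, as the kernel uses; this typecheck() is a simplified stand-in for the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Fail to compile unless x has exactly the named type. */
#define typecheck(type, x) \
	({ type __dummy; typeof(x) __dummy2; (void)(&__dummy == &__dummy2); 1; })

#define u64_to_ptr(T, x) ({ \
	typecheck(uint64_t, x); \
	(T *)(uintptr_t)(x); \
})

int main(void)
{
	int value = 42;
	uint64_t handle = (uintptr_t)&value;	/* pointer stashed in a u64 */
	int *back = u64_to_ptr(int, handle);

	printf("%d\n", *back);
	return 0;
}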
#define __mask_next_bit(mask) ({ \
int __idx = ffs(mask) - 1; \
mask &= ~BIT(__idx); \

View file

@ -60,8 +60,8 @@
*/
void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
uint64_t magic;
uint32_t version;
u64 magic;
u16 version_major;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
@ -69,10 +69,8 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
if (magic != VGT_MAGIC)
return;
version = INTEL_VGT_IF_VERSION_ENCODE(
__raw_i915_read16(dev_priv, vgtif_reg(version_major)),
__raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
if (version != INTEL_VGT_IF_VERSION) {
version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
if (version_major < VGT_VERSION_MAJOR) {
DRM_INFO("VGT interface version mismatch!\n");
return;
}
@ -92,6 +90,18 @@ struct _balloon_info_ {
static struct _balloon_info_ bl_info;
static void vgt_deballoon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node)
{
DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
node->start,
node->start + node->size,
node->size / 1024);
ggtt->base.reserved -= node->size;
drm_mm_remove_node(node);
}
/**
* intel_vgt_deballoon - deballoon reserved graphics address trunks
* @dev_priv: i915 device private data
@ -108,12 +118,8 @@ void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
DRM_DEBUG("VGT deballoon.\n");
for (i = 0; i < 4; i++) {
if (bl_info.space[i].allocated)
drm_mm_remove_node(&bl_info.space[i]);
}
memset(&bl_info, 0, sizeof(bl_info));
for (i = 0; i < 4; i++)
vgt_deballoon_space(&dev_priv->ggtt, &bl_info.space[i]);
}
static int vgt_balloon_space(struct i915_ggtt *ggtt,
@ -121,15 +127,20 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
unsigned long start, unsigned long end)
{
unsigned long size = end - start;
int ret;
if (start >= end)
return -EINVAL;
DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
return i915_gem_gtt_reserve(&ggtt->base, node,
size, start, I915_COLOR_UNEVICTABLE,
0);
ret = i915_gem_gtt_reserve(&ggtt->base, node,
size, start, I915_COLOR_UNEVICTABLE,
0);
if (!ret)
ggtt->base.reserved += size;
return ret;
}
/**
@ -222,7 +233,7 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
ret = vgt_balloon_space(ggtt, &bl_info.space[3],
unmappable_end, ggtt_end);
if (ret)
goto err;
goto err_upon_mappable;
}
/* Mappable graphic memory ballooning */
@ -231,7 +242,7 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
0, mappable_base);
if (ret)
goto err;
goto err_upon_unmappable;
}
if (mappable_end < ggtt->mappable_end) {
@ -239,14 +250,19 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
mappable_end, ggtt->mappable_end);
if (ret)
goto err;
goto err_below_mappable;
}
DRM_INFO("VGT balloon successfully\n");
return 0;
err_below_mappable:
vgt_deballoon_space(ggtt, &bl_info.space[0]);
err_upon_unmappable:
vgt_deballoon_space(ggtt, &bl_info.space[3]);
err_upon_mappable:
vgt_deballoon_space(ggtt, &bl_info.space[2]);
err:
DRM_ERROR("VGT balloon fail\n");
intel_vgt_deballoon(dev_priv);
return ret;
}
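
The ballooning rework does two things visible in this hunk: it accounts the reserved GGTT size (reserved += size when a range is ballooned, -= when it is deballooned) and it unwinds partially ballooned ranges through named error labels instead of one catch-all. A toy sketch of the accounting, with made-up addresses:

#include <stdio.h>

static unsigned long reserved;

static int balloon_space(unsigned long start, unsigned long end)
{
	if (start >= end)
		return -1;

	reserved += end - start;
	printf("balloon   [0x%lx - 0x%lx] %lu KiB\n",
	       start, end, (end - start) / 1024);
	return 0;
}

static void deballoon_space(unsigned long start, unsigned long end)
{
	reserved -= end - start;
	printf("deballoon [0x%lx - 0x%lx]\n", start, end);
}

int main(void)
{
	if (balloon_space(0x0, 0x100000))
		return 1;
	if (balloon_space(0x300000, 0x200000))	/* fails: start >= end */
		deballoon_space(0x0, 0x100000);	/* unwind the earlier range */

	printf("reserved now %lu bytes\n", reserved);
	return 0;
}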

View file

@ -85,12 +85,12 @@ vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&vma->exec_list);
for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
init_request_active(&vma->last_read[i], i915_vma_retire);
init_request_active(&vma->last_fence, NULL);
vma->vm = vm;
vma->obj = obj;
vma->resv = obj->resv;
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
@ -464,7 +464,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
size, obj->base.size,
flags & PIN_MAPPABLE ? "mappable" : "total",
end);
return -E2BIG;
return -ENOSPC;
}
ret = i915_gem_object_pin_pages(obj);
@ -577,7 +577,7 @@ err_unpin:
return ret;
}
void i915_vma_destroy(struct i915_vma *vma)
static void i915_vma_destroy(struct i915_vma *vma)
{
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(i915_vma_is_active(vma));
@ -591,11 +591,33 @@ void i915_vma_destroy(struct i915_vma *vma)
kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}
void i915_vma_unlink_ctx(struct i915_vma *vma)
{
struct i915_gem_context *ctx = vma->ctx;
if (ctx->vma_lut.ht_size & I915_CTX_RESIZE_IN_PROGRESS) {
cancel_work_sync(&ctx->vma_lut.resize);
ctx->vma_lut.ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
}
__hlist_del(&vma->ctx_node);
ctx->vma_lut.ht_count--;
if (i915_vma_is_ggtt(vma))
vma->obj->vma_hashed = NULL;
vma->ctx = NULL;
i915_vma_put(vma);
}
void i915_vma_close(struct i915_vma *vma)
{
GEM_BUG_ON(i915_vma_is_closed(vma));
vma->flags |= I915_VMA_CLOSED;
if (vma->ctx)
i915_vma_unlink_ctx(vma);
list_del(&vma->obj_link);
rb_erase(&vma->obj_node, &vma->obj->vma_tree);

View file

@ -50,6 +50,7 @@ struct i915_vma {
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
struct drm_i915_fence_reg *fence;
struct reservation_object *resv; /** Alias of obj->resv */
struct sg_table *pages;
void __iomem *iomap;
u64 size;
@ -99,16 +100,25 @@ struct i915_vma {
struct list_head obj_link; /* Link in the object's VMA list */
struct rb_node obj_node;
struct hlist_node obj_hash;
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
/** This vma's place in the execbuf reservation list */
struct list_head exec_link;
struct list_head reloc_link;
/** This vma's place in the eviction list */
struct list_head evict_link;
/**
* Used for performing relocations during execbuffer insertion.
*/
struct hlist_node exec_node;
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
struct hlist_node exec_node;
u32 exec_handle;
struct i915_gem_context *ctx;
struct hlist_node ctx_node;
u32 ctx_handle;
};
struct i915_vma *
@ -232,8 +242,8 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);
int __i915_vma_do_pin(struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);

View file

@ -36,44 +36,121 @@
#include "intel_drv.h"
/**
* intel_connector_atomic_get_property - fetch connector property value
* @connector: connector to fetch property for
* @state: state containing the property value
* @property: property to look up
* @val: pointer to write property value into
* intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
* @connector: Connector to get the property for.
* @state: Connector state to retrieve the property from.
* @property: Property to retrieve.
* @val: Return value for the property.
*
* The DRM core does not store shadow copies of properties for
* atomic-capable drivers. This entrypoint is used to fetch
* the current value of a driver-specific connector property.
* Returns the atomic property value for a digital connector.
*/
int
intel_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val)
int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val)
{
int i;
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
/*
* TODO: We only have atomic modeset for planes at the moment, so the
* crtc/connector code isn't quite ready yet. Until it's ready,
* continue to look up all property values in the DRM's shadow copy
* in obj->properties->values[].
*
* When the crtc/connector state work matures, this function should
* be updated to read the values out of the state structure instead.
*/
for (i = 0; i < connector->base.properties->count; i++) {
if (connector->base.properties->properties[i] == property) {
*val = connector->base.properties->values[i];
return 0;
}
if (property == dev_priv->force_audio_property)
*val = intel_conn_state->force_audio;
else if (property == dev_priv->broadcast_rgb_property)
*val = intel_conn_state->broadcast_rgb;
else {
DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
return -EINVAL;
}
return 0;
}
/**
* intel_digital_connector_atomic_set_property - hook for connector->atomic_set_property.
* @connector: Connector to set the property for.
* @state: Connector state to set the property on.
* @property: Property to set.
* @val: New value for the property.
*
* Sets the atomic property value for a digital connector.
*/
int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
if (property == dev_priv->force_audio_property) {
intel_conn_state->force_audio = val;
return 0;
}
if (property == dev_priv->broadcast_rgb_property) {
intel_conn_state->broadcast_rgb = val;
return 0;
}
DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
return -EINVAL;
}
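
Both new hooks dispatch on the property pointer itself and read or write the driver-private connector state, rather than walking DRM's shadow copy as the removed code did. A standalone sketch of that dispatch; the struct and property names here are illustrative, not the DRM API:

#include <stdio.h>

struct property { const char *name; };

static struct property force_audio_prop = { "audio" };
static struct property broadcast_rgb_prop = { "Broadcast RGB" };

struct conn_state { int force_audio; int broadcast_rgb; };

/* Identify the property by pointer comparison, as the i915 hooks do. */
static int get_property(const struct conn_state *s,
			const struct property *prop, long *val)
{
	if (prop == &force_audio_prop)
		*val = s->force_audio;
	else if (prop == &broadcast_rgb_prop)
		*val = s->broadcast_rgb;
	else
		return -1;	/* unknown property */
	return 0;
}

int main(void)
{
	struct conn_state s = { .force_audio = 1, .broadcast_rgb = 0 };
	long v;

	if (!get_property(&s, &force_audio_prop, &v))
		printf("%s = %ld\n", force_audio_prop.name, v);
	return 0;
}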
/*
int intel_digital_connector_atomic_check(struct drm_connector *conn,
struct drm_connector_state *new_state)
{
struct intel_digital_connector_state *new_conn_state =
to_intel_digital_connector_state(new_state);
struct drm_connector_state *old_state =
drm_atomic_get_old_connector_state(new_state->state, conn);
struct intel_digital_connector_state *old_conn_state =
to_intel_digital_connector_state(old_state);
struct drm_crtc_state *crtc_state;
if (!new_state->crtc)
return 0;
crtc_state = drm_atomic_get_new_crtc_state(new_state->state, new_state->crtc);
/*
* These properties are handled by fastset, and might not end
* up in a modeset.
*/
if (new_conn_state->force_audio != old_conn_state->force_audio ||
new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
crtc_state->mode_changed = true;
return 0;
}
/**
* intel_digital_connector_duplicate_state - duplicate connector state
* @connector: digital connector
*
* Allocates and returns a copy of the connector state (both common and
* digital connector specific) for the specified connector.
*
* Returns: The newly allocated connector state, or NULL on failure.
*/
struct drm_connector_state *
intel_digital_connector_duplicate_state(struct drm_connector *connector)
{
struct intel_digital_connector_state *state;
state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_connector_duplicate_state(connector, &state->base);
return &state->base;
}
/**
* intel_crtc_duplicate_state - duplicate crtc state
* @crtc: drm crtc
*
@ -248,7 +325,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
}
/* set scaler mode */
if (IS_GEMINILAKE(dev_priv)) {
if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
scaler_state->scalers[*scaler_id].mode = 0;
} else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
/*

View file

@ -234,7 +234,7 @@ static void enable_fake_irq(struct intel_breadcrumbs *b)
mod_timer(&b->hangcheck, wait_timeout());
}
static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
struct intel_engine_cs *engine =
container_of(b, struct intel_engine_cs, breadcrumbs);
@ -242,7 +242,7 @@ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
lockdep_assert_held(&b->irq_lock);
if (b->irq_armed)
return;
return false;
/* The breadcrumb irq will be disarmed on the interrupt after the
* waiters are signaled. This gives us a single interrupt window in
@ -260,7 +260,7 @@ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
* implementation to call intel_engine_wakeup()
* itself when it wants to simulate a user interrupt,
*/
return;
return true;
}
/* Since we are waiting on a request, the GPU should be busy
@ -278,6 +278,7 @@ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
}
enable_fake_irq(b);
return true;
}
static inline struct intel_wait *to_wait(struct rb_node *node)
@ -329,7 +330,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct rb_node **p, *parent, *completed;
bool first;
bool first, armed;
u32 seqno;
/* Insert the request into the retirement ordered list
@ -344,6 +345,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
* removing stale elements in the tree, we may be able to reduce the
* ping-pong between the old bottom-half and ourselves as first-waiter.
*/
armed = false;
first = true;
parent = NULL;
completed = NULL;
@ -399,7 +401,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
* in the unlocked read of b->irq_seqno_bh in the irq handler)
* and so we miss the wake up.
*/
__intel_breadcrumbs_enable_irq(b);
armed = __intel_breadcrumbs_enable_irq(b);
spin_unlock(&b->irq_lock);
}
@ -426,20 +428,24 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
GEM_BUG_ON(!b->irq_armed);
GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);
return first;
return armed;
}
bool intel_engine_add_wait(struct intel_engine_cs *engine,
struct intel_wait *wait)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
bool first;
bool armed;
spin_lock_irq(&b->rb_lock);
first = __intel_engine_add_wait(engine, wait);
armed = __intel_engine_add_wait(engine, wait);
spin_unlock_irq(&b->rb_lock);
if (armed)
return armed;
return first;
/* Make the caller recheck if its request has already started. */
return i915_seqno_passed(intel_engine_get_seqno(engine),
wait->seqno - 1);
}
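
The early return above leans on a wraparound-safe seqno comparison. A minimal standalone sketch of that idea (i915's helper is a signed-difference one-liner along these lines; its exact definition is not part of this diff):

#include <assert.h>
#include <stdint.h>

/* Compare ring seqnos in signed space so ordering survives the
 * u32 counter wrapping past 0xffffffff.
 */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(2, 1));           /* ordinary ordering */
	assert(!seqno_passed(1, 2));
	assert(seqno_passed(5, 0xfffffff0u)); /* across the wrap */
	return 0;
}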
static inline bool chain_wakeup(struct rb_node *rb, int priority)
@ -672,8 +678,6 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
{
struct intel_engine_cs *engine = request->engine;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct rb_node *parent, **p;
bool first;
u32 seqno;
/* Note that we may be called from an interrupt handler on another
@ -708,27 +712,36 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
*/
wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait);
/* Now insert ourselves into the retirement ordered list of signals
* on this engine. We track the oldest seqno as that will be the
* first signal to complete.
*/
parent = NULL;
first = true;
p = &b->signals.rb_node;
while (*p) {
parent = *p;
if (i915_seqno_passed(seqno,
to_signaler(parent)->signaling.wait.seqno)) {
p = &parent->rb_right;
first = false;
} else {
p = &parent->rb_left;
if (!__i915_gem_request_completed(request, seqno)) {
struct rb_node *parent, **p;
bool first;
/* Now insert ourselves into the retirement ordered list of
* signals on this engine. We track the oldest seqno as that
* will be the first signal to complete.
*/
parent = NULL;
first = true;
p = &b->signals.rb_node;
while (*p) {
parent = *p;
if (i915_seqno_passed(seqno,
to_signaler(parent)->signaling.wait.seqno)) {
p = &parent->rb_right;
first = false;
} else {
p = &parent->rb_left;
}
}
rb_link_node(&request->signaling.node, parent, p);
rb_insert_color(&request->signaling.node, &b->signals);
if (first)
rcu_assign_pointer(b->first_signal, request);
} else {
__intel_engine_remove_wait(engine, &request->signaling.wait);
i915_gem_request_put(request);
wakeup = false;
}
rb_link_node(&request->signaling.node, parent, p);
rb_insert_color(&request->signaling.node, &b->signals);
if (first)
rcu_assign_pointer(b->first_signal, request);
spin_unlock(&b->rb_lock);


@ -1400,6 +1400,280 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
bxt_set_cdclk(dev_priv, &cdclk_state);
}
static int cnl_calc_cdclk(int max_pixclk)
{
if (max_pixclk > 336000)
return 528000;
else if (max_pixclk > 168000)
return 336000;
else
return 168000;
}
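
A quick userspace check of that bucketing, with the thresholds copied from the function above (hedged sketch, not driver code):

#include <assert.h>

/* Mirror of cnl_calc_cdclk(): pixel clock -> cdclk bucket, in kHz. */
static int calc_cdclk(int max_pixclk)
{
	if (max_pixclk > 336000)
		return 528000;
	if (max_pixclk > 168000)
		return 336000;
	return 168000;
}

int main(void)
{
	assert(calc_cdclk(0) == 168000);      /* no active pipes */
	assert(calc_cdclk(300000) == 336000);
	assert(calc_cdclk(500000) == 528000);
	return 0;
}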
static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
u32 val;
if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
cdclk_state->ref = 24000;
else
cdclk_state->ref = 19200;
cdclk_state->vco = 0;
val = I915_READ(BXT_DE_PLL_ENABLE);
if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
return;
if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
return;
cdclk_state->vco = (val & CNL_CDCLK_PLL_RATIO_MASK) * cdclk_state->ref;
}
static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
u32 divider;
int div;
cnl_cdclk_pll_update(dev_priv, cdclk_state);
cdclk_state->cdclk = cdclk_state->ref;
if (cdclk_state->vco == 0)
return;
divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
switch (divider) {
case BXT_CDCLK_CD2X_DIV_SEL_1:
div = 2;
break;
case BXT_CDCLK_CD2X_DIV_SEL_2:
div = 4;
break;
default:
MISSING_CASE(divider);
return;
}
cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
}
static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
{
u32 val;
val = I915_READ(BXT_DE_PLL_ENABLE);
val &= ~BXT_DE_PLL_PLL_ENABLE;
I915_WRITE(BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
DRM_ERROR("timout waiting for CDCLK PLL unlock\n");
dev_priv->cdclk.hw.vco = 0;
}
static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
u32 val;
val = CNL_CDCLK_PLL_RATIO(ratio);
I915_WRITE(BXT_DE_PLL_ENABLE, val);
val |= BXT_DE_PLL_PLL_ENABLE;
I915_WRITE(BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
DRM_ERROR("timout waiting for CDCLK PLL lock\n");
dev_priv->cdclk.hw.vco = vco;
}
static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
u32 val, divider, pcu_ack;
int ret;
mutex_lock(&dev_priv->rps.hw_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
mutex_unlock(&dev_priv->rps.hw_lock);
if (ret) {
DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
ret);
return;
}
/* cdclk = vco / 2 / div{1,2} */
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
default:
WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
WARN_ON(vco != 0);
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
}
switch (cdclk) {
case 528000:
pcu_ack = 2;
break;
case 336000:
pcu_ack = 1;
break;
case 168000:
default:
pcu_ack = 0;
break;
}
if (dev_priv->cdclk.hw.vco != 0 &&
dev_priv->cdclk.hw.vco != vco)
cnl_cdclk_pll_disable(dev_priv);
if (dev_priv->cdclk.hw.vco != vco)
cnl_cdclk_pll_enable(dev_priv, vco);
val = divider | skl_cdclk_decimal(cdclk);
/*
* FIXME if only the cd2x divider needs changing, it could be done
* without shutting off the pipe (if only one pipe is active).
*/
val |= BXT_CDCLK_CD2X_PIPE_NONE;
I915_WRITE(CDCLK_CTL, val);
/* inform PCU of the change */
mutex_lock(&dev_priv->rps.hw_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
mutex_unlock(&dev_priv->rps.hw_lock);
intel_update_cdclk(dev_priv);
}
static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
int ratio;
if (cdclk == dev_priv->cdclk.hw.ref)
return 0;
switch (cdclk) {
default:
MISSING_CASE(cdclk);
case 168000:
case 336000:
ratio = dev_priv->cdclk.hw.ref == 19200 ? 35 : 28;
break;
case 528000:
ratio = dev_priv->cdclk.hw.ref == 19200 ? 55 : 44;
break;
}
return dev_priv->cdclk.hw.ref * ratio;
}
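
The ratio table can be sanity-checked against the cdclk = vco / 2 / div rule used by cnl_set_cdclk() above; a hedged arithmetic sketch (all values in kHz, exact division in every case):

#include <assert.h>

int main(void)
{
	/* 19.2 MHz reference */
	assert(19200 * 35 / 2 / 2 == 168000); /* ratio 35, cd2x div 2 */
	assert(19200 * 35 / 2 / 1 == 336000); /* ratio 35, cd2x div 1 */
	assert(19200 * 55 / 2 / 1 == 528000); /* ratio 55, cd2x div 1 */
	/* 24 MHz reference */
	assert(24000 * 28 / 2 / 1 == 336000); /* ratio 28 */
	assert(24000 * 44 / 2 / 1 == 528000); /* ratio 44 */
	return 0;
}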
static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
u32 cdctl, expected;
intel_update_cdclk(dev_priv);
if (dev_priv->cdclk.hw.vco == 0 ||
dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
goto sanitize;
/* DPLL okay; verify the cdclk
*
* Some BIOS versions leave an incorrect decimal frequency value and
* set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
* so sanitize this register.
*/
cdctl = I915_READ(CDCLK_CTL);
/*
* Let's ignore the pipe field, since BIOS could have configured the
* dividers both synching to an active pipe, or asynchronously
* (PIPE_NONE).
*/
cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
if (cdctl == expected)
/* All well; nothing to sanitize */
return;
sanitize:
DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
dev_priv->cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
dev_priv->cdclk.hw.vco = -1;
}
/**
* cnl_init_cdclk - Initialize CDCLK on CNL
* @dev_priv: i915 device
*
* Initialize CDCLK for CNL. This is generally
* done only during the display core initialization sequence,
* after which the DMC will take care of turning CDCLK off/on
* as needed.
*/
void cnl_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
cnl_sanitize_cdclk(dev_priv);
if (dev_priv->cdclk.hw.cdclk != 0 &&
dev_priv->cdclk.hw.vco != 0)
return;
cdclk_state = dev_priv->cdclk.hw;
cdclk_state.cdclk = cnl_calc_cdclk(0);
cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
cnl_set_cdclk(dev_priv, &cdclk_state);
}
/**
* cnl_uninit_cdclk - Uninitialize CDCLK on CNL
* @dev_priv: i915 device
*
* Uninitialize CDCLK for CNL. This is done only
* during the display core uninitialization sequence.
*/
void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
cdclk_state.cdclk = cdclk_state.ref;
cdclk_state.vco = 0;
cnl_set_cdclk(dev_priv, &cdclk_state);
}
/**
* intel_cdclk_state_compare - Determine if two CDCLK states differ
* @a: first CDCLK state
@ -1458,7 +1732,9 @@ static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
crtc_state->has_audio &&
crtc_state->port_clock >= 540000 &&
crtc_state->lane_count == 4) {
if (IS_GEMINILAKE(dev_priv))
if (IS_CANNONLAKE(dev_priv))
pixel_rate = max(316800, pixel_rate);
else if (IS_GEMINILAKE(dev_priv))
pixel_rate = max(2 * 316800, pixel_rate);
else
pixel_rate = max(432000, pixel_rate);
@ -1504,7 +1780,7 @@ static int intel_max_pixel_rate(struct drm_atomic_state *state)
pixel_rate = crtc_state->pixel_rate;
if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
pixel_rate =
bdw_adjust_min_pipe_pixel_rate(crtc_state,
pixel_rate);
@ -1665,6 +1941,40 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
return 0;
}
static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state =
to_intel_atomic_state(state);
int max_pixclk = intel_max_pixel_rate(state);
int cdclk, vco;
cdclk = cnl_calc_cdclk(max_pixclk);
vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
if (cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
cdclk, dev_priv->max_cdclk_freq);
return -EINVAL;
}
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
if (!intel_state->active_crtcs) {
cdclk = cnl_calc_cdclk(0);
vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
}
return 0;
}
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
@ -1696,7 +2006,9 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
*/
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
if (IS_GEN9_BC(dev_priv)) {
if (IS_CANNONLAKE(dev_priv)) {
dev_priv->max_cdclk_freq = 528000;
} else if (IS_GEN9_BC(dev_priv)) {
u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
@ -1780,6 +2092,30 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
}
static int cnp_rawclk(struct drm_i915_private *dev_priv)
{
u32 rawclk;
int divider, fraction;
if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
/* 24 MHz */
divider = 24000;
fraction = 0;
} else {
/* 19.2 MHz */
divider = 19000;
fraction = 200;
}
rawclk = CNP_RAWCLK_DIV((divider / 1000) - 1);
if (fraction)
rawclk |= CNP_RAWCLK_FRAC(DIV_ROUND_CLOSEST(1000,
fraction) - 1);
I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
return divider + fraction;
}
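
Worked through for both strap settings (a hedged sketch of the arithmetic only; the bit layout behind CNP_RAWCLK_DIV/CNP_RAWCLK_FRAC is not reproduced here):

#include <stdio.h>

int main(void)
{
	/* 19.2 MHz strap: divider 19000, fraction 200 */
	int div_field  = (19000 / 1000) - 1;         /* 18 */
	int frac_field = (1000 + 200 / 2) / 200 - 1; /* DIV_ROUND_CLOSEST(1000, 200) - 1 = 4 */
	printf("19.2 MHz: div=%d frac=%d rawclk=%d kHz\n",
	       div_field, frac_field, 19000 + 200);
	/* 24 MHz strap: divider 24000, no fractional part */
	printf("24 MHz:   div=%d rawclk=%d kHz\n",
	       (24000 / 1000) - 1, 24000);
	return 0;
}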
static int pch_rawclk(struct drm_i915_private *dev_priv)
{
return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
@ -1827,7 +2163,10 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
*/
void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_SPLIT(dev_priv))
if (HAS_PCH_CNP(dev_priv))
dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
dev_priv->rawclk_freq = pch_rawclk(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->rawclk_freq = vlv_hrawclk(dev_priv);
@ -1866,9 +2205,15 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.set_cdclk = skl_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
skl_modeset_calc_cdclk;
} else if (IS_CANNONLAKE(dev_priv)) {
dev_priv->display.set_cdclk = cnl_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
cnl_modeset_calc_cdclk;
}
if (IS_GEN9_BC(dev_priv))
if (IS_CANNONLAKE(dev_priv))
dev_priv->display.get_cdclk = cnl_get_cdclk;
else if (IS_GEN9_BC(dev_priv))
dev_priv->display.get_cdclk = skl_get_cdclk;
else if (IS_GEN9_LP(dev_priv))
dev_priv->display.get_cdclk = bxt_get_cdclk;


@ -37,6 +37,9 @@
#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
#define I915_CSR_CNL "i915/cnl_dmc_ver1_04.bin"
#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
@ -238,7 +241,7 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
u32 *payload = dev_priv->csr.dmc_payload;
uint32_t i, fw_size;
if (!IS_GEN9(dev_priv)) {
if (!HAS_CSR(dev_priv)) {
DRM_ERROR("No CSR support available for this platform\n");
return;
}
@ -289,9 +292,11 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
if (IS_GEMINILAKE(dev_priv)) {
if (IS_CANNONLAKE(dev_priv)) {
required_version = CNL_CSR_VERSION_REQUIRED;
} else if (IS_GEMINILAKE(dev_priv)) {
required_version = GLK_CSR_VERSION_REQUIRED;
} else if (IS_KABYLAKE(dev_priv)) {
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
required_version = KBL_CSR_VERSION_REQUIRED;
} else if (IS_SKYLAKE(dev_priv)) {
required_version = SKL_CSR_VERSION_REQUIRED;
@ -438,9 +443,11 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
if (IS_GEMINILAKE(dev_priv))
if (IS_CANNONLAKE(dev_priv))
csr->fw_path = I915_CSR_CNL;
else if (IS_GEMINILAKE(dev_priv))
csr->fw_path = I915_CSR_GLK;
else if (IS_KABYLAKE(dev_priv))
else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
csr->fw_path = I915_CSR_KBL;
else if (IS_SKYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;


@ -353,6 +353,146 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
{ 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */
};
struct cnl_ddi_buf_trans {
u32 dw2_swing_sel;
u32 dw7_n_scalar;
u32 dw4_cursor_coeff;
u32 dw4_post_cursor_2;
u32 dw4_post_cursor_1;
};
/* Voltage Swing Programming for VccIO 0.85V for DP */
static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_85V[] = {
/* NT mV Trans mV db */
{ 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
{ 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
{ 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
{ 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
{ 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
{ 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
{ 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
{ 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */
{ 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
{ 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
};
/* Voltage Swing Programming for VccIO 0.85V for HDMI */
static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_85V[] = {
/* NT mV Trans mV db */
{ 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
{ 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
{ 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
{ 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 */
{ 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
{ 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
};
/* Voltage Swing Programming for VccIO 0.85V for eDP */
static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_85V[] = {
/* NT mV Trans mV db */
{ 0xA, 0x66, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
{ 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
{ 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
{ 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
{ 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
{ 0xA, 0x66, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
{ 0xB, 0x70, 0x3C, 0x00, 0x03 }, /* 460 600 2.3 */
{ 0xC, 0x75, 0x3C, 0x00, 0x03 }, /* 537 700 2.3 */
{ 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
};
/* Voltage Swing Programming for VccIO 0.95V for DP */
static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_95V[] = {
/* NT mV Trans mV db */
{ 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
{ 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
{ 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
{ 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
{ 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
{ 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
{ 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
{ 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */
{ 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
{ 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
};
/* Voltage Swing Programming for VccIO 0.95V for HDMI */
static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_95V[] = {
/* NT mV Trans mV db */
{ 0xA, 0x5C, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
{ 0xB, 0x69, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
{ 0x5, 0x76, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
{ 0xA, 0x5E, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
{ 0xB, 0x69, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
{ 0xB, 0x79, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
{ 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */
{ 0x5, 0x76, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */
{ 0x6, 0x7D, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */
{ 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
};
/* Voltage Swing Programming for VccIO 0.95V for eDP */
static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_95V[] = {
/* NT mV Trans mV db */
{ 0xA, 0x61, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
{ 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
{ 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
{ 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
{ 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
{ 0xA, 0x61, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
{ 0xB, 0x68, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */
{ 0xC, 0x6E, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */
{ 0x4, 0x7F, 0x3A, 0x00, 0x05 }, /* 460 600 2.3 */
{ 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
};
/* Voltage Swing Programming for VccIO 1.05V for DP */
static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_1_05V[] = {
/* NT mV Trans mV db */
{ 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
{ 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
{ 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
{ 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 400 1050 8.4 */
{ 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
{ 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
{ 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 550 1050 5.6 */
{ 0x5, 0x76, 0x3E, 0x00, 0x01 }, /* 850 900 0.5 */
{ 0x6, 0x7F, 0x36, 0x00, 0x09 }, /* 750 1050 2.9 */
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
};
/* Voltage Swing Programming for VccIO 1.05V for HDMI */
static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_1_05V[] = {
/* NT mV Trans mV db */
{ 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
{ 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
{ 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
{ 0xA, 0x5B, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
{ 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
{ 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
{ 0x6, 0x7C, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */
{ 0x5, 0x70, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */
{ 0x6, 0x7C, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */
{ 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
};
/* Voltage Swing Programming for VccIO 1.05V for eDP */
static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
/* NT mV Trans mV db */
{ 0xA, 0x5E, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
{ 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
{ 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
{ 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
{ 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
{ 0xA, 0x5E, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
{ 0xB, 0x64, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */
{ 0xE, 0x6A, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */
{ 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
};
enum port intel_ddi_get_encoder_port(struct intel_encoder *encoder)
{
switch (encoder->type) {
@ -404,7 +544,7 @@ kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
if (IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
return kbl_y_ddi_translations_dp;
} else if (IS_KBL_ULT(dev_priv)) {
} else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
return kbl_u_ddi_translations_dp;
} else {
@ -420,7 +560,8 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
return skl_y_ddi_translations_edp;
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
IS_CFL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
return skl_u_ddi_translations_edp;
} else {
@ -429,7 +570,7 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
}
if (IS_KABYLAKE(dev_priv))
if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
return kbl_get_buf_trans_dp(dev_priv, n_entries);
else
return skl_get_buf_trans_dp(dev_priv, n_entries);
@ -485,7 +626,7 @@ static const struct ddi_buf_trans *
intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
int *n_entries)
{
if (IS_KABYLAKE(dev_priv)) {
if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
return kbl_get_buf_trans_dp(dev_priv, n_entries);
} else if (IS_SKYLAKE(dev_priv)) {
return skl_get_buf_trans_dp(dev_priv, n_entries);
@ -505,7 +646,7 @@ static const struct ddi_buf_trans *
intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv,
int *n_entries)
{
if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv)) {
if (IS_GEN9_BC(dev_priv)) {
return skl_get_buf_trans_edp(dev_priv, n_entries);
} else if (IS_BROADWELL(dev_priv)) {
return bdw_get_buf_trans_edp(dev_priv, n_entries);
@ -1478,7 +1619,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
if (dp_iboost) {
iboost = dp_iboost;
} else {
if (IS_KABYLAKE(dev_priv))
if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
ddi_translations = kbl_get_buf_trans_dp(dev_priv,
&n_entries);
else
@ -1580,6 +1721,200 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
DP_TRAIN_VOLTAGE_SWING_MASK;
}
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
u32 voltage, int *n_entries)
{
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
return cnl_ddi_translations_hdmi_0_85V;
} else if (voltage == VOLTAGE_INFO_0_95V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V);
return cnl_ddi_translations_hdmi_0_95V;
} else if (voltage == VOLTAGE_INFO_1_05V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V);
return cnl_ddi_translations_hdmi_1_05V;
}
return NULL;
}
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv,
u32 voltage, int *n_entries)
{
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
return cnl_ddi_translations_dp_0_85V;
} else if (voltage == VOLTAGE_INFO_0_95V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V);
return cnl_ddi_translations_dp_0_95V;
} else if (voltage == VOLTAGE_INFO_1_05V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V);
return cnl_ddi_translations_dp_1_05V;
}
return NULL;
}
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv,
u32 voltage, int *n_entries)
{
if (dev_priv->vbt.edp.low_vswing) {
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
return cnl_ddi_translations_edp_0_85V;
} else if (voltage == VOLTAGE_INFO_0_95V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
return cnl_ddi_translations_edp_0_95V;
} else if (voltage == VOLTAGE_INFO_1_05V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V);
return cnl_ddi_translations_edp_1_05V;
}
return NULL;
} else {
return cnl_get_buf_trans_dp(dev_priv, voltage, n_entries);
}
}
static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
u32 level, enum port port, int type)
{
const struct cnl_ddi_buf_trans *ddi_translations = NULL;
u32 n_entries, val, voltage;
int ln;
/*
* Values for each port type are listed in
* voltage swing programming tables.
* Vccio voltage found in PORT_COMP_DW3.
*/
voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (type == INTEL_OUTPUT_HDMI) {
ddi_translations = cnl_get_buf_trans_hdmi(dev_priv,
voltage, &n_entries);
} else if (type == INTEL_OUTPUT_DP) {
ddi_translations = cnl_get_buf_trans_dp(dev_priv,
voltage, &n_entries);
} else if (type == INTEL_OUTPUT_EDP) {
ddi_translations = cnl_get_buf_trans_edp(dev_priv,
voltage, &n_entries);
}
if (ddi_translations == NULL) {
MISSING_CASE(voltage);
return;
}
if (level >= n_entries) {
DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", level, n_entries - 1);
level = n_entries - 1;
}
/* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */
val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
val |= SCALING_MODE_SEL(2);
I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
/* Program PORT_TX_DW2 */
val = I915_READ(CNL_PORT_TX_DW2_LN0(port));
val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
/* Rcomp scalar is fixed as 0x98 for every table entry */
val |= RCOMP_SCALAR(0x98);
I915_WRITE(CNL_PORT_TX_DW2_GRP(port), val);
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite the individual loadgen values */
for (ln = 0; ln < 4; ln++) {
val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
}
/* Program PORT_TX_DW5 */
/* All DW5 values are fixed for every table entry */
val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
val |= RTERM_SELECT(6);
val |= TAP3_DISABLE;
I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
/* Program PORT_TX_DW7 */
val = I915_READ(CNL_PORT_TX_DW7_LN0(port));
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
I915_WRITE(CNL_PORT_TX_DW7_GRP(port), val);
}
static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = intel_ddi_get_encoder_port(encoder);
int type = encoder->type;
int width = 0;
int rate = 0;
u32 val;
int ln = 0;
if ((intel_dp) && (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)) {
width = intel_dp->lane_count;
rate = intel_dp->link_rate;
} else {
width = 4;
/* Rate is always less than 6 GHz for HDMI */
}
/*
* 1. If port type is eDP or DP,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
val = I915_READ(CNL_PORT_PCS_DW1_LN0(port));
if (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)
val |= COMMON_KEEPER_EN;
else
val &= ~COMMON_KEEPER_EN;
I915_WRITE(CNL_PORT_PCS_DW1_GRP(port), val);
/* 2. Program loadgen select */
/*
* Program PORT_TX_DW4_LN depending on Bit rate and used lanes
* <= 6 GHz and 4 lanes (LN0=0, LN1=1, LN2=1, LN3=1)
* <= 6 GHz and 1,2 lanes (LN0=0, LN1=1, LN2=1, LN3=0)
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
val &= ~LOADGEN_SELECT;
if (((rate < 600000) && (width == 4) && (ln >= 1)) ||
((rate < 600000) && (width < 4) && ((ln == 1) || (ln == 2)))) {
val |= LOADGEN_SELECT;
}
I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
val = I915_READ(CNL_PORT_CL1CM_DW5);
val |= SUS_CLOCK_CONFIG;
I915_WRITE(CNL_PORT_CL1CM_DW5, val);
/* 4. Clear training enable to change swing values */
val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
val &= ~TX_TRAINING_EN;
I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
/* 5. Program swing and de-emphasis */
cnl_ddi_vswing_program(dev_priv, level, port, type);
/* 6. Set training enable to trigger update */
val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
val |= TX_TRAINING_EN;
I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
}
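
The per-lane table in step 2 reads more easily as a predicate; a hedged restatement (the helper name is illustrative, not from the driver):

#include <assert.h>

/* Mirrors the loadgen rule above: rate in kHz, width = lane count,
 * ln = lane index 0..3. HDMI takes the width == 4, low-rate path.
 */
static int lane_wants_loadgen(int rate, int width, int ln)
{
	if (rate >= 600000)         /* above 6 GHz: no lane */
		return 0;
	if (width == 4)             /* 4 lanes: LN1..LN3 */
		return ln >= 1;
	return ln == 1 || ln == 2;  /* 1-2 lanes: LN1 and LN2 only */
}

int main(void)
{
	assert(!lane_wants_loadgen(540000, 4, 0));
	assert(lane_wants_loadgen(540000, 4, 3));
	assert(!lane_wants_loadgen(540000, 2, 3));
	assert(!lane_wants_loadgen(810000, 4, 1));
	return 0;
}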
static uint32_t translate_signal_level(int signal_levels)
{
int i;
@ -1612,7 +1947,11 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
skl_ddi_set_iboost(encoder, level);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
else if (IS_CANNONLAKE(dev_priv)) {
cnl_ddi_vswing_sequence(encoder, level);
/* DDI_BUF_CTL bits 27:24 are reserved on CNL */
return 0;
}
return DDI_BUF_TRANS_SELECT(level);
}
@ -1621,13 +1960,27 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
uint32_t val;
if (WARN_ON(!pll))
return;
if (IS_GEN9_BC(dev_priv)) {
uint32_t val;
if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->id, port);
I915_WRITE(DPCLKA_CFGCR0, val);
/*
* Configure DPCLKA_CFGCR0 to turn on the clock for the DDI.
* This step and the step before must be done with separate
* register writes.
*/
val = I915_READ(DPCLKA_CFGCR0);
val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) |
DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port));
I915_WRITE(DPCLKA_CFGCR0, val);
} else if (IS_GEN9_BC(dev_priv)) {
/* DDI -> PLL mapping */
val = I915_READ(DPLL_CTRL2);
@ -1696,6 +2049,8 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port,
INTEL_OUTPUT_HDMI);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_vswing_sequence(encoder, level);
intel_hdmi->set_infoframes(drm_encoder,
has_hdmi_sink,
@ -1732,12 +2087,18 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = NULL;
int type = intel_encoder->type;
uint32_t val;
bool wait = false;
/* old_crtc_state and old_conn_state are NULL when called from DP_MST */
if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
intel_dp = enc_to_intel_dp(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
}
val = I915_READ(DDI_BUF_CTL(port));
if (val & DDI_BUF_CTL_ENABLE) {
val &= ~DDI_BUF_CTL_ENABLE;
@ -1753,9 +2114,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
if (intel_dp) {
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
}
@ -1763,7 +2122,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
if (dig_port)
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
if (IS_GEN9_BC(dev_priv))
if (IS_CANNONLAKE(dev_priv))
I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
DPCLKA_CFGCR0_DDI_CLK_OFF(port));
else if (IS_GEN9_BC(dev_priv))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
else if (INTEL_GEN(dev_priv) < 9)
@ -1841,7 +2203,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
intel_dp_stop_link_train(intel_dp);
intel_edp_backlight_on(intel_dp);
intel_edp_backlight_on(pipe_config, conn_state);
intel_psr_enable(intel_dp);
intel_edp_drrs_enable(intel_dp, pipe_config);
}
@ -1871,7 +2233,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
intel_edp_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp);
intel_edp_backlight_off(intel_dp);
intel_edp_backlight_off(old_conn_state);
}
}


@ -51,6 +51,8 @@ static const char * const platform_names[] = {
PLATFORM_NAME(BROXTON),
PLATFORM_NAME(KABYLAKE),
PLATFORM_NAME(GEMINILAKE),
PLATFORM_NAME(COFFEELAKE),
PLATFORM_NAME(CANNONLAKE),
};
#undef PLATFORM_NAME
@ -183,16 +185,15 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
DIV_ROUND_UP(sseu->eu_total,
sseu_subslice_total(sseu)) : 0;
/*
* SKL supports slice power gating on devices with more than
* SKL+ supports slice power gating on devices with more than
* one slice, and supports EU power gating on devices with
* more than one EU pair per subslice. BXT supports subslice
* more than one EU pair per subslice. BXT+ supports subslice
* power gating on devices with more than one subslice, and
* supports EU power gating on devices with more than one EU
* pair per subslice.
*/
sseu->has_slice_pg =
(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
hweight8(sseu->slice_mask) > 1;
!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
sseu->has_subslice_pg =
IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
sseu->has_eu_pg = sseu->eu_per_subslice > 2;
@ -327,7 +328,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
* we don't expose the topmost plane at all to prevent ABI breakage
* down the line.
*/
if (IS_GEMINILAKE(dev_priv))
if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 3;
else if (IS_BROXTON(dev_priv)) {


@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
struct intel_limit {
@ -1192,9 +1193,8 @@ void assert_pipe(struct drm_i915_private *dev_priv,
pipe);
enum intel_display_power_domain power_domain;
/* if we need the pipe quirk it must be always on */
if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
state = true;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
@ -1549,6 +1549,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = DPLL(crtc->pipe);
u32 dpll = crtc->config->dpll_hw_state.dpll;
int i;
assert_pipe_disabled(dev_priv, crtc->pipe);
@ -1595,15 +1596,11 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
}
/* We do this three times for luck */
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
for (i = 0; i < 3; i++) {
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
}
}
/**
@ -1631,8 +1628,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
}
/* Don't disable pipe or pipe PLLs if needed */
if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
if (IS_I830(dev_priv))
return;
/* Make sure the pipe isn't still relying on us */
@ -1915,8 +1911,8 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if (val & PIPECONF_ENABLE) {
WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
/* we keep both pipes enabled on 830 */
WARN_ON(!IS_I830(dev_priv));
return;
}
@ -1976,8 +1972,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
val &= ~PIPECONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */
if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
!(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
if (!IS_I830(dev_priv))
val &= ~PIPECONF_ENABLE;
I915_WRITE(reg, val);
@ -3461,7 +3456,7 @@ __intel_display_resume(struct drm_device *dev,
struct drm_crtc *crtc;
int i, ret;
intel_modeset_setup_hw_state(dev);
intel_modeset_setup_hw_state(dev, ctx);
i915_redisable_vga(to_i915(dev));
if (!state)
@ -4610,7 +4605,7 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned scaler_user, int *scaler_id, unsigned int rotation,
unsigned int scaler_user, int *scaler_id,
int src_w, int src_h, int dst_w, int dst_h)
{
struct intel_crtc_scaler_state *scaler_state =
@ -4619,9 +4614,12 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
to_intel_crtc(crtc_state->base.crtc);
int need_scaling;
need_scaling = drm_rotation_90_or_270(rotation) ?
(src_h != dst_w || src_w != dst_h):
(src_w != dst_w || src_h != dst_h);
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
need_scaling = src_w != dst_w || src_h != dst_h;
/*
* if plane is being disabled or scaler is no more required or force detach
@ -4683,7 +4681,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id, DRM_MODE_ROTATE_0,
&state->scaler_state.scaler_id,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
@ -4712,7 +4710,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
&plane_state->scaler_id,
plane_state->base.rotation,
drm_rect_width(&plane_state->base.src) >> 16,
drm_rect_height(&plane_state->base.src) >> 16,
drm_rect_width(&plane_state->base.dst),
@ -5836,9 +5833,14 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (!dev_priv->display.initial_watermarks)
intel_update_watermarks(intel_crtc);
/* clock the pipe down to 640x480@60 to potentially save power */
if (IS_I830(dev_priv))
i830_enable_pipe(dev_priv, pipe);
}
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@ -5868,7 +5870,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
return;
}
state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
state->acquire_ctx = ctx;
/* Everything's already locked, -EDEADLK can't happen. */
crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
@ -5974,11 +5976,21 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
int intel_connector_init(struct intel_connector *connector)
{
drm_atomic_helper_connector_reset(&connector->base);
struct intel_digital_connector_state *conn_state;
if (!connector->base.state)
/*
* Allocate enough memory to hold intel_digital_connector_state.
* This might be a few bytes too many, but for connectors that don't
* need it we'll free the state and allocate a smaller one on the first
* successful commit anyway.
*/
conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
if (!conn_state)
return -ENOMEM;
__drm_atomic_helper_connector_reset(&connector->base,
&conn_state->base);
return 0;
}
@ -7036,8 +7048,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
pipeconf = 0;
if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
(intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
if (intel_crtc->config->double_wide)
@ -8862,6 +8874,22 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
return 0;
}
static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
u32 temp;
temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
id = temp >> (port * 2);
if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
return;
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
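
The register lookup above packs one two-bit DPLL-select field per DDI into DPCLKA_CFGCR0; a hedged decode sketch (the shift comes straight from the code above, the rest is illustration):

#include <assert.h>
#include <stdint.h>

/* Extract the DPLL id mapped to a DDI: 2 bits per port. */
static int ddi_clk_sel(uint32_t cfgcr0, int port)
{
	return (cfgcr0 >> (port * 2)) & 0x3;
}

int main(void)
{
	/* port 1 mapped to DPLL 2 -> bits 3:2 hold 0b10 */
	uint32_t reg = (uint32_t)2 << (1 * 2);
	assert(ddi_clk_sel(reg, 1) == 2);
	assert(ddi_clk_sel(reg, 0) == 0);
	return 0;
}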
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_state *pipe_config)
@ -9049,7 +9077,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
if (IS_GEN9_BC(dev_priv))
if (IS_CANNONLAKE(dev_priv))
cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_GEN9_BC(dev_priv))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_GEN9_LP(dev_priv))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
@ -11182,6 +11212,9 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
if (mode_changed)
ret = skl_update_scaler_crtc(pipe_config);
if (!ret)
ret = skl_check_pipe_max_pixel_rate(intel_crtc,
pipe_config);
if (!ret)
ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
pipe_config);
@ -12204,9 +12237,8 @@ verify_crtc_state(struct drm_crtc *crtc,
active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
/* hw state is inconsistent with the pipe quirk */
if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
(intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
active = new_crtc_state->active;
I915_STATE_WARN(new_crtc_state->active != active,
@ -13115,8 +13147,16 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_hw_done(state);
if (intel_state->modeset)
if (intel_state->modeset) {
/* As one of the primary mmio accessors, KMS has a high
* likelihood of triggering bugs in unclaimed access. After we
* finish modesetting, see if an error has been flagged, and if
* so enable debugging for the next modeset - and hope we catch
* the culprit.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
}
mutex_lock(&dev->struct_mutex);
drm_atomic_helper_cleanup_planes(dev, state);
@ -13126,19 +13166,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_state_put(state);
/* As one of the primary mmio accessors, KMS has a high likelihood
* of triggering bugs in unclaimed access. After we finish
* modesetting, see if an error has been flagged, and if so
* enable debugging for the next modeset - and hope we catch
* the culprit.
*
* XXX note that we assume display power is on at this point.
* This might hold true now but we need to add pm helper to check
* unclaimed only when the hardware is on, as atomic commits
* can happen also when the device is completely off.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
intel_atomic_helper_free_state(dev_priv);
}
@ -13270,43 +13297,6 @@ static int intel_atomic_commit(struct drm_device *dev,
return 0;
}
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_atomic_state *state;
struct drm_crtc_state *crtc_state;
int ret;
state = drm_atomic_state_alloc(dev);
if (!state) {
DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
crtc->base.id, crtc->name);
return;
}
state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
retry:
crtc_state = drm_atomic_get_crtc_state(state, crtc);
ret = PTR_ERR_OR_ZERO(crtc_state);
if (!ret) {
if (!crtc_state->active)
goto out;
crtc_state->mode_changed = true;
ret = drm_atomic_commit(state);
}
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
drm_modeset_backoff(state->acquire_ctx);
goto retry;
}
out:
drm_atomic_state_put(state);
}
static const struct drm_crtc_funcs intel_crtc_funcs = {
.gamma_set = drm_atomic_helper_legacy_gamma_set,
.set_config = drm_atomic_helper_set_config,
@ -14746,27 +14736,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
}
}
/*
* Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
* resume, or other times. This quirk makes sure that's the case for
* affected systems.
*/
static void quirk_pipea_force(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->quirks |= QUIRK_PIPEA_FORCE;
DRM_INFO("applying pipe a force quirk\n");
}
static void quirk_pipeb_force(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->quirks |= QUIRK_PIPEB_FORCE;
DRM_INFO("applying pipe b force quirk\n");
}
/*
* Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
*/
@ -14832,18 +14801,6 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
};
static struct intel_quirk intel_quirks[] = {
/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
/* 830 needs to leave pipe A & dpll A up */
{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
/* 830 needs to leave pipe B & dpll B up */
{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
/* Lenovo U160 cannot use SSC on LVDS */
{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
@ -15127,7 +15084,7 @@ int intel_modeset_init(struct drm_device *dev)
intel_setup_outputs(dev_priv);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev);
intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
drm_modeset_unlock_all(dev);
for_each_intel_crtc(dev, crtc) {
@ -15164,35 +15121,89 @@ int intel_modeset_init(struct drm_device *dev)
return 0;
}
static void intel_enable_pipe_a(struct drm_device *dev)
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
struct drm_connector *crt = NULL;
struct intel_load_detect_pipe load_detect_temp;
struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
int ret;
/* 640x480@60Hz, ~25175 kHz */
struct dpll clock = {
.m1 = 18,
.m2 = 7,
.p1 = 13,
.p2 = 4,
.n = 2,
};
u32 dpll, fp;
int i;
/* We can't just switch on the pipe A, we need to set things up with a
* proper mode and output configuration. As a gross hack, enable pipe A
* by enabling the load detect pipe once. */
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
crt = &connector->base;
break;
}
WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
pipe_name(pipe), clock.vco, clock.dot);
fp = i9xx_dpll_compute_fp(&clock);
dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
DPLL_VGA_MODE_DIS |
((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
PLL_P2_DIVIDE_BY_4 |
PLL_REF_INPUT_DREFCLK |
DPLL_VCO_ENABLE;
I915_WRITE(FP0(pipe), fp);
I915_WRITE(FP1(pipe), fp);
I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
I915_WRITE(DPLL(pipe), dpll);
/* Wait for the clocks to stabilize. */
POSTING_READ(DPLL(pipe));
udelay(150);
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(DPLL(pipe), dpll);
/* We do this three times for luck */
for (i = 0; i < 3; i++) {
I915_WRITE(DPLL(pipe), dpll);
POSTING_READ(DPLL(pipe));
udelay(150); /* wait for warmup */
}
drm_connector_list_iter_end(&conn_iter);
if (!crt)
return;
I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
POSTING_READ(PIPECONF(pipe));
}
ret = intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx);
WARN(ret < 0, "All modeset mutexes are locked, but intel_get_load_detect_pipe failed\n");
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
pipe_name(pipe));
if (ret > 0)
intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
assert_plane_disabled(dev_priv, PLANE_A);
assert_plane_disabled(dev_priv, PLANE_B);
I915_WRITE(PIPECONF(pipe), 0);
POSTING_READ(PIPECONF(pipe));
if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
DRM_ERROR("pipe %c off wait timed out\n", pipe_name(pipe));
I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
POSTING_READ(DPLL(pipe));
}
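
For reference, the fixed 640x480 clock programmed above checks out under the usual i9xx DPLL arithmetic; a hedged sketch (the m/vco/dot formulas mirror i9xx_calc_dpll_params as assumed here, they are not part of this diff):

#include <assert.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	int ref = 48000; /* kHz */
	int m1 = 18, m2 = 7, n = 2, p1 = 13, p2 = 4;
	int m = 5 * (m1 + 2) + (m2 + 2);             /* 109 */
	int vco = DIV_ROUND_CLOSEST(ref * m, n + 2); /* 1308000 kHz */
	int dot = DIV_ROUND_CLOSEST(vco, p1 * p2);   /* target ~25175 kHz */
	assert(m == 109 && vco == 1308000);
	assert(dot == 25154); /* matches the WARN_ON check above */
	return 0;
}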
static bool
@ -15242,7 +15253,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
}
static void intel_sanitize_crtc(struct intel_crtc *crtc)
static void intel_sanitize_crtc(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -15288,23 +15300,14 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
plane = crtc->plane;
crtc->base.primary->state->visible = true;
crtc->plane = !plane;
intel_crtc_disable_noatomic(&crtc->base);
intel_crtc_disable_noatomic(&crtc->base, ctx);
crtc->plane = plane;
}
if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
crtc->pipe == PIPE_A && !crtc->active) {
/* BIOS forgot to enable pipe A, this mostly happens after
* resume. Force-enable the pipe to fix this, the update_dpms
* call below we restore the pipe to the right state, but leave
* the required bits on. */
intel_enable_pipe_a(dev);
}
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
if (crtc->active && !intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base);
intel_crtc_disable_noatomic(&crtc->base, ctx);
if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
/*
@ -15601,7 +15604,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
* and sanitizes it to the current state
*/
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
@ -15621,7 +15625,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
for_each_pipe(dev_priv, pipe) {
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
intel_sanitize_crtc(crtc);
intel_sanitize_crtc(crtc, ctx);
intel_dump_pipe_config(crtc, crtc->config,
"[setup_hw_state]");
}


@ -798,7 +798,7 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
regs->pp_stat = PP_STATUS(pps_idx);
regs->pp_on = PP_ON_DELAYS(pps_idx);
regs->pp_off = PP_OFF_DELAYS(pps_idx);
if (!IS_GEN9_LP(dev_priv))
if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv))
regs->pp_div = PP_DIVISOR(pps_idx);
}
@ -1628,6 +1628,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
int lane_count, clock;
int min_lane_count = 1;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
@ -1653,7 +1655,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
pipe_config->has_drrs = false;
pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
if (port == PORT_A)
pipe_config->has_audio = false;
else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
pipe_config->has_audio = intel_dp->has_audio;
else
pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
@ -1668,10 +1675,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (HAS_GMCH_DISPLAY(dev_priv))
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
conn_state->scaling_mode);
else
intel_pch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
conn_state->scaling_mode);
}
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
@ -1740,7 +1747,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
return false;
found:
if (intel_dp->color_range_auto) {
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/*
* See:
* CEA-861-E - 5.1 Default Encoding Parameters
@ -1752,7 +1759,7 @@ found:
HDMI_QUANTIZATION_RANGE_LIMITED;
} else {
pipe_config->limited_color_range =
intel_dp->limited_color_range;
intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
}
pipe_config->lane_count = lane_count;
@ -2315,14 +2322,17 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
}
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
if (!is_edp(intel_dp))
return;
DRM_DEBUG_KMS("\n");
intel_panel_enable_backlight(intel_dp->attached_connector);
intel_panel_enable_backlight(crtc_state, conn_state);
_intel_edp_backlight_on(intel_dp);
}
@ -2354,15 +2364,17 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
}
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
if (!is_edp(intel_dp))
return;
DRM_DEBUG_KMS("\n");
_intel_edp_backlight_off(intel_dp);
intel_panel_disable_backlight(intel_dp->attached_connector);
intel_panel_disable_backlight(old_conn_state);
}
/*
@ -2658,7 +2670,7 @@ static void intel_disable_dp(struct intel_encoder *encoder,
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
intel_edp_panel_vdd_on(intel_dp);
intel_edp_backlight_off(intel_dp);
intel_edp_backlight_off(old_conn_state);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
intel_edp_panel_off(intel_dp);
@ -2872,10 +2884,8 @@ static void g4x_enable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_enable_dp(encoder, pipe_config, conn_state);
intel_edp_backlight_on(intel_dp);
intel_edp_backlight_on(pipe_config, conn_state);
}
static void vlv_enable_dp(struct intel_encoder *encoder,
@ -2884,7 +2894,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_edp_backlight_on(intel_dp);
intel_edp_backlight_on(pipe_config, conn_state);
intel_psr_enable(intel_dp);
}
@ -3466,7 +3476,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
if (IS_GEN9_LP(dev_priv))
if (IS_GEN9_LP(dev_priv) || IS_CANNONLAKE(dev_priv))
signal_levels = 0;
else
mask = DDI_BUF_EMP_MASK;
@ -4587,10 +4597,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
edid = intel_dp_get_edid(intel_dp);
intel_connector->detect_edid = edid;
if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
else
intel_dp->has_audio = drm_detect_monitor_audio(edid);
intel_dp->has_audio = drm_detect_monitor_audio(edid);
}
static void
@ -4799,112 +4806,6 @@ static int intel_dp_get_modes(struct drm_connector *connector)
return 0;
}
static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
bool has_audio = false;
struct edid *edid;
edid = to_intel_connector(connector)->detect_edid;
if (edid)
has_audio = drm_detect_monitor_audio(edid);
return has_audio;
}
static int
intel_dp_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
int ret;
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
if (property == dev_priv->force_audio_property) {
int i = val;
bool has_audio;
if (i == intel_dp->force_audio)
return 0;
intel_dp->force_audio = i;
if (i == HDMI_AUDIO_AUTO)
has_audio = intel_dp_detect_audio(connector);
else
has_audio = (i == HDMI_AUDIO_ON);
if (has_audio == intel_dp->has_audio)
return 0;
intel_dp->has_audio = has_audio;
goto done;
}
if (property == dev_priv->broadcast_rgb_property) {
bool old_auto = intel_dp->color_range_auto;
bool old_range = intel_dp->limited_color_range;
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
intel_dp->color_range_auto = true;
break;
case INTEL_BROADCAST_RGB_FULL:
intel_dp->color_range_auto = false;
intel_dp->limited_color_range = false;
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_dp->color_range_auto = false;
intel_dp->limited_color_range = true;
break;
default:
return -EINVAL;
}
if (old_auto == intel_dp->color_range_auto &&
old_range == intel_dp->limited_color_range)
return 0;
goto done;
}
if (is_edp(intel_dp) &&
property == connector->dev->mode_config.scaling_mode_property) {
if (val == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
if (HAS_GMCH_DISPLAY(dev_priv) &&
val == DRM_MODE_SCALE_CENTER) {
DRM_DEBUG_KMS("centering not supported\n");
return -EINVAL;
}
if (intel_connector->panel.fitting_mode == val) {
/* the eDP scaling property is not changed */
return 0;
}
intel_connector->panel.fitting_mode = val;
goto done;
}
return -EINVAL;
done:
if (intel_encoder->base.crtc)
intel_crtc_restore_mode(intel_encoder->base.crtc);
return 0;
}
static int
intel_dp_connector_register(struct drm_connector *connector)
{
@ -5063,19 +4964,21 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.force = intel_dp_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
.set_property = drm_atomic_helper_connector_set_property,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_dp_connector_register,
.early_unregister = intel_dp_connector_unregister,
.destroy = intel_dp_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
.detect_ctx = intel_dp_detect,
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
.atomic_check = intel_digital_connector_atomic_check,
};
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@ -5169,19 +5072,22 @@ bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_dp->color_range_auto = true;
if (is_edp(intel_dp)) {
drm_mode_create_scaling_mode_property(connector->dev);
drm_object_attach_property(
&connector->base,
connector->dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
u32 allowed_scalers;
allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
if (!HAS_GMCH_DISPLAY(dev_priv))
allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
}
}
@ -5207,7 +5113,7 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
pp_on = I915_READ(regs.pp_on);
pp_off = I915_READ(regs.pp_off);
if (!IS_GEN9_LP(dev_priv)) {
if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv)) {
I915_WRITE(regs.pp_ctrl, pp_ctl);
pp_div = I915_READ(regs.pp_div);
}
@ -5225,7 +5131,7 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
PANEL_POWER_DOWN_DELAY_SHIFT;
if (IS_GEN9_LP(dev_priv)) {
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) {
u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
BXT_POWER_CYCLE_DELAY_SHIFT;
if (tmp > 0)
@ -5382,7 +5288,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
if (IS_GEN9_LP(dev_priv)) {
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
@ -5408,7 +5314,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off);
if (IS_GEN9_LP(dev_priv))
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv))
I915_WRITE(regs.pp_ctrl, pp_div);
else
I915_WRITE(regs.pp_div, pp_div);
@ -5416,7 +5322,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(regs.pp_on),
I915_READ(regs.pp_off),
IS_GEN9_LP(dev_priv) ?
(IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) ?
(I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
I915_READ(regs.pp_div));
}

View file

@ -78,8 +78,9 @@ static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
* 8-bit or 16 bit value (MSB and LSB)
*/
static void
intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level)
intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
uint8_t vals[2] = { 0x0 };
@ -97,8 +98,10 @@ intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level)
}
}
static void intel_dp_aux_enable_backlight(struct intel_connector *connector)
static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
uint8_t dpcd_buf = 0;
uint8_t edp_backlight_mode = 0;
@ -131,12 +134,12 @@ static void intel_dp_aux_enable_backlight(struct intel_connector *connector)
}
set_aux_backlight_enable(intel_dp, true);
intel_dp_aux_set_backlight(connector, connector->panel.backlight.level);
intel_dp_aux_set_backlight(conn_state, connector->panel.backlight.level);
}
static void intel_dp_aux_disable_backlight(struct intel_connector *connector)
static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
{
set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false);
set_aux_backlight_enable(enc_to_intel_dp(old_conn_state->best_encoder), false);
}
static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
@ -145,8 +148,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
struct intel_panel *panel = &connector->panel;
intel_dp_aux_enable_backlight(connector);
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
panel->backlight.max = 0xFFFF;
else
@ -165,7 +166,7 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
/* Check the eDP Display control capabilities registers to determine if
* the panel can support backlight control over the aux channel
*/
if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&

View file

@ -1321,7 +1321,6 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
return true;
}
static bool
skl_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state)
@ -1967,6 +1966,438 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
.dump_hw_state = bxt_dump_hw_state,
};
static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
uint32_t val;
/* 1. Enable DPLL power in DPLL_ENABLE. */
val = I915_READ(CNL_DPLL_ENABLE(pll->id));
val |= PLL_POWER_ENABLE;
I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
if (intel_wait_for_register(dev_priv,
CNL_DPLL_ENABLE(pll->id),
PLL_POWER_STATE,
PLL_POWER_STATE,
5))
DRM_ERROR("PLL %d Power not enabled\n", pll->id);
/*
* 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
* select DP mode, and set DP link rate.
*/
val = pll->state.hw_state.cfgcr0;
I915_WRITE(CNL_DPLL_CFGCR0(pll->id), val);
/* 4. Read back to ensure writes completed */
POSTING_READ(CNL_DPLL_CFGCR0(pll->id));
/* 3. Configure DPLL_CFGCR1 */
/* Avoid touching CFGCR1 if HDMI mode is not enabled */
if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) {
val = pll->state.hw_state.cfgcr1;
I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
/* 4. Read back to ensure writes completed */
POSTING_READ(CNL_DPLL_CFGCR1(pll->id));
}
/*
* 5. If the frequency will result in a change to the voltage
* requirement, follow the Display Voltage Frequency Switching
* Sequence Before Frequency Change
*
* FIXME: (DVFS) is used to adjust the display voltage to match the
* display clock frequencies
*/
/* 6. Enable DPLL in DPLL_ENABLE. */
val = I915_READ(CNL_DPLL_ENABLE(pll->id));
val |= PLL_ENABLE;
I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
/* 7. Wait for PLL lock status in DPLL_ENABLE. */
if (intel_wait_for_register(dev_priv,
CNL_DPLL_ENABLE(pll->id),
PLL_LOCK,
PLL_LOCK,
5))
DRM_ERROR("PLL %d not locked\n", pll->id);
/*
* 8. If the frequency will result in a change to the voltage
* requirement, follow the Display Voltage Frequency Switching
* Sequence After Frequency Change
*
* FIXME: (DVFS) is used to adjust the display voltage to match the
* display clock frequencies
*/
/*
* 9. turn on the clock for the DDI and map the DPLL to the DDI
* Done at intel_ddi_clk_select
*/
}
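Both the enable and disable sequences above lean on intel_wait_for_register(), which polls until (register & mask) equals the expected value or the timeout in milliseconds expires, returning 0 on success. A minimal sketch of that polling pattern, with a hypothetical read_reg() standing in for the real i915 MMIO accessor:
/* Sketch only: poll a register until the masked value matches or the
 * deadline passes; read_reg() is a stand-in, not the driver's helper. */
static int wait_for_register_sketch(u32 (*read_reg)(u32 offset), u32 offset,
				    u32 mask, u32 expected,
				    unsigned long timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		if ((read_reg(offset) & mask) == expected)
			return 0;
		cpu_relax();
	} while (time_before(jiffies, deadline));
	/* final read closes the race between the check and the deadline */
	return (read_reg(offset) & mask) == expected ? 0 : -ETIMEDOUT;
}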
static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
uint32_t val;
/*
* 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
* Done at intel_ddi_post_disable
*/
/*
* 2. If the frequency will result in a change to the voltage
* requirement, follow the Display Voltage Frequency Switching
* Sequence Before Frequency Change
*
* FIXME: (DVFS) is used to adjust the display voltage to match the
* display clock frequencies
*/
/* 3. Disable DPLL through DPLL_ENABLE. */
val = I915_READ(CNL_DPLL_ENABLE(pll->id));
val &= ~PLL_ENABLE;
I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
if (intel_wait_for_register(dev_priv,
CNL_DPLL_ENABLE(pll->id),
PLL_LOCK,
0,
5))
DRM_ERROR("PLL %d locked\n", pll->id);
/*
* 5. If the frequency will result in a change to the voltage
* requirement, follow the Display Voltage Frequency Switching
* Sequence After Frequency Change
*
* FIXME: (DVFS) is used to adjust the display voltage to match the
* display clock frequencies
*/
/* 6. Disable DPLL power in DPLL_ENABLE. */
val = I915_READ(CNL_DPLL_ENABLE(pll->id));
val &= ~PLL_POWER_ENABLE;
I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
if (intel_wait_for_register(dev_priv,
CNL_DPLL_ENABLE(pll->id),
PLL_POWER_STATE,
0,
5))
DRM_ERROR("PLL %d Power not disabled\n", pll->id);
}
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
uint32_t val;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
ret = false;
val = I915_READ(CNL_DPLL_ENABLE(pll->id));
if (!(val & PLL_ENABLE))
goto out;
val = I915_READ(CNL_DPLL_CFGCR0(pll->id));
hw_state->cfgcr0 = val;
/* avoid reading back stale values if HDMI mode is not enabled */
if (val & DPLL_CFGCR0_HDMI_MODE) {
hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll->id));
}
ret = true;
out:
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
return ret;
}
static void cnl_wrpll_get_multipliers(unsigned int bestdiv,
unsigned int *pdiv,
unsigned int *qdiv,
unsigned int *kdiv)
{
/* even dividers */
if (bestdiv % 2 == 0) {
if (bestdiv == 2) {
*pdiv = 2;
*qdiv = 1;
*kdiv = 1;
} else if (bestdiv % 4 == 0) {
*pdiv = 2;
*qdiv = bestdiv / 4;
*kdiv = 2;
} else if (bestdiv % 6 == 0) {
*pdiv = 3;
*qdiv = bestdiv / 6;
*kdiv = 2;
} else if (bestdiv % 5 == 0) {
*pdiv = 5;
*qdiv = bestdiv / 10;
*kdiv = 2;
} else if (bestdiv % 14 == 0) {
*pdiv = 7;
*qdiv = bestdiv / 14;
*kdiv = 2;
}
} else {
if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
*pdiv = bestdiv;
*qdiv = 1;
*kdiv = 1;
} else { /* 9, 15, 21 */
*pdiv = bestdiv / 3;
*qdiv = 1;
*kdiv = 3;
}
}
}
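The helper above factors the overall AFE divider into the P, Q and K stages the WRPLL implements, and every branch preserves pdiv * qdiv * kdiv == bestdiv. A hedged self-check over one divider per branch (plain kernel C, not part of the commit):
/* Sanity-check sketch: each divider should factor back to itself.
 * The table picks one representative per branch of the helper above. */
static void check_wrpll_multipliers(void)
{
	static const unsigned int dividers[] = {
		2,	/* even, == 2        -> 2 * 1 * 1 */
		24,	/* even, % 4 == 0    -> 2 * 6 * 2 */
		30,	/* even, % 6 == 0    -> 3 * 5 * 2 */
		10,	/* even, % 5 == 0    -> 5 * 1 * 2 */
		98,	/* even, % 14 == 0   -> 7 * 7 * 2 */
		7,	/* odd prime         -> 7 * 1 * 1 */
		21,	/* odd multiple of 3 -> 7 * 1 * 3 */
	};
	unsigned int i, pdiv, qdiv, kdiv;
	for (i = 0; i < ARRAY_SIZE(dividers); i++) {
		cnl_wrpll_get_multipliers(dividers[i], &pdiv, &qdiv, &kdiv);
		WARN_ON(pdiv * qdiv * kdiv != dividers[i]);
	}
}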
static void cnl_wrpll_params_populate(struct skl_wrpll_params *params, uint32_t dco_freq,
uint32_t ref_freq, uint32_t pdiv, uint32_t qdiv,
uint32_t kdiv)
{
switch (kdiv) {
case 1:
params->kdiv = 1;
break;
case 2:
params->kdiv = 2;
break;
case 3:
params->kdiv = 4;
break;
default:
WARN(1, "Incorrect KDiv\n");
}
switch (pdiv) {
case 2:
params->pdiv = 1;
break;
case 3:
params->pdiv = 2;
break;
case 5:
params->pdiv = 4;
break;
case 7:
params->pdiv = 8;
break;
default:
WARN(1, "Incorrect PDiv\n");
}
if (kdiv != 2)
qdiv = 1;
params->qdiv_ratio = qdiv;
params->qdiv_mode = (qdiv == 1) ? 0 : 1;
params->dco_integer = div_u64(dco_freq, ref_freq);
params->dco_fraction = div_u64((div_u64((uint64_t)dco_freq<<15, (uint64_t)ref_freq) -
((uint64_t)params->dco_integer<<15)) * 0x8000, 0x8000);
}
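dco_integer and dco_fraction encode the DCO as a 15-bit fixed-point multiple of the reference; note that the trailing * 0x8000 / 0x8000 pair in the fraction expression cancels out. A worked example, assuming the 24 MHz CNL reference clock:
/* Assume dco_freq = 8910000 kHz and ref_freq = 24000 kHz (24 MHz ref).
 * 8910000 / 24000 = 371.25, so:
 *   dco_integer  = 371
 *   dco_fraction = 0.25 * (1 << 15) = 8192
 * Matching the code: ((8910000 << 15) / 24000) - (371 << 15)
 *                  = 12165120 - 12156928 = 8192
 * Reconstruction: (371 + 8192 / 32768.0) * 24000 = 8910000 kHz. */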
static bool
cnl_ddi_calculate_wrpll(int clock /* in Hz */,
struct drm_i915_private *dev_priv,
struct skl_wrpll_params *wrpll_params)
{
uint64_t afe_clock = clock * 5 / KHz(1); /* clocks in kHz */
unsigned int dco_min = 7998 * KHz(1);
unsigned int dco_max = 10000 * KHz(1);
unsigned int dco_mid = (dco_min + dco_max) / 2;
static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
18, 20, 24, 28, 30, 32, 36, 40,
42, 44, 48, 50, 52, 54, 56, 60,
64, 66, 68, 70, 72, 76, 78, 80,
84, 88, 90, 92, 96, 98, 100, 102,
3, 5, 7, 9, 15, 21 };
unsigned int d, dco;
unsigned int dco_centrality = 0;
unsigned int best_dco_centrality = 999999;
unsigned int best_div = 0;
unsigned int best_dco = 0;
unsigned int pdiv = 0, qdiv = 0, kdiv = 0;
for (d = 0; d < ARRAY_SIZE(dividers); d++) {
dco = afe_clock * dividers[d];
if ((dco <= dco_max) && (dco >= dco_min)) {
dco_centrality = abs(dco - dco_mid);
if (dco_centrality < best_dco_centrality) {
best_dco_centrality = dco_centrality;
best_div = dividers[d];
best_dco = dco;
}
}
}
if (best_div == 0)
return false;
cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
cnl_wrpll_params_populate(wrpll_params, best_dco,
dev_priv->cdclk.hw.ref, pdiv, qdiv, kdiv);
return true;
}
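As a concrete trace of the search, take a 594 MHz HDMI port clock (so clock = 594000000 Hz at this interface):
/* Hedged worked example for cnl_ddi_calculate_wrpll():
 *   afe_clock = 594000000 * 5 / 1000 = 2970000 kHz
 *   DCO window = [7998000, 10000000] kHz, midpoint 8999000 kHz
 *   2970000 * 2 = 5940000  kHz (below the window)
 *   2970000 * 3 = 8910000  kHz (in window, centrality 89000)
 *   2970000 * 4 = 11880000 kHz (above the window)
 * Only divider 3 qualifies, so best_div = 3, an odd prime, giving
 * pdiv = 3, qdiv = 1, kdiv = 1 via cnl_wrpll_get_multipliers(). */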
static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
int clock)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t cfgcr0, cfgcr1;
struct skl_wrpll_params wrpll_params = { 0, };
cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
if (!cnl_ddi_calculate_wrpll(clock * 1000, dev_priv, &wrpll_params))
return false;
cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
wrpll_params.dco_integer;
cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq |
DPLL_CFGCR1_CENTRAL_FREQ;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
return true;
}
static bool
cnl_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state)
{
uint32_t cfgcr0;
cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
switch (clock / 2) {
case 81000:
cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
break;
case 135000:
cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
break;
case 270000:
cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
break;
/* eDP 1.4 rates */
case 162000:
cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
break;
case 108000:
cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
break;
case 216000:
cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
break;
case 324000:
/* Some SKUs may require elevated I/O voltage to support this */
cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
break;
case 405000:
/* Some SKUs may require elevated I/O voltage to support this */
cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
break;
}
dpll_hw_state->cfgcr0 = cfgcr0;
return true;
}
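The switch keys on clock / 2 because the CNL DPLL runs at half the DP link bit rate. A hedged summary of the mapping, assuming port_clock is in kHz as elsewhere in the driver:
/* port_clock (kHz)   clock / 2   cfgcr0 link rate   DP rate
 *      162000           81000    LINK_RATE_810      RBR,  1.62 Gb/s
 *      270000          135000    LINK_RATE_1350     HBR,  2.70 Gb/s
 *      540000          270000    LINK_RATE_2700     HBR2, 5.40 Gb/s
 *      810000          405000    LINK_RATE_4050     8.1 Gb/s; may need
 *                                                   elevated I/O voltage */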
static struct intel_shared_dpll *
cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct intel_shared_dpll *pll;
int clock = crtc_state->port_clock;
bool bret;
struct intel_dpll_hw_state dpll_hw_state;
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
if (encoder->type == INTEL_OUTPUT_HDMI) {
bret = cnl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
return NULL;
}
} else if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_DP_MST ||
encoder->type == INTEL_OUTPUT_EDP) {
bret = cnl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
return NULL;
}
crtc_state->dpll_hw_state = dpll_hw_state;
} else {
DRM_DEBUG_KMS("Skip DPLL setup for encoder %d\n",
encoder->type);
return NULL;
}
pll = intel_find_shared_dpll(crtc, crtc_state,
DPLL_ID_SKL_DPLL0,
DPLL_ID_SKL_DPLL2);
if (!pll) {
DRM_DEBUG_KMS("No PLL selected\n");
return NULL;
}
intel_reference_shared_dpll(pll, crtc_state);
return pll;
}
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
.enable = cnl_ddi_pll_enable,
.disable = cnl_ddi_pll_disable,
.get_hw_state = cnl_ddi_pll_get_hw_state,
};
static const struct dpll_info cnl_plls[] = {
{ "DPLL 0", DPLL_ID_SKL_DPLL0, &cnl_ddi_pll_funcs, 0 },
{ "DPLL 1", DPLL_ID_SKL_DPLL1, &cnl_ddi_pll_funcs, 0 },
{ "DPLL 2", DPLL_ID_SKL_DPLL2, &cnl_ddi_pll_funcs, 0 },
{ NULL, -1, NULL, },
};
static const struct intel_dpll_mgr cnl_pll_mgr = {
.dpll_info = cnl_plls,
.get_dpll = cnl_get_dpll,
.dump_hw_state = skl_dump_hw_state,
};
/**
* intel_shared_dpll_init - Initialize shared DPLLs
* @dev: drm device
@ -1980,7 +2411,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
if (IS_GEN9_BC(dev_priv))
if (IS_CANNONLAKE(dev_priv))
dpll_mgr = &cnl_pll_mgr;
else if (IS_GEN9_BC(dev_priv))
dpll_mgr = &skl_pll_mgr;
else if (IS_GEN9_LP(dev_priv))
dpll_mgr = &bxt_pll_mgr;

View file

@ -128,6 +128,10 @@ struct intel_dpll_hw_state {
/* HDMI only, 0 when used for DP */
uint32_t cfgcr1, cfgcr2;
/* cnl */
uint32_t cfgcr0;
/* CNL also uses cfgcr1 */
/* bxt */
uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
pcsdw12;

View file

@ -266,7 +266,6 @@ struct intel_encoder {
struct intel_panel {
struct drm_display_mode *fixed_mode;
struct drm_display_mode *downclock_mode;
int fitting_mode;
/* backlight */
struct {
@ -289,9 +288,10 @@ struct intel_panel {
/* Connector and platform specific backlight functions */
int (*setup)(struct intel_connector *connector, enum pipe pipe);
uint32_t (*get)(struct intel_connector *connector);
void (*set)(struct intel_connector *connector, uint32_t level);
void (*disable)(struct intel_connector *connector);
void (*enable)(struct intel_connector *connector);
void (*set)(const struct drm_connector_state *conn_state, uint32_t level);
void (*disable)(const struct drm_connector_state *conn_state);
void (*enable)(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
uint32_t (*hz_to_pwm)(struct intel_connector *connector,
uint32_t hz);
void (*power)(struct intel_connector *, bool enable);
@ -331,6 +331,15 @@ struct intel_connector {
struct work_struct modeset_retry_work;
};
struct intel_digital_connector_state {
struct drm_connector_state base;
enum hdmi_force_audio force_audio;
int broadcast_rgb;
};
#define to_intel_digital_connector_state(x) container_of(x, struct intel_digital_connector_state, base)
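Since intel_digital_connector_state embeds drm_connector_state as its first member, the macro is the usual container_of() downcast, and core helpers can keep passing plain drm_connector_state pointers around. An illustrative (not driver-exact) accessor:
/* Illustrative only: recover the driver-private broadcast_rgb setting
 * from a core connector state via the container_of() downcast above. */
static int example_broadcast_rgb(const struct drm_connector_state *state)
{
	const struct intel_digital_connector_state *intel_state =
		to_intel_digital_connector_state(state);
	return intel_state->broadcast_rgb;
}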
struct dpll {
/* given values */
int n;
@ -896,11 +905,8 @@ struct intel_hdmi {
enum drm_dp_dual_mode_type type;
int max_tmds_clock;
} dp_dual_mode;
bool limited_color_range;
bool color_range_auto;
bool has_hdmi_sink;
bool has_audio;
enum hdmi_force_audio force_audio;
bool rgb_quant_range_selectable;
struct intel_connector *attached_connector;
void (*write_infoframe)(struct drm_encoder *encoder,
@ -966,9 +972,6 @@ struct intel_dp {
bool detect_done;
bool channel_eq_status;
bool reset_link_params;
enum hdmi_force_audio force_audio;
bool limited_color_range;
bool color_range_auto;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
@ -1307,6 +1310,8 @@ void intel_audio_deinit(struct drm_i915_private *dev_priv);
/* intel_cdclk.c */
void skl_init_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
void cnl_init_cdclk(struct drm_i915_private *dev_priv);
void cnl_uninit_cdclk(struct drm_i915_private *dev_priv);
void bxt_init_cdclk(struct drm_i915_private *dev_priv);
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
@ -1319,6 +1324,8 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state);
/* intel_display.c */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
@ -1339,7 +1346,6 @@ unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
void intel_mark_busy(struct drm_i915_private *dev_priv);
void intel_mark_idle(struct drm_i915_private *dev_priv);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
int intel_display_suspend(struct drm_device *dev);
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
@ -1518,8 +1524,9 @@ bool intel_dp_compute_config(struct intel_encoder *encoder,
bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
@ -1699,12 +1706,13 @@ void intel_pch_panel_fitting(struct intel_crtc *crtc,
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode);
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
u32 level, u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector,
enum pipe pipe);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
void intel_panel_destroy_backlight(struct drm_connector *connector);
enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv);
extern struct drm_display_mode *intel_find_panel_downclock(
@ -1874,6 +1882,8 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
int ignore);
bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate);
static inline int intel_enable_rc6(void)
{
return i915.enable_rc6;
@ -1898,10 +1908,19 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
void intel_tv_init(struct drm_i915_private *dev_priv);
/* intel_atomic.c */
int intel_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val);
int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val);
int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val);
int intel_digital_connector_atomic_check(struct drm_connector *conn,
struct drm_connector_state *new_state);
struct drm_connector_state *
intel_digital_connector_duplicate_state(struct drm_connector *connector);
struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);

View file

@ -320,10 +320,10 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
if (HAS_GMCH_DISPLAY(dev_priv))
intel_gmch_panel_fitting(crtc, pipe_config,
intel_connector->panel.fitting_mode);
conn_state->scaling_mode);
else
intel_pch_panel_fitting(crtc, pipe_config,
intel_connector->panel.fitting_mode);
conn_state->scaling_mode);
}
/* DSI uses short packets for sync events, so clear mode flags for DSI */
@ -346,12 +346,13 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
return true;
}
static void glk_dsi_device_ready(struct intel_encoder *encoder)
static bool glk_dsi_enable_io(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 tmp, val;
u32 tmp;
bool cold_boot = false;
/* Set the MIPI mode
* If MIPI_Mode is off, then writes to the LP_Wake bit do not take effect.
@ -370,7 +371,10 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Program LP Wake */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(MIPI_CTRL(port));
tmp |= GLK_LP_WAKE;
if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
tmp &= ~GLK_LP_WAKE;
else
tmp |= GLK_LP_WAKE;
I915_WRITE(MIPI_CTRL(port), tmp);
}
@ -382,6 +386,22 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
DRM_ERROR("MIPIO port is powergated\n");
}
/* Check for cold boot scenario */
for_each_dsi_port(port, intel_dsi->ports) {
cold_boot |= !(I915_READ(MIPI_DEVICE_READY(port)) &
DEVICE_READY);
}
return cold_boot;
}
static void glk_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(dev_priv,
@ -391,8 +411,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
}
/* Get IO out of reset */
tmp = I915_READ(MIPI_CTRL(PORT_A));
I915_WRITE(MIPI_CTRL(PORT_A), tmp | GLK_MIPIIO_RESET_RELEASED);
val = I915_READ(MIPI_CTRL(PORT_A));
I915_WRITE(MIPI_CTRL(PORT_A), val | GLK_MIPIIO_RESET_RELEASED);
/* Get IO out of Low power state */
for_each_dsi_port(port, intel_dsi->ports) {
@ -402,34 +422,34 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
val |= DEVICE_READY;
I915_WRITE(MIPI_DEVICE_READY(port), val);
usleep_range(10, 15);
}
} else {
/* Enter ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_ENTER | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
/* Wait for ULPS active */
if (intel_wait_for_register(dev_priv,
MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20))
DRM_ERROR("ULPS not active\n");
/* Exit ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_EXIT | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
/* Enter Normal Mode */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
tmp = I915_READ(MIPI_CTRL(port));
tmp &= ~GLK_LP_WAKE;
I915_WRITE(MIPI_CTRL(port), tmp);
val = I915_READ(MIPI_CTRL(port));
val &= ~GLK_LP_WAKE;
I915_WRITE(MIPI_CTRL(port), val);
}
}
/* Wait for Stop state */
@ -770,6 +790,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
bool glk_cold_boot = false;
DRM_DEBUG_KMS("\n");
@ -800,7 +821,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
I915_WRITE(DSPCLK_GATE_D, val);
}
intel_dsi_prepare(encoder, pipe_config);
if (!IS_GEMINILAKE(dev_priv))
intel_dsi_prepare(encoder, pipe_config);
/* Power on, try both CRC pmic gpio and VBT */
if (intel_dsi->gpio_panel)
@ -811,9 +833,21 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
/* Deassert reset */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
if (IS_GEMINILAKE(dev_priv)) {
glk_cold_boot = glk_dsi_enable_io(encoder);
/* Prepare port in cold boot(s3/s4) scenario */
if (glk_cold_boot)
intel_dsi_prepare(encoder, pipe_config);
}
/* Put device in ready state (LP-11) */
intel_dsi_device_ready(encoder);
/* Prepare port in normal boot scenario */
if (IS_GEMINILAKE(dev_priv) && !glk_cold_boot)
intel_dsi_prepare(encoder, pipe_config);
/* Send initialization commands in LP mode */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
@ -835,7 +869,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
intel_dsi_port_enable(encoder);
}
intel_panel_enable_backlight(intel_dsi->attached_connector);
intel_panel_enable_backlight(pipe_config, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
}
@ -866,7 +900,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_panel_disable_backlight(intel_dsi->attached_connector);
intel_panel_disable_backlight(old_conn_state);
/*
* Disable Device ready before the port shutdown in order
@ -1587,48 +1621,6 @@ static int intel_dsi_get_modes(struct drm_connector *connector)
return 1;
}
static int intel_dsi_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = connector->dev;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_crtc *crtc;
int ret;
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
if (property == dev->mode_config.scaling_mode_property) {
if (val == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
if (HAS_GMCH_DISPLAY(to_i915(dev)) &&
val == DRM_MODE_SCALE_CENTER) {
DRM_DEBUG_KMS("centering not supported\n");
return -EINVAL;
}
if (intel_connector->panel.fitting_mode == val)
return 0;
intel_connector->panel.fitting_mode = val;
}
crtc = connector->state->crtc;
if (crtc && crtc->state->enable) {
/*
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
*/
intel_crtc_restore_mode(crtc);
}
return 0;
}
static void intel_dsi_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@ -1657,6 +1649,7 @@ static const struct drm_encoder_funcs intel_dsi_funcs = {
static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
.get_modes = intel_dsi_get_modes,
.mode_valid = intel_dsi_mode_valid,
.atomic_check = intel_digital_connector_atomic_check,
};
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
@ -1665,22 +1658,28 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.early_unregister = intel_connector_unregister,
.destroy = intel_dsi_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dsi_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
.set_property = drm_atomic_helper_connector_set_property,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
static void intel_dsi_add_properties(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
if (connector->panel.fixed_mode) {
drm_mode_create_scaling_mode_property(dev);
drm_object_attach_property(&connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
u32 allowed_scalers;
allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
if (!HAS_GMCH_DISPLAY(dev_priv))
allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
drm_connector_attach_scaling_mode_property(&connector->base,
allowed_scalers);
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
}
}

View file

@ -60,10 +60,9 @@ static u32 dcs_get_backlight(struct intel_connector *connector)
return data;
}
static void dcs_set_backlight(struct intel_connector *connector, u32 level)
static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_encoder *encoder = connector->encoder;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
struct mipi_dsi_device *dsi_device;
u8 data = level;
enum port port;
@ -76,14 +75,13 @@ static void dcs_set_backlight(struct intel_connector *connector, u32 level)
}
}
static void dcs_disable_backlight(struct intel_connector *connector)
static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
{
struct intel_encoder *encoder = connector->encoder;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
struct mipi_dsi_device *dsi_device;
enum port port;
dcs_set_backlight(connector, 0);
dcs_set_backlight(conn_state, 0);
for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
u8 cabc = POWER_SAVE_OFF;
@ -110,11 +108,11 @@ static void dcs_disable_backlight(struct intel_connector *connector)
}
}
static void dcs_enable_backlight(struct intel_connector *connector)
static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_encoder *encoder = connector->encoder;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_panel *panel = &connector->panel;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
enum port port;
@ -142,7 +140,7 @@ static void dcs_enable_backlight(struct intel_connector *connector)
&cabc, sizeof(cabc));
}
dcs_set_backlight(connector, panel->backlight.level);
dcs_set_backlight(conn_state, panel->backlight.level);
}
static int dcs_setup_backlight(struct intel_connector *connector,

View file

@ -814,26 +814,27 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
int ret;
/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk */
/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk */
/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
/* WaDisableKillLogic:bxt,skl,kbl */
/* WaDisableKillLogic:bxt,skl,kbl,cfl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
ECOCHK_DIS_TLB);
/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk */
/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk */
/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
FLOW_CONTROL_ENABLE |
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
if (!IS_COFFEELAKE(dev_priv))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
/* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
@ -851,18 +852,18 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
*/
}
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_YV12_BUGFIX |
GEN9_ENABLE_GPGPU_PREEMPTION);
/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk */
/* WaDisablePartialResolveInVc:skl,bxt,kbl */
/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk */
/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
@ -871,7 +872,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
@ -889,39 +890,41 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
* a TLB invalidation occurs during a PSD flush.
*/
/* WaForceEnableNonCoherent:skl,bxt,kbl */
/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT);
/* WaDisableHDCInvalidation:skl,bxt,kbl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
BDW_DISABLE_HDC_INVALIDATION);
if (!IS_COFFEELAKE(dev_priv))
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
BDW_DISABLE_HDC_INVALIDATION);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
if (IS_SKYLAKE(dev_priv) ||
IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv) ||
IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk */
/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
/* WaOCLCoherentLineFlush:skl,bxt,kbl */
/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES));
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk */
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
if (ret)
return ret;
/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl */
ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
if (ret)
return ret;
/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk */
/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
if (ret)
return ret;
@ -1140,6 +1143,38 @@ static int glk_init_workarounds(struct intel_engine_cs *engine)
return 0;
}
static int cfl_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen9_init_workarounds(engine);
if (ret)
return ret;
/* WaEnableGapsTsvCreditFix:cfl */
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
/* WaToEnableHwFixForPushConstHWBug:cfl */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableGafsUnitClkGating:cfl */
WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaDisableSbeCacheDispatchPortSharing:cfl */
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
/* WaInPlaceDecompressionHang:cfl */
WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
return 0;
}
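Most of these writes go through the masked-register helpers, where the upper 16 bits of the value select which of the lower 16 bits the hardware actually updates, so unrelated bits survive the write. A sketch of the bit pattern (names are illustrative; the real macros live in i915_reg.h):
/* Masked-register convention, sketched:
 *   enable bit(s) a  : write (a << 16) | a
 *   disable bit(s) a : write (a << 16)
 * The high half is the write-enable mask, the low half the new value. */
#define SKETCH_MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
#define SKETCH_MASKED_BIT_DISABLE(a)  ((a) << 16)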
int init_workarounds_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@ -1162,6 +1197,8 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
err = kbl_init_workarounds(engine);
else if (IS_GEMINILAKE(dev_priv))
err = glk_init_workarounds(engine);
else if (IS_COFFEELAKE(dev_priv))
err = cfl_init_workarounds(engine);
else
err = 0;
if (err)
@ -1212,6 +1249,11 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
intel_runtime_pm_get(dev_priv);
/* First check that no commands are left in the ring */
if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
(I915_READ_TAIL(engine) & TAIL_ADDR))
idle = false;
/* No bit for gen2, so assume the CS parser is idle */
if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
idle = false;
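The new head/tail comparison treats the engine as busy whenever the hardware read offset has not caught up with the software write offset. A hedged illustration of that invariant, outside the driver's types:
/* Illustrative ring emptiness check: head and tail are byte offsets
 * into the ring (already masked, as HEAD_ADDR/TAIL_ADDR do above);
 * the ring is drained only when the consumer catches the producer. */
struct ring_sketch { u32 head, tail; };
static bool ring_sketch_empty(const struct ring_sketch *r)
{
	return r->head == r->tail;
}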

View file

@ -262,7 +262,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev_priv))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
else if (INTEL_GEN(dev_priv) >= 8)
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
return old;

View file

@ -402,7 +402,7 @@ int intel_guc_select_fw(struct intel_guc *guc)
guc->fw.path = I915_BXT_GUC_UCODE;
guc->fw.major_ver_wanted = BXT_FW_MAJOR;
guc->fw.minor_ver_wanted = BXT_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv)) {
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
guc->fw.path = I915_KBL_GUC_UCODE;
guc->fw.major_ver_wanted = KBL_FW_MAJOR;
guc->fw.minor_ver_wanted = KBL_FW_MINOR;

View file

@ -50,6 +50,32 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return false;
}
/**
* intel_gvt_sanitize_options - sanitize GVT related options
* @dev_priv: drm i915 private data
*
* This function is called at the i915 options sanitize stage.
*/
void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
{
if (!i915.enable_gvt)
return;
if (intel_vgpu_active(dev_priv)) {
DRM_INFO("GVT-g is disabled for guest\n");
goto bail;
}
if (!is_supported_device(dev_priv)) {
DRM_INFO("Unsupported device. GVT-g is disabled\n");
goto bail;
}
return;
bail:
i915.enable_gvt = 0;
}
/**
* intel_gvt_init - initialize GVT components
* @dev_priv: drm i915 private data
@ -69,19 +95,14 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return 0;
}
if (intel_vgpu_active(dev_priv)) {
DRM_DEBUG_DRIVER("GVT-g is disabled for guest\n");
goto bail;
}
if (!is_supported_device(dev_priv)) {
DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n");
goto bail;
}
if (!i915.enable_execlists) {
DRM_INFO("GPU guest virtualisation [GVT-g] disabled due to disabled execlist submission [i915.enable_execlists module parameter]\n");
goto bail;
DRM_ERROR("i915 GVT-g loading failed due to disabled execlists mode\n");
return -EIO;
}
if (i915.enable_guc_submission) {
DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
return -EIO;
}
/*

Просмотреть файл

@ -32,6 +32,7 @@ void intel_gvt_cleanup(struct drm_i915_private *dev_priv);
int intel_gvt_init_device(struct drm_i915_private *dev_priv);
void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
int intel_gvt_init_host(void);
void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
#else
static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
{
@ -40,6 +41,10 @@ static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
{
}
static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
{
}
#endif
#endif /* _INTEL_GVT_H_ */

View file

@ -1218,7 +1218,8 @@ static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
}
static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
bool respect_downstream_limits)
bool respect_downstream_limits,
bool force_dvi)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev));
@ -1234,7 +1235,7 @@ static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
if (info->max_tmds_clock)
max_tmds_clock = min(max_tmds_clock,
info->max_tmds_clock);
else if (!hdmi->has_hdmi_sink)
else if (!hdmi->has_hdmi_sink || force_dvi)
max_tmds_clock = min(max_tmds_clock, 165000);
}
@ -1243,13 +1244,14 @@ static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
static enum drm_mode_status
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_downstream_limits)
int clock, bool respect_downstream_limits,
bool force_dvi)
{
struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
if (clock < 25000)
return MODE_CLOCK_LOW;
if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits))
if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits, force_dvi))
return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
@ -1273,6 +1275,8 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
enum drm_mode_status status;
int clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
bool force_dvi =
READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@ -1289,11 +1293,11 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
clock *= 2;
/* check if we can do 8bpc */
status = hdmi_port_clock_valid(hdmi, clock, true);
status = hdmi_port_clock_valid(hdmi, clock, true, force_dvi);
/* if we can't do 8bpc we may still be able to do 12bpc */
if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK)
status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK && hdmi->has_hdmi_sink && !force_dvi)
status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true, force_dvi);
return status;
}
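Moving from 8 bpc to 12 bpc scales the TMDS clock by 12/8 = 3/2, which is why the fallback retries with clock * 3 / 2. A worked example:
/* Example: 1920x1080@60 has a 148500 kHz pixel clock.
 *    8 bpc TMDS clock: 148500 kHz (under the 165000 kHz DVI limit)
 *   12 bpc TMDS clock: 148500 * 3 / 2 = 222750 kHz
 * With force_dvi set, the 165000 kHz cap applies and the 12 bpc retry
 * is skipped entirely, so the mode is only usable at 8 bpc. */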
@ -1343,16 +1347,19 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct drm_scdc *scdc = &conn_state->connector->display_info.hdmi.scdc;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
int clock_12bpc = clock_8bpc * 3 / 2;
int desired_bpp;
bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
if (pipe_config->has_hdmi_sink)
pipe_config->has_infoframe = true;
if (intel_hdmi->color_range_auto) {
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
pipe_config->limited_color_range =
pipe_config->has_hdmi_sink &&
@ -1360,7 +1367,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
HDMI_QUANTIZATION_RANGE_LIMITED;
} else {
pipe_config->limited_color_range =
intel_hdmi->limited_color_range;
intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
@ -1372,8 +1379,13 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
pipe_config->has_pch_encoder = true;
if (pipe_config->has_hdmi_sink && intel_hdmi->has_audio)
pipe_config->has_audio = true;
if (pipe_config->has_hdmi_sink) {
if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
pipe_config->has_audio = intel_hdmi->has_audio;
else
pipe_config->has_audio =
intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
/*
* HDMI is either 12 or 8, so if the display lets 10bpc sneak
@ -1381,8 +1393,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true) == MODE_OK &&
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && !force_dvi &&
hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK &&
hdmi_12bpc_possible(pipe_config)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@ -1402,7 +1414,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
}
if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
false) != MODE_OK) {
false, force_dvi) != MODE_OK) {
DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
return false;
}
@ -1509,13 +1521,7 @@ intel_hdmi_set_edid(struct drm_connector *connector)
drm_rgb_quant_range_selectable(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
intel_hdmi->has_audio =
intel_hdmi->force_audio == HDMI_AUDIO_ON;
if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
intel_hdmi->has_hdmi_sink =
drm_detect_hdmi_monitor(edid);
intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
connected = true;
}
@ -1577,96 +1583,6 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
return intel_connector_update_modes(connector, edid);
}
static bool
intel_hdmi_detect_audio(struct drm_connector *connector)
{
bool has_audio = false;
struct edid *edid;
edid = to_intel_connector(connector)->detect_edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
return has_audio;
}
static int
intel_hdmi_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
int ret;
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
if (property == dev_priv->force_audio_property) {
enum hdmi_force_audio i = val;
bool has_audio;
if (i == intel_hdmi->force_audio)
return 0;
intel_hdmi->force_audio = i;
if (i == HDMI_AUDIO_AUTO)
has_audio = intel_hdmi_detect_audio(connector);
else
has_audio = (i == HDMI_AUDIO_ON);
if (i == HDMI_AUDIO_OFF_DVI)
intel_hdmi->has_hdmi_sink = 0;
intel_hdmi->has_audio = has_audio;
goto done;
}
if (property == dev_priv->broadcast_rgb_property) {
bool old_auto = intel_hdmi->color_range_auto;
bool old_range = intel_hdmi->limited_color_range;
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
intel_hdmi->color_range_auto = true;
break;
case INTEL_BROADCAST_RGB_FULL:
intel_hdmi->color_range_auto = false;
intel_hdmi->limited_color_range = false;
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_hdmi->color_range_auto = false;
intel_hdmi->limited_color_range = true;
break;
default:
return -EINVAL;
}
if (old_auto == intel_hdmi->color_range_auto &&
old_range == intel_hdmi->limited_color_range)
return 0;
goto done;
}
if (property == connector->dev->mode_config.aspect_ratio_property) {
connector->state->picture_aspect_ratio = val;
goto done;
}
return -EINVAL;
done:
if (intel_dig_port->base.base.crtc)
intel_crtc_restore_mode(intel_dig_port->base.base.crtc);
return 0;
}
static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@ -1791,18 +1707,20 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.detect = intel_hdmi_detect,
.force = intel_hdmi_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_hdmi_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
.set_property = drm_atomic_helper_connector_set_property,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_hdmi_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
.get_modes = intel_hdmi_get_modes,
.mode_valid = intel_hdmi_mode_valid,
.atomic_check = intel_digital_connector_atomic_check,
};
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@ -1814,7 +1732,6 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
{
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_hdmi->color_range_auto = true;
intel_attach_aspect_ratio_property(connector);
connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
@ -1885,19 +1802,21 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
switch (port) {
case PORT_B:
if (IS_GEN9_LP(dev_priv))
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv))
ddc_pin = GMBUS_PIN_1_BXT;
else
ddc_pin = GMBUS_PIN_DPB;
break;
case PORT_C:
if (IS_GEN9_LP(dev_priv))
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv))
ddc_pin = GMBUS_PIN_2_BXT;
else
ddc_pin = GMBUS_PIN_DPC;
break;
case PORT_D:
if (IS_CHERRYVIEW(dev_priv))
if (HAS_PCH_CNP(dev_priv))
ddc_pin = GMBUS_PIN_4_CNP;
else if (IS_CHERRYVIEW(dev_priv))
ddc_pin = GMBUS_PIN_DPD_CHV;
else
ddc_pin = GMBUS_PIN_DPD;

View file

@ -167,7 +167,7 @@ void intel_huc_select_fw(struct intel_huc *huc)
huc->fw.path = I915_BXT_HUC_UCODE;
huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv)) {
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
huc->fw.path = I915_KBL_HUC_UCODE;
huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;

View file

@ -68,11 +68,20 @@ static const struct gmbus_pin gmbus_pins_bxt[] = {
[GMBUS_PIN_3_BXT] = { "misc", GPIOD },
};
static const struct gmbus_pin gmbus_pins_cnp[] = {
[GMBUS_PIN_1_BXT] = { "dpb", GPIOB },
[GMBUS_PIN_2_BXT] = { "dpc", GPIOC },
[GMBUS_PIN_3_BXT] = { "misc", GPIOD },
[GMBUS_PIN_4_CNP] = { "dpd", GPIOE },
};
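get_gmbus_pin() assumes the index was already validated, since an out-of-range pin would read past the per-platform table. A hedged sketch of the expected call pattern (the helper name is hypothetical):
/* Hypothetical caller: validate the pin against the platform table
 * before dereferencing the table entry. */
static const char *gmbus_pin_name(struct drm_i915_private *dev_priv,
				  unsigned int pin)
{
	if (!intel_gmbus_is_valid_pin(dev_priv, pin))
		return NULL;
	return get_gmbus_pin(dev_priv, pin)->name;
}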
/* pin is expected to be valid */
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
unsigned int pin)
{
if (IS_GEN9_LP(dev_priv))
if (HAS_PCH_CNP(dev_priv))
return &gmbus_pins_cnp[pin];
else if (IS_GEN9_LP(dev_priv))
return &gmbus_pins_bxt[pin];
else if (IS_GEN9_BC(dev_priv))
return &gmbus_pins_skl[pin];
@ -87,7 +96,9 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
{
unsigned int size;
if (IS_GEN9_LP(dev_priv))
if (HAS_PCH_CNP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_cnp);
else if (IS_GEN9_LP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bxt);
else if (IS_GEN9_BC(dev_priv))
size = ARRAY_SIZE(gmbus_pins_skl);
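For context on the lookup above: get_gmbus_pin() does no bounds checking of its own (per the "pin is expected to be valid" comment), so callers gate it with intel_gmbus_is_valid_pin(). A minimal caller-side sketch, with the surrounding dev_priv/pin variables assumed rather than taken from this diff:

if (intel_gmbus_is_valid_pin(dev_priv, pin)) {
	/* Safe now: the pin has an entry in the platform's table. */
	const struct gmbus_pin *p = get_gmbus_pin(dev_priv, pin);
	DRM_DEBUG_KMS("gmbus pin %u maps to %s\n", pin, p->name);
}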


@ -204,6 +204,7 @@
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
#define GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x19
/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
@ -1861,6 +1862,10 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
default:
MISSING_CASE(INTEL_GEN(engine->i915));
/* fall through */
case 10:
indirect_ctx_offset =
GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
break;
case 9:
indirect_ctx_offset =
GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
@ -1957,6 +1962,8 @@ static void execlists_init_reg_state(u32 *regs,
regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
make_rpcs(dev_priv));
i915_oa_init_reg_state(engine, ctx, regs);
}
}


@ -311,8 +311,6 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
@ -322,7 +320,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(intel_connector);
intel_panel_enable_backlight(pipe_config, conn_state);
}
static void intel_disable_lvds(struct intel_encoder *encoder,
@ -345,11 +343,7 @@ static void gmch_disable_lvds(struct intel_encoder *encoder,
struct drm_connector_state *old_conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
intel_panel_disable_backlight(intel_connector);
intel_panel_disable_backlight(old_conn_state);
intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
}
@ -358,11 +352,7 @@ static void pch_disable_lvds(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
intel_panel_disable_backlight(intel_connector);
intel_panel_disable_backlight(old_conn_state);
}
static void pch_post_disable_lvds(struct intel_encoder *encoder,
@ -433,10 +423,10 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
pipe_config->has_pch_encoder = true;
intel_pch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
conn_state->scaling_mode);
} else {
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
conn_state->scaling_mode);
}
@ -598,56 +588,24 @@ static void intel_lvds_destroy(struct drm_connector *connector)
kfree(connector);
}
static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
if (property == dev->mode_config.scaling_mode_property) {
struct drm_crtc *crtc;
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
if (intel_connector->panel.fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
}
intel_connector->panel.fitting_mode = value;
crtc = intel_attached_encoder(connector)->base.crtc;
if (crtc && crtc->state->enable) {
/*
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
*/
intel_crtc_restore_mode(crtc);
}
}
return 0;
}
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
.atomic_check = intel_digital_connector_atomic_check,
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
.set_property = drm_atomic_helper_connector_set_property,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_lvds_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
@ -988,6 +946,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
u32 lvds;
int pipe;
u8 pin;
u32 allowed_scalers;
if (!intel_lvds_supported(dev_priv))
return;
@ -1083,11 +1042,11 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
lvds_encoder->reg = lvds_reg;
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT);
allowed_scalers |= BIT(DRM_MODE_SCALE_FULLSCREEN);
allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps);
lvds_encoder->init_lvds_val = lvds;


@ -178,7 +178,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
{
bool result = false;
if (IS_GEN9_BC(dev_priv)) {
if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
table->size = ARRAY_SIZE(skylake_mocs_table);
table->table = skylake_mocs_table;
result = true;


@ -461,7 +461,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter)
intel_panel_set_backlight_acpi(connector, bclp, 255);
intel_panel_set_backlight_acpi(connector->base.state, bclp, 255);
drm_connector_list_iter_end(&conn_iter);
asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;


@ -270,7 +270,6 @@ static int intel_overlay_on(struct intel_overlay *overlay)
u32 *cs;
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
req = alloc_request(overlay);
if (IS_ERR(req))


@ -561,15 +561,18 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
return val;
}
static void lpt_set_backlight(struct intel_connector *connector, u32 level)
static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
}
static void pch_set_backlight(struct intel_connector *connector, u32 level)
static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
@ -577,8 +580,9 @@ static void pch_set_backlight(struct intel_connector *connector, u32 level)
I915_WRITE(BLC_PWM_CPU_CTL, tmp | level);
}
static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
@ -604,50 +608,51 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
static void vlv_set_backlight(struct intel_connector *connector, u32 level)
static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum pipe pipe = intel_get_pipe_from_connector(connector);
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 tmp;
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return;
tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
}
static void bxt_set_backlight(struct intel_connector *connector, u32 level)
static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
}
static void pwm_set_backlight(struct intel_connector *connector, u32 level)
static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_panel *panel = &connector->panel;
struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
pwm_config(panel->backlight.pwm, duty_ns, CRC_PMIC_PWM_PERIOD_NS);
}
static void
intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_panel *panel = &connector->panel;
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
level = intel_panel_compute_brightness(connector, level);
panel->backlight.set(connector, level);
panel->backlight.set(conn_state, level);
}
/* set backlight brightness to level in range [0..max], scaling wrt hw min */
static void intel_panel_set_backlight(struct intel_connector *connector,
static void intel_panel_set_backlight(const struct drm_connector_state *conn_state,
u32 user_level, u32 user_max)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 hw_level;
@ -663,7 +668,7 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
panel->backlight.level = hw_level;
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(connector, hw_level);
intel_panel_actually_set_backlight(conn_state, hw_level);
mutex_unlock(&dev_priv->backlight_lock);
}
@ -671,21 +676,21 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
/* set backlight brightness to level in range [0..max], assuming hw min is
* respected.
*/
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
u32 user_level, u32 user_max)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 hw_level;
/*
* INVALID_PIPE may occur during driver init because
* Lack of crtc may occur during driver init because
* connection_mutex isn't held across the entire backlight
* setup + modeset readout, and the BIOS can issue the
* requests at any time.
*/
if (!panel->backlight.present || pipe == INVALID_PIPE)
if (!panel->backlight.present || !conn_state->crtc)
return;
mutex_lock(&dev_priv->backlight_lock);
@ -702,17 +707,18 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
panel->backlight.device->props.max_brightness);
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(connector, hw_level);
intel_panel_actually_set_backlight(conn_state, hw_level);
mutex_unlock(&dev_priv->backlight_lock);
}
static void lpt_disable_backlight(struct intel_connector *connector)
static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
intel_panel_actually_set_backlight(connector, 0);
intel_panel_actually_set_backlight(old_conn_state, 0);
/*
* Although we don't support or enable CPU PWM with LPT/SPT based
@ -732,12 +738,13 @@ static void lpt_disable_backlight(struct intel_connector *connector)
I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
static void pch_disable_backlight(struct intel_connector *connector)
static void pch_disable_backlight(const struct drm_connector_state *old_conn_state)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
intel_panel_actually_set_backlight(connector, 0);
intel_panel_actually_set_backlight(old_conn_state, 0);
tmp = I915_READ(BLC_PWM_CPU_CTL2);
I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
@ -746,44 +753,43 @@ static void pch_disable_backlight(struct intel_connector *connector)
I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
static void i9xx_disable_backlight(struct intel_connector *connector)
static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state)
{
intel_panel_actually_set_backlight(connector, 0);
intel_panel_actually_set_backlight(old_conn_state, 0);
}
static void i965_disable_backlight(struct intel_connector *connector)
static void i965_disable_backlight(const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev);
u32 tmp;
intel_panel_actually_set_backlight(connector, 0);
intel_panel_actually_set_backlight(old_conn_state, 0);
tmp = I915_READ(BLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
}
static void vlv_disable_backlight(struct intel_connector *connector)
static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum pipe pipe = intel_get_pipe_from_connector(connector);
enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
u32 tmp;
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return;
intel_panel_actually_set_backlight(connector, 0);
intel_panel_actually_set_backlight(old_conn_state, 0);
tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
}
static void bxt_disable_backlight(struct intel_connector *connector)
static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 tmp, val;
intel_panel_actually_set_backlight(connector, 0);
intel_panel_actually_set_backlight(old_conn_state, 0);
tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
@ -796,8 +802,23 @@ static void bxt_disable_backlight(struct intel_connector *connector)
}
}
static void pwm_disable_backlight(struct intel_connector *connector)
static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 tmp;
intel_panel_actually_set_backlight(old_conn_state, 0);
tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
tmp & ~BXT_BLC_PWM_ENABLE);
}
static void pwm_disable_backlight(const struct drm_connector_state *old_conn_state)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct intel_panel *panel = &connector->panel;
/* Disable the backlight */
@ -806,8 +827,9 @@ static void pwm_disable_backlight(struct intel_connector *connector)
pwm_disable(panel->backlight.pwm);
}
void intel_panel_disable_backlight(struct intel_connector *connector)
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
@ -830,13 +852,15 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
panel->backlight.enabled = false;
panel->backlight.disable(connector);
panel->backlight.disable(old_conn_state);
mutex_unlock(&dev_priv->backlight_lock);
}
static void lpt_enable_backlight(struct intel_connector *connector)
static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2, schicken;
@ -880,22 +904,18 @@ static void lpt_enable_backlight(struct intel_connector *connector)
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_panel_actually_set_backlight(connector, panel->backlight.level);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
}
static void pch_enable_backlight(struct intel_connector *connector)
static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
enum transcoder cpu_transcoder;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 cpu_ctl2, pch_ctl1, pch_ctl2;
if (!WARN_ON_ONCE(pipe == INVALID_PIPE))
cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, pipe);
else
cpu_transcoder = TRANSCODER_EDP;
cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
if (cpu_ctl2 & BLM_PWM_ENABLE) {
DRM_DEBUG_KMS("cpu backlight already enabled\n");
@ -919,7 +939,7 @@ static void pch_enable_backlight(struct intel_connector *connector)
I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_panel_actually_set_backlight(connector, panel->backlight.level);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
pch_ctl2 = panel->backlight.max << 16;
I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
@ -933,8 +953,10 @@ static void pch_enable_backlight(struct intel_connector *connector)
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
}
static void i9xx_enable_backlight(struct intel_connector *connector)
static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 ctl, freq;
@ -959,7 +981,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
POSTING_READ(BLC_PWM_CTL);
/* XXX: combine this into above write? */
intel_panel_actually_set_backlight(connector, panel->backlight.level);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
/*
* Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
@ -970,16 +992,15 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
}
static void i965_enable_backlight(struct intel_connector *connector)
static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 ctl, ctl2, freq;
if (WARN_ON_ONCE(pipe == INVALID_PIPE))
pipe = PIPE_A;
ctl2 = I915_READ(BLC_PWM_CTL2);
if (ctl2 & BLM_PWM_ENABLE) {
DRM_DEBUG_KMS("backlight already enabled\n");
@ -1003,19 +1024,18 @@ static void i965_enable_backlight(struct intel_connector *connector)
POSTING_READ(BLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
intel_panel_actually_set_backlight(connector, panel->backlight.level);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
}
static void vlv_enable_backlight(struct intel_connector *connector)
static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
u32 ctl, ctl2;
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return;
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
DRM_DEBUG_KMS("backlight already enabled\n");
@ -1027,7 +1047,7 @@ static void vlv_enable_backlight(struct intel_connector *connector)
I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl);
/* XXX: combine this into above write? */
intel_panel_actually_set_backlight(connector, panel->backlight.level);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
ctl2 = 0;
if (panel->backlight.active_low_pwm)
@ -1037,16 +1057,15 @@ static void vlv_enable_backlight(struct intel_connector *connector)
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
}
static void bxt_enable_backlight(struct intel_connector *connector)
static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
u32 pwm_ctl, val;
if (WARN_ON_ONCE(pipe == INVALID_PIPE))
pipe = PIPE_A;
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
val = I915_READ(UTIL_PIN_CTL);
@ -1074,7 +1093,7 @@ static void bxt_enable_backlight(struct intel_connector *connector)
I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
panel->backlight.max);
intel_panel_actually_set_backlight(connector, panel->backlight.level);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
pwm_ctl = 0;
if (panel->backlight.active_low_pwm)
@ -1086,25 +1105,59 @@ static void bxt_enable_backlight(struct intel_connector *connector)
pwm_ctl | BXT_BLC_PWM_ENABLE);
}
static void pwm_enable_backlight(struct intel_connector *connector)
static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl;
pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
DRM_DEBUG_KMS("backlight already enabled\n");
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl);
}
I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
panel->backlight.max);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
pwm_ctl = 0;
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl | BXT_BLC_PWM_ENABLE);
}
static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_panel *panel = &connector->panel;
pwm_enable(panel->backlight.pwm);
intel_panel_actually_set_backlight(connector, panel->backlight.level);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
}
void intel_panel_enable_backlight(struct intel_connector *connector)
void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
if (!panel->backlight.present)
return;
if (!WARN_ON_ONCE(pipe == INVALID_PIPE))
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
mutex_lock(&dev_priv->backlight_lock);
@ -1119,7 +1172,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
panel->backlight.device->props.max_brightness);
}
panel->backlight.enable(connector);
panel->backlight.enable(crtc_state, conn_state);
panel->backlight.enabled = true;
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_UNBLANK;
@ -1137,7 +1190,7 @@ static int intel_backlight_device_update_status(struct backlight_device *bd)
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
bd->props.brightness, bd->props.max_brightness);
intel_panel_set_backlight(connector, bd->props.brightness,
intel_panel_set_backlight(connector->base.state, bd->props.brightness,
bd->props.max_brightness);
/*
@ -1249,6 +1302,17 @@ void intel_backlight_device_unregister(struct intel_connector *connector)
}
#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
/*
* CNP: PWM clock frequency is 19.2 MHz or 24 MHz.
* PWM increment = 1
*/
static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz);
}
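As a worked example of cnp_hz_to_pwm() above (numbers invented for illustration, and assuming rawclk_freq is stored in kHz as the KHz() wrapper implies): with a 19.2 MHz raw clock, rawclk_freq = 19200, so KHz(19200) = 19200000; a requested 200 Hz backlight frequency then yields DIV_ROUND_CLOSEST(19200000, 200) = 96000 raw-clock cycles per PWM period, the value the setup code would program as the maximum brightness count.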
/*
* BXT: PWM clock frequency = 19.2 MHz.
*/
@ -1644,6 +1708,42 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
return 0;
}
static int
cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl, val;
/*
* CNP has the BXT implementation of backlight, but with only
* one controller. Future platforms could have multiple controllers,
* so keep this extensible and prepared for the future.
*/
panel->backlight.controller = 0;
pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.max =
I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller));
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
if (!panel->backlight.max)
return -ENODEV;
val = bxt_get_backlight(connector);
val = intel_panel_compute_brightness(connector, val);
panel->backlight.level = clamp(val, panel->backlight.min,
panel->backlight.max);
panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
return 0;
}
static int pwm_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
@ -1760,6 +1860,13 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
} else if (HAS_PCH_CNP(dev_priv)) {
panel->backlight.setup = cnp_setup_backlight;
panel->backlight.enable = cnp_enable_backlight;
panel->backlight.disable = cnp_disable_backlight;
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = cnp_hz_to_pwm;
} else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
HAS_PCH_KBP(dev_priv)) {
panel->backlight.setup = lpt_setup_backlight;


@ -58,24 +58,24 @@
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
I915_WRITE(GEN8_CONFIG0,
I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
/* WaEnableChickenDCPR:skl,bxt,kbl,glk */
/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
I915_WRITE(GEN8_CHICKEN_DCPR_1,
I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
/* WaFbcWakeMemOn:skl,bxt,kbl,glk */
/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
/* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS |
DISP_FBC_MEMORY_WAKE);
/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
ILK_DPFC_DISABLE_DUMMY0);
}
@ -3549,7 +3549,7 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
if (IS_KABYLAKE(dev_priv))
if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
return true;
if (IS_SKYLAKE(dev_priv) &&
@ -3841,20 +3841,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
/* n.b., src is 16.16 fixed point, dst is whole integer */
if (plane->id == PLANE_CURSOR) {
/*
* Cursors only support 0/180 degree rotation,
* hence no need to account for rotation here.
*/
src_w = pstate->base.src_w >> 16;
src_h = pstate->base.src_h >> 16;
dst_w = pstate->base.crtc_w;
dst_h = pstate->base.crtc_h;
} else {
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
src_w = drm_rect_width(&pstate->base.src) >> 16;
src_h = drm_rect_height(&pstate->base.src) >> 16;
dst_w = drm_rect_width(&pstate->base.dst);
dst_h = drm_rect_height(&pstate->base.dst);
}
if (drm_rotation_90_or_270(pstate->base.rotation))
swap(dst_w, dst_h);
fp_w_ratio = fixed_16_16_div(src_w, dst_w);
fp_h_ratio = fixed_16_16_div(src_h, dst_h);
downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1));
@ -3863,6 +3869,97 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
return mul_fixed16(downscale_w, downscale_h);
}
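A quick note on the fixed-point math used above and below: uint_fixed_16_16_t carries 16 integer and 16 fractional bits, so u32_to_fixed_16_16(1) is 0x10000. With invented numbers, fixed_16_16_div(3840, 1920) is 0x20000 (2.0), while fixed_16_16_div(1920, 3840) is 0x8000 (0.5) and gets clamped back to 1.0 by max_fixed_16_16(), so only downscaling ever inflates the result.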
static uint_fixed_16_16_t
skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
{
uint_fixed_16_16_t pipe_downscale = u32_to_fixed_16_16(1);
if (!crtc_state->base.enable)
return pipe_downscale;
if (crtc_state->pch_pfit.enabled) {
uint32_t src_w, src_h, dst_w, dst_h;
uint32_t pfit_size = crtc_state->pch_pfit.size;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
src_w = crtc_state->pipe_src_w;
src_h = crtc_state->pipe_src_h;
dst_w = pfit_size >> 16;
dst_h = pfit_size & 0xffff;
if (!dst_w || !dst_h)
return pipe_downscale;
fp_w_ratio = fixed_16_16_div(src_w, dst_w);
fp_h_ratio = fixed_16_16_div(src_h, dst_h);
downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1));
downscale_h = max_fixed_16_16(fp_h_ratio, u32_to_fixed_16_16(1));
pipe_downscale = mul_fixed16(downscale_w, downscale_h);
}
return pipe_downscale;
}
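For instance (hypothetical numbers), a 3840x2160 pipe source panel-fitted into a 1920x1080 window unpacks dst_w = 1920 and dst_h = 1080 from the high and low halves of pch_pfit.size, giving fp_w_ratio = fp_h_ratio = 2.0 and thus pipe_downscale = mul_fixed16(2.0, 2.0) = 4.0.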
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate)
{
struct drm_crtc_state *crtc_state = &cstate->base;
struct drm_atomic_state *state = crtc_state->state;
struct drm_plane *plane;
const struct drm_plane_state *pstate;
struct intel_plane_state *intel_pstate;
int crtc_clock, dotclk;
uint32_t pipe_max_pixel_rate;
uint_fixed_16_16_t pipe_downscale;
uint_fixed_16_16_t max_downscale = u32_to_fixed_16_16(1);
if (!cstate->base.enable)
return 0;
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
uint_fixed_16_16_t plane_downscale;
uint_fixed_16_16_t fp_9_div_8 = fixed_16_16_div(9, 8);
int bpp;
if (!intel_wm_plane_visible(cstate,
to_intel_plane_state(pstate)))
continue;
if (WARN_ON(!pstate->fb))
return -EINVAL;
intel_pstate = to_intel_plane_state(pstate);
plane_downscale = skl_plane_downscale_amount(cstate,
intel_pstate);
bpp = pstate->fb->format->cpp[0] * 8;
if (bpp == 64)
plane_downscale = mul_fixed16(plane_downscale,
fp_9_div_8);
max_downscale = max_fixed_16_16(plane_downscale, max_downscale);
}
pipe_downscale = skl_pipe_downscale_amount(cstate);
pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
crtc_clock = crtc_state->adjusted_mode.crtc_clock;
dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
if (IS_GEMINILAKE(to_i915(intel_crtc->base.dev)))
dotclk *= 2;
pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);
if (pipe_max_pixel_rate < crtc_clock) {
DRM_DEBUG_KMS("Max supported pixel clock with scaling exceeded\n");
return -EINVAL;
}
return 0;
}
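Continuing that example with invented clocks: a logical cdclk of 528000 kHz and a combined downscale of 4.0 give pipe_max_pixel_rate = div_round_up_u32_fixed16(528000, 4.0) = 132000 kHz, so a 148500 kHz (148.5 MHz) mode would fail this check with -EINVAL; on GLK the doubled dotclk raises the ceiling to 264000 kHz and the same mode would pass.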
static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
const struct drm_plane_state *pstate,
@ -3887,12 +3984,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
if (y && format != DRM_FORMAT_NV12)
return 0;
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
/* for planar format */
if (format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
@ -3975,12 +4074,14 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
fb->modifier != I915_FORMAT_MOD_Yf_TILED)
return 8;
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
if (drm_rotation_90_or_270(pstate->rotation))
swap(src_w, src_h);
/* Halve UV plane width and height for NV12 */
if (fb->format->format == DRM_FORMAT_NV12 && !y) {
src_w /= 2;
@ -4279,8 +4380,9 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
fb->modifier == I915_FORMAT_MOD_Yf_TILED;
x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
/* Display WA #1141: kbl. */
if (IS_KABYLAKE(dev_priv) && dev_priv->ipc_enabled)
/* Display WA #1141: kbl,cfl */
if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
dev_priv->ipc_enabled)
latency += 4;
if (apply_memory_bw_wa && x_tiled)
@ -4290,13 +4392,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
width = intel_pstate->base.crtc_w;
height = intel_pstate->base.crtc_h;
} else {
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
}
if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
cpp = fb->format->cpp[0];
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
@ -8148,7 +8252,7 @@ static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
/* WaFbcNukeOnHostModify:kbl */
/* WaFbcNukeOnHostModify:kbl,cfl */
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
@ -8616,7 +8720,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
if (IS_SKYLAKE(dev_priv))
dev_priv->display.init_clock_gating = skylake_init_clock_gating;
else if (IS_KABYLAKE(dev_priv))
else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
else if (IS_BROXTON(dev_priv))
dev_priv->display.init_clock_gating = bxt_init_clock_gating;


@ -494,6 +494,55 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_C) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
@ -762,13 +811,14 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
}
break;
case SKL_DISP_PW_MISC_IO:
case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A */
case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A, CNL_DISP_PW_DDI_A */
case SKL_DISP_PW_DDI_B:
case SKL_DISP_PW_DDI_C:
case SKL_DISP_PW_DDI_D:
case GLK_DISP_PW_AUX_A:
case GLK_DISP_PW_AUX_B:
case GLK_DISP_PW_AUX_C:
case GLK_DISP_PW_AUX_A: /* CNL_DISP_PW_AUX_A */
case GLK_DISP_PW_AUX_B: /* CNL_DISP_PW_AUX_B */
case GLK_DISP_PW_AUX_C: /* CNL_DISP_PW_AUX_C */
case CNL_DISP_PW_AUX_D:
break;
default:
WARN(1, "Unknown power well %lu\n", power_well->id);
@ -803,8 +853,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
}
if (IS_GEN9(dev_priv))
gen9_sanitize_power_well_requests(dev_priv, power_well);
gen9_sanitize_power_well_requests(dev_priv, power_well);
}
if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
@ -992,6 +1041,38 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
return true;
}
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_A);
if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_B);
}
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
i830_disable_pipe(dev_priv, PIPE_B);
i830_disable_pipe(dev_priv, PIPE_A);
}
static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if (power_well->count > 0)
i830_pipes_power_well_enable(dev_priv, power_well);
else
i830_pipes_power_well_disable(dev_priv, power_well);
}
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
@ -1880,6 +1961,15 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define I830_PIPES_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PIPE_A) | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_INIT))
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = i9xx_always_on_power_well_noop,
@ -1910,6 +2000,27 @@ static struct i915_power_well i9xx_always_on_power_well[] = {
},
};
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
.sync_hw = i830_pipes_power_well_sync_hw,
.enable = i830_pipes_power_well_enable,
.disable = i830_pipes_power_well_disable,
.is_enabled = i830_pipes_power_well_enabled,
};
static struct i915_power_well i830_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "pipes",
.domains = I830_PIPES_POWER_DOMAINS,
.ops = &i830_pipes_power_well_ops,
},
};
static const struct i915_power_well_ops hsw_power_well_ops = {
.sync_hw = hsw_power_well_sync_hw,
.enable = hsw_power_well_enable,
@ -2275,6 +2386,82 @@ static struct i915_power_well glk_power_wells[] = {
},
};
static struct i915_power_well cnl_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "power well 1",
/* Handled by the DMC firmware */
.domains = 0,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_1,
},
{
.name = "AUX A",
.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = CNL_DISP_PW_AUX_A,
},
{
.name = "AUX B",
.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = CNL_DISP_PW_AUX_B,
},
{
.name = "AUX C",
.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = CNL_DISP_PW_AUX_C,
},
{
.name = "AUX D",
.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = CNL_DISP_PW_AUX_D,
},
{
.name = "DC off",
.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
.id = SKL_DISP_PW_DC_OFF,
},
{
.name = "power well 2",
.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_2,
},
{
.name = "DDI A IO power well",
.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = CNL_DISP_PW_DDI_A,
},
{
.name = "DDI B IO power well",
.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_DDI_B,
},
{
.name = "DDI C IO power well",
.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_DDI_C,
},
{
.name = "DDI D IO power well",
.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_DDI_D,
},
};
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
@ -2369,6 +2556,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
set_power_wells(power_domains, bdw_power_wells);
} else if (IS_GEN9_BC(dev_priv)) {
set_power_wells(power_domains, skl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
set_power_wells(power_domains, cnl_power_wells);
} else if (IS_BROXTON(dev_priv)) {
set_power_wells(power_domains, bxt_power_wells);
} else if (IS_GEMINILAKE(dev_priv)) {
@ -2377,6 +2566,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
set_power_wells(power_domains, chv_power_wells);
} else if (IS_VALLEYVIEW(dev_priv)) {
set_power_wells(power_domains, vlv_power_wells);
} else if (IS_I830(dev_priv)) {
set_power_wells(power_domains, i830_power_wells);
} else {
set_power_wells(power_domains, i9xx_always_on_power_well);
}
@ -2569,6 +2760,111 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
#define CNL_PROCMON_IDX(val) \
(((val) & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) >> VOLTAGE_INFO_SHIFT)
#define NUM_CNL_PROCMON \
(CNL_PROCMON_IDX(VOLTAGE_INFO_MASK | PROCESS_INFO_MASK) + 1)
static const struct cnl_procmon {
u32 dw1, dw9, dw10;
} cnl_procmon_values[NUM_CNL_PROCMON] = {
[CNL_PROCMON_IDX(VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0)] =
{ .dw1 = 0x00 << 16, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
[CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0)] =
{ .dw1 = 0x00 << 16, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
[CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1)] =
{ .dw1 = 0x00 << 16, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
[CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0)] =
{ .dw1 = 0x00 << 16, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
[CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1)] =
{ .dw1 = 0x44 << 16, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
};
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
const struct cnl_procmon *procmon;
struct i915_power_well *well;
u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* 1. Enable PCH Reset Handshake */
val = I915_READ(HSW_NDE_RSTWRN_OPT);
val |= RESET_PCH_HANDSHAKE_ENABLE;
I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
/* 2. Enable Comp */
val = I915_READ(CHICKEN_MISC_2);
val &= ~COMP_PWR_DOWN;
I915_WRITE(CHICKEN_MISC_2, val);
val = I915_READ(CNL_PORT_COMP_DW3);
procmon = &cnl_procmon_values[CNL_PROCMON_IDX(val)];
WARN_ON(procmon->dw10 == 0);
val = I915_READ(CNL_PORT_COMP_DW1);
val &= ~((0xff << 16) | 0xff);
val |= procmon->dw1;
I915_WRITE(CNL_PORT_COMP_DW1, val);
I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9);
I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10);
val = I915_READ(CNL_PORT_COMP_DW0);
val |= COMP_INIT;
I915_WRITE(CNL_PORT_COMP_DW0, val);
/* 3. */
val = I915_READ(CNL_PORT_CL1CM_DW5);
val |= CL_POWER_DOWN_ENABLE;
I915_WRITE(CNL_PORT_CL1CM_DW5, val);
/* 4. Enable Power Well 1 (PG1) and Aux IO Power */
mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_enable(dev_priv, well);
mutex_unlock(&power_domains->lock);
/* 5. Enable CD clock */
cnl_init_cdclk(dev_priv);
/* 6. Enable DBUF */
gen9_dbuf_enable(dev_priv);
}
#undef CNL_PROCMON_IDX
#undef NUM_CNL_PROCMON
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* 1. Disable all display engine functions -> already done */
/* 2. Disable DBUF */
gen9_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
cnl_uninit_cdclk(dev_priv);
/* 4. Disable Power Well 1 (PG1) and Aux IO Power */
mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
/* 5. Disable Comp */
val = I915_READ(CHICKEN_MISC_2);
val |= COMP_PWR_DOWN;
I915_WRITE(CHICKEN_MISC_2, val);
}
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn_bc =
@ -2701,7 +2997,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
power_domains->initializing = true;
if (IS_GEN9_BC(dev_priv)) {
if (IS_CANNONLAKE(dev_priv)) {
cnl_display_core_init(dev_priv, resume);
} else if (IS_GEN9_BC(dev_priv)) {
skl_display_core_init(dev_priv, resume);
} else if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_init(dev_priv, resume);
@ -2740,7 +3038,9 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
if (!i915.disable_power_well)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
if (IS_GEN9_BC(dev_priv))
if (IS_CANNONLAKE(dev_priv))
cnl_display_core_uninit(dev_priv);
else if (IS_GEN9_BC(dev_priv))
skl_display_core_uninit(dev_priv);
else if (IS_GEN9_LP(dev_priv))
bxt_display_core_uninit(dev_priv);


@ -99,13 +99,6 @@ struct intel_sdvo {
*/
uint16_t hotplug_active;
/**
* This is used to select the color range of RGB outputs in HDMI mode.
* It is only valid when using TMDS encoding and 8 bit per color mode.
*/
uint32_t color_range;
bool color_range_auto;
/**
* This is set if we're going to treat the device as TV-out.
*
@ -117,9 +110,6 @@ struct intel_sdvo {
enum port port;
/* This is for current tv format name */
int tv_format_index;
/**
* This is set if we treat the device as HDMI, instead of DVI.
*/
@ -154,8 +144,6 @@ struct intel_sdvo_connector {
/* Mark the type of connector */
uint16_t output_flag;
enum hdmi_force_audio force_audio;
/* This contains all current supported TV format */
u8 tv_format_supported[TV_FORMAT_NUM];
int format_supported_num;
@ -182,24 +170,19 @@ struct intel_sdvo_connector {
/* add the property for the SDVO-TV/LVDS */
struct drm_property *brightness;
/* Add variable to record current setting for the above property */
u32 left_margin, right_margin, top_margin, bottom_margin;
/* this is to get the range of margin.*/
u32 max_hscan, max_vscan;
u32 max_hpos, cur_hpos;
u32 max_vpos, cur_vpos;
u32 cur_brightness, max_brightness;
u32 cur_contrast, max_contrast;
u32 cur_saturation, max_saturation;
u32 cur_hue, max_hue;
u32 cur_sharpness, max_sharpness;
u32 cur_flicker_filter, max_flicker_filter;
u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
u32 cur_flicker_filter_2d, max_flicker_filter_2d;
u32 cur_tv_chroma_filter, max_tv_chroma_filter;
u32 cur_tv_luma_filter, max_tv_luma_filter;
u32 cur_dot_crawl, max_dot_crawl;
u32 max_hscan, max_vscan;
};
struct intel_sdvo_connector_state {
/* base.base: tv.saturation/contrast/hue/brightness */
struct intel_digital_connector_state base;
struct {
unsigned overscan_h, overscan_v, hpos, vpos, sharpness;
unsigned flicker_filter, flicker_filter_2d, flicker_filter_adaptive;
unsigned chroma_filter, luma_filter, dot_crawl;
} tv;
};
static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
@ -212,9 +195,16 @@ static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
return to_sdvo(intel_attached_encoder(connector));
}
static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
static struct intel_sdvo_connector *
to_intel_sdvo_connector(struct drm_connector *connector)
{
return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base);
return container_of(connector, struct intel_sdvo_connector, base.base);
}
static struct intel_sdvo_connector_state *
to_intel_sdvo_connector_state(struct drm_connector_state *conn_state)
{
return container_of(conn_state, struct intel_sdvo_connector_state, base.base);
}
static bool
@ -1030,12 +1020,13 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
sdvo_data, sizeof(sdvo_data));
}
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
struct drm_connector_state *conn_state)
{
struct intel_sdvo_tv_format format;
uint32_t format_map;
format_map = 1 << intel_sdvo->tv_format_index;
format_map = 1 << conn_state->tv.mode;
memset(&format, 0, sizeof(format));
memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
@ -1122,6 +1113,8 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_connector_state *intel_sdvo_state =
to_intel_sdvo_connector_state(conn_state);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct drm_display_mode *mode = &pipe_config->base.mode;
@ -1160,9 +1153,14 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
pipe_config->pixel_multiplier =
intel_sdvo_get_pixel_multiplier(adjusted_mode);
pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor;
if (intel_sdvo_state->base.force_audio != HDMI_AUDIO_OFF_DVI)
pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor;
if (intel_sdvo->color_range_auto) {
if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON ||
(intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO && intel_sdvo->has_hdmi_audio))
pipe_config->has_audio = true;
if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
/* FIXME: This bit is only valid when using TMDS encoding and 8
* bit per color mode. */
@ -1171,7 +1169,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
pipe_config->limited_color_range = true;
} else {
if (pipe_config->has_hdmi_sink &&
intel_sdvo->color_range == HDMI_COLOR_RANGE_16_235)
intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED)
pipe_config->limited_color_range = true;
}
@ -1186,6 +1184,68 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
return true;
}
#define UPDATE_PROPERTY(input, NAME) \
do { \
val = input; \
intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_##NAME, &val, sizeof(val)); \
} while (0)
static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector_state *sdvo_state)
{
struct drm_connector_state *conn_state = &sdvo_state->base.base;
struct intel_sdvo_connector *intel_sdvo_conn =
to_intel_sdvo_connector(conn_state->connector);
uint16_t val;
if (intel_sdvo_conn->left)
UPDATE_PROPERTY(sdvo_state->tv.overscan_h, OVERSCAN_H);
if (intel_sdvo_conn->top)
UPDATE_PROPERTY(sdvo_state->tv.overscan_v, OVERSCAN_V);
if (intel_sdvo_conn->hpos)
UPDATE_PROPERTY(sdvo_state->tv.hpos, HPOS);
if (intel_sdvo_conn->vpos)
UPDATE_PROPERTY(sdvo_state->tv.vpos, VPOS);
if (intel_sdvo_conn->saturation)
UPDATE_PROPERTY(conn_state->tv.saturation, SATURATION);
if (intel_sdvo_conn->contrast)
UPDATE_PROPERTY(conn_state->tv.contrast, CONTRAST);
if (intel_sdvo_conn->hue)
UPDATE_PROPERTY(conn_state->tv.hue, HUE);
if (intel_sdvo_conn->brightness)
UPDATE_PROPERTY(conn_state->tv.brightness, BRIGHTNESS);
if (intel_sdvo_conn->sharpness)
UPDATE_PROPERTY(sdvo_state->tv.sharpness, SHARPNESS);
if (intel_sdvo_conn->flicker_filter)
UPDATE_PROPERTY(sdvo_state->tv.flicker_filter, FLICKER_FILTER);
if (intel_sdvo_conn->flicker_filter_2d)
UPDATE_PROPERTY(sdvo_state->tv.flicker_filter_2d, FLICKER_FILTER_2D);
if (intel_sdvo_conn->flicker_filter_adaptive)
UPDATE_PROPERTY(sdvo_state->tv.flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
if (intel_sdvo_conn->tv_chroma_filter)
UPDATE_PROPERTY(sdvo_state->tv.chroma_filter, TV_CHROMA_FILTER);
if (intel_sdvo_conn->tv_luma_filter)
UPDATE_PROPERTY(sdvo_state->tv.luma_filter, TV_LUMA_FILTER);
if (intel_sdvo_conn->dot_crawl)
UPDATE_PROPERTY(sdvo_state->tv.dot_crawl, DOT_CRAWL);
#undef UPDATE_PROPERTY
}
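To make UPDATE_PROPERTY() concrete, the guarded hue update above expands to roughly the following (a sketch of the macro expansion, not literal preprocessor output):

if (intel_sdvo_conn->hue) {
	do {
		val = conn_state->tv.hue;
		intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_HUE,
				     &val, sizeof(val));
	} while (0);
}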
static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
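Editor's note on the hunk above: with the move to atomic state, intel_sdvo_compute_config() now derives has_hdmi_sink, has_audio and limited_color_range from the connector state, and the new intel_sdvo_update_props() pushes every TV-enhancement value to the encoder during pre_enable. The UPDATE_PROPERTY helper wraps its body in do { } while (0) so each expansion remains a single statement and composes safely with the unbraced "if (intel_sdvo_conn->left)" style callers. A minimal standalone sketch of that idiom (set_value() and the command byte are hypothetical stand-ins, not the driver's API):

/*
 * Sketch only: shows why UPDATE_PROPERTY needs the do/while(0) wrapper.
 * set_value() and 0x62 are invented stand-ins for intel_sdvo_set_value()
 * and an SDVO opcode.
 */
#include <stdio.h>
#include <stdint.h>

static void set_value(uint8_t cmd, const void *data, size_t len)
{
	printf("cmd %#x: %zu byte(s)\n", cmd, len);
}

#define UPDATE(input, cmd) \
	do { \
		uint16_t val = (input); \
		set_value(cmd, &val, sizeof(val)); \
	} while (0)

int main(void)
{
	int have_hpos = 1;

	if (have_hpos)
		UPDATE(42, 0x62);	/* expands to one statement: no dangling-else surprise */
	else
		UPDATE(0, 0x62);

	return 0;
}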
@@ -1193,6 +1253,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(conn_state);
struct drm_display_mode *mode = &crtc_state->base.mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
@@ -1200,6 +1261,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
struct intel_sdvo_dtd input_dtd, output_dtd;
int rate;
intel_sdvo_update_props(intel_sdvo, sdvo_state);
/* First, set the input mapping for the first input to our controlled
* output. This is only correct if we're a single-input device, in
* which case the first input is the output from the appropriate SDVO
@@ -1241,7 +1304,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
if (intel_sdvo->is_tv &&
!intel_sdvo_set_tv_format(intel_sdvo))
!intel_sdvo_set_tv_format(intel_sdvo, conn_state))
return;
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
@@ -1285,7 +1348,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
if (intel_sdvo->has_hdmi_audio)
if (crtc_state->has_audio)
sdvox |= SDVO_AUDIO_ENABLE;
if (INTEL_GEN(dev_priv) >= 4) {
@@ -1694,12 +1757,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
kfree(edid);
}
if (status == connector_status_connected) {
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO)
intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON);
}
return status;
}
@@ -1879,6 +1936,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
const struct drm_connector_state *conn_state = connector->state;
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
int i;
@@ -1889,7 +1947,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
/* Read the list of supported input resolutions for the selected TV
* format.
*/
format_map = 1 << intel_sdvo->tv_format_index;
format_map = 1 << conn_state->tv.mode;
memcpy(&tv_res, &format_map,
min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
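The change above replaces the encoder-cached tv_format_index with the atomic conn_state->tv.mode, which indexes the supported-format table; the selected format is encoded as a one-hot bitmask whose first bytes are copied into the resolution request. A small self-contained illustration with made-up values (the 3-byte struct stands in for intel_sdvo_sdtv_resolution_request, and the byte dump assumes a little-endian host):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct resolution_request {
	uint8_t data[3];	/* illustrative stand-in, not the real layout */
};

int main(void)
{
	unsigned int mode = 3;			/* e.g. conn_state->tv.mode */
	uint32_t format_map = 1u << mode;	/* one-hot: 0x00000008 */
	struct resolution_request req = { {0} };
	size_t n = sizeof(format_map) < sizeof(req) ?
		   sizeof(format_map) : sizeof(req);	/* min(), as the driver does */

	memcpy(&req, &format_map, n);	/* here: copies 3 of the 4 bytes */
	printf("format_map %#010x -> req %02x %02x %02x\n",
	       format_map, req.data[0], req.data[1], req.data[2]);
	return 0;
}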
@@ -1978,192 +2036,121 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
kfree(intel_sdvo_connector);
}
static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
static int
intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct edid *edid;
bool has_audio = false;
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state((void *)state);
if (!intel_sdvo->is_hdmi)
return false;
if (property == intel_sdvo_connector->tv_format) {
int i;
edid = intel_sdvo_get_edid(connector);
if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) {
*val = i;
return has_audio;
return 0;
}
WARN_ON(1);
*val = 0;
} else if (property == intel_sdvo_connector->top ||
property == intel_sdvo_connector->bottom)
*val = intel_sdvo_connector->max_vscan - sdvo_state->tv.overscan_v;
else if (property == intel_sdvo_connector->left ||
property == intel_sdvo_connector->right)
*val = intel_sdvo_connector->max_hscan - sdvo_state->tv.overscan_h;
else if (property == intel_sdvo_connector->hpos)
*val = sdvo_state->tv.hpos;
else if (property == intel_sdvo_connector->vpos)
*val = sdvo_state->tv.vpos;
else if (property == intel_sdvo_connector->saturation)
*val = state->tv.saturation;
else if (property == intel_sdvo_connector->contrast)
*val = state->tv.contrast;
else if (property == intel_sdvo_connector->hue)
*val = state->tv.hue;
else if (property == intel_sdvo_connector->brightness)
*val = state->tv.brightness;
else if (property == intel_sdvo_connector->sharpness)
*val = sdvo_state->tv.sharpness;
else if (property == intel_sdvo_connector->flicker_filter)
*val = sdvo_state->tv.flicker_filter;
else if (property == intel_sdvo_connector->flicker_filter_2d)
*val = sdvo_state->tv.flicker_filter_2d;
else if (property == intel_sdvo_connector->flicker_filter_adaptive)
*val = sdvo_state->tv.flicker_filter_adaptive;
else if (property == intel_sdvo_connector->tv_chroma_filter)
*val = sdvo_state->tv.chroma_filter;
else if (property == intel_sdvo_connector->tv_luma_filter)
*val = sdvo_state->tv.luma_filter;
else if (property == intel_sdvo_connector->dot_crawl)
*val = sdvo_state->tv.dot_crawl;
else
return intel_digital_connector_atomic_get_property(connector, state, property, val);
return 0;
}
static int
intel_sdvo_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
uint16_t temp_value;
uint8_t cmd;
int ret;
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
if (property == dev_priv->force_audio_property) {
int i = val;
bool has_audio;
if (i == intel_sdvo_connector->force_audio)
return 0;
intel_sdvo_connector->force_audio = i;
if (i == HDMI_AUDIO_AUTO)
has_audio = intel_sdvo_detect_hdmi_audio(connector);
else
has_audio = (i == HDMI_AUDIO_ON);
if (has_audio == intel_sdvo->has_hdmi_audio)
return 0;
intel_sdvo->has_hdmi_audio = has_audio;
goto done;
}
if (property == dev_priv->broadcast_rgb_property) {
bool old_auto = intel_sdvo->color_range_auto;
uint32_t old_range = intel_sdvo->color_range;
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
intel_sdvo->color_range_auto = true;
break;
case INTEL_BROADCAST_RGB_FULL:
intel_sdvo->color_range_auto = false;
intel_sdvo->color_range = 0;
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_sdvo->color_range_auto = false;
/* FIXME: this bit is only valid when using TMDS
* encoding and 8 bit per color mode. */
intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
break;
default:
return -EINVAL;
}
if (old_auto == intel_sdvo->color_range_auto &&
old_range == intel_sdvo->color_range)
return 0;
goto done;
}
if (property == connector->dev->mode_config.aspect_ratio_property) {
connector->state->picture_aspect_ratio = val;
goto done;
}
#define CHECK_PROPERTY(name, NAME) \
if (intel_sdvo_connector->name == property) { \
if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
cmd = SDVO_CMD_SET_##NAME; \
intel_sdvo_connector->cur_##name = temp_value; \
goto set_value; \
}
struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
if (property == intel_sdvo_connector->tv_format) {
if (val >= TV_FORMAT_NUM)
return -EINVAL;
state->tv.mode = intel_sdvo_connector->tv_format_supported[val];
if (intel_sdvo->tv_format_index ==
intel_sdvo_connector->tv_format_supported[val])
return 0;
if (state->crtc) {
struct drm_crtc_state *crtc_state =
drm_atomic_get_new_crtc_state(state->state, state->crtc);
intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val];
goto done;
} else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
temp_value = val;
if (intel_sdvo_connector->left == property) {
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->right, val);
if (intel_sdvo_connector->left_margin == temp_value)
return 0;
intel_sdvo_connector->left_margin = temp_value;
intel_sdvo_connector->right_margin = temp_value;
temp_value = intel_sdvo_connector->max_hscan -
intel_sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->right == property) {
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->left, val);
if (intel_sdvo_connector->right_margin == temp_value)
return 0;
intel_sdvo_connector->left_margin = temp_value;
intel_sdvo_connector->right_margin = temp_value;
temp_value = intel_sdvo_connector->max_hscan -
intel_sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->top == property) {
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->bottom, val);
if (intel_sdvo_connector->top_margin == temp_value)
return 0;
intel_sdvo_connector->top_margin = temp_value;
intel_sdvo_connector->bottom_margin = temp_value;
temp_value = intel_sdvo_connector->max_vscan -
intel_sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value;
} else if (intel_sdvo_connector->bottom == property) {
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->top, val);
if (intel_sdvo_connector->bottom_margin == temp_value)
return 0;
intel_sdvo_connector->top_margin = temp_value;
intel_sdvo_connector->bottom_margin = temp_value;
temp_value = intel_sdvo_connector->max_vscan -
intel_sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value;
crtc_state->connectors_changed = true;
}
CHECK_PROPERTY(hpos, HPOS)
CHECK_PROPERTY(vpos, VPOS)
CHECK_PROPERTY(saturation, SATURATION)
CHECK_PROPERTY(contrast, CONTRAST)
CHECK_PROPERTY(hue, HUE)
CHECK_PROPERTY(brightness, BRIGHTNESS)
CHECK_PROPERTY(sharpness, SHARPNESS)
CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
}
return -EINVAL; /* unknown property */
set_value:
if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2))
return -EIO;
done:
if (intel_sdvo->base.base.crtc)
intel_crtc_restore_mode(intel_sdvo->base.base.crtc);
} else if (property == intel_sdvo_connector->top ||
property == intel_sdvo_connector->bottom)
/* Cannot set these independently of each other */
sdvo_state->tv.overscan_v = intel_sdvo_connector->max_vscan - val;
else if (property == intel_sdvo_connector->left ||
property == intel_sdvo_connector->right)
/* Cannot set these independently of each other */
sdvo_state->tv.overscan_h = intel_sdvo_connector->max_hscan - val;
else if (property == intel_sdvo_connector->hpos)
sdvo_state->tv.hpos = val;
else if (property == intel_sdvo_connector->vpos)
sdvo_state->tv.vpos = val;
else if (property == intel_sdvo_connector->saturation)
state->tv.saturation = val;
else if (property == intel_sdvo_connector->contrast)
state->tv.contrast = val;
else if (property == intel_sdvo_connector->hue)
state->tv.hue = val;
else if (property == intel_sdvo_connector->brightness)
state->tv.brightness = val;
else if (property == intel_sdvo_connector->sharpness)
sdvo_state->tv.sharpness = val;
else if (property == intel_sdvo_connector->flicker_filter)
sdvo_state->tv.flicker_filter = val;
else if (property == intel_sdvo_connector->flicker_filter_2d)
sdvo_state->tv.flicker_filter_2d = val;
else if (property == intel_sdvo_connector->flicker_filter_adaptive)
sdvo_state->tv.flicker_filter_adaptive = val;
else if (property == intel_sdvo_connector->tv_chroma_filter)
sdvo_state->tv.chroma_filter = val;
else if (property == intel_sdvo_connector->tv_luma_filter)
sdvo_state->tv.luma_filter = val;
else if (property == intel_sdvo_connector->dot_crawl)
sdvo_state->tv.dot_crawl = val;
else
return intel_digital_connector_atomic_set_property(connector, state, property, val);
return 0;
#undef CHECK_PROPERTY
}
static int
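A note on the margin properties handled above: the hardware state stores overscan while userspace sees margins, so the atomic set hook stores max - val and the get hook returns max - overscan. That is also why left/right (and top/bottom) cannot be set independently of each other: both map onto the single overscan_h (overscan_v) field. A tiny round-trip check with invented numbers:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t max_hscan = 100;	/* cf. intel_sdvo_connector->max_hscan */
	uint64_t user_val = 30;		/* "left_margin" written by userspace */

	/* atomic_set_property: tv.overscan_h = max_hscan - val */
	uint16_t overscan_h = max_hscan - user_val;

	/* atomic_get_property: *val = max_hscan - tv.overscan_h */
	uint64_t read_back = max_hscan - overscan_h;

	assert(read_back == user_val);	/* the property round-trips */
	return 0;
}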
@@ -2191,22 +2178,61 @@ intel_sdvo_connector_unregister(struct drm_connector *connector)
intel_connector_unregister(connector);
}
static struct drm_connector_state *
intel_sdvo_connector_duplicate_state(struct drm_connector *connector)
{
struct intel_sdvo_connector_state *state;
state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_connector_duplicate_state(connector, &state->base.base);
return &state->base.base;
}
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
.set_property = drm_atomic_helper_connector_set_property,
.atomic_get_property = intel_sdvo_connector_atomic_get_property,
.atomic_set_property = intel_sdvo_connector_atomic_set_property,
.late_register = intel_sdvo_connector_register,
.early_unregister = intel_sdvo_connector_unregister,
.destroy = intel_sdvo_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_duplicate_state = intel_sdvo_connector_duplicate_state,
};
static int intel_sdvo_atomic_check(struct drm_connector *conn,
struct drm_connector_state *new_conn_state)
{
struct drm_atomic_state *state = new_conn_state->state;
struct drm_connector_state *old_conn_state =
drm_atomic_get_old_connector_state(state, conn);
struct intel_sdvo_connector_state *old_state =
to_intel_sdvo_connector_state(old_conn_state);
struct intel_sdvo_connector_state *new_state =
to_intel_sdvo_connector_state(new_conn_state);
if (new_conn_state->crtc &&
(memcmp(&old_state->tv, &new_state->tv, sizeof(old_state->tv)) ||
memcmp(&old_conn_state->tv, &new_conn_state->tv, sizeof(old_conn_state->tv)))) {
struct drm_crtc_state *crtc_state =
drm_atomic_get_new_crtc_state(new_conn_state->state,
new_conn_state->crtc);
crtc_state->connectors_changed = true;
}
return intel_digital_connector_atomic_check(conn, new_conn_state);
}
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
.get_modes = intel_sdvo_get_modes,
.mode_valid = intel_sdvo_mode_valid,
.atomic_check = intel_sdvo_atomic_check,
};
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
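The duplicate_state hook above follows the usual subclassed-state pattern: kmemdup() copies the whole intel_sdvo_connector_state (so the private tv fields survive), then __drm_atomic_helper_connector_duplicate_state() re-initialises the embedded base. The companion atomic_check forces a modeset (connectors_changed) whenever any tv property differs, giving intel_sdvo_update_props() a chance to re-send them. A hedged sketch of the duplication pattern with stand-in types (not the DRM API):

#include <stdlib.h>
#include <string.h>

struct base_state { int refcount; };

struct derived_state {
	struct base_state base;
	int overscan_h, overscan_v;	/* driver-private extras */
};

static void base_duplicate_fixup(struct base_state *s)
{
	s->refcount = 1;	/* what the core helper would re-initialise */
}

static struct base_state *derived_duplicate(const struct derived_state *cur)
{
	struct derived_state *copy = malloc(sizeof(*copy));

	if (!copy)
		return NULL;
	memcpy(copy, cur, sizeof(*copy));	/* kmemdup(): extras survive */
	base_duplicate_fixup(&copy->base);
	return &copy->base;
}

int main(void)
{
	struct derived_state cur = { { 3 }, 7, 9 };

	free(derived_duplicate(&cur));	/* base is the first member, so this frees the copy */
	return 0;
}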
@@ -2398,7 +2424,6 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
intel_attach_force_audio_property(&connector->base.base);
if (INTEL_GEN(dev_priv) >= 4 && IS_MOBILE(dev_priv)) {
intel_attach_broadcast_rgb_property(&connector->base.base);
intel_sdvo->color_range_auto = true;
}
intel_attach_aspect_ratio_property(&connector->base.base);
connector->base.base.state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
@@ -2407,16 +2432,21 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
{
struct intel_sdvo_connector *sdvo_connector;
struct intel_sdvo_connector_state *conn_state;
sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
if (!sdvo_connector)
return NULL;
if (intel_connector_init(&sdvo_connector->base) < 0) {
conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
if (!conn_state) {
kfree(sdvo_connector);
return NULL;
}
__drm_atomic_helper_connector_reset(&sdvo_connector->base.base,
&conn_state->base.base);
return sdvo_connector;
}
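The allocation change above pairs the connector with a preallocated subclassed state and hands both to __drm_atomic_helper_connector_reset(); note the unwind path freeing the connector when the state allocation fails. A standalone sketch of that paired-allocation pattern (every name here is illustrative, not the kernel API):

#include <stdlib.h>

struct state { int dummy; };		/* cf. intel_sdvo_connector_state */
struct conn { struct state *state; };	/* cf. intel_sdvo_connector */

static struct conn *conn_alloc(void)
{
	struct conn *c = calloc(1, sizeof(*c));	/* kzalloc() analogue */
	struct state *s;

	if (!c)
		return NULL;
	s = calloc(1, sizeof(*s));
	if (!s) {
		free(c);	/* unwind the first allocation, as the hunk does */
		return NULL;
	}
	c->state = s;	/* __drm_atomic_helper_connector_reset() links them */
	return c;
}

int main(void)
{
	struct conn *c = conn_alloc();

	if (c) {
		free(c->state);
		free(c);
	}
	return 0;
}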
@@ -2708,31 +2738,31 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->tv_format, i,
i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
drm_object_attach_property(&intel_sdvo_connector->base.base.base,
intel_sdvo_connector->tv_format, 0);
intel_sdvo_connector->tv_format, 0);
return true;
}
#define ENHANCEMENT(name, NAME) do { \
#define _ENHANCEMENT(state_assignment, name, NAME) do { \
if (enhancements.name) { \
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
return false; \
intel_sdvo_connector->max_##name = data_value[0]; \
intel_sdvo_connector->cur_##name = response; \
intel_sdvo_connector->name = \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!intel_sdvo_connector->name) return false; \
state_assignment = response; \
drm_object_attach_property(&connector->base, \
intel_sdvo_connector->name, \
intel_sdvo_connector->cur_##name); \
intel_sdvo_connector->name, 0); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
data_value[0], data_value[1], response); \
} \
} while (0)
#define ENHANCEMENT(state, name, NAME) _ENHANCEMENT((state)->name, name, NAME)
static bool
intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
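The macro rework above splits ENHANCEMENT in two: the outer ENHANCEMENT(state, name, NAME) derives the storage lvalue as (state)->name, while _ENHANCEMENT takes an explicit lvalue for the cases where property name and state field differ (tv_chroma_filter storing into tv.chroma_filter, and likewise for luma). A toy reduction of the pattern (stand-in macros, not the driver's):

#include <stdio.h>

struct tv_state { int hpos; int chroma_filter; };

#define _STORE(lvalue, name) \
	do { \
		(lvalue) = 1; \
		printf(#name " stored\n"); \
	} while (0)

#define STORE(state, name) _STORE((state)->name, name)

int main(void)
{
	struct tv_state tv = { 0, 0 };

	STORE(&tv, hpos);				/* field name == property name */
	_STORE(tv.chroma_filter, tv_chroma_filter);	/* names differ */
	return 0;
}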
@@ -2740,6 +2770,9 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
{
struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_connector *connector = &intel_sdvo_connector->base.base;
struct drm_connector_state *conn_state = connector->state;
struct intel_sdvo_connector_state *sdvo_state =
to_intel_sdvo_connector_state(conn_state);
uint16_t response, data_value[2];
/* When horizontal overscan is supported, add the left/right property */
@@ -2754,17 +2787,16 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
&response, 2))
return false;
sdvo_state->tv.overscan_h = response;
intel_sdvo_connector->max_hscan = data_value[0];
intel_sdvo_connector->left_margin = data_value[0] - response;
intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
intel_sdvo_connector->left =
drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
if (!intel_sdvo_connector->left)
return false;
drm_object_attach_property(&connector->base,
intel_sdvo_connector->left,
intel_sdvo_connector->left_margin);
intel_sdvo_connector->left, 0);
intel_sdvo_connector->right =
drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
@@ -2772,8 +2804,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
return false;
drm_object_attach_property(&connector->base,
intel_sdvo_connector->right,
intel_sdvo_connector->right_margin);
intel_sdvo_connector->right, 0);
DRM_DEBUG_KMS("h_overscan: max %d, "
"default %d, current %d\n",
data_value[0], data_value[1], response);
@@ -2790,9 +2821,9 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
&response, 2))
return false;
sdvo_state->tv.overscan_v = response;
intel_sdvo_connector->max_vscan = data_value[0];
intel_sdvo_connector->top_margin = data_value[0] - response;
intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
intel_sdvo_connector->top =
drm_property_create_range(dev, 0,
"top_margin", 0, data_value[0]);
@@ -2800,8 +2831,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
return false;
drm_object_attach_property(&connector->base,
intel_sdvo_connector->top,
intel_sdvo_connector->top_margin);
intel_sdvo_connector->top, 0);
intel_sdvo_connector->bottom =
drm_property_create_range(dev, 0,
@@ -2810,40 +2840,37 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
return false;
drm_object_attach_property(&connector->base,
intel_sdvo_connector->bottom,
intel_sdvo_connector->bottom_margin);
intel_sdvo_connector->bottom, 0);
DRM_DEBUG_KMS("v_overscan: max %d, "
"default %d, current %d\n",
data_value[0], data_value[1], response);
}
ENHANCEMENT(hpos, HPOS);
ENHANCEMENT(vpos, VPOS);
ENHANCEMENT(saturation, SATURATION);
ENHANCEMENT(contrast, CONTRAST);
ENHANCEMENT(hue, HUE);
ENHANCEMENT(sharpness, SHARPNESS);
ENHANCEMENT(brightness, BRIGHTNESS);
ENHANCEMENT(flicker_filter, FLICKER_FILTER);
ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
ENHANCEMENT(&sdvo_state->tv, hpos, HPOS);
ENHANCEMENT(&sdvo_state->tv, vpos, VPOS);
ENHANCEMENT(&conn_state->tv, saturation, SATURATION);
ENHANCEMENT(&conn_state->tv, contrast, CONTRAST);
ENHANCEMENT(&conn_state->tv, hue, HUE);
ENHANCEMENT(&conn_state->tv, brightness, BRIGHTNESS);
ENHANCEMENT(&sdvo_state->tv, sharpness, SHARPNESS);
ENHANCEMENT(&sdvo_state->tv, flicker_filter, FLICKER_FILTER);
ENHANCEMENT(&sdvo_state->tv, flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
ENHANCEMENT(&sdvo_state->tv, flicker_filter_2d, FLICKER_FILTER_2D);
_ENHANCEMENT(sdvo_state->tv.chroma_filter, tv_chroma_filter, TV_CHROMA_FILTER);
_ENHANCEMENT(sdvo_state->tv.luma_filter, tv_luma_filter, TV_LUMA_FILTER);
if (enhancements.dot_crawl) {
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
return false;
intel_sdvo_connector->max_dot_crawl = 1;
intel_sdvo_connector->cur_dot_crawl = response & 0x1;
sdvo_state->tv.dot_crawl = response & 0x1;
intel_sdvo_connector->dot_crawl =
drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
if (!intel_sdvo_connector->dot_crawl)
return false;
drm_object_attach_property(&connector->base,
intel_sdvo_connector->dot_crawl,
intel_sdvo_connector->cur_dot_crawl);
intel_sdvo_connector->dot_crawl, 0);
DRM_DEBUG_KMS("dot crawl: current %d\n", response);
}
@@ -2859,11 +2886,12 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
struct drm_connector *connector = &intel_sdvo_connector->base.base;
uint16_t response, data_value[2];
ENHANCEMENT(brightness, BRIGHTNESS);
ENHANCEMENT(&connector->state->tv, brightness, BRIGHTNESS);
return true;
}
#undef ENHANCEMENT
#undef _ENHANCEMENT
static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector)
